Posted to commits@iotdb.apache.org by qi...@apache.org on 2019/06/22 09:32:55 UTC

[incubator-iotdb] branch feature_async_close_tsfile updated (0e52c55 -> ce6770c)

This is an automated email from the ASF dual-hosted git repository.

qiaojialin pushed a change to branch feature_async_close_tsfile
in repository https://gitbox.apache.org/repos/asf/incubator-iotdb.git.


    from 0e52c55  Merge branch 'feature_async_close_tsfile' of https://github.com/apache/incubator-iotdb into feature_async_close_tsfile
     new d66a237  add logNode in unsealedTsFileprocessor and change the insert record interface to InsertPlan
     new 9e4acf8  add unseq doc
     new ce6770c  remove FileNodeManager

The 3 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 iotdb/iotdb/conf/iotdb-engine.properties           |   17 +-
 .../java/org/apache/iotdb/db/conf/IoTDBConfig.java |    1 +
 .../{Directories.java => DirectoryManager.java}    |   11 +-
 .../java/org/apache/iotdb/db/engine/Processor.java |    2 -
 .../engine/bufferwrite/BufferWriteProcessor.java   |    3 +-
 .../iotdb/db/engine/filenode/FileNodeManager.java  | 2630 ++++++------
 .../db/engine/filenode/FileNodeProcessor.java      | 4232 ++++++++++----------
 .../iotdb/db/engine/filenode/TsFileResource.java   |   11 +-
 .../db/engine/filenodeV2/FileNodeManagerV2.java    |   21 +-
 .../db/engine/filenodeV2/FileNodeProcessorV2.java  |   75 +-
 .../db/engine/filenodeV2/TsFileResourceV2.java     |   16 +
 .../filenodeV2/UnsealedTsFileProcessorV2.java      |   26 +-
 .../db/engine/memcontrol/FlushPartialPolicy.java   |    9 +-
 .../db/engine/memcontrol/ForceFLushAllPolicy.java  |   11 +-
 .../iotdb/db/engine/memtable/AbstractMemTable.java |   14 +-
 .../apache/iotdb/db/engine/memtable/IMemTable.java |    3 +-
 .../db/engine/overflow/io/OverflowMemtable.java    |  111 -
 .../db/engine/overflow/io/OverflowProcessor.java   |  819 ----
 .../org/apache/iotdb/db/monitor/StatMonitor.java   |    9 +-
 .../iotdb/db/monitor/collector/FileSize.java       |    9 +-
 .../db/qp/executor/IQueryProcessExecutor.java      |   18 +-
 .../iotdb/db/qp/executor/OverflowQPExecutor.java   |   64 +-
 .../iotdb/db/qp/physical/crud/InsertPlan.java      |   25 +
 .../db/query/control/QueryResourceManager.java     |   46 +-
 .../iotdb/db/query/executor/EngineQueryRouter.java |    8 +-
 .../db/query/factory/SeriesReaderFactory.java      |   73 +-
 .../db/query/factory/SeriesReaderFactoryImpl.java  |   21 +-
 .../apache/iotdb/db/service/CloseMergeService.java |    6 +-
 .../java/org/apache/iotdb/db/service/IoTDB.java    |   13 +-
 .../org/apache/iotdb/db/service/TSServiceImpl.java |    7 +-
 .../iotdb/db/sync/receiver/SyncServiceImpl.java    |   19 +-
 .../org/apache/iotdb/db/utils/LoadDataUtils.java   |    9 +-
 .../java/org/apache/iotdb/db/utils/MemUtils.java   |   64 +-
 .../org/apache/iotdb/db/utils/OpenFileNumUtil.java |    6 +-
 .../db/writelog/node/ExclusiveWriteLogNode.java    |    4 +-
 .../iotdb/db/writelog/recover/LogReplayer.java     |   21 +-
 .../recover/SeqTsFileRecoverPerformer.java         |    7 +-
 .../recover/UnSeqTsFileRecoverPerformer.java       |    7 +-
 .../java/org/apache/iotdb/db/engine/PathUtils.java |    6 +-
 .../bufferwrite/BufferWriteProcessorNewTest.java   |    6 +-
 .../bufferwrite/BufferWriteProcessorTest.java      |   14 +-
 .../filenode/FileNodeProcessorStoreTest.java       |   91 -
 .../db/engine/filenode/FileNodeProcessorTest.java  |  134 -
 .../db/engine/filenode/TsFileResourceTest.java     |   98 -
 .../filenodeV2/FileNodeManagerBenchmark.java       |    5 +-
 .../engine/filenodeV2/FileNodeProcessorV2Test.java |    7 +-
 .../filenodeV2/UnsealedTsFileProcessorV2Test.java  |    7 +-
 .../memcontrol/BufferwriteFileSizeControlTest.java |    4 +-
 .../memcontrol/BufferwriteMetaSizeControlTest.java |    4 +-
 .../memcontrol/OverflowFileSizeControlTest.java    |  145 -
 .../memcontrol/OverflowMetaSizeControlTest.java    |  146 -
 .../engine/modification/DeletionFileNodeTest.java  |   73 +-
 .../db/engine/modification/DeletionQueryTest.java  |   92 +-
 .../db/engine/overflow/io/OverflowIOTest.java      |   65 -
 .../engine/overflow/io/OverflowMemtableTest.java   |  100 -
 .../overflow/io/OverflowProcessorBenchmark.java    |  124 -
 .../engine/overflow/io/OverflowProcessorTest.java  |  212 -
 .../engine/overflow/io/OverflowResourceTest.java   |   99 -
 .../db/engine/overflow/io/OverflowTestUtils.java   |   79 -
 .../overflow/metadata/OFFileMetadataTest.java      |   90 -
 .../metadata/OFRowGroupListMetadataTest.java       |   93 -
 .../metadata/OFSeriesListMetadataTest.java         |   88 -
 .../overflow/metadata/OverflowTestHelper.java      |   84 -
 .../db/engine/overflow/metadata/OverflowUtils.java |  138 -
 .../org/apache/iotdb/db/monitor/MonitorTest.java   |  294 +-
 .../iotdb/db/qp/plan/LogicalPlanSmallTest.java     |    9 +-
 .../apache/iotdb/db/qp/plan/PhysicalPlanTest.java  |    9 +-
 .../apache/iotdb/db/qp/utils/MemIntQpExecutor.java |   30 +-
 .../apache/iotdb/db/utils/EnvironmentUtils.java    |   18 +-
 .../iotdb/db/writelog/recover/LogReplayerTest.java |    7 +-
 .../db/writelog/recover/SeqTsFileRecoverTest.java  |    5 +-
 71 files changed, 4084 insertions(+), 6740 deletions(-)
 rename iotdb/src/main/java/org/apache/iotdb/db/conf/directories/{Directories.java => DirectoryManager.java} (93%)
 delete mode 100644 iotdb/src/main/java/org/apache/iotdb/db/engine/overflow/io/OverflowMemtable.java
 delete mode 100644 iotdb/src/main/java/org/apache/iotdb/db/engine/overflow/io/OverflowProcessor.java
 delete mode 100644 iotdb/src/test/java/org/apache/iotdb/db/engine/filenode/FileNodeProcessorStoreTest.java
 delete mode 100644 iotdb/src/test/java/org/apache/iotdb/db/engine/filenode/FileNodeProcessorTest.java
 delete mode 100644 iotdb/src/test/java/org/apache/iotdb/db/engine/filenode/TsFileResourceTest.java
 delete mode 100644 iotdb/src/test/java/org/apache/iotdb/db/engine/memcontrol/OverflowFileSizeControlTest.java
 delete mode 100644 iotdb/src/test/java/org/apache/iotdb/db/engine/memcontrol/OverflowMetaSizeControlTest.java
 delete mode 100644 iotdb/src/test/java/org/apache/iotdb/db/engine/overflow/io/OverflowIOTest.java
 delete mode 100644 iotdb/src/test/java/org/apache/iotdb/db/engine/overflow/io/OverflowMemtableTest.java
 delete mode 100644 iotdb/src/test/java/org/apache/iotdb/db/engine/overflow/io/OverflowProcessorBenchmark.java
 delete mode 100644 iotdb/src/test/java/org/apache/iotdb/db/engine/overflow/io/OverflowProcessorTest.java
 delete mode 100644 iotdb/src/test/java/org/apache/iotdb/db/engine/overflow/io/OverflowResourceTest.java
 delete mode 100644 iotdb/src/test/java/org/apache/iotdb/db/engine/overflow/io/OverflowTestUtils.java
 delete mode 100644 iotdb/src/test/java/org/apache/iotdb/db/engine/overflow/metadata/OFFileMetadataTest.java
 delete mode 100644 iotdb/src/test/java/org/apache/iotdb/db/engine/overflow/metadata/OFRowGroupListMetadataTest.java
 delete mode 100644 iotdb/src/test/java/org/apache/iotdb/db/engine/overflow/metadata/OFSeriesListMetadataTest.java
 delete mode 100644 iotdb/src/test/java/org/apache/iotdb/db/engine/overflow/metadata/OverflowTestHelper.java
 delete mode 100644 iotdb/src/test/java/org/apache/iotdb/db/engine/overflow/metadata/OverflowUtils.java


[incubator-iotdb] 03/03: remove FileNodeManager

Posted by qi...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

qiaojialin pushed a commit to branch feature_async_close_tsfile
in repository https://gitbox.apache.org/repos/asf/incubator-iotdb.git

commit ce6770c0278a011b489b8ef453b757daa2ad4969
Merge: 9e4acf8 0e52c55
Author: qiaojialin <64...@qq.com>
AuthorDate: Sat Jun 22 17:32:35 2019 +0800

    remove FileNodeManager

 .../java/org/apache/iotdb/db/engine/Processor.java |    2 -
 .../engine/bufferwrite/BufferWriteProcessor.java   |    3 +-
 .../iotdb/db/engine/filenode/FileNodeManager.java  | 2630 ++++++------
 .../db/engine/filenode/FileNodeProcessor.java      | 4232 ++++++++++----------
 .../db/engine/filenodeV2/FileNodeManagerV2.java    |   10 +-
 .../db/engine/filenodeV2/FileNodeProcessorV2.java  |    3 +-
 .../db/engine/memcontrol/FlushPartialPolicy.java   |    9 +-
 .../db/engine/memcontrol/ForceFLushAllPolicy.java  |   11 +-
 .../db/engine/overflow/io/OverflowMemtable.java    |  111 -
 .../db/engine/overflow/io/OverflowProcessor.java   |  820 ----
 .../org/apache/iotdb/db/monitor/StatMonitor.java   |    9 +-
 .../iotdb/db/monitor/collector/FileSize.java       |    9 +-
 .../iotdb/db/qp/physical/crud/InsertPlan.java      |   15 +
 .../db/query/control/QueryResourceManager.java     |   46 +-
 .../apache/iotdb/db/query/dataset/AuthDataSet.java |    6 +-
 .../dataset/EngineDataSetWithTimeGenerator.java    |    2 -
 .../groupby/GroupByWithOnlyTimeFilterDataSet.java  |   29 +-
 .../groupby/GroupByWithValueFilterDataSet.java     |    8 +-
 .../AbstractExecutorWithoutTimeGenerator.java      |   84 -
 .../AbstractExecutorWithoutTimeGeneratorV2.java    |   85 -
 .../db/query/executor/AggregateEngineExecutor.java |   39 +-
 .../executor/EngineExecutorWithTimeGenerator.java  |   16 +-
 .../EngineExecutorWithoutTimeGenerator.java        |   29 +-
 .../iotdb/db/query/executor/EngineQueryRouter.java |    8 +-
 .../db/query/executor/FillEngineExecutor.java      |    7 +-
 .../db/query/factory/ISeriesReaderFactory.java     |   28 +-
 .../db/query/factory/SeriesReaderFactory.java      |   73 +-
 .../db/query/factory/SeriesReaderFactoryImpl.java  |  125 +-
 .../java/org/apache/iotdb/db/query/fill/IFill.java |   26 +-
 .../org/apache/iotdb/db/query/fill/LinearFill.java |    8 +-
 .../apache/iotdb/db/query/fill/PreviousFill.java   |    8 +-
 .../reader/sequence/SequenceDataReaderV2.java      |   13 +-
 .../timegenerator/AbstractNodeConstructor.java     |   34 -
 .../query/timegenerator/EngineNodeConstructor.java |   10 +-
 .../apache/iotdb/db/service/CloseMergeService.java |    6 +-
 .../java/org/apache/iotdb/db/service/IoTDB.java    |   13 +-
 .../org/apache/iotdb/db/service/TSServiceImpl.java |    7 +-
 .../iotdb/db/sync/receiver/SyncServiceImpl.java    |   15 +-
 .../org/apache/iotdb/db/utils/LoadDataUtils.java   |    9 +-
 .../iotdb/db/writelog/recover/LogReplayer.java     |   11 +-
 .../recover/SeqTsFileRecoverPerformer.java         |    2 +-
 ...ormer.java => UnSeqTsFileRecoverPerformer.java} |   84 +-
 .../recover/UnseqTsFileRecoverPerformer.java       |   75 -
 .../filenode/FileNodeProcessorStoreTest.java       |   91 -
 .../db/engine/filenode/FileNodeProcessorTest.java  |  134 -
 .../db/engine/filenode/TsFileResourceTest.java     |   98 -
 .../filenodeV2/FileNodeManagerBenchmark.java       |    5 +-
 .../engine/filenodeV2/FileNodeProcessorV2Test.java |    7 +-
 .../filenodeV2/UnsealedTsFileProcessorV2Test.java  |    7 +-
 .../memcontrol/OverflowFileSizeControlTest.java    |  145 -
 .../memcontrol/OverflowMetaSizeControlTest.java    |  146 -
 .../engine/modification/DeletionFileNodeTest.java  |   69 +-
 .../db/engine/modification/DeletionQueryTest.java  |   92 +-
 .../db/engine/overflow/io/OverflowIOTest.java      |   65 -
 .../engine/overflow/io/OverflowMemtableTest.java   |  100 -
 .../overflow/io/OverflowProcessorBenchmark.java    |  124 -
 .../engine/overflow/io/OverflowProcessorTest.java  |  212 -
 .../engine/overflow/io/OverflowResourceTest.java   |   99 -
 .../db/engine/overflow/io/OverflowTestUtils.java   |   79 -
 .../overflow/metadata/OFFileMetadataTest.java      |   90 -
 .../metadata/OFRowGroupListMetadataTest.java       |   93 -
 .../metadata/OFSeriesListMetadataTest.java         |   88 -
 .../overflow/metadata/OverflowTestHelper.java      |   84 -
 .../db/engine/overflow/metadata/OverflowUtils.java |  138 -
 .../org/apache/iotdb/db/monitor/MonitorTest.java   |  294 +-
 .../apache/iotdb/db/qp/plan/PhysicalPlanTest.java  |    9 +-
 .../apache/iotdb/db/utils/EnvironmentUtils.java    |   10 +-
 .../iotdb/db/writelog/recover/LogReplayerTest.java |    9 +-
 .../db/writelog/recover/SeqTsFileRecoverTest.java  |    5 +-
 .../writelog/recover/UnseqTsFileRecoverTest.java   |  153 -
 70 files changed, 4066 insertions(+), 7150 deletions(-)

diff --cc iotdb/src/main/java/org/apache/iotdb/db/engine/Processor.java
index 76b9704,76b9704..1453c25
--- a/iotdb/src/main/java/org/apache/iotdb/db/engine/Processor.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/engine/Processor.java
@@@ -23,7 -23,7 +23,6 @@@ import java.util.concurrent.Future
  import java.util.concurrent.locks.ReadWriteLock;
  import java.util.concurrent.locks.ReentrantReadWriteLock;
  import org.apache.iotdb.db.engine.bufferwrite.BufferWriteProcessor;
--import org.apache.iotdb.db.engine.filenode.FileNodeProcessor;
  import org.apache.iotdb.db.exception.ProcessorException;
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
@@@ -32,7 -32,7 +31,6 @@@
   * Processor is used for implementing different processor with different operation.<br>
   *
   * @see BufferWriteProcessor
-- * @see FileNodeProcessor
   */
  public abstract class Processor {
  
diff --cc iotdb/src/main/java/org/apache/iotdb/db/engine/bufferwrite/BufferWriteProcessor.java
index 71495b0,71495b0..e0d5ab8
--- a/iotdb/src/main/java/org/apache/iotdb/db/engine/bufferwrite/BufferWriteProcessor.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/engine/bufferwrite/BufferWriteProcessor.java
@@@ -52,6 -52,6 +52,7 @@@ import org.apache.iotdb.db.exception.Bu
  import org.apache.iotdb.db.monitor.collector.MemTableWriteTimeCost;
  import org.apache.iotdb.db.monitor.collector.MemTableWriteTimeCost.MemTableWriteTimeCostType;
  import org.apache.iotdb.db.qp.constant.DatetimeUtils;
++import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
  import org.apache.iotdb.db.utils.ImmediateFuture;
  import org.apache.iotdb.db.utils.MemUtils;
  import org.apache.iotdb.db.writelog.manager.MultiFileLogNodeManager;
@@@ -198,7 -198,7 +199,7 @@@ public class BufferWriteProcessor exten
    public boolean write(TSRecord tsRecord) throws BufferWriteProcessorException {
      MemTableWriteTimeCost.getInstance().init();
      long start1 = System.currentTimeMillis();
--    long memUsage = MemUtils.getRecordSize(tsRecord);
++    long memUsage = MemUtils.getRecordSize(new InsertPlan(tsRecord));
      BasicMemController.UsageLevel level = BasicMemController.getInstance()
          .acquireUsage(this, memUsage);
  
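For readers following the hunk above, the commit swaps MemUtils.getRecordSize(tsRecord) for MemUtils.getRecordSize(new InsertPlan(tsRecord)), i.e. the insert path now measures an InsertPlan built from the TSRecord. The sketch below is only a rough, illustrative stand-in for what that TSRecord-to-InsertPlan conversion could look like; it mirrors the field mapping used in FileNodeManager.writeLog() later in this diff (device id, time, and parallel measurement/value arrays). The class name InsertPlanSketch and its fields are assumptions for illustration, not the actual constructor added by this commit.

    import org.apache.iotdb.tsfile.write.record.TSRecord;
    import org.apache.iotdb.tsfile.write.record.datapoint.DataPoint;

    // Illustrative only: a minimal stand-in for an InsertPlan(TSRecord) conversion.
    public class InsertPlanSketch {

      private final String deviceId;
      private final long time;
      private final String[] measurements;
      private final String[] values;

      public InsertPlanSketch(TSRecord record) {
        // Copy the record header.
        this.deviceId = record.deviceId;
        this.time = record.time;
        // Flatten the data points into parallel arrays, as writeLog() does further below.
        this.measurements = new String[record.dataPointList.size()];
        this.values = new String[record.dataPointList.size()];
        int i = 0;
        for (DataPoint dp : record.dataPointList) {
          measurements[i] = dp.getMeasurementId();
          values[i] = dp.getValue().toString();
          i++;
        }
      }
    }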
diff --cc iotdb/src/main/java/org/apache/iotdb/db/engine/filenode/FileNodeManager.java
index cd0d5de,13f2086..ff7d187
--- a/iotdb/src/main/java/org/apache/iotdb/db/engine/filenode/FileNodeManager.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/engine/filenode/FileNodeManager.java
@@@ -1,1317 -1,1317 +1,1317 @@@
--/**
-- * Licensed to the Apache Software Foundation (ASF) under one
-- * or more contributor license agreements.  See the NOTICE file
-- * distributed with this work for additional information
-- * regarding copyright ownership.  The ASF licenses this file
-- * to you under the Apache License, Version 2.0 (the
-- * "License"); you may not use this file except in compliance
-- * with the License.  You may obtain a copy of the License at
-- *
-- *      http://www.apache.org/licenses/LICENSE-2.0
-- *
-- * Unless required by applicable law or agreed to in writing,
-- * software distributed under the License is distributed on an
-- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-- * KIND, either express or implied.  See the License for the
-- * specific language governing permissions and limitations
-- * under the License.
-- */
--
--package org.apache.iotdb.db.engine.filenode;
--
--import java.io.File;
--import java.io.IOException;
--import java.util.ArrayList;
--import java.util.HashMap;
--import java.util.Iterator;
--import java.util.List;
--import java.util.Map;
--import java.util.concurrent.ConcurrentHashMap;
--import java.util.concurrent.ExecutionException;
--import java.util.concurrent.Future;
--import java.util.concurrent.ScheduledExecutorService;
--import java.util.concurrent.TimeUnit;
--import java.util.concurrent.atomic.AtomicLong;
--import org.apache.commons.io.FileUtils;
--import org.apache.iotdb.db.concurrent.IoTDBThreadPoolFactory;
--import org.apache.iotdb.db.conf.IoTDBConfig;
--import org.apache.iotdb.db.conf.IoTDBConstant;
--import org.apache.iotdb.db.conf.IoTDBDescriptor;
- import org.apache.iotdb.db.conf.directories.DirectoryManager;
 -import org.apache.iotdb.db.conf.directories.Directories;
--import org.apache.iotdb.db.engine.Processor;
--import org.apache.iotdb.db.engine.bufferwrite.BufferWriteProcessor;
--import org.apache.iotdb.db.engine.memcontrol.BasicMemController;
--import org.apache.iotdb.db.engine.overflow.io.OverflowProcessor;
--import org.apache.iotdb.db.engine.pool.FlushPoolManager;
--import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
--import org.apache.iotdb.db.exception.BufferWriteProcessorException;
--import org.apache.iotdb.db.exception.FileNodeManagerException;
--import org.apache.iotdb.db.exception.FileNodeProcessorException;
--import org.apache.iotdb.db.exception.PathErrorException;
--import org.apache.iotdb.db.exception.ProcessorException;
--import org.apache.iotdb.db.metadata.MManager;
--import org.apache.iotdb.db.monitor.IStatistic;
--import org.apache.iotdb.db.monitor.MonitorConstants;
--import org.apache.iotdb.db.monitor.StatMonitor;
--import org.apache.iotdb.db.qp.physical.crud.DeletePlan;
--import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
--import org.apache.iotdb.db.qp.physical.crud.UpdatePlan;
--import org.apache.iotdb.db.query.context.QueryContext;
--import org.apache.iotdb.db.query.control.FileReaderManager;
--import org.apache.iotdb.db.service.IService;
--import org.apache.iotdb.db.service.ServiceType;
--import org.apache.iotdb.db.utils.MemUtils;
--import org.apache.iotdb.db.writelog.manager.MultiFileLogNodeManager;
--import org.apache.iotdb.db.writelog.node.WriteLogNode;
--import org.apache.iotdb.tsfile.common.conf.TSFileConfig;
--import org.apache.iotdb.tsfile.file.metadata.enums.CompressionType;
--import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
--import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding;
--import org.apache.iotdb.tsfile.read.common.Path;
--import org.apache.iotdb.tsfile.read.expression.impl.SingleSeriesExpression;
--import org.apache.iotdb.tsfile.write.record.TSRecord;
--import org.apache.iotdb.tsfile.write.record.datapoint.DataPoint;
--import org.slf4j.Logger;
--import org.slf4j.LoggerFactory;
--
--public class FileNodeManager implements IStatistic, IService {
--
--  private static final Logger LOGGER = LoggerFactory.getLogger(FileNodeManager.class);
--  private static final IoTDBConfig TsFileDBConf = IoTDBDescriptor.getInstance().getConfig();
-   private static final DirectoryManager DIRECTORY_MANAGER = DirectoryManager.getInstance();
 -  private static final Directories directories = Directories.getInstance();
--  /**
--   * a folder that persist FileNodeProcessorStore classes. Each stroage group will have a subfolder.
--   * by default, it is system/info
--   */
--  private final String baseDir;
--
--  /**
--   * This map is used to manage all filenode processor,<br> the key is filenode name which is
--   * storage group seriesPath.
--   */
--  private ConcurrentHashMap<String, FileNodeProcessor> processorMap;
--  /**
--   * This set is used to store overflowed filenode name.<br> The overflowed filenode will be merge.
--   */
--  private volatile FileNodeManagerStatus fileNodeManagerStatus = FileNodeManagerStatus.NONE;
--  // There is no need to add concurrently
--  private HashMap<String, AtomicLong> statParamsHashMap;
--
--  ScheduledExecutorService closedProcessorCleaner = IoTDBThreadPoolFactory.newScheduledThreadPool(1,
--      "Closed FileNodeProcessors Cleaner");
--
--  private FileNodeManager(String baseDir) {
--    processorMap = new ConcurrentHashMap<>();
--    statParamsHashMap = new HashMap<>();
--    //label: A
--    for (MonitorConstants.FileNodeManagerStatConstants fileNodeManagerStatConstant :
--        MonitorConstants.FileNodeManagerStatConstants.values()) {
--      statParamsHashMap.put(fileNodeManagerStatConstant.name(), new AtomicLong(0));
--    }
--
--    String normalizedBaseDir = baseDir;
--    if (normalizedBaseDir.charAt(normalizedBaseDir.length() - 1) != File.separatorChar) {
--      normalizedBaseDir += Character.toString(File.separatorChar);
--    }
--    this.baseDir = normalizedBaseDir;
--    File dir = new File(normalizedBaseDir);
--    if (dir.mkdirs()) {
--      LOGGER.info("{} dir home doesn't exist, create it", dir.getPath());
--    }
--    //TODO merge this with label A
--    if (TsFileDBConf.isEnableStatMonitor()) {
--      StatMonitor statMonitor = StatMonitor.getInstance();
--      registerStatMetadata();
--      statMonitor.registerStatistics(MonitorConstants.STAT_STORAGE_DELTA_NAME, this);
--    }
--
--    closedProcessorCleaner.scheduleWithFixedDelay(()->{
--      int size = 0;
--      for (FileNodeProcessor fileNodeProcessor : processorMap.values()) {
--        size += fileNodeProcessor.getClosingBufferWriteProcessor().size();
--      }
--      if (size > 5) {
--        LOGGER.info("Current closing processor number is {}", size);
--      }
++///**
++// * Licensed to the Apache Software Foundation (ASF) under one
++// * or more contributor license agreements.  See the NOTICE file
++// * distributed with this work for additional information
++// * regarding copyright ownership.  The ASF licenses this file
++// * to you under the Apache License, Version 2.0 (the
++// * "License"); you may not use this file except in compliance
++// * with the License.  You may obtain a copy of the License at
++// *
++// *      http://www.apache.org/licenses/LICENSE-2.0
++// *
++// * Unless required by applicable law or agreed to in writing,
++// * software distributed under the License is distributed on an
++// * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++// * KIND, either express or implied.  See the License for the
++// * specific language governing permissions and limitations
++// * under the License.
++// */
++//
++//package org.apache.iotdb.db.engine.filenode;
++//
++//import java.io.File;
++//import java.io.IOException;
++//import java.util.ArrayList;
++//import java.util.HashMap;
++//import java.util.Iterator;
++//import java.util.List;
++//import java.util.Map;
++//import java.util.concurrent.ConcurrentHashMap;
++//import java.util.concurrent.ExecutionException;
++//import java.util.concurrent.Future;
++//import java.util.concurrent.ScheduledExecutorService;
++//import java.util.concurrent.TimeUnit;
++//import java.util.concurrent.atomic.AtomicLong;
++//import org.apache.commons.io.FileUtils;
++//import org.apache.iotdb.db.concurrent.IoTDBThreadPoolFactory;
++//import org.apache.iotdb.db.conf.IoTDBConfig;
++//import org.apache.iotdb.db.conf.IoTDBConstant;
++//import org.apache.iotdb.db.conf.IoTDBDescriptor;
++//import org.apache.iotdb.db.conf.directories.DirectoryManager;
++//import org.apache.iotdb.db.engine.Processor;
++//import org.apache.iotdb.db.engine.bufferwrite.BufferWriteProcessor;
++//import org.apache.iotdb.db.engine.memcontrol.BasicMemController;
++//import org.apache.iotdb.db.engine.overflow.io.OverflowProcessor;
++//import org.apache.iotdb.db.engine.pool.FlushPoolManager;
++//import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
++//import org.apache.iotdb.db.exception.BufferWriteProcessorException;
++//import org.apache.iotdb.db.exception.FileNodeManagerException;
++//import org.apache.iotdb.db.exception.FileNodeProcessorException;
++//import org.apache.iotdb.db.exception.PathErrorException;
++//import org.apache.iotdb.db.exception.ProcessorException;
++//import org.apache.iotdb.db.metadata.MManager;
++//import org.apache.iotdb.db.monitor.IStatistic;
++//import org.apache.iotdb.db.monitor.MonitorConstants;
++//import org.apache.iotdb.db.monitor.StatMonitor;
++//import org.apache.iotdb.db.qp.physical.crud.DeletePlan;
++//import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
++//import org.apache.iotdb.db.qp.physical.crud.UpdatePlan;
++//import org.apache.iotdb.db.query.context.QueryContext;
++//import org.apache.iotdb.db.query.control.FileReaderManager;
++//import org.apache.iotdb.db.service.IService;
++//import org.apache.iotdb.db.service.ServiceType;
++//import org.apache.iotdb.db.utils.MemUtils;
++//import org.apache.iotdb.db.writelog.manager.MultiFileLogNodeManager;
++//import org.apache.iotdb.db.writelog.node.WriteLogNode;
++//import org.apache.iotdb.tsfile.common.conf.TSFileConfig;
++//import org.apache.iotdb.tsfile.file.metadata.enums.CompressionType;
++//import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
++//import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding;
++//import org.apache.iotdb.tsfile.read.common.Path;
++//import org.apache.iotdb.tsfile.read.expression.impl.SingleSeriesExpression;
++//import org.apache.iotdb.tsfile.write.record.TSRecord;
++//import org.apache.iotdb.tsfile.write.record.datapoint.DataPoint;
++//import org.slf4j.Logger;
++//import org.slf4j.LoggerFactory;
++//
++//public class FileNodeManager implements IStatistic, IService {
++//
++//  private static final Logger LOGGER = LoggerFactory.getLogger(FileNodeManagerV2.class);
++//  private static final IoTDBConfig TsFileDBConf = IoTDBDescriptor.getInstance().getConfig();
++//  private static final DirectoryManager DIRECTORY_MANAGER = DirectoryManager.getInstance();
++//  /**
++//   * a folder that persist FileNodeProcessorStore classes. Each stroage group will have a subfolder.
++//   * by default, it is system/info
++//   */
++//  private final String baseDir;
++//
++//  /**
++//   * This map is used to manage all filenode processor,<br> the key is filenode name which is
++//   * storage group seriesPath.
++//   */
++//  private ConcurrentHashMap<String, FileNodeProcessor> processorMap;
++//  /**
++//   * This set is used to store overflowed filenode name.<br> The overflowed filenode will be merge.
++//   */
++//  private volatile FileNodeManagerStatus fileNodeManagerStatus = FileNodeManagerStatus.NONE;
++//  // There is no need to add concurrently
++//  private HashMap<String, AtomicLong> statParamsHashMap;
++//
++//  ScheduledExecutorService closedProcessorCleaner = IoTDBThreadPoolFactory.newScheduledThreadPool(1,
++//      "Closed FileNodeProcessors Cleaner");
++//
++//  private FileNodeManager(String baseDir) {
++//    processorMap = new ConcurrentHashMap<>();
++//    statParamsHashMap = new HashMap<>();
++//    //label: A
++//    for (MonitorConstants.FileNodeManagerStatConstants fileNodeManagerStatConstant :
++//        MonitorConstants.FileNodeManagerStatConstants.values()) {
++//      statParamsHashMap.put(fileNodeManagerStatConstant.name(), new AtomicLong(0));
++//    }
++//
++//    String normalizedBaseDir = baseDir;
++//    if (normalizedBaseDir.charAt(normalizedBaseDir.length() - 1) != File.separatorChar) {
++//      normalizedBaseDir += Character.toString(File.separatorChar);
++//    }
++//    this.baseDir = normalizedBaseDir;
++//    File dir = new File(normalizedBaseDir);
++//    if (dir.mkdirs()) {
++//      LOGGER.info("{} dir home doesn't exist, create it", dir.getPath());
++//    }
++//    //TODO merge this with label A
++//    if (TsFileDBConf.isEnableStatMonitor()) {
++//      StatMonitor statMonitor = StatMonitor.getInstance();
++//      registerStatMetadata();
++//      statMonitor.registerStatistics(MonitorConstants.STAT_STORAGE_DELTA_NAME, this);
++//    }
++//
++//    closedProcessorCleaner.scheduleWithFixedDelay(()->{
++//      int size = 0;
++//      for (FileNodeProcessor fileNodeProcessor : processorMap.values()) {
++//        size += fileNodeProcessor.getClosingBufferWriteProcessor().size();
++//      }
++//      if (size > 5) {
++//        LOGGER.info("Current closing processor number is {}", size);
++//      }
++////      for (FileNodeProcessor fileNodeProcessor : processorMap.values()) {
++////        fileNodeProcessor.checkAllClosingProcessors();
++////      }
++//    }, 0, 30000, TimeUnit.MILLISECONDS);
++//
++//  }
++//
++//  public static FileNodeManager getInstance() {
++//    return FileNodeManagerHolder.INSTANCE;
++//  }
++//
++//  private void updateStatHashMapWhenFail(TSRecord tsRecord) {
++//    statParamsHashMap.get(MonitorConstants.FileNodeManagerStatConstants.TOTAL_REQ_FAIL.name())
++//        .incrementAndGet();
++//    statParamsHashMap.get(MonitorConstants.FileNodeManagerStatConstants.TOTAL_POINTS_FAIL.name())
++//        .addAndGet(tsRecord.dataPointList.size());
++//  }
++//
++//  /**
++//   * get stats parameter hash map.
++//   *
++//   * @return the key represents the params' name, values is AtomicLong type
++//   */
++//  @Override
++//  public Map<String, AtomicLong> getStatParamsHashMap() {
++//    return statParamsHashMap;
++//  }
++//
++//  @Override
++//  public List<String> getAllPathForStatistic() {
++//    List<String> list = new ArrayList<>();
++//    for (MonitorConstants.FileNodeManagerStatConstants statConstant :
++//        MonitorConstants.FileNodeManagerStatConstants.values()) {
++//      list.add(MonitorConstants.STAT_STORAGE_DELTA_NAME + MonitorConstants.MONITOR_PATH_SEPARATOR
++//          + statConstant.name());
++//    }
++//    return list;
++//  }
++//
++//  @Override
++//  public Map<String, TSRecord> getAllStatisticsValue() {
++//    long curTime = System.currentTimeMillis();
++//    TSRecord tsRecord = StatMonitor
++//        .convertToTSRecord(getStatParamsHashMap(), MonitorConstants.STAT_STORAGE_DELTA_NAME,
++//            curTime);
++//    HashMap<String, TSRecord> ret = new HashMap<>();
++//    ret.put(MonitorConstants.STAT_STORAGE_DELTA_NAME, tsRecord);
++//    return ret;
++//  }
++//
++//  /**
++//   * Init Stat MetaDta.
++//   */
++//  @Override
++//  public void registerStatMetadata() {
++//    Map<String, String> hashMap = new HashMap<>();
++//    for (MonitorConstants.FileNodeManagerStatConstants statConstant :
++//        MonitorConstants.FileNodeManagerStatConstants.values()) {
++//      hashMap
++//          .put(MonitorConstants.STAT_STORAGE_DELTA_NAME + MonitorConstants.MONITOR_PATH_SEPARATOR
++//              + statConstant.name(), MonitorConstants.DATA_TYPE_INT64);
++//    }
++//    StatMonitor.getInstance().registerStatStorageGroup(hashMap);
++//  }
++//
++//  /**
++//   * This function is just for unit test.
++//   */
++//  public synchronized void resetFileNodeManager() {
++//    for (String key : statParamsHashMap.keySet()) {
++//      statParamsHashMap.put(key, new AtomicLong());
++//    }
++//    processorMap.clear();
++//  }
++//
++//  /**
++//   * @param filenodeName storage name, e.g., root.a.b
++//   */
++//  private FileNodeProcessor constructNewProcessor(String filenodeName)
++//      throws FileNodeManagerException {
++//    try {
++//      return new FileNodeProcessor(baseDir, filenodeName);
++//    } catch (FileNodeProcessorException e) {
++//      LOGGER.error("Can't construct the FileNodeProcessor, the filenode is {}", filenodeName, e);
++//      throw new FileNodeManagerException(e);
++//    }
++//  }
++//
++//  private FileNodeProcessor getProcessor(String path, boolean isWriteLock)
++//      throws FileNodeManagerException {
++//    String filenodeName;
++//    try {
++//      // return the stroage name
++//      filenodeName = MManager.getInstance().getFileNameByPath(path);
++//    } catch (PathErrorException e) {
++//      LOGGER.error("MManager get filenode name error, seriesPath is {}", path);
++//      throw new FileNodeManagerException(e);
++//    }
++//    FileNodeProcessor processor;
++//    processor = processorMap.get(filenodeName);
++//    if (processor != null) {
++//      processor.lock(isWriteLock);
++//    } else {
++//      filenodeName = filenodeName.intern();
++//      // calculate the value with same key synchronously
++//      synchronized (filenodeName) {
++//        processor = processorMap.get(filenodeName);
++//        if (processor != null) {
++//          processor.lock(isWriteLock);
++//        } else {
++//          // calculate the value with the key monitor
++//          LOGGER.debug("construct a processor instance, the filenode is {}, Thread is {}",
++//              filenodeName, Thread.currentThread().getId());
++//          processor = constructNewProcessor(filenodeName);
++//          processor.lock(isWriteLock);
++//          processorMap.put(filenodeName, processor);
++//        }
++//      }
++//    }
++//    return processor;
++//  }
++//
++//  /**
++//   * recovery the filenode processor.
++//   */
++//  public void recovery() {
++//    List<String> filenodeNames = null;
++//    try {
++//      filenodeNames = MManager.getInstance().getAllFileNames();
++//    } catch (PathErrorException e) {
++//      LOGGER.error("Restoring all FileNodes failed.", e);
++//      return;
++//    }
++//    for (String filenodeName : filenodeNames) {
++//      FileNodeProcessor fileNodeProcessor = null;
++//      try {
++//        // recover in initialization
++//        fileNodeProcessor = getProcessor(filenodeName, true);
++//      } catch (FileNodeManagerException e) {
++//        LOGGER.error("Restoring fileNode {} failed.", filenodeName, e);
++//      } finally {
++//        if (fileNodeProcessor != null) {
++//          fileNodeProcessor.writeUnlock();
++//        }
++//      }
++//      // add index check sum
++//    }
++//  }
++//
++//  /**
++//   * insert TsRecord into storage group.
++//   *
++//   * @param tsRecord input Data
++//   * @param isMonitor if true, the insertion is done by StatMonitor and the statistic Info will not
++//   * be recorded. if false, the statParamsHashMap will be updated.
++//   * @return an int value represents the insert type, 0: failed; 1: overflow; 2: bufferwrite
++//   */
++//  public int insert(TSRecord tsRecord, boolean isMonitor) throws FileNodeManagerException {
++//    long timestamp = tsRecord.time;
++//
++//    String deviceId = tsRecord.deviceId;
++//    checkTimestamp(tsRecord);
++////    //if memory is dangerous, directly reject
++////    long memUsage = MemUtils.getRecordSize(tsRecord);
++////    BasicMemController.UsageLevel level = BasicMemController.getInstance()
++////        .acquireUsage(this, memUsage);
++////    if (level == UsageLevel.DANGEROUS) {
++////      return 0;
++////    }
++//
++//    updateStat(isMonitor, tsRecord);
++//
++//    FileNodeProcessor fileNodeProcessor = getProcessor(deviceId, true);
++//    int insertType;
++//
++//    try {
++//      long lastUpdateTime = fileNodeProcessor.getFlushLastUpdateTime(deviceId);
++//      if (timestamp < lastUpdateTime) {
++//
++//        long startOverflow = System.currentTimeMillis();
++//
++//        insertOverflow(fileNodeProcessor, timestamp, tsRecord, isMonitor, deviceId);
++//
++//        startOverflow = System.currentTimeMillis() - startOverflow;
++//        if (startOverflow > 1000) {
++//          LOGGER.info("has overflow data, insert cost: {}", startOverflow);
++//        }
++//
++//        insertType = 1;
++//      } else {
++//        insertBufferWrite(fileNodeProcessor, timestamp, isMonitor, tsRecord, deviceId);
++//        insertType = 2;
++//      }
++//    } catch (FileNodeProcessorException e) {
++//      LOGGER.error(String.format("Encounter an error when closing the buffer insert processor %s.",
++//          fileNodeProcessor.getProcessorName()), e);
++//      throw new FileNodeManagerException(e);
++//    } finally {
++//      fileNodeProcessor.writeUnlock();
++//    }
++//    // Modify the insert
++//    if (!isMonitor) {
++//      fileNodeProcessor.getStatParamsHashMap()
++//          .get(MonitorConstants.FileNodeProcessorStatConstants.TOTAL_POINTS_SUCCESS.name())
++//          .addAndGet(tsRecord.dataPointList.size());
++//      fileNodeProcessor.getStatParamsHashMap()
++//          .get(MonitorConstants.FileNodeProcessorStatConstants.TOTAL_REQ_SUCCESS.name())
++//          .incrementAndGet();
++//      statParamsHashMap.get(MonitorConstants.FileNodeManagerStatConstants.TOTAL_REQ_SUCCESS.name())
++//          .incrementAndGet();
++//      statParamsHashMap
++//          .get(MonitorConstants.FileNodeManagerStatConstants.TOTAL_POINTS_SUCCESS.name())
++//          .addAndGet(tsRecord.dataPointList.size());
++//    }
++//    return insertType;
++//  }
++//
++//  private void writeLog(TSRecord tsRecord, boolean isMonitor, WriteLogNode logNode)
++//      throws FileNodeManagerException {
++//    try {
++//      if (IoTDBDescriptor.getInstance().getConfig().isEnableWal()) {
++//        String[] measurementList = new String[tsRecord.dataPointList.size()];
++//        String[] insertValues = new String[tsRecord.dataPointList.size()];
++//        int i=0;
++//        for (DataPoint dp : tsRecord.dataPointList) {
++//          measurementList[i] = dp.getMeasurementId();
++//          insertValues[i] = dp.getValue().toString();
++//          i++;
++//        }
++//        logNode.write(new InsertPlan(2, tsRecord.deviceId, tsRecord.time, measurementList,
++//            insertValues));
++//      }
++//    } catch (IOException e) {
++//      if (!isMonitor) {
++//        updateStatHashMapWhenFail(tsRecord);
++//      }
++//      throw new FileNodeManagerException(e);
++//    }
++//  }
++//
++//  private void checkTimestamp(TSRecord tsRecord) throws FileNodeManagerException {
++//    if (tsRecord.time < 0) {
++//      LOGGER.error("The insert time lt 0, {}.", tsRecord);
++//      throw new FileNodeManagerException("The insert time lt 0, the tsrecord is " + tsRecord);
++//    }
++//  }
++//
++//  private void updateStat(boolean isMonitor, TSRecord tsRecord) {
++//    if (!isMonitor) {
++//      statParamsHashMap.get(MonitorConstants.FileNodeManagerStatConstants.TOTAL_POINTS.name())
++//          .addAndGet(tsRecord.dataPointList.size());
++//    }
++//  }
++//
++//  private void insertOverflow(FileNodeProcessor fileNodeProcessor, long timestamp,
++//      TSRecord tsRecord, boolean isMonitor, String deviceId)
++//      throws FileNodeManagerException {
++//    // get overflow processor
++//    OverflowProcessor overflowProcessor;
++//    String filenodeName = fileNodeProcessor.getProcessorName();
++//    try {
++//      overflowProcessor = fileNodeProcessor.getOverflowProcessor(filenodeName);
++//    } catch (ProcessorException e) {
++//      LOGGER.error("Get the overflow processor failed, the filenode is {}, insert time is {}",
++//          filenodeName, timestamp);
++//      if (!isMonitor) {
++//        updateStatHashMapWhenFail(tsRecord);
++//      }
++//      throw new FileNodeManagerException(e);
++//    }
++//    // insert wal
++//    try {
++//      writeLog(tsRecord, isMonitor, overflowProcessor.getLogNode());
++//    } catch (IOException e) {
++//      throw new FileNodeManagerException(e);
++//    }
++//    // insert overflow data
++//    try {
++//      overflowProcessor.insert(tsRecord);
++//      fileNodeProcessor.changeTypeToChanged(deviceId, timestamp);
++//      fileNodeProcessor.setOverflowed(true);
++//    } catch (IOException e) {
++//      LOGGER.error("Insert into overflow error, the reason is {}", e);
++//      if (!isMonitor) {
++//        updateStatHashMapWhenFail(tsRecord);
++//      }
++//      throw new FileNodeManagerException(e);
++//    }
++//  }
++//
++//  private void insertBufferWrite(FileNodeProcessor fileNodeProcessor, long timestamp,
++//      boolean isMonitor, TSRecord tsRecord, String deviceId)
++//      throws FileNodeManagerException, FileNodeProcessorException {
++//
++//    long start1 = System.currentTimeMillis();
++//    // get bufferwrite processor
++//    BufferWriteProcessor bufferWriteProcessor;
++//    String filenodeName = fileNodeProcessor.getProcessorName();
++//    try {
++//      bufferWriteProcessor = fileNodeProcessor.getBufferWriteProcessor(filenodeName, timestamp);
++//
++//    } catch (FileNodeProcessorException e) {
++//      LOGGER.error("Get the bufferwrite processor failed, the filenode is {}, insert time is {}",
++//          filenodeName, timestamp);
++//      if (!isMonitor) {
++//        updateStatHashMapWhenFail(tsRecord);
++//      }
++//      throw new FileNodeManagerException(e);
++//    } finally {
++//      long start1_1 = System.currentTimeMillis() - start1;
++//      if (start1_1 > 1000) {
++//        LOGGER.info("FileNodeManagerV2.insertBufferWrite step-1-1, cost: {}", start1_1);
++//      }
++//    }
++//
++//    long start1_2 = System.currentTimeMillis();
++//    // Add a new interval file to newfilelist
++//    if (bufferWriteProcessor.isNewProcessor()) {
++//      bufferWriteProcessor.setNewProcessor(false);
++//      String bufferwriteBaseDir = bufferWriteProcessor.getBaseDir();
++//      String bufferwriteRelativePath = bufferWriteProcessor.getFileRelativePath();
++//      try {
++//        bufferWriteProcessor.setCurrentTsFileResource(new TsFileResource(new File(new File(bufferwriteBaseDir), bufferwriteRelativePath), false));
++//        fileNodeProcessor.addIntervalFileNode(bufferWriteProcessor.getCurrentTsFileResource());
++//      } catch (Exception e) {
++//        if (!isMonitor) {
++//          updateStatHashMapWhenFail(tsRecord);
++//        }
++//        throw new FileNodeManagerException(e);
++//      }
++//    }
++//    start1_2 = System.currentTimeMillis() - start1_2;
++//    if (start1_2 > 1000) {
++//      LOGGER.info("FileNodeManagerV2.insertBufferWrite step-1-2, cost: {}", start1_2);
++//    }
++//
++//    start1 = System.currentTimeMillis() - start1;
++//    if (start1 > 1000) {
++//      LOGGER.info("FileNodeManagerV2.insertBufferWrite step-1, cost: {}", start1);
++//    }
++//
++//    long start2 = System.currentTimeMillis();
++//
++//    long start2_1 = start2;
++//    // insert wal
++//    try {
++//      writeLog(tsRecord, isMonitor, bufferWriteProcessor.getLogNode());
++//    } catch (IOException e) {
++//      throw new FileNodeManagerException(e);
++//    }
++//    start2_1 = System.currentTimeMillis() - start2_1;
++//    if (start2_1 > 1000) {
++//      LOGGER.info("FileNodeManagerV2.insertBufferWrite step2-1 cost: {}", start2_1);
++//    }
++//
++//    long start2_2 = System.currentTimeMillis();
++//    // Write data
++//    long prevStartTime = fileNodeProcessor.getIntervalFileNodeStartTime(deviceId);
++//    long prevUpdateTime = fileNodeProcessor.getLastUpdateTime(deviceId);
++//
++//    fileNodeProcessor.setIntervalFileNodeStartTime(deviceId);
++//    fileNodeProcessor.setLastUpdateTime(deviceId, timestamp);
++//
++//    start2_2 = System.currentTimeMillis() - start2_2;
++//    if (start2_2 > 1000) {
++//      LOGGER.info("FileNodeManagerV2.insertBufferWrite step2-2 cost: {}", start2_2);
++//    }
++//    try {
++//      long start2_3 = System.currentTimeMillis();
++//
++//      // insert tsrecord and check flushMetadata
++//      if (!bufferWriteProcessor.write(tsRecord)) {
++//        start2_3 = System.currentTimeMillis() - start2_3;
++//        if (start2_3 > 1000) {
++//          LOGGER.info("FileNodeManagerV2.insertBufferWrite step2-3 cost: {}", start2_3);
++//        }
++//
++//        long start2_4 = System.currentTimeMillis();
++//        // undo time update
++//        fileNodeProcessor.setIntervalFileNodeStartTime(deviceId, prevStartTime);
++//        fileNodeProcessor.setLastUpdateTime(deviceId, prevUpdateTime);
++//        start2_4 = System.currentTimeMillis() - start2_4;
++//        if (start2_4 > 1000) {
++//          LOGGER.info("FileNodeManagerV2.insertBufferWrite step2-4 cost: {}", start2_4);
++//        }
++//      }
++//    } catch (BufferWriteProcessorException e) {
++//      if (!isMonitor) {
++//        updateStatHashMapWhenFail(tsRecord);
++//      }
++//      throw new FileNodeManagerException(e);
++//    }
++//    start2 = System.currentTimeMillis() - start2;
++//    if (start2 > 1000) {
++//      LOGGER.info("FileNodeManagerV2.insertBufferWrite step-2, cost: {}", start2);
++//    }
++//
++//    long start3 = System.currentTimeMillis();
++//
++//    // check if the file should be closed
++//    if (bufferWriteProcessor
++//        .getFileSize() > IoTDBDescriptor.getInstance()
++//        .getConfig().getBufferwriteFileSizeThreshold()) {
++//      if (LOGGER.isInfoEnabled()) {
++//        LOGGER.info(
++//            "The filenode processor {} will setCloseMark the bufferwrite processor, "
++//                + "because the size[{}] of tsfile {} reaches the threshold {}",
++//            filenodeName, MemUtils.bytesCntToStr(bufferWriteProcessor.getFileSize()),
++//            bufferWriteProcessor.getInsertFilePath(), MemUtils.bytesCntToStr(
++//                IoTDBDescriptor.getInstance().getConfig().getBufferwriteFileSizeThreshold()));
++//      }
++//
++//      fileNodeProcessor.closeBufferWrite();
++//      start3 = System.currentTimeMillis() - start3;
++//      if (start3 > 1000) {
++//        LOGGER.info("FileNodeManagerV2.insertBufferWrite step-3, setCloseMark buffer insert cost: {}", start3);
++//      }
++//    }
++//  }
++//
++//  /**
++//   * update data.
++//   */
++//  public void update(String deviceId, String measurementId, long startTime, long endTime,
++//      TSDataType type, String v)
++//      throws FileNodeManagerException {
++//
++//    FileNodeProcessor fileNodeProcessor = getProcessor(deviceId, true);
++//    try {
++//
++//      long lastUpdateTime = fileNodeProcessor.getLastUpdateTime(deviceId);
++//      if (startTime > lastUpdateTime) {
++//        LOGGER.warn("The update range is error, startTime {} is great than lastUpdateTime {}",
++//            startTime,
++//            lastUpdateTime);
++//        return;
++//      }
++//      long finalEndTime = endTime > lastUpdateTime ? lastUpdateTime : endTime;
++//
++//      String filenodeName = fileNodeProcessor.getProcessorName();
++//      // get overflow processor
++//      OverflowProcessor overflowProcessor;
++//      try {
++//        overflowProcessor = fileNodeProcessor.getOverflowProcessor(filenodeName);
++//      } catch (ProcessorException e) {
++//        LOGGER.error(
++//            "Get the overflow processor failed, the filenode is {}, "
++//                + "insert time range is from {} to {}",
++//            filenodeName, startTime, finalEndTime);
++//        throw new FileNodeManagerException(e);
++//      }
++//      overflowProcessor.update(deviceId, measurementId, startTime, finalEndTime, type, v);
++//      // change the type of tsfile to overflowed
++//      fileNodeProcessor.changeTypeToChanged(deviceId, startTime, finalEndTime);
++//      fileNodeProcessor.setOverflowed(true);
++//
++//      // insert wal
++//      try {
++//        if (IoTDBDescriptor.getInstance().getConfig().isEnableWal()) {
++//          overflowProcessor.getLogNode().write(
++//              new UpdatePlan(startTime, finalEndTime, v, new Path(deviceId
++//                  + "." + measurementId)));
++//        }
++//      } catch (IOException e) {
++//        throw new FileNodeManagerException(e);
++//      }
++//    } finally {
++//      fileNodeProcessor.writeUnlock();
++//    }
++//  }
++//
++//  /**
++//   * delete data.
++//   */
++//  public void delete(String deviceId, String measurementId, long timestamp)
++//      throws FileNodeManagerException {
++//
++//    FileNodeProcessor fileNodeProcessor = getProcessor(deviceId, true);
++//    try {
++//      long lastUpdateTime = fileNodeProcessor.getLastUpdateTime(deviceId);
++//      // no tsfile data, the delete operation is invalid
++//      if (lastUpdateTime == -1) {
++//        LOGGER.warn("The last update time is -1, delete overflow is invalid, "
++//                + "the filenode processor is {}",
++//            fileNodeProcessor.getProcessorName());
++//      } else {
++//        // insert wal
++//        if (IoTDBDescriptor.getInstance().getConfig().isEnableWal()) {
++//          // get processors for wal
++//          String filenodeName = fileNodeProcessor.getProcessorName();
++//          OverflowProcessor overflowProcessor;
++//          BufferWriteProcessor bufferWriteProcessor;
++//          try {
++//            overflowProcessor = fileNodeProcessor.getOverflowProcessor(filenodeName);
++//            // in case that no BufferWriteProcessor is available, a new BufferWriteProcessor is
++//            // needed to access LogNode.
++//            // TODO this may make the time range of the next TsFile a little wider
++//            bufferWriteProcessor = fileNodeProcessor.getBufferWriteProcessor(filenodeName,
++//                lastUpdateTime + 1);
++//          } catch (ProcessorException e) {
++//            LOGGER.error("Getting the processor failed, the filenode is {}, delete time is {}.",
++//                filenodeName, timestamp);
++//            throw new FileNodeManagerException(e);
++//          }
++//          try {
++//            overflowProcessor.getLogNode().write(new DeletePlan(timestamp,
++//                new Path(deviceId + "." + measurementId)));
++//            bufferWriteProcessor.getLogNode().write(new DeletePlan(timestamp,
++//                new Path(deviceId + "." + measurementId)));
++//          } catch (IOException e) {
++//            throw new FileNodeManagerException(e);
++//          }
++//        }
++//
++//        try {
++//          fileNodeProcessor.delete(deviceId, measurementId, timestamp);
++//        } catch (IOException e) {
++//          throw new FileNodeManagerException(e);
++//        }
++//        // change the type of tsfile to overflowed
++//        fileNodeProcessor.changeTypeToChangedForDelete(deviceId, timestamp);
++//        fileNodeProcessor.setOverflowed(true);
++//
++//      }
++//    } finally {
++//      fileNodeProcessor.writeUnlock();
++//    }
++//  }
++//
++//  private void delete(String processorName,
++//      Iterator<Map.Entry<String, FileNodeProcessor>> processorIterator)
++//      throws FileNodeManagerException {
++//    if (!processorMap.containsKey(processorName)) {
++//      //TODO do we need to call processorIterator.remove() ?
++//      LOGGER.warn("The processorMap doesn't contain the filenode processor {}.", processorName);
++//      return;
++//    }
++//    LOGGER.info("Try to delete the filenode processor {}.", processorName);
++//    FileNodeProcessor processor = processorMap.get(processorName);
++//    if (!processor.tryWriteLock()) {
++//      throw new FileNodeManagerException(String
++//          .format("Can't delete the filenode processor %s because Can't get the insert lock.",
++//              processorName));
++//    }
++//
++//    try {
++//      if (!processor.canBeClosed()) {
++//        LOGGER.warn("The filenode processor {} can't be deleted.", processorName);
++//        return;
++//      }
++//
++//      try {
++//        LOGGER.info("Delete the filenode processor {}.", processorName);
++//        processor.delete();
++//        processorIterator.remove();
++//      } catch (ProcessorException e) {
++//        LOGGER.error("Delete the filenode processor {} by iterator error.", processorName, e);
++//        throw new FileNodeManagerException(e);
++//      }
++//    } finally {
++//      processor.writeUnlock();
++//    }
++//  }
++//
++//  /**
++//   * Similar to delete(), but only deletes data in BufferWrite. Only used by WAL recovery.
++//   */
++//  public void deleteBufferWrite(String deviceId, String measurementId, long timestamp)
++//      throws FileNodeManagerException {
++//    FileNodeProcessor fileNodeProcessor = getProcessor(deviceId, true);
++//    try {
++//      fileNodeProcessor.deleteBufferWrite(deviceId, measurementId, timestamp);
++//    } catch (BufferWriteProcessorException | IOException e) {
++//      throw new FileNodeManagerException(e);
++//    } finally {
++//      fileNodeProcessor.writeUnlock();
++//    }
++//    // change the type of tsfile to overflowed
++//    fileNodeProcessor.changeTypeToChangedForDelete(deviceId, timestamp);
++//    fileNodeProcessor.setOverflowed(true);
++//  }
++//
++//  /**
++//   * Similar to delete(), but only deletes data in Overflow. Only used by WAL recovery.
++//   */
++//  public void deleteOverflow(String deviceId, String measurementId, long timestamp)
++//      throws FileNodeManagerException {
++//    FileNodeProcessor fileNodeProcessor = getProcessor(deviceId, true);
++//    try {
++//      fileNodeProcessor.deleteOverflow(deviceId, measurementId, timestamp);
++//    } catch (ProcessorException e) {
++//      throw new FileNodeManagerException(e);
++//    } finally {
++//      fileNodeProcessor.writeUnlock();
++//    }
++//    // change the type of tsfile to overflowed
++//    fileNodeProcessor.changeTypeToChangedForDelete(deviceId, timestamp);
++//    fileNodeProcessor.setOverflowed(true);
++//  }
++//
++//  /**
++//   * begin query.
++//   *
++//   * @param deviceId queried deviceId
++//   * @return a query token for the device.
++//   */
++//  public int beginQuery(String deviceId) throws FileNodeManagerException {
++//    FileNodeProcessor fileNodeProcessor = getProcessor(deviceId, true);
++//    try {
++//      LOGGER.debug("Get the FileNodeProcessor: filenode is {}, begin query.",
++//          fileNodeProcessor.getProcessorName());
++//      return fileNodeProcessor.addMultiPassCount();
++//    } finally {
++//      fileNodeProcessor.writeUnlock();
++//    }
++//  }
++//
++//  /**
++//   * query data.
++//   */
++//  public QueryDataSource query(SingleSeriesExpression seriesExpression, QueryContext context)
++//      throws FileNodeManagerException {
++//    String deviceId = seriesExpression.getSeriesPath().getDevice();
++//    String measurementId = seriesExpression.getSeriesPath().getMeasurement();
++//    FileNodeProcessor fileNodeProcessor = getProcessor(deviceId, false);
++//    LOGGER.debug("Get the FileNodeProcessor: filenode is {}, query.",
++//        fileNodeProcessor.getProcessorName());
++//    try {
++//      QueryDataSource queryDataSource;
++//      // query operation must have overflow processor
++//      if (!fileNodeProcessor.hasOverflowProcessor()) {
++//        try {
++//          fileNodeProcessor.getOverflowProcessor(fileNodeProcessor.getProcessorName());
++//        } catch (ProcessorException e) {
++//          LOGGER.error("Get the overflow processor failed, the filenode is {}, query is {},{}",
++//              fileNodeProcessor.getProcessorName(), deviceId, measurementId);
++//          throw new FileNodeManagerException(e);
++//        }
++//      }
++//      try {
++//        queryDataSource = fileNodeProcessor.query(deviceId, measurementId, context);
++//      } catch (FileNodeProcessorException e) {
++//        LOGGER.error("Query error: the deviceId {}, the measurementId {}", deviceId, measurementId,
++//            e);
++//        throw new FileNodeManagerException(e);
++//      }
++//      // return query structure
++//      return queryDataSource;
++//    } finally {
++//      fileNodeProcessor.readUnlock();
++//    }
++//  }
++//
++//  /**
++//   * end query.
++//   */
++//  public void endQuery(String deviceId, int token) throws FileNodeManagerException {
++//
++//    FileNodeProcessor fileNodeProcessor = getProcessor(deviceId, true);
++//    try {
++//      LOGGER.debug("Get the FileNodeProcessor: {} end query.",
++//          fileNodeProcessor.getProcessorName());
++//      fileNodeProcessor.decreaseMultiPassCount(token);
++//    } catch (FileNodeProcessorException e) {
++//      LOGGER.error("Failed to end query: the deviceId {}, token {}.", deviceId, token, e);
++//      throw new FileNodeManagerException(e);
++//    } finally {
++//      fileNodeProcessor.writeUnlock();
++//    }
++//  }
++//
++//  /**
++//   * Append one specified tsfile to the storage group. <b>This method is only provided for
++//   * transmission module</b>
++//   *
++//   * @param fileNodeName the seriesPath of storage group
++//   * @param appendFile the appended tsfile information
++//   */
++//  public boolean appendFileToFileNode(String fileNodeName, TsFileResource appendFile,
++//      String appendFilePath) throws FileNodeManagerException {
++//    FileNodeProcessor fileNodeProcessor = getProcessor(fileNodeName, true);
++//    try {
++//      // check append file
++//      for (Map.Entry<String, Long> entry : appendFile.getStartTimeMap().entrySet()) {
++//        if (fileNodeProcessor.getLastUpdateTime(entry.getKey()) >= entry.getValue()) {
++//          return false;
++//        }
++//      }
++//      // setCloseMark bufferwrite file
++//      fileNodeProcessor.closeBufferWrite();
++//      // append file to storage group.
++//      fileNodeProcessor.appendFile(appendFile, appendFilePath);
++//    } catch (FileNodeProcessorException e) {
++//      LOGGER.error("Cannot append the file {} to {}", appendFile.getFile().getAbsolutePath(), fileNodeName, e);
++//      throw new FileNodeManagerException(e);
++//    } finally {
++//      fileNodeProcessor.writeUnlock();
++//    }
++//    return true;
++//  }
++//
++//  /**
++//   * get all overlap tsfiles which conflict with the appendFile.
++//   *
++//   * @param fileNodeName the seriesPath of storage group
++//   * @param appendFile the appended tsfile information
++//   */
++//  public List<String> getOverlapFilesFromFileNode(String fileNodeName, TsFileResource appendFile,
++//      String uuid) throws FileNodeManagerException {
++//    FileNodeProcessor fileNodeProcessor = getProcessor(fileNodeName, true);
++//    List<String> overlapFiles;
++//    try {
++//      overlapFiles = fileNodeProcessor.getOverlapFiles(appendFile, uuid);
++//    } catch (FileNodeProcessorException e) {
++//      throw new FileNodeManagerException(e);
++//    } finally {
++//      fileNodeProcessor.writeUnlock();
++//    }
++//    return overlapFiles;
++//  }
++//
++//  /**
++//   * merge all overflowed filenode.
++//   *
++//   * @throws FileNodeManagerException FileNodeManagerException
++//   */
++//  public void mergeAll() throws FileNodeManagerException {
++//    if (fileNodeManagerStatus != FileNodeManagerStatus.NONE) {
++//      LOGGER.warn("Failed to merge all overflowed filenode, because filenode manager status is {}",
++//          fileNodeManagerStatus);
++//      return;
++//    }
++//
++//    fileNodeManagerStatus = FileNodeManagerStatus.MERGE;
++//    LOGGER.info("Start to merge all overflowed filenode");
++//    List<String> allFileNodeNames;
++//    try {
++//      allFileNodeNames = MManager.getInstance().getAllFileNames();
++//    } catch (PathErrorException e) {
++//      LOGGER.error("Get all storage group seriesPath error,", e);
++//      throw new FileNodeManagerException(e);
++//    }
++//    List<Future<?>> futureTasks = new ArrayList<>();
++//    for (String fileNodeName : allFileNodeNames) {
++//      FileNodeProcessor fileNodeProcessor = getProcessor(fileNodeName, true);
++//      try {
++//        Future<?> task = fileNodeProcessor.submitToMerge();
++//        if (task != null) {
++//          LOGGER.info("Submit the filenode {} to the merge pool", fileNodeName);
++//          futureTasks.add(task);
++//        }
++//      } finally {
++//        fileNodeProcessor.writeUnlock();
++//      }
++//    }
++//    long totalTime = 0;
++//    // loop waiting for merge to end, the longest waiting time is
++//    // 60s.
++//    int time = 2;
++//    List<Exception> mergeException = new ArrayList<>();
++//    for (Future<?> task : futureTasks) {
++//      while (!task.isDone()) {
++//        try {
++//          LOGGER.info(
++//              "Waiting for the end of merge, already waiting for {}s, "
++//                  + "continue to wait another {}s",
++//              totalTime, time);
++//          TimeUnit.SECONDS.sleep(time);
++//          totalTime += time;
++//          time = updateWaitTime(time);
++//        } catch (InterruptedException e) {
++//          LOGGER.error("Unexpected interruption {}", e);
++//          Thread.currentThread().interrupt();
++//        }
++//      }
++//      try {
++//        task.get();
++//      } catch (InterruptedException e) {
++//        LOGGER.error("Unexpected interruption {}", e);
++//      } catch (ExecutionException e) {
++//        mergeException.add(e);
++//        LOGGER.error("The exception for merge: {}", e);
++//      }
++//    }
++//    if (!mergeException.isEmpty()) {
++//      // just throw the first exception
++//      throw new FileNodeManagerException(mergeException.get(0));
++//    }
++//    fileNodeManagerStatus = FileNodeManagerStatus.NONE;
++//    LOGGER.info("End to merge all overflowed filenode");
++//  }
++//
++//  private int updateWaitTime(int time) {
++//    return time < 32 ? time * 2 : 60;
++//  }
++//
++//  /**
++//   * delete one filenode.
++//   */
++//  public void deleteOneFileNode(String processorName) throws FileNodeManagerException {
++//    if (fileNodeManagerStatus != FileNodeManagerStatus.NONE) {
++//      return;
++//    }
++//
++//    fileNodeManagerStatus = FileNodeManagerStatus.CLOSE;
++//    try {
++//      if (processorMap.containsKey(processorName)) {
++//        deleteFileNodeBlocked(processorName);
++//      }
++//      String fileNodePath = TsFileDBConf.getFileNodeDir();
++//      fileNodePath = standardizeDir(fileNodePath) + processorName;
++//      FileUtils.deleteDirectory(new File(fileNodePath));
++//
++//      cleanBufferWrite(processorName);
++//
++//      MultiFileLogNodeManager.getInstance()
++//          .deleteNode(processorName + IoTDBConstant.BUFFERWRITE_LOG_NODE_SUFFIX);
++//      MultiFileLogNodeManager.getInstance()
++//          .deleteNode(processorName + IoTDBConstant.OVERFLOW_LOG_NODE_SUFFIX);
++//    } catch (IOException e) {
++//      LOGGER.error("Delete the filenode processor {} error.", processorName, e);
++//      throw new FileNodeManagerException(e);
++//    } finally {
++//      fileNodeManagerStatus = FileNodeManagerStatus.NONE;
++//    }
++//  }
++//
++//  private void cleanBufferWrite(String processorName) throws IOException {
++//    List<String> bufferwritePathList = DIRECTORY_MANAGER.getAllTsFileFolders();
++//    for (String bufferwritePath : bufferwritePathList) {
++//      bufferwritePath = standardizeDir(bufferwritePath) + processorName;
++//      File bufferDir = new File(bufferwritePath);
++//      // free and setCloseMark the streams under this bufferwrite directory
++//      if (!bufferDir.exists()) {
++//        continue;
++//      }
++//      File[] bufferFiles = bufferDir.listFiles();
++//      if (bufferFiles != null) {
++//        for (File bufferFile : bufferFiles) {
++//          FileReaderManager.getInstance().closeFileAndRemoveReader(bufferFile.getPath());
++//        }
++//      }
++//      FileUtils.deleteDirectory(new File(bufferwritePath));
++//    }
++//  }
++//
++//  private void deleteFileNodeBlocked(String processorName) throws FileNodeManagerException {
++//    LOGGER.info("Forced to delete the filenode processor {}", processorName);
++//    FileNodeProcessor processor = processorMap.get(processorName);
++//    while (true) {
++//      if (processor.tryWriteLock()) {
++//        try {
++//          if (processor.canBeClosed()) {
++//            LOGGER.info("Delete the filenode processor {}.", processorName);
++//            processor.delete();
++//            processorMap.remove(processorName);
++//            break;
++//          } else {
++//            LOGGER.info(
++//                "Can't delete the filenode processor {}, "
++//                    + "because the filenode processor can't be closed."
++//                    + " Wait 100ms to retry", processorName);
++//          }
++//        } catch (ProcessorException e) {
++//          LOGGER.error("Delete the filenode processor {} error.", processorName, e);
++//          throw new FileNodeManagerException(e);
++//        } finally {
++//          processor.writeUnlock();
++//        }
++//      } else {
++//        LOGGER.info(
++//            "Can't delete the filenode processor {}, because it can't get the insert lock."
++//                + " Wait 100ms to retry", processorName);
++//      }
++//      try {
++//        TimeUnit.MILLISECONDS.sleep(100);
++//      } catch (InterruptedException e) {
++//        LOGGER.error(e.getMessage());
++//        Thread.currentThread().interrupt();
++//      }
++//    }
++//  }
++//
++//  private String standardizeDir(String originalPath) {
++//    String res = originalPath;
++//    if ((originalPath.length() > 0
++//        && originalPath.charAt(originalPath.length() - 1) != File.separatorChar)
++//        || originalPath.length() == 0) {
++//      res = originalPath + File.separatorChar;
++//    }
++//    return res;
++//  }
++//
++//  /**
++//   * add time series.
++//   */
++//  public void addTimeSeries(Path path, TSDataType dataType, TSEncoding encoding,
++//      CompressionType compressor,
++//      Map<String, String> props) throws FileNodeManagerException {
++//    FileNodeProcessor fileNodeProcessor = getProcessor(path.getFullPath(), true);
++//    try {
++//      fileNodeProcessor.addTimeSeries(path.getMeasurement(), dataType, encoding, compressor, props);
++//    } finally {
++//      fileNodeProcessor.writeUnlock();
++//    }
++//  }
++//
++//
++//  /**
++//   * Force to setCloseMark the filenode processor.
++//   */
++//  public void closeOneFileNode(String processorName) throws FileNodeManagerException {
++//    if (fileNodeManagerStatus != FileNodeManagerStatus.NONE) {
++//      return;
++//    }
++//
++//    fileNodeManagerStatus = FileNodeManagerStatus.CLOSE;
++//    try {
++//      LOGGER.info("Force to setCloseMark the filenode processor {}.", processorName);
++//      while (!closeOneProcessor(processorName)) {
++//        try {
++//          LOGGER.info("Can't force to setCloseMark the filenode processor {}, wait 100ms to retry",
++//              processorName);
++//          TimeUnit.MILLISECONDS.sleep(100);
++//        } catch (InterruptedException e) {
++//          // ignore the interrupted exception
++//          LOGGER.error("Unexpected interruption {}", e);
++//          Thread.currentThread().interrupt();
++//        }
++//      }
++//    } finally {
++//      fileNodeManagerStatus = FileNodeManagerStatus.NONE;
++//    }
++//  }
++//
++//
++//  /**
++//   * try to setCloseMark the filenode processor. The name of filenode processor is processorName
++//   * notice: this method has the same function as close()
++//   */
++//  private boolean closeOneProcessor(String processorName) throws FileNodeManagerException {
++//    if (!processorMap.containsKey(processorName)) {
++//      return true;
++//    }
++//
++//    Processor processor = processorMap.get(processorName);
++//    if (processor.tryWriteLock()) {
++//      try {
++//        if (processor.canBeClosed()) {
++//          processor.close();
++//          return true;
++//        } else {
++//          return false;
++//        }
++//      } catch (ProcessorException e) {
++//        LOGGER.error("Close the filenode processor {} error.", processorName, e);
++//        throw new FileNodeManagerException(e);
++//      } finally {
++//        processor.writeUnlock();
++//      }
++//    } else {
++//      return false;
++//    }
++//  }
++//
++//  /**
++//   * try to setCloseMark the filenode processor.
++//   * notice: This method has the same function as closeOneProcessor()
++//   */
++//  private void close(String processorName) throws FileNodeManagerException {
++//    if (!processorMap.containsKey(processorName)) {
++//      LOGGER.warn("The processorMap doesn't contain the filenode processor {}.", processorName);
++//      return;
++//    }
++//    LOGGER.info("Try to setCloseMark the filenode processor {}.", processorName);
++//    FileNodeProcessor processor = processorMap.get(processorName);
++//    if (!processor.tryWriteLock()) {
++//      LOGGER.warn("Can't get the insert lock of the filenode processor {}.", processorName);
++//      return;
++//    }
++//    try {
++//      if (processor.canBeClosed()) {
++//        try {
++//          LOGGER.info("Close the filenode processor {}.", processorName);
++//          processor.close();
++//        } catch (ProcessorException e) {
++//          LOGGER.error("Close the filenode processor {} error.", processorName, e);
++//          throw new FileNodeManagerException(e);
++//        }
++//      } else {
++//        LOGGER.warn("The filenode processor {} can't be closed.", processorName);
++//      }
++//    } finally {
++//      processor.writeUnlock();
++//    }
++//  }
++//
++//  /**
++//   * delete all filenode.
++//   */
++//  public synchronized boolean deleteAll() throws FileNodeManagerException {
++//    LOGGER.info("Start deleting all filenode");
++//    if (fileNodeManagerStatus != FileNodeManagerStatus.NONE) {
++//      LOGGER.info("Failed to delete all filenode processor because of merge operation");
++//      return false;
++//    }
++//
++//    fileNodeManagerStatus = FileNodeManagerStatus.CLOSE;
++//    try {
++//      Iterator<Map.Entry<String, FileNodeProcessor>> processorIterator = processorMap.entrySet()
++//          .iterator();
++//      while (processorIterator.hasNext()) {
++//        Map.Entry<String, FileNodeProcessor> processorEntry = processorIterator.next();
++//        delete(processorEntry.getKey(), processorIterator);
++//      }
++//      return processorMap.isEmpty();
++//    } finally {
++//      LOGGER.info("Deleting all FileNodeProcessors ends");
++//      fileNodeManagerStatus = FileNodeManagerStatus.NONE;
++//    }
++//  }
++//
++//  /**
++//   * Try to setCloseMark All.
++//   */
++//  public void closeAll() throws FileNodeManagerException {
++//    LOGGER.info("Start closing all filenode processor");
++//    if (fileNodeManagerStatus != FileNodeManagerStatus.NONE) {
++//      LOGGER.info("Failed to setCloseMark all filenode processor because of merge operation");
++//      return;
++//    }
++//    fileNodeManagerStatus = FileNodeManagerStatus.CLOSE;
++//    try {
++//      for (Map.Entry<String, FileNodeProcessor> processorEntry : processorMap.entrySet()) {
++//        close(processorEntry.getKey());
++//      }
++//    } finally {
++//      LOGGER.info("Close all FileNodeProcessors ends");
++//      fileNodeManagerStatus = FileNodeManagerStatus.NONE;
++//    }
++//  }
++//
++//  /**
++//   * force flushMetadata to control memory usage.
++//   */
++//  public void forceFlush(BasicMemController.UsageLevel level) {
++//    // you may add some delicate process like below
++//    // or you could provide multiple methods for different urgency
++//    switch (level) {
++//      // only select the most urgent (most active or biggest in size)
++//      // processors to flushMetadata
++//      // only select top 10% active memory user to flushMetadata
++//      case WARNING:
++//        try {
++//          flushTop(0.1f);
++//        } catch (IOException e) {
++//          LOGGER.error("force flushMetadata memory data error: {}", e);
++//        }
++//        break;
++//      // force all processors to flushMetadata
++//      case DANGEROUS:
++//        try {
++//          flushAll();
++//        } catch (IOException e) {
++//          LOGGER.error("force flushMetadata memory data error: {}", e);
++//        }
++//        break;
++//      // if the flushMetadata thread pool is not full ( or half full), start a new
++//      // flushMetadata task
++//      case SAFE:
++//        if (FlushPoolManager.getInstance().getActiveCnt() < 0.5 * FlushPoolManager.getInstance()
++//            .getThreadCnt()) {
++//          try {
++//            flushTop(0.01f);
++//          } catch (IOException e) {
++//            LOGGER.error("force flushMetadata memory data error: ", e);
++//          }
++//        }
++//        break;
++//      default:
++//    }
++//  }
++//
++//  private void flushAll() throws IOException {
++//    for (FileNodeProcessor processor : processorMap.values()) {
++//      if (!processor.tryLock(true)) {
++//        continue;
++//      }
++//      try {
++//        boolean isMerge = processor.flush().isHasOverflowFlushTask();
++//        if (isMerge) {
++//          processor.submitToMerge();
++//        }
++//      } finally {
++//        processor.unlock(true);
++//      }
++//    }
++//  }
++//
++//  private void flushTop(float percentage) throws IOException {
++//    List<FileNodeProcessor> tempProcessors = new ArrayList<>(processorMap.values());
++//    // sort the tempProcessors in descending order
++//    tempProcessors.sort((o1, o2) -> (int) (o2.memoryUsage() - o1.memoryUsage()));
++//    int flushNum =
++//        (int) (tempProcessors.size() * percentage) > 1
++//            ? (int) (tempProcessors.size() * percentage)
++//            : 1;
++//    for (int i = 0; i < flushNum && i < tempProcessors.size(); i++) {
++//      FileNodeProcessor processor = tempProcessors.get(i);
++//      // 64M
++//      if (processor.memoryUsage() <= TSFileConfig.groupSizeInByte / 2) {
++//        continue;
++//      }
++//      long start = System.currentTimeMillis();
++//      processor.writeLock();
++//      try {
++//        boolean isMerge = processor.flush().isHasOverflowFlushTask();
++//        if (isMerge) {
++//          processor.submitToMerge();
++//        }
++//      } finally {
++//        processor.writeUnlock();
++//      }
++//      start = System.currentTimeMillis() - start;
++//      LOGGER.info("flushMetadata Top cost: {}", start);
++//    }
++//  }
++//
++//  @Override
++//  public void start() {
++//    // do no thing
++//  }
++//
++//  @Override
++//  public void stop() {
++//    try {
++//      closeAll();
++//    } catch (FileNodeManagerException e) {
++//      LOGGER.error("Failed to setCloseMark file node manager.", e);
++//    }
++//
++//    boolean notFinished = true;
++//    while (notFinished) {
++//      int size = 0;
  //      for (FileNodeProcessor fileNodeProcessor : processorMap.values()) {
--//        fileNodeProcessor.checkAllClosingProcessors();
++//        size += fileNodeProcessor.getClosingBufferWriteProcessor().size();
  //      }
--    }, 0, 30000, TimeUnit.MILLISECONDS);
--
--  }
--
--  public static FileNodeManager getInstance() {
--    return FileNodeManagerHolder.INSTANCE;
--  }
--
--  private void updateStatHashMapWhenFail(TSRecord tsRecord) {
--    statParamsHashMap.get(MonitorConstants.FileNodeManagerStatConstants.TOTAL_REQ_FAIL.name())
--        .incrementAndGet();
--    statParamsHashMap.get(MonitorConstants.FileNodeManagerStatConstants.TOTAL_POINTS_FAIL.name())
--        .addAndGet(tsRecord.dataPointList.size());
--  }
--
--  /**
--   * get stats parameter hash map.
--   *
--   * @return the key represents the params' name, values is AtomicLong type
--   */
--  @Override
--  public Map<String, AtomicLong> getStatParamsHashMap() {
--    return statParamsHashMap;
--  }
--
--  @Override
--  public List<String> getAllPathForStatistic() {
--    List<String> list = new ArrayList<>();
--    for (MonitorConstants.FileNodeManagerStatConstants statConstant :
--        MonitorConstants.FileNodeManagerStatConstants.values()) {
--      list.add(MonitorConstants.STAT_STORAGE_DELTA_NAME + MonitorConstants.MONITOR_PATH_SEPARATOR
--          + statConstant.name());
--    }
--    return list;
--  }
--
--  @Override
--  public Map<String, TSRecord> getAllStatisticsValue() {
--    long curTime = System.currentTimeMillis();
--    TSRecord tsRecord = StatMonitor
--        .convertToTSRecord(getStatParamsHashMap(), MonitorConstants.STAT_STORAGE_DELTA_NAME,
--            curTime);
--    HashMap<String, TSRecord> ret = new HashMap<>();
--    ret.put(MonitorConstants.STAT_STORAGE_DELTA_NAME, tsRecord);
--    return ret;
--  }
--
--  /**
--   * Init Stat Metadata.
--   */
--  @Override
--  public void registerStatMetadata() {
--    Map<String, String> hashMap = new HashMap<>();
--    for (MonitorConstants.FileNodeManagerStatConstants statConstant :
--        MonitorConstants.FileNodeManagerStatConstants.values()) {
--      hashMap
--          .put(MonitorConstants.STAT_STORAGE_DELTA_NAME + MonitorConstants.MONITOR_PATH_SEPARATOR
--              + statConstant.name(), MonitorConstants.DATA_TYPE_INT64);
--    }
--    StatMonitor.getInstance().registerStatStorageGroup(hashMap);
--  }
--
--  /**
--   * This function is just for unit test.
--   */
--  public synchronized void resetFileNodeManager() {
--    for (String key : statParamsHashMap.keySet()) {
--      statParamsHashMap.put(key, new AtomicLong());
--    }
--    processorMap.clear();
--  }
--
--  /**
--   * @param filenodeName storage name, e.g., root.a.b
--   */
--  private FileNodeProcessor constructNewProcessor(String filenodeName)
--      throws FileNodeManagerException {
--    try {
--      return new FileNodeProcessor(baseDir, filenodeName);
--    } catch (FileNodeProcessorException e) {
--      LOGGER.error("Can't construct the FileNodeProcessor, the filenode is {}", filenodeName, e);
--      throw new FileNodeManagerException(e);
--    }
--  }
--
--  private FileNodeProcessor getProcessor(String path, boolean isWriteLock)
--      throws FileNodeManagerException {
--    String filenodeName;
--    try {
--      // return the storage name
--      filenodeName = MManager.getInstance().getFileNameByPath(path);
--    } catch (PathErrorException e) {
--      LOGGER.error("MManager get filenode name error, seriesPath is {}", path);
--      throw new FileNodeManagerException(e);
--    }
--    FileNodeProcessor processor;
--    processor = processorMap.get(filenodeName);
--    if (processor != null) {
--      processor.lock(isWriteLock);
--    } else {
--      filenodeName = filenodeName.intern();
--      // calculate the value with same key synchronously
--      synchronized (filenodeName) {
--        processor = processorMap.get(filenodeName);
--        if (processor != null) {
--          processor.lock(isWriteLock);
--        } else {
--          // calculate the value with the key monitor
--          LOGGER.debug("construct a processor instance, the filenode is {}, Thread is {}",
--              filenodeName, Thread.currentThread().getId());
--          processor = constructNewProcessor(filenodeName);
--          processor.lock(isWriteLock);
--          processorMap.put(filenodeName, processor);
--        }
--      }
--    }
--    return processor;
--  }
--
--  /**
--   * recovery the filenode processor.
--   */
--  public void recovery() {
--    List<String> filenodeNames = null;
--    try {
--      filenodeNames = MManager.getInstance().getAllFileNames();
--    } catch (PathErrorException e) {
--      LOGGER.error("Restoring all FileNodes failed.", e);
--      return;
--    }
--    for (String filenodeName : filenodeNames) {
--      FileNodeProcessor fileNodeProcessor = null;
--      try {
--        // recover in initialization
--        fileNodeProcessor = getProcessor(filenodeName, true);
--      } catch (FileNodeManagerException e) {
--        LOGGER.error("Restoring fileNode {} failed.", filenodeName, e);
--      } finally {
--        if (fileNodeProcessor != null) {
--          fileNodeProcessor.writeUnlock();
--        }
--      }
--      // add index check sum
--    }
--  }
--
--  /**
--   * insert TsRecord into storage group.
--   *
--   * @param tsRecord input Data
--   * @param isMonitor if true, the insertion is done by StatMonitor and the statistic Info will not
--   * be recorded. if false, the statParamsHashMap will be updated.
--   * @return an int value represents the insert type, 0: failed; 1: overflow; 2: bufferwrite
--   */
--  public int insert(TSRecord tsRecord, boolean isMonitor) throws FileNodeManagerException {
--    long timestamp = tsRecord.time;
--
--    String deviceId = tsRecord.deviceId;
--    checkTimestamp(tsRecord);
--//    //if memory is dangerous, directly reject
--//    long memUsage = MemUtils.getRecordSize(tsRecord);
--//    BasicMemController.UsageLevel level = BasicMemController.getInstance()
--//        .acquireUsage(this, memUsage);
--//    if (level == UsageLevel.DANGEROUS) {
--//      return 0;
--//    }
--
--    updateStat(isMonitor, tsRecord);
--
--    FileNodeProcessor fileNodeProcessor = getProcessor(deviceId, true);
--    int insertType;
--
--    try {
--      long lastUpdateTime = fileNodeProcessor.getFlushLastUpdateTime(deviceId);
--      if (timestamp < lastUpdateTime) {
--
--        long startOverflow = System.currentTimeMillis();
--
--        insertOverflow(fileNodeProcessor, timestamp, tsRecord, isMonitor, deviceId);
--
--        startOverflow = System.currentTimeMillis() - startOverflow;
--        if (startOverflow > 1000) {
--          LOGGER.info("has overflow data, insert cost: {}", startOverflow);
--        }
--
--        insertType = 1;
--      } else {
--        insertBufferWrite(fileNodeProcessor, timestamp, isMonitor, tsRecord, deviceId);
--        insertType = 2;
--      }
--    } catch (FileNodeProcessorException e) {
--      LOGGER.error(String.format("Encountered an error when closing the buffer insert processor %s.",
--          fileNodeProcessor.getProcessorName()), e);
--      throw new FileNodeManagerException(e);
--    } finally {
--      fileNodeProcessor.writeUnlock();
--    }
--    // Modify the insert
--    if (!isMonitor) {
--      fileNodeProcessor.getStatParamsHashMap()
--          .get(MonitorConstants.FileNodeProcessorStatConstants.TOTAL_POINTS_SUCCESS.name())
--          .addAndGet(tsRecord.dataPointList.size());
--      fileNodeProcessor.getStatParamsHashMap()
--          .get(MonitorConstants.FileNodeProcessorStatConstants.TOTAL_REQ_SUCCESS.name())
--          .incrementAndGet();
--      statParamsHashMap.get(MonitorConstants.FileNodeManagerStatConstants.TOTAL_REQ_SUCCESS.name())
--          .incrementAndGet();
--      statParamsHashMap
--          .get(MonitorConstants.FileNodeManagerStatConstants.TOTAL_POINTS_SUCCESS.name())
--          .addAndGet(tsRecord.dataPointList.size());
--    }
--    return insertType;
--  }
--
--  private void writeLog(TSRecord tsRecord, boolean isMonitor, WriteLogNode logNode)
--      throws FileNodeManagerException {
--    try {
--      if (IoTDBDescriptor.getInstance().getConfig().isEnableWal()) {
--        String[] measurementList = new String[tsRecord.dataPointList.size()];
--        String[] insertValues = new String[tsRecord.dataPointList.size()];
--        int i=0;
--        for (DataPoint dp : tsRecord.dataPointList) {
--          measurementList[i] = dp.getMeasurementId();
--          insertValues[i] = dp.getValue().toString();
--          i++;
--        }
--        logNode.write(new InsertPlan(2, tsRecord.deviceId, tsRecord.time, measurementList,
--            insertValues));
--      }
--    } catch (IOException e) {
--      if (!isMonitor) {
--        updateStatHashMapWhenFail(tsRecord);
--      }
--      throw new FileNodeManagerException(e);
--    }
--  }
--
--  private void checkTimestamp(TSRecord tsRecord) throws FileNodeManagerException {
--    if (tsRecord.time < 0) {
--      LOGGER.error("The insert time is less than 0, {}.", tsRecord);
--      throw new FileNodeManagerException("The insert time is less than 0, the tsrecord is " + tsRecord);
--    }
--  }
--
--  private void updateStat(boolean isMonitor, TSRecord tsRecord) {
--    if (!isMonitor) {
--      statParamsHashMap.get(MonitorConstants.FileNodeManagerStatConstants.TOTAL_POINTS.name())
--          .addAndGet(tsRecord.dataPointList.size());
--    }
--  }
--
--  private void insertOverflow(FileNodeProcessor fileNodeProcessor, long timestamp,
--      TSRecord tsRecord, boolean isMonitor, String deviceId)
--      throws FileNodeManagerException {
--    // get overflow processor
--    OverflowProcessor overflowProcessor;
--    String filenodeName = fileNodeProcessor.getProcessorName();
--    try {
--      overflowProcessor = fileNodeProcessor.getOverflowProcessor(filenodeName);
--    } catch (ProcessorException e) {
--      LOGGER.error("Get the overflow processor failed, the filenode is {}, insert time is {}",
--          filenodeName, timestamp);
--      if (!isMonitor) {
--        updateStatHashMapWhenFail(tsRecord);
--      }
--      throw new FileNodeManagerException(e);
--    }
--    // insert wal
--    try {
--      writeLog(tsRecord, isMonitor, overflowProcessor.getLogNode());
--    } catch (IOException e) {
--      throw new FileNodeManagerException(e);
--    }
--    // insert overflow data
--    try {
--      overflowProcessor.insert(tsRecord);
--      fileNodeProcessor.changeTypeToChanged(deviceId, timestamp);
--      fileNodeProcessor.setOverflowed(true);
--    } catch (IOException e) {
--      LOGGER.error("Insert into overflow error, the reason is {}", e);
--      if (!isMonitor) {
--        updateStatHashMapWhenFail(tsRecord);
--      }
--      throw new FileNodeManagerException(e);
--    }
--  }
--
--  private void insertBufferWrite(FileNodeProcessor fileNodeProcessor, long timestamp,
--      boolean isMonitor, TSRecord tsRecord, String deviceId)
--      throws FileNodeManagerException, FileNodeProcessorException {
--
--    long start1 = System.currentTimeMillis();
--    // get bufferwrite processor
--    BufferWriteProcessor bufferWriteProcessor;
--    String filenodeName = fileNodeProcessor.getProcessorName();
--    try {
--      bufferWriteProcessor = fileNodeProcessor.getBufferWriteProcessor(filenodeName, timestamp);
--
--    } catch (FileNodeProcessorException e) {
--      LOGGER.error("Get the bufferwrite processor failed, the filenode is {}, insert time is {}",
--          filenodeName, timestamp);
--      if (!isMonitor) {
--        updateStatHashMapWhenFail(tsRecord);
--      }
--      throw new FileNodeManagerException(e);
--    } finally {
--      long start1_1 = System.currentTimeMillis() - start1;
--      if (start1_1 > 1000) {
--        LOGGER.info("FileNodeManager.insertBufferWrite step-1-1, cost: {}", start1_1);
--      }
--    }
--
--    long start1_2 = System.currentTimeMillis();
--    // Add a new interval file to newfilelist
--    if (bufferWriteProcessor.isNewProcessor()) {
--      bufferWriteProcessor.setNewProcessor(false);
--      String bufferwriteBaseDir = bufferWriteProcessor.getBaseDir();
--      String bufferwriteRelativePath = bufferWriteProcessor.getFileRelativePath();
--      try {
--        bufferWriteProcessor.setCurrentTsFileResource(new TsFileResource(new File(new File(bufferwriteBaseDir), bufferwriteRelativePath), false));
--        fileNodeProcessor.addIntervalFileNode(bufferWriteProcessor.getCurrentTsFileResource());
--      } catch (Exception e) {
--        if (!isMonitor) {
--          updateStatHashMapWhenFail(tsRecord);
--        }
--        throw new FileNodeManagerException(e);
--      }
--    }
--    start1_2 = System.currentTimeMillis() - start1_2;
--    if (start1_2 > 1000) {
--      LOGGER.info("FileNodeManager.insertBufferWrite step-1-2, cost: {}", start1_2);
--    }
--
--    start1 = System.currentTimeMillis() - start1;
--    if (start1 > 1000) {
--      LOGGER.info("FileNodeManager.insertBufferWrite step-1, cost: {}", start1);
--    }
--
--    long start2 = System.currentTimeMillis();
--
--    long start2_1 = start2;
--    // insert wal
--    try {
--      writeLog(tsRecord, isMonitor, bufferWriteProcessor.getLogNode());
--    } catch (IOException e) {
--      throw new FileNodeManagerException(e);
--    }
--    start2_1 = System.currentTimeMillis() - start2_1;
--    if (start2_1 > 1000) {
--      LOGGER.info("FileNodeManager.insertBufferWrite step2-1 cost: {}", start2_1);
--    }
--
--    long start2_2 = System.currentTimeMillis();
--    // Write data
--    long prevStartTime = fileNodeProcessor.getIntervalFileNodeStartTime(deviceId);
--    long prevUpdateTime = fileNodeProcessor.getLastUpdateTime(deviceId);
--
--    fileNodeProcessor.setIntervalFileNodeStartTime(deviceId);
--    fileNodeProcessor.setLastUpdateTime(deviceId, timestamp);
--
--    start2_2 = System.currentTimeMillis() - start2_2;
--    if (start2_2 > 1000) {
--      LOGGER.info("FileNodeManager.insertBufferWrite step2-2 cost: {}", start2_2);
--    }
--    try {
--      long start2_3 = System.currentTimeMillis();
--
--      // insert tsrecord and check flushMetadata
--      if (!bufferWriteProcessor.write(tsRecord)) {
--        start2_3 = System.currentTimeMillis() - start2_3;
--        if (start2_3 > 1000) {
--          LOGGER.info("FileNodeManager.insertBufferWrite step2-3 cost: {}", start2_3);
--        }
--
--        long start2_4 = System.currentTimeMillis();
--        // undo time update
--        fileNodeProcessor.setIntervalFileNodeStartTime(deviceId, prevStartTime);
--        fileNodeProcessor.setLastUpdateTime(deviceId, prevUpdateTime);
--        start2_4 = System.currentTimeMillis() - start2_4;
--        if (start2_4 > 1000) {
--          LOGGER.info("FileNodeManager.insertBufferWrite step2-4 cost: {}", start2_4);
--        }
--      }
--    } catch (BufferWriteProcessorException e) {
--      if (!isMonitor) {
--        updateStatHashMapWhenFail(tsRecord);
--      }
--      throw new FileNodeManagerException(e);
--    }
--    start2 = System.currentTimeMillis() - start2;
--    if (start2 > 1000) {
--      LOGGER.info("FileNodeManager.insertBufferWrite step-2, cost: {}", start2);
--    }
--
--    long start3 = System.currentTimeMillis();
--
--    // check if the file should be closed
--    if (bufferWriteProcessor
--        .getFileSize() > IoTDBDescriptor.getInstance()
--        .getConfig().getBufferwriteFileSizeThreshold()) {
--      if (LOGGER.isInfoEnabled()) {
--        LOGGER.info(
--            "The filenode processor {} will setCloseMark the bufferwrite processor, "
--                + "because the size[{}] of tsfile {} reaches the threshold {}",
--            filenodeName, MemUtils.bytesCntToStr(bufferWriteProcessor.getFileSize()),
--            bufferWriteProcessor.getInsertFilePath(), MemUtils.bytesCntToStr(
--                IoTDBDescriptor.getInstance().getConfig().getBufferwriteFileSizeThreshold()));
--      }
--
--      fileNodeProcessor.closeBufferWrite();
--      start3 = System.currentTimeMillis() - start3;
--      if (start3 > 1000) {
--        LOGGER.info("FileNodeManager.insertBufferWrite step-3, setCloseMark buffer insert cost: {}", start3);
--      }
--    }
--  }
--
--  /**
--   * update data.
--   */
--  public void update(String deviceId, String measurementId, long startTime, long endTime,
--      TSDataType type, String v)
--      throws FileNodeManagerException {
--
--    FileNodeProcessor fileNodeProcessor = getProcessor(deviceId, true);
--    try {
--
--      long lastUpdateTime = fileNodeProcessor.getLastUpdateTime(deviceId);
--      if (startTime > lastUpdateTime) {
--        LOGGER.warn("The update range is invalid, startTime {} is greater than lastUpdateTime {}",
--            startTime,
--            lastUpdateTime);
--        return;
--      }
--      long finalEndTime = endTime > lastUpdateTime ? lastUpdateTime : endTime;
--
--      String filenodeName = fileNodeProcessor.getProcessorName();
--      // get overflow processor
--      OverflowProcessor overflowProcessor;
--      try {
--        overflowProcessor = fileNodeProcessor.getOverflowProcessor(filenodeName);
--      } catch (ProcessorException e) {
--        LOGGER.error(
--            "Get the overflow processor failed, the filenode is {}, "
--                + "insert time range is from {} to {}",
--            filenodeName, startTime, finalEndTime);
--        throw new FileNodeManagerException(e);
--      }
--      overflowProcessor.update(deviceId, measurementId, startTime, finalEndTime, type, v);
--      // change the type of tsfile to overflowed
--      fileNodeProcessor.changeTypeToChanged(deviceId, startTime, finalEndTime);
--      fileNodeProcessor.setOverflowed(true);
--
--      // insert wal
--      try {
--        if (IoTDBDescriptor.getInstance().getConfig().isEnableWal()) {
--          overflowProcessor.getLogNode().write(
--              new UpdatePlan(startTime, finalEndTime, v, new Path(deviceId
--                  + "." + measurementId)));
--        }
--      } catch (IOException e) {
--        throw new FileNodeManagerException(e);
--      }
--    } finally {
--      fileNodeProcessor.writeUnlock();
--    }
--  }
--
--  /**
--   * delete data.
--   */
--  public void delete(String deviceId, String measurementId, long timestamp)
--      throws FileNodeManagerException {
--
--    FileNodeProcessor fileNodeProcessor = getProcessor(deviceId, true);
--    try {
--      long lastUpdateTime = fileNodeProcessor.getLastUpdateTime(deviceId);
--      // no tsfile data, the delete operation is invalid
--      if (lastUpdateTime == -1) {
--        LOGGER.warn("The last update time is -1, delete overflow is invalid, "
--                + "the filenode processor is {}",
--            fileNodeProcessor.getProcessorName());
--      } else {
--        // insert wal
--        if (IoTDBDescriptor.getInstance().getConfig().isEnableWal()) {
--          // get processors for wal
--          String filenodeName = fileNodeProcessor.getProcessorName();
--          OverflowProcessor overflowProcessor;
--          BufferWriteProcessor bufferWriteProcessor;
--          try {
--            overflowProcessor = fileNodeProcessor.getOverflowProcessor(filenodeName);
--            // in case that no BufferWriteProcessor is available, a new BufferWriteProcessor is
--            // needed to access LogNode.
--            // TODO this may make the time range of the next TsFile a little wider
--            bufferWriteProcessor = fileNodeProcessor.getBufferWriteProcessor(filenodeName,
--                lastUpdateTime + 1);
--          } catch (ProcessorException e) {
--            LOGGER.error("Getting the processor failed, the filenode is {}, delete time is {}.",
--                filenodeName, timestamp);
--            throw new FileNodeManagerException(e);
--          }
--          try {
--            overflowProcessor.getLogNode().write(new DeletePlan(timestamp,
--                new Path(deviceId + "." + measurementId)));
--            bufferWriteProcessor.getLogNode().write(new DeletePlan(timestamp,
--                new Path(deviceId + "." + measurementId)));
--          } catch (IOException e) {
--            throw new FileNodeManagerException(e);
--          }
--        }
--
--        try {
--          fileNodeProcessor.delete(deviceId, measurementId, timestamp);
--        } catch (IOException e) {
--          throw new FileNodeManagerException(e);
--        }
--        // change the type of tsfile to overflowed
--        fileNodeProcessor.changeTypeToChangedForDelete(deviceId, timestamp);
--        fileNodeProcessor.setOverflowed(true);
--
--      }
--    } finally {
--      fileNodeProcessor.writeUnlock();
--    }
--  }
--
--  private void delete(String processorName,
--      Iterator<Map.Entry<String, FileNodeProcessor>> processorIterator)
--      throws FileNodeManagerException {
--    if (!processorMap.containsKey(processorName)) {
--      //TODO do we need to call processorIterator.remove() ?
--      LOGGER.warn("The processorMap doesn't contain the filenode processor {}.", processorName);
--      return;
--    }
--    LOGGER.info("Try to delete the filenode processor {}.", processorName);
--    FileNodeProcessor processor = processorMap.get(processorName);
--    if (!processor.tryWriteLock()) {
--      throw new FileNodeManagerException(String
--          .format("Can't delete the filenode processor %s because the insert lock can't be acquired.",
--              processorName));
--    }
--
--    try {
--      if (!processor.canBeClosed()) {
--        LOGGER.warn("The filenode processor {} can't be deleted.", processorName);
--        return;
--      }
--
--      try {
--        LOGGER.info("Delete the filenode processor {}.", processorName);
--        processor.delete();
--        processorIterator.remove();
--      } catch (ProcessorException e) {
--        LOGGER.error("Delete the filenode processor {} by iterator error.", processorName, e);
--        throw new FileNodeManagerException(e);
--      }
--    } finally {
--      processor.writeUnlock();
--    }
--  }
--
--  /**
--   * Similar to delete(), but only deletes data in BufferWrite. Only used by WAL recovery.
--   */
--  public void deleteBufferWrite(String deviceId, String measurementId, long timestamp)
--      throws FileNodeManagerException {
--    FileNodeProcessor fileNodeProcessor = getProcessor(deviceId, true);
--    try {
--      fileNodeProcessor.deleteBufferWrite(deviceId, measurementId, timestamp);
--    } catch (BufferWriteProcessorException | IOException e) {
--      throw new FileNodeManagerException(e);
--    } finally {
--      fileNodeProcessor.writeUnlock();
--    }
--    // change the type of tsfile to overflowed
--    fileNodeProcessor.changeTypeToChangedForDelete(deviceId, timestamp);
--    fileNodeProcessor.setOverflowed(true);
--  }
--
--  /**
--   * Similar to delete(), but only deletes data in Overflow. Only used by WAL recovery.
--   */
--  public void deleteOverflow(String deviceId, String measurementId, long timestamp)
--      throws FileNodeManagerException {
--    FileNodeProcessor fileNodeProcessor = getProcessor(deviceId, true);
--    try {
--      fileNodeProcessor.deleteOverflow(deviceId, measurementId, timestamp);
--    } catch (ProcessorException e) {
--      throw new FileNodeManagerException(e);
--    } finally {
--      fileNodeProcessor.writeUnlock();
--    }
--    // change the type of tsfile to overflowed
--    fileNodeProcessor.changeTypeToChangedForDelete(deviceId, timestamp);
--    fileNodeProcessor.setOverflowed(true);
--  }
--
--  /**
--   * begin query.
--   *
--   * @param deviceId queried deviceId
--   * @return a query token for the device.
--   */
--  public int beginQuery(String deviceId) throws FileNodeManagerException {
--    FileNodeProcessor fileNodeProcessor = getProcessor(deviceId, true);
--    try {
--      LOGGER.debug("Get the FileNodeProcessor: filenode is {}, begin query.",
--          fileNodeProcessor.getProcessorName());
--      return fileNodeProcessor.addMultiPassCount();
--    } finally {
--      fileNodeProcessor.writeUnlock();
--    }
--  }
--
--  /**
--   * query data.
--   */
--  public QueryDataSource query(SingleSeriesExpression seriesExpression, QueryContext context)
--      throws FileNodeManagerException {
--    String deviceId = seriesExpression.getSeriesPath().getDevice();
--    String measurementId = seriesExpression.getSeriesPath().getMeasurement();
--    FileNodeProcessor fileNodeProcessor = getProcessor(deviceId, false);
--    LOGGER.debug("Get the FileNodeProcessor: filenode is {}, query.",
--        fileNodeProcessor.getProcessorName());
--    try {
--      QueryDataSource queryDataSource;
--      // query operation must have overflow processor
--      if (!fileNodeProcessor.hasOverflowProcessor()) {
--        try {
--          fileNodeProcessor.getOverflowProcessor(fileNodeProcessor.getProcessorName());
--        } catch (ProcessorException e) {
--          LOGGER.error("Get the overflow processor failed, the filenode is {}, query is {},{}",
--              fileNodeProcessor.getProcessorName(), deviceId, measurementId);
--          throw new FileNodeManagerException(e);
--        }
--      }
--      try {
--        queryDataSource = fileNodeProcessor.query(deviceId, measurementId, context);
--      } catch (FileNodeProcessorException e) {
--        LOGGER.error("Query error: the deviceId {}, the measurementId {}", deviceId, measurementId,
--            e);
--        throw new FileNodeManagerException(e);
--      }
--      // return query structure
--      return queryDataSource;
--    } finally {
--      fileNodeProcessor.readUnlock();
--    }
--  }
--
--  /**
--   * end query.
--   */
--  public void endQuery(String deviceId, int token) throws FileNodeManagerException {
--
--    FileNodeProcessor fileNodeProcessor = getProcessor(deviceId, true);
--    try {
--      LOGGER.debug("Get the FileNodeProcessor: {} end query.",
--          fileNodeProcessor.getProcessorName());
--      fileNodeProcessor.decreaseMultiPassCount(token);
--    } catch (FileNodeProcessorException e) {
--      LOGGER.error("Failed to end query: the deviceId {}, token {}.", deviceId, token, e);
--      throw new FileNodeManagerException(e);
--    } finally {
--      fileNodeProcessor.writeUnlock();
--    }
--  }
--
--  /**
--   * Append one specified tsfile to the storage group. <b>This method is only provided for
--   * transmission module</b>
--   *
--   * @param fileNodeName the seriesPath of storage group
--   * @param appendFile the appended tsfile information
--   */
--  public boolean appendFileToFileNode(String fileNodeName, TsFileResource appendFile,
--      String appendFilePath) throws FileNodeManagerException {
--    FileNodeProcessor fileNodeProcessor = getProcessor(fileNodeName, true);
--    try {
--      // check append file
--      for (Map.Entry<String, Long> entry : appendFile.getStartTimeMap().entrySet()) {
--        if (fileNodeProcessor.getLastUpdateTime(entry.getKey()) >= entry.getValue()) {
--          return false;
--        }
--      }
--      // setCloseMark bufferwrite file
--      fileNodeProcessor.closeBufferWrite();
--      // append file to storage group.
--      fileNodeProcessor.appendFile(appendFile, appendFilePath);
--    } catch (FileNodeProcessorException e) {
--      LOGGER.error("Cannot append the file {} to {}", appendFile.getFile().getAbsolutePath(), fileNodeName, e);
--      throw new FileNodeManagerException(e);
--    } finally {
--      fileNodeProcessor.writeUnlock();
--    }
--    return true;
--  }
--
--  /**
--   * get all overlap tsfiles which conflict with the appendFile.
--   *
--   * @param fileNodeName the seriesPath of storage group
--   * @param appendFile the appended tsfile information
--   */
--  public List<String> getOverlapFilesFromFileNode(String fileNodeName, TsFileResource appendFile,
--      String uuid) throws FileNodeManagerException {
--    FileNodeProcessor fileNodeProcessor = getProcessor(fileNodeName, true);
--    List<String> overlapFiles;
--    try {
--      overlapFiles = fileNodeProcessor.getOverlapFiles(appendFile, uuid);
--    } catch (FileNodeProcessorException e) {
--      throw new FileNodeManagerException(e);
--    } finally {
--      fileNodeProcessor.writeUnlock();
--    }
--    return overlapFiles;
--  }
--
--  /**
--   * merge all overflowed filenode.
--   *
--   * @throws FileNodeManagerException FileNodeManagerException
--   */
--  public void mergeAll() throws FileNodeManagerException {
--    if (fileNodeManagerStatus != FileNodeManagerStatus.NONE) {
--      LOGGER.warn("Failed to merge all overflowed filenode, because filenode manager status is {}",
--          fileNodeManagerStatus);
--      return;
--    }
--
--    fileNodeManagerStatus = FileNodeManagerStatus.MERGE;
--    LOGGER.info("Start to merge all overflowed filenode");
--    List<String> allFileNodeNames;
--    try {
--      allFileNodeNames = MManager.getInstance().getAllFileNames();
--    } catch (PathErrorException e) {
--      LOGGER.error("Get all storage group seriesPath error,", e);
--      throw new FileNodeManagerException(e);
--    }
--    List<Future<?>> futureTasks = new ArrayList<>();
--    for (String fileNodeName : allFileNodeNames) {
--      FileNodeProcessor fileNodeProcessor = getProcessor(fileNodeName, true);
--      try {
--        Future<?> task = fileNodeProcessor.submitToMerge();
--        if (task != null) {
--          LOGGER.info("Submit the filenode {} to the merge pool", fileNodeName);
--          futureTasks.add(task);
--        }
--      } finally {
--        fileNodeProcessor.writeUnlock();
--      }
--    }
--    long totalTime = 0;
--    // loop waiting for merge to end, the longest waiting time is
--    // 60s.
--    int time = 2;
--    List<Exception> mergeException = new ArrayList<>();
--    for (Future<?> task : futureTasks) {
--      while (!task.isDone()) {
--        try {
--          LOGGER.info(
--              "Waiting for the end of merge, already waiting for {}s, "
--                  + "continue to wait another {}s",
--              totalTime, time);
--          TimeUnit.SECONDS.sleep(time);
--          totalTime += time;
--          time = updateWaitTime(time);
--        } catch (InterruptedException e) {
--          LOGGER.error("Unexpected interruption {}", e);
--          Thread.currentThread().interrupt();
--        }
--      }
--      try {
--        task.get();
--      } catch (InterruptedException e) {
--        LOGGER.error("Unexpected interruption {}", e);
--      } catch (ExecutionException e) {
--        mergeException.add(e);
--        LOGGER.error("The exception for merge: {}", e);
--      }
--    }
--    if (!mergeException.isEmpty()) {
--      // just throw the first exception
--      throw new FileNodeManagerException(mergeException.get(0));
--    }
--    fileNodeManagerStatus = FileNodeManagerStatus.NONE;
--    LOGGER.info("End to merge all overflowed filenode");
--  }
--
--  private int updateWaitTime(int time) {
--    return time < 32 ? time * 2 : 60;
--  }
--
--  /**
--   * delete one filenode.
--   */
--  public void deleteOneFileNode(String processorName) throws FileNodeManagerException {
--    if (fileNodeManagerStatus != FileNodeManagerStatus.NONE) {
--      return;
--    }
--
--    fileNodeManagerStatus = FileNodeManagerStatus.CLOSE;
--    try {
--      if (processorMap.containsKey(processorName)) {
--        deleteFileNodeBlocked(processorName);
--      }
--      String fileNodePath = TsFileDBConf.getFileNodeDir();
--      fileNodePath = standardizeDir(fileNodePath) + processorName;
--      FileUtils.deleteDirectory(new File(fileNodePath));
--
--      cleanBufferWrite(processorName);
--
--      MultiFileLogNodeManager.getInstance()
--          .deleteNode(processorName + IoTDBConstant.BUFFERWRITE_LOG_NODE_SUFFIX);
--      MultiFileLogNodeManager.getInstance()
--          .deleteNode(processorName + IoTDBConstant.OVERFLOW_LOG_NODE_SUFFIX);
--    } catch (IOException e) {
--      LOGGER.error("Delete the filenode processor {} error.", processorName, e);
--      throw new FileNodeManagerException(e);
--    } finally {
--      fileNodeManagerStatus = FileNodeManagerStatus.NONE;
--    }
--  }
--
--  private void cleanBufferWrite(String processorName) throws IOException {
-     List<String> bufferwritePathList = DIRECTORY_MANAGER.getAllTsFileFolders();
 -    List<String> bufferwritePathList = directories.getAllTsFileFolders();
--    for (String bufferwritePath : bufferwritePathList) {
--      bufferwritePath = standardizeDir(bufferwritePath) + processorName;
--      File bufferDir = new File(bufferwritePath);
--      // free and setCloseMark the streams under this bufferwrite directory
--      if (!bufferDir.exists()) {
--        continue;
--      }
--      File[] bufferFiles = bufferDir.listFiles();
--      if (bufferFiles != null) {
--        for (File bufferFile : bufferFiles) {
--          FileReaderManager.getInstance().closeFileAndRemoveReader(bufferFile.getPath());
--        }
--      }
--      FileUtils.deleteDirectory(new File(bufferwritePath));
--    }
--  }
--
--  private void deleteFileNodeBlocked(String processorName) throws FileNodeManagerException {
--    LOGGER.info("Forced to delete the filenode processor {}", processorName);
--    FileNodeProcessor processor = processorMap.get(processorName);
--    while (true) {
--      if (processor.tryWriteLock()) {
--        try {
--          if (processor.canBeClosed()) {
--            LOGGER.info("Delete the filenode processor {}.", processorName);
--            processor.delete();
--            processorMap.remove(processorName);
--            break;
--          } else {
--            LOGGER.info(
--                "Can't delete the filenode processor {}, "
--                    + "because the filenode processor can't be closed."
--                    + " Wait 100ms to retry", processorName);
--          }
--        } catch (ProcessorException e) {
--          LOGGER.error("Delete the filenode processor {} error.", processorName, e);
--          throw new FileNodeManagerException(e);
--        } finally {
--          processor.writeUnlock();
--        }
--      } else {
--        LOGGER.info(
--            "Can't delete the filenode processor {}, because it can't get the insert lock."
--                + " Wait 100ms to retry", processorName);
--      }
--      try {
--        TimeUnit.MILLISECONDS.sleep(100);
--      } catch (InterruptedException e) {
--        LOGGER.error(e.getMessage());
--        Thread.currentThread().interrupt();
--      }
--    }
--  }
--
--  private String standardizeDir(String originalPath) {
--    String res = originalPath;
--    if ((originalPath.length() > 0
--        && originalPath.charAt(originalPath.length() - 1) != File.separatorChar)
--        || originalPath.length() == 0) {
--      res = originalPath + File.separatorChar;
--    }
--    return res;
--  }
--
--  /**
--   * add time series.
--   */
--  public void addTimeSeries(Path path, TSDataType dataType, TSEncoding encoding,
--      CompressionType compressor,
--      Map<String, String> props) throws FileNodeManagerException {
--    FileNodeProcessor fileNodeProcessor = getProcessor(path.getFullPath(), true);
--    try {
--      fileNodeProcessor.addTimeSeries(path.getMeasurement(), dataType, encoding, compressor, props);
--    } finally {
--      fileNodeProcessor.writeUnlock();
--    }
--  }
--
--
--  /**
--   * Force to setCloseMark the filenode processor.
--   */
--  public void closeOneFileNode(String processorName) throws FileNodeManagerException {
--    if (fileNodeManagerStatus != FileNodeManagerStatus.NONE) {
--      return;
--    }
--
--    fileNodeManagerStatus = FileNodeManagerStatus.CLOSE;
--    try {
--      LOGGER.info("Force to setCloseMark the filenode processor {}.", processorName);
--      while (!closeOneProcessor(processorName)) {
--        try {
--          LOGGER.info("Can't force to setCloseMark the filenode processor {}, wait 100ms to retry",
--              processorName);
--          TimeUnit.MILLISECONDS.sleep(100);
--        } catch (InterruptedException e) {
--          // ignore the interrupted exception
--          LOGGER.error("Unexpected interruption {}", e);
--          Thread.currentThread().interrupt();
--        }
--      }
--    } finally {
--      fileNodeManagerStatus = FileNodeManagerStatus.NONE;
--    }
--  }
--
--
--  /**
--   * try to setCloseMark the filenode processor. The name of filenode processor is processorName
--   * notice: this method has the same function as close()
--   */
--  private boolean closeOneProcessor(String processorName) throws FileNodeManagerException {
--    if (!processorMap.containsKey(processorName)) {
--      return true;
--    }
--
--    Processor processor = processorMap.get(processorName);
--    if (processor.tryWriteLock()) {
--      try {
--        if (processor.canBeClosed()) {
--          processor.close();
--          return true;
--        } else {
--          return false;
--        }
--      } catch (ProcessorException e) {
--        LOGGER.error("Close the filenode processor {} error.", processorName, e);
--        throw new FileNodeManagerException(e);
--      } finally {
--        processor.writeUnlock();
--      }
--    } else {
--      return false;
--    }
--  }
--
--  /**
--   * try to setCloseMark the filenode processor.
--   * notice: This method has the same function as closeOneProcessor()
--   */
--  private void close(String processorName) throws FileNodeManagerException {
--    if (!processorMap.containsKey(processorName)) {
--      LOGGER.warn("The processorMap doesn't contain the filenode processor {}.", processorName);
--      return;
--    }
--    LOGGER.info("Try to setCloseMark the filenode processor {}.", processorName);
--    FileNodeProcessor processor = processorMap.get(processorName);
--    if (!processor.tryWriteLock()) {
--      LOGGER.warn("Can't get the insert lock of the filenode processor {}.", processorName);
--      return;
--    }
--    try {
--      if (processor.canBeClosed()) {
--        try {
--          LOGGER.info("Close the filenode processor {}.", processorName);
--          processor.close();
--        } catch (ProcessorException e) {
--          LOGGER.error("Close the filenode processor {} error.", processorName, e);
--          throw new FileNodeManagerException(e);
--        }
--      } else {
--        LOGGER.warn("The filenode processor {} can't be closed.", processorName);
--      }
--    } finally {
--      processor.writeUnlock();
--    }
--  }
--
--  /**
--   * delete all filenode.
--   */
--  public synchronized boolean deleteAll() throws FileNodeManagerException {
--    LOGGER.info("Start deleting all filenode");
--    if (fileNodeManagerStatus != FileNodeManagerStatus.NONE) {
--      LOGGER.info("Failed to delete all filenode processor because of merge operation");
--      return false;
--    }
--
--    fileNodeManagerStatus = FileNodeManagerStatus.CLOSE;
--    try {
--      Iterator<Map.Entry<String, FileNodeProcessor>> processorIterator = processorMap.entrySet()
--          .iterator();
--      while (processorIterator.hasNext()) {
--        Map.Entry<String, FileNodeProcessor> processorEntry = processorIterator.next();
--        delete(processorEntry.getKey(), processorIterator);
--      }
--      return processorMap.isEmpty();
--    } finally {
--      LOGGER.info("Deleting all FileNodeProcessors ends");
--      fileNodeManagerStatus = FileNodeManagerStatus.NONE;
--    }
--  }
--
--  /**
--   * Try to setCloseMark All.
--   */
--  public void closeAll() throws FileNodeManagerException {
--    LOGGER.info("Start closing all filenode processor");
--    if (fileNodeManagerStatus != FileNodeManagerStatus.NONE) {
--      LOGGER.info("Failed to setCloseMark all filenode processor because of merge operation");
--      return;
--    }
--    fileNodeManagerStatus = FileNodeManagerStatus.CLOSE;
--    try {
--      for (Map.Entry<String, FileNodeProcessor> processorEntry : processorMap.entrySet()) {
--        close(processorEntry.getKey());
--      }
--    } finally {
--      LOGGER.info("Close all FileNodeProcessors ends");
--      fileNodeManagerStatus = FileNodeManagerStatus.NONE;
--    }
--  }
--
--  /**
--   * force flushMetadata to control memory usage.
--   */
--  public void forceFlush(BasicMemController.UsageLevel level) {
--    // you may add some delicate process like below
--    // or you could provide multiple methods for different urgency
--    switch (level) {
--      // only select the most urgent (most active or biggest in size)
--      // processors to flushMetadata
--      // only select top 10% active memory user to flushMetadata
--      case WARNING:
--        try {
--          flushTop(0.1f);
--        } catch (IOException e) {
--          LOGGER.error("force flushMetadata memory data error: {}", e);
--        }
--        break;
--      // force all processors to flushMetadata
--      case DANGEROUS:
--        try {
--          flushAll();
--        } catch (IOException e) {
--          LOGGER.error("force flushMetadata memory data error: {}", e);
--        }
--        break;
--      // if the flushMetadata thread pool is not full ( or half full), start a new
--      // flushMetadata task
--      case SAFE:
--        if (FlushPoolManager.getInstance().getActiveCnt() < 0.5 * FlushPoolManager.getInstance()
--            .getThreadCnt()) {
--          try {
--            flushTop(0.01f);
--          } catch (IOException e) {
--            LOGGER.error("force flushMetadata memory data error: ", e);
--          }
--        }
--        break;
--      default:
--    }
--  }
--
--  private void flushAll() throws IOException {
--    for (FileNodeProcessor processor : processorMap.values()) {
--      if (!processor.tryLock(true)) {
--        continue;
--      }
--      try {
--        boolean isMerge = processor.flush().isHasOverflowFlushTask();
--        if (isMerge) {
--          processor.submitToMerge();
--        }
--      } finally {
--        processor.unlock(true);
--      }
--    }
--  }
--
--  private void flushTop(float percentage) throws IOException {
--    List<FileNodeProcessor> tempProcessors = new ArrayList<>(processorMap.values());
--    // sort the tempProcessors as descending order
--    tempProcessors.sort((o1, o2) -> (int) (o2.memoryUsage() - o1.memoryUsage()));
--    int flushNum =
--        (int) (tempProcessors.size() * percentage) > 1
--            ? (int) (tempProcessors.size() * percentage)
--            : 1;
--    for (int i = 0; i < flushNum && i < tempProcessors.size(); i++) {
--      FileNodeProcessor processor = tempProcessors.get(i);
--      // 64M
--      if (processor.memoryUsage() <= TSFileConfig.groupSizeInByte / 2) {
--        continue;
--      }
--      long start = System.currentTimeMillis();
--      processor.writeLock();
--      try {
--        boolean isMerge = processor.flush().isHasOverflowFlushTask();
--        if (isMerge) {
--          processor.submitToMerge();
--        }
--      } finally {
--        processor.writeUnlock();
--      }
--      start = System.currentTimeMillis() - start;
--      LOGGER.info("flushMetadata Top cost: {}", start);
--    }
--  }
--
--  @Override
--  public void start() {
--    // do no thing
--  }
--
--  @Override
--  public void stop() {
--    try {
--      closeAll();
--    } catch (FileNodeManagerException e) {
--      LOGGER.error("Failed to setCloseMark file node manager because .", e);
--    }
--
--    boolean notFinished = true;
--    while (notFinished) {
--      int size = 0;
--      for (FileNodeProcessor fileNodeProcessor : processorMap.values()) {
--        size += fileNodeProcessor.getClosingBufferWriteProcessor().size();
--      }
--      if (size == 0) {
--        notFinished = false;
--      } else {
--        try {
--          Thread.sleep(10);
--        } catch (InterruptedException e) {
--          LOGGER.error("File node Manager Stop process is interrupted", e);
--        }
--      }
--    }
--    closedProcessorCleaner.shutdownNow();
--  }
--
--  @Override
--  public ServiceType getID() {
--    return ServiceType.FILE_NODE_SERVICE;
--  }
--
--  private enum FileNodeManagerStatus {
--    NONE, MERGE, CLOSE
--  }
--
--  private static class FileNodeManagerHolder {
--
--    private FileNodeManagerHolder() {
--    }
--
--    private static final FileNodeManager INSTANCE = new FileNodeManager(
--        TsFileDBConf.getFileNodeDir());
--  }
--
--}
--
--
--
++//      if (size == 0) {
++//        notFinished = false;
++//      } else {
++//        try {
++//          Thread.sleep(10);
++//        } catch (InterruptedException e) {
++//          LOGGER.error("File node Manager Stop process is interrupted", e);
++//        }
++//      }
++//    }
++//    closedProcessorCleaner.shutdownNow();
++//  }
++//
++//  @Override
++//  public ServiceType getID() {
++//    return ServiceType.FILE_NODE_SERVICE;
++//  }
++//
++//  private enum FileNodeManagerStatus {
++//    NONE, MERGE, CLOSE
++//  }
++//
++//  private static class FileNodeManagerHolder {
++//
++//    private FileNodeManagerHolder() {
++//    }
++//
++//    private static final FileNodeManager INSTANCE = new FileNodeManager(
++//        TsFileDBConf.getFileNodeDir());
++//  }
++//
++//}
++//
++//
++//
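
The flushTop(float) routine in the FileNodeManager code removed above keeps memory in
check by flushing only the heaviest writers: sort the processors by memory usage in
descending order, flush at least one of them, and skip any processor holding less than
half of the TsFile group size. The same selection backs all three forceFlush urgency
levels: WARNING flushes the top 10%, SAFE flushes the top 1% only while the flush thread
pool is less than half busy, and DANGEROUS falls back to flushAll(). The standalone Java
sketch below illustrates that selection logic only; MockProcessor, selectTop and the demo
values are hypothetical stand-ins, not IoTDB classes.

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Comparator;
    import java.util.List;

    public class FlushTopSketch {

      /** Hypothetical stand-in for a FileNodeProcessor that reports its memtable footprint. */
      static class MockProcessor {
        final String storageGroup;
        final long memoryUsage; // bytes currently held in memtables

        MockProcessor(String storageGroup, long memoryUsage) {
          this.storageGroup = storageGroup;
          this.memoryUsage = memoryUsage;
        }
      }

      /**
       * Pick the processors to flush: sort descending by memory usage, take at least one
       * and at most (size * percentage), and skip processors holding less than half of the
       * group size, since flushing them would reclaim little memory.
       */
      static List<MockProcessor> selectTop(List<MockProcessor> processors,
          float percentage, long groupSizeInByte) {
        List<MockProcessor> sorted = new ArrayList<>(processors);
        sorted.sort(Comparator.comparingLong((MockProcessor p) -> p.memoryUsage).reversed());
        int flushNum = Math.max(1, (int) (sorted.size() * percentage));
        List<MockProcessor> selected = new ArrayList<>();
        for (int i = 0; i < flushNum && i < sorted.size(); i++) {
          MockProcessor candidate = sorted.get(i);
          if (candidate.memoryUsage <= groupSizeInByte / 2) {
            continue; // too small to be worth a flush
          }
          selected.add(candidate);
        }
        return selected;
      }

      public static void main(String[] args) {
        List<MockProcessor> processors = Arrays.asList(
            new MockProcessor("root.sg1", 96L * 1024 * 1024),
            new MockProcessor("root.sg2", 8L * 1024 * 1024),
            new MockProcessor("root.sg3", 70L * 1024 * 1024));
        // 128 MB group size, WARNING level: flush the top 10% (never fewer than one processor)
        for (MockProcessor p : selectTop(processors, 0.1f, 128L * 1024 * 1024)) {
          System.out.println("would flush " + p.storageGroup);
        }
      }
    }

One difference from the removed code is deliberate: the sketch compares with
Comparator.comparingLong instead of casting the difference of two longs to int, which
avoids overflow for very large memtable sizes.
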
diff --cc iotdb/src/main/java/org/apache/iotdb/db/engine/filenode/FileNodeProcessor.java
index 1869aad,a61edc9..7eca2ce
--- a/iotdb/src/main/java/org/apache/iotdb/db/engine/filenode/FileNodeProcessor.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/engine/filenode/FileNodeProcessor.java
@@@ -1,2130 -1,2130 +1,2130 @@@
--/**
-- * Licensed to the Apache Software Foundation (ASF) under one
-- * or more contributor license agreements.  See the NOTICE file
-- * distributed with this work for additional information
-- * regarding copyright ownership.  The ASF licenses this file
-- * to you under the Apache License, Version 2.0 (the
-- * "License"); you may not use this file except in compliance
-- * with the License.  You may obtain a copy of the License at
-- *
-- *      http://www.apache.org/licenses/LICENSE-2.0
-- *
-- * Unless required by applicable law or agreed to in writing,
-- * software distributed under the License is distributed on an
-- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-- * KIND, either express or implied.  See the License for the
-- * specific language governing permissions and limitations
-- * under the License.
-- */
--package org.apache.iotdb.db.engine.filenode;
--
--import static java.time.ZonedDateTime.ofInstant;
--
--import java.io.File;
--import java.io.FileInputStream;
--import java.io.FileOutputStream;
--import java.io.IOException;
--import java.nio.file.FileSystems;
--import java.nio.file.Files;
--import java.time.Instant;
--import java.time.ZoneId;
--import java.time.ZonedDateTime;
--import java.util.ArrayList;
--import java.util.HashMap;
--import java.util.HashSet;
--import java.util.Iterator;
--import java.util.List;
--import java.util.Map;
--import java.util.Map.Entry;
--import java.util.Objects;
--import java.util.Set;
--import java.util.concurrent.CountDownLatch;
--import java.util.concurrent.ExecutionException;
--import java.util.concurrent.Future;
--import java.util.concurrent.TimeUnit;
--import java.util.concurrent.TimeoutException;
--import java.util.concurrent.atomic.AtomicInteger;
--import java.util.concurrent.atomic.AtomicLong;
--import java.util.concurrent.locks.ReentrantLock;
--import java.util.function.Consumer;
--import org.apache.iotdb.db.conf.IoTDBConfig;
--import org.apache.iotdb.db.conf.IoTDBConstant;
--import org.apache.iotdb.db.conf.IoTDBDescriptor;
- import org.apache.iotdb.db.conf.directories.DirectoryManager;
 -import org.apache.iotdb.db.conf.directories.Directories;
--import org.apache.iotdb.db.engine.Processor;
--import org.apache.iotdb.db.engine.bufferwrite.Action;
--import org.apache.iotdb.db.engine.bufferwrite.ActionException;
--import org.apache.iotdb.db.engine.bufferwrite.BufferWriteProcessor;
--import org.apache.iotdb.db.engine.bufferwrite.FileNodeConstants;
--import org.apache.iotdb.db.engine.modification.Deletion;
--import org.apache.iotdb.db.engine.modification.Modification;
--import org.apache.iotdb.db.engine.modification.ModificationFile;
--import org.apache.iotdb.db.engine.overflow.io.OverflowProcessor;
--import org.apache.iotdb.db.engine.pool.MergePoolManager;
--import org.apache.iotdb.db.engine.querycontext.GlobalSortedSeriesDataSource;
--import org.apache.iotdb.db.engine.querycontext.OverflowInsertFile;
--import org.apache.iotdb.db.engine.querycontext.OverflowSeriesDataSource;
--import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
--import org.apache.iotdb.db.engine.querycontext.ReadOnlyMemChunk;
--import org.apache.iotdb.db.engine.querycontext.UnsealedTsFile;
--import org.apache.iotdb.db.engine.version.SimpleFileVersionController;
--import org.apache.iotdb.db.engine.version.VersionController;
--import org.apache.iotdb.db.exception.BufferWriteProcessorException;
--import org.apache.iotdb.db.exception.ErrorDebugException;
--import org.apache.iotdb.db.exception.FileNodeProcessorException;
--import org.apache.iotdb.db.exception.OverflowProcessorException;
--import org.apache.iotdb.db.exception.PathErrorException;
--import org.apache.iotdb.db.exception.ProcessorException;
--import org.apache.iotdb.db.metadata.MManager;
--import org.apache.iotdb.db.monitor.IStatistic;
--import org.apache.iotdb.db.monitor.MonitorConstants;
--import org.apache.iotdb.db.monitor.StatMonitor;
--import org.apache.iotdb.db.query.context.QueryContext;
--import org.apache.iotdb.db.query.control.FileReaderManager;
--import org.apache.iotdb.db.query.factory.SeriesReaderFactory;
--import org.apache.iotdb.db.query.reader.IReader;
--import org.apache.iotdb.db.sync.conf.Constans;
--import org.apache.iotdb.db.utils.ImmediateFuture;
--import org.apache.iotdb.db.utils.MemUtils;
--import org.apache.iotdb.db.utils.QueryUtils;
--import org.apache.iotdb.db.utils.TimeValuePair;
--import org.apache.iotdb.db.writelog.recover.SeqTsFileRecoverPerformer;
--import org.apache.iotdb.tsfile.common.conf.TSFileConfig;
--import org.apache.iotdb.tsfile.exception.write.WriteProcessException;
--import org.apache.iotdb.tsfile.file.footer.ChunkGroupFooter;
--import org.apache.iotdb.tsfile.file.metadata.ChunkMetaData;
--import org.apache.iotdb.tsfile.file.metadata.enums.CompressionType;
--import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
--import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding;
--import org.apache.iotdb.tsfile.read.common.Path;
--import org.apache.iotdb.tsfile.read.expression.impl.SingleSeriesExpression;
--import org.apache.iotdb.tsfile.read.filter.TimeFilter;
--import org.apache.iotdb.tsfile.read.filter.basic.Filter;
--import org.apache.iotdb.tsfile.read.filter.factory.FilterFactory;
--import org.apache.iotdb.tsfile.utils.Pair;
--import org.apache.iotdb.tsfile.write.chunk.ChunkBuffer;
--import org.apache.iotdb.tsfile.write.chunk.ChunkWriterImpl;
--import org.apache.iotdb.tsfile.write.record.TSRecord;
--import org.apache.iotdb.tsfile.write.record.datapoint.LongDataPoint;
--import org.apache.iotdb.tsfile.write.schema.FileSchema;
--import org.apache.iotdb.tsfile.write.schema.MeasurementSchema;
--import org.apache.iotdb.tsfile.write.writer.TsFileIOWriter;
--import org.slf4j.Logger;
--import org.slf4j.LoggerFactory;
--
--public class FileNodeProcessor extends Processor implements IStatistic {
--
--  private static final String WARN_NO_SUCH_OVERFLOWED_FILE = "Can not find any tsfile which"
--      + " will be overflowed in the filenode processor {}, ";
--  public static final String RESTORE_FILE_SUFFIX = ".restore";
--  private static final Logger LOGGER = LoggerFactory.getLogger(FileNodeProcessor.class);
--  private static final IoTDBConfig TsFileDBConf = IoTDBDescriptor.getInstance().getConfig();
--  private static final MManager mManager = MManager.getInstance();
-   private static final DirectoryManager DIRECTORY_MANAGER = DirectoryManager.getInstance();
 -  private static final Directories directories = Directories.getInstance();
--  private final String statStorageDeltaName;
--  private final HashMap<String, AtomicLong> statParamsHashMap = new HashMap<>();
--  /**
--   * Used to keep the oldest timestamp for each deviceId. The key is deviceId.
--   */
--  private volatile boolean isOverflowed;
--  private Map<String, Long> lastUpdateTimeMap;
--  private Map<String, Long> flushLastUpdateTimeMap;
--  private Map<String, List<TsFileResource>> invertedIndexOfFiles;
--  private TsFileResource emptyTsFileResource;
--//  private TsFileResourceV2 currentTsFileResource;
--  private List<TsFileResource> newFileNodes;
--  private FileNodeProcessorStatus isMerging;
--
--  /**
--   * this is used when work->merge operation
--   */
--  private int numOfMergeFile;
--  private FileNodeProcessorStore fileNodeProcessorStore;
--  private String fileNodeRestoreFilePath;
--  private final Object fileNodeRestoreLock = new Object();
--
--  /**
--   * last merge time
--   */
--  private long lastMergeTime = -1;
--  private BufferWriteProcessor bufferWriteProcessor = null;
--
--  //the bufferwrite Processors that are closing. (Because they are not closed well,
--  // their memtable are not released and we have to query data from them.
--  //private ConcurrentSkipListSet<BufferWriteProcessor> closingBufferWriteProcessor = new ConcurrentSkipListSet<>();
--  private CopyOnReadLinkedList<BufferWriteProcessor> closingBufferWriteProcessor = new CopyOnReadLinkedList<>();
--
--  private OverflowProcessor overflowProcessor = null;
--  private Set<Integer> oldMultiPassTokenSet = null;
--  private Set<Integer> newMultiPassTokenSet = new HashSet<>();
--
--  /**
--   * Represent the number of old queries that have not ended.
--   * This parameter only decreases but not increase.
--   */
--  private CountDownLatch oldMultiPassCount = null;
--
--  /**
--   * Represent the number of new queries that have not ended.
--   */
--  private AtomicInteger newMultiPassCount = new AtomicInteger(0);
--
--  /**
--   * statistic monitor parameters
--   */
--  private Map<String, Action> parameters;
--  private FileSchema fileSchema;
--
--  private Action fileNodeFlushAction = () -> {
--    synchronized (fileNodeProcessorStore) {
--      try {
--        writeStoreToDisk(fileNodeProcessorStore);
--      } catch (FileNodeProcessorException e) {
--        throw new ActionException(e);
--      }
--    }
--  };
--
--  private Action bufferwriteFlushAction = () -> {
--    // update the lastUpdateTime Notice: Thread safe
--    synchronized (fileNodeProcessorStore) {
--      // deep copy
--      Map<String, Long> tempLastUpdateMap = new HashMap<>(lastUpdateTimeMap);
--      // update flushLastUpdateTimeMap
--      for (Entry<String, Long> entry : lastUpdateTimeMap.entrySet()) {
--        flushLastUpdateTimeMap.put(entry.getKey(), entry.getValue() + 1);
--      }
--      fileNodeProcessorStore.setLastUpdateTimeMap(tempLastUpdateMap);
--    }
--  };
--
--//  private Action bufferwriteCloseAction = new Action() {
++///**
++// * Licensed to the Apache Software Foundation (ASF) under one
++// * or more contributor license agreements.  See the NOTICE file
++// * distributed with this work for additional information
++// * regarding copyright ownership.  The ASF licenses this file
++// * to you under the Apache License, Version 2.0 (the
++// * "License"); you may not use this file except in compliance
++// * with the License.  You may obtain a copy of the License at
++// *
++// *      http://www.apache.org/licenses/LICENSE-2.0
++// *
++// * Unless required by applicable law or agreed to in writing,
++// * software distributed under the License is distributed on an
++// * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++// * KIND, either express or implied.  See the License for the
++// * specific language governing permissions and limitations
++// * under the License.
++// */
++//package org.apache.iotdb.db.engine.filenode;
  //
--//    @Override
--//    public void act() {
--//      synchronized (fileNodeProcessorStore) {
--//        fileNodeProcessorStore.setLatestFlushTimeForEachDevice(lastUpdateTimeMap);
--//        addLastTimeToIntervalFile();
--//        fileNodeProcessorStore.setSequenceFileList(newFileNodes);
++//import static java.time.ZonedDateTime.ofInstant;
++//
++//import java.io.File;
++//import java.io.FileInputStream;
++//import java.io.FileOutputStream;
++//import java.io.IOException;
++//import java.nio.file.FileSystems;
++//import java.nio.file.Files;
++//import java.time.Instant;
++//import java.time.ZoneId;
++//import java.time.ZonedDateTime;
++//import java.util.ArrayList;
++//import java.util.HashMap;
++//import java.util.HashSet;
++//import java.util.Iterator;
++//import java.util.List;
++//import java.util.Map;
++//import java.util.Map.Entry;
++//import java.util.Objects;
++//import java.util.Set;
++//import java.util.concurrent.CountDownLatch;
++//import java.util.concurrent.ExecutionException;
++//import java.util.concurrent.Future;
++//import java.util.concurrent.TimeUnit;
++//import java.util.concurrent.TimeoutException;
++//import java.util.concurrent.atomic.AtomicInteger;
++//import java.util.concurrent.atomic.AtomicLong;
++//import java.util.concurrent.locks.ReentrantLock;
++//import java.util.function.Consumer;
++//import org.apache.iotdb.db.conf.IoTDBConfig;
++//import org.apache.iotdb.db.conf.IoTDBConstant;
++//import org.apache.iotdb.db.conf.IoTDBDescriptor;
++//import org.apache.iotdb.db.conf.directories.DirectoryManager;
++//import org.apache.iotdb.db.engine.Processor;
++//import org.apache.iotdb.db.engine.bufferwrite.Action;
++//import org.apache.iotdb.db.engine.bufferwrite.ActionException;
++//import org.apache.iotdb.db.engine.bufferwrite.BufferWriteProcessor;
++//import org.apache.iotdb.db.engine.bufferwrite.FileNodeConstants;
++//import org.apache.iotdb.db.engine.modification.Deletion;
++//import org.apache.iotdb.db.engine.modification.Modification;
++//import org.apache.iotdb.db.engine.modification.ModificationFile;
++//import org.apache.iotdb.db.engine.overflow.io.OverflowProcessor;
++//import org.apache.iotdb.db.engine.pool.MergePoolManager;
++//import org.apache.iotdb.db.engine.querycontext.GlobalSortedSeriesDataSource;
++//import org.apache.iotdb.db.engine.querycontext.OverflowInsertFile;
++//import org.apache.iotdb.db.engine.querycontext.OverflowSeriesDataSource;
++//import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
++//import org.apache.iotdb.db.engine.querycontext.ReadOnlyMemChunk;
++//import org.apache.iotdb.db.engine.querycontext.UnsealedTsFile;
++//import org.apache.iotdb.db.engine.version.SimpleFileVersionController;
++//import org.apache.iotdb.db.engine.version.VersionController;
++//import org.apache.iotdb.db.exception.BufferWriteProcessorException;
++//import org.apache.iotdb.db.exception.ErrorDebugException;
++//import org.apache.iotdb.db.exception.FileNodeProcessorException;
++//import org.apache.iotdb.db.exception.OverflowProcessorException;
++//import org.apache.iotdb.db.exception.PathErrorException;
++//import org.apache.iotdb.db.exception.ProcessorException;
++//import org.apache.iotdb.db.metadata.MManager;
++//import org.apache.iotdb.db.monitor.IStatistic;
++//import org.apache.iotdb.db.monitor.MonitorConstants;
++//import org.apache.iotdb.db.monitor.StatMonitor;
++//import org.apache.iotdb.db.query.context.QueryContext;
++//import org.apache.iotdb.db.query.control.FileReaderManager;
++//import org.apache.iotdb.db.query.factory.SeriesReaderFactory;
++//import org.apache.iotdb.db.query.reader.IReader;
++//import org.apache.iotdb.db.sync.conf.Constans;
++//import org.apache.iotdb.db.utils.ImmediateFuture;
++//import org.apache.iotdb.db.utils.MemUtils;
++//import org.apache.iotdb.db.utils.QueryUtils;
++//import org.apache.iotdb.db.utils.TimeValuePair;
++//import org.apache.iotdb.db.writelog.recover.SeqTsFileRecoverPerformer;
++//import org.apache.iotdb.tsfile.common.conf.TSFileConfig;
++//import org.apache.iotdb.tsfile.exception.write.WriteProcessException;
++//import org.apache.iotdb.tsfile.file.footer.ChunkGroupFooter;
++//import org.apache.iotdb.tsfile.file.metadata.ChunkMetaData;
++//import org.apache.iotdb.tsfile.file.metadata.enums.CompressionType;
++//import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
++//import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding;
++//import org.apache.iotdb.tsfile.read.common.Path;
++//import org.apache.iotdb.tsfile.read.expression.impl.SingleSeriesExpression;
++//import org.apache.iotdb.tsfile.read.filter.TimeFilter;
++//import org.apache.iotdb.tsfile.read.filter.basic.Filter;
++//import org.apache.iotdb.tsfile.read.filter.factory.FilterFactory;
++//import org.apache.iotdb.tsfile.utils.Pair;
++//import org.apache.iotdb.tsfile.write.chunk.ChunkBuffer;
++//import org.apache.iotdb.tsfile.write.chunk.ChunkWriterImpl;
++//import org.apache.iotdb.tsfile.write.record.TSRecord;
++//import org.apache.iotdb.tsfile.write.record.datapoint.LongDataPoint;
++//import org.apache.iotdb.tsfile.write.schema.FileSchema;
++//import org.apache.iotdb.tsfile.write.schema.MeasurementSchema;
++//import org.apache.iotdb.tsfile.write.writer.TsFileIOWriter;
++//import org.slf4j.Logger;
++//import org.slf4j.LoggerFactory;
++//
++//public class FileNodeProcessor extends Processor implements IStatistic {
++//
++//  private static final String WARN_NO_SUCH_OVERFLOWED_FILE = "Can not find any tsfile which"
++//      + " will be overflowed in the filenode processor {}, ";
++//  public static final String RESTORE_FILE_SUFFIX = ".restore";
++//  private static final Logger LOGGER = LoggerFactory.getLogger(FileNodeProcessor.class);
++//  private static final IoTDBConfig TsFileDBConf = IoTDBDescriptor.getInstance().getConfig();
++//  private static final MManager mManager = MManager.getInstance();
++//  private static final DirectoryManager DIRECTORY_MANAGER = DirectoryManager.getInstance();
++//  private final String statStorageDeltaName;
++//  private final HashMap<String, AtomicLong> statParamsHashMap = new HashMap<>();
++//  /**
++//   * Used to keep the oldest timestamp for each deviceId. The key is deviceId.
++//   */
++//  private volatile boolean isOverflowed;
++//  private Map<String, Long> lastUpdateTimeMap;
++//  private Map<String, Long> flushLastUpdateTimeMap;
++//  private Map<String, List<TsFileResource>> invertedIndexOfFiles;
++//  private TsFileResource emptyTsFileResource;
++////  private TsFileResourceV2 currentTsFileResource;
++//  private List<TsFileResource> newFileNodes;
++//  private FileNodeProcessorStatus isMerging;
++//
++//  /**
++//   * this is used when work->merge operation
++//   */
++//  private int numOfMergeFile;
++//  private FileNodeProcessorStore fileNodeProcessorStore;
++//  private String fileNodeRestoreFilePath;
++//  private final Object fileNodeRestoreLock = new Object();
++//
++//  /**
++//   * last merge time
++//   */
++//  private long lastMergeTime = -1;
++//  private BufferWriteProcessor bufferWriteProcessor = null;
++//
++//  //the bufferwrite Processors that are closing. (Because they are not closed well,
++//  // their memtable are not released and we have to query data from them.
++//  //private ConcurrentSkipListSet<BufferWriteProcessor> closingBufferWriteProcessor = new ConcurrentSkipListSet<>();
++//  private CopyOnReadLinkedList<BufferWriteProcessor> closingBufferWriteProcessor = new CopyOnReadLinkedList<>();
++//
++//  private OverflowProcessor overflowProcessor = null;
++//  private Set<Integer> oldMultiPassTokenSet = null;
++//  private Set<Integer> newMultiPassTokenSet = new HashSet<>();
++//
++//  /**
++//   * Represent the number of old queries that have not ended.
++//   * This parameter only decreases but not increase.
++//   */
++//  private CountDownLatch oldMultiPassCount = null;
++//
++//  /**
++//   * Represent the number of new queries that have not ended.
++//   */
++//  private AtomicInteger newMultiPassCount = new AtomicInteger(0);
++//
++//  /**
++//   * statistic monitor parameters
++//   */
++//  private Map<String, Action> parameters;
++//  private FileSchema fileSchema;
++//
++//  private Action fileNodeFlushAction = () -> {
++//    synchronized (fileNodeProcessorStore) {
++//      try {
++//        writeStoreToDisk(fileNodeProcessorStore);
++//      } catch (FileNodeProcessorException e) {
++//        throw new ActionException(e);
++//      }
++//    }
++//  };
++//
++//  private Action bufferwriteFlushAction = () -> {
++//    // update the lastUpdateTime Notice: Thread safe
++//    synchronized (fileNodeProcessorStore) {
++//      // deep copy
++//      Map<String, Long> tempLastUpdateMap = new HashMap<>(lastUpdateTimeMap);
++//      // update flushLastUpdateTimeMap
++//      for (Entry<String, Long> entry : lastUpdateTimeMap.entrySet()) {
++//        flushLastUpdateTimeMap.put(entry.getKey(), entry.getValue() + 1);
  //      }
++//      fileNodeProcessorStore.setLastUpdateTimeMap(tempLastUpdateMap);
  //    }
++//  };
  //
--//    private void addLastTimeToIntervalFile() {
++////  private Action bufferwriteCloseAction = new Action() {
++////
++////    @Override
++////    public void act() {
++////      synchronized (fileNodeProcessorStore) {
++////        fileNodeProcessorStore.setLatestFlushTimeForEachDevice(lastUpdateTimeMap);
++////        addLastTimeToIntervalFile();
++////        fileNodeProcessorStore.setSequenceFileList(newFileNodes);
++////      }
++////    }
++////
++////    private void addLastTimeToIntervalFile() {
++////
++////      if (!newFileNodes.isEmpty()) {
++////        // end time with one start time
++////        Map<String, Long> endTimeMap = new HashMap<>();
++////        for (Entry<String, Long> startTime : currentTsFileResource.getStartTimeMap().entrySet()) {
++////          String deviceId = startTime.getKey();
++////          endTimeMap.put(deviceId, lastUpdateTimeMap.get(deviceId));
++////        }
++////        currentTsFileResource.setEndTimeMap(endTimeMap);
++////      }
++////    }
++////  };
++//
++//  private Consumer<BufferWriteProcessor> bufferwriteCloseConsumer = (bwProcessor) -> {
++//    synchronized (fileNodeProcessorStore) {
++//      fileNodeProcessorStore.setLastUpdateTimeMap(lastUpdateTimeMap);
  //
  //      if (!newFileNodes.isEmpty()) {
  //        // end time with one start time
  //        Map<String, Long> endTimeMap = new HashMap<>();
--//        for (Entry<String, Long> startTime : currentTsFileResource.getStartTimeMap().entrySet()) {
++//        TsFileResource resource = bwProcessor.getCurrentTsFileResource();
++//        for (Entry<String, Long> startTime : resource.getStartTimeMap().entrySet()) {
  //          String deviceId = startTime.getKey();
  //          endTimeMap.put(deviceId, lastUpdateTimeMap.get(deviceId));
  //        }
--//        currentTsFileResource.setEndTimeMap(endTimeMap);
++//        resource.setEndTimeMap(endTimeMap);
  //      }
++//      fileNodeProcessorStore.setNewFileNodes(newFileNodes);
++//    }
++//  };
++//
++//
++//  private Action overflowFlushAction = () -> {
++//
++//    // update the new TsFileResourceV2 List and emptyIntervalFile.
++//    // Notice: thread safe
++//    synchronized (fileNodeProcessorStore) {
++//      fileNodeProcessorStore.setOverflowed(isOverflowed);
++//      fileNodeProcessorStore.setEmptyTsFileResource(emptyTsFileResource);
++//      fileNodeProcessorStore.setNewFileNodes(newFileNodes);
  //    }
  //  };
--
--  private Consumer<BufferWriteProcessor> bufferwriteCloseConsumer = (bwProcessor) -> {
--    synchronized (fileNodeProcessorStore) {
--      fileNodeProcessorStore.setLastUpdateTimeMap(lastUpdateTimeMap);
--
--      if (!newFileNodes.isEmpty()) {
--        // end time with one start time
--        Map<String, Long> endTimeMap = new HashMap<>();
--        TsFileResource resource = bwProcessor.getCurrentTsFileResource();
--        for (Entry<String, Long> startTime : resource.getStartTimeMap().entrySet()) {
--          String deviceId = startTime.getKey();
--          endTimeMap.put(deviceId, lastUpdateTimeMap.get(deviceId));
--        }
--        resource.setEndTimeMap(endTimeMap);
--      }
--      fileNodeProcessorStore.setNewFileNodes(newFileNodes);
--    }
--  };
--
--
--  private Action overflowFlushAction = () -> {
--
--    // update the new TsFileResourceV2 List and emptyIntervalFile.
--    // Notice: thread safe
--    synchronized (fileNodeProcessorStore) {
--      fileNodeProcessorStore.setOverflowed(isOverflowed);
--      fileNodeProcessorStore.setEmptyTsFileResource(emptyTsFileResource);
--      fileNodeProcessorStore.setNewFileNodes(newFileNodes);
--    }
--  };
--  // Token for query which used to
--  private int multiPassLockToken = 0;
--  private VersionController versionController;
--  private ReentrantLock mergeDeleteLock = new ReentrantLock();
--
--  /**
--   * This is the modification file of the result of the current merge.
--   */
--  private ModificationFile mergingModification;
--
--  private TsFileIOWriter mergeFileWriter = null;
--  private String mergeOutputPath = null;
--  private String mergeBaseDir = null;
--  private String mergeFileName = null;
--  private boolean mergeIsChunkGroupHasData = false;
--  private long mergeStartPos;
--
--  /**
--   * constructor of FileNodeProcessor.
--   */
--  FileNodeProcessor(String fileNodeDirPath, String processorName)
--      throws FileNodeProcessorException {
--    super(processorName);
--    for (MonitorConstants.FileNodeProcessorStatConstants statConstant :
--        MonitorConstants.FileNodeProcessorStatConstants.values()) {
--      statParamsHashMap.put(statConstant.name(), new AtomicLong(0));
--    }
--    statStorageDeltaName =
--        MonitorConstants.STAT_STORAGE_GROUP_PREFIX + MonitorConstants.MONITOR_PATH_SEPARATOR
--            + MonitorConstants.FILE_NODE_PATH + MonitorConstants.MONITOR_PATH_SEPARATOR
--            + processorName.replaceAll("\\.", "_");
--
--    this.parameters = new HashMap<>();
--    String dirPath = fileNodeDirPath;
--    if (dirPath.length() > 0
--        && dirPath.charAt(dirPath.length() - 1) != File.separatorChar) {
--      dirPath = dirPath + File.separatorChar;
--    }
--
--    File restoreFolder = new File(dirPath + processorName);
--    if (!restoreFolder.exists()) {
--      restoreFolder.mkdirs();
--      LOGGER.info(
--          "The restore directory of the filenode processor {} doesn't exist. Create new " +
--              "directory {}",
--          getProcessorName(), restoreFolder.getAbsolutePath());
--    }
--    fileNodeRestoreFilePath = new File(restoreFolder, processorName + RESTORE_FILE_SUFFIX)
--        .getPath();
--    try {
--      fileNodeProcessorStore = readStoreFromDisk();
--    } catch (FileNodeProcessorException e) {
--      LOGGER.error(
--          "The fileNode processor {} encountered an error when recoverying restore " +
--              "information.", processorName);
--      throw new FileNodeProcessorException(e);
--    }
--    // TODO deep clone the lastupdate time
--    emptyTsFileResource = fileNodeProcessorStore.getEmptyTsFileResource();
--    newFileNodes = fileNodeProcessorStore.getNewFileNodes();
--    isMerging = fileNodeProcessorStore.getFileNodeProcessorStatus();
--    numOfMergeFile = fileNodeProcessorStore.getNumOfMergeFile();
--    invertedIndexOfFiles = new HashMap<>();
--
--    // construct the fileschema
--    try {
--      this.fileSchema = constructFileSchema(processorName);
--    } catch (WriteProcessException e) {
--      throw new FileNodeProcessorException(e);
--    }
--
--    recover();
--
--    // RegistStatService
--    if (TsFileDBConf.isEnableStatMonitor()) {
--      StatMonitor statMonitor = StatMonitor.getInstance();
--      registerStatMetadata();
--      statMonitor.registerStatistics(statStorageDeltaName, this);
--    }
--    try {
--      versionController = new SimpleFileVersionController(restoreFolder.getPath());
--    } catch (IOException e) {
--      throw new FileNodeProcessorException(e);
--    }
--  }
--
--  @Override
--  public Map<String, AtomicLong> getStatParamsHashMap() {
--    return statParamsHashMap;
--  }
--
--  @Override
--  public void registerStatMetadata() {
--    Map<String, String> hashMap = new HashMap<>();
--    for (MonitorConstants.FileNodeProcessorStatConstants statConstant :
--        MonitorConstants.FileNodeProcessorStatConstants.values()) {
--      hashMap
--          .put(statStorageDeltaName + MonitorConstants.MONITOR_PATH_SEPARATOR + statConstant.name(),
--              MonitorConstants.DATA_TYPE_INT64);
--    }
--    StatMonitor.getInstance().registerStatStorageGroup(hashMap);
--  }
--
--  @Override
--  public List<String> getAllPathForStatistic() {
--    List<String> list = new ArrayList<>();
--    for (MonitorConstants.FileNodeProcessorStatConstants statConstant :
--        MonitorConstants.FileNodeProcessorStatConstants.values()) {
--      list.add(
--          statStorageDeltaName + MonitorConstants.MONITOR_PATH_SEPARATOR + statConstant.name());
--    }
--    return list;
--  }
--
--  @Override
--  public Map<String, TSRecord> getAllStatisticsValue() {
--    Long curTime = System.currentTimeMillis();
--    HashMap<String, TSRecord> tsRecordHashMap = new HashMap<>();
--    TSRecord tsRecord = new TSRecord(curTime, statStorageDeltaName);
--
--    Map<String, AtomicLong> hashMap = getStatParamsHashMap();
--    tsRecord.dataPointList = new ArrayList<>();
--    for (Map.Entry<String, AtomicLong> entry : hashMap.entrySet()) {
--      tsRecord.dataPointList.add(new LongDataPoint(entry.getKey(), entry.getValue().get()));
--    }
--
--    tsRecordHashMap.put(statStorageDeltaName, tsRecord);
--    return tsRecordHashMap;
--  }
--
--  /**
--   * add interval FileNode.
--   */
--  void addIntervalFileNode(TsFileResource tsFileResource) throws ActionException {
--    newFileNodes.add(tsFileResource);
--    fileNodeProcessorStore.setNewFileNodes(newFileNodes);
--    fileNodeFlushAction.act();
--  }
--
--  /**
--   * set interval filenode start time.
--   *
--   * @param deviceId device ID
--   */
--  void setIntervalFileNodeStartTime(String deviceId) {
--    if (getBufferWriteProcessor().getCurrentTsFileResource().getStartTime(deviceId) == -1) {
--      getBufferWriteProcessor().getCurrentTsFileResource().setStartTime(deviceId,
--          flushLastUpdateTimeMap.get(deviceId));
--      if (!invertedIndexOfFiles.containsKey(deviceId)) {
--        invertedIndexOfFiles.put(deviceId, new ArrayList<>());
--      }
--      invertedIndexOfFiles.get(deviceId).add(getBufferWriteProcessor().getCurrentTsFileResource());
--    }
--  }
--
--  void setIntervalFileNodeStartTime(String deviceId, long time) {
--    if (time != -1) {
--      getBufferWriteProcessor().getCurrentTsFileResource().setStartTime(deviceId, time);
--    } else {
--      getBufferWriteProcessor().getCurrentTsFileResource().removeTime(deviceId);
--      invertedIndexOfFiles.get(deviceId).remove(getBufferWriteProcessor().getCurrentTsFileResource());
--    }
--  }
--
--  long getIntervalFileNodeStartTime(String deviceId) {
--    return getBufferWriteProcessor().getCurrentTsFileResource().getStartTime(deviceId);
--  }
--
--  private void addAllFileIntoIndex(List<TsFileResource> fileList) {
--    // clear map
--    invertedIndexOfFiles.clear();
--    // add all file to index
--    for (TsFileResource fileNode : fileList) {
--      if (fileNode.getStartTimeMap().isEmpty()) {
--        continue;
--      }
--      for (String deviceId : fileNode.getStartTimeMap().keySet()) {
--        if (!invertedIndexOfFiles.containsKey(deviceId)) {
--          invertedIndexOfFiles.put(deviceId, new ArrayList<>());
--        }
--        invertedIndexOfFiles.get(deviceId).add(fileNode);
--      }
--    }
--  }
--
--  public boolean isOverflowed() {
--    return isOverflowed;
--  }
--
--  /**
--   * if overflow insert, update and delete insert into this filenode processor, set
--   * <code>isOverflowed</code> to true.
--   */
--  public void setOverflowed(boolean isOverflowed) {
--    if (this.isOverflowed != isOverflowed) {
--      this.isOverflowed = isOverflowed;
--    }
--  }
--
--  public FileNodeProcessorStatus getFileNodeProcessorStatus() {
--    return isMerging;
--  }
--
--  /**
--   * execute filenode recovery.
--   */
--  public void recover() throws FileNodeProcessorException {
--    // restore sequential files
--    parameters.put(FileNodeConstants.BUFFERWRITE_FLUSH_ACTION, bufferwriteFlushAction);
--    //parameters.put(FileNodeConstants.BUFFERWRITE_CLOSE_ACTION, bufferwriteCloseAction);
--    parameters
--        .put(FileNodeConstants.FILENODE_PROCESSOR_FLUSH_ACTION, fileNodeFlushAction);
--    parameters.put(FileNodeConstants.OVERFLOW_FLUSH_ACTION, overflowFlushAction);
--    parameters.put(FileNodeConstants.FILENODE_PROCESSOR_FLUSH_ACTION, fileNodeFlushAction);
--
--    for (int i = 0; i < newFileNodes.size(); i++) {
--      TsFileResource tsFile = newFileNodes.get(i);
--      try {
--        String filePath = tsFile.getFilePath();
--        String logNodePrefix = BufferWriteProcessor.logNodePrefix(processorName);
--        SeqTsFileRecoverPerformer recoverPerformer =
--            new SeqTsFileRecoverPerformer(logNodePrefix,
--                fileSchema, versionController, tsFile);
--        recoverPerformer.recover();
--      } catch (ProcessorException e) {
--        LOGGER.error(
--            "The filenode processor {} failed to recover the bufferwrite processor, "
--                + "the last bufferwrite file is {}.",
--            getProcessorName(), tsFile.getFile().getName());
--        throw new FileNodeProcessorException(e);
--      }
--    }
--    recoverUpdateTimeMap();
--
--    // restore the overflow processor
--    LOGGER.info("The filenode processor {} will recover the overflow processor.",
--        getProcessorName());
--
--    try {
--      overflowProcessor = new OverflowProcessor(getProcessorName(), parameters, fileSchema,
--          versionController);
--    } catch (ProcessorException e) {
--      LOGGER.error("The filenode processor {} failed to recovery the overflow processor.",
--          getProcessorName());
--      throw new FileNodeProcessorException(e);
--    }
--
--    if (isMerging == FileNodeProcessorStatus.MERGING_WRITE) {
--      // re-merge all file
--      // if bufferwrite processor is not null, and setCloseMark
--      LOGGER.info("The filenode processor {} is recovering, the filenode status is {}.",
--          getProcessorName(), isMerging);
--      merge();
--    } else if (isMerging == FileNodeProcessorStatus.WAITING) {
--      LOGGER.info("The filenode processor {} is recovering, the filenode status is {}.",
--          getProcessorName(), isMerging);
--      switchWaitingToWorking();
--    }
--    // add file into index of file
--    addAllFileIntoIndex(newFileNodes);
--  }
--
--  private void recoverUpdateTimeMap() {
--    lastUpdateTimeMap = new HashMap<>();
--    flushLastUpdateTimeMap = new HashMap<>();
--    for (TsFileResource tsFileResource : newFileNodes) {
--      Map<String, Long> endTimeMap =  tsFileResource.getEndTimeMap();
--      endTimeMap.forEach((key, value) -> {
--        Long lastTime = lastUpdateTimeMap.get(key);
--        if (lastTime == null || lastTime < value) {
--          lastUpdateTimeMap.put(key, value);
--          flushLastUpdateTimeMap.put(key, value);
--        }
--      });
--    }
--  }
--
--  //when calling this method, the bufferWriteProcessor must not be null
--  private BufferWriteProcessor getBufferWriteProcessor() {
--    return bufferWriteProcessor;
--  }
--
--  /**
--   * get buffer insert processor by processor name and insert time.
--   */
--  public BufferWriteProcessor getBufferWriteProcessor(String processorName, long insertTime)
--      throws FileNodeProcessorException {
--    if (bufferWriteProcessor == null) {
--      Map<String, Action> params = new HashMap<>();
--      params.put(FileNodeConstants.BUFFERWRITE_FLUSH_ACTION, bufferwriteFlushAction);
--      //params.put(FileNodeConstants.BUFFERWRITE_CLOSE_ACTION, bufferwriteCloseAction);
--      params
--          .put(FileNodeConstants.FILENODE_PROCESSOR_FLUSH_ACTION, fileNodeFlushAction);
-       String baseDir = DIRECTORY_MANAGER.getNextFolderForTsfile();
 -      String baseDir = directories.getNextFolderForTsfile();
--      LOGGER.info("Allocate folder {} for the new bufferwrite processor.", baseDir);
--      // construct processor or restore
--      try {
--        bufferWriteProcessor = new BufferWriteProcessor(baseDir, processorName,
--            insertTime + FileNodeConstants.BUFFERWRITE_FILE_SEPARATOR
--                + System.currentTimeMillis(),
--            params, bufferwriteCloseConsumer, versionController, fileSchema);
--      } catch (BufferWriteProcessorException e) {
--        throw new FileNodeProcessorException(String
--            .format("The filenode processor %s failed to get the bufferwrite processor.",
--                processorName), e);
--      }
--    }
--    return bufferWriteProcessor;
--  }
--
--  /**
--   * get overflow processor by processor name.
--   */
--  public OverflowProcessor getOverflowProcessor(String processorName) throws ProcessorException {
--    if (overflowProcessor == null) {
--      Map<String, Action> params = new HashMap<>();
--      // construct processor or restore
--      params.put(FileNodeConstants.OVERFLOW_FLUSH_ACTION, overflowFlushAction);
--      params
--          .put(FileNodeConstants.FILENODE_PROCESSOR_FLUSH_ACTION, fileNodeFlushAction);
--      overflowProcessor = new OverflowProcessor(processorName, params, fileSchema,
--          versionController);
--    } else if (overflowProcessor.isClosed()) {
--      overflowProcessor.reopen();
--    }
--    return overflowProcessor;
--  }
--
--  /**
--   * get overflow processor.
--   */
--  public OverflowProcessor getOverflowProcessor() {
--    if (overflowProcessor == null || overflowProcessor.isClosed()) {
--      LOGGER.error("The overflow processor is null when getting the overflowProcessor");
--    }
--    return overflowProcessor;
--  }
--
--  public boolean hasOverflowProcessor() {
--    return overflowProcessor != null && !overflowProcessor.isClosed();
--  }
--
--  public void setBufferwriteProcessroToClosed() {
--
--    bufferWriteProcessor = null;
--  }
--
--  public boolean hasBufferwriteProcessor() {
--
--    return bufferWriteProcessor != null;
--  }
--
--  /**
--   * set last update time.
--   */
--  public void setLastUpdateTime(String deviceId, long timestamp) {
--    if (!lastUpdateTimeMap.containsKey(deviceId) || lastUpdateTimeMap.get(deviceId) < timestamp) {
--      lastUpdateTimeMap.put(deviceId, timestamp);
--    }
--    if (timestamp == -1) {
--      lastUpdateTimeMap.remove(deviceId);
--    }
--  }
--
--  /**
--   * get last update time.
--   */
--  public long getLastUpdateTime(String deviceId) {
--
--    if (lastUpdateTimeMap.containsKey(deviceId)) {
--      return lastUpdateTimeMap.get(deviceId);
--    } else {
--      return -1;
--    }
--  }
--
--  /**
--   * get flushMetadata last update time.
--   */
--  public long getFlushLastUpdateTime(String deviceId) {
--    if (!flushLastUpdateTimeMap.containsKey(deviceId)) {
--      flushLastUpdateTimeMap.put(deviceId, 0L);
--    }
--    return flushLastUpdateTimeMap.get(deviceId);
--  }
--
--  public Map<String, Long> getLastUpdateTimeMap() {
--    return lastUpdateTimeMap;
--  }
--
--  /**
--   * For insert overflow.
--   */
--  public void changeTypeToChanged(String deviceId, long timestamp) {
--    if (!invertedIndexOfFiles.containsKey(deviceId)) {
--      LOGGER.warn(
--          WARN_NO_SUCH_OVERFLOWED_FILE
--              + "the data is [device:{},time:{}]",
--          getProcessorName(), deviceId, timestamp);
--      emptyTsFileResource.setStartTime(deviceId, 0L);
--      emptyTsFileResource.setEndTime(deviceId, getLastUpdateTime(deviceId));
--      emptyTsFileResource.changeTypeToChanged(isMerging);
--    } else {
--      List<TsFileResource> temp = invertedIndexOfFiles.get(deviceId);
--      int index = searchIndexNodeByTimestamp(deviceId, timestamp, temp);
--      changeTypeToChanged(temp.get(index), deviceId);
--    }
--  }
--
--  private void changeTypeToChanged(TsFileResource fileNode, String deviceId) {
--    fileNode.changeTypeToChanged(isMerging);
--    if (isMerging == FileNodeProcessorStatus.MERGING_WRITE) {
--      fileNode.addMergeChanged(deviceId);
--    }
--  }
--
--  /**
--   * For update overflow.
--   */
--  public void changeTypeToChanged(String deviceId, long startTime, long endTime) {
--    if (!invertedIndexOfFiles.containsKey(deviceId)) {
--      LOGGER.warn(
--          WARN_NO_SUCH_OVERFLOWED_FILE
--              + "the data is [device:{}, start time:{}, end time:{}]",
--          getProcessorName(), deviceId, startTime, endTime);
--      emptyTsFileResource.setStartTime(deviceId, 0L);
--      emptyTsFileResource.setEndTime(deviceId, getLastUpdateTime(deviceId));
--      emptyTsFileResource.changeTypeToChanged(isMerging);
--    } else {
--      List<TsFileResource> temp = invertedIndexOfFiles.get(deviceId);
--      int left = searchIndexNodeByTimestamp(deviceId, startTime, temp);
--      int right = searchIndexNodeByTimestamp(deviceId, endTime, temp);
--      for (int i = left; i <= right; i++) {
--        changeTypeToChanged(temp.get(i), deviceId);
--      }
--    }
--  }
--
--  /**
--   * For delete overflow.
--   */
--  public void changeTypeToChangedForDelete(String deviceId, long timestamp) {
--    if (!invertedIndexOfFiles.containsKey(deviceId)) {
--      LOGGER.warn(
--          WARN_NO_SUCH_OVERFLOWED_FILE
--              + "the data is [device:{}, delete time:{}]",
--          getProcessorName(), deviceId, timestamp);
--      emptyTsFileResource.setStartTime(deviceId, 0L);
--      emptyTsFileResource.setEndTime(deviceId, getLastUpdateTime(deviceId));
--      emptyTsFileResource.changeTypeToChanged(isMerging);
--    } else {
--      List<TsFileResource> temp = invertedIndexOfFiles.get(deviceId);
--      int index = searchIndexNodeByTimestamp(deviceId, timestamp, temp);
--      for (int i = 0; i <= index; i++) {
--        temp.get(i).changeTypeToChanged(isMerging);
--        if (isMerging == FileNodeProcessorStatus.MERGING_WRITE) {
--          temp.get(i).addMergeChanged(deviceId);
--        }
--      }
--    }
--  }
--
--  /**
--   * Search the index of the interval by the timestamp.
--   *
--   * @return index of interval
--   */
--  private int searchIndexNodeByTimestamp(String deviceId, long timestamp,
--      List<TsFileResource> fileList) {
--    int index = 1;
--    while (index < fileList.size()) {
--      if (timestamp < fileList.get(index).getStartTime(deviceId)) {
--        break;
--      } else {
--        index++;
--      }
--    }
--    return index - 1;
--  }
--
--  /**
--   * add multiple pass lock.
--   */
--  public int addMultiPassCount() {
--    LOGGER.debug("Add MultiPassCount: cloneList lock newMultiPassCount.");
--    newMultiPassCount.incrementAndGet();
--    while (newMultiPassTokenSet.contains(multiPassLockToken)) {
--      multiPassLockToken++;
--    }
--    newMultiPassTokenSet.add(multiPassLockToken);
--    LOGGER.debug("Add multi token:{}, nsPath:{}.", multiPassLockToken, getProcessorName());
--    return multiPassLockToken;
--  }
--
--  /**
--   * decrease multiple pass count. TODO: use the return value or remove it.
--   */
--  public boolean decreaseMultiPassCount(int token) throws FileNodeProcessorException {
--    if (newMultiPassTokenSet.contains(token)) {
--      int newMultiPassCountValue = newMultiPassCount.decrementAndGet();
--      if (newMultiPassCountValue < 0) {
--        throw new FileNodeProcessorException(String
--            .format("Remove MultiPassCount error, newMultiPassCount:%d", newMultiPassCountValue));
--      }
--      newMultiPassTokenSet.remove(token);
--      LOGGER.debug("Remove multi token:{}, nspath:{}, new set:{}, count:{}", token,
--          getProcessorName(),
--          newMultiPassTokenSet, newMultiPassCount);
--      return true;
--    } else if (oldMultiPassTokenSet != null && oldMultiPassTokenSet.contains(token)) {
--      // remove token first, then unlock
--      oldMultiPassTokenSet.remove(token);
--      oldMultiPassCount.countDown();
--      long oldMultiPassCountValue = oldMultiPassCount.getCount();
--      if (oldMultiPassCountValue < 0) {
--        throw new FileNodeProcessorException(String
--            .format("Remove MultiPassCount error, oldMultiPassCount:%d", oldMultiPassCountValue));
--      }
--      LOGGER.debug("Remove multi token:{}, old set:{}, count:{}", token, oldMultiPassTokenSet,
--          oldMultiPassCount.getCount());
--      return true;
--    } else {
--      LOGGER.error("remove token error:{},new set:{}, old set:{}", token, newMultiPassTokenSet,
--          oldMultiPassTokenSet);
--      // should add throw exception
--      return false;
--    }
--  }
--
--  /**
--   * query data.
--   */
--  public <T extends Comparable<T>> QueryDataSource query(String deviceId, String measurementId,
--      QueryContext context) throws FileNodeProcessorException {
--    // query overflow data
--    MeasurementSchema mSchema;
--    TSDataType dataType;
--
--    //mSchema = mManager.getSchemaForOnePath(deviceId + "." + measurementId);
--    mSchema = fileSchema.getMeasurementSchema(measurementId);
--    dataType = mSchema.getType();
--
--    OverflowSeriesDataSource overflowSeriesDataSource;
--    try {
--      overflowSeriesDataSource = overflowProcessor.query(deviceId, measurementId, dataType,
--          mSchema.getProps(), context);
--    } catch (IOException e) {
--      throw new FileNodeProcessorException(e);
--    }
--    // tsfile data
--    List<TsFileResource> bufferwriteDataInFiles = new ArrayList<>();
--    for (TsFileResource tsFileResource : newFileNodes) {
--      // add the same tsFileResource, but not the same reference
--      if (tsFileResource.isClosed()) {
--        bufferwriteDataInFiles.add(tsFileResource.backUp());
--      }
--    }
--    Pair<ReadOnlyMemChunk, List<ChunkMetaData>> bufferwritedata = new Pair<>(null, null);
--    // bufferwrite data
--    UnsealedTsFile unsealedTsFile = null;
--
--    if (!newFileNodes.isEmpty() && !newFileNodes.get(newFileNodes.size() - 1).isClosed()
--        && !newFileNodes.get(newFileNodes.size() - 1).getStartTimeMap().isEmpty()) {
--      unsealedTsFile = new UnsealedTsFile();
--      unsealedTsFile.setFilePath(newFileNodes.get(newFileNodes.size() - 1).getFile().getAbsolutePath());
--      if (bufferWriteProcessor == null) {
--        throw new FileNodeProcessorException(String.format(
--            "The last of tsfile %s in filenode processor %s is not closed, "
--                + "but the bufferwrite processor is null.",
--            newFileNodes.get(newFileNodes.size() - 1).getFile().getAbsolutePath(), getProcessorName()));
--      }
--      bufferwritedata = bufferWriteProcessor
--          .queryBufferWriteData(deviceId, measurementId, dataType, mSchema.getProps());
--
--      try {
--        List<Modification> pathModifications = context.getPathModifications(
--            bufferWriteProcessor.getCurrentTsFileResource().getModFile(), deviceId
--                + IoTDBConstant.PATH_SEPARATOR + measurementId
--        );
--        if (!pathModifications.isEmpty()) {
--          QueryUtils.modifyChunkMetaData(bufferwritedata.right, pathModifications);
--        }
--      } catch (IOException e) {
--        throw new FileNodeProcessorException(e);
--      }
--
--      unsealedTsFile.setTimeSeriesChunkMetaDatas(bufferwritedata.right);
--    }
--    GlobalSortedSeriesDataSource globalSortedSeriesDataSource = new GlobalSortedSeriesDataSource(
--        new Path(deviceId + "." + measurementId), bufferwriteDataInFiles, unsealedTsFile,
--        bufferwritedata.left);
--    return new QueryDataSource(globalSortedSeriesDataSource, overflowSeriesDataSource);
--
--  }
--
--  /**
--   * append one specified tsfile to this filenode processor.
--   *
--   * @param appendFile the appended tsfile information
--   * @param appendFilePath the seriesPath of appended file
--   */
--  public void appendFile(TsFileResource appendFile, String appendFilePath)
--      throws FileNodeProcessorException {
--    try {
--      if (!appendFile.getFile().getParentFile().exists()) {
--        appendFile.getFile().getParentFile().mkdirs();
--      }
--      // move file
--      File originFile = new File(appendFilePath);
--      File targetFile = appendFile.getFile();
--      if (!originFile.exists()) {
--        throw new FileNodeProcessorException(
--            String.format("The appended file %s does not exist.", appendFilePath));
--      }
--      if (targetFile.exists()) {
--        throw new FileNodeProcessorException(
--            String.format("The appended target file %s already exists.",
--                appendFile.getFile().getAbsolutePath()));
--      }
--      if (!originFile.renameTo(targetFile)) {
--        LOGGER.warn("File renaming failed when appending new file. Origin: {}, Target: {}",
--            originFile.getPath(), targetFile.getPath());
--      }
--      // append the new tsfile
--      this.newFileNodes.add(appendFile);
--      // update the lastUpdateTime
--      for (Entry<String, Long> entry : appendFile.getEndTimeMap().entrySet()) {
--        lastUpdateTimeMap.put(entry.getKey(), entry.getValue());
--      }
--      bufferwriteFlushAction.act();
--      fileNodeProcessorStore.setNewFileNodes(newFileNodes);
--      // reconstruct the inverted index of the newFileNodes
--      fileNodeFlushAction.act();
--      addAllFileIntoIndex(newFileNodes);
--    } catch (Exception e) {
--      LOGGER.error("Failed to append the tsfile {} to filenode processor {}.", appendFile,
--          getProcessorName());
--      throw new FileNodeProcessorException(e);
--    }
--  }
--
--  /**
--   * get overlap tsfiles which are conflict with the appendFile.
--   *
--   * @param appendFile the appended tsfile information
--   */
--  public List<String> getOverlapFiles(TsFileResource appendFile, String uuid)
--      throws FileNodeProcessorException {
--    List<String> overlapFiles = new ArrayList<>();
--    try {
--      for (TsFileResource tsFileResource : newFileNodes) {
--        getOverlapFiles(appendFile, tsFileResource, uuid, overlapFiles);
--      }
--    } catch (IOException e) {
--      LOGGER.error("Failed to get overlap tsfiles which conflict with the appendFile.");
--      throw new FileNodeProcessorException(e);
--    }
--    return overlapFiles;
--  }
--
--  private void getOverlapFiles(TsFileResource appendFile, TsFileResource tsFileResource,
--      String uuid, List<String> overlapFiles) throws IOException {
--    for (Entry<String, Long> entry : appendFile.getStartTimeMap().entrySet()) {
--      if (tsFileResource.getStartTimeMap().containsKey(entry.getKey()) &&
--          tsFileResource.getEndTime(entry.getKey()) >= entry.getValue()
--          && tsFileResource.getStartTime(entry.getKey()) <= appendFile
--          .getEndTime(entry.getKey())) {
--        String relativeFilePath =
--            Constans.SYNC_SERVER + File.separatorChar + uuid + File.separatorChar
--                + Constans.BACK_UP_DIRECTORY_NAME
--                + File.separatorChar + tsFileResource.getRelativePath();
--        File newFile = new File(
-             DirectoryManager.getInstance().getTsFileFolder(tsFileResource.getBaseDirIndex()),
 -            Directories.getInstance().getTsFileFolder(tsFileResource.getBaseDirIndex()),
--            relativeFilePath);
--        if (!newFile.getParentFile().exists()) {
--          newFile.getParentFile().mkdirs();
--        }
--        java.nio.file.Path link = FileSystems.getDefault().getPath(newFile.getPath());
--        java.nio.file.Path target = FileSystems.getDefault()
--            .getPath(tsFileResource.getFile().getAbsolutePath());
--        Files.createLink(link, target);
--        overlapFiles.add(newFile.getPath());
--        break;
--      }
--    }
--  }
--
--  /**
--   * add time series.
--   */
--  public void addTimeSeries(String measurementId, TSDataType dataType, TSEncoding encoding,
--      CompressionType compressor, Map<String, String> props) {
--    fileSchema.registerMeasurement(new MeasurementSchema(measurementId, dataType, encoding,
--        compressor, props));
--  }
--
--  /**
--   * submit the merge task to the <code>MergePool</code>.
--   *
--   * @return null - the merge task cannot be submitted, because this filenode is not overflowed or
--   * it is merging now; Future - the merge task was submitted successfully.
--   */
--  Future submitToMerge() {
--    ZoneId zoneId = IoTDBDescriptor.getInstance().getConfig().getZoneID();
--    if (lastMergeTime > 0) {
--      long thisMergeTime = System.currentTimeMillis();
--      long mergeTimeInterval = thisMergeTime - lastMergeTime;
--      ZonedDateTime lastDateTime = ofInstant(Instant.ofEpochMilli(lastMergeTime),
--          zoneId);
--      ZonedDateTime thisDateTime = ofInstant(Instant.ofEpochMilli(thisMergeTime),
--          zoneId);
--      LOGGER.info(
--          "The filenode {} last merge time is {}, this merge time is {}, "
--              + "merge time interval is {}s",
--          getProcessorName(), lastDateTime, thisDateTime, mergeTimeInterval / 1000);
--    }
--    lastMergeTime = System.currentTimeMillis();
--
--    if (overflowProcessor != null && !overflowProcessor.isClosed()) {
--      if (overflowProcessor.getFileSize() < IoTDBDescriptor.getInstance()
--          .getConfig().getOverflowFileSizeThreshold()) {
--        if (LOGGER.isInfoEnabled()) {
--          LOGGER.info(
--              "Skip this merge taks submission, because the size{} of overflow processor {} "
--                  + "does not reaches the threshold {}.",
--              MemUtils.bytesCntToStr(overflowProcessor.getFileSize()), getProcessorName(),
--              MemUtils.bytesCntToStr(
--                  IoTDBDescriptor.getInstance().getConfig().getOverflowFileSizeThreshold()));
--        }
--        return null;
--      }
--    } else {
--      LOGGER.info(
--          "Skip this merge taks submission, because the filenode processor {} "
--              + "has no overflow processor.",
--          getProcessorName());
--      return null;
--    }
--    if (isOverflowed && isMerging == FileNodeProcessorStatus.NONE) {
--      Runnable mergeThread;
--      mergeThread = new MergeRunnale();
--      LOGGER.info("Submit the merge task, the merge filenode is {}", getProcessorName());
--      return MergePoolManager.getInstance().submit(mergeThread);
--    } else {
--      if (!isOverflowed) {
--        LOGGER.info(
--            "Skip this merge taks submission, because the filenode processor {} is not " +
--                "overflowed.",
--            getProcessorName());
--      } else {
--        LOGGER.warn(
--            "Skip this merge task submission, because last merge task is not over yet, "
--                + "the merge filenode processor is {}",
--            getProcessorName());
--      }
--    }
--    return null;
--  }
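Per the javadoc above, a null return means there is nothing to wait for. A minimal caller sketch under that contract (the variable names are illustrative):

    Future mergeFuture = processor.submitToMerge();
    if (mergeFuture == null) {
      // not overflowed, or the previous merge is still running; nothing to wait for
    } else {
      try {
        mergeFuture.get(); // block until the submitted merge task finishes
      } catch (InterruptedException | ExecutionException e) {
        Thread.currentThread().interrupt();
      }
    }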
--
--  /**
--   * Prepare for merge: close the bufferwrite and overflow processors.
--   */
--  private void prepareForMerge() {
--    try {
--      LOGGER.info("The filenode processor {} prepares for merge, closes the bufferwrite processor",
--          getProcessorName());
--      Future<Boolean> future = closeBufferWrite();
--      future.get();
--      LOGGER.info("The bufferwrite processor {} is closed successfully",
--          getProcessorName());
--      // try to get overflow processor
--      getOverflowProcessor(getProcessorName());
--      // must close the overflow processor
--      while (!getOverflowProcessor().canBeClosed()) {
--        waitForClosing();
--      }
--      LOGGER.info("The filenode processor {} prepares for merge, closes the overflow processor",
--          getProcessorName());
--      getOverflowProcessor().close();
--    } catch (ProcessorException | InterruptedException | ExecutionException e) {
--      LOGGER.error("The filenode processor {} prepares for merge error.", getProcessorName());
--      writeUnlock();
--      throw new ErrorDebugException(e);
--    }
--  }
--
--  private void waitForClosing() {
--    try {
--      LOGGER.info(
--          "The filenode processor {} prepares for merge, the overflow {} can't be closed, "
--              + "wait 100ms,",
--          getProcessorName(), getProcessorName());
--      TimeUnit.MICROSECONDS.sleep(100);
--    } catch (InterruptedException e) {
--      Thread.currentThread().interrupt();
--    }
--  }
--
--  /**
--   * Merge this storage group, merge the tsfile data with overflow data.
--   */
--  public void merge() throws FileNodeProcessorException {
--    // close bufferwrite and overflow, prepare for merge
--    LOGGER.info("The filenode processor {} begins to merge.", getProcessorName());
--    writeLock();
--    prepareForMerge();
--    // change status from overflowed to no overflowed
--    isOverflowed = false;
--    // change status from work to merge
--    isMerging = FileNodeProcessorStatus.MERGING_WRITE;
--    // check the empty file
--    Map<String, Long> startTimeMap = emptyTsFileResource.getStartTimeMap();
--    mergeCheckEmptyFile(startTimeMap);
--
--    for (TsFileResource tsFileResource : newFileNodes) {
--      if (tsFileResource.getOverflowChangeType() != OverflowChangeType.NO_CHANGE) {
--        tsFileResource.setOverflowChangeType(OverflowChangeType.CHANGED);
--      }
--    }
--
--    addAllFileIntoIndex(newFileNodes);
--    synchronized (fileNodeProcessorStore) {
--      fileNodeProcessorStore.setOverflowed(isOverflowed);
--      fileNodeProcessorStore.setFileNodeProcessorStatus(isMerging);
--      fileNodeProcessorStore.setNewFileNodes(newFileNodes);
--      fileNodeProcessorStore.setEmptyTsFileResource(emptyTsFileResource);
--      // flush this filenode information
--      try {
--        writeStoreToDisk(fileNodeProcessorStore);
--      } catch (FileNodeProcessorException e) {
--        LOGGER.error("The filenode processor {} writes restore information error when merging.",
--            getProcessorName(), e);
--        writeUnlock();
--        throw new FileNodeProcessorException(e);
--      }
--    }
--    // add numOfMergeFile to control the number of the merge file
--    List<TsFileResource> backupIntervalFiles;
--
--    backupIntervalFiles = switchFileNodeToMerge();
--    //
--    // clear empty file
--    //
--    boolean needEmpty = false;
--    if (emptyTsFileResource.getOverflowChangeType() != OverflowChangeType.NO_CHANGE) {
--      needEmpty = true;
--    }
--    emptyTsFileResource.clear();
--    // attention
--    try {
--      if (overflowProcessor.isClosed()) {
--        overflowProcessor.reopen();
--      }
--      overflowProcessor.switchWorkToMerge();
--    } catch (ProcessorException | IOException e) {
--      LOGGER.error("The filenode processor {} can't switch overflow processor from work to merge.",
--          getProcessorName(), e);
--      writeUnlock();
--      throw new FileNodeProcessorException(e);
--    }
--    LOGGER.info("The filenode processor {} switches from {} to {}.", getProcessorName(),
--        FileNodeProcessorStatus.NONE, FileNodeProcessorStatus.MERGING_WRITE);
--    writeUnlock();
--
--    // query tsfile data and overflow data, and merge them
--    int numOfMergeFiles = 0;
--    int allNeedMergeFiles = backupIntervalFiles.size();
--    for (TsFileResource backupIntervalFile : backupIntervalFiles) {
--      numOfMergeFiles++;
--      if (backupIntervalFile.getOverflowChangeType() == OverflowChangeType.CHANGED) {
--        // query data and merge
--        String filePathBeforeMerge = backupIntervalFile.getRelativePath();
--        try {
--          LOGGER.info(
--              "The filenode processor {} begins merging the {}/{} tsfile[{}] with "
--                  + "overflow file, the process is {}%",
--              getProcessorName(), numOfMergeFiles, allNeedMergeFiles, filePathBeforeMerge,
--              (int) (((numOfMergeFiles - 1) / (float) allNeedMergeFiles) * 100));
--          long startTime = System.currentTimeMillis();
--          String newFile = queryAndWriteDataForMerge(backupIntervalFile);
--          long endTime = System.currentTimeMillis();
--          long timeConsume = endTime - startTime;
--          ZoneId zoneId = IoTDBDescriptor.getInstance().getConfig().getZoneID();
--          LOGGER.info(
--              "The fileNode processor {} has merged the {}/{} tsfile[{}->{}] over, "
--                  + "start time of merge is {}, end time of merge is {}, "
--                  + "time consumption is {}ms,"
--                  + " the process is {}%",
--              getProcessorName(), numOfMergeFiles, allNeedMergeFiles, filePathBeforeMerge,
--              newFile, ofInstant(Instant.ofEpochMilli(startTime),
--                  zoneId), ofInstant(Instant.ofEpochMilli(endTime), zoneId), timeConsume,
--              numOfMergeFiles / (float) allNeedMergeFiles * 100);
--        } catch (IOException | PathErrorException e) {
--          LOGGER.error("Merge: query and insert data error.", e);
--          throw new FileNodeProcessorException(e);
--        }
--      } else if (backupIntervalFile.getOverflowChangeType() == OverflowChangeType.MERGING_CHANGE) {
--        LOGGER.error("The overflowChangeType of backupIntervalFile must not be {}",
--            OverflowChangeType.MERGING_CHANGE);
--        // handle this error by throwing an exception
--        throw new FileNodeProcessorException(
--            "The overflowChangeType of backupIntervalFile must not be "
--                + OverflowChangeType.MERGING_CHANGE);
--      } else {
--        LOGGER.debug(
--            "The filenode processor {} is merging, the interval file {} doesn't "
--                + "need to be merged.",
--            getProcessorName(), backupIntervalFile.getRelativePath());
--      }
--    }
--
--    // change status from merge to wait
--    switchMergeToWaiting(backupIntervalFiles, needEmpty);
--
--    // change status from wait to work
--    switchWaitingToWorking();
--  }
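Taken together, merge(), switchMergeToWaiting() and switchWaitingToWorking() drive the processor through NONE -> MERGING_WRITE -> WAITING -> NONE, persisting each step with writeStoreToDisk(). The switch below only restates, as a sketch, the recovery branching that appears later in this diff in recover(); it is not new behaviour:

    // Resume a merge according to the persisted FileNodeProcessorStatus.
    switch (fileNodeProcessorStore.getFileNodeProcessorStatus()) {
      case MERGING_WRITE:
        merge();                  // redo the whole merge
        break;
      case WAITING:
        switchWaitingToWorking(); // only finish the tail of the merge
        break;
      case NONE:
      default:
        break;                    // nothing to resume
    }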
--
--  private void mergeCheckEmptyFile(Map<String, Long> startTimeMap) {
--    if (emptyTsFileResource.getOverflowChangeType() == OverflowChangeType.NO_CHANGE) {
--      return;
--    }
--    Iterator<Entry<String, Long>> iterator = emptyTsFileResource.getEndTimeMap().entrySet()
--        .iterator();
--    while (iterator.hasNext()) {
--      Entry<String, Long> entry = iterator.next();
--      String deviceId = entry.getKey();
--      if (invertedIndexOfFiles.containsKey(deviceId)) {
--        invertedIndexOfFiles.get(deviceId).get(0).setOverflowChangeType(OverflowChangeType.CHANGED);
--        startTimeMap.remove(deviceId);
--        iterator.remove();
--      }
--    }
--    if (emptyTsFileResource.checkEmpty()) {
--      emptyTsFileResource.clear();
--    } else {
--      if (!newFileNodes.isEmpty()) {
--        TsFileResource first = newFileNodes.get(0);
--        for (String deviceId : emptyTsFileResource.getStartTimeMap().keySet()) {
--          first.setStartTime(deviceId, emptyTsFileResource.getStartTime(deviceId));
--          first.setEndTime(deviceId, emptyTsFileResource.getEndTime(deviceId));
--          first.setOverflowChangeType(OverflowChangeType.CHANGED);
--        }
--        emptyTsFileResource.clear();
--      } else {
--        emptyTsFileResource.setOverflowChangeType(OverflowChangeType.CHANGED);
--      }
--    }
--  }
--
--  private List<TsFileResource> switchFileNodeToMerge() throws FileNodeProcessorException {
--    List<TsFileResource> result = new ArrayList<>();
--    if (emptyTsFileResource.getOverflowChangeType() != OverflowChangeType.NO_CHANGE) {
--      // add empty
--      result.add(emptyTsFileResource.backUp());
--      if (!newFileNodes.isEmpty()) {
--        throw new FileNodeProcessorException(
--            String.format("The status of empty file is %s, but the new file list is not empty",
--                emptyTsFileResource.getOverflowChangeType()));
--      }
--      return result;
--    }
--    if (newFileNodes.isEmpty()) {
--      LOGGER.error("No file was changed when merging, the filenode is {}", getProcessorName());
--      throw new FileNodeProcessorException(
--          "No file was changed when merging, the filenode is " + getProcessorName());
--    }
--    for (TsFileResource tsFileResource : newFileNodes) {
--      updateFileNode(tsFileResource, result);
--    }
--    return result;
--  }
--
--  private void updateFileNode(TsFileResource tsFileResource, List<TsFileResource> result) {
--    if (tsFileResource.getOverflowChangeType() == OverflowChangeType.NO_CHANGE) {
--      result.add(tsFileResource.backUp());
--    } else {
--      Map<String, Long> startTimeMap = new HashMap<>();
--      Map<String, Long> endTimeMap = new HashMap<>();
--      for (String deviceId : tsFileResource.getEndTimeMap().keySet()) {
--        List<TsFileResource> temp = invertedIndexOfFiles.get(deviceId);
--        int index = temp.indexOf(tsFileResource);
--        int size = temp.size();
--        // start time
--        if (index == 0) {
--          startTimeMap.put(deviceId, 0L);
--        } else {
--          startTimeMap.put(deviceId, tsFileResource.getStartTime(deviceId));
--        }
--        // end time
--        if (index < size - 1) {
--          endTimeMap.put(deviceId, temp.get(index + 1).getStartTime(deviceId) - 1);
--        } else {
--          endTimeMap.put(deviceId, tsFileResource.getEndTime(deviceId));
--        }
--      }
--      TsFileResource node = new TsFileResource(startTimeMap, endTimeMap,
--          tsFileResource.getOverflowChangeType(), tsFileResource.getFile());
--      result.add(node);
--    }
--  }
--
--  private void switchMergeToWaiting(List<TsFileResource> backupIntervalFiles, boolean needEmpty)
--      throws FileNodeProcessorException {
--    LOGGER.info("The status of filenode processor {} switches from {} to {}.", getProcessorName(),
--        FileNodeProcessorStatus.MERGING_WRITE, FileNodeProcessorStatus.WAITING);
--    writeLock();
--    try {
--      oldMultiPassTokenSet = newMultiPassTokenSet;
--      oldMultiPassCount = new CountDownLatch(newMultiPassCount.get());
--      newMultiPassTokenSet = new HashSet<>();
--      newMultiPassCount = new AtomicInteger(0);
--      List<TsFileResource> result = new ArrayList<>();
--      int beginIndex = 0;
--      if (needEmpty) {
--        TsFileResource empty = backupIntervalFiles.get(0);
--        if (!empty.checkEmpty()) {
--          updateEmpty(empty, result);
--          beginIndex++;
--        }
--      }
--      // reconstruct the file index
--      addAllFileIntoIndex(backupIntervalFiles);
--      // check the merge changed file
--      for (int i = beginIndex; i < backupIntervalFiles.size(); i++) {
--        TsFileResource newFile = newFileNodes.get(i - beginIndex);
--        TsFileResource temp = backupIntervalFiles.get(i);
--        if (newFile.getOverflowChangeType() == OverflowChangeType.MERGING_CHANGE) {
--          updateMergeChanged(newFile, temp);
--        }
--        if (!temp.checkEmpty()) {
--          result.add(temp);
--        }
--      }
--      // add new files created during the merge
--      for (int i = backupIntervalFiles.size() - beginIndex; i < newFileNodes.size(); i++) {
--        TsFileResource fileNode = newFileNodes.get(i);
--        if (fileNode.isClosed()) {
--          result.add(fileNode.backUp());
--        } else {
--          result.add(fileNode);
--        }
--      }
--
--      isMerging = FileNodeProcessorStatus.WAITING;
--      newFileNodes = result;
--      // reconstruct the index
--      addAllFileIntoIndex(newFileNodes);
--      // clear merge changed
--      for (TsFileResource fileNode : newFileNodes) {
--        fileNode.clearMergeChanged();
--      }
--
--      synchronized (fileNodeProcessorStore) {
--        fileNodeProcessorStore.setFileNodeProcessorStatus(isMerging);
--        fileNodeProcessorStore.setEmptyTsFileResource(emptyTsFileResource);
--        fileNodeProcessorStore.setNewFileNodes(newFileNodes);
--        try {
--          writeStoreToDisk(fileNodeProcessorStore);
--        } catch (FileNodeProcessorException e) {
--          LOGGER.error(
--              "Merge: failed to insert filenode information to revocery file, the filenode is " +
--                  "{}.",
--              getProcessorName(), e);
--          throw new FileNodeProcessorException(
--              "Merge: insert filenode information to revocery file failed, the filenode is "
--                  + getProcessorName());
--        }
--      }
--    } finally {
--      writeUnlock();
--    }
--  }
--
--  private void updateEmpty(TsFileResource empty, List<TsFileResource> result) {
--    for (String deviceId : empty.getStartTimeMap().keySet()) {
--      if (invertedIndexOfFiles.containsKey(deviceId)) {
--        TsFileResource temp = invertedIndexOfFiles.get(deviceId).get(0);
--        if (temp.getMergeChanged().contains(deviceId)) {
--          empty.setOverflowChangeType(OverflowChangeType.CHANGED);
--          break;
--        }
--      }
--    }
--    empty.clearMergeChanged();
--    result.add(empty.backUp());
--  }
--
--  private void updateMergeChanged(TsFileResource newFile, TsFileResource temp) {
--    for (String deviceId : newFile.getMergeChanged()) {
--      if (temp.getStartTimeMap().containsKey(deviceId)) {
--        temp.setOverflowChangeType(OverflowChangeType.CHANGED);
--      } else {
--        changeTypeToChanged(deviceId, newFile.getStartTime(deviceId),
--            newFile.getEndTime(deviceId));
--      }
--    }
--  }
--
--
--  private void switchWaitingToWorking()
--      throws FileNodeProcessorException {
--
--    LOGGER.info("The status of filenode processor {} switches from {} to {}.", getProcessorName(),
--        FileNodeProcessorStatus.WAITING, FileNodeProcessorStatus.NONE);
--
--    if (oldMultiPassCount != null) {
--      LOGGER.info("The old Multiple Pass Token set is {}, the old Multiple Pass Count is {}",
--          oldMultiPassTokenSet,
--          oldMultiPassCount);
--      try {
--        oldMultiPassCount.await();
--      } catch (InterruptedException e) {
--        LOGGER.info(
--            "The filenode processor {} encountered an error when it waits for all old queries over.",
--            getProcessorName());
--        throw new FileNodeProcessorException(e);
--      }
--    }
--
--    try {
--      writeLock();
--      try {
--        // delete all files which are not in the newFileNodes
--        // notice: also keep the restore file of the last (unclosed) interval file
--
-         List<String> bufferwriteDirPathList = DIRECTORY_MANAGER.getAllTsFileFolders();
 -        List<String> bufferwriteDirPathList = directories.getAllTsFileFolders();
--        List<File> bufferwriteDirList = new ArrayList<>();
--        collectBufferWriteDirs(bufferwriteDirPathList, bufferwriteDirList);
--
--        Set<String> bufferFiles = new HashSet<>();
--        collectBufferWriteFiles(bufferFiles);
--
--        // add the restore file, if the last file is not closed
--        if (!newFileNodes.isEmpty() && !newFileNodes.get(newFileNodes.size() - 1).isClosed()) {
--          String bufferFileRestorePath =
--              newFileNodes.get(newFileNodes.size() - 1).getFile().getAbsolutePath() + RESTORE_FILE_SUFFIX;
--          bufferFiles.add(bufferFileRestorePath);
--        }
--
--        deleteBufferWriteFiles(bufferwriteDirList, bufferFiles);
--
--        // merge switch
--        changeFileNodes();
--
--        // overflow switch from merge to work
--        overflowProcessor.switchMergeToWork();
--
--        // write status to file
--        isMerging = FileNodeProcessorStatus.NONE;
--        synchronized (fileNodeProcessorStore) {
--          fileNodeProcessorStore.setFileNodeProcessorStatus(isMerging);
--          fileNodeProcessorStore.setNewFileNodes(newFileNodes);
--          fileNodeProcessorStore.setEmptyTsFileResource(emptyTsFileResource);
--          writeStoreToDisk(fileNodeProcessorStore);
--        }
--      } catch (IOException e) {
--        LOGGER.info(
--            "The filenode processor {} encountered an error when its "
--                + "status switched from {} to {}.",
--            getProcessorName(), FileNodeProcessorStatus.WAITING,
--            FileNodeProcessorStatus.NONE);
--        throw new FileNodeProcessorException(e);
--      } finally {
--        writeUnlock();
--      }
--    } finally {
--      oldMultiPassTokenSet = null;
--      oldMultiPassCount = null;
--    }
--
--  }
--
--  private void collectBufferWriteDirs(List<String> bufferwriteDirPathList,
--      List<File> bufferwriteDirList) {
--    for (String bufferwriteDirPath : bufferwriteDirPathList) {
--      if (bufferwriteDirPath.length() > 0
--          && bufferwriteDirPath.charAt(bufferwriteDirPath.length() - 1)
--          != File.separatorChar) {
--        bufferwriteDirPath = bufferwriteDirPath + File.separatorChar;
--      }
--      bufferwriteDirPath = bufferwriteDirPath + getProcessorName();
--      File bufferwriteDir = new File(bufferwriteDirPath);
--      bufferwriteDirList.add(bufferwriteDir);
--      if (!bufferwriteDir.exists()) {
--        bufferwriteDir.mkdirs();
--      }
--    }
--  }
--
--  private void collectBufferWriteFiles(Set<String> bufferFiles) {
--    for (TsFileResource bufferFileNode : newFileNodes) {
--      String bufferFilePath = bufferFileNode.getFile().getAbsolutePath();
--      if (bufferFilePath != null) {
--        bufferFiles.add(bufferFilePath);
--      }
--    }
--  }
--
--  private void deleteBufferWriteFiles(List<File> bufferwriteDirList, Set<String> bufferFiles)
--      throws IOException {
--    for (File bufferwriteDir : bufferwriteDirList) {
--      File[] files = bufferwriteDir.listFiles();
--      if (files == null) {
--        continue;
--      }
--      for (File file : files) {
--        if (!bufferFiles.contains(file.getPath())) {
--          FileReaderManager.getInstance().closeFileAndRemoveReader(file.getPath());
--          if (!file.delete()) {
--            LOGGER.warn("Cannot delete BufferWrite file {}", file.getPath());
--          }
--        }
--      }
--    }
--  }
--
--  private void changeFileNodes() {
--    for (TsFileResource fileNode : newFileNodes) {
--      if (fileNode.getOverflowChangeType() != OverflowChangeType.NO_CHANGE) {
--        fileNode.setOverflowChangeType(OverflowChangeType.CHANGED);
--      }
--    }
--  }
--
--  private String queryAndWriteDataForMerge(TsFileResource backupIntervalFile)
--      throws IOException, FileNodeProcessorException, PathErrorException {
--    Map<String, Long> startTimeMap = new HashMap<>();
--    Map<String, Long> endTimeMap = new HashMap<>();
--
--    mergeFileWriter = null;
--    mergeOutputPath = null;
--    mergeBaseDir = null;
--    mergeFileName = null;
--    // modifications are blocked until mergingModification is created, to avoid
--    // losing any modification.
--    mergeDeleteLock.lock();
--    QueryContext context = new QueryContext();
--    try {
--      FileReaderManager.getInstance().increaseFileReaderReference(backupIntervalFile.getFilePath(),
--          true);
--      for (String deviceId : backupIntervalFile.getStartTimeMap().keySet()) {
--        // query one deviceId
--        List<Path> pathList = new ArrayList<>();
--        mergeIsChunkGroupHasData = false;
--        mergeStartPos = -1;
--        ChunkGroupFooter footer;
--        int numOfChunk = 0;
--        try {
--          List<String> pathStrings = mManager.getLeafNodePathInNextLevel(deviceId);
--          for (String string : pathStrings) {
--            pathList.add(new Path(string));
--          }
--        } catch (PathErrorException e) {
--          LOGGER.error("Can't get all the paths from MManager, the deviceId is {}", deviceId);
--          throw new FileNodeProcessorException(e);
--        }
--        if (pathList.isEmpty()) {
--          continue;
--        }
--        for (Path path : pathList) {
--          // query one measurement of the specified deviceId
--          String measurementId = path.getMeasurement();
--          TSDataType dataType = mManager.getSeriesType(path.getFullPath());
--          OverflowSeriesDataSource overflowSeriesDataSource = overflowProcessor.queryMerge(deviceId,
--              measurementId, dataType, true, context);
--          Filter timeFilter = FilterFactory
--              .and(TimeFilter.gtEq(backupIntervalFile.getStartTime(deviceId)),
--                  TimeFilter.ltEq(backupIntervalFile.getEndTime(deviceId)));
--          SingleSeriesExpression seriesFilter = new SingleSeriesExpression(path, timeFilter);
--
--          for (OverflowInsertFile overflowInsertFile : overflowSeriesDataSource
--              .getOverflowInsertFileList()) {
--            FileReaderManager.getInstance()
--                .increaseFileReaderReference(overflowInsertFile.getFilePath(),
--                    false);
--          }
--
--          IReader seriesReader = SeriesReaderFactory.getInstance()
--              .createSeriesReaderForMerge(backupIntervalFile,
--                  overflowSeriesDataSource, seriesFilter, context);
--          numOfChunk += queryAndWriteSeries(seriesReader, path, seriesFilter, dataType,
--              startTimeMap, endTimeMap, overflowSeriesDataSource);
--        }
--        if (mergeIsChunkGroupHasData) {
--          // end the new rowGroupMetadata
--          mergeFileWriter.endChunkGroup(0);
--        }
--      }
--    } finally {
--      FileReaderManager.getInstance().decreaseFileReaderReference(backupIntervalFile.getFilePath(),
--          true);
--
--      if (mergeDeleteLock.isLocked()) {
--        mergeDeleteLock.unlock();
--      }
--    }
--
--    if (mergeFileWriter != null) {
--      mergeFileWriter.endFile(fileSchema);
--    }
--    backupIntervalFile.setFile(new File(mergeBaseDir + File.separator + mergeFileName));
--    backupIntervalFile.setOverflowChangeType(OverflowChangeType.NO_CHANGE);
--    backupIntervalFile.setStartTimeMap(startTimeMap);
--    backupIntervalFile.setEndTimeMap(endTimeMap);
--    backupIntervalFile.setModFile(mergingModification);
--    mergingModification = null;
--    return mergeFileName;
--  }
--
--  private int queryAndWriteSeries(IReader seriesReader, Path path,
--      SingleSeriesExpression seriesFilter, TSDataType dataType,
--      Map<String, Long> startTimeMap, Map<String, Long> endTimeMap,
--      OverflowSeriesDataSource overflowSeriesDataSource)
--      throws IOException {
--    int numOfChunk = 0;
--    try {
--      if (!seriesReader.hasNext()) {
--        LOGGER.debug(
--            "The time-series {} has no data with the filter {} in the filenode processor {}",
--            path, seriesFilter, getProcessorName());
--      } else {
--        numOfChunk++;
--        TimeValuePair timeValuePair = seriesReader.next();
--        if (mergeFileWriter == null) {
-           mergeBaseDir = DIRECTORY_MANAGER.getNextFolderForTsfile();
 -          mergeBaseDir = directories.getNextFolderForTsfile();
--          mergeFileName = timeValuePair.getTimestamp()
--              + FileNodeConstants.BUFFERWRITE_FILE_SEPARATOR + System.currentTimeMillis();
--          mergeOutputPath = constructOutputFilePath(mergeBaseDir, getProcessorName(),
--              mergeFileName);
--          mergeFileName = getProcessorName() + File.separatorChar + mergeFileName;
--          mergeFileWriter = new TsFileIOWriter(new File(mergeOutputPath));
--          mergingModification = new ModificationFile(mergeOutputPath
--              + ModificationFile.FILE_SUFFIX);
--          mergeDeleteLock.unlock();
--        }
--        if (!mergeIsChunkGroupHasData) {
--          // start a new rowGroupMetadata
--          mergeIsChunkGroupHasData = true;
--          // the dataSize and numOfChunk are placeholders;
--          // the accurate dataSize and numOfChunk are obtained after all data of this device is written.
--          mergeFileWriter.startFlushChunkGroup(path.getDevice());// TODO please check me.
--          mergeStartPos = mergeFileWriter.getPos();
--        }
--        // init the seriesWriterImpl
--        MeasurementSchema measurementSchema = fileSchema
--            .getMeasurementSchema(path.getMeasurement());
--        ChunkBuffer pageWriter = new ChunkBuffer(measurementSchema);
--        int pageSizeThreshold = TSFileConfig.pageSizeInByte;
--        ChunkWriterImpl seriesWriterImpl = new ChunkWriterImpl(measurementSchema, pageWriter,
--            pageSizeThreshold);
--        // write the series data
--        writeOneSeries(path.getDevice(), seriesWriterImpl, dataType,
--            seriesReader,
--            startTimeMap, endTimeMap, timeValuePair);
--        // flush the series data
--        seriesWriterImpl.writeToFileWriter(mergeFileWriter);
--      }
--    } finally {
--      for (OverflowInsertFile overflowInsertFile : overflowSeriesDataSource
--          .getOverflowInsertFileList()) {
--        FileReaderManager.getInstance()
--            .decreaseFileReaderReference(overflowInsertFile.getFilePath(),
--                false);
--      }
--    }
--    return numOfChunk;
--  }
--
--
--  private void writeOneSeries(String deviceId, ChunkWriterImpl seriesWriterImpl,
--      TSDataType dataType, IReader seriesReader, Map<String, Long> startTimeMap,
--      Map<String, Long> endTimeMap, TimeValuePair firstTVPair) throws IOException {
--    long startTime;
--    long endTime;
--    TimeValuePair localTV = firstTVPair;
--    writeTVPair(seriesWriterImpl, dataType, localTV);
--    startTime = endTime = localTV.getTimestamp();
--    if (!startTimeMap.containsKey(deviceId) || startTimeMap.get(deviceId) > startTime) {
--      startTimeMap.put(deviceId, startTime);
--    }
--    if (!endTimeMap.containsKey(deviceId) || endTimeMap.get(deviceId) < endTime) {
--      endTimeMap.put(deviceId, endTime);
--    }
--    while (seriesReader.hasNext()) {
--      localTV = seriesReader.next();
--      endTime = localTV.getTimestamp();
--      writeTVPair(seriesWriterImpl, dataType, localTV);
--    }
--    if (!endTimeMap.containsKey(deviceId) || endTimeMap.get(deviceId) < endTime) {
--      endTimeMap.put(deviceId, endTime);
--    }
--  }
--
--  private void writeTVPair(ChunkWriterImpl seriesWriterImpl, TSDataType dataType,
--      TimeValuePair timeValuePair) throws IOException {
--    switch (dataType) {
--      case BOOLEAN:
--        seriesWriterImpl.write(timeValuePair.getTimestamp(), timeValuePair.getValue().getBoolean());
--        break;
--      case INT32:
--        seriesWriterImpl.write(timeValuePair.getTimestamp(), timeValuePair.getValue().getInt());
--        break;
--      case INT64:
--        seriesWriterImpl.write(timeValuePair.getTimestamp(), timeValuePair.getValue().getLong());
--        break;
--      case FLOAT:
--        seriesWriterImpl.write(timeValuePair.getTimestamp(), timeValuePair.getValue().getFloat());
--        break;
--      case DOUBLE:
--        seriesWriterImpl.write(timeValuePair.getTimestamp(), timeValuePair.getValue().getDouble());
--        break;
--      case TEXT:
--        seriesWriterImpl.write(timeValuePair.getTimestamp(), timeValuePair.getValue().getBinary());
--        break;
--      default:
--        LOGGER.error("Not support data type: {}", dataType);
--        break;
--    }
--  }
--
--
--  private String constructOutputFilePath(String baseDir, String processorName, String fileName) {
--
--    String localBaseDir = baseDir;
--    if (localBaseDir.charAt(localBaseDir.length() - 1) != File.separatorChar) {
--      localBaseDir = localBaseDir + File.separatorChar + processorName;
--    }
--    File dataDir = new File(localBaseDir);
--    if (!dataDir.exists()) {
--      LOGGER.warn("The bufferwrite processor data dir doesn't exists, create new directory {}",
--          localBaseDir);
--      dataDir.mkdirs();
--    }
--    File outputFile = new File(dataDir, fileName);
--    return outputFile.getPath();
--  }
--
--  private FileSchema constructFileSchema(String processorName) throws WriteProcessException {
--
--    List<MeasurementSchema> columnSchemaList;
--    columnSchemaList = mManager.getSchemaForFileName(processorName);
--
--    FileSchema schema = new FileSchema();
--    for (MeasurementSchema measurementSchema : columnSchemaList) {
--      schema.registerMeasurement(measurementSchema);
--    }
--    return schema;
--
--  }
--
--  @Override
--  public boolean canBeClosed() {
--    if (isMerging != FileNodeProcessorStatus.NONE) {
--      LOGGER.info("The filenode {} can't be closed, because the filenode status is {}",
--          getProcessorName(),
--          isMerging);
--      return false;
--    }
--    if (newMultiPassCount.get() != 0) {
--      LOGGER.warn("The filenode {} can't be closed, because newMultiPassCount is {}. The newMultiPassTokenSet is {}",
--          getProcessorName(), newMultiPassCount, newMultiPassTokenSet);
--      return false;
--    }
--
--    if (oldMultiPassCount == null) {
--      return true;
--    }
--    if (oldMultiPassCount.getCount() == 0) {
--      return true;
--    } else {
--      LOGGER.info("The filenode {} can't be closed, because oldMultiPassCount is {}",
--          getProcessorName(), oldMultiPassCount.getCount());
--      return false;
--    }
--  }
--
--  @Override
--  public FileNodeFlushFuture flush() throws IOException {
--    Future<Boolean> bufferWriteFlushFuture = null;
--    Future<Boolean> overflowFlushFuture = null;
--    if (bufferWriteProcessor != null) {
--      bufferWriteFlushFuture = bufferWriteProcessor.flush();
--    }
--    if (overflowProcessor != null && !overflowProcessor.isClosed()) {
--      overflowFlushFuture = overflowProcessor.flush();
--    }
--    return new FileNodeFlushFuture(bufferWriteFlushFuture, overflowFlushFuture);
--  }
--
--  /**
--   * Close the bufferwrite processor.
--   */
--  public Future<Boolean> closeBufferWrite() throws FileNodeProcessorException {
--    if (bufferWriteProcessor == null) {
--      return new ImmediateFuture<>(true);
--    }
--    try {
--      while (!bufferWriteProcessor.canBeClosed()) {
--        waitForBufferWriteClose();
--      }
--      bufferWriteProcessor.close();
--      Future<Boolean> result = bufferWriteProcessor.getCloseFuture();
--      closingBufferWriteProcessor.add(bufferWriteProcessor);
--      bufferWriteProcessor = null;
--      return result;
--    } catch (BufferWriteProcessorException e) {
--      throw new FileNodeProcessorException(e);
--    }
--  }
--
--
--
--  private void waitForBufferWriteClose() {
--    try {
--      LOGGER.info("The bufferwrite {} can't be closed, wait 100ms",
--          bufferWriteProcessor.getProcessorName());
--      TimeUnit.MICROSECONDS.sleep(100);
--    } catch (InterruptedException e) {
--      LOGGER.error("Unexpected interruption", e);
--      Thread.currentThread().interrupt();
--    }
--  }
--
--  /**
--   * Close the overflow processor.
--   */
--  public void closeOverflow() throws FileNodeProcessorException {
--    if (overflowProcessor == null || overflowProcessor.isClosed()) {
--      return;
--    }
--    try {
--      while (!overflowProcessor.canBeClosed()) {
--        waitForOverflowClose();
--      }
--      overflowProcessor.close();
--    } catch (OverflowProcessorException e) {
--      throw new FileNodeProcessorException(e);
--    }
--  }
--
--  private void waitForOverflowClose() {
--    try {
--      LOGGER.info("The overflow {} can't be closed, wait 100ms",
--          overflowProcessor.getProcessorName());
--      TimeUnit.MICROSECONDS.sleep(100);
--    } catch (InterruptedException e) {
--      LOGGER.error("Unexpected interruption", e);
--      Thread.currentThread().interrupt();
--    }
--  }
--
--  @Override
--  public void close() throws FileNodeProcessorException {
--    LOGGER.info("Will setCloseMark FileNode Processor {}.", getProcessorName());
--    Future<Boolean> result = closeBufferWrite();
--    try {
--      result.get();
--    } catch (InterruptedException | ExecutionException e) {
--      throw new FileNodeProcessorException(e);
--    }
--    closeOverflow();
--    for (TsFileResource fileNode : newFileNodes) {
--      if (fileNode.getModFile() != null) {
--        try {
--          fileNode.getModFile().close();
--        } catch (IOException e) {
--          throw new FileNodeProcessorException(e);
--        }
--      }
--    }
--  }
--
--  /**
--   * deregister the filenode processor.
--   */
--  public void delete() throws ProcessorException {
--    if (TsFileDBConf.isEnableStatMonitor()) {
--      // remove the monitor
--      LOGGER.info("Deregister the filenode processor: {} from monitor.", getProcessorName());
--      StatMonitor.getInstance().deregisterStatistics(statStorageDeltaName);
--    }
--    closeBufferWrite();
--    closeOverflow();
--    for (TsFileResource fileNode : newFileNodes) {
--      if (fileNode.getModFile() != null) {
--        try {
--          fileNode.getModFile().close();
--        } catch (IOException e) {
--          throw new FileNodeProcessorException(e);
--        }
--      }
--    }
--  }
--
--  @Override
--  public long memoryUsage() {
--    long memSize = 0;
--    if (bufferWriteProcessor != null) {
--      memSize += bufferWriteProcessor.memoryUsage();
--    }
--    if (overflowProcessor != null) {
--      memSize += overflowProcessor.memoryUsage();
--    }
--    return memSize;
--  }
--
--  private void writeStoreToDisk(FileNodeProcessorStore fileNodeProcessorStore)
--      throws FileNodeProcessorException {
--
--    synchronized (fileNodeRestoreLock) {
--      try (FileOutputStream fileOutputStream = new FileOutputStream(fileNodeRestoreFilePath)) {
--        fileNodeProcessorStore.serialize(fileOutputStream);
--        LOGGER.debug("The filenode processor {} writes restore information to the restore file",
--            getProcessorName());
--      } catch (IOException e) {
--        throw new FileNodeProcessorException(e);
--      }
--    }
--  }
--
--  private FileNodeProcessorStore readStoreFromDisk() throws FileNodeProcessorException {
--
--    synchronized (fileNodeRestoreLock) {
--      File restoreFile = new File(fileNodeRestoreFilePath);
--      if (!restoreFile.exists() || restoreFile.length() == 0) {
--        try {
--          return new FileNodeProcessorStore(false, new HashMap<>(),
--              new TsFileResource(null, false),
--              new ArrayList<>(), FileNodeProcessorStatus.NONE, 0);
--        } catch (IOException e) {
--          throw new FileNodeProcessorException(e);
--        }
--      }
--      try (FileInputStream inputStream = new FileInputStream(fileNodeRestoreFilePath)) {
--        return FileNodeProcessorStore.deSerialize(inputStream);
--      } catch (IOException e) {
--        LOGGER
--            .error("Failed to deserialize the FileNodeRestoreFile {}, {}", fileNodeRestoreFilePath,
--                e);
--        throw new FileNodeProcessorException(e);
--      }
--    }
--  }
--
--  String getFileNodeRestoreFilePath() {
--    return fileNodeRestoreFilePath;
--  }
--
--  /**
--   * Delete data whose timestamp <= 'timestamp' and belongs to the timeseries deviceId.measurementId.
--   *
--   * @param deviceId the deviceId of the timeseries to be deleted.
--   * @param measurementId the measurementId of the timeseries to be deleted.
--   * @param timestamp the delete range is (0, timestamp].
--   */
--  public void delete(String deviceId, String measurementId, long timestamp) throws IOException {
--    // TODO: how to avoid partial deletion?
--    mergeDeleteLock.lock();
--    long version = versionController.nextVersion();
--
--    // record what files are updated so we can roll back them in case of exception
--    List<ModificationFile> updatedModFiles = new ArrayList<>();
--
--    try {
--      String fullPath = deviceId +
--          IoTDBConstant.PATH_SEPARATOR + measurementId;
--      Deletion deletion = new Deletion(fullPath, version, timestamp);
--      if (mergingModification != null) {
--        mergingModification.write(deletion);
--        updatedModFiles.add(mergingModification);
--      }
--      deleteBufferWriteFiles(deviceId, deletion, updatedModFiles);
--      // delete data in memory
--      OverflowProcessor ofProcessor = getOverflowProcessor(getProcessorName());
--      ofProcessor.delete(deviceId, measurementId, timestamp, version, updatedModFiles);
--      if (bufferWriteProcessor != null) {
--        bufferWriteProcessor.delete(deviceId, measurementId, timestamp);
--      }
--    } catch (Exception e) {
--      // roll back
--      for (ModificationFile modFile : updatedModFiles) {
--        modFile.abort();
--      }
--      throw new IOException(e);
--    } finally {
--      mergeDeleteLock.unlock();
--    }
--  }
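A small usage sketch of the deletion API documented above; the device, measurement and timestamp are placeholders:

    try {
      // Drop all data of the hypothetical series root.vehicle.d0.s0 with timestamps <= 1000000.
      processor.delete("root.vehicle.d0", "s0", 1000000L);
    } catch (IOException e) {
      // delete() has already rolled back the modification files it touched before rethrowing
    }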
--
--  private void deleteBufferWriteFiles(String deviceId, Deletion deletion,
--      List<ModificationFile> updatedModFiles) throws IOException {
--    BufferWriteProcessor bufferWriteProcessor = getBufferWriteProcessor();
--    TsFileResource resource = null;
--    if (bufferWriteProcessor != null) {
--      //bufferWriteProcessor == null means the bufferWriteProcessor is closed now.
--      resource = bufferWriteProcessor.getCurrentTsFileResource();
--      if (resource != null && resource.containsDevice(deviceId)) {
--        resource.getModFile().write(deletion);
--        updatedModFiles.add(resource.getModFile());
--      }
--    }
--
--    for (TsFileResource fileNode : newFileNodes) {
--      if (fileNode != resource && fileNode.containsDevice(deviceId)
--          && fileNode.getStartTime(deviceId) <= deletion.getTimestamp()) {
--        fileNode.getModFile().write(deletion);
--        updatedModFiles.add(fileNode.getModFile());
--      }
--    }
--  }
--
--  /**
--   * Similar to delete(), but only deletes data in BufferWrite. Only used by WAL recovery.
--   */
--  public void deleteBufferWrite(String deviceId, String measurementId, long timestamp)
--      throws IOException, BufferWriteProcessorException {
--    String fullPath = deviceId +
--        IoTDBConstant.PATH_SEPARATOR + measurementId;
--    long version = versionController.nextVersion();
--    Deletion deletion = new Deletion(fullPath, version, timestamp);
--
--    List<ModificationFile> updatedModFiles = new ArrayList<>();
--    try {
--      deleteBufferWriteFiles(deviceId, deletion, updatedModFiles);
--    } catch (IOException e) {
--      for (ModificationFile modificationFile : updatedModFiles) {
--        modificationFile.abort();
--      }
--      throw e;
--    }
--    if (bufferWriteProcessor != null) {
--      try {
--        bufferWriteProcessor.delete(deviceId, measurementId, timestamp);
--      } catch (BufferWriteProcessorException e) {
--        throw new IOException(e);
--      }
--    }
--  }
--
--  /**
--   * Similar to delete(), but only deletes data in Overflow. Only used by WAL recovery.
--   */
--  public void deleteOverflow(String deviceId, String measurementId, long timestamp)
--      throws ProcessorException {
--    long version = versionController.nextVersion();
--
--    OverflowProcessor overflowProcessor = getOverflowProcessor(getProcessorName());
--    List<ModificationFile> updatedModFiles = new ArrayList<>();
--    try {
--      overflowProcessor.delete(deviceId, measurementId, timestamp, version, updatedModFiles);
--    } catch (IOException e) {
--      for (ModificationFile modificationFile : updatedModFiles) {
--        try {
--          modificationFile.abort();
--        } catch (IOException e1) {
--          throw new ProcessorException(e);
--        }
--      }
--      throw new ProcessorException(e);
--    }
--  }
--
--  public CopyOnReadLinkedList<BufferWriteProcessor> getClosingBufferWriteProcessor() {
--    for (BufferWriteProcessor processor: closingBufferWriteProcessor.cloneList()) {
--      if (processor.isClosed()) {
--        closingBufferWriteProcessor.remove(processor);
--      }
--    }
--    closingBufferWriteProcessor.reset();
--    return closingBufferWriteProcessor;
--  }
--
--  @Override
--  public boolean equals(Object o) {
--    if (this == o) {
--      return true;
--    }
--    if (o == null || getClass() != o.getClass()) {
--      return false;
--    }
--    if (!super.equals(o)) {
--      return false;
--    }
--    FileNodeProcessor that = (FileNodeProcessor) o;
--    return isOverflowed == that.isOverflowed &&
--        numOfMergeFile == that.numOfMergeFile &&
--        lastMergeTime == that.lastMergeTime &&
--        multiPassLockToken == that.multiPassLockToken &&
--        Objects.equals(statStorageDeltaName, that.statStorageDeltaName) &&
--        Objects.equals(statParamsHashMap, that.statParamsHashMap) &&
--        Objects.equals(lastUpdateTimeMap, that.lastUpdateTimeMap) &&
--        Objects.equals(flushLastUpdateTimeMap, that.flushLastUpdateTimeMap) &&
--        Objects.equals(invertedIndexOfFiles, that.invertedIndexOfFiles) &&
--        Objects.equals(emptyTsFileResource, that.emptyTsFileResource) &&
--        Objects.equals(newFileNodes, that.newFileNodes) &&
--        isMerging == that.isMerging &&
--        Objects.equals(fileNodeProcessorStore, that.fileNodeProcessorStore) &&
--        Objects.equals(fileNodeRestoreFilePath, that.fileNodeRestoreFilePath) &&
--        Objects.equals(bufferWriteProcessor, that.bufferWriteProcessor) &&
--        Objects.equals(overflowProcessor, that.overflowProcessor) &&
--        Objects.equals(oldMultiPassTokenSet, that.oldMultiPassTokenSet) &&
--        Objects.equals(newMultiPassTokenSet, that.newMultiPassTokenSet) &&
--        Objects.equals(oldMultiPassCount, that.oldMultiPassCount) &&
--        Objects.equals(newMultiPassCount, that.newMultiPassCount) &&
--        Objects.equals(parameters, that.parameters) &&
--        Objects.equals(fileSchema, that.fileSchema) &&
--        Objects.equals(fileNodeFlushAction, that.fileNodeFlushAction) &&
--        Objects.equals(bufferwriteFlushAction, that.bufferwriteFlushAction) &&
--        Objects.equals(overflowFlushAction, that.overflowFlushAction);
--  }
--
--  @Override
--  public int hashCode() {
--    return processorName.hashCode();
--  }
--
--  public class MergeRunnale implements Runnable {
--
--    @Override
--    public void run() {
--      try {
--        ZoneId zoneId = IoTDBDescriptor.getInstance().getConfig().getZoneID();
--        long mergeStartTime = System.currentTimeMillis();
--        merge();
--        long mergeEndTime = System.currentTimeMillis();
--        long intervalTime = mergeEndTime - mergeStartTime;
--        LOGGER.info(
--            "The filenode processor {} merge start time is {}, "
--                + "merge end time is {}, merge consumes {}ms.",
--            getProcessorName(), ofInstant(Instant.ofEpochMilli(mergeStartTime),
--                zoneId), ofInstant(Instant.ofEpochMilli(mergeEndTime),
--                zoneId), intervalTime);
--      } catch (FileNodeProcessorException e) {
--        LOGGER.error("The filenode processor {} encountered an error when merging.",
--            getProcessorName(), e);
--        throw new ErrorDebugException(e);
--      }
--    }
--  }
--
--  /**
--   * wait for all closing processors to finish their tasks
--   */
--  public void waitforAllClosed() throws FileNodeProcessorException {
--    close();
--    while (getClosingBufferWriteProcessor().size() != 0) {
--      checkAllClosingProcessors();
--      try {
--        Thread.sleep(10);
--      } catch (InterruptedException e) {
--        LOGGER.error("Filenode Processor {} is interrupted when waiting for all closed.", processorName, e);
--      }
--    }
--  }
--
--
--  void checkAllClosingProcessors() {
--    Iterator<BufferWriteProcessor> iterator =
--        this.getClosingBufferWriteProcessor().iterator();
--    while (iterator.hasNext()) {
--      BufferWriteProcessor processor = iterator.next();
--      try {
--        if (processor.getCloseFuture().get(10, TimeUnit.MILLISECONDS)) {
--          //if finished, we can remove it.
--          iterator.remove();
--        }
--      } catch (InterruptedException | ExecutionException e) {
--        LOGGER.error("Close bufferwrite processor {} failed.", processor.getProcessorName(), e);
--      } catch (TimeoutException e) {
--        //do nothing.
--      }
--    }
--    this.getClosingBufferWriteProcessor().reset();
--  }
--}
++//  // Token for query which used to
++//  private int multiPassLockToken = 0;
++//  private VersionController versionController;
++//  private ReentrantLock mergeDeleteLock = new ReentrantLock();
++//
++//  /**
++//   * This is the modification file of the result of the current merge.
++//   */
++//  private ModificationFile mergingModification;
++//
++//  private TsFileIOWriter mergeFileWriter = null;
++//  private String mergeOutputPath = null;
++//  private String mergeBaseDir = null;
++//  private String mergeFileName = null;
++//  private boolean mergeIsChunkGroupHasData = false;
++//  private long mergeStartPos;
++//
++//  /**
++//   * constructor of FileNodeProcessor.
++//   */
++//  FileNodeProcessor(String fileNodeDirPath, String processorName)
++//      throws FileNodeProcessorException {
++//    super(processorName);
++//    for (MonitorConstants.FileNodeProcessorStatConstants statConstant :
++//        MonitorConstants.FileNodeProcessorStatConstants.values()) {
++//      statParamsHashMap.put(statConstant.name(), new AtomicLong(0));
++//    }
++//    statStorageDeltaName =
++//        MonitorConstants.STAT_STORAGE_GROUP_PREFIX + MonitorConstants.MONITOR_PATH_SEPARATOR
++//            + MonitorConstants.FILE_NODE_PATH + MonitorConstants.MONITOR_PATH_SEPARATOR
++//            + processorName.replaceAll("\\.", "_");
++//
++//    this.parameters = new HashMap<>();
++//    String dirPath = fileNodeDirPath;
++//    if (dirPath.length() > 0
++//        && dirPath.charAt(dirPath.length() - 1) != File.separatorChar) {
++//      dirPath = dirPath + File.separatorChar;
++//    }
++//
++//    File restoreFolder = new File(dirPath + processorName);
++//    if (!restoreFolder.exists()) {
++//      restoreFolder.mkdirs();
++//      LOGGER.info(
++//          "The restore directory of the filenode processor {} doesn't exist. Create new " +
++//              "directory {}",
++//          getProcessorName(), restoreFolder.getAbsolutePath());
++//    }
++//    fileNodeRestoreFilePath = new File(restoreFolder, processorName + RESTORE_FILE_SUFFIX)
++//        .getPath();
++//    try {
++//      fileNodeProcessorStore = readStoreFromDisk();
++//    } catch (FileNodeProcessorException e) {
++//      LOGGER.error(
++//          "The fileNode processor {} encountered an error when recoverying restore " +
++//              "information.", processorName);
++//      throw new FileNodeProcessorException(e);
++//    }
++//    // TODO deep clone the lastupdate time
++//    emptyTsFileResource = fileNodeProcessorStore.getEmptyTsFileResource();
++//    newFileNodes = fileNodeProcessorStore.getNewFileNodes();
++//    isMerging = fileNodeProcessorStore.getFileNodeProcessorStatus();
++//    numOfMergeFile = fileNodeProcessorStore.getNumOfMergeFile();
++//    invertedIndexOfFiles = new HashMap<>();
++//
++//    // construct the fileschema
++//    try {
++//      this.fileSchema = constructFileSchema(processorName);
++//    } catch (WriteProcessException e) {
++//      throw new FileNodeProcessorException(e);
++//    }
++//
++//    recover();
++//
++//    // register StatService
++//    if (TsFileDBConf.isEnableStatMonitor()) {
++//      StatMonitor statMonitor = StatMonitor.getInstance();
++//      registerStatMetadata();
++//      statMonitor.registerStatistics(statStorageDeltaName, this);
++//    }
++//    try {
++//      versionController = new SimpleFileVersionController(restoreFolder.getPath());
++//    } catch (IOException e) {
++//      throw new FileNodeProcessorException(e);
++//    }
++//  }
++//
++//  @Override
++//  public Map<String, AtomicLong> getStatParamsHashMap() {
++//    return statParamsHashMap;
++//  }
++//
++//  @Override
++//  public void registerStatMetadata() {
++//    Map<String, String> hashMap = new HashMap<>();
++//    for (MonitorConstants.FileNodeProcessorStatConstants statConstant :
++//        MonitorConstants.FileNodeProcessorStatConstants.values()) {
++//      hashMap
++//          .put(statStorageDeltaName + MonitorConstants.MONITOR_PATH_SEPARATOR + statConstant.name(),
++//              MonitorConstants.DATA_TYPE_INT64);
++//    }
++//    StatMonitor.getInstance().registerStatStorageGroup(hashMap);
++//  }
++//
++//  @Override
++//  public List<String> getAllPathForStatistic() {
++//    List<String> list = new ArrayList<>();
++//    for (MonitorConstants.FileNodeProcessorStatConstants statConstant :
++//        MonitorConstants.FileNodeProcessorStatConstants.values()) {
++//      list.add(
++//          statStorageDeltaName + MonitorConstants.MONITOR_PATH_SEPARATOR + statConstant.name());
++//    }
++//    return list;
++//  }
++//
++//  @Override
++//  public Map<String, TSRecord> getAllStatisticsValue() {
++//    Long curTime = System.currentTimeMillis();
++//    HashMap<String, TSRecord> tsRecordHashMap = new HashMap<>();
++//    TSRecord tsRecord = new TSRecord(curTime, statStorageDeltaName);
++//
++//    Map<String, AtomicLong> hashMap = getStatParamsHashMap();
++//    tsRecord.dataPointList = new ArrayList<>();
++//    for (Map.Entry<String, AtomicLong> entry : hashMap.entrySet()) {
++//      tsRecord.dataPointList.add(new LongDataPoint(entry.getKey(), entry.getValue().get()));
++//    }
++//
++//    tsRecordHashMap.put(statStorageDeltaName, tsRecord);
++//    return tsRecordHashMap;
++//  }
++//
++//  /**
++//   * add interval FileNode.
++//   */
++//  void addIntervalFileNode(TsFileResource tsFileResource) throws ActionException {
++//    newFileNodes.add(tsFileResource);
++//    fileNodeProcessorStore.setNewFileNodes(newFileNodes);
++//    fileNodeFlushAction.act();
++//  }
++//
++//  /**
++//   * set interval filenode start time.
++//   *
++//   * @param deviceId device ID
++//   */
++//  void setIntervalFileNodeStartTime(String deviceId) {
++//    if (getBufferWriteProcessor().getCurrentTsFileResource().getStartTime(deviceId) == -1) {
++//      getBufferWriteProcessor().getCurrentTsFileResource().setStartTime(deviceId,
++//          flushLastUpdateTimeMap.get(deviceId));
++//      if (!invertedIndexOfFiles.containsKey(deviceId)) {
++//        invertedIndexOfFiles.put(deviceId, new ArrayList<>());
++//      }
++//      invertedIndexOfFiles.get(deviceId).add(getBufferWriteProcessor().getCurrentTsFileResource());
++//    }
++//  }
++//
++//  void setIntervalFileNodeStartTime(String deviceId, long time) {
++//    if (time != -1) {
++//      getBufferWriteProcessor().getCurrentTsFileResource().setStartTime(deviceId, time);
++//    } else {
++//      getBufferWriteProcessor().getCurrentTsFileResource().removeTime(deviceId);
++//      invertedIndexOfFiles.get(deviceId).remove(getBufferWriteProcessor().getCurrentTsFileResource());
++//    }
++//  }
++//
++//  long getIntervalFileNodeStartTime(String deviceId) {
++//    return getBufferWriteProcessor().getCurrentTsFileResource().getStartTime(deviceId);
++//  }
++//
++//  private void addAllFileIntoIndex(List<TsFileResource> fileList) {
++//    // clear map
++//    invertedIndexOfFiles.clear();
++//    // add all file to index
++//    for (TsFileResource fileNode : fileList) {
++//      if (fileNode.getStartTimeMap().isEmpty()) {
++//        continue;
++//      }
++//      for (String deviceId : fileNode.getStartTimeMap().keySet()) {
++//        if (!invertedIndexOfFiles.containsKey(deviceId)) {
++//          invertedIndexOfFiles.put(deviceId, new ArrayList<>());
++//        }
++//        invertedIndexOfFiles.get(deviceId).add(fileNode);
++//      }
++//    }
++//  }
++//
++//  public boolean isOverflowed() {
++//    return isOverflowed;
++//  }
++//
++//  /**
++//   * if an overflow insert, update or delete is written into this filenode processor, set
++//   * <code>isOverflowed</code> to true.
++//   */
++//  public void setOverflowed(boolean isOverflowed) {
++//    if (this.isOverflowed != isOverflowed) {
++//      this.isOverflowed = isOverflowed;
++//    }
++//  }
++//
++//  public FileNodeProcessorStatus getFileNodeProcessorStatus() {
++//    return isMerging;
++//  }
++//
++//  /**
++//   * execute filenode recovery.
++//   */
++//  public void recover() throws FileNodeProcessorException {
++//    // restore sequential files
++//    parameters.put(FileNodeConstants.BUFFERWRITE_FLUSH_ACTION, bufferwriteFlushAction);
++//    //parameters.put(FileNodeConstants.BUFFERWRITE_CLOSE_ACTION, bufferwriteCloseAction);
++//    parameters
++//        .put(FileNodeConstants.FILENODE_PROCESSOR_FLUSH_ACTION, fileNodeFlushAction);
++//    parameters.put(FileNodeConstants.OVERFLOW_FLUSH_ACTION, overflowFlushAction);
++//    parameters.put(FileNodeConstants.FILENODE_PROCESSOR_FLUSH_ACTION, fileNodeFlushAction);
++//
++//    for (int i = 0; i < newFileNodes.size(); i++) {
++//      TsFileResource tsFile = newFileNodes.get(i);
++////      try {
++////        String filePath = tsFile.getFilePath();
++////        String logNodePrefix = BufferWriteProcessor.logNodePrefix(processorName);
++////        SeqTsFileRecoverPerformer recoverPerformer =
++////            new SeqTsFileRecoverPerformer(logNodePrefix,
++////                fileSchema, versionController, tsFile);
++////        recoverPerformer.recover();
++////      } catch (ProcessorException e) {
++////        LOGGER.error(
++////            "The filenode processor {} failed to recover the bufferwrite processor, "
++////                + "the last bufferwrite file is {}.",
++////            getProcessorName(), tsFile.getFile().getName());
++////        throw new FileNodeProcessorException(e);
++////      }
++//    }
++//    recoverUpdateTimeMap();
++//
++//    // restore the overflow processor
++//    LOGGER.info("The filenode processor {} will recover the overflow processor.",
++//        getProcessorName());
++//
++//    try {
++//      overflowProcessor = new OverflowProcessor(getProcessorName(), parameters, fileSchema,
++//          versionController);
++//    } catch (ProcessorException e) {
++//      LOGGER.error("The filenode processor {} failed to recovery the overflow processor.",
++//          getProcessorName());
++//      throw new FileNodeProcessorException(e);
++//    }
++//
++//    if (isMerging == FileNodeProcessorStatus.MERGING_WRITE) {
++//      // re-merge all files
++//      // if the bufferwrite processor is not null, setCloseMark it first
++//      LOGGER.info("The filenode processor {} is recovering, the filenode status is {}.",
++//          getProcessorName(), isMerging);
++//      merge();
++//    } else if (isMerging == FileNodeProcessorStatus.WAITING) {
++//      LOGGER.info("The filenode processor {} is recovering, the filenode status is {}.",
++//          getProcessorName(), isMerging);
++//      switchWaitingToWorking();
++//    }
++//    // add file into index of file
++//    addAllFileIntoIndex(newFileNodes);
++//  }
++//
++//  private void recoverUpdateTimeMap() {
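++//    // rebuild lastUpdateTimeMap and flushLastUpdateTimeMap from the device end times recorded in newFileNodes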
++//    lastUpdateTimeMap = new HashMap<>();
++//    flushLastUpdateTimeMap = new HashMap<>();
++//    for (TsFileResource tsFileResource : newFileNodes) {
++//      Map<String, Long> endTimeMap =  tsFileResource.getEndTimeMap();
++//      endTimeMap.forEach((key, value) -> {
++//        Long lastTime = lastUpdateTimeMap.get(key);
++//        if (lastTime == null || lastTime < value) {
++//          lastUpdateTimeMap.put(key, value);
++//          flushLastUpdateTimeMap.put(key, value);
++//        }
++//      });
++//    }
++//  }
++//
++//  //when calling this method, the bufferWriteProcessor must not be null
++//  private BufferWriteProcessor getBufferWriteProcessor() {
++//    return bufferWriteProcessor;
++//  }
++//
++//  /**
++//   * get the bufferwrite processor by processor name and insert time.
++//   */
++//  public BufferWriteProcessor getBufferWriteProcessor(String processorName, long insertTime)
++//      throws FileNodeProcessorException {
++//    if (bufferWriteProcessor == null) {
++//      Map<String, Action> params = new HashMap<>();
++//      params.put(FileNodeConstants.BUFFERWRITE_FLUSH_ACTION, bufferwriteFlushAction);
++//      //params.put(FileNodeConstants.BUFFERWRITE_CLOSE_ACTION, bufferwriteCloseAction);
++//      params
++//          .put(FileNodeConstants.FILENODE_PROCESSOR_FLUSH_ACTION, fileNodeFlushAction);
++//      String baseDir = DIRECTORY_MANAGER.getNextFolderForTsfile();
++//      LOGGER.info("Allocate folder {} for the new bufferwrite processor.", baseDir);
++//      // construct processor or restore
++//      try {
++//        bufferWriteProcessor = new BufferWriteProcessor(baseDir, processorName,
++//            insertTime + FileNodeConstants.BUFFERWRITE_FILE_SEPARATOR
++//                + System.currentTimeMillis(),
++//            params, bufferwriteCloseConsumer, versionController, fileSchema);
++//      } catch (BufferWriteProcessorException e) {
++//        throw new FileNodeProcessorException(String
++//            .format("The filenode processor %s failed to get the bufferwrite processor.",
++//                processorName), e);
++//      }
++//    }
++//    return bufferWriteProcessor;
++//  }
++//
++//  /**
++//   * get overflow processor by processor name.
++//   */
++//  public OverflowProcessor getOverflowProcessor(String processorName) throws ProcessorException {
++//    if (overflowProcessor == null) {
++//      Map<String, Action> params = new HashMap<>();
++//      // construct processor or restore
++//      params.put(FileNodeConstants.OVERFLOW_FLUSH_ACTION, overflowFlushAction);
++//      params
++//          .put(FileNodeConstants.FILENODE_PROCESSOR_FLUSH_ACTION, fileNodeFlushAction);
++//      overflowProcessor = new OverflowProcessor(processorName, params, fileSchema,
++//          versionController);
++//    } else if (overflowProcessor.isClosed()) {
++//      overflowProcessor.reopen();
++//    }
++//    return overflowProcessor;
++//  }
++//
++//  /**
++//   * get overflow processor.
++//   */
++//  public OverflowProcessor getOverflowProcessor() {
++//    if (overflowProcessor == null || overflowProcessor.isClosed()) {
++//      LOGGER.error("The overflow processor is null when getting the overflowProcessor");
++//    }
++//    return overflowProcessor;
++//  }
++//
++//  public boolean hasOverflowProcessor() {
++//    return overflowProcessor != null && !overflowProcessor.isClosed();
++//  }
++//
++//  public void setBufferwriteProcessroToClosed() {
++//
++//    bufferWriteProcessor = null;
++//  }
++//
++//  public boolean hasBufferwriteProcessor() {
++//
++//    return bufferWriteProcessor != null;
++//  }
++//
++//  /**
++//   * set last update time.
++//   */
++//  public void setLastUpdateTime(String deviceId, long timestamp) {
++//    if (!lastUpdateTimeMap.containsKey(deviceId) || lastUpdateTimeMap.get(deviceId) < timestamp) {
++//      lastUpdateTimeMap.put(deviceId, timestamp);
++//    }
++//    if (timestamp == -1) {
++//      lastUpdateTimeMap.remove(deviceId);
++//    }
++//  }
++//
++//  /**
++//   * get last update time.
++//   */
++//  public long getLastUpdateTime(String deviceId) {
++//
++//    if (lastUpdateTimeMap.containsKey(deviceId)) {
++//      return lastUpdateTimeMap.get(deviceId);
++//    } else {
++//      return -1;
++//    }
++//  }
++//
++//  /**
++//   * get flushMetadata last update time.
++//   */
++//  public long getFlushLastUpdateTime(String deviceId) {
++//    if (!flushLastUpdateTimeMap.containsKey(deviceId)) {
++//      flushLastUpdateTimeMap.put(deviceId, 0L);
++//    }
++//    return flushLastUpdateTimeMap.get(deviceId);
++//  }
++//
++//  public Map<String, Long> getLastUpdateTimeMap() {
++//    return lastUpdateTimeMap;
++//  }
++//
++//  /**
++//   * For insert overflow.
++//   */
++//  public void changeTypeToChanged(String deviceId, long timestamp) {
++//    if (!invertedIndexOfFiles.containsKey(deviceId)) {
++//      LOGGER.warn(
++//          WARN_NO_SUCH_OVERFLOWED_FILE
++//              + "the data is [device:{},time:{}]",
++//          getProcessorName(), deviceId, timestamp);
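++//      // no file in the index contains this device, so record the change on the placeholder emptyTsFileResource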
++//      emptyTsFileResource.setStartTime(deviceId, 0L);
++//      emptyTsFileResource.setEndTime(deviceId, getLastUpdateTime(deviceId));
++//      emptyTsFileResource.changeTypeToChanged(isMerging);
++//    } else {
++//      List<TsFileResource> temp = invertedIndexOfFiles.get(deviceId);
++//      int index = searchIndexNodeByTimestamp(deviceId, timestamp, temp);
++//      changeTypeToChanged(temp.get(index), deviceId);
++//    }
++//  }
++//
++//  private void changeTypeToChanged(TsFileResource fileNode, String deviceId) {
++//    fileNode.changeTypeToChanged(isMerging);
++//    if (isMerging == FileNodeProcessorStatus.MERGING_WRITE) {
++//      fileNode.addMergeChanged(deviceId);
++//    }
++//  }
++//
++//  /**
++//   * For update overflow.
++//   */
++//  public void changeTypeToChanged(String deviceId, long startTime, long endTime) {
++//    if (!invertedIndexOfFiles.containsKey(deviceId)) {
++//      LOGGER.warn(
++//          WARN_NO_SUCH_OVERFLOWED_FILE
++//              + "the data is [device:{}, start time:{}, end time:{}]",
++//          getProcessorName(), deviceId, startTime, endTime);
++//      emptyTsFileResource.setStartTime(deviceId, 0L);
++//      emptyTsFileResource.setEndTime(deviceId, getLastUpdateTime(deviceId));
++//      emptyTsFileResource.changeTypeToChanged(isMerging);
++//    } else {
++//      List<TsFileResource> temp = invertedIndexOfFiles.get(deviceId);
++//      int left = searchIndexNodeByTimestamp(deviceId, startTime, temp);
++//      int right = searchIndexNodeByTimestamp(deviceId, endTime, temp);
++//      for (int i = left; i <= right; i++) {
++//        changeTypeToChanged(temp.get(i), deviceId);
++//      }
++//    }
++//  }
++//
++//  /**
++//   * For delete overflow.
++//   */
++//  public void changeTypeToChangedForDelete(String deviceId, long timestamp) {
++//    if (!invertedIndexOfFiles.containsKey(deviceId)) {
++//      LOGGER.warn(
++//          WARN_NO_SUCH_OVERFLOWED_FILE
++//              + "the data is [device:{}, delete time:{}]",
++//          getProcessorName(), deviceId, timestamp);
++//      emptyTsFileResource.setStartTime(deviceId, 0L);
++//      emptyTsFileResource.setEndTime(deviceId, getLastUpdateTime(deviceId));
++//      emptyTsFileResource.changeTypeToChanged(isMerging);
++//    } else {
++//      List<TsFileResource> temp = invertedIndexOfFiles.get(deviceId);
++//      int index = searchIndexNodeByTimestamp(deviceId, timestamp, temp);
++//      for (int i = 0; i <= index; i++) {
++//        temp.get(i).changeTypeToChanged(isMerging);
++//        if (isMerging == FileNodeProcessorStatus.MERGING_WRITE) {
++//          temp.get(i).addMergeChanged(deviceId);
++//        }
++//      }
++//    }
++//  }
++//
++//  /**
++//   * Search the index of the interval by the timestamp.
++//   *
++//   * @return index of interval
++//   */
++//  private int searchIndexNodeByTimestamp(String deviceId, long timestamp,
++//      List<TsFileResource> fileList) {
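++//    // linear scan over files ordered by start time; returns the index of the last file whose
++//    // start time for this device is <= timestamp (at least 0)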
++//    int index = 1;
++//    while (index < fileList.size()) {
++//      if (timestamp < fileList.get(index).getStartTime(deviceId)) {
++//        break;
++//      } else {
++//        index++;
++//      }
++//    }
++//    return index - 1;
++//  }
++//
++//  /**
++//   * add multiple pass lock.
++//   */
++//  public int addMultiPassCount() {
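++//    // each multi-pass query takes a token; canBeClosed() checks the counts and switchWaitingToWorking()
++//    // waits for old queries to finish before cleaning up merged files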
++//    LOGGER.debug("Add MultiPassCount: cloneList lock newMultiPassCount.");
++//    newMultiPassCount.incrementAndGet();
++//    while (newMultiPassTokenSet.contains(multiPassLockToken)) {
++//      multiPassLockToken++;
++//    }
++//    newMultiPassTokenSet.add(multiPassLockToken);
++//    LOGGER.debug("Add multi token:{}, nsPath:{}.", multiPassLockToken, getProcessorName());
++//    return multiPassLockToken;
++//  }
++//
++//  /**
++//   * decrease multiple pass count. TODO: use the return value or remove it.
++//   */
++//  public boolean decreaseMultiPassCount(int token) throws FileNodeProcessorException {
++//    if (newMultiPassTokenSet.contains(token)) {
++//      int newMultiPassCountValue = newMultiPassCount.decrementAndGet();
++//      if (newMultiPassCountValue < 0) {
++//        throw new FileNodeProcessorException(String
++//            .format("Remove MultiPassCount error, newMultiPassCount:%d", newMultiPassCountValue));
++//      }
++//      newMultiPassTokenSet.remove(token);
++//      LOGGER.debug("Remove multi token:{}, nspath:{}, new set:{}, count:{}", token,
++//          getProcessorName(),
++//          newMultiPassTokenSet, newMultiPassCount);
++//      return true;
++//    } else if (oldMultiPassTokenSet != null && oldMultiPassTokenSet.contains(token)) {
++//      // remove token first, then unlock
++//      oldMultiPassTokenSet.remove(token);
++//      oldMultiPassCount.countDown();
++//      long oldMultiPassCountValue = oldMultiPassCount.getCount();
++//      if (oldMultiPassCountValue < 0) {
++//        throw new FileNodeProcessorException(String
++//            .format("Remove MultiPassCount error, oldMultiPassCount:%d", oldMultiPassCountValue));
++//      }
++//      LOGGER.debug("Remove multi token:{}, old set:{}, count:{}", token, oldMultiPassTokenSet,
++//          oldMultiPassCount.getCount());
++//      return true;
++//    } else {
++//      LOGGER.error("remove token error:{},new set:{}, old set:{}", token, newMultiPassTokenSet,
++//          oldMultiPassTokenSet);
++//      // TODO: consider throwing an exception here
++//      return false;
++//    }
++//  }
++//
++//  /**
++//   * query data.
++//   */
++//  public <T extends Comparable<T>> QueryDataSource query(String deviceId, String measurementId,
++//      QueryContext context) throws FileNodeProcessorException {
++//    // query overflow data
++//    MeasurementSchema mSchema;
++//    TSDataType dataType;
++//
++//    //mSchema = mManager.getSchemaForOnePath(deviceId + "." + measurementId);
++//    mSchema = fileSchema.getMeasurementSchema(measurementId);
++//    dataType = mSchema.getType();
++//
++//    OverflowSeriesDataSource overflowSeriesDataSource;
++//    try {
++//      overflowSeriesDataSource = overflowProcessor.query(deviceId, measurementId, dataType,
++//          mSchema.getProps(), context);
++//    } catch (IOException e) {
++//      throw new FileNodeProcessorException(e);
++//    }
++//    // tsfile data
++//    List<TsFileResource> bufferwriteDataInFiles = new ArrayList<>();
++//    for (TsFileResource tsFileResource : newFileNodes) {
++//      // add the same tsFileResource, but not the same reference
++//      if (tsFileResource.isClosed()) {
++//        bufferwriteDataInFiles.add(tsFileResource.backUp());
++//      }
++//    }
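++//    // left: in-memory data of the working memtable; right: metadata of chunks already flushed to the unsealed file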
++//    Pair<ReadOnlyMemChunk, List<ChunkMetaData>> bufferwritedata = new Pair<>(null, null);
++//    // bufferwrite data
++//    UnsealedTsFile unsealedTsFile = null;
++//
++//    if (!newFileNodes.isEmpty() && !newFileNodes.get(newFileNodes.size() - 1).isClosed()
++//        && !newFileNodes.get(newFileNodes.size() - 1).getStartTimeMap().isEmpty()) {
++//      unsealedTsFile = new UnsealedTsFile();
++//      unsealedTsFile.setFilePath(newFileNodes.get(newFileNodes.size() - 1).getFile().getAbsolutePath());
++//      if (bufferWriteProcessor == null) {
++//        throw new FileNodeProcessorException(String.format(
++//            "The last of tsfile %s in filenode processor %s is not closed, "
++//                + "but the bufferwrite processor is null.",
++//            newFileNodes.get(newFileNodes.size() - 1).getFile().getAbsolutePath(), getProcessorName()));
++//      }
++//      bufferwritedata = bufferWriteProcessor
++//          .queryBufferWriteData(deviceId, measurementId, dataType, mSchema.getProps());
++//
++//      try {
++//        List<Modification> pathModifications = context.getPathModifications(
++//            bufferWriteProcessor.getCurrentTsFileResource().getModFile(), deviceId
++//                + IoTDBConstant.PATH_SEPARATOR + measurementId
++//        );
++//        if (!pathModifications.isEmpty()) {
++//          QueryUtils.modifyChunkMetaData(bufferwritedata.right, pathModifications);
++//        }
++//      } catch (IOException e) {
++//        throw new FileNodeProcessorException(e);
++//      }
++//
++//      unsealedTsFile.setTimeSeriesChunkMetaDatas(bufferwritedata.right);
++//    }
++//    GlobalSortedSeriesDataSource globalSortedSeriesDataSource = new GlobalSortedSeriesDataSource(
++//        new Path(deviceId + "." + measurementId), bufferwriteDataInFiles, unsealedTsFile,
++//        bufferwritedata.left);
++//    return new QueryDataSource(globalSortedSeriesDataSource, overflowSeriesDataSource);
++//
++//  }
++//
++//  /**
++//   * append one specified tsfile to this filenode processor.
++//   *
++//   * @param appendFile the appended tsfile information
++//   * @param appendFilePath the seriesPath of appended file
++//   */
++//  public void appendFile(TsFileResource appendFile, String appendFilePath)
++//      throws FileNodeProcessorException {
++//    try {
++//      if (!appendFile.getFile().getParentFile().exists()) {
++//        appendFile.getFile().getParentFile().mkdirs();
++//      }
++//      // move file
++//      File originFile = new File(appendFilePath);
++//      File targetFile = appendFile.getFile();
++//      if (!originFile.exists()) {
++//        throw new FileNodeProcessorException(
++//            String.format("The appended file %s does not exist.", appendFilePath));
++//      }
++//      if (targetFile.exists()) {
++//        throw new FileNodeProcessorException(
++//            String.format("The appended target file %s already exists.",
++//                appendFile.getFile().getAbsolutePath()));
++//      }
++//      if (!originFile.renameTo(targetFile)) {
++//        LOGGER.warn("File renaming failed when appending new file. Origin: {}, Target: {}",
++//            originFile.getPath(), targetFile.getPath());
++//      }
++//      // append the new tsfile
++//      this.newFileNodes.add(appendFile);
++//      // update the lastUpdateTime
++//      for (Entry<String, Long> entry : appendFile.getEndTimeMap().entrySet()) {
++//        lastUpdateTimeMap.put(entry.getKey(), entry.getValue());
++//      }
++//      bufferwriteFlushAction.act();
++//      fileNodeProcessorStore.setNewFileNodes(newFileNodes);
++//      // reconstruct the inverted index of the newFileNodes
++//      fileNodeFlushAction.act();
++//      addAllFileIntoIndex(newFileNodes);
++//    } catch (Exception e) {
++//      LOGGER.error("Failed to append the tsfile {} to filenode processor {}.", appendFile,
++//          getProcessorName());
++//      throw new FileNodeProcessorException(e);
++//    }
++//  }
++//
++//  /**
++//   * get overlapping tsfiles which conflict with the appendFile.
++//   *
++//   * @param appendFile the appended tsfile information
++//   */
++//  public List<String> getOverlapFiles(TsFileResource appendFile, String uuid)
++//      throws FileNodeProcessorException {
++//    List<String> overlapFiles = new ArrayList<>();
++//    try {
++//      for (TsFileResource tsFileResource : newFileNodes) {
++//        getOverlapFiles(appendFile, tsFileResource, uuid, overlapFiles);
++//      }
++//    } catch (IOException e) {
++//      LOGGER.error("Failed to get overlap tsfiles which conflict with the appendFile.");
++//      throw new FileNodeProcessorException(e);
++//    }
++//    return overlapFiles;
++//  }
++//
++//  private void getOverlapFiles(TsFileResource appendFile, TsFileResource tsFileResource,
++//      String uuid, List<String> overlapFiles) throws IOException {
++//    for (Entry<String, Long> entry : appendFile.getStartTimeMap().entrySet()) {
++//      if (tsFileResource.getStartTimeMap().containsKey(entry.getKey()) &&
++//          tsFileResource.getEndTime(entry.getKey()) >= entry.getValue()
++//          && tsFileResource.getStartTime(entry.getKey()) <= appendFile
++//          .getEndTime(entry.getKey())) {
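++//        // time ranges overlap: expose the conflicting tsfile via a hard link under the sync backup directory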
++//        String relativeFilePath =
++//            Constans.SYNC_SERVER + File.separatorChar + uuid + File.separatorChar
++//                + Constans.BACK_UP_DIRECTORY_NAME
++//                + File.separatorChar + tsFileResource.getRelativePath();
++//        File newFile = new File(
++//            DirectoryManager.getInstance().getTsFileFolder(tsFileResource.getBaseDirIndex()),
++//            relativeFilePath);
++//        if (!newFile.getParentFile().exists()) {
++//          newFile.getParentFile().mkdirs();
++//        }
++//        java.nio.file.Path link = FileSystems.getDefault().getPath(newFile.getPath());
++//        java.nio.file.Path target = FileSystems.getDefault()
++//            .getPath(tsFileResource.getFile().getAbsolutePath());
++//        Files.createLink(link, target);
++//        overlapFiles.add(newFile.getPath());
++//        break;
++//      }
++//    }
++//  }
++//
++//  /**
++//   * add time series.
++//   */
++//  public void addTimeSeries(String measurementId, TSDataType dataType, TSEncoding encoding,
++//      CompressionType compressor, Map<String, String> props) {
++//    fileSchema.registerMeasurement(new MeasurementSchema(measurementId, dataType, encoding,
++//        compressor, props));
++//  }
++//
++//  /**
++//   * submit the merge task to the <code>MergePool</code>.
++//   *
++//   * @return null - the merge task can't be submitted, because this filenode is not overflowed or it
++//   * is merging now; Future - the merge task was submitted successfully.
++//   */
++//  Future submitToMerge() {
++//    ZoneId zoneId = IoTDBDescriptor.getInstance().getConfig().getZoneID();
++//    if (lastMergeTime > 0) {
++//      long thisMergeTime = System.currentTimeMillis();
++//      long mergeTimeInterval = thisMergeTime - lastMergeTime;
++//      ZonedDateTime lastDateTime = ofInstant(Instant.ofEpochMilli(lastMergeTime),
++//          zoneId);
++//      ZonedDateTime thisDateTime = ofInstant(Instant.ofEpochMilli(thisMergeTime),
++//          zoneId);
++//      LOGGER.info(
++//          "The filenode {} last merge time is {}, this merge time is {}, "
++//              + "merge time interval is {}s",
++//          getProcessorName(), lastDateTime, thisDateTime, mergeTimeInterval / 1000);
++//    }
++//    lastMergeTime = System.currentTimeMillis();
++//
++//    if (overflowProcessor != null && !overflowProcessor.isClosed()) {
++//      if (overflowProcessor.getFileSize() < IoTDBDescriptor.getInstance()
++//          .getConfig().getOverflowFileSizeThreshold()) {
++//        if (LOGGER.isInfoEnabled()) {
++//          LOGGER.info(
++//              "Skip this merge taks submission, because the size{} of overflow processor {} "
++//                  + "does not reaches the threshold {}.",
++//              MemUtils.bytesCntToStr(overflowProcessor.getFileSize()), getProcessorName(),
++//              MemUtils.bytesCntToStr(
++//                  IoTDBDescriptor.getInstance().getConfig().getOverflowFileSizeThreshold()));
++//        }
++//        return null;
++//      }
++//    } else {
++//      LOGGER.info(
++//          "Skip this merge taks submission, because the filenode processor {} "
++//              + "has no overflow processor.",
++//          getProcessorName());
++//      return null;
++//    }
++//    if (isOverflowed && isMerging == FileNodeProcessorStatus.NONE) {
++//      Runnable mergeThread;
++//      mergeThread = new MergeRunnale();
++//      LOGGER.info("Submit the merge task, the merge filenode is {}", getProcessorName());
++//      return MergePoolManager.getInstance().submit(mergeThread);
++//    } else {
++//      if (!isOverflowed) {
++//        LOGGER.info(
++//            "Skip this merge taks submission, because the filenode processor {} is not " +
++//                "overflowed.",
++//            getProcessorName());
++//      } else {
++//        LOGGER.warn(
++//            "Skip this merge task submission, because last merge task is not over yet, "
++//                + "the merge filenode processor is {}",
++//            getProcessorName());
++//      }
++//    }
++//    return null;
++//  }
++//
++//  /**
++//   * Prepare for merge: setCloseMark the bufferwrite and overflow processors.
++//   */
++//  private void prepareForMerge() {
++//    try {
++//      LOGGER.info("The filenode processor {} prepares for merge, closes the bufferwrite processor",
++//          getProcessorName());
++//      Future<Boolean> future = closeBufferWrite();
++//      future.get();
++//      LOGGER.info("The bufferwrite processor {} is closed successfully",
++//          getProcessorName());
++//      // try to get overflow processor
++//      getOverflowProcessor(getProcessorName());
++//      // must setCloseMark the overflow processor
++//      while (!getOverflowProcessor().canBeClosed()) {
++//        waitForClosing();
++//      }
++//      LOGGER.info("The filenode processor {} prepares for merge, closes the overflow processor",
++//          getProcessorName());
++//      getOverflowProcessor().close();
++//    } catch (ProcessorException | InterruptedException | ExecutionException e) {
++//      LOGGER.error("The filenode processor {} prepares for merge error.", getProcessorName());
++//      writeUnlock();
++//      throw new ErrorDebugException(e);
++//    }
++//  }
++//
++//  private void waitForClosing() {
++//    try {
++//      LOGGER.info(
++//          "The filenode processor {} prepares for merge, the overflow {} can't be closed, "
++//              + "wait 100ms,",
++//          getProcessorName(), getProcessorName());
++//      TimeUnit.MICROSECONDS.sleep(100);
++//    } catch (InterruptedException e) {
++//      Thread.currentThread().interrupt();
++//    }
++//  }
++//
++//  /**
++//   * Merge this storage group, merge the tsfile data with overflow data.
++//   */
++//  public void merge() throws FileNodeProcessorException {
++//    // setCloseMark bufferwrite and overflow, prepare for merge
++//    LOGGER.info("The filenode processor {} begins to merge.", getProcessorName());
++//    writeLock();
++//    prepareForMerge();
++//    // change status from overflowed to no overflowed
++//    isOverflowed = false;
++//    // change status from work to merge
++//    isMerging = FileNodeProcessorStatus.MERGING_WRITE;
++//    // check the empty file
++//    Map<String, Long> startTimeMap = emptyTsFileResource.getStartTimeMap();
++//    mergeCheckEmptyFile(startTimeMap);
++//
++//    for (TsFileResource tsFileResource : newFileNodes) {
++//      if (tsFileResource.getOverflowChangeType() != OverflowChangeType.NO_CHANGE) {
++//        tsFileResource.setOverflowChangeType(OverflowChangeType.CHANGED);
++//      }
++//    }
++//
++//    addAllFileIntoIndex(newFileNodes);
++//    synchronized (fileNodeProcessorStore) {
++//      fileNodeProcessorStore.setOverflowed(isOverflowed);
++//      fileNodeProcessorStore.setFileNodeProcessorStatus(isMerging);
++//      fileNodeProcessorStore.setNewFileNodes(newFileNodes);
++//      fileNodeProcessorStore.setEmptyTsFileResource(emptyTsFileResource);
++//      // flushMetadata this filenode information
++//      try {
++//        writeStoreToDisk(fileNodeProcessorStore);
++//      } catch (FileNodeProcessorException e) {
++//        LOGGER.error("The filenode processor {} writes restore information error when merging.",
++//            getProcessorName(), e);
++//        writeUnlock();
++//        throw new FileNodeProcessorException(e);
++//      }
++//    }
++//    // add numOfMergeFile to control the number of the merge file
++//    List<TsFileResource> backupIntervalFiles;
++//
++//    backupIntervalFiles = switchFileNodeToMerge();
++//    //
++//    // clear empty file
++//    //
++//    boolean needEmpty = false;
++//    if (emptyTsFileResource.getOverflowChangeType() != OverflowChangeType.NO_CHANGE) {
++//      needEmpty = true;
++//    }
++//    emptyTsFileResource.clear();
++//    // attention
++//    try {
++//      if (overflowProcessor.isClosed()) {
++//        overflowProcessor.reopen();
++//      }
++//      overflowProcessor.switchWorkToMerge();
++//    } catch (ProcessorException | IOException e) {
++//      LOGGER.error("The filenode processor {} can't switch overflow processor from work to merge.",
++//          getProcessorName(), e);
++//      writeUnlock();
++//      throw new FileNodeProcessorException(e);
++//    }
++//    LOGGER.info("The filenode processor {} switches from {} to {}.", getProcessorName(),
++//        FileNodeProcessorStatus.NONE, FileNodeProcessorStatus.MERGING_WRITE);
++//    writeUnlock();
++//
++//    // query tsfile data and overflow data, and merge them
++//    int numOfMergeFiles = 0;
++//    int allNeedMergeFiles = backupIntervalFiles.size();
++//    for (TsFileResource backupIntervalFile : backupIntervalFiles) {
++//      numOfMergeFiles++;
++//      if (backupIntervalFile.getOverflowChangeType() == OverflowChangeType.CHANGED) {
++//        // query data and merge
++//        String filePathBeforeMerge = backupIntervalFile.getRelativePath();
++//        try {
++//          LOGGER.info(
++//              "The filenode processor {} begins merging the {}/{} tsfile[{}] with "
++//                  + "overflow file, the process is {}%",
++//              getProcessorName(), numOfMergeFiles, allNeedMergeFiles, filePathBeforeMerge,
++//              (int) (((numOfMergeFiles - 1) / (float) allNeedMergeFiles) * 100));
++//          long startTime = System.currentTimeMillis();
++//          String newFile = queryAndWriteDataForMerge(backupIntervalFile);
++//          long endTime = System.currentTimeMillis();
++//          long timeConsume = endTime - startTime;
++//          ZoneId zoneId = IoTDBDescriptor.getInstance().getConfig().getZoneID();
++//          LOGGER.info(
++//              "The fileNode processor {} has merged the {}/{} tsfile[{}->{}] over, "
++//                  + "start time of merge is {}, end time of merge is {}, "
++//                  + "time consumption is {}ms,"
++//                  + " the process is {}%",
++//              getProcessorName(), numOfMergeFiles, allNeedMergeFiles, filePathBeforeMerge,
++//              newFile, ofInstant(Instant.ofEpochMilli(startTime),
++//                  zoneId), ofInstant(Instant.ofEpochMilli(endTime), zoneId), timeConsume,
++//              numOfMergeFiles / (float) allNeedMergeFiles * 100);
++//        } catch (IOException | PathErrorException e) {
++//          LOGGER.error("Merge: query and insert data error.", e);
++//          throw new FileNodeProcessorException(e);
++//        }
++//      } else if (backupIntervalFile.getOverflowChangeType() == OverflowChangeType.MERGING_CHANGE) {
++//        LOGGER.error("The overflowChangeType of backupIntervalFile must not be {}",
++//            OverflowChangeType.MERGING_CHANGE);
++//        // handle this error, throw one runtime exception
++//        throw new FileNodeProcessorException(
++//            "The overflowChangeType of backupIntervalFile must not be "
++//                + OverflowChangeType.MERGING_CHANGE);
++//      } else {
++//        LOGGER.debug(
++//            "The filenode processor {} is merging, the interval file {} doesn't "
++//                + "need to be merged.",
++//            getProcessorName(), backupIntervalFile.getRelativePath());
++//      }
++//    }
++//
++//    // change status from merge to wait
++//    switchMergeToWaiting(backupIntervalFiles, needEmpty);
++//
++//    // change status from wait to work
++//    switchWaitingToWorking();
++//  }
++//
++//  private void mergeCheckEmptyFile(Map<String, Long> startTimeMap) {
++//    if (emptyTsFileResource.getOverflowChangeType() == OverflowChangeType.NO_CHANGE) {
++//      return;
++//    }
++//    Iterator<Entry<String, Long>> iterator = emptyTsFileResource.getEndTimeMap().entrySet()
++//        .iterator();
++//    while (iterator.hasNext()) {
++//      Entry<String, Long> entry = iterator.next();
++//      String deviceId = entry.getKey();
++//      if (invertedIndexOfFiles.containsKey(deviceId)) {
++//        invertedIndexOfFiles.get(deviceId).get(0).setOverflowChangeType(OverflowChangeType.CHANGED);
++//        startTimeMap.remove(deviceId);
++//        iterator.remove();
++//      }
++//    }
++//    if (emptyTsFileResource.checkEmpty()) {
++//      emptyTsFileResource.clear();
++//    } else {
++//      if (!newFileNodes.isEmpty()) {
++//        TsFileResource first = newFileNodes.get(0);
++//        for (String deviceId : emptyTsFileResource.getStartTimeMap().keySet()) {
++//          first.setStartTime(deviceId, emptyTsFileResource.getStartTime(deviceId));
++//          first.setEndTime(deviceId, emptyTsFileResource.getEndTime(deviceId));
++//          first.setOverflowChangeType(OverflowChangeType.CHANGED);
++//        }
++//        emptyTsFileResource.clear();
++//      } else {
++//        emptyTsFileResource.setOverflowChangeType(OverflowChangeType.CHANGED);
++//      }
++//    }
++//  }
++//
++//  private List<TsFileResource> switchFileNodeToMerge() throws FileNodeProcessorException {
++//    List<TsFileResource> result = new ArrayList<>();
++//    if (emptyTsFileResource.getOverflowChangeType() != OverflowChangeType.NO_CHANGE) {
++//      // add empty
++//      result.add(emptyTsFileResource.backUp());
++//      if (!newFileNodes.isEmpty()) {
++//        throw new FileNodeProcessorException(
++//            String.format("The status of empty file is %s, but the new file list is not empty",
++//                emptyTsFileResource.getOverflowChangeType()));
++//      }
++//      return result;
++//    }
++//    if (newFileNodes.isEmpty()) {
++//      LOGGER.error("No file was changed when merging, the filenode is {}", getProcessorName());
++//      throw new FileNodeProcessorException(
++//          "No file was changed when merging, the filenode is " + getProcessorName());
++//    }
++//    for (TsFileResource tsFileResource : newFileNodes) {
++//      updateFileNode(tsFileResource, result);
++//    }
++//    return result;
++//  }
++//
++//  private void updateFileNode(TsFileResource tsFileResource, List<TsFileResource> result) {
++//    if (tsFileResource.getOverflowChangeType() == OverflowChangeType.NO_CHANGE) {
++//      result.add(tsFileResource.backUp());
++//    } else {
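++//      // widen the time interval of a changed file so that consecutive files cover contiguous ranges:
++//      // the first file starts at 0 and each end time extends to just before the next file's start time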
++//      Map<String, Long> startTimeMap = new HashMap<>();
++//      Map<String, Long> endTimeMap = new HashMap<>();
++//      for (String deviceId : tsFileResource.getEndTimeMap().keySet()) {
++//        List<TsFileResource> temp = invertedIndexOfFiles.get(deviceId);
++//        int index = temp.indexOf(tsFileResource);
++//        int size = temp.size();
++//        // start time
++//        if (index == 0) {
++//          startTimeMap.put(deviceId, 0L);
++//        } else {
++//          startTimeMap.put(deviceId, tsFileResource.getStartTime(deviceId));
++//        }
++//        // end time
++//        if (index < size - 1) {
++//          endTimeMap.put(deviceId, temp.get(index + 1).getStartTime(deviceId) - 1);
++//        } else {
++//          endTimeMap.put(deviceId, tsFileResource.getEndTime(deviceId));
++//        }
++//      }
++//      TsFileResource node = new TsFileResource(startTimeMap, endTimeMap,
++//          tsFileResource.getOverflowChangeType(), tsFileResource.getFile());
++//      result.add(node);
++//    }
++//  }
++//
++//  private void switchMergeToWaiting(List<TsFileResource> backupIntervalFiles, boolean needEmpty)
++//      throws FileNodeProcessorException {
++//    LOGGER.info("The status of filenode processor {} switches from {} to {}.", getProcessorName(),
++//        FileNodeProcessorStatus.MERGING_WRITE, FileNodeProcessorStatus.WAITING);
++//    writeLock();
++//    try {
++//      oldMultiPassTokenSet = newMultiPassTokenSet;
++//      oldMultiPassCount = new CountDownLatch(newMultiPassCount.get());
++//      newMultiPassTokenSet = new HashSet<>();
++//      newMultiPassCount = new AtomicInteger(0);
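++//      // queries started before the merge keep the old token set; new queries register in the fresh set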
++//      List<TsFileResource> result = new ArrayList<>();
++//      int beginIndex = 0;
++//      if (needEmpty) {
++//        TsFileResource empty = backupIntervalFiles.get(0);
++//        if (!empty.checkEmpty()) {
++//          updateEmpty(empty, result);
++//          beginIndex++;
++//        }
++//      }
++//      // reconstruct the file index
++//      addAllFileIntoIndex(backupIntervalFiles);
++//      // check the merge changed file
++//      for (int i = beginIndex; i < backupIntervalFiles.size(); i++) {
++//        TsFileResource newFile = newFileNodes.get(i - beginIndex);
++//        TsFileResource temp = backupIntervalFiles.get(i);
++//        if (newFile.getOverflowChangeType() == OverflowChangeType.MERGING_CHANGE) {
++//          updateMergeChanged(newFile, temp);
++//        }
++//        if (!temp.checkEmpty()) {
++//          result.add(temp);
++//        }
++//      }
++//      // add new file when merge
++//      for (int i = backupIntervalFiles.size() - beginIndex; i < newFileNodes.size(); i++) {
++//        TsFileResource fileNode = newFileNodes.get(i);
++//        if (fileNode.isClosed()) {
++//          result.add(fileNode.backUp());
++//        } else {
++//          result.add(fileNode);
++//        }
++//      }
++//
++//      isMerging = FileNodeProcessorStatus.WAITING;
++//      newFileNodes = result;
++//      // reconstruct the index
++//      addAllFileIntoIndex(newFileNodes);
++//      // clear merge changed
++//      for (TsFileResource fileNode : newFileNodes) {
++//        fileNode.clearMergeChanged();
++//      }
++//
++//      synchronized (fileNodeProcessorStore) {
++//        fileNodeProcessorStore.setFileNodeProcessorStatus(isMerging);
++//        fileNodeProcessorStore.setEmptyTsFileResource(emptyTsFileResource);
++//        fileNodeProcessorStore.setNewFileNodes(newFileNodes);
++//        try {
++//          writeStoreToDisk(fileNodeProcessorStore);
++//        } catch (FileNodeProcessorException e) {
++//          LOGGER.error(
++//              "Merge: failed to insert filenode information to revocery file, the filenode is " +
++//                  "{}.",
++//              getProcessorName(), e);
++//          throw new FileNodeProcessorException(
++//              "Merge: insert filenode information to revocery file failed, the filenode is "
++//                  + getProcessorName());
++//        }
++//      }
++//    } finally {
++//      writeUnlock();
++//    }
++//  }
++//
++//  private void updateEmpty(TsFileResource empty, List<TsFileResource> result) {
++//    for (String deviceId : empty.getStartTimeMap().keySet()) {
++//      if (invertedIndexOfFiles.containsKey(deviceId)) {
++//        TsFileResource temp = invertedIndexOfFiles.get(deviceId).get(0);
++//        if (temp.getMergeChanged().contains(deviceId)) {
++//          empty.setOverflowChangeType(OverflowChangeType.CHANGED);
++//          break;
++//        }
++//      }
++//    }
++//    empty.clearMergeChanged();
++//    result.add(empty.backUp());
++//  }
++//
++//  private void updateMergeChanged(TsFileResource newFile, TsFileResource temp) {
++//    for (String deviceId : newFile.getMergeChanged()) {
++//      if (temp.getStartTimeMap().containsKey(deviceId)) {
++//        temp.setOverflowChangeType(OverflowChangeType.CHANGED);
++//      } else {
++//        changeTypeToChanged(deviceId, newFile.getStartTime(deviceId),
++//            newFile.getEndTime(deviceId));
++//      }
++//    }
++//  }
++//
++//
++//  private void switchWaitingToWorking()
++//      throws FileNodeProcessorException {
++//
++//    LOGGER.info("The status of filenode processor {} switches from {} to {}.", getProcessorName(),
++//        FileNodeProcessorStatus.WAITING, FileNodeProcessorStatus.NONE);
++//
++//    if (oldMultiPassCount != null) {
++//      LOGGER.info("The old Multiple Pass Token set is {}, the old Multiple Pass Count is {}",
++//          oldMultiPassTokenSet,
++//          oldMultiPassCount);
++//      try {
++//        oldMultiPassCount.await();
++//      } catch (InterruptedException e) {
++//        LOGGER.info(
++//            "The filenode processor {} encountered an error when it waits for all old queries over.",
++//            getProcessorName());
++//        throw new FileNodeProcessorException(e);
++//      }
++//    }
++//
++//    try {
++//      writeLock();
++//      try {
++//        // delete all bufferwrite files which are not in the newFileNodes
++//        // notice: keep the restore file of the last interval file if it is not closed
++//
++//        List<String> bufferwriteDirPathList = DIRECTORY_MANAGER.getAllTsFileFolders();
++//        List<File> bufferwriteDirList = new ArrayList<>();
++//        collectBufferWriteDirs(bufferwriteDirPathList, bufferwriteDirList);
++//
++//        Set<String> bufferFiles = new HashSet<>();
++//        collectBufferWriteFiles(bufferFiles);
++//
++//        // add the restore file, if the last file is not closed
++//        if (!newFileNodes.isEmpty() && !newFileNodes.get(newFileNodes.size() - 1).isClosed()) {
++//          String bufferFileRestorePath =
++//              newFileNodes.get(newFileNodes.size() - 1).getFile().getAbsolutePath() + RESTORE_FILE_SUFFIX;
++//          bufferFiles.add(bufferFileRestorePath);
++//        }
++//
++//        deleteBufferWriteFiles(bufferwriteDirList, bufferFiles);
++//
++//        // merge switch
++//        changeFileNodes();
++//
++//        // overflow switch from merge to work
++//        overflowProcessor.switchMergeToWork();
++//
++//        // persist the merge status to the restore file
++//        isMerging = FileNodeProcessorStatus.NONE;
++//        synchronized (fileNodeProcessorStore) {
++//          fileNodeProcessorStore.setFileNodeProcessorStatus(isMerging);
++//          fileNodeProcessorStore.setNewFileNodes(newFileNodes);
++//          fileNodeProcessorStore.setEmptyTsFileResource(emptyTsFileResource);
++//          writeStoreToDisk(fileNodeProcessorStore);
++//        }
++//      } catch (IOException e) {
++//        LOGGER.info(
++//            "The filenode processor {} encountered an error when its "
++//                + "status switched from {} to {}.",
++//            getProcessorName(), FileNodeProcessorStatus.WAITING,
++//            FileNodeProcessorStatus.NONE);
++//        throw new FileNodeProcessorException(e);
++//      } finally {
++//        writeUnlock();
++//      }
++//    } finally {
++//      oldMultiPassTokenSet = null;
++//      oldMultiPassCount = null;
++//    }
++//
++//  }
++//
++//  private void collectBufferWriteDirs(List<String> bufferwriteDirPathList,
++//      List<File> bufferwriteDirList) {
++//    for (String bufferwriteDirPath : bufferwriteDirPathList) {
++//      if (bufferwriteDirPath.length() > 0
++//          && bufferwriteDirPath.charAt(bufferwriteDirPath.length() - 1)
++//          != File.separatorChar) {
++//        bufferwriteDirPath = bufferwriteDirPath + File.separatorChar;
++//      }
++//      bufferwriteDirPath = bufferwriteDirPath + getProcessorName();
++//      File bufferwriteDir = new File(bufferwriteDirPath);
++//      bufferwriteDirList.add(bufferwriteDir);
++//      if (!bufferwriteDir.exists()) {
++//        bufferwriteDir.mkdirs();
++//      }
++//    }
++//  }
++//
++//  private void collectBufferWriteFiles(Set<String> bufferFiles) {
++//    for (TsFileResource bufferFileNode : newFileNodes) {
++//      String bufferFilePath = bufferFileNode.getFile().getAbsolutePath();
++//      if (bufferFilePath != null) {
++//        bufferFiles.add(bufferFilePath);
++//      }
++//    }
++//  }
++//
++//  private void deleteBufferWriteFiles(List<File> bufferwriteDirList, Set<String> bufferFiles)
++//      throws IOException {
++//    for (File bufferwriteDir : bufferwriteDirList) {
++//      File[] files = bufferwriteDir.listFiles();
++//      if (files == null) {
++//        continue;
++//      }
++//      for (File file : files) {
++//        if (!bufferFiles.contains(file.getPath())) {
++//          FileReaderManager.getInstance().closeFileAndRemoveReader(file.getPath());
++//          if (!file.delete()) {
++//            LOGGER.warn("Cannot delete BufferWrite file {}", file.getPath());
++//          }
++//        }
++//      }
++//    }
++//  }
++//
++//  private void changeFileNodes() {
++//    for (TsFileResource fileNode : newFileNodes) {
++//      if (fileNode.getOverflowChangeType() != OverflowChangeType.NO_CHANGE) {
++//        fileNode.setOverflowChangeType(OverflowChangeType.CHANGED);
++//      }
++//    }
++//  }
++//
++//  private String queryAndWriteDataForMerge(TsFileResource backupIntervalFile)
++//      throws IOException, FileNodeProcessorException, PathErrorException {
++//    Map<String, Long> startTimeMap = new HashMap<>();
++//    Map<String, Long> endTimeMap = new HashMap<>();
++//
++//    mergeFileWriter = null;
++//    mergeOutputPath = null;
++//    mergeBaseDir = null;
++//    mergeFileName = null;
++//    // modifications are blocked before mergeModification is created to avoid
++//    // losing some modification.
++//    mergeDeleteLock.lock();
++//    QueryContext context = new QueryContext();
++//    try {
++//      FileReaderManager.getInstance().increaseFileReaderReference(backupIntervalFile.getFilePath(),
++//          true);
++//      for (String deviceId : backupIntervalFile.getStartTimeMap().keySet()) {
++//        // query one deviceId
++//        List<Path> pathList = new ArrayList<>();
++//        mergeIsChunkGroupHasData = false;
++//        mergeStartPos = -1;
++//        ChunkGroupFooter footer;
++//        int numOfChunk = 0;
++//        try {
++//          List<String> pathStrings = mManager.getLeafNodePathInNextLevel(deviceId);
++//          for (String string : pathStrings) {
++//            pathList.add(new Path(string));
++//          }
++//        } catch (PathErrorException e) {
++//          LOGGER.error("Can't get all the paths from MManager, the deviceId is {}", deviceId);
++//          throw new FileNodeProcessorException(e);
++//        }
++//        if (pathList.isEmpty()) {
++//          continue;
++//        }
++//        for (Path path : pathList) {
++//          // query one measurement in the special deviceId
++//          String measurementId = path.getMeasurement();
++//          TSDataType dataType = mManager.getSeriesType(path.getFullPath());
++//          OverflowSeriesDataSource overflowSeriesDataSource = overflowProcessor.queryMerge(deviceId,
++//              measurementId, dataType, true, context);
++//          Filter timeFilter = FilterFactory
++//              .and(TimeFilter.gtEq(backupIntervalFile.getStartTime(deviceId)),
++//                  TimeFilter.ltEq(backupIntervalFile.getEndTime(deviceId)));
++//          SingleSeriesExpression seriesFilter = new SingleSeriesExpression(path, timeFilter);
++//
++//          for (OverflowInsertFile overflowInsertFile : overflowSeriesDataSource
++//              .getOverflowInsertFileList()) {
++//            FileReaderManager.getInstance()
++//                .increaseFileReaderReference(overflowInsertFile.getFilePath(),
++//                    false);
++//          }
++//
++//          IReader seriesReader = SeriesReaderFactory.getInstance()
++//              .createSeriesReaderForMerge(backupIntervalFile,
++//                  overflowSeriesDataSource, seriesFilter, context);
++//          numOfChunk += queryAndWriteSeries(seriesReader, path, seriesFilter, dataType,
++//              startTimeMap, endTimeMap, overflowSeriesDataSource);
++//        }
++//        if (mergeIsChunkGroupHasData) {
++//          // end the new rowGroupMetadata
++//          mergeFileWriter.endChunkGroup(0);
++//        }
++//      }
++//    } finally {
++//      FileReaderManager.getInstance().decreaseFileReaderReference(backupIntervalFile.getFilePath(),
++//          true);
++//
++//      if (mergeDeleteLock.isLocked()) {
++//        mergeDeleteLock.unlock();
++//      }
++//    }
++//
++//    if (mergeFileWriter != null) {
++//      mergeFileWriter.endFile(fileSchema);
++//    }
++//    backupIntervalFile.setFile(new File(mergeBaseDir + File.separator + mergeFileName));
++//    backupIntervalFile.setOverflowChangeType(OverflowChangeType.NO_CHANGE);
++//    backupIntervalFile.setStartTimeMap(startTimeMap);
++//    backupIntervalFile.setEndTimeMap(endTimeMap);
++//    backupIntervalFile.setModFile(mergingModification);
++//    mergingModification = null;
++//    return mergeFileName;
++//  }
++//
++//  private int queryAndWriteSeries(IReader seriesReader, Path path,
++//      SingleSeriesExpression seriesFilter, TSDataType dataType,
++//      Map<String, Long> startTimeMap, Map<String, Long> endTimeMap,
++//      OverflowSeriesDataSource overflowSeriesDataSource)
++//      throws IOException {
++//    int numOfChunk = 0;
++//    try {
++//      if (!seriesReader.hasNext()) {
++//        LOGGER.debug(
++//            "The time-series {} has no data with the filter {} in the filenode processor {}",
++//            path, seriesFilter, getProcessorName());
++//      } else {
++//        numOfChunk++;
++//        TimeValuePair timeValuePair = seriesReader.next();
++//        if (mergeFileWriter == null) {
++//          mergeBaseDir = DIRECTORY_MANAGER.getNextFolderForTsfile();
++//          mergeFileName = timeValuePair.getTimestamp()
++//              + FileNodeConstants.BUFFERWRITE_FILE_SEPARATOR + System.currentTimeMillis();
++//          mergeOutputPath = constructOutputFilePath(mergeBaseDir, getProcessorName(),
++//              mergeFileName);
++//          mergeFileName = getProcessorName() + File.separatorChar + mergeFileName;
++//          mergeFileWriter = new TsFileIOWriter(new File(mergeOutputPath));
++//          mergingModification = new ModificationFile(mergeOutputPath
++//              + ModificationFile.FILE_SUFFIX);
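++//          // the merging ModificationFile now exists, so deletions can be recorded safely; release the lock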
++//          mergeDeleteLock.unlock();
++//        }
++//        if (!mergeIsChunkGroupHasData) {
++//          // start a new rowGroupMetadata
++//          mergeIsChunkGroupHasData = true;
++//          // the dataSize and numOfChunk are placeholders;
++//          // the accurate dataSize and numOfChunk are obtained after all data of this device is written.
++//          mergeFileWriter.startFlushChunkGroup(path.getDevice());// TODO please check me.
++//          mergeStartPos = mergeFileWriter.getPos();
++//        }
++//        // init the seriesWriterImpl
++//        MeasurementSchema measurementSchema = fileSchema
++//            .getMeasurementSchema(path.getMeasurement());
++//        ChunkBuffer pageWriter = new ChunkBuffer(measurementSchema);
++//        int pageSizeThreshold = TSFileConfig.pageSizeInByte;
++//        ChunkWriterImpl seriesWriterImpl = new ChunkWriterImpl(measurementSchema, pageWriter,
++//            pageSizeThreshold);
++//        // insert the series data
++//        writeOneSeries(path.getDevice(), seriesWriterImpl, dataType,
++//            seriesReader,
++//            startTimeMap, endTimeMap, timeValuePair);
++//        // flushMetadata the series data
++//        seriesWriterImpl.writeToFileWriter(mergeFileWriter);
++//      }
++//    } finally {
++//      for (OverflowInsertFile overflowInsertFile : overflowSeriesDataSource
++//          .getOverflowInsertFileList()) {
++//        FileReaderManager.getInstance()
++//            .decreaseFileReaderReference(overflowInsertFile.getFilePath(),
++//                false);
++//      }
++//    }
++//    return numOfChunk;
++//  }
++//
++//
++//  private void writeOneSeries(String deviceId, ChunkWriterImpl seriesWriterImpl,
++//      TSDataType dataType, IReader seriesReader, Map<String, Long> startTimeMap,
++//      Map<String, Long> endTimeMap, TimeValuePair firstTVPair) throws IOException {
++//    long startTime;
++//    long endTime;
++//    TimeValuePair localTV = firstTVPair;
++//    writeTVPair(seriesWriterImpl, dataType, localTV);
++//    startTime = endTime = localTV.getTimestamp();
++//    if (!startTimeMap.containsKey(deviceId) || startTimeMap.get(deviceId) > startTime) {
++//      startTimeMap.put(deviceId, startTime);
++//    }
++//    if (!endTimeMap.containsKey(deviceId) || endTimeMap.get(deviceId) < endTime) {
++//      endTimeMap.put(deviceId, endTime);
++//    }
++//    while (seriesReader.hasNext()) {
++//      localTV = seriesReader.next();
++//      endTime = localTV.getTimestamp();
++//      writeTVPair(seriesWriterImpl, dataType, localTV);
++//    }
++//    if (!endTimeMap.containsKey(deviceId) || endTimeMap.get(deviceId) < endTime) {
++//      endTimeMap.put(deviceId, endTime);
++//    }
++//  }
++//
++//  private void writeTVPair(ChunkWriterImpl seriesWriterImpl, TSDataType dataType,
++//      TimeValuePair timeValuePair) throws IOException {
++//    switch (dataType) {
++//      case BOOLEAN:
++//        seriesWriterImpl.write(timeValuePair.getTimestamp(), timeValuePair.getValue().getBoolean());
++//        break;
++//      case INT32:
++//        seriesWriterImpl.write(timeValuePair.getTimestamp(), timeValuePair.getValue().getInt());
++//        break;
++//      case INT64:
++//        seriesWriterImpl.write(timeValuePair.getTimestamp(), timeValuePair.getValue().getLong());
++//        break;
++//      case FLOAT:
++//        seriesWriterImpl.write(timeValuePair.getTimestamp(), timeValuePair.getValue().getFloat());
++//        break;
++//      case DOUBLE:
++//        seriesWriterImpl.write(timeValuePair.getTimestamp(), timeValuePair.getValue().getDouble());
++//        break;
++//      case TEXT:
++//        seriesWriterImpl.write(timeValuePair.getTimestamp(), timeValuePair.getValue().getBinary());
++//        break;
++//      default:
++//        LOGGER.error("Not support data type: {}", dataType);
++//        break;
++//    }
++//  }
++//
++//
++//  private String constructOutputFilePath(String baseDir, String processorName, String fileName) {
++//
++//    String localBaseDir = baseDir;
++//    if (localBaseDir.charAt(localBaseDir.length() - 1) != File.separatorChar) {
++//      localBaseDir = localBaseDir + File.separatorChar;
++//    }
++//    localBaseDir = localBaseDir + processorName;
++//    File dataDir = new File(localBaseDir);
++//    if (!dataDir.exists()) {
++//      LOGGER.warn("The bufferwrite processor data dir doesn't exists, create new directory {}",
++//          localBaseDir);
++//      dataDir.mkdirs();
++//    }
++//    File outputFile = new File(dataDir, fileName);
++//    return outputFile.getPath();
++//  }
++//
++//  private FileSchema constructFileSchema(String processorName) throws WriteProcessException {
++//
++//    List<MeasurementSchema> columnSchemaList;
++//    columnSchemaList = mManager.getSchemaForFileName(processorName);
++//
++//    FileSchema schema = new FileSchema();
++//    for (MeasurementSchema measurementSchema : columnSchemaList) {
++//      schema.registerMeasurement(measurementSchema);
++//    }
++//    return schema;
++//
++//  }
++//
++//  @Override
++//  public boolean canBeClosed() {
++//    if (isMerging != FileNodeProcessorStatus.NONE) {
++//      LOGGER.info("The filenode {} can't be closed, because the filenode status is {}",
++//          getProcessorName(),
++//          isMerging);
++//      return false;
++//    }
++//    if (newMultiPassCount.get() != 0) {
++//      LOGGER.warn("The filenode {} can't be closed, because newMultiPassCount is {}. The newMultiPassTokenSet is {}",
++//          getProcessorName(), newMultiPassCount, newMultiPassTokenSet);
++//      return false;
++//    }
++//
++//    if (oldMultiPassCount == null) {
++//      return true;
++//    }
++//    if (oldMultiPassCount.getCount() == 0) {
++//      return true;
++//    } else {
++//      LOGGER.info("The filenode {} can't be closed, because oldMultiPassCount is {}",
++//          getProcessorName(), oldMultiPassCount.getCount());
++//      return false;
++//    }
++//  }
++//
++//  @Override
++//  public FileNodeFlushFuture flush() throws IOException {
++//    Future<Boolean> bufferWriteFlushFuture = null;
++//    Future<Boolean> overflowFlushFuture = null;
++//    if (bufferWriteProcessor != null) {
++//      bufferWriteFlushFuture = bufferWriteProcessor.flush();
++//    }
++//    if (overflowProcessor != null && !overflowProcessor.isClosed()) {
++//      overflowFlushFuture = overflowProcessor.flush();
++//    }
++//    return new FileNodeFlushFuture(bufferWriteFlushFuture, overflowFlushFuture);
++//  }
++//
++//  /**
++//   * Close the bufferwrite processor.
++//   */
++//  public Future<Boolean> closeBufferWrite() throws FileNodeProcessorException {
++//    if (bufferWriteProcessor == null) {
++//      return new ImmediateFuture<>(true);
++//    }
++//    try {
++//      while (!bufferWriteProcessor.canBeClosed()) {
++//        waitForBufferWriteClose();
++//      }
++//      bufferWriteProcessor.close();
++//      Future<Boolean> result = bufferWriteProcessor.getCloseFuture();
++//      closingBufferWriteProcessor.add(bufferWriteProcessor);
++//      bufferWriteProcessor = null;
++//      return result;
++//    } catch (BufferWriteProcessorException e) {
++//      throw new FileNodeProcessorException(e);
++//    }
++//  }
++//
++//
++//
++//  private void waitForBufferWriteClose() {
++//    try {
++//      LOGGER.info("The bufferwrite {} can't be closed, wait 100ms",
++//          bufferWriteProcessor.getProcessorName());
++//      TimeUnit.MILLISECONDS.sleep(100);
++//    } catch (InterruptedException e) {
++//      LOGGER.error("Unexpected interruption", e);
++//      Thread.currentThread().interrupt();
++//    }
++//  }
++//
++//  /**
++//   * Close the overflow processor.
++//   */
++//  public void closeOverflow() throws FileNodeProcessorException {
++//    if (overflowProcessor == null || overflowProcessor.isClosed()) {
++//      return;
++//    }
++//    try {
++//      while (!overflowProcessor.canBeClosed()) {
++//        waitForOverflowClose();
++//      }
++//      overflowProcessor.close();
++//    } catch (OverflowProcessorException e) {
++//      throw new FileNodeProcessorException(e);
++//    }
++//  }
++//
++//  private void waitForOverflowClose() {
++//    try {
++//      LOGGER.info("The overflow {} can't be closed, wait 100ms",
++//          overflowProcessor.getProcessorName());
++//      TimeUnit.MILLISECONDS.sleep(100);
++//    } catch (InterruptedException e) {
++//      LOGGER.error("Unexpected interruption", e);
++//      Thread.currentThread().interrupt();
++//    }
++//  }
++//
++//  @Override
++//  public void close() throws FileNodeProcessorException {
++//    LOGGER.info("Will setCloseMark FileNode Processor {}.", getProcessorName());
++//    Future<Boolean> result = closeBufferWrite();
++//    try {
++//      result.get();
++//    } catch (InterruptedException | ExecutionException e) {
++//      throw new FileNodeProcessorException(e);
++//    }
++//    closeOverflow();
++//    for (TsFileResource fileNode : newFileNodes) {
++//      if (fileNode.getModFile() != null) {
++//        try {
++//          fileNode.getModFile().close();
++//        } catch (IOException e) {
++//          throw new FileNodeProcessorException(e);
++//        }
++//      }
++//    }
++//  }
++//
++//  /**
++//   * deregister the filenode processor.
++//   */
++//  public void delete() throws ProcessorException {
++//    if (TsFileDBConf.isEnableStatMonitor()) {
++//      // remove the monitor
++//      LOGGER.info("Deregister the filenode processor: {} from monitor.", getProcessorName());
++//      StatMonitor.getInstance().deregisterStatistics(statStorageDeltaName);
++//    }
++//    closeBufferWrite();
++//    closeOverflow();
++//    for (TsFileResource fileNode : newFileNodes) {
++//      if (fileNode.getModFile() != null) {
++//        try {
++//          fileNode.getModFile().close();
++//        } catch (IOException e) {
++//          throw new FileNodeProcessorException(e);
++//        }
++//      }
++//    }
++//  }
++//
++//  @Override
++//  public long memoryUsage() {
++//    long memSize = 0;
++//    if (bufferWriteProcessor != null) {
++//      memSize += bufferWriteProcessor.memoryUsage();
++//    }
++//    if (overflowProcessor != null) {
++//      memSize += overflowProcessor.memoryUsage();
++//    }
++//    return memSize;
++//  }
++//
++//  private void writeStoreToDisk(FileNodeProcessorStore fileNodeProcessorStore)
++//      throws FileNodeProcessorException {
++//
++//    synchronized (fileNodeRestoreLock) {
++//      try (FileOutputStream fileOutputStream = new FileOutputStream(fileNodeRestoreFilePath)) {
++//        fileNodeProcessorStore.serialize(fileOutputStream);
++//        LOGGER.debug("The filenode processor {} writes restore information to the restore file",
++//            getProcessorName());
++//      } catch (IOException e) {
++//        throw new FileNodeProcessorException(e);
++//      }
++//    }
++//  }
++//
++//  private FileNodeProcessorStore readStoreFromDisk() throws FileNodeProcessorException {
++//
++//    synchronized (fileNodeRestoreLock) {
++//      File restoreFile = new File(fileNodeRestoreFilePath);
++//      if (!restoreFile.exists() || restoreFile.length() == 0) {
++//        try {
++//          return new FileNodeProcessorStore(false, new HashMap<>(),
++//              new TsFileResource(null, false),
++//              new ArrayList<>(), FileNodeProcessorStatus.NONE, 0);
++//        } catch (IOException e) {
++//          throw new FileNodeProcessorException(e);
++//        }
++//      }
++//      try (FileInputStream inputStream = new FileInputStream(fileNodeRestoreFilePath)) {
++//        return FileNodeProcessorStore.deSerialize(inputStream);
++//      } catch (IOException e) {
++//        LOGGER
++//            .error("Failed to deserialize the FileNodeRestoreFile {}, {}", fileNodeRestoreFilePath,
++//                e);
++//        throw new FileNodeProcessorException(e);
++//      }
++//    }
++//  }
++//
++//  String getFileNodeRestoreFilePath() {
++//    return fileNodeRestoreFilePath;
++//  }
++//
++//  /**
++//   * Delete data whose timestamp <= 'timestamp' and belongs to the timeseries deviceId.measurementId.
++//   *
++//   * @param deviceId the deviceId of the timeseries to be deleted.
++//   * @param measurementId the measurementId of the timeseries to be deleted.
++//   * @param timestamp the delete range is (0, timestamp].
++//   */
++//  public void delete(String deviceId, String measurementId, long timestamp) throws IOException {
++//    // TODO: how to avoid partial deletion?
++//    mergeDeleteLock.lock();
++//    long version = versionController.nextVersion();
++//
++//    // record what files are updated so we can roll back them in case of exception
++//    List<ModificationFile> updatedModFiles = new ArrayList<>();
++//
++//    try {
++//      String fullPath = deviceId +
++//          IoTDBConstant.PATH_SEPARATOR + measurementId;
++//      Deletion deletion = new Deletion(fullPath, version, timestamp);
++//      if (mergingModification != null) {
++//        mergingModification.write(deletion);
++//        updatedModFiles.add(mergingModification);
++//      }
++//      deleteBufferWriteFiles(deviceId, deletion, updatedModFiles);
++//      // delete data in memory
++//      OverflowProcessor ofProcessor = getOverflowProcessor(getProcessorName());
++//      ofProcessor.delete(deviceId, measurementId, timestamp, version, updatedModFiles);
++//      if (bufferWriteProcessor != null) {
++//        bufferWriteProcessor.delete(deviceId, measurementId, timestamp);
++//      }
++//    } catch (Exception e) {
++//      // roll back
++//      for (ModificationFile modFile : updatedModFiles) {
++//        modFile.abort();
++//      }
++//      throw new IOException(e);
++//    } finally {
++//      mergeDeleteLock.unlock();
++//    }
++//  }
++//
++//  private void deleteBufferWriteFiles(String deviceId, Deletion deletion,
++//      List<ModificationFile> updatedModFiles) throws IOException {
++//    BufferWriteProcessor bufferWriteProcessor = getBufferWriteProcessor();
++//    TsFileResource resource = null;
++//    if (bufferWriteProcessor != null) {
++//      //bufferWriteProcessor == null means the bufferWriteProcessor is closed now.
++//      resource = bufferWriteProcessor.getCurrentTsFileResource();
++//      if (resource != null && resource.containsDevice(deviceId)) {
++//        resource.getModFile().write(deletion);
++//        updatedModFiles.add(resource.getModFile());
++//      }
++//    }
++//
++//    for (TsFileResource fileNode : newFileNodes) {
++//      if (fileNode != resource && fileNode.containsDevice(deviceId)
++//          && fileNode.getStartTime(deviceId) <= deletion.getTimestamp()) {
++//        fileNode.getModFile().write(deletion);
++//        updatedModFiles.add(fileNode.getModFile());
++//      }
++//    }
++//  }
++//
++//  /**
++//   * Similar to delete(), but only deletes data in BufferWrite. Only used by WAL recovery.
++//   */
++//  public void deleteBufferWrite(String deviceId, String measurementId, long timestamp)
++//      throws IOException, BufferWriteProcessorException {
++//    String fullPath = deviceId +
++//        IoTDBConstant.PATH_SEPARATOR + measurementId;
++//    long version = versionController.nextVersion();
++//    Deletion deletion = new Deletion(fullPath, version, timestamp);
++//
++//    List<ModificationFile> updatedModFiles = new ArrayList<>();
++//    try {
++//      deleteBufferWriteFiles(deviceId, deletion, updatedModFiles);
++//    } catch (IOException e) {
++//      for (ModificationFile modificationFile : updatedModFiles) {
++//        modificationFile.abort();
++//      }
++//      throw e;
++//    }
++//    if (bufferWriteProcessor != null) {
++//      try {
++//        bufferWriteProcessor.delete(deviceId, measurementId, timestamp);
++//      } catch (BufferWriteProcessorException e) {
++//        throw new IOException(e);
++//      }
++//    }
++//  }
++//
++//  /**
++//   * Similar to delete(), but only deletes data in Overflow. Only used by WAL recovery.
++//   */
++//  public void deleteOverflow(String deviceId, String measurementId, long timestamp)
++//      throws ProcessorException {
++//    long version = versionController.nextVersion();
++//
++//    OverflowProcessor overflowProcessor = getOverflowProcessor(getProcessorName());
++//    List<ModificationFile> updatedModFiles = new ArrayList<>();
++//    try {
++//      overflowProcessor.delete(deviceId, measurementId, timestamp, version, updatedModFiles);
++//    } catch (IOException e) {
++//      for (ModificationFile modificationFile : updatedModFiles) {
++//        try {
++//          modificationFile.abort();
++//        } catch (IOException e1) {
++//          throw new ProcessorException(e);
++//        }
++//      }
++//      throw new ProcessorException(e);
++//    }
++//  }
++//
++//  public CopyOnReadLinkedList<BufferWriteProcessor> getClosingBufferWriteProcessor() {
++//    for (BufferWriteProcessor processor: closingBufferWriteProcessor.cloneList()) {
++//      if (processor.isClosed()) {
++//        closingBufferWriteProcessor.remove(processor);
++//      }
++//    }
++//    closingBufferWriteProcessor.reset();
++//    return closingBufferWriteProcessor;
++//  }
++//
++//  @Override
++//  public boolean equals(Object o) {
++//    if (this == o) {
++//      return true;
++//    }
++//    if (o == null || getClass() != o.getClass()) {
++//      return false;
++//    }
++//    if (!super.equals(o)) {
++//      return false;
++//    }
++//    FileNodeProcessor that = (FileNodeProcessor) o;
++//    return isOverflowed == that.isOverflowed &&
++//        numOfMergeFile == that.numOfMergeFile &&
++//        lastMergeTime == that.lastMergeTime &&
++//        multiPassLockToken == that.multiPassLockToken &&
++//        Objects.equals(statStorageDeltaName, that.statStorageDeltaName) &&
++//        Objects.equals(statParamsHashMap, that.statParamsHashMap) &&
++//        Objects.equals(lastUpdateTimeMap, that.lastUpdateTimeMap) &&
++//        Objects.equals(flushLastUpdateTimeMap, that.flushLastUpdateTimeMap) &&
++//        Objects.equals(invertedIndexOfFiles, that.invertedIndexOfFiles) &&
++//        Objects.equals(emptyTsFileResource, that.emptyTsFileResource) &&
++//        Objects.equals(newFileNodes, that.newFileNodes) &&
++//        isMerging == that.isMerging &&
++//        Objects.equals(fileNodeProcessorStore, that.fileNodeProcessorStore) &&
++//        Objects.equals(fileNodeRestoreFilePath, that.fileNodeRestoreFilePath) &&
++//        Objects.equals(bufferWriteProcessor, that.bufferWriteProcessor) &&
++//        Objects.equals(overflowProcessor, that.overflowProcessor) &&
++//        Objects.equals(oldMultiPassTokenSet, that.oldMultiPassTokenSet) &&
++//        Objects.equals(newMultiPassTokenSet, that.newMultiPassTokenSet) &&
++//        Objects.equals(oldMultiPassCount, that.oldMultiPassCount) &&
++//        Objects.equals(newMultiPassCount, that.newMultiPassCount) &&
++//        Objects.equals(parameters, that.parameters) &&
++//        Objects.equals(fileSchema, that.fileSchema) &&
++//        Objects.equals(fileNodeFlushAction, that.fileNodeFlushAction) &&
++//        Objects.equals(bufferwriteFlushAction, that.bufferwriteFlushAction) &&
++//        Objects.equals(overflowFlushAction, that.overflowFlushAction);
++//  }
++//
++//  @Override
++//  public int hashCode() {
++//    return processorName.hashCode();
++//  }
++//
++//  public class MergeRunnale implements Runnable {
++//
++//    @Override
++//    public void run() {
++//      try {
++//        ZoneId zoneId = IoTDBDescriptor.getInstance().getConfig().getZoneID();
++//        long mergeStartTime = System.currentTimeMillis();
++//        merge();
++//        long mergeEndTime = System.currentTimeMillis();
++//        long intervalTime = mergeEndTime - mergeStartTime;
++//        LOGGER.info(
++//            "The filenode processor {} merge start time is {}, "
++//                + "merge end time is {}, merge consumes {}ms.",
++//            getProcessorName(), ofInstant(Instant.ofEpochMilli(mergeStartTime),
++//                zoneId), ofInstant(Instant.ofEpochMilli(mergeEndTime),
++//                zoneId), intervalTime);
++//      } catch (FileNodeProcessorException e) {
++//        LOGGER.error("The filenode processor {} encountered an error when merging.",
++//            getProcessorName(), e);
++//        throw new ErrorDebugException(e);
++//      }
++//    }
++//  }
++//
++//  /**
++//   * Wait for all closing processors to finish their tasks.
++//   */
++//  public void waitforAllClosed() throws FileNodeProcessorException {
++//    close();
++//    while (getClosingBufferWriteProcessor().size() != 0) {
++//      checkAllClosingProcessors();
++//      try {
++//        Thread.sleep(10);
++//      } catch (InterruptedException e) {
++//        LOGGER.error("Filenode Processor {} is interrupted when waiting for all closed.", processorName, e);
++//      }
++//    }
++//  }
++//
++//
++//  void checkAllClosingProcessors() {
++//    Iterator<BufferWriteProcessor> iterator =
++//        this.getClosingBufferWriteProcessor().iterator();
++//    while (iterator.hasNext()) {
++//      BufferWriteProcessor processor = iterator.next();
++//      try {
++//        if (processor.getCloseFuture().get(10, TimeUnit.MILLISECONDS)) {
++//          //if finished, we can remove it.
++//          iterator.remove();
++//        }
++//      } catch (InterruptedException | ExecutionException e) {
++//        LOGGER.error("Close bufferwrite processor {} failed.", processor.getProcessorName(), e);
++//      } catch (TimeoutException e) {
++//        //do nothing.
++//      }
++//    }
++//    this.getClosingBufferWriteProcessor().reset();
++//  }
++//}
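
The commented-out close bookkeeping above (closeBufferWrite(), checkAllClosingProcessors()) reduces to one pattern: collect the close futures of processors that are shutting down, poll each future with a short timeout, and forget the ones that have completed. A minimal sketch of that pattern, with illustrative class and method names that are not part of the IoTDB code base:

    import java.util.List;
    import java.util.concurrent.CopyOnWriteArrayList;
    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.TimeoutException;

    public class ClosingTaskTracker {

      private final List<Future<Boolean>> closingTasks = new CopyOnWriteArrayList<>();

      public void track(Future<Boolean> closeFuture) {
        closingTasks.add(closeFuture);
      }

      /** Drops every close task that has finished; returns true when none remain. */
      public boolean pollClosed() {
        closingTasks.removeIf(task -> {
          try {
            // Wait at most 10 ms so the caller's retry loop stays responsive.
            return task.get(10, TimeUnit.MILLISECONDS);
          } catch (TimeoutException e) {
            return false;                          // still closing, keep waiting
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            return false;
          } catch (ExecutionException e) {
            return true;                           // close failed; nothing left to wait for
          }
        });
        return closingTasks.isEmpty();
      }
    }
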
diff --cc iotdb/src/main/java/org/apache/iotdb/db/engine/filenodeV2/FileNodeManagerV2.java
index 2d63912,dcd3924..541ad15
--- a/iotdb/src/main/java/org/apache/iotdb/db/engine/filenodeV2/FileNodeManagerV2.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/engine/filenodeV2/FileNodeManagerV2.java
@@@ -28,15 -28,14 +28,15 @@@ import java.util.concurrent.ConcurrentH
  import org.apache.commons.io.FileUtils;
  import org.apache.iotdb.db.conf.IoTDBConfig;
  import org.apache.iotdb.db.conf.IoTDBDescriptor;
--import org.apache.iotdb.db.engine.filenode.FileNodeProcessor;
  import org.apache.iotdb.db.engine.filenode.TsFileResource;
  import org.apache.iotdb.db.engine.querycontext.QueryDataSourceV2;
  import org.apache.iotdb.db.exception.FileNodeManagerException;
  import org.apache.iotdb.db.exception.FileNodeProcessorException;
  import org.apache.iotdb.db.exception.PathErrorException;
++import org.apache.iotdb.db.exception.ProcessorException;
  import org.apache.iotdb.db.exception.StartupException;
  import org.apache.iotdb.db.metadata.MManager;
 +import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
  import org.apache.iotdb.db.query.context.QueryContext;
  import org.apache.iotdb.db.service.IService;
  import org.apache.iotdb.db.service.ServiceType;
@@@ -115,7 -114,7 +115,7 @@@ public class FileNodeManagerV2 implemen
  
  
    private FileNodeProcessorV2 getProcessor(String devicePath)
--      throws FileNodeManagerException {
++      throws FileNodeManagerException, ProcessorException {
      String storageGroup = "";
      try {
        // return the storage group name
@@@ -217,7 -216,7 +217,7 @@@
    }
  
    private void delete(String processorName,
--      Iterator<Entry<String, FileNodeProcessor>> processorIterator)
++      Iterator<Entry<String, FileNodeProcessorV2>> processorIterator)
        throws FileNodeManagerException {
      // TODO
    }
@@@ -245,7 -244,7 +245,7 @@@
     * query data.
     */
    public QueryDataSourceV2 query(SingleSeriesExpression seriesExpression, QueryContext context)
--      throws FileNodeManagerException {
++      throws FileNodeManagerException, ProcessorException {
      String deviceId = seriesExpression.getSeriesPath().getDevice();
      String measurementId = seriesExpression.getSeriesPath().getMeasurement();
      FileNodeProcessorV2 fileNodeProcessor = getProcessor(deviceId);
@@@ -340,7 -339,7 +340,7 @@@
     */
    public void addTimeSeries(Path path, TSDataType dataType, TSEncoding encoding,
        CompressionType compressor,
--      Map<String, String> props) throws FileNodeManagerException {
++      Map<String, String> props) throws FileNodeManagerException, ProcessorException {
      FileNodeProcessorV2 fileNodeProcessor = getProcessor(path.getFullPath());
      fileNodeProcessor.addTimeSeries(path.getMeasurement(), dataType, encoding, compressor, props);
    }
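
The FileNodeManagerV2 changes above thread ProcessorException through getProcessor(), query() and addTimeSeries(), because constructing a FileNodeProcessorV2 can now fail during recovery. Conceptually, getProcessor() maps a device path to its storage group and reuses (or lazily creates) the processor for that group. The sketch below only illustrates that shape; resolveStorageGroup() and the double-checked creation are assumptions, not the exact IoTDB implementation:

    import java.util.concurrent.ConcurrentHashMap;

    class ProcessorRegistry {

      private final ConcurrentHashMap<String, FileNodeProcessorV2> processors = new ConcurrentHashMap<>();
      private final String baseDir;

      ProcessorRegistry(String baseDir) {
        this.baseDir = baseDir;
      }

      FileNodeProcessorV2 getProcessor(String devicePath) throws ProcessorException {
        String storageGroup = resolveStorageGroup(devicePath);
        FileNodeProcessorV2 processor = processors.get(storageGroup);
        if (processor == null) {
          synchronized (processors) {
            processor = processors.get(storageGroup);
            if (processor == null) {
              // The constructor may replay TsFiles/WAL, hence the ProcessorException.
              processor = new FileNodeProcessorV2(baseDir, storageGroup);
              processors.put(storageGroup, processor);
            }
          }
        }
        return processor;
      }

      private String resolveStorageGroup(String devicePath) {
        // Placeholder: the real code asks MManager for the storage group of devicePath.
        return devicePath;
      }
    }
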
diff --cc iotdb/src/main/java/org/apache/iotdb/db/engine/filenodeV2/FileNodeProcessorV2.java
index 12fa2d9,0b03327..f958c1c
--- a/iotdb/src/main/java/org/apache/iotdb/db/engine/filenodeV2/FileNodeProcessorV2.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/engine/filenodeV2/FileNodeProcessorV2.java
@@@ -96,7 -93,7 +96,7 @@@ public class FileNodeProcessorV2 
  
    private VersionController versionController;
  
--  public FileNodeProcessorV2(String baseDir, String storageGroupName) throws FileNodeProcessorException {
++  public FileNodeProcessorV2(String baseDir, String storageGroupName) throws ProcessorException {
      this.storageGroupName = storageGroupName;
      lock = new ReentrantReadWriteLock();
      closeFileNodeCondition = lock.writeLock().newCondition();
@@@ -122,43 -119,10 +122,42 @@@
      this.fileSchema = constructFileSchema(storageGroupName);
    }
  
 -  // TODO: Jiang Tian
 -  private void recovery(){
 +  private void recovery() throws ProcessorException {
 +    List<String> tsfiles = new ArrayList<>();
 +    List<String> fileFolders = directoryManager.getAllTsFileFolders();
 +    for (String baseDir: fileFolders) {
 +      File fileFolder = new File(baseDir, storageGroupName);
 +      if (!fileFolder.exists()) {
 +        continue;
 +      }
 +      for (File tsfile: fileFolder.listFiles()) {
 +        tsfiles.add(tsfile.getPath());
 +      }
 +    }
 +
 +//    Collections.sort(tsfiles, );
 +
 +    for (String tsfile: tsfiles) {
 +      TsFileResourceV2 tsFileResource = new TsFileResourceV2(new File(tsfile));
 +      SeqTsFileRecoverPerformer recoverPerformer = new SeqTsFileRecoverPerformer(storageGroupName + "-", fileSchema, versionController, tsFileResource);
 +      recoverPerformer.recover();
 +    }
 +
 +    tsfiles.clear();
 +    String unseqFileFolder = IoTDBDescriptor.getInstance().getConfig().getOverflowDataDir();
 +    File fileFolder = new File(unseqFileFolder, storageGroupName);
 +    if (!fileFolder.exists()) {
 +      return;
 +    }
 +    for (File unseqFile: fileFolder.listFiles()) {
 +      tsfiles.add(unseqFile.getPath());
 +    }
 +
-     for
 +
    }
  
 +
 +
    private FileSchema constructFileSchema(String storageGroupName) {
      List<MeasurementSchema> columnSchemaList;
      columnSchemaList = mManager.getSchemaForFileName(storageGroupName);
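
The new recovery() above still leaves the ordering of recovered files open (the Collections.sort(tsfiles, ) call is commented out and has no comparator yet). One plausible ordering, purely as an assumption since the final comparator is not in this diff, is oldest-first by the numeric prefix of the file name:

    import java.io.File;
    import java.util.Comparator;
    import java.util.List;

    final class TsFileRecoverOrder {

      // Files whose names start with a number (e.g. a creation timestamp) sort by that
      // number; anything else sorts last. Illustrative only.
      static final Comparator<String> BY_FILE_NAME = Comparator.comparingLong(path -> {
        String name = new File(path).getName();
        int dot = name.indexOf('.');
        String prefix = dot > 0 ? name.substring(0, dot) : name;
        try {
          return Long.parseLong(prefix);
        } catch (NumberFormatException e) {
          return Long.MAX_VALUE;
        }
      });

      static void sortForRecovery(List<String> tsfiles) {
        tsfiles.sort(BY_FILE_NAME);
      }
    }
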
diff --cc iotdb/src/main/java/org/apache/iotdb/db/engine/memcontrol/FlushPartialPolicy.java
index 5bef4b5,5bef4b5..54ccba2
--- a/iotdb/src/main/java/org/apache/iotdb/db/engine/memcontrol/FlushPartialPolicy.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/engine/memcontrol/FlushPartialPolicy.java
@@@ -20,7 -20,7 +20,8 @@@ package org.apache.iotdb.db.engine.memc
  
  import org.apache.iotdb.db.concurrent.ThreadName;
  import org.apache.iotdb.db.conf.IoTDBDescriptor;
--import org.apache.iotdb.db.engine.filenode.FileNodeManager;
++import org.apache.iotdb.db.engine.filenodeV2.FileNodeManagerV2;
++import org.apache.iotdb.db.exception.FileNodeManagerException;
  import org.apache.iotdb.db.utils.MemUtils;
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
@@@ -60,7 -60,7 +61,11 @@@ public class FlushPartialPolicy impleme
  
    private Thread createWorkerThread() {
      return new Thread(() -> {
--      FileNodeManager.getInstance().forceFlush(BasicMemController.UsageLevel.SAFE);
++      try {
++        FileNodeManagerV2.getInstance().syncCloseAllProcessor();
++      } catch (FileNodeManagerException e) {
++        LOGGER.error("sync close all file node processor failed", e);
++      }
        try {
          Thread.sleep(sleepInterval);
        } catch (InterruptedException e) {
diff --cc iotdb/src/main/java/org/apache/iotdb/db/engine/memcontrol/ForceFLushAllPolicy.java
index 14cc637,14cc637..423e63c
--- a/iotdb/src/main/java/org/apache/iotdb/db/engine/memcontrol/ForceFLushAllPolicy.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/engine/memcontrol/ForceFLushAllPolicy.java
@@@ -19,7 -19,7 +19,8 @@@
  package org.apache.iotdb.db.engine.memcontrol;
  
  import org.apache.iotdb.db.concurrent.ThreadName;
--import org.apache.iotdb.db.engine.filenode.FileNodeManager;
++import org.apache.iotdb.db.engine.filenodeV2.FileNodeManagerV2;
++import org.apache.iotdb.db.exception.FileNodeManagerException;
  import org.apache.iotdb.db.utils.MemUtils;
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
@@@ -55,7 -55,7 +56,13 @@@ public class ForceFLushAllPolicy implem
  
    private Thread createWorkerThread() {
      return new Thread(() ->
--            FileNodeManager.getInstance().forceFlush(BasicMemController.UsageLevel.DANGEROUS),
++    {
++      try {
++        FileNodeManagerV2.getInstance().syncCloseAllProcessor();
++      } catch (FileNodeManagerException e) {
++        logger.error("sync close all file node processor failed", e);
++      }
++    },
              ThreadName.FORCE_FLUSH_ALL_POLICY.getName());
    }
  }
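
Both memory-control policies now fall back to FileNodeManagerV2.getInstance().syncCloseAllProcessor() rather than the removed forceFlush(UsageLevel) call. The method itself is not shown in this diff; a plausible shape for it, sketched with an illustrative CloseableProcessor interface rather than the real IoTDB types, is simply to walk the processor map and block until each storage group has sealed its working file:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    class SyncCloseAllSketch {

      interface CloseableProcessor {
        // Blocks until the processor's working TsFile is sealed.
        void syncCloseAndSeal() throws Exception;
      }

      private final Map<String, CloseableProcessor> processors = new ConcurrentHashMap<>();

      void syncCloseAllProcessor() {
        for (Map.Entry<String, CloseableProcessor> entry : processors.entrySet()) {
          try {
            entry.getValue().syncCloseAndSeal();
          } catch (Exception e) {
            // Mirror the policies above: log the failure and keep closing the rest.
            System.err.println("sync close of " + entry.getKey() + " failed: " + e);
          }
        }
      }
    }
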
diff --cc iotdb/src/main/java/org/apache/iotdb/db/engine/overflow/io/OverflowMemtable.java
index dc83333,dc83333..0000000
deleted file mode 100644,100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/engine/overflow/io/OverflowMemtable.java
+++ /dev/null
@@@ -1,111 -1,111 +1,0 @@@
--/**
-- * Licensed to the Apache Software Foundation (ASF) under one
-- * or more contributor license agreements.  See the NOTICE file
-- * distributed with this work for additional information
-- * regarding copyright ownership.  The ASF licenses this file
-- * to you under the Apache License, Version 2.0 (the
-- * "License"); you may not use this file except in compliance
-- * with the License.  You may obtain a copy of the License at
-- *
-- *      http://www.apache.org/licenses/LICENSE-2.0
-- *
-- * Unless required by applicable law or agreed to in writing,
-- * software distributed under the License is distributed on an
-- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-- * KIND, either express or implied.  See the License for the
-- * specific language governing permissions and limitations
-- * under the License.
-- */
--package org.apache.iotdb.db.engine.overflow.io;
--
--import java.util.HashMap;
--import java.util.Map;
--import org.apache.iotdb.db.engine.memtable.IMemTable;
--import org.apache.iotdb.db.engine.memtable.PrimitiveMemTable;
--import org.apache.iotdb.db.engine.querycontext.ReadOnlyMemChunk;
--import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
--import org.apache.iotdb.tsfile.file.metadata.statistics.LongStatistics;
--import org.apache.iotdb.tsfile.write.record.TSRecord;
--import org.apache.iotdb.tsfile.write.record.datapoint.DataPoint;
--
--/**
-- * This class is used to store and query all overflow data in memory.<br>
-- */
--public class OverflowMemtable extends PrimitiveMemTable {
--
--  /**
--   * store update and delete data
--   */
--  private Map<String, Map<String, LongStatistics>> indexTrees;
--
--  /**
--   * store insert data
--   */
--  private IMemTable memTable;
--
--  public OverflowMemtable() {
--    indexTrees = new HashMap<>();
--    memTable = new PrimitiveMemTable();
--  }
--
--  @Override
--  public void insert(TSRecord tsRecord) {
--    for (DataPoint dataPoint : tsRecord.dataPointList) {
--      memTable.write(tsRecord.deviceId, dataPoint.getMeasurementId(), dataPoint.getType(),
--              tsRecord.time,
--              dataPoint.getValue().toString());
--    }
--  }
--
--  /**
--   * @deprecated update time series data
--   */
--  @Deprecated
--  public void update(String deviceId, String measurementId, long startTime, long endTime,
--                     TSDataType dataType,
--                     byte[] value) {
--    if (!indexTrees.containsKey(deviceId)) {
--      indexTrees.put(deviceId, new HashMap<>());
--    }
--    if (!indexTrees.get(deviceId).containsKey(measurementId)) {
--      indexTrees.get(deviceId).put(measurementId, new LongStatistics());
--    }
--    indexTrees.get(deviceId).get(measurementId).updateStats(startTime, endTime);
--  }
--
--  public void delete(String deviceId, String measurementId, long timestamp, boolean isFlushing) {
--    super.delete(deviceId, measurementId, timestamp);
--  }
--
--  public ReadOnlyMemChunk queryOverflowInsertInMemory(String deviceId, String measurementId,
--      TSDataType dataType, Map<String, String> props) {
--    return super.query(deviceId, measurementId, dataType, props);
--  }
--
--  public boolean isEmptyOfOverflowSeriesMap() {
--    return super.isEmpty();
--  }
--
--//  public Map<String, Map<String, LongStatistics>> getOverflowSeriesMap() {
--//    return super;
--//  }
--
--  public boolean isEmptyOfMemTable() {
--    return memTable.isEmpty();
--  }
--
--  public IMemTable getMemTabale() {
--    return memTable;
--  }
--
--  public long getSize() {
--    // TODO: calculate the size of this overflow support
--    return 0;
--  }
--
--  @Override
--  public void clear() {
--//    indexTrees.clear();
--    super.clear();
--  }
--}
diff --cc iotdb/src/main/java/org/apache/iotdb/db/engine/overflow/io/OverflowProcessor.java
index 9bc42d0,ebbcfc4..0000000
deleted file mode 100644,100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/engine/overflow/io/OverflowProcessor.java
+++ /dev/null
@@@ -1,820 -1,819 +1,0 @@@
--/**
-- * Licensed to the Apache Software Foundation (ASF) under one
-- * or more contributor license agreements.  See the NOTICE file
-- * distributed with this work for additional information
-- * regarding copyright ownership.  The ASF licenses this file
-- * to you under the Apache License, Version 2.0 (the
-- * "License"); you may not use this file except in compliance
-- * with the License.  You may obtain a copy of the License at
-- *
-- *      http://www.apache.org/licenses/LICENSE-2.0
-- *
-- * Unless required by applicable law or agreed to in writing,
-- * software distributed under the License is distributed on an
-- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-- * KIND, either express or implied.  See the License for the
-- * specific language governing permissions and limitations
-- * under the License.
-- */
--package org.apache.iotdb.db.engine.overflow.io;
--
--import static org.apache.iotdb.db.conf.IoTDBConstant.PATH_SEPARATOR;
--
--import java.io.File;
--import java.io.IOException;
--import java.time.Instant;
--import java.time.ZonedDateTime;
--import java.util.ArrayList;
--import java.util.Arrays;
--import java.util.Collections;
--import java.util.List;
--import java.util.Map;
--import java.util.Objects;
--import java.util.concurrent.ExecutionException;
--import java.util.concurrent.Future;
--import java.util.concurrent.atomic.AtomicLong;
--import java.util.concurrent.locks.ReentrantLock;
--import org.apache.iotdb.db.conf.IoTDBConfig;
--import org.apache.iotdb.db.conf.IoTDBDescriptor;
--import org.apache.iotdb.db.engine.Processor;
--import org.apache.iotdb.db.engine.bufferwrite.Action;
--import org.apache.iotdb.db.engine.bufferwrite.FileNodeConstants;
--import org.apache.iotdb.db.engine.memcontrol.BasicMemController;
--import org.apache.iotdb.db.engine.memcontrol.BasicMemController.UsageLevel;
--import org.apache.iotdb.db.engine.memtable.IMemTable;
--import org.apache.iotdb.db.engine.memtable.MemSeriesLazyMerger;
--import org.apache.iotdb.db.engine.memtable.MemTableFlushCallBack;
--import org.apache.iotdb.db.engine.memtable.MemTablePool;
--import org.apache.iotdb.db.engine.memtable.PrimitiveMemTable;
--import org.apache.iotdb.db.engine.modification.Deletion;
--import org.apache.iotdb.db.engine.modification.ModificationFile;
--import org.apache.iotdb.db.engine.pool.FlushPoolManager;
--import org.apache.iotdb.db.engine.querycontext.MergeSeriesDataSource;
--import org.apache.iotdb.db.engine.querycontext.OverflowInsertFile;
--import org.apache.iotdb.db.engine.querycontext.OverflowSeriesDataSource;
--import org.apache.iotdb.db.engine.querycontext.ReadOnlyMemChunk;
--import org.apache.iotdb.db.engine.version.VersionController;
--import org.apache.iotdb.db.exception.OverflowProcessorException;
--import org.apache.iotdb.db.exception.ProcessorException;
--import org.apache.iotdb.db.monitor.collector.MemTableWriteTimeCost;
--import org.apache.iotdb.db.qp.constant.DatetimeUtils;
--import org.apache.iotdb.db.query.context.QueryContext;
--import org.apache.iotdb.db.query.control.FileReaderManager;
--import org.apache.iotdb.db.utils.ImmediateFuture;
--import org.apache.iotdb.db.utils.MemUtils;
--import org.apache.iotdb.db.writelog.node.WriteLogNode;
- import org.apache.iotdb.db.writelog.recover.UnseqTsFileRecoverPerformer;
--import org.apache.iotdb.tsfile.common.conf.TSFileConfig;
--import org.apache.iotdb.tsfile.file.metadata.ChunkMetaData;
--import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
--import org.apache.iotdb.tsfile.read.common.Path;
--import org.apache.iotdb.tsfile.utils.BytesUtils;
--import org.apache.iotdb.tsfile.utils.Pair;
--import org.apache.iotdb.tsfile.write.record.TSRecord;
--import org.apache.iotdb.tsfile.write.schema.FileSchema;
--import org.apache.iotdb.tsfile.write.writer.TsFileIOWriter;
--import org.slf4j.Logger;
--import org.slf4j.LoggerFactory;
--
--public class OverflowProcessor extends Processor {
--
--  private static final Logger LOGGER = LoggerFactory.getLogger(OverflowProcessor.class);
--  private static final IoTDBConfig TsFileDBConf = IoTDBDescriptor.getInstance().getConfig();
--  private OverflowResource workResource;
--  private OverflowResource mergeResource;
--
--  private List<IMemTable> overflowFlushMemTables = new ArrayList<>();
--  private IMemTable workSupport;
--//  private OverflowMemtable flushSupport;
--  private long flushId = -1;
--  private volatile Future<Boolean> flushFuture = new ImmediateFuture<>(true);
--  private volatile boolean isMerge;
--  private int valueCount;
--  private String parentPath;
--  private long lastFlushTime = -1;
--  private AtomicLong dataPathCount = new AtomicLong();
--  private ReentrantLock queryFlushLock = new ReentrantLock();
--
--  private Action overflowFlushAction;
--  private Action filenodeFlushAction;
--  private FileSchema fileSchema;
--
--  private long memThreshold = TSFileConfig.groupSizeInByte;
--  private AtomicLong memSize = new AtomicLong();
--
--  private VersionController versionController;
--
--  private boolean isClosed = true;
--  private boolean isFlush = false;
--
--  public OverflowProcessor(String processorName, Map<String, Action> parameters,
--      FileSchema fileSchema, VersionController versionController)
--      throws ProcessorException {
--    super(processorName);
--    this.fileSchema = fileSchema;
--    this.versionController = versionController;
--    String overflowDirPath = TsFileDBConf.getOverflowDataDir();
--    if (overflowDirPath.length() > 0
--        && overflowDirPath.charAt(overflowDirPath.length() - 1) != File.separatorChar) {
--      overflowDirPath = overflowDirPath + File.separatorChar;
--    }
--    this.parentPath = overflowDirPath + processorName;
--
--    overflowFlushAction = parameters.get(FileNodeConstants.OVERFLOW_FLUSH_ACTION);
--    filenodeFlushAction = parameters
--        .get(FileNodeConstants.FILENODE_PROCESSOR_FLUSH_ACTION);
--    reopen();
--    try {
--      getLogNode();
--    } catch (IOException e) {
--      throw new ProcessorException(e);
--    }
--  }
--
--  public void reopen() throws ProcessorException {
--    if (!isClosed) {
--      return;
--    }
--    // recover file
--    File processorDataDir = new File(parentPath);
--    if (!processorDataDir.exists()) {
--      processorDataDir.mkdirs();
--    }
--    recover(processorDataDir);
--
--    // memory
--    if (workSupport == null) {
--      workSupport = new PrimitiveMemTable();
--    } else {
--      workSupport.clear();
--    }
--    isClosed = false;
--    isFlush = false;
--  }
--
--  public void checkOpen() throws OverflowProcessorException {
--    if (isClosed) {
--      throw new OverflowProcessorException("OverflowProcessor already closed");
--    }
--  }
--
--
--  private void recover(File parentFile) throws ProcessorException {
--    String[] subFilePaths = clearFile(parentFile.list());
--
--    try {
--      if (subFilePaths.length == 0) {
--        workResource = new OverflowResource(parentPath,
--            String.valueOf(dataPathCount.getAndIncrement()), versionController, processorName);
--      } else if (subFilePaths.length == 1) {
--        long count = Long.parseLong(subFilePaths[0]);
--        dataPathCount.addAndGet(count + 1);
--        workResource = new OverflowResource(parentPath, String.valueOf(count), versionController,
--            processorName);
--        LOGGER.info("The overflow processor {} recover from work status.", getProcessorName());
--      } else {
--        long count1 = Long.parseLong(subFilePaths[0]);
--        long count2 = Long.parseLong(subFilePaths[1]);
--        if (count1 > count2) {
--          long temp = count1;
--          count1 = count2;
--          count2 = temp;
--        }
--        dataPathCount.addAndGet(count2 + 1);
--        // work dir > merge dir
--        workResource = new OverflowResource(parentPath, String.valueOf(count2), versionController,
--            processorName);
--        mergeResource = new OverflowResource(parentPath, String.valueOf(count1), versionController,
--            processorName);
--        LOGGER.info("The overflow processor {} recover from merge status.", getProcessorName());
--      }
--    } catch (IOException e) {
--      throw new ProcessorException(e);
--    }
--
-     UnseqTsFileRecoverPerformer recoverPerformer =
-         new UnseqTsFileRecoverPerformer(workResource, fileSchema);
-     recoverPerformer.recover();
 -//    UnseqTsFileRecoverPerformer recoverPerformer =
 -//        new UnseqTsFileRecoverPerformer(workResource, fileSchema);
 -//    recoverPerformer.recover();
--  }
--
--  private String[] clearFile(String[] subFilePaths) {
--    // just clear the files whose name are number.
--    List<String> files = new ArrayList<>();
--    for (String file : subFilePaths) {
--      try {
--        Long.valueOf(file);
--        files.add(file);
--      } catch (NumberFormatException e) {
--        // ignore the exception, if the name of file is not a number.
--
--      }
--    }
--    return files.toArray(new String[files.size()]);
--  }
--
--  /**
--   * insert one time-series record
--   */
--  public void insert(TSRecord tsRecord) throws IOException {
--    MemTableWriteTimeCost.getInstance().init();
--    try {
--      checkOpen();
--    } catch (OverflowProcessorException e) {
--      throw new IOException(e);
--    }
--    // memory control
--    long memUage = MemUtils.getRecordSize(tsRecord);
--    UsageLevel usageLevel = BasicMemController.getInstance().acquireUsage(this, memUage);
--    switch (usageLevel) {
--      case SAFE:
--        // insert data
--        workSupport.insert(tsRecord);
--        valueCount++;
--        // check asyncFlush
--        memUage = memSize.addAndGet(memUage);
--        if (memUage > memThreshold) {
--          if (LOGGER.isWarnEnabled()) {
--            LOGGER.warn("The usage of memory {} in overflow processor {} reaches the threshold {}",
--                MemUtils.bytesCntToStr(memUage), getProcessorName(),
--                MemUtils.bytesCntToStr(memThreshold));
--          }
--          flush();
--        }
--        break;
--      case WARNING:
--        // insert data
--        workSupport.insert(tsRecord);
--        valueCount++;
--        // asyncFlush
--        memSize.addAndGet(memUage);
--        flush();
--        break;
--      case DANGEROUS:
--        throw new IOException("The insertion is rejected because dangerous memory level hit");
--    }
--
--
--  }
--
--  /**
--   * @deprecated update one time-series data which time range is from startTime from endTime.
--   */
--  @Deprecated
--  public void update(String deviceId, String measurementId, long startTime, long endTime,
--      TSDataType type, byte[] value) {
--//    workSupport.update(deviceId, measurementId, startTime, endTime, type, value);
--//    valueCount++;
--    throw new UnsupportedOperationException("update has been deprecated");
--  }
--
--  /**
--   * @deprecated this function need to be re-implemented.
--   */
--  @Deprecated
--  public void update(String deviceId, String measurementId, long startTime, long endTime,
--      TSDataType type, String value) {
--//    workSupport.update(deviceId, measurementId, startTime, endTime, type,
--//        convertStringToBytes(type, value));
--//    valueCount++;
--    throw new UnsupportedOperationException("update has been deprecated");
--  }
--
--  private byte[] convertStringToBytes(TSDataType type, String o) {
--    switch (type) {
--      case INT32:
--        return BytesUtils.intToBytes(Integer.valueOf(o));
--      case INT64:
--        return BytesUtils.longToBytes(Long.valueOf(o));
--      case BOOLEAN:
--        return BytesUtils.boolToBytes(Boolean.valueOf(o));
--      case FLOAT:
--        return BytesUtils.floatToBytes(Float.valueOf(o));
--      case DOUBLE:
--        return BytesUtils.doubleToBytes(Double.valueOf(o));
--      case TEXT:
--        return BytesUtils.stringToBytes(o);
--      default:
--        LOGGER.error("Unsupport data type: {}", type);
--        throw new UnsupportedOperationException("Unsupport data type:" + type);
--    }
--  }
--
--  /**
--   * Delete data of a timeseries whose time ranges from 0 to timestamp.
--   *
--   * @param deviceId the deviceId of the timeseries.
--   * @param measurementId the measurementId of the timeseries.
--   * @param timestamp the upper-bound of deletion time.
--   * @param version the version number of this deletion.
--   * @param updatedModFiles add successfully updated Modification files to the list, and abort them
--   * when exception is raised
--   */
--  public void delete(String deviceId, String measurementId, long timestamp, long version,
--      List<ModificationFile> updatedModFiles) throws IOException {
--    try {
--      checkOpen();
--    } catch (OverflowProcessorException e) {
--      throw new IOException(e);
--    }
--    workResource.delete(deviceId, measurementId, timestamp, version, updatedModFiles);
--    workSupport.delete(deviceId, measurementId, timestamp);
--    if (isFlush()) {
--      mergeResource.delete(deviceId, measurementId, timestamp, version, updatedModFiles);
--      for (IMemTable memTable : overflowFlushMemTables) {
--        if (memTable.containSeries(deviceId, measurementId)) {
--          memTable.delete(new Deletion(deviceId + PATH_SEPARATOR + measurementId, 0, timestamp));
--        }
--      }
--    }
--  }
--
--  /**
--   * query all overflow data which contain insert data in memory, insert data in file, update/delete
--   * data in memory, update/delete data in file.
--   *
--   * @return OverflowSeriesDataSource
--   */
--  public OverflowSeriesDataSource query(String deviceId, String measurementId,
--      TSDataType dataType, Map<String, String> props, QueryContext context)
--      throws IOException {
--    try {
--      checkOpen();
--    } catch (OverflowProcessorException e) {
--      throw new IOException(e);
--    }
--    queryFlushLock.lock();
--    try {
--      // query insert data in memory and unseqTsFiles
--      // memory
--      ReadOnlyMemChunk insertInMem = queryOverflowInsertInMemory(deviceId, measurementId,
--          dataType, props);
--      List<OverflowInsertFile> overflowInsertFileList = new ArrayList<>();
--      // work file
--      Pair<String, List<ChunkMetaData>> insertInDiskWork = queryWorkDataInOverflowInsert(deviceId,
--          measurementId,
--          dataType, context);
--      if (insertInDiskWork.left != null) {
--        overflowInsertFileList
--            .add(0, new OverflowInsertFile(insertInDiskWork.left,
--                insertInDiskWork.right));
--      }
--      // merge file
--      Pair<String, List<ChunkMetaData>> insertInDiskMerge = queryMergeDataInOverflowInsert(deviceId,
--          measurementId, dataType, context);
--      if (insertInDiskMerge.left != null) {
--        overflowInsertFileList
--            .add(0, new OverflowInsertFile(insertInDiskMerge.left
--                , insertInDiskMerge.right));
--      }
--      // work file
--      return new OverflowSeriesDataSource(new Path(deviceId + "." + measurementId), dataType,
--          overflowInsertFileList, insertInMem);
--    } finally {
--      queryFlushLock.unlock();
--    }
--  }
--
--  /**
--   * query insert data in memory table. while flushing, merge the work memory table with asyncFlush
--   * memory table.
--   *
--   * @return insert data in SeriesChunkInMemTable
--   */
--  private ReadOnlyMemChunk queryOverflowInsertInMemory(String deviceId, String measurementId,
--      TSDataType dataType, Map<String, String> props) {
--
--    MemSeriesLazyMerger memSeriesLazyMerger = new MemSeriesLazyMerger();
--    queryFlushLock.lock();
--    try {
--      if (!overflowFlushMemTables.isEmpty() && isFlush()) {
--        for (int i = overflowFlushMemTables.size() - 1; i >= 0; i--) {
--          memSeriesLazyMerger.addMemSeries(
--              overflowFlushMemTables.get(i).query(deviceId, measurementId, dataType, props));
--        }
--      }
--      memSeriesLazyMerger
--          .addMemSeries(workSupport.query(deviceId, measurementId, dataType, props));
--      // memSeriesLazyMerger has handled the props,
--      // so we do not need to handle it again in the following readOnlyMemChunk
--      return new ReadOnlyMemChunk(dataType, memSeriesLazyMerger, Collections.emptyMap());
--    } finally {
--      queryFlushLock.unlock();
--    }
--  }
--
--  /**
--   * Get the insert data which is WORK in unseqTsFile.
--   *
--   * @param deviceId deviceId of the target time-series
--   * @param measurementId measurementId of the target time-series
--   * @param dataType data type of the target time-series
--   * @return the seriesPath of unseqTsFile, List of TimeSeriesChunkMetaData for the special
--   * time-series.
--   */
--  private Pair<String, List<ChunkMetaData>> queryWorkDataInOverflowInsert(String deviceId,
--      String measurementId, TSDataType dataType, QueryContext context) {
--    return new Pair<>(
--        workResource.getInsertFilePath(),
--        workResource.getInsertMetadatas(deviceId, measurementId, dataType, context));
--  }
--
--  /**
--   * Get the all merge data in unseqTsFile and overflowFile.
--   *
--   * @return MergeSeriesDataSource
--   */
--  public MergeSeriesDataSource queryMerge(String deviceId, String measurementId,
--      TSDataType dataType, QueryContext context) {
--    Pair<String, List<ChunkMetaData>> mergeInsert = queryMergeDataInOverflowInsert(deviceId,
--        measurementId,
--        dataType, context);
--    return new MergeSeriesDataSource(new OverflowInsertFile(mergeInsert.left, mergeInsert.right));
--  }
--
--  public OverflowSeriesDataSource queryMerge(String deviceId, String measurementId,
--      TSDataType dataType, boolean isMerge, QueryContext context) {
--    Pair<String, List<ChunkMetaData>> mergeInsert = queryMergeDataInOverflowInsert(deviceId,
--        measurementId,
--        dataType, context);
--    OverflowSeriesDataSource overflowSeriesDataSource = new OverflowSeriesDataSource(
--        new Path(deviceId + "." + measurementId));
--    overflowSeriesDataSource.setReadableMemChunk(null);
--    overflowSeriesDataSource
--        .setOverflowInsertFileList(
--            Arrays.asList(new OverflowInsertFile(mergeInsert.left, mergeInsert.right)));
--    return overflowSeriesDataSource;
--  }
--
--  /**
--   * Get the insert data which is MERGE in unseqTsFile
--   *
--   * @return the seriesPath of unseqTsFile, List of TimeSeriesChunkMetaData for the special
--   * time-series.
--   **/
--  private Pair<String, List<ChunkMetaData>> queryMergeDataInOverflowInsert(String deviceId,
--      String measurementId, TSDataType dataType, QueryContext context) {
--    if (!isMerge) {
--      return new Pair<>(null, null);
--    }
--    return new Pair<>(
--        mergeResource.getInsertFilePath(),
--        mergeResource.getInsertMetadatas(deviceId, measurementId, dataType, context));
--  }
--
--
--  private void switchFlushToWork() {
--    LOGGER.info("Overflow Processor {} try to get flushQueryLock for switchFlushToWork", getProcessorName());
--    queryFlushLock.lock();
--    LOGGER.info("Overflow Processor {} get flushQueryLock for switchFlushToWork", getProcessorName());
--    try {
--//      flushSupport.clear();
--      workResource.appendMetadatas();
--      isFlush = false;
--    } finally {
--      queryFlushLock.unlock();
--    }
--  }
--
--  public void switchWorkToMerge() throws IOException {
--    if (mergeResource == null) {
--      mergeResource = workResource;
--      workResource = new OverflowResource(parentPath,
--          String.valueOf(dataPathCount.getAndIncrement()), versionController, processorName);
--    }
--    isMerge = true;
--    LOGGER.info("The overflow processor {} switch from WORK to MERGE", getProcessorName());
--  }
--
--  public void switchMergeToWork() throws IOException {
--    if (mergeResource != null) {
--      FileReaderManager.getInstance().closeFileAndRemoveReader(mergeResource.getInsertFilePath());
--      mergeResource.close();
--      mergeResource.deleteResource();
--      mergeResource = null;
--    }
--    isMerge = false;
--    LOGGER.info("The overflow processor {} switch from MERGE to WORK", getProcessorName());
--  }
--
--  public boolean isMerge() {
--    return isMerge;
--  }
--
--  public boolean isFlush() {
--    return isFlush;
--  }
--
--  private void removeFlushedMemTable(IMemTable memTable, TsFileIOWriter overflowIOWriter) {
--    this.writeLock();
--    //TODO check this implementation in BufferWriteProcessor
--    try {
--      overflowFlushMemTables.remove(memTable);
--    } finally {
--      this.writeUnlock();
--    }
--  }
--
--  private boolean flushTask(String displayMessage, IMemTable currentMemTableToFlush,
--      long flushId, MemTableFlushCallBack removeFlushedMemTable) {
--    boolean result;
--    long flushStartTime = System.currentTimeMillis();
--    try {
--      LOGGER.info("The overflow processor {} starts flushing {}.", getProcessorName(),
--          displayMessage);
--      // asyncFlush data
--      workResource
--          .flush(fileSchema, currentMemTableToFlush, getProcessorName(), flushId, removeFlushedMemTable);
--      filenodeFlushAction.act();
--      // insert-ahead log
--      if (IoTDBDescriptor.getInstance().getConfig().isEnableWal()) {
--        getLogNode().notifyEndFlush();
--      }
--      result = true;
--    } catch (IOException e) {
--      LOGGER.error("Flush overflow processor {} rowgroup to file error in {}. Thread {} exits.",
--          getProcessorName(), displayMessage, Thread.currentThread().getName(), e);
--      result = false;
--    } catch (Exception e) {
--      LOGGER.error("FilenodeFlushAction action failed. Thread {} exits.",
--          Thread.currentThread().getName(), e);
--      result = false;
--    } finally {
--      // switch from asyncFlush to work.
--      switchFlushToWork();
--    }
--    // log asyncFlush time
--    if (LOGGER.isInfoEnabled()) {
--      LOGGER
--          .info("The overflow processor {} ends flushing {}.", getProcessorName(), displayMessage);
--      long flushEndTime = System.currentTimeMillis();
--      LOGGER.info(
--          "The overflow processor {} asyncFlush {}, start time is {}, asyncFlush end time is {}," +
--              " time consumption is {}ms",
--          getProcessorName(), displayMessage,
--          DatetimeUtils.convertMillsecondToZonedDateTime(flushStartTime),
--          DatetimeUtils.convertMillsecondToZonedDateTime(flushEndTime),
--          flushEndTime - flushStartTime);
--    }
--    return result;
--  }
--
--  @Override
--  public synchronized Future<Boolean> flush() throws IOException {
--    // statistic information for asyncFlush
--    if (lastFlushTime > 0 && LOGGER.isInfoEnabled()) {
--      long thisFLushTime = System.currentTimeMillis();
--      ZonedDateTime lastDateTime = ZonedDateTime.ofInstant(Instant.ofEpochMilli(lastFlushTime),
--          IoTDBDescriptor.getInstance().getConfig().getZoneID());
--      ZonedDateTime thisDateTime = ZonedDateTime.ofInstant(Instant.ofEpochMilli(thisFLushTime),
--          IoTDBDescriptor.getInstance().getConfig().getZoneID());
--      LOGGER.info(
--          "The overflow processor {} last asyncFlush time is {}, this asyncFlush time is {},"
--              + " asyncFlush time interval is {}s",
--          getProcessorName(), lastDateTime, thisDateTime,
--          (thisFLushTime - lastFlushTime) / 1000);
--    }
--    lastFlushTime = System.currentTimeMillis();
--//    try {
--//      flushFuture.get();
--//    } catch (InterruptedException | ExecutionException e) {
--//      throw new IOException(e);
--//    }
--    if (valueCount > 0) {
--      try {
--        // backup newIntervalFile list and emptyIntervalFileNode
--        overflowFlushAction.act();
--      } catch (Exception e) {
--        LOGGER.error("Flush the overflow rowGroup to file faied, when overflowFlushAction act");
--        throw new IOException(e);
--      }
--      if (IoTDBDescriptor.getInstance().getConfig().isEnableWal()) {
--        try {
--          getLogNode().notifyStartFlush();
--        } catch (IOException e) {
--          LOGGER.error("Overflow processor {} encountered an error when notifying log node, {}",
--              getProcessorName(), e);
--        }
--      }
--      BasicMemController.getInstance().releaseUsage(this, memSize.get());
--      memSize.set(0);
--      valueCount = 0;
--
--//      long version = versionController.nextVersion();
--      //add mmd
--      overflowFlushMemTables.add(workSupport);
--      IMemTable tmpMemTableToFlush = workSupport;
--      workSupport = MemTablePool.getInstance().getEmptyMemTable(this);
--      flushId++;
--      flushFuture = FlushPoolManager.getInstance().submit(() -> flushTask("asynchronously",
--          tmpMemTableToFlush, flushId, this::removeFlushedMemTable));
--
--      // switch from work to asyncFlush
--//      switchWorkToFlush();
--//      flushFuture = FlushPoolManager.getInstance().submit(() ->
--//          flushTask("asynchronously", walTaskId));
--    } else {
--//      flushFuture = new ImmediateFuture(true);
--      LOGGER.info("Nothing data points to be flushed");
--    }
--    return flushFuture;
--
--  }
--
--  @Override
--  public void close() throws OverflowProcessorException {
--    if (isClosed) {
--      return;
--    }
--    LOGGER.info("The overflow processor {} starts setCloseMark operation.", getProcessorName());
--    long closeStartTime = System.currentTimeMillis();
--    // asyncFlush data
--    try {
--      flush().get();
--    } catch (InterruptedException | ExecutionException e) {
--      LOGGER.error("Encounter an interrupt error when waitting for the flushing, "
--              + "the bufferwrite processor is {}.",
--          getProcessorName(), e);
--      Thread.currentThread().interrupt();
--    } catch (IOException e) {
--      throw new OverflowProcessorException(e);
--    }
--    if (LOGGER.isInfoEnabled()) {
--      LOGGER.info("The overflow processor {} ends setCloseMark operation.", getProcessorName());
--      // log setCloseMark time
--      long closeEndTime = System.currentTimeMillis();
--      LOGGER.info(
--          "The setCloseMark operation of overflow processor {} starts at {} and ends at {}."
--              + " It comsumes {}ms.",
--          getProcessorName(), DatetimeUtils.convertMillsecondToZonedDateTime(closeStartTime),
--          DatetimeUtils.convertMillsecondToZonedDateTime(closeEndTime),
--          closeEndTime - closeStartTime);
--    }
--    try {
--      clear();
--    } catch (IOException e) {
--      throw new OverflowProcessorException(e);
--    }
--    isClosed = true;
--  }
--
--  public void clear() throws IOException {
--    if (workResource != null) {
--      workResource.close();
--      workResource = null;
--    }
--    if (mergeResource != null) {
--      mergeResource.close();
--      mergeResource = null;
--    }
--  }
--
--  @Override
--  public boolean canBeClosed() {
--    // TODO: consider merge
--    return !isMerge;
--  }
--
--  @Override
--  public long memoryUsage() {
--    return memSize.get();
--  }
--
--  public String getOverflowRestoreFile() {
--    return workResource.getPositionFilePath();
--  }
--
--  /**
--   * @return The sum of all timeseries's metadata size within this file.
--   */
--  public long getMetaSize() {
--    // TODO : [MemControl] implement this
--    return 0;
--  }
--
--  /**
--   * @return The size of overflow file corresponding to this processor.
--   */
--  public long getFileSize() {
--    return workResource.getInsertFile().length() + memoryUsage();
--  }
--
--  /**
--   * Check whether the current overflow file contains too much metadata or has grown too large.
--   * If true, setCloseMark the current file and open a new one.
--   */
--  private boolean checkSize() {
--    IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig();
--    long metaSize = getMetaSize();
--    long fileSize = getFileSize();
--    LOGGER.info(
--        "The overflow processor {}, the size of metadata reaches {},"
--            + " the size of file reaches {}.",
--        getProcessorName(), MemUtils.bytesCntToStr(metaSize), MemUtils.bytesCntToStr(fileSize));
--    if (metaSize >= config.getOverflowMetaSizeThreshold()
--        || fileSize >= config.getOverflowFileSizeThreshold()) {
--      LOGGER.info(
--          "The overflow processor {}, size({}) of the file {} reaches threshold {},"
--              + " size({}) of metadata reaches threshold {}.",
--          getProcessorName(), MemUtils.bytesCntToStr(fileSize), workResource.getInsertFilePath(),
--          MemUtils.bytesCntToStr(config.getOverflowFileSizeThreshold()),
--          MemUtils.bytesCntToStr(metaSize),
--          MemUtils.bytesCntToStr(config.getOverflowMetaSizeThreshold()));
--      return true;
--    } else {
--      return false;
--    }
--  }
--
--  public WriteLogNode getLogNode() throws IOException {
--    return workResource.getLogNode();
--  }
--
--  public OverflowResource getWorkResource() {
--    return workResource;
--  }
--
--  @Override
--  public boolean equals(Object o) {
--    return this == o;
--  }
--
--  @Override
--  public int hashCode() {
--    return Objects.hash(super.hashCode());
--  }
--
--  /**
--   * used for test. We can block to wait for finishing flushing.
--   *
--   * @return the future of the asyncFlush() task.
--   */
--  public Future<Boolean> getFlushFuture() {
--    return flushFuture;
--  }
--
--  /**
--   * used for test. We can know when the asyncFlush() is called.
--   *
--   * @return the last asyncFlush() time.
--   */
--  public long getLastFlushTime() {
--    return lastFlushTime;
--  }
--
--  @Override
--  public String toString() {
--    return "OverflowProcessor in " + parentPath;
--  }
--
--  public boolean isClosed() {
--    return isClosed;
--  }
--
--
--//  private void switchWorkToFlush() {
--//    queryFlushLock.lock();
--//    try {
--//      Pair<> workSupport;
--//      workSupport = new OverflowMemtable();
--//      if(isFlushing){
--//        // isFlushing = true, indicating an AsyncFlushThread has been running, only add Current overflowInfo
--//        // into List.
--//
--//
--//      }else {
--//        isFlushing = true;
--////        flushFuture = FlushPoolManager.getInstance().submit(() ->
--//            flushTask("asynchronously", walTaskId));
--//      }
--//    } finally {
--//      queryFlushLock.unlock();
--//    }
--//  }
--
--//  private List<Pair<OverflowMemtable, OverflowResource>> flushTaskList;
--//
--//  private class AsyncFlushThread implements Runnable {
--//
--//    @Override
--//    public void run() {
--//      Pair<OverflowMemtable, OverflowResource> flushInfo;
--//      while (true) {
--//        queryFlushLock.lock();
--//        try {
--//          if (flushTaskList.isEmpty()) {
--//            // flushTaskList is empty, thus all asyncFlush tasks have done and switch
--//            OverflowMemtable temp = flushSupport == null ? new OverflowMemtable() : flushSupport;
--//            flushSupport = workSupport;
--//            workSupport = temp;
--//            isFlushing = true;
--//            break;
--//          }
--//          flushInfo = flushTaskList.remove(0);
--//        } finally {
--//          queryFlushLock.unlock();
--//        }
--//        asyncFlush(flushInfo);
--//      }
--//    }
--//  }
--}
diff --cc iotdb/src/main/java/org/apache/iotdb/db/monitor/StatMonitor.java
index 7888b6f,7888b6f..bb5f04d
--- a/iotdb/src/main/java/org/apache/iotdb/db/monitor/StatMonitor.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/monitor/StatMonitor.java
@@@ -31,7 -31,7 +31,7 @@@ import org.apache.iotdb.db.concurrent.I
  import org.apache.iotdb.db.concurrent.ThreadName;
  import org.apache.iotdb.db.conf.IoTDBConfig;
  import org.apache.iotdb.db.conf.IoTDBDescriptor;
--import org.apache.iotdb.db.engine.filenode.FileNodeManager;
++import org.apache.iotdb.db.engine.filenodeV2.FileNodeManagerV2;
  import org.apache.iotdb.db.exception.FileNodeManagerException;
  import org.apache.iotdb.db.exception.MetadataArgsErrorException;
  import org.apache.iotdb.db.exception.PathErrorException;
@@@ -40,6 -40,6 +40,7 @@@ import org.apache.iotdb.db.metadata.MMa
  import org.apache.iotdb.db.monitor.MonitorConstants.FileNodeManagerStatConstants;
  import org.apache.iotdb.db.monitor.MonitorConstants.FileNodeProcessorStatConstants;
  import org.apache.iotdb.db.monitor.collector.FileSize;
++import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
  import org.apache.iotdb.db.service.IService;
  import org.apache.iotdb.db.service.ServiceType;
  import org.apache.iotdb.tsfile.common.conf.TSFileConfig;
@@@ -349,7 -349,7 +350,7 @@@ public class StatMonitor implements ISe
          if (seconds >= statMonitorDetectFreqSec) {
            runningTimeMillis = currentTimeMillis;
            // delete time-series data
--          FileNodeManager fManager = FileNodeManager.getInstance();
++          FileNodeManagerV2 fManager = FileNodeManagerV2.getInstance();
            try {
              for (Map.Entry<String, IStatistic> entry : statisticMap.entrySet()) {
                for (String statParamName : entry.getValue().getStatParamsHashMap().keySet()) {
@@@ -374,11 -374,11 +375,11 @@@
      }
  
      public void insert(Map<String, TSRecord> tsRecordHashMap) {
--      FileNodeManager fManager = FileNodeManager.getInstance();
++      FileNodeManagerV2 fManager = FileNodeManagerV2.getInstance();
        int pointNum;
        for (Map.Entry<String, TSRecord> entry : tsRecordHashMap.entrySet()) {
          try {
--          fManager.insert(entry.getValue(), true);
++          fManager.insert(new InsertPlan(entry.getValue()));
            numInsert.incrementAndGet();
            pointNum = entry.getValue().dataPointList.size();
            numPointsInsert.addAndGet(pointNum);
diff --cc iotdb/src/main/java/org/apache/iotdb/db/monitor/collector/FileSize.java
index f3e3ffe,f3e3ffe..e9ecba2
--- a/iotdb/src/main/java/org/apache/iotdb/db/monitor/collector/FileSize.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/monitor/collector/FileSize.java
@@@ -30,8 -30,8 +30,9 @@@ import java.util.concurrent.atomic.Atom
  import org.apache.commons.io.FileUtils;
  import org.apache.iotdb.db.conf.IoTDBConfig;
  import org.apache.iotdb.db.conf.IoTDBDescriptor;
--import org.apache.iotdb.db.engine.filenode.FileNodeManager;
++import org.apache.iotdb.db.engine.filenodeV2.FileNodeManagerV2;
  import org.apache.iotdb.db.exception.FileNodeManagerException;
++import org.apache.iotdb.db.exception.ProcessorException;
  import org.apache.iotdb.db.monitor.IStatistic;
  import org.apache.iotdb.db.monitor.MonitorConstants;
  import org.apache.iotdb.db.monitor.MonitorConstants.FileSizeConstants;
@@@ -54,7 -54,7 +55,7 @@@ public class FileSize implements IStati
    private static final Logger LOGGER = LoggerFactory.getLogger(FileSize.class);
    private static final long ABNORMAL_VALUE = -1L;
    private static final long INIT_VALUE_IF_FILE_NOT_EXIST = 0L;
--  private FileNodeManager fileNodeManager;
++  private FileNodeManagerV2 fileNodeManager;
  
    @Override
    public Map<String, TSRecord> getAllStatisticsValue() {
@@@ -80,7 -80,7 +81,7 @@@
          fileNodeManager.addTimeSeries(path, TSDataType.valueOf(MonitorConstants.DATA_TYPE_INT64),
              TSEncoding.valueOf("RLE"), CompressionType.valueOf(TSFileConfig.compressor),
              Collections.emptyMap());
--      } catch (FileNodeManagerException e) {
++      } catch (FileNodeManagerException | ProcessorException e) {
          LOGGER.error("Register File Size Stats into fileNodeManager Failed.", e);
        }
      }
@@@ -114,7 -114,7 +115,7 @@@
    }
  
    private FileSize() {
--    fileNodeManager = FileNodeManager.getInstance();
++    fileNodeManager = FileNodeManagerV2.getInstance();
      if (config.isEnableStatMonitor()) {
        StatMonitor statMonitor = StatMonitor.getInstance();
        registerStatMetadata();
diff --cc iotdb/src/main/java/org/apache/iotdb/db/qp/physical/crud/InsertPlan.java
index 24f2f1b,6d58365..806b693
--- a/iotdb/src/main/java/org/apache/iotdb/db/qp/physical/crud/InsertPlan.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/qp/physical/crud/InsertPlan.java
@@@ -26,8 -26,7 +26,9 @@@ import java.util.Objects
  import org.apache.iotdb.db.qp.logical.Operator;
  import org.apache.iotdb.db.qp.logical.Operator.OperatorType;
  import org.apache.iotdb.db.qp.physical.PhysicalPlan;
 +import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
  import org.apache.iotdb.tsfile.read.common.Path;
++import org.apache.iotdb.tsfile.write.record.TSRecord;
  
  public class InsertPlan extends PhysicalPlan {
  
@@@ -50,6 -48,6 +51,20 @@@
      this.values = new String[] {insertValue};
    }
  
++  public InsertPlan(TSRecord tsRecord) {
++    super(false, OperatorType.INSERT);
++    this.deviceId = tsRecord.deviceId;
++    this.time = tsRecord.time;
++    this.measurements = new String[tsRecord.dataPointList.size()];
++    this.dataTypes = new TSDataType[tsRecord.dataPointList.size()];
++    this.values = new String[tsRecord.dataPointList.size()];
++    for (int i = 0; i < tsRecord.dataPointList.size(); i++) {
++      measurements[i] = tsRecord.dataPointList.get(i).getMeasurementId();
++      dataTypes[i] = tsRecord.dataPointList.get(i).getType();
++      values[i] = tsRecord.dataPointList.get(i).getValue().toString();
++    }
++  }
++
    public InsertPlan(String deviceId, long insertTime, String[] measurementList,
        String[] insertValues) {
      super(false, Operator.OperatorType.INSERT);
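
For reference, the new InsertPlan(TSRecord) constructor lets callers that still assemble
TSRecords (StatMonitor, LoadDataUtils, the benchmark) reuse the single write path of
FileNodeManagerV2. Below is a minimal sketch of that call pattern; it only uses calls that
appear elsewhere in this change set, and the series root.stats.d0 with measurement s0 is a
made-up example, not something defined by the patch.

    import org.apache.iotdb.db.engine.filenodeV2.FileNodeManagerV2;
    import org.apache.iotdb.db.exception.FileNodeManagerException;
    import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
    import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
    import org.apache.iotdb.tsfile.write.record.TSRecord;
    import org.apache.iotdb.tsfile.write.record.datapoint.DataPoint;

    public class InsertPlanSketch {

      public static void main(String[] args) throws FileNodeManagerException {
        // build a TSRecord the same way the monitor/benchmark code does
        TSRecord record = new TSRecord(System.currentTimeMillis(), "root.stats.d0");
        record.addTuple(DataPoint.getDataPoint(TSDataType.INT64, "s0", "42"));

        // the new constructor copies deviceId, time, measurements, dataTypes and values
        InsertPlan plan = new InsertPlan(record);

        // after this change, writes go through FileNodeManagerV2.insert(InsertPlan)
        FileNodeManagerV2.getInstance().insert(plan);
      }
    }
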
diff --cc iotdb/src/main/java/org/apache/iotdb/db/query/control/QueryResourceManager.java
index f1a570d,f1a570d..20c9018
--- a/iotdb/src/main/java/org/apache/iotdb/db/query/control/QueryResourceManager.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/query/control/QueryResourceManager.java
@@@ -18,22 -18,22 +18,18 @@@
   */
  package org.apache.iotdb.db.query.control;
  
--import java.io.IOException;
  import java.util.ArrayList;
--import java.util.Collections;
  import java.util.HashSet;
  import java.util.List;
  import java.util.Map;
  import java.util.Set;
  import java.util.concurrent.ConcurrentHashMap;
  import java.util.concurrent.atomic.AtomicLong;
--import org.apache.iotdb.db.engine.filenode.FileNodeManager;
  import org.apache.iotdb.db.engine.filenodeV2.FileNodeManagerV2;
--import org.apache.iotdb.db.engine.querycontext.GlobalSortedSeriesDataSource;
--import org.apache.iotdb.db.engine.querycontext.OverflowSeriesDataSource;
  import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
  import org.apache.iotdb.db.engine.querycontext.QueryDataSourceV2;
  import org.apache.iotdb.db.exception.FileNodeManagerException;
++import org.apache.iotdb.db.exception.ProcessorException;
  import org.apache.iotdb.db.query.context.QueryContext;
  import org.apache.iotdb.tsfile.read.common.Path;
  import org.apache.iotdb.tsfile.read.expression.ExpressionType;
@@@ -66,29 -66,29 +62,29 @@@ public class QueryResourceManager 
     * <p>
    * For example, during a query process Q1, given the query SQL <sql>select device_1.sensor_1,
     * device_1.sensor_2, device_2.sensor_1, device_2.sensor_2</sql>, we will invoke
--   * <code>FileNodeManager.getInstance().beginQuery(device_1)</code> and
--   * <code>FileNodeManager.getInstance().beginQuery(device_2)</code> both once. Although there
++   * <code>FileNodeManagerV2.getInstance().beginQuery(device_1)</code> and
++   * <code>FileNodeManagerV2.getInstance().beginQuery(device_2)</code> each once. Although there
   * are four paths, the unique devices are only `device_1` and `device_2`. When invoking
--   * <code>FileNodeManager.getInstance().beginQuery(device_1)</code>, it returns result token `1`.
++   * <code>FileNodeManagerV2.getInstance().beginQuery(device_1)</code>, it returns result token `1`.
     * Similarly,
--   * <code>FileNodeManager.getInstance().beginQuery(device_2)</code> returns result token `2`.
++   * <code>FileNodeManagerV2.getInstance().beginQuery(device_2)</code> returns result token `2`.
     *
    * Meanwhile, another query process Q2, initiated by another client, is triggered, whose SQL
--   * statement is same to Q1. Although <code>FileNodeManager.getInstance().beginQuery(device_1)
++   * statement is the same as Q1's. Although <code>FileNodeManagerV2.getInstance().beginQuery(device_1)
     * </code>
     * and
--   * <code>FileNodeManager.getInstance().beginQuery(device_2)</code> will be invoked again, it
++   * <code>FileNodeManagerV2.getInstance().beginQuery(device_2)</code> will be invoked again, they
    * return result tokens `3` and `4`.
     *
--   * <code>FileNodeManager.getInstance().endQueryForGivenJob(device_1, 1)</code> and
--   * <code>FileNodeManager.getInstance().endQueryForGivenJob(device_2, 2)</code> must be invoked no matter how
++   * <code>FileNodeManagerV2.getInstance().endQueryForGivenJob(device_1, 1)</code> and
++   * <code>FileNodeManagerV2.getInstance().endQueryForGivenJob(device_2, 2)</code> must be invoked regardless of
    * whether query process Q1 exits normally or abnormally. The same holds for Q2:
--   * <code>FileNodeManager.getInstance().endQueryForGivenJob(device_1, 3)</code> and
--   * <code>FileNodeManager.getInstance().endQueryForGivenJob(device_2, 4)</code> must be invoked
++   * <code>FileNodeManagerV2.getInstance().endQueryForGivenJob(device_1, 3)</code> and
++   * <code>FileNodeManagerV2.getInstance().endQueryForGivenJob(device_2, 4)</code> must be invoked
     *
    * Last but not least, to ensure the correctness of the insert and query processes of IoTDB,
--   * <code>FileNodeManager.getInstance().beginQuery()</code> and
--   * <code>FileNodeManager.getInstance().endQueryForGivenJob()</code> must be executed rightly.
++   * <code>FileNodeManagerV2.getInstance().beginQuery()</code> and
++   * <code>FileNodeManagerV2.getInstance().endQueryForGivenJob()</code> must be invoked in matching pairs.
     * </p>
     */
    private ConcurrentHashMap<Long, ConcurrentHashMap<String, List<Integer>>> queryTokensMap;
@@@ -126,7 -126,7 +122,7 @@@
  
      for (String deviceId : deviceIdSet) {
        putQueryTokenForCurrentRequestThread(jobId, deviceId,
--          FileNodeManager.getInstance().beginQuery(deviceId));
++          FileNodeManagerV2.getInstance().beginQuery(deviceId));
      }
    }
  
@@@ -140,7 -140,7 +136,7 @@@
      getUniquePaths(expression, deviceIdSet);
      for (String deviceId : deviceIdSet) {
        putQueryTokenForCurrentRequestThread(jobId, deviceId,
--          FileNodeManager.getInstance().beginQuery(deviceId));
++          FileNodeManagerV2.getInstance().beginQuery(deviceId));
      }
    }
  
@@@ -157,16 -157,16 +153,16 @@@
      deviceIdSet.removeAll(remoteDeviceIdSet);
      for (String deviceId : deviceIdSet) {
        putQueryTokenForCurrentRequestThread(jobId, deviceId,
--          FileNodeManager.getInstance().beginQuery(deviceId));
++          FileNodeManagerV2.getInstance().beginQuery(deviceId));
      }
    }
  
--  public QueryDataSource getQueryDataSource(Path selectedPath,
++  public QueryDataSourceV2 getQueryDataSource(Path selectedPath,
        QueryContext context)
--      throws FileNodeManagerException {
++      throws FileNodeManagerException, ProcessorException {
  
      SingleSeriesExpression singleSeriesExpression = new SingleSeriesExpression(selectedPath, null);
--    QueryDataSource queryDataSource = FileNodeManager.getInstance()
++    QueryDataSourceV2 queryDataSource = FileNodeManagerV2.getInstance()
          .query(singleSeriesExpression, context);
  
      // add used files to current thread request cached map
@@@ -177,7 -177,7 +173,7 @@@
  
    public QueryDataSourceV2 getQueryDataSourceV2(Path selectedPath,
        QueryContext context)
--      throws FileNodeManagerException {
++      throws FileNodeManagerException, ProcessorException {
  
      SingleSeriesExpression singleSeriesExpression = new SingleSeriesExpression(selectedPath, null);
      QueryDataSourceV2 queryDataSource = FileNodeManagerV2.getInstance().query(singleSeriesExpression, context);
@@@ -199,7 -199,7 +195,7 @@@
      }
      for (Map.Entry<String, List<Integer>> entry : queryTokensMap.get(jobId).entrySet()) {
        for (int token : entry.getValue()) {
--        FileNodeManager.getInstance().endQuery(entry.getKey(), token);
++        FileNodeManagerV2.getInstance().endQuery(entry.getKey(), token);
        }
      }
      queryTokensMap.remove(jobId);
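
For reference, the token bookkeeping described in the comment above boils down to pairing every
beginQuery call with an endQuery call for the same device. The sketch below shows that contract
in isolation; it assumes, as in the calls replaced in this hunk, that beginQuery(String) returns
an int token, that endQuery(String, int) releases it, and that both may throw
FileNodeManagerException. The device name is a placeholder.

    import org.apache.iotdb.db.engine.filenodeV2.FileNodeManagerV2;
    import org.apache.iotdb.db.exception.FileNodeManagerException;

    public class QueryTokenSketch {

      public static void main(String[] args) throws FileNodeManagerException {
        String device = "root.vehicle.d0";

        // begin: reserve the device's data for this query job and remember the token
        int token = FileNodeManagerV2.getInstance().beginQuery(device);
        try {
          // ... run the query over this device ...
        } finally {
          // end: must run whether the query finishes normally or abnormally,
          // otherwise the resources reserved under this token are never released
          FileNodeManagerV2.getInstance().endQuery(device, token);
        }
      }
    }
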
diff --cc iotdb/src/main/java/org/apache/iotdb/db/query/executor/EngineQueryRouter.java
index 03c600d,03c600d..9c61dcb
--- a/iotdb/src/main/java/org/apache/iotdb/db/query/executor/EngineQueryRouter.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/query/executor/EngineQueryRouter.java
@@@ -69,13 -69,13 +69,17 @@@ public class EngineQueryRouter implemen
            return engineExecutor.execute(context);
          }
  
--      } catch (QueryFilterOptimizationException e) {
++      } catch (QueryFilterOptimizationException | IOException e) {
          throw new FileNodeManagerException(e);
        }
      } else {
        EngineExecutorWithoutTimeGenerator engineExecutor = new EngineExecutorWithoutTimeGenerator(
            queryExpression);
--      return engineExecutor.execute(context);
++      try {
++        return engineExecutor.execute(context);
++      } catch (IOException e) {
++        throw new FileNodeManagerException(e);
++      }
      }
    }
  
diff --cc iotdb/src/main/java/org/apache/iotdb/db/query/factory/SeriesReaderFactory.java
index ffd335b,ffd335b..2fa34f4
--- a/iotdb/src/main/java/org/apache/iotdb/db/query/factory/SeriesReaderFactory.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/query/factory/SeriesReaderFactory.java
@@@ -29,6 -29,6 +29,7 @@@ import org.apache.iotdb.db.engine.query
  import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
  import org.apache.iotdb.db.engine.querycontext.QueryDataSourceV2;
  import org.apache.iotdb.db.exception.FileNodeManagerException;
++import org.apache.iotdb.db.exception.ProcessorException;
  import org.apache.iotdb.db.query.context.QueryContext;
  import org.apache.iotdb.db.query.control.FileReaderManager;
  import org.apache.iotdb.db.query.control.QueryResourceManager;
@@@ -189,40 -189,40 +190,41 @@@ public class SeriesReaderFactory 
      return new SealedTsFilesReader(seriesInTsFileReader, context);
    }
  
--  /**
--   * construct ByTimestampReader, include sequential data and unsequential data.
--   *
--   * @param paths selected series path
--   * @param context query context
--   * @return the list of EngineReaderByTimeStamp
--   */
--  public static List<EngineReaderByTimeStamp> getByTimestampReadersOfSelectedPaths(
--      List<Path> paths, QueryContext context) throws IOException, FileNodeManagerException {
--
--    List<EngineReaderByTimeStamp> readersOfSelectedSeries = new ArrayList<>();
--
--    for (Path path : paths) {
--
--      QueryDataSource queryDataSource = QueryResourceManager.getInstance().getQueryDataSource(path,
--          context);
--
--      PriorityMergeReaderByTimestamp mergeReaderByTimestamp = new PriorityMergeReaderByTimestamp();
--
--      // reader for sequence data
--      SequenceDataReaderByTimestamp tsFilesReader = new SequenceDataReaderByTimestamp(
--          queryDataSource.getSeqDataSource(), context);
--      mergeReaderByTimestamp.addReaderWithPriority(tsFilesReader, 1);
--
--      // reader for unSequence data
--      PriorityMergeReaderByTimestamp unSeqMergeReader = SeriesReaderFactory.getInstance()
--          .createUnSeqMergeReaderByTimestamp(queryDataSource.getOverflowSeriesDataSource());
--      mergeReaderByTimestamp.addReaderWithPriority(unSeqMergeReader, 2);
--
--      readersOfSelectedSeries.add(mergeReaderByTimestamp);
--    }
--
--    return readersOfSelectedSeries;
--  }
++//  /**
++//   * construct ByTimestampReader, include sequential data and unsequential data.
++//   *
++//   * @param paths selected series path
++//   * @param context query context
++//   * @return the list of EngineReaderByTimeStamp
++//   */
++//  public static List<EngineReaderByTimeStamp> getByTimestampReadersOfSelectedPaths(
++//      List<Path> paths, QueryContext context)
++//      throws IOException, FileNodeManagerException, ProcessorException {
++//
++//    List<EngineReaderByTimeStamp> readersOfSelectedSeries = new ArrayList<>();
++//
++//    for (Path path : paths) {
++//
++//      QueryDataSource queryDataSource = QueryResourceManager.getInstance().getQueryDataSource(path,
++//          context);
++//
++//      PriorityMergeReaderByTimestamp mergeReaderByTimestamp = new PriorityMergeReaderByTimestamp();
++//
++//      // reader for sequence data
++//      SequenceDataReaderByTimestamp tsFilesReader = new SequenceDataReaderByTimestamp(
++//          queryDataSource.getSeqDataSource(), context);
++//      mergeReaderByTimestamp.addReaderWithPriority(tsFilesReader, 1);
++//
++//      // reader for unSequence data
++//      PriorityMergeReaderByTimestamp unSeqMergeReader = SeriesReaderFactory.getInstance()
++//          .createUnSeqMergeReaderByTimestamp(queryDataSource.getOverflowSeriesDataSource());
++//      mergeReaderByTimestamp.addReaderWithPriority(unSeqMergeReader, 2);
++//
++//      readersOfSelectedSeries.add(mergeReaderByTimestamp);
++//    }
++//
++//    return readersOfSelectedSeries;
++//  }
  
    /**
     * construct ByTimestampReader, including both sequential and unsequential data.
@@@ -232,7 -232,7 +234,8 @@@
     * @return the list of EngineReaderByTimeStamp
     */
    public static List<EngineReaderByTimeStamp> getByTimestampReadersOfSelectedPathsV2(
--      List<Path> paths, QueryContext context) throws IOException, FileNodeManagerException {
++      List<Path> paths, QueryContext context)
++      throws IOException, FileNodeManagerException, ProcessorException {
  
      List<EngineReaderByTimeStamp> readersOfSelectedSeries = new ArrayList<>();
  
diff --cc iotdb/src/main/java/org/apache/iotdb/db/query/factory/SeriesReaderFactoryImpl.java
index 455254c,79d63f8..40969f7
--- a/iotdb/src/main/java/org/apache/iotdb/db/query/factory/SeriesReaderFactoryImpl.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/query/factory/SeriesReaderFactoryImpl.java
@@@ -1,30 -19,33 +19,34 @@@
  package org.apache.iotdb.db.query.factory;
  
  import java.io.IOException;
+ import java.util.ArrayList;
  import java.util.List;
  import org.apache.iotdb.db.engine.filenodeV2.TsFileResourceV2;
- import org.apache.iotdb.db.engine.querycontext.GlobalSortedSeriesDataSourceV2;
+ import org.apache.iotdb.db.engine.querycontext.QueryDataSourceV2;
+ import org.apache.iotdb.db.exception.FileNodeManagerException;
++import org.apache.iotdb.db.exception.ProcessorException;
  import org.apache.iotdb.db.query.context.QueryContext;
- import org.apache.iotdb.db.query.reader.IAggregateReader;
+ import org.apache.iotdb.db.query.control.QueryResourceManager;
+ import org.apache.iotdb.db.query.reader.AllDataReader;
  import org.apache.iotdb.db.query.reader.IPointReader;
  import org.apache.iotdb.db.query.reader.merge.EngineReaderByTimeStamp;
+ import org.apache.iotdb.db.query.reader.merge.PriorityMergeReaderByTimestamp;
+ import org.apache.iotdb.db.query.reader.sequence.SequenceDataReaderByTimestampV2;
+ import org.apache.iotdb.db.query.reader.sequence.SequenceDataReaderV2;
  import org.apache.iotdb.tsfile.read.common.Path;
- import org.apache.iotdb.tsfile.read.expression.impl.SingleSeriesExpression;
  import org.apache.iotdb.tsfile.read.filter.basic.Filter;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
  
- public class SeriesReaderFactoryImpl implements ISeriesReaderFactory{
+ public class SeriesReaderFactoryImpl implements ISeriesReaderFactory {
  
-   @Override
-   public IPointReader createUnSeqReader(GlobalSortedSeriesDataSourceV2 overflowSeriesDataSource,
-       Filter filter) throws IOException {
+   private static final Logger logger = LoggerFactory.getLogger(SeriesReaderFactory.class);
  
-     return null;
+   private SeriesReaderFactoryImpl() {
    }
  
-   @Override
-   public IAggregateReader createSeqReader(GlobalSortedSeriesDataSourceV2 overflowSeriesDataSource,
-       Filter filter) throws IOException {
-     return null;
+   public static SeriesReaderFactoryImpl getInstance() {
+     return SeriesReaderFactoryHelper.INSTANCE;
    }
  
    @Override
@@@ -36,12 -56,70 +57,80 @@@
  
    @Override
    public List<EngineReaderByTimeStamp> createByTimestampReadersOfSelectedPaths(List<Path> paths,
-       QueryContext context) {
+       QueryContext context) throws FileNodeManagerException {
+     List<EngineReaderByTimeStamp> readersOfSelectedSeries = new ArrayList<>();
+ 
+     for (Path path : paths) {
+ 
 -      QueryDataSourceV2 queryDataSource = QueryResourceManager.getInstance()
 -          .getQueryDataSourceV2(path,
 -              context);
++      QueryDataSourceV2 queryDataSource = null;
++      try {
++        queryDataSource = QueryResourceManager.getInstance()
++            .getQueryDataSourceV2(path,
++                context);
++      } catch (ProcessorException e) {
++        throw new FileNodeManagerException(e);
++      }
+ 
+       PriorityMergeReaderByTimestamp mergeReaderByTimestamp = new PriorityMergeReaderByTimestamp();
+ 
+       // reader for sequence data
+       SequenceDataReaderByTimestampV2 tsFilesReader = new SequenceDataReaderByTimestampV2(path,
+           queryDataSource.getSeqResources(), context);
+       mergeReaderByTimestamp.addReaderWithPriority(tsFilesReader, 1);
+ 
+       // reader for unSequence data
+       //TODO add create unseq reader
+       PriorityMergeReaderByTimestamp unSeqMergeReader = createUnSeqByTimestampReader(
+           queryDataSource.getUnseqResources());
+       mergeReaderByTimestamp.addReaderWithPriority(unSeqMergeReader, 2);
+ 
+       readersOfSelectedSeries.add(mergeReaderByTimestamp);
+     }
+ 
+     return readersOfSelectedSeries;
+   }
+ 
+   private PriorityMergeReaderByTimestamp createUnSeqByTimestampReader(
+       List<TsFileResourceV2> unseqResources) {
      return null;
    }
  
    @Override
-   public List<IPointReader> createReadersOfSelectedPaths(List<Path> paths, QueryContext context) {
-     return null;
+   public IPointReader createAllDataReader(Path path, Filter timeFilter, QueryContext context)
+       throws FileNodeManagerException, IOException {
 -    QueryDataSourceV2 queryDataSource = QueryResourceManager.getInstance()
 -        .getQueryDataSourceV2(path, context);
++    QueryDataSourceV2 queryDataSource = null;
++    try {
++      queryDataSource = QueryResourceManager.getInstance()
++          .getQueryDataSourceV2(path, context);
++    } catch (ProcessorException e) {
++      throw new FileNodeManagerException(e);
++    }
+ 
+     // sequence reader over the sequence TsFiles of this series
+     SequenceDataReaderV2 tsFilesReader = new SequenceDataReaderV2(queryDataSource.getSeriesPath(),
+         queryDataSource.getSeqResources(), timeFilter, context);
+
+     // unseq reader for all chunk groups in the unsequence files
+     IPointReader unSeqMergeReader = createUnSeqReader(path, queryDataSource.getUnseqResources(),
+         timeFilter);
+ 
+     if (!tsFilesReader.hasNext()) {
+       // only unsequence data exists.
+       return unSeqMergeReader;
+     } else {
+       // merge sequence data with unsequence data.
+       return new AllDataReader(tsFilesReader, unSeqMergeReader);
+     }
+   }
+ 
+   private static class SeriesReaderFactoryHelper {
+ 
+     private static final SeriesReaderFactoryImpl INSTANCE = new SeriesReaderFactoryImpl();
+ 
+     private SeriesReaderFactoryHelper() {
+     }
    }
  }
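
For reference, SeriesReaderFactoryImpl above adopts the initialization-on-demand holder idiom
for its singleton. A self-contained illustration of the idiom follows; the class name here is
made up and not part of the patch.

    public final class LazySingleton {

      private LazySingleton() {
        // private constructor: the only instance is created by the holder below
      }

      public static LazySingleton getInstance() {
        // the nested class is loaded on first access to getInstance(), so the instance
        // is created lazily and class loading guarantees thread-safe initialization
        return Holder.INSTANCE;
      }

      private static class Holder {

        private static final LazySingleton INSTANCE = new LazySingleton();
      }
    }
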
diff --cc iotdb/src/main/java/org/apache/iotdb/db/service/CloseMergeService.java
index 5db4a78,5db4a78..70c69a8
--- a/iotdb/src/main/java/org/apache/iotdb/db/service/CloseMergeService.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/service/CloseMergeService.java
@@@ -26,7 -26,7 +26,7 @@@ import org.apache.iotdb.db.concurrent.I
  import org.apache.iotdb.db.concurrent.ThreadName;
  import org.apache.iotdb.db.conf.IoTDBConfig;
  import org.apache.iotdb.db.conf.IoTDBDescriptor;
--import org.apache.iotdb.db.engine.filenode.FileNodeManager;
++import org.apache.iotdb.db.engine.filenodeV2.FileNodeManagerV2;
  import org.apache.iotdb.db.exception.StartupException;
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
@@@ -176,7 -176,7 +176,7 @@@ public class CloseMergeService implemen
                + "time interval is {}s.", startDateTime, endDateTime, timeInterval / 1000);
        mergeAllLastTime = System.currentTimeMillis();
        try {
--        FileNodeManager.getInstance().mergeAll();
++        FileNodeManagerV2.getInstance().mergeAll();
        } catch (Exception e) {
          LOGGER.error("Merge all error.", e);
        }
@@@ -202,7 -202,7 +202,7 @@@
                + "time interval is {}s.", startDateTime, endDateTime, timeInterval / 1000);
        closeAllLastTime = System.currentTimeMillis();
        try {
--        FileNodeManager.getInstance().closeAll();
++        FileNodeManagerV2.getInstance().syncCloseAllProcessor();
        } catch (Exception e) {
          LOGGER.error("setCloseMark all error.", e);
        }
diff --cc iotdb/src/main/java/org/apache/iotdb/db/service/IoTDB.java
index 2e8f626,2e8f626..3e70542
--- a/iotdb/src/main/java/org/apache/iotdb/db/service/IoTDB.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/service/IoTDB.java
@@@ -18,24 -18,24 +18,17 @@@
   */
  package org.apache.iotdb.db.service;
  
--import java.io.IOException;
--import java.util.List;
  import org.apache.iotdb.db.concurrent.IoTDBDefaultThreadExceptionHandler;
--import org.apache.iotdb.db.conf.IoTDBConfig;
  import org.apache.iotdb.db.conf.IoTDBConstant;
  import org.apache.iotdb.db.conf.IoTDBDescriptor;
--import org.apache.iotdb.db.engine.filenode.FileNodeManager;
++import org.apache.iotdb.db.engine.filenodeV2.FileNodeManagerV2;
  import org.apache.iotdb.db.engine.memcontrol.BasicMemController;
  import org.apache.iotdb.db.exception.FileNodeManagerException;
--import org.apache.iotdb.db.exception.PathErrorException;
--import org.apache.iotdb.db.exception.RecoverException;
  import org.apache.iotdb.db.exception.StartupException;
  import org.apache.iotdb.db.exception.builder.ExceptionBuilder;
--import org.apache.iotdb.db.metadata.MManager;
  import org.apache.iotdb.db.monitor.StatMonitor;
  import org.apache.iotdb.db.sync.receiver.SyncServiceManager;
  import org.apache.iotdb.db.writelog.manager.MultiFileLogNodeManager;
--import org.apache.iotdb.db.writelog.manager.WriteLogNodeManager;
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
@@@ -84,7 -84,7 +77,7 @@@ public class IoTDB implements IoTDBMBea
  
      boolean enableWAL = IoTDBDescriptor.getInstance().getConfig().isEnableWal();
      IoTDBDescriptor.getInstance().getConfig().setEnableWal(false);
--    FileNodeManager.getInstance().recovery();
++    FileNodeManagerV2.getInstance().recovery();
      IoTDBDescriptor.getInstance().getConfig().setEnableWal(enableWAL);
  
      // When registering statMonitor, we should start recovering some statistics
@@@ -94,7 -94,7 +87,7 @@@
        StatMonitor.getInstance().recovery();
      }
  
--    registerManager.register(FileNodeManager.getInstance());
++    registerManager.register(FileNodeManagerV2.getInstance());
      registerManager.register(MultiFileLogNodeManager.getInstance());
      registerManager.register(JMXService.getInstance());
      registerManager.register(JDBCService.getInstance());
diff --cc iotdb/src/main/java/org/apache/iotdb/db/service/TSServiceImpl.java
index 77746e1,77746e1..d0f13de
--- a/iotdb/src/main/java/org/apache/iotdb/db/service/TSServiceImpl.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/service/TSServiceImpl.java
@@@ -39,7 -39,7 +39,7 @@@ import org.apache.iotdb.db.auth.authori
  import org.apache.iotdb.db.conf.IoTDBConfig;
  import org.apache.iotdb.db.conf.IoTDBConstant;
  import org.apache.iotdb.db.conf.IoTDBDescriptor;
--import org.apache.iotdb.db.engine.filenode.FileNodeManager;
++import org.apache.iotdb.db.engine.filenodeV2.FileNodeManagerV2;
  import org.apache.iotdb.db.exception.ArgsErrorException;
  import org.apache.iotdb.db.exception.FileNodeManagerException;
  import org.apache.iotdb.db.exception.PathErrorException;
@@@ -416,7 -416,7 +416,7 @@@ public class TSServiceImpl implements T
      switch (statement) {
        case "flushMetadata":
          try {
--          FileNodeManager.getInstance().closeAll();
++          FileNodeManagerV2.getInstance().syncCloseAllProcessor();
          } catch (FileNodeManagerException e) {
            LOGGER.error("meet error while FileNodeManager closing all!", e);
            throw new IOException(e);
@@@ -424,7 -424,7 +424,8 @@@
          return true;
        case "merge":
          try {
--          FileNodeManager.getInstance().mergeAll();
++          // TODO: call a real merge here once FileNodeManagerV2 implements mergeAll()
++          FileNodeManagerV2.getInstance().syncCloseAllProcessor();
          } catch (FileNodeManagerException e) {
            LOGGER.error("meet error while FileNodeManager merging all!", e);
            throw new IOException(e);
diff --cc iotdb/src/main/java/org/apache/iotdb/db/sync/receiver/SyncServiceImpl.java
index 2eb4c46,5d06a13..c98b7d6
--- a/iotdb/src/main/java/org/apache/iotdb/db/sync/receiver/SyncServiceImpl.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/sync/receiver/SyncServiceImpl.java
@@@ -42,10 -42,10 +42,10 @@@ import org.apache.commons.lang3.StringU
  import org.apache.iotdb.db.concurrent.ThreadName;
  import org.apache.iotdb.db.conf.IoTDBConfig;
  import org.apache.iotdb.db.conf.IoTDBDescriptor;
 -import org.apache.iotdb.db.conf.directories.Directories;
 -import org.apache.iotdb.db.engine.filenode.FileNodeManager;
 +import org.apache.iotdb.db.conf.directories.DirectoryManager;
- import org.apache.iotdb.db.engine.filenode.FileNodeManager;
  import org.apache.iotdb.db.engine.filenode.OverflowChangeType;
  import org.apache.iotdb.db.engine.filenode.TsFileResource;
++import org.apache.iotdb.db.engine.filenodeV2.FileNodeManagerV2;
  import org.apache.iotdb.db.exception.FileNodeManagerException;
  import org.apache.iotdb.db.exception.MetadataArgsErrorException;
  import org.apache.iotdb.db.exception.PathErrorException;
@@@ -81,7 -81,7 +81,7 @@@ public class SyncServiceImpl implement
  
    private static final Logger logger = LoggerFactory.getLogger(SyncServiceImpl.class);
  
--  private static final FileNodeManager fileNodeManager = FileNodeManager.getInstance();
++  private static final FileNodeManagerV2 fileNodeManager = FileNodeManagerV2.getInstance();
    /**
     * Metadata manager
     **/
@@@ -556,9 -556,9 +556,8 @@@
                }
              }
            }
--          if (insertExecutor
-               .insert(deviceId, record.getTimestamp(), measurementList.toArray(new String[]{}),
 -              .multiInsert(deviceId, record.getTimestamp(), measurementList.toArray(new String[]{}),
--                  insertValues.toArray(new String[]{})) <= 0) {
++          if (insertExecutor.insert(new InsertPlan(deviceId, record.getTimestamp(),
++              measurementList.toArray(new String[0]), insertValues.toArray(new String[0]))) <= 0) {
              throw new IOException("Inserting series data to IoTDB engine has failed.");
            }
          }
@@@ -634,8 -634,8 +633,7 @@@
            /** If there is no overlapping data for the timeseries, insert all data from the sync file **/
            if (originDataPoints.isEmpty()) {
              for (InsertPlan insertPlan : newDataPoints) {
-               if (insertExecutor.insert(insertPlan.getDeviceId(), insertPlan.getTime(),
 -              if (insertExecutor.multiInsert(insertPlan.getDeviceId(), insertPlan.getTime(),
--                  insertPlan.getMeasurements(), insertPlan.getValues()) <= 0) {
++              if (insertExecutor.insert(insertPlan) <= 0) {
                  throw new IOException("Inserting series data to IoTDB engine has failed.");
                }
              }
@@@ -643,8 -643,8 +641,7 @@@
              /** Compare each data point and keep only the valid ones **/
              for (InsertPlan insertPlan : newDataPoints) {
                if (!originDataPoints.contains(insertPlan)) {
-                 if (insertExecutor.insert(insertPlan.getDeviceId(), insertPlan.getTime(),
 -                if (insertExecutor.multiInsert(insertPlan.getDeviceId(), insertPlan.getTime(),
--                    insertPlan.getMeasurements(), insertPlan.getValues()) <= 0) {
++                if (insertExecutor.insert(insertPlan) <= 0) {
                    throw new IOException("Inserting series data to IoTDB engine has failed.");
                  }
                }
diff --cc iotdb/src/main/java/org/apache/iotdb/db/utils/LoadDataUtils.java
index da08382,da08382..7f1e660
--- a/iotdb/src/main/java/org/apache/iotdb/db/utils/LoadDataUtils.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/utils/LoadDataUtils.java
@@@ -31,11 -31,11 +31,12 @@@ import java.util.List
  import java.util.Set;
  import org.apache.iotdb.db.conf.IoTDBConfig;
  import org.apache.iotdb.db.conf.IoTDBDescriptor;
--import org.apache.iotdb.db.engine.filenode.FileNodeManager;
++import org.apache.iotdb.db.engine.filenodeV2.FileNodeManagerV2;
  import org.apache.iotdb.db.exception.FileNodeManagerException;
  import org.apache.iotdb.db.exception.PathErrorException;
  import org.apache.iotdb.db.exception.ProcessorException;
  import org.apache.iotdb.db.metadata.MManager;
++import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
  import org.apache.iotdb.tsfile.exception.write.WriteProcessException;
  import org.apache.iotdb.tsfile.write.record.TSRecord;
  import org.apache.iotdb.tsfile.write.schema.FileSchema;
@@@ -57,7 -57,7 +58,7 @@@ public class LoadDataUtils 
    private int writeInstanceThreshold;
    private boolean hasExtra = false;
    private long totalPointCount = 0;
--  private FileNodeManager fileNodeManager;
++  private FileNodeManagerV2 fileNodeManager;
    private IoTDBConfig conf = IoTDBDescriptor.getInstance().getConfig();
  
    /**
@@@ -65,7 -65,7 +66,7 @@@
     */
    public LoadDataUtils() {
      writeInstanceMap = new HashSet<>();
--    fileNodeManager = FileNodeManager.getInstance();
++    fileNodeManager = FileNodeManagerV2.getInstance();
      writeInstanceThreshold = conf.getWriteInstanceThreshold();
    }
  
@@@ -151,7 -151,7 +152,7 @@@
      }
      // appeared before, insert directly
      try {
--      fileNodeManager.insert(record, false);
++      fileNodeManager.insert(new InsertPlan(record));
      } catch (FileNodeManagerException e) {
        logger.error("failed when insert into fileNodeManager, record:{}", line, e);
      }
diff --cc iotdb/src/main/java/org/apache/iotdb/db/writelog/recover/LogReplayer.java
index e074a31,960b78d..ff40053
--- a/iotdb/src/main/java/org/apache/iotdb/db/writelog/recover/LogReplayer.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/writelog/recover/LogReplayer.java
@@@ -60,8 -62,8 +63,8 @@@ public class LogReplayer 
    public LogReplayer(String logNodePrefix, String insertFilePath,
        ModificationFile modFile,
        VersionController versionController,
 -      TsFileResource currentTsFileResource,
 +      TsFileResourceV2 currentTsFileResource,
-       FileSchema fileSchema, IMemTable memTable) {
+       FileSchema fileSchema, IMemTable memTable, boolean acceptDuplication) {
      this.logNodePrefix = logNodePrefix;
      this.insertFilePath = insertFilePath;
      this.modFile = modFile;
@@@ -111,9 -114,11 +115,10 @@@
    }
  
    private void replayInsert(InsertPlan insertPlan) {
 -    TSRecord tsRecord = new TSRecord(insertPlan.getTime(), insertPlan.getDeviceId());
      if (currentTsFileResource != null) {
-       // the last chunk group may contain the same data with the logs, ignore such logs
-       if (currentTsFileResource.getEndTimeMap().get(insertPlan.getDeviceId()) >= insertPlan.getTime()) {
+       // the last chunk group may contain the same data as the logs; ignore such logs for a seq file
 -      if (currentTsFileResource.getEndTime(insertPlan.getDeviceId()) >= insertPlan.getTime() &&
 -      !acceptDuplication) {
++      if (currentTsFileResource.getEndTimeMap().get(insertPlan.getDeviceId()) >= insertPlan.getTime() &&
++          !acceptDuplication) {
          return;
        }
        currentTsFileResource.updateTime(insertPlan.getDeviceId(), insertPlan.getTime());
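
For reference, the new acceptDuplication flag above distinguishes sequence from unsequence
recovery: for a sequence file, a WAL entry whose timestamp is not newer than the device's
recorded end time is assumed to be on disk already and is skipped, while an unsequence file
replays everything. A simplified, self-contained sketch of that rule follows; it uses plain
types instead of the real InsertPlan/TsFileResourceV2, and the names are illustrative.

    import java.util.Map;

    public class ReplayFilterSketch {

      /**
       * Returns true if a WAL entry for the given device and timestamp should be replayed
       * into the memtable. For a sequence file (acceptDuplication == false), an entry whose
       * timestamp is not newer than the device's recorded end time is already persisted and
       * must be skipped; an unsequence file accepts duplicates, so everything is replayed.
       */
      static boolean shouldReplay(Map<String, Long> endTimeMap, String deviceId,
          long time, boolean acceptDuplication) {
        Long endTime = endTimeMap.get(deviceId);
        if (endTime == null) {
          // no data recovered for this device yet, so the entry cannot be a duplicate
          return true;
        }
        return acceptDuplication || time > endTime;
      }
    }
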
diff --cc iotdb/src/main/java/org/apache/iotdb/db/writelog/recover/UnSeqTsFileRecoverPerformer.java
index ab0736e,756f0a7..116fadf
--- a/iotdb/src/main/java/org/apache/iotdb/db/writelog/recover/UnSeqTsFileRecoverPerformer.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/writelog/recover/UnSeqTsFileRecoverPerformer.java
@@@ -47,12 -46,12 +47,12 @@@ public class UnSeqTsFileRecoverPerforme
    private FileSchema fileSchema;
    private VersionController versionController;
    private LogReplayer logReplayer;
 -  private TsFileResource tsFileResource;
 +  private TsFileResourceV2 tsFileResource;
  
-   public SeqTsFileRecoverPerformer(String logNodePrefix,
+   public UnSeqTsFileRecoverPerformer(String logNodePrefix,
        FileSchema fileSchema, VersionController versionController,
 -      TsFileResource currentTsFileResource) {
 -    this.insertFilePath = currentTsFileResource.getFilePath();
 +      TsFileResourceV2 currentTsFileResource) {
 +    this.insertFilePath = currentTsFileResource.getFile().getPath();
      this.logNodePrefix = logNodePrefix;
      this.fileSchema = fileSchema;
      this.versionController = versionController;
diff --cc iotdb/src/test/java/org/apache/iotdb/db/engine/filenode/FileNodeProcessorStoreTest.java
index 891695d,891695d..0000000
deleted file mode 100644,100644
--- a/iotdb/src/test/java/org/apache/iotdb/db/engine/filenode/FileNodeProcessorStoreTest.java
+++ /dev/null
@@@ -1,91 -1,91 +1,0 @@@
--/**
-- * Licensed to the Apache Software Foundation (ASF) under one
-- * or more contributor license agreements.  See the NOTICE file
-- * distributed with this work for additional information
-- * regarding copyright ownership.  The ASF licenses this file
-- * to you under the Apache License, Version 2.0 (the
-- * "License"); you may not use this file except in compliance
-- * with the License.  You may obtain a copy of the License at
-- *
-- *     http://www.apache.org/licenses/LICENSE-2.0
-- *
-- * Unless required by applicable law or agreed to in writing,
-- * software distributed under the License is distributed on an
-- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-- * KIND, either express or implied.  See the License for the
-- * specific language governing permissions and limitations
-- * under the License.
-- */
--package org.apache.iotdb.db.engine.filenode;
--
--import static org.junit.Assert.assertEquals;
--
--import java.io.ByteArrayInputStream;
--import java.io.ByteArrayOutputStream;
--import java.util.ArrayList;
--import java.util.HashMap;
--import java.util.List;
--import java.util.Map;
--import org.junit.After;
--import org.junit.Before;
--import org.junit.Test;
--
--public class FileNodeProcessorStoreTest {
--
--  private boolean isOverflowed;
--  private Map<String, Long> lastUpdateTimeMap;
--  private TsFileResource emptyTsFileResource;
--  private List<TsFileResource> newFileNodes;
--  private int numOfMergeFile;
--  private FileNodeProcessorStatus fileNodeProcessorStatus;
--
--  private FileNodeProcessorStore fileNodeProcessorStore;
--
--  @Before
--  public void setUp() throws Exception {
--    isOverflowed = true;
--    lastUpdateTimeMap = new HashMap<>();
--    for (int i = 0; i < 10; i++) {
--      lastUpdateTimeMap.put("d" + i, (long) i);
--    }
--    emptyTsFileResource = TsFileResourceTest.constructTsfileResource();
--    newFileNodes = new ArrayList<>();
--    for (int i = 0; i < 5; i++) {
--      newFileNodes.add(TsFileResourceTest.constructTsfileResource());
--    }
--    numOfMergeFile = 5;
--    fileNodeProcessorStatus = FileNodeProcessorStatus.MERGING_WRITE;
--    fileNodeProcessorStore = new FileNodeProcessorStore(isOverflowed, lastUpdateTimeMap,
--        emptyTsFileResource, newFileNodes, fileNodeProcessorStatus, numOfMergeFile);
--  }
--
--  @After
--  public void tearDown() throws Exception {
--
--  }
--
--  @Test
--  public void testSerDeialize() throws Exception {
--    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
--    fileNodeProcessorStore.serialize(outputStream);
--    ByteArrayInputStream inputStream = new ByteArrayInputStream(outputStream.toByteArray());
--    FileNodeProcessorStore deFileNodeProcessorStore = FileNodeProcessorStore
--        .deSerialize(inputStream);
--
--    assertEquals(fileNodeProcessorStore.getLastUpdateTimeMap(),
--        deFileNodeProcessorStore.getLastUpdateTimeMap());
--    assertEquals(fileNodeProcessorStore.getNumOfMergeFile(),
--        deFileNodeProcessorStore.getNumOfMergeFile());
--    assertEquals(fileNodeProcessorStore.getFileNodeProcessorStatus(),
--        deFileNodeProcessorStore.getFileNodeProcessorStatus());
--    TsFileResourceTest.assertTsfileRecource(fileNodeProcessorStore.getEmptyTsFileResource(),
--        deFileNodeProcessorStore.getEmptyTsFileResource());
--    assertEquals(fileNodeProcessorStore.getNewFileNodes().size(),
--        deFileNodeProcessorStore.getNewFileNodes().size());
--    for (int i = 0; i < fileNodeProcessorStore.getNewFileNodes().size(); i++) {
--      TsFileResourceTest.assertTsfileRecource(fileNodeProcessorStore.getNewFileNodes().get(i),
--          deFileNodeProcessorStore.getNewFileNodes().get(i));
--    }
--  }
--
--}
diff --cc iotdb/src/test/java/org/apache/iotdb/db/engine/filenode/FileNodeProcessorTest.java
index e135d42,e135d42..0000000
deleted file mode 100644,100644
--- a/iotdb/src/test/java/org/apache/iotdb/db/engine/filenode/FileNodeProcessorTest.java
+++ /dev/null
@@@ -1,134 -1,134 +1,0 @@@
--/**
-- * Licensed to the Apache Software Foundation (ASF) under one
-- * or more contributor license agreements.  See the NOTICE file
-- * distributed with this work for additional information
-- * regarding copyright ownership.  The ASF licenses this file
-- * to you under the Apache License, Version 2.0 (the
-- * "License"); you may not use this file except in compliance
-- * with the License.  You may obtain a copy of the License at
-- *
-- *     http://www.apache.org/licenses/LICENSE-2.0
-- *
-- * Unless required by applicable law or agreed to in writing,
-- * software distributed under the License is distributed on an
-- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-- * KIND, either express or implied.  See the License for the
-- * specific language governing permissions and limitations
-- * under the License.
-- */
--
--package org.apache.iotdb.db.engine.filenode;
--
--import static org.junit.Assert.assertEquals;
--import static org.junit.Assert.assertTrue;
--
--import java.io.IOException;
--import java.util.concurrent.ExecutionException;
--import org.apache.iotdb.db.conf.IoTDBDescriptor;
--import org.apache.iotdb.db.engine.MetadataManagerHelper;
--import org.apache.iotdb.db.engine.bufferwrite.BufferWriteProcessor;
--import org.apache.iotdb.db.exception.ArgsErrorException;
--import org.apache.iotdb.db.exception.BufferWriteProcessorException;
--import org.apache.iotdb.db.exception.FileNodeManagerException;
--import org.apache.iotdb.db.exception.FileNodeProcessorException;
--import org.apache.iotdb.db.exception.ProcessorException;
--import org.apache.iotdb.db.exception.StartupException;
--import org.apache.iotdb.db.exception.qp.QueryProcessorException;
--import org.apache.iotdb.db.qp.QueryProcessor;
--import org.apache.iotdb.db.qp.executor.OverflowQPExecutor;
--import org.apache.iotdb.db.qp.executor.QueryProcessExecutor;
--import org.apache.iotdb.db.qp.physical.crud.QueryPlan;
--import org.apache.iotdb.db.utils.EnvironmentUtils;
--import org.apache.iotdb.tsfile.exception.filter.QueryFilterOptimizationException;
--import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
--import org.apache.iotdb.tsfile.read.common.Path;
--import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet;
--import org.apache.iotdb.tsfile.write.record.TSRecord;
--import org.apache.iotdb.tsfile.write.record.datapoint.DataPoint;
--import org.junit.After;
--import org.junit.Assert;
--import org.junit.Before;
--import org.junit.Test;
--
--public class FileNodeProcessorTest {
--
--  FileNodeManager fileNodeManager;
--  FileNodeProcessor processor;
--  private QueryProcessExecutor queryExecutor;
--  private QueryProcessor queryProcessor;
--  private String deviceId = "root.vehicle.d0";
--  private String measurementId = "s0";
--  private TSDataType dataType = TSDataType.INT32;
--  private String processName = "root.vehicle";
--
--  @Before
--  public void setUp() throws FileNodeProcessorException, StartupException, IOException {
--    // init metadata
--    EnvironmentUtils.envSetUp();
--    MetadataManagerHelper.initMetadata();
--    fileNodeManager = FileNodeManager.getInstance();
--    processor = new FileNodeProcessor(IoTDBDescriptor.getInstance().getConfig().getFileNodeDir(), processName);
--    queryExecutor = new OverflowQPExecutor();
--    queryProcessor = new QueryProcessor(queryExecutor);
--  }
--
--  @After
--  public void tearDown() throws IOException, FileNodeManagerException {
--    EnvironmentUtils.cleanEnv();
--  }
--
--  @Test
--  public void testAsyncClose()
--      throws FileNodeProcessorException, BufferWriteProcessorException, ExecutionException, InterruptedException {
--
--    BufferWriteProcessor bwProcessor;
--    int i =1;
--    for (int j = 1; j < 5; j++) {
--      bwProcessor = processor.getBufferWriteProcessor(processName, System.currentTimeMillis());
--      for (; i <= 100 * j; i++) {
--        bwProcessor.write(deviceId, measurementId, i, dataType, String.valueOf(i));
--      }
--      processor.closeBufferWrite();
--    }
--    Assert.assertNotEquals(0, processor.getClosingBufferWriteProcessor().size());
--    processor.waitforAllClosed();
--    Assert.assertEquals(0, processor.getClosingBufferWriteProcessor().size());
--
--  }
--
--  @Test
--  public void testBufferWriteQuery()
--      throws ProcessorException, ArgsErrorException, QueryProcessorException, FileNodeManagerException, QueryFilterOptimizationException, IOException {
--
--    int i =1;
--    for (int j = 1; j <= 5; j++) {
--      for (; i <= 100 * j; i++) {
--        TSRecord tsRecord = new TSRecord(i, deviceId).addTuple(DataPoint.getDataPoint(dataType, measurementId, String.valueOf(i)));
--        fileNodeManager.insert(tsRecord, false);
--      }
--      fileNodeManager.closeAll();
--    }
--    QueryPlan queryPlan = (QueryPlan) queryProcessor
--        .parseSQLToPhysicalPlan("select " + new Path(deviceId.replace("root.", ""), measurementId).getFullPath() + " from root");
--
--    int count = 0;
--    QueryDataSet dataSet = queryExecutor.processQuery(queryPlan, EnvironmentUtils.TEST_QUERY_CONTEXT);
--    assertTrue(dataSet.hasNext());
--    while (dataSet.hasNext()) {
--      count++;
--      assertEquals(count, dataSet.next().getFields().get(0).getIntV());
--    }
--    assertEquals(500, count);
--
--    processor.waitforAllClosed();
--
--    count = 0;
--    dataSet = queryExecutor.processQuery(queryPlan, EnvironmentUtils.TEST_QUERY_CONTEXT);
--    assertTrue(dataSet.hasNext());
--    while (dataSet.hasNext()) {
--      count++;
--      assertEquals(count, dataSet.next().getFields().get(0).getIntV());
--    }
--    assertEquals(500, count);
--  }
--}
diff --cc iotdb/src/test/java/org/apache/iotdb/db/engine/filenode/TsFileResourceTest.java
index a1c9d24,a1c9d24..0000000
deleted file mode 100644,100644
--- a/iotdb/src/test/java/org/apache/iotdb/db/engine/filenode/TsFileResourceTest.java
+++ /dev/null
@@@ -1,98 -1,98 +1,0 @@@
--/**
-- * Licensed to the Apache Software Foundation (ASF) under one
-- * or more contributor license agreements.  See the NOTICE file
-- * distributed with this work for additional information
-- * regarding copyright ownership.  The ASF licenses this file
-- * to you under the Apache License, Version 2.0 (the
-- * "License"); you may not use this file except in compliance
-- * with the License.  You may obtain a copy of the License at
-- *
-- *     http://www.apache.org/licenses/LICENSE-2.0
-- *
-- * Unless required by applicable law or agreed to in writing,
-- * software distributed under the License is distributed on an
-- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-- * KIND, either express or implied.  See the License for the
-- * specific language governing permissions and limitations
-- * under the License.
-- */
--package org.apache.iotdb.db.engine.filenode;
--
--import static org.junit.Assert.assertEquals;
--
--import java.io.ByteArrayInputStream;
--import java.io.ByteArrayOutputStream;
--import java.io.File;
--import java.io.IOException;
--import java.util.Collections;
--import java.util.HashMap;
--import java.util.Map;
--import org.junit.After;
--import org.junit.Before;
--import org.junit.Test;
--
--public class TsFileResourceTest {
--
--
--  private TsFileResource tsFileResource;
--
--  public static TsFileResource constructTsfileResource() {
--    TsFileResource tsFileResource;
--    String relativePath = "data/data/settled/b/relativePath";
--    Map<String, Long> startTimes = new HashMap<>();
--    Map<String, Long> endTimes = new HashMap<>();
--
--    tsFileResource = new TsFileResource(Collections.emptyMap(), Collections.emptyMap(),
--        OverflowChangeType.MERGING_CHANGE, new File(relativePath));
--    for (int i = 0; i < 10; i++) {
--      startTimes.put("d" + i, (long) i);
--    }
--    for (int i = 0; i < 10; i++) {
--      endTimes.put("d" + i, (long) (i + 10));
--    }
--    tsFileResource.setStartTimeMap(startTimes);
--    tsFileResource.setEndTimeMap(endTimes);
--    for (int i = 0; i < 5; i++) {
--      tsFileResource.addMergeChanged("d" + i);
--    }
--    return tsFileResource;
--  }
--
--  @Before
--  public void setUp() throws Exception {
--    this.tsFileResource = constructTsfileResource();
--  }
--
--  @After
--  public void tearDown() throws Exception {
--
--  }
--
--  @Test
--  public void testSerDeialize() throws Exception {
--    ByteArrayOutputStream outputStream = new ByteArrayOutputStream(0);
--    tsFileResource.serialize(outputStream);
--    ByteArrayInputStream inputStream = new ByteArrayInputStream(outputStream.toByteArray());
--    TsFileResource deTsfileResource = TsFileResource.deSerialize(inputStream);
--    assertTsfileRecource(tsFileResource, deTsfileResource);
--  }
--  @Test
--  public void testSerdeializeCornerCase() throws IOException {
--    ByteArrayOutputStream outputStream = new ByteArrayOutputStream(0);
--    tsFileResource.setFile(null);
--    tsFileResource.serialize(outputStream);
--    ByteArrayInputStream inputStream = new ByteArrayInputStream(outputStream.toByteArray());
--    TsFileResource deTsfileResource = TsFileResource.deSerialize(inputStream);
--    assertTsfileRecource(tsFileResource,deTsfileResource);
--  }
--
--  public static void assertTsfileRecource(TsFileResource tsFileResource,
--      TsFileResource deTsfileResource) {
--    assertEquals(tsFileResource.getBaseDirIndex(), deTsfileResource.getBaseDirIndex());
--    assertEquals(tsFileResource.getFile(), deTsfileResource.getFile());
--    assertEquals(tsFileResource.getOverflowChangeType(), deTsfileResource.getOverflowChangeType());
--    assertEquals(tsFileResource.getStartTimeMap(), deTsfileResource.getStartTimeMap());
--    assertEquals(tsFileResource.getEndTimeMap(), deTsfileResource.getEndTimeMap());
--    assertEquals(tsFileResource.getMergeChanged(), deTsfileResource.getMergeChanged());
--  }
--}
diff --cc iotdb/src/test/java/org/apache/iotdb/db/engine/filenodeV2/FileNodeManagerBenchmark.java
index fdd0fe7,fdd0fe7..20e72c3
--- a/iotdb/src/test/java/org/apache/iotdb/db/engine/filenodeV2/FileNodeManagerBenchmark.java
+++ b/iotdb/src/test/java/org/apache/iotdb/db/engine/filenodeV2/FileNodeManagerBenchmark.java
@@@ -21,11 -21,11 +21,12 @@@ package org.apache.iotdb.db.engine.file
  import java.io.IOException;
  import java.util.concurrent.CountDownLatch;
  import java.util.concurrent.atomic.AtomicLong;
--import org.apache.iotdb.db.engine.filenode.FileNodeManager;
++import org.apache.iotdb.db.engine.filenodeV2.FileNodeManagerV2;
  import org.apache.iotdb.db.exception.FileNodeManagerException;
  import org.apache.iotdb.db.exception.MetadataArgsErrorException;
  import org.apache.iotdb.db.exception.PathErrorException;
  import org.apache.iotdb.db.metadata.MManager;
++import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
  import org.apache.iotdb.db.sync.test.RandomNum;
  import org.apache.iotdb.db.utils.EnvironmentUtils;
  import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
@@@ -113,7 -113,7 +114,7 @@@ public class FileNodeManagerBenchmark 
            long time = RandomNum.getRandomLong(1, seed);
            String deltaObject = devices[(int) (time % numOfDevice)];
            TSRecord tsRecord = getRecord(deltaObject, time);
--          FileNodeManager.getInstance().insert(tsRecord, true);
++          FileNodeManagerV2.getInstance().insert(new InsertPlan(tsRecord));
          }
        } catch (FileNodeManagerException e) {
          e.printStackTrace();
diff --cc iotdb/src/test/java/org/apache/iotdb/db/engine/filenodeV2/FileNodeProcessorV2Test.java
index 31796c6,31796c6..8c7b761
--- a/iotdb/src/test/java/org/apache/iotdb/db/engine/filenodeV2/FileNodeProcessorV2Test.java
+++ b/iotdb/src/test/java/org/apache/iotdb/db/engine/filenodeV2/FileNodeProcessorV2Test.java
@@@ -20,6 -20,6 +20,7 @@@ package org.apache.iotdb.db.engine.file
  
  import org.apache.iotdb.db.engine.MetadataManagerHelper;
  import org.apache.iotdb.db.engine.querycontext.QueryDataSourceV2;
++import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
  import org.apache.iotdb.db.utils.EnvironmentUtils;
  import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
  import org.apache.iotdb.tsfile.write.record.TSRecord;
@@@ -56,7 -56,7 +57,7 @@@ public class FileNodeProcessorV2Test 
      for (int j = 1; j <= 100; j++) {
        TSRecord record = new TSRecord(j, deviceId);
        record.addTuple(DataPoint.getDataPoint(TSDataType.INT32, measurementId, String.valueOf(j)));
--      processor.insert(record);
++      processor.insert(new InsertPlan(record));
        processor.asyncForceClose();
      }
  
@@@ -75,7 -75,7 +76,7 @@@
      for (int j = 21; j <= 30; j++) {
        TSRecord record = new TSRecord(j, deviceId);
        record.addTuple(DataPoint.getDataPoint(TSDataType.INT32, measurementId, String.valueOf(j)));
--      processor.insert(record);
++      processor.insert(new InsertPlan(record));
        processor.asyncForceClose();
      }
      processor.syncCloseFileNode();
@@@ -83,7 -83,7 +84,7 @@@
      for (int j = 10; j >= 1; j--) {
        TSRecord record = new TSRecord(j, deviceId);
        record.addTuple(DataPoint.getDataPoint(TSDataType.INT32, measurementId, String.valueOf(j)));
--      processor.insert(record);
++      processor.insert(new InsertPlan(record));
        processor.asyncForceClose();
      }
  
diff --cc iotdb/src/test/java/org/apache/iotdb/db/engine/filenodeV2/UnsealedTsFileProcessorV2Test.java
index 7950a64,7950a64..4620781
--- a/iotdb/src/test/java/org/apache/iotdb/db/engine/filenodeV2/UnsealedTsFileProcessorV2Test.java
+++ b/iotdb/src/test/java/org/apache/iotdb/db/engine/filenodeV2/UnsealedTsFileProcessorV2Test.java
@@@ -31,6 -31,6 +31,7 @@@ import java.util.Map.Entry
  import org.apache.iotdb.db.engine.MetadataManagerHelper;
  import org.apache.iotdb.db.engine.querycontext.ReadOnlyMemChunk;
  import org.apache.iotdb.db.engine.version.SysTimeVersionController;
++import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
  import org.apache.iotdb.db.utils.EnvironmentUtils;
  import org.apache.iotdb.db.utils.FileSchemaUtils;
  import org.apache.iotdb.db.utils.TimeValuePair;
@@@ -81,7 -81,7 +82,7 @@@ public class UnsealedTsFileProcessorV2T
      for (int i = 1; i <= 100; i++) {
        TSRecord record = new TSRecord(i, deviceId);
        record.addTuple(DataPoint.getDataPoint(dataType, measurementId, String.valueOf(i)));
--      processor.insert(record);
++      processor.insert(new InsertPlan(record));
      }
  
      // query data in memory
@@@ -126,7 -126,7 +127,7 @@@
        for (int i = 1; i <= 10; i++) {
          TSRecord record = new TSRecord(i, deviceId);
          record.addTuple(DataPoint.getDataPoint(dataType, measurementId, String.valueOf(i)));
--        processor.insert(record);
++        processor.insert(new InsertPlan(record));
        }
        processor.asyncFlush();
      }
@@@ -167,7 -167,7 +168,7 @@@
      for (int i = 1; i <= 100; i++) {
        TSRecord record = new TSRecord(i, deviceId);
        record.addTuple(DataPoint.getDataPoint(dataType, measurementId, String.valueOf(i)));
--      processor.insert(record);
++      processor.insert(new InsertPlan(record));
      }
  
      // query data in memory
diff --cc iotdb/src/test/java/org/apache/iotdb/db/engine/memcontrol/OverflowFileSizeControlTest.java
index 48557b7,48557b7..0000000
deleted file mode 100644,100644
--- a/iotdb/src/test/java/org/apache/iotdb/db/engine/memcontrol/OverflowFileSizeControlTest.java
+++ /dev/null
@@@ -1,145 -1,145 +1,0 @@@
--/**
-- * Licensed to the Apache Software Foundation (ASF) under one
-- * or more contributor license agreements.  See the NOTICE file
-- * distributed with this work for additional information
-- * regarding copyright ownership.  The ASF licenses this file
-- * to you under the Apache License, Version 2.0 (the
-- * "License"); you may not use this file except in compliance
-- * with the License.  You may obtain a copy of the License at
-- *
-- *     http://www.apache.org/licenses/LICENSE-2.0
-- *
-- * Unless required by applicable law or agreed to in writing,
-- * software distributed under the License is distributed on an
-- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-- * KIND, either express or implied.  See the License for the
-- * specific language governing permissions and limitations
-- * under the License.
-- */
--package org.apache.iotdb.db.engine.memcontrol;
--
--import static org.junit.Assert.assertTrue;
--import static org.junit.Assert.fail;
--
--import java.io.IOException;
--import java.util.HashMap;
--import java.util.Map;
--import org.apache.iotdb.db.conf.IoTDBConfig;
--import org.apache.iotdb.db.conf.IoTDBDescriptor;
--import org.apache.iotdb.db.engine.MetadataManagerHelper;
--import org.apache.iotdb.db.engine.bufferwrite.Action;
--import org.apache.iotdb.db.engine.bufferwrite.ActionException;
--import org.apache.iotdb.db.engine.bufferwrite.FileNodeConstants;
--import org.apache.iotdb.db.engine.version.SysTimeVersionController;
--import org.apache.iotdb.db.engine.overflow.io.OverflowProcessor;
--import org.apache.iotdb.db.exception.OverflowProcessorException;
--import org.apache.iotdb.db.exception.ProcessorException;
--import org.apache.iotdb.db.utils.EnvironmentUtils;
--import org.apache.iotdb.db.utils.FileSchemaUtils;
--import org.apache.iotdb.db.utils.MemUtils;
--import org.apache.iotdb.tsfile.common.conf.TSFileConfig;
--import org.apache.iotdb.tsfile.common.conf.TSFileDescriptor;
--import org.apache.iotdb.tsfile.exception.write.WriteProcessException;
--import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
--import org.apache.iotdb.tsfile.write.record.TSRecord;
--import org.apache.iotdb.tsfile.write.record.datapoint.DataPoint;
--import org.junit.After;
--import org.junit.Before;
--import org.junit.Test;
--
--public class OverflowFileSizeControlTest {
--
--  private String nameSpacePath = "nsp";
--  private Map<String, Action> parameters = null;
--  private OverflowProcessor ofprocessor = null;
--  private TSFileConfig tsconfig = TSFileDescriptor.getInstance().getConfig();
--  private String deviceId = "root.vehicle.d0";
--  private String[] measurementIds = {"s0", "s1", "s2", "s3", "s4", "s5"};
--  private TSDataType[] dataTypes = {TSDataType.INT32, TSDataType.INT64, TSDataType.FLOAT,
--      TSDataType.DOUBLE,
--      TSDataType.BOOLEAN, TSDataType.TEXT};
--
--  private IoTDBConfig dbConfig = IoTDBDescriptor.getInstance().getConfig();
--  private long overflowFileSize;
--  private int groupSize;
--
--  private boolean skip = !false;
--
--  private Action overflowflushaction = new Action() {
--
--    @Override
--    public void act() throws ActionException {
--    }
--  };
--
--  private Action filenodeflushaction = new Action() {
--
--    @Override
--    public void act() throws ActionException {
--    }
--  };
--
--  private Action filenodemanagerbackupaction = new Action() {
--
--    @Override
--    public void act() throws ActionException {
--    }
--  };
--
--  private Action filenodemanagerflushaction = new Action() {
--
--    @Override
--    public void act() throws ActionException {
--    }
--  };
--
--  @Before
--  public void setUp() throws Exception {
--    parameters = new HashMap<>();
--    parameters.put(FileNodeConstants.OVERFLOW_FLUSH_ACTION, overflowflushaction);
--    parameters.put(FileNodeConstants.FILENODE_PROCESSOR_FLUSH_ACTION, filenodeflushaction);
--
--    overflowFileSize = dbConfig.getOverflowFileSizeThreshold();
--    groupSize = tsconfig.groupSizeInByte;
--    dbConfig.setOverflowFileSizeThreshold(10 * 1024 * 1024);
--    tsconfig.groupSizeInByte = 1024 * 1024;
--
--    MetadataManagerHelper.initMetadata();
--  }
--
--  @After
--  public void tearDown() throws Exception {
--    dbConfig.setOverflowFileSizeThreshold(overflowFileSize);
--    tsconfig.groupSizeInByte = groupSize;
--    EnvironmentUtils.cleanEnv();
--  }
--
--  @Test
--  public void testInsert()
--      throws InterruptedException, IOException, WriteProcessException, ProcessorException {
--    if (skip) {
--      return;
--    }
--    // insert one point: int
--    try {
--      ofprocessor = new OverflowProcessor(nameSpacePath, parameters,
--          FileSchemaUtils.constructFileSchema(deviceId), SysTimeVersionController.INSTANCE);
--      for (int i = 1; i < 1000000; i++) {
--        TSRecord record = new TSRecord(i, deviceId);
--        record.addTuple(DataPoint.getDataPoint(dataTypes[0], measurementIds[0], String.valueOf(i)));
--        if (i % 100000 == 0) {
--          System.out.println(i + "," + MemUtils.bytesCntToStr(ofprocessor.getFileSize()));
--        }
--      }
--      // wait to flushMetadata
--      Thread.sleep(1000);
--      ofprocessor.close();
--      assertTrue(ofprocessor.getFileSize() < dbConfig.getOverflowFileSizeThreshold());
--      fail("Method unimplemented");
--    } catch (OverflowProcessorException e) {
--      e.printStackTrace();
--      fail(e.getMessage());
--    }
--
--  }
--}
diff --cc iotdb/src/test/java/org/apache/iotdb/db/engine/memcontrol/OverflowMetaSizeControlTest.java
index 56afc32,56afc32..0000000
deleted file mode 100644,100644
--- a/iotdb/src/test/java/org/apache/iotdb/db/engine/memcontrol/OverflowMetaSizeControlTest.java
+++ /dev/null
@@@ -1,146 -1,146 +1,0 @@@
--/**
-- * Licensed to the Apache Software Foundation (ASF) under one
-- * or more contributor license agreements.  See the NOTICE file
-- * distributed with this work for additional information
-- * regarding copyright ownership.  The ASF licenses this file
-- * to you under the Apache License, Version 2.0 (the
-- * "License"); you may not use this file except in compliance
-- * with the License.  You may obtain a copy of the License at
-- *
-- *     http://www.apache.org/licenses/LICENSE-2.0
-- *
-- * Unless required by applicable law or agreed to in writing,
-- * software distributed under the License is distributed on an
-- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-- * KIND, either express or implied.  See the License for the
-- * specific language governing permissions and limitations
-- * under the License.
-- */
--package org.apache.iotdb.db.engine.memcontrol;
--
--import static org.junit.Assert.assertTrue;
--import static org.junit.Assert.fail;
--
--import java.io.IOException;
--import java.util.HashMap;
--import java.util.Map;
--import org.apache.iotdb.db.conf.IoTDBConfig;
--import org.apache.iotdb.db.conf.IoTDBDescriptor;
--import org.apache.iotdb.db.engine.MetadataManagerHelper;
--import org.apache.iotdb.db.engine.bufferwrite.Action;
--import org.apache.iotdb.db.engine.bufferwrite.ActionException;
--import org.apache.iotdb.db.engine.bufferwrite.FileNodeConstants;
--import org.apache.iotdb.db.engine.version.SysTimeVersionController;
--import org.apache.iotdb.db.engine.overflow.io.OverflowProcessor;
--import org.apache.iotdb.db.exception.OverflowProcessorException;
--import org.apache.iotdb.db.exception.ProcessorException;
--import org.apache.iotdb.db.utils.EnvironmentUtils;
--import org.apache.iotdb.db.utils.FileSchemaUtils;
--import org.apache.iotdb.db.utils.MemUtils;
--import org.apache.iotdb.tsfile.common.conf.TSFileConfig;
--import org.apache.iotdb.tsfile.common.conf.TSFileDescriptor;
--import org.apache.iotdb.tsfile.exception.write.WriteProcessException;
--import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
--import org.apache.iotdb.tsfile.write.record.TSRecord;
--import org.apache.iotdb.tsfile.write.record.datapoint.DataPoint;
--import org.junit.After;
--import org.junit.Before;
--import org.junit.Test;
--
--public class OverflowMetaSizeControlTest {
--
--  private String nameSpacePath = "nsp";
--  private Map<String, Action> parameters = null;
--  private OverflowProcessor ofprocessor = null;
--  private TSFileConfig tsconfig = TSFileDescriptor.getInstance().getConfig();
--  private String deviceId = "root.vehicle.d0";
--  private String[] measurementIds = {"s0", "s1", "s2", "s3", "s4", "s5"};
--  private TSDataType[] dataTypes = {TSDataType.INT32, TSDataType.INT64, TSDataType.FLOAT,
--      TSDataType.DOUBLE,
--      TSDataType.BOOLEAN, TSDataType.TEXT};
--
--  private IoTDBConfig dbConfig = IoTDBDescriptor.getInstance().getConfig();
--  private long overflowFileSize;
--  private int groupSize;
--
--  private boolean skip = !false;
--
--  private Action overflowflushaction = new Action() {
--
--    @Override
--    public void act() throws ActionException {
--    }
--  };
--
--  private Action filenodeflushaction = new Action() {
--
--    @Override
--    public void act() throws ActionException {
--    }
--  };
--
--  private Action filenodemanagerbackupaction = new Action() {
--
--    @Override
--    public void act() throws ActionException {
--    }
--  };
--
--  private Action filenodemanagerflushaction = new Action() {
--
--    @Override
--    public void act() throws ActionException {
--    }
--  };
--
--  @Before
--  public void setUp() throws Exception {
--    parameters = new HashMap<String, Action>();
--    parameters.put(FileNodeConstants.OVERFLOW_FLUSH_ACTION, overflowflushaction);
--    parameters.put(FileNodeConstants.FILENODE_PROCESSOR_FLUSH_ACTION, filenodeflushaction);
--
--    overflowFileSize = dbConfig.getOverflowMetaSizeThreshold();
--    groupSize = tsconfig.groupSizeInByte;
--    dbConfig.setOverflowMetaSizeThreshold(3 * 1024 * 1024);
--    tsconfig.groupSizeInByte = 1024 * 1024;
--
--    MetadataManagerHelper.initMetadata();
--  }
--
--  @After
--  public void tearDown() throws Exception {
--    dbConfig.setOverflowMetaSizeThreshold(overflowFileSize);
--    tsconfig.groupSizeInByte = groupSize;
--    EnvironmentUtils.cleanEnv();
--  }
--
--  @Test
--  public void testInsert()
--      throws InterruptedException, IOException, WriteProcessException, ProcessorException {
--    if (skip) {
--      return;
--    }
--    // insert one point: int
--    try {
--      ofprocessor = new OverflowProcessor(nameSpacePath, parameters,
--          FileSchemaUtils.constructFileSchema(deviceId), SysTimeVersionController.INSTANCE);
--      for (int i = 1; i < 1000000; i++) {
--        TSRecord record = new TSRecord(i, deviceId);
--        record.addTuple(DataPoint.getDataPoint(dataTypes[0], measurementIds[0], String.valueOf(i)));
--        ofprocessor.insert(record);
--        if (i % 100000 == 0) {
--          System.out.println(i + "," + MemUtils.bytesCntToStr(ofprocessor.getMetaSize()));
--        }
--      }
--      // wait to flushMetadata
--      Thread.sleep(1000);
--      assertTrue(ofprocessor.getMetaSize() < dbConfig.getOverflowMetaSizeThreshold());
--      ofprocessor.close();
--      fail("Method unimplemented");
--    } catch (OverflowProcessorException e) {
--      e.printStackTrace();
--      fail(e.getMessage());
--    }
--
--  }
--}
diff --cc iotdb/src/test/java/org/apache/iotdb/db/engine/modification/DeletionFileNodeTest.java
index 55f505f,03d3e57..41a5b7a
--- a/iotdb/src/test/java/org/apache/iotdb/db/engine/modification/DeletionFileNodeTest.java
+++ b/iotdb/src/test/java/org/apache/iotdb/db/engine/modification/DeletionFileNodeTest.java
@@@ -30,15 -30,15 +30,18 @@@ import java.util.Collection
  import java.util.Collections;
  import java.util.Iterator;
  import org.apache.iotdb.db.conf.IoTDBDescriptor;
 -import org.apache.iotdb.db.conf.directories.Directories;
 -import org.apache.iotdb.db.engine.filenode.FileNodeManager;
 +import org.apache.iotdb.db.conf.directories.DirectoryManager;
- import org.apache.iotdb.db.engine.filenode.FileNodeManager;
++import org.apache.iotdb.db.engine.filenodeV2.FileNodeManagerV2;
  import org.apache.iotdb.db.engine.modification.io.LocalTextModificationAccessor;
  import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
++import org.apache.iotdb.db.engine.querycontext.QueryDataSourceV2;
  import org.apache.iotdb.db.exception.FileNodeManagerException;
  import org.apache.iotdb.db.exception.MetadataArgsErrorException;
  import org.apache.iotdb.db.exception.PathErrorException;
++import org.apache.iotdb.db.exception.ProcessorException;
  import org.apache.iotdb.db.exception.StartupException;
  import org.apache.iotdb.db.metadata.MManager;
++import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
  import org.apache.iotdb.db.query.control.QueryResourceManager;
  import org.apache.iotdb.db.utils.EnvironmentUtils;
  import org.apache.iotdb.db.utils.TimeValuePair;
@@@ -70,14 -70,14 +73,14 @@@ public class DeletionFileNodeTest 
  
    @Before
    public void setup() throws MetadataArgsErrorException,
--      PathErrorException, IOException, FileNodeManagerException, StartupException {
++      PathErrorException, IOException, FileNodeManagerException, StartupException, ProcessorException {
      EnvironmentUtils.envSetUp();
  
      MManager.getInstance().setStorageLevelToMTree(processorName);
      for (int i = 0; i < 10; i++) {
        MManager.getInstance().addPathToMTree(processorName + "." + measurements[i], dataType,
            encoding);
--      FileNodeManager.getInstance()
++      FileNodeManagerV2.getInstance()
            .addTimeSeries(new Path(processorName, measurements[i]), TSDataType.valueOf(dataType),
                TSEncoding.valueOf(encoding), CompressionType.valueOf(TSFileConfig.compressor),
                Collections.emptyMap());
@@@ -91,29 -91,29 +94,29 @@@
  
    @Test
    public void testDeleteInBufferWriteCache() throws
--      FileNodeManagerException {
++      FileNodeManagerException, ProcessorException {
  
      for (int i = 1; i <= 100; i++) {
        TSRecord record = new TSRecord(i, processorName);
        for (int j = 0; j < 10; j++) {
          record.addTuple(new DoubleDataPoint(measurements[j], i * 1.0));
        }
--      FileNodeManager.getInstance().insert(record, false);
++      FileNodeManagerV2.getInstance().insert(new InsertPlan(record));
      }
  
--    FileNodeManager.getInstance().delete(processorName, measurements[3], 50);
--    FileNodeManager.getInstance().delete(processorName, measurements[4], 50);
--    FileNodeManager.getInstance().delete(processorName, measurements[5], 30);
--    FileNodeManager.getInstance().delete(processorName, measurements[5], 50);
++    FileNodeManagerV2.getInstance().delete(processorName, measurements[3], 50);
++    FileNodeManagerV2.getInstance().delete(processorName, measurements[4], 50);
++    FileNodeManagerV2.getInstance().delete(processorName, measurements[5], 30);
++    FileNodeManagerV2.getInstance().delete(processorName, measurements[5], 50);
  
      SingleSeriesExpression expression = new SingleSeriesExpression(new Path(processorName,
          measurements[5]), null);
      QueryResourceManager.getInstance().beginQueryOfGivenExpression(TEST_QUERY_JOB_ID, expression);
--    QueryDataSource dataSource = QueryResourceManager.getInstance()
++    QueryDataSourceV2 dataSource = QueryResourceManager.getInstance()
          .getQueryDataSource(expression.getSeriesPath(), TEST_QUERY_CONTEXT);
  
      Iterator<TimeValuePair> timeValuePairs =
--        dataSource.getSeqDataSource().getReadableChunk().getIterator();
++        dataSource.getSeqResources().get(0).getReadOnlyMemChunk().getIterator();
      int count = 0;
      while (timeValuePairs.hasNext()) {
        timeValuePairs.next();
@@@ -130,13 -130,13 +133,13 @@@
        for (int j = 0; j < 10; j++) {
          record.addTuple(new DoubleDataPoint(measurements[j], i * 1.0));
        }
--      FileNodeManager.getInstance().insert(record, false);
++      FileNodeManagerV2.getInstance().insert(new InsertPlan(record));
      }
--    FileNodeManager.getInstance().closeAll();
++    FileNodeManagerV2.getInstance().syncCloseAllProcessor();
  
--    FileNodeManager.getInstance().delete(processorName, measurements[5], 50);
--    FileNodeManager.getInstance().delete(processorName, measurements[4], 40);
--    FileNodeManager.getInstance().delete(processorName, measurements[3], 30);
++    FileNodeManagerV2.getInstance().delete(processorName, measurements[5], 50);
++    FileNodeManagerV2.getInstance().delete(processorName, measurements[4], 40);
++    FileNodeManagerV2.getInstance().delete(processorName, measurements[3], 30);
  
      Modification[] realModifications = new Modification[]{
          new Deletion(processorName + "." + measurements[5], 102, 50),
@@@ -166,16 -166,16 +169,16 @@@
    }
  
    @Test
--  public void testDeleteInOverflowCache() throws FileNodeManagerException {
++  public void testDeleteInOverflowCache() throws FileNodeManagerException, ProcessorException {
      // insert into BufferWrite
      for (int i = 101; i <= 200; i++) {
        TSRecord record = new TSRecord(i, processorName);
        for (int j = 0; j < 10; j++) {
          record.addTuple(new DoubleDataPoint(measurements[j], i * 1.0));
        }
--      FileNodeManager.getInstance().insert(record, false);
++      FileNodeManagerV2.getInstance().insert(new InsertPlan(record));
      }
--    FileNodeManager.getInstance().closeAll();
++    FileNodeManagerV2.getInstance().syncCloseAllProcessor();
  
      // insert into Overflow
      for (int i = 1; i <= 100; i++) {
@@@ -183,23 -183,23 +186,23 @@@
        for (int j = 0; j < 10; j++) {
          record.addTuple(new DoubleDataPoint(measurements[j], i * 1.0));
        }
--      FileNodeManager.getInstance().insert(record, false);
++      FileNodeManagerV2.getInstance().insert(new InsertPlan(record));
      }
  
--    FileNodeManager.getInstance().delete(processorName, measurements[3], 50);
--    FileNodeManager.getInstance().delete(processorName, measurements[4], 50);
--    FileNodeManager.getInstance().delete(processorName, measurements[5], 30);
--    FileNodeManager.getInstance().delete(processorName, measurements[5], 50);
++    FileNodeManagerV2.getInstance().delete(processorName, measurements[3], 50);
++    FileNodeManagerV2.getInstance().delete(processorName, measurements[4], 50);
++    FileNodeManagerV2.getInstance().delete(processorName, measurements[5], 30);
++    FileNodeManagerV2.getInstance().delete(processorName, measurements[5], 50);
  
      SingleSeriesExpression expression = new SingleSeriesExpression(new Path(processorName,
          measurements[5]), null);
  
      QueryResourceManager.getInstance().beginQueryOfGivenExpression(TEST_QUERY_JOB_ID, expression);
--    QueryDataSource dataSource = QueryResourceManager.getInstance()
++    QueryDataSourceV2 dataSource = QueryResourceManager.getInstance()
          .getQueryDataSource(expression.getSeriesPath(), TEST_QUERY_CONTEXT);
  
      Iterator<TimeValuePair> timeValuePairs =
--        dataSource.getOverflowSeriesDataSource().getReadableMemChunk().getIterator();
++        dataSource.getSeqResources().get(0).getReadOnlyMemChunk().getIterator();
      int count = 0;
      while (timeValuePairs.hasNext()) {
        timeValuePairs.next();
@@@ -218,9 -218,9 +221,9 @@@
        for (int j = 0; j < 10; j++) {
          record.addTuple(new DoubleDataPoint(measurements[j], i * 1.0));
        }
--      FileNodeManager.getInstance().insert(record, false);
++      FileNodeManagerV2.getInstance().insert(new InsertPlan(record));
      }
--    FileNodeManager.getInstance().closeAll();
++    FileNodeManagerV2.getInstance().syncCloseAllProcessor();
  
      // insert into Overflow
      for (int i = 1; i <= 100; i++) {
@@@ -228,13 -228,13 +231,13 @@@
        for (int j = 0; j < 10; j++) {
          record.addTuple(new DoubleDataPoint(measurements[j], i * 1.0));
        }
--      FileNodeManager.getInstance().insert(record, false);
++      FileNodeManagerV2.getInstance().insert(new InsertPlan(record));
      }
--    FileNodeManager.getInstance().closeAll();
++    FileNodeManagerV2.getInstance().syncCloseAllProcessor();
  
--    FileNodeManager.getInstance().delete(processorName, measurements[5], 50);
--    FileNodeManager.getInstance().delete(processorName, measurements[4], 40);
--    FileNodeManager.getInstance().delete(processorName, measurements[3], 30);
++    FileNodeManagerV2.getInstance().delete(processorName, measurements[5], 50);
++    FileNodeManagerV2.getInstance().delete(processorName, measurements[4], 40);
++    FileNodeManagerV2.getInstance().delete(processorName, measurements[3], 30);
  
      Modification[] realModifications = new Modification[]{
          new Deletion(processorName + "." + measurements[5], 103, 50),
diff --cc iotdb/src/test/java/org/apache/iotdb/db/engine/modification/DeletionQueryTest.java
index 3e9bb99,3e9bb99..c2ff798
--- a/iotdb/src/test/java/org/apache/iotdb/db/engine/modification/DeletionQueryTest.java
+++ b/iotdb/src/test/java/org/apache/iotdb/db/engine/modification/DeletionQueryTest.java
@@@ -26,13 -26,13 +26,15 @@@ import java.io.IOException
  import java.util.ArrayList;
  import java.util.Collections;
  import java.util.List;
--import org.apache.iotdb.db.engine.filenode.FileNodeManager;
++import org.apache.iotdb.db.engine.filenodeV2.FileNodeManagerV2;
  import org.apache.iotdb.db.engine.memcontrol.BasicMemController.UsageLevel;
  import org.apache.iotdb.db.exception.FileNodeManagerException;
  import org.apache.iotdb.db.exception.MetadataArgsErrorException;
  import org.apache.iotdb.db.exception.PathErrorException;
++import org.apache.iotdb.db.exception.ProcessorException;
  import org.apache.iotdb.db.exception.StartupException;
  import org.apache.iotdb.db.metadata.MManager;
++import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
  import org.apache.iotdb.db.query.executor.EngineQueryRouter;
  import org.apache.iotdb.db.utils.EnvironmentUtils;
  import org.apache.iotdb.tsfile.common.conf.TSFileConfig;
@@@ -65,14 -65,14 +67,14 @@@ public class DeletionQueryTest 
  
    @Before
    public void setup() throws MetadataArgsErrorException,
--      PathErrorException, IOException, FileNodeManagerException, StartupException {
++      PathErrorException, IOException, FileNodeManagerException, StartupException, ProcessorException {
      EnvironmentUtils.envSetUp();
  
      MManager.getInstance().setStorageLevelToMTree(processorName);
      for (int i = 0; i < 10; i++) {
        MManager.getInstance().addPathToMTree(processorName + "." + measurements[i], dataType,
            encoding);
--      FileNodeManager.getInstance()
++      FileNodeManagerV2.getInstance()
            .addTimeSeries(new Path(processorName, measurements[i]), TSDataType.valueOf(dataType),
                TSEncoding.valueOf(encoding), CompressionType.valueOf(TSFileConfig.compressor),
                Collections.emptyMap());
@@@ -93,13 -93,13 +95,13 @@@
        for (int j = 0; j < 10; j++) {
          record.addTuple(new DoubleDataPoint(measurements[j], i * 1.0));
        }
--      FileNodeManager.getInstance().insert(record, false);
++      FileNodeManagerV2.getInstance().insert(new InsertPlan(record));
      }
  
--    FileNodeManager.getInstance().delete(processorName, measurements[3], 50);
--    FileNodeManager.getInstance().delete(processorName, measurements[4], 50);
--    FileNodeManager.getInstance().delete(processorName, measurements[5], 30);
--    FileNodeManager.getInstance().delete(processorName, measurements[5], 50);
++    FileNodeManagerV2.getInstance().delete(processorName, measurements[3], 50);
++    FileNodeManagerV2.getInstance().delete(processorName, measurements[4], 50);
++    FileNodeManagerV2.getInstance().delete(processorName, measurements[5], 30);
++    FileNodeManagerV2.getInstance().delete(processorName, measurements[5], 50);
  
      List<Path> pathList = new ArrayList<>();
      pathList.add(new Path(processorName, measurements[3]));
@@@ -124,13 -124,13 +126,13 @@@
        for (int j = 0; j < 10; j++) {
          record.addTuple(new DoubleDataPoint(measurements[j], i * 1.0));
        }
--      FileNodeManager.getInstance().insert(record, false);
++      FileNodeManagerV2.getInstance().insert(new InsertPlan(record));
      }
--    FileNodeManager.getInstance().closeAll();
++    FileNodeManagerV2.getInstance().syncCloseAllProcessor();
  
--    FileNodeManager.getInstance().delete(processorName, measurements[5], 50);
--    FileNodeManager.getInstance().delete(processorName, measurements[4], 40);
--    FileNodeManager.getInstance().delete(processorName, measurements[3], 30);
++    FileNodeManagerV2.getInstance().delete(processorName, measurements[5], 50);
++    FileNodeManagerV2.getInstance().delete(processorName, measurements[4], 40);
++    FileNodeManagerV2.getInstance().delete(processorName, measurements[3], 30);
  
      List<Path> pathList = new ArrayList<>();
      pathList.add(new Path(processorName, measurements[3]));
@@@ -156,9 -156,9 +158,9 @@@
        for (int j = 0; j < 10; j++) {
          record.addTuple(new DoubleDataPoint(measurements[j], i * 1.0));
        }
--      FileNodeManager.getInstance().insert(record, false);
++      FileNodeManagerV2.getInstance().insert(new InsertPlan(record));
      }
--    FileNodeManager.getInstance().closeAll();
++    FileNodeManagerV2.getInstance().syncCloseAllProcessor();
  
      // insert into Overflow
      for (int i = 1; i <= 100; i++) {
@@@ -166,13 -166,13 +168,13 @@@
        for (int j = 0; j < 10; j++) {
          record.addTuple(new DoubleDataPoint(measurements[j], i * 1.0));
        }
--      FileNodeManager.getInstance().insert(record, false);
++      FileNodeManagerV2.getInstance().insert(new InsertPlan(record));
      }
  
--    FileNodeManager.getInstance().delete(processorName, measurements[3], 50);
--    FileNodeManager.getInstance().delete(processorName, measurements[4], 50);
--    FileNodeManager.getInstance().delete(processorName, measurements[5], 30);
--    FileNodeManager.getInstance().delete(processorName, measurements[5], 50);
++    FileNodeManagerV2.getInstance().delete(processorName, measurements[3], 50);
++    FileNodeManagerV2.getInstance().delete(processorName, measurements[4], 50);
++    FileNodeManagerV2.getInstance().delete(processorName, measurements[5], 30);
++    FileNodeManagerV2.getInstance().delete(processorName, measurements[5], 50);
  
      List<Path> pathList = new ArrayList<>();
      pathList.add(new Path(processorName, measurements[3]));
@@@ -198,9 -198,9 +200,9 @@@
        for (int j = 0; j < 10; j++) {
          record.addTuple(new DoubleDataPoint(measurements[j], i * 1.0));
        }
--      FileNodeManager.getInstance().insert(record, false);
++      FileNodeManagerV2.getInstance().insert(new InsertPlan(record));
      }
--    FileNodeManager.getInstance().closeAll();
++    FileNodeManagerV2.getInstance().syncCloseAllProcessor();
  
      // insert into Overflow
      for (int i = 1; i <= 100; i++) {
@@@ -208,13 -208,13 +210,13 @@@
        for (int j = 0; j < 10; j++) {
          record.addTuple(new DoubleDataPoint(measurements[j], i * 1.0));
        }
--      FileNodeManager.getInstance().insert(record, false);
++      FileNodeManagerV2.getInstance().insert(new InsertPlan(record));
      }
--    FileNodeManager.getInstance().closeAll();
++    FileNodeManagerV2.getInstance().syncCloseAllProcessor();
  
--    FileNodeManager.getInstance().delete(processorName, measurements[5], 50);
--    FileNodeManager.getInstance().delete(processorName, measurements[4], 40);
--    FileNodeManager.getInstance().delete(processorName, measurements[3], 30);
++    FileNodeManagerV2.getInstance().delete(processorName, measurements[5], 50);
++    FileNodeManagerV2.getInstance().delete(processorName, measurements[4], 40);
++    FileNodeManagerV2.getInstance().delete(processorName, measurements[3], 30);
  
      List<Path> pathList = new ArrayList<>();
      pathList.add(new Path(processorName, measurements[3]));
@@@ -240,45 -240,45 +242,45 @@@
        for (int j = 0; j < 10; j++) {
          record.addTuple(new DoubleDataPoint(measurements[j], i * 1.0));
        }
--      FileNodeManager.getInstance().insert(record, false);
++      FileNodeManagerV2.getInstance().insert(new InsertPlan(record));
      }
  
--    FileNodeManager.getInstance().delete(processorName, measurements[3], 50);
--    FileNodeManager.getInstance().delete(processorName, measurements[4], 50);
--    FileNodeManager.getInstance().delete(processorName, measurements[5], 30);
--    FileNodeManager.getInstance().delete(processorName, measurements[5], 50);
++    FileNodeManagerV2.getInstance().delete(processorName, measurements[3], 50);
++    FileNodeManagerV2.getInstance().delete(processorName, measurements[4], 50);
++    FileNodeManagerV2.getInstance().delete(processorName, measurements[5], 30);
++    FileNodeManagerV2.getInstance().delete(processorName, measurements[5], 50);
... 1727 lines suppressed ...


[incubator-iotdb] 01/03: add logNode in unsealedTsFileprocessor and change the insert record interface to InsertPlan

Posted by qi...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

qiaojialin pushed a commit to branch feature_async_close_tsfile
in repository https://gitbox.apache.org/repos/asf/incubator-iotdb.git

commit d66a237feb81e9a8be89cc45e5d3a20990c1997d
Author: qiaojialin <64...@qq.com>
AuthorDate: Sat Jun 22 16:39:11 2019 +0800

    add logNode in unsealedTsFileprocessor and change the insert record interface to InsertPlan
---
 .../{Directories.java => DirectoryManager.java}    | 11 ++--
 .../iotdb/db/engine/filenode/FileNodeManager.java  |  6 +-
 .../db/engine/filenode/FileNodeProcessor.java      | 12 ++--
 .../iotdb/db/engine/filenode/TsFileResource.java   | 11 ++--
 .../db/engine/filenodeV2/FileNodeManagerV2.java    | 11 ++--
 .../db/engine/filenodeV2/FileNodeProcessorV2.java  | 74 ++++++++++++++++------
 .../db/engine/filenodeV2/TsFileResourceV2.java     | 16 +++++
 .../filenodeV2/UnsealedTsFileProcessorV2.java      | 26 ++++++--
 .../iotdb/db/engine/memtable/AbstractMemTable.java | 14 ++--
 .../apache/iotdb/db/engine/memtable/IMemTable.java |  3 +-
 .../db/qp/executor/IQueryProcessExecutor.java      | 18 +-----
 .../iotdb/db/qp/executor/OverflowQPExecutor.java   | 64 ++++++-------------
 .../iotdb/db/qp/physical/crud/InsertPlan.java      | 10 +++
 .../iotdb/db/sync/receiver/SyncServiceImpl.java    | 10 +--
 .../java/org/apache/iotdb/db/utils/MemUtils.java   | 64 ++++++-------------
 .../org/apache/iotdb/db/utils/OpenFileNumUtil.java |  6 +-
 .../db/writelog/node/ExclusiveWriteLogNode.java    |  4 +-
 .../iotdb/db/writelog/recover/LogReplayer.java     | 19 +++---
 .../recover/SeqTsFileRecoverPerformer.java         |  7 +-
 .../java/org/apache/iotdb/db/engine/PathUtils.java |  6 +-
 .../bufferwrite/BufferWriteProcessorNewTest.java   |  6 +-
 .../bufferwrite/BufferWriteProcessorTest.java      | 14 ++--
 .../memcontrol/BufferwriteFileSizeControlTest.java |  4 +-
 .../memcontrol/BufferwriteMetaSizeControlTest.java |  4 +-
 .../engine/modification/DeletionFileNodeTest.java  |  4 +-
 .../iotdb/db/qp/plan/LogicalPlanSmallTest.java     |  9 +--
 .../apache/iotdb/db/qp/utils/MemIntQpExecutor.java | 30 ++++-----
 .../apache/iotdb/db/utils/EnvironmentUtils.java    |  8 +--
 28 files changed, 236 insertions(+), 235 deletions(-)
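
For orientation before the individual diffs: the write path now takes an InsertPlan (a physical plan) instead of a raw TSRecord. A minimal caller-side sketch, assuming only the InsertPlan(TSRecord) convenience constructor used throughout the updated tests (device and measurement names below are made up, and exception handling is omitted):

    // build a record as before, then wrap it in a physical plan
    TSRecord record = new TSRecord(42L, "root.vehicle.d0");
    record.addTuple(DataPoint.getDataPoint(TSDataType.INT32, "s0", "7"));
    // old call: FileNodeManager.getInstance().insert(record, false);
    FileNodeManagerV2.getInstance().insert(new InsertPlan(record));
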

diff --git a/iotdb/src/main/java/org/apache/iotdb/db/conf/directories/Directories.java b/iotdb/src/main/java/org/apache/iotdb/db/conf/directories/DirectoryManager.java
similarity index 93%
rename from iotdb/src/main/java/org/apache/iotdb/db/conf/directories/Directories.java
rename to iotdb/src/main/java/org/apache/iotdb/db/conf/directories/DirectoryManager.java
index 4380b0d..1c17c79 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/conf/directories/Directories.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/conf/directories/DirectoryManager.java
@@ -24,7 +24,6 @@ import java.util.Arrays;
 import java.util.List;
 import org.apache.iotdb.db.conf.IoTDBDescriptor;
 import org.apache.iotdb.db.conf.directories.strategy.DirectoryStrategy;
-import org.apache.iotdb.db.service.IoTDB;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -33,14 +32,14 @@ import org.slf4j.LoggerFactory;
  *
  * @author East
  */
-public class Directories {
+public class DirectoryManager {
 
-  private static final Logger LOGGER = LoggerFactory.getLogger(Directories.class);
+  private static final Logger LOGGER = LoggerFactory.getLogger(DirectoryManager.class);
 
   private List<String> tsfileFolders;
   private DirectoryStrategy strategy;
 
-  private Directories() {
+  private DirectoryManager() {
     tsfileFolders = new ArrayList<>(
         Arrays.asList(IoTDBDescriptor.getInstance().getConfig().getBufferWriteDirs()));
     initFolders();
@@ -56,7 +55,7 @@ public class Directories {
     }
   }
 
-  public static Directories getInstance() {
+  public static DirectoryManager getInstance() {
     return DirectoriesHolder.INSTANCE;
   }
 
@@ -105,7 +104,7 @@ public class Directories {
   }
 
   private static class DirectoriesHolder {
-    private static final Directories INSTANCE = new Directories();
+    private static final DirectoryManager INSTANCE = new DirectoryManager();
   }
 
   public String getWALFolder() {
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/engine/filenode/FileNodeManager.java b/iotdb/src/main/java/org/apache/iotdb/db/engine/filenode/FileNodeManager.java
index 13f2086..cd0d5de 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/engine/filenode/FileNodeManager.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/engine/filenode/FileNodeManager.java
@@ -37,7 +37,7 @@ import org.apache.iotdb.db.concurrent.IoTDBThreadPoolFactory;
 import org.apache.iotdb.db.conf.IoTDBConfig;
 import org.apache.iotdb.db.conf.IoTDBConstant;
 import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.conf.directories.Directories;
+import org.apache.iotdb.db.conf.directories.DirectoryManager;
 import org.apache.iotdb.db.engine.Processor;
 import org.apache.iotdb.db.engine.bufferwrite.BufferWriteProcessor;
 import org.apache.iotdb.db.engine.memcontrol.BasicMemController;
@@ -78,7 +78,7 @@ public class FileNodeManager implements IStatistic, IService {
 
   private static final Logger LOGGER = LoggerFactory.getLogger(FileNodeManager.class);
   private static final IoTDBConfig TsFileDBConf = IoTDBDescriptor.getInstance().getConfig();
-  private static final Directories directories = Directories.getInstance();
+  private static final DirectoryManager DIRECTORY_MANAGER = DirectoryManager.getInstance();
   /**
   * a folder that persists FileNodeProcessorStore classes. Each storage group will have a subfolder.
    * by default, it is system/info
@@ -961,7 +961,7 @@ public class FileNodeManager implements IStatistic, IService {
   }
 
   private void cleanBufferWrite(String processorName) throws IOException {
-    List<String> bufferwritePathList = directories.getAllTsFileFolders();
+    List<String> bufferwritePathList = DIRECTORY_MANAGER.getAllTsFileFolders();
     for (String bufferwritePath : bufferwritePathList) {
       bufferwritePath = standardizeDir(bufferwritePath) + processorName;
       File bufferDir = new File(bufferwritePath);
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/engine/filenode/FileNodeProcessor.java b/iotdb/src/main/java/org/apache/iotdb/db/engine/filenode/FileNodeProcessor.java
index a61edc9..1869aad 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/engine/filenode/FileNodeProcessor.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/engine/filenode/FileNodeProcessor.java
@@ -50,7 +50,7 @@ import java.util.function.Consumer;
 import org.apache.iotdb.db.conf.IoTDBConfig;
 import org.apache.iotdb.db.conf.IoTDBConstant;
 import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.conf.directories.Directories;
+import org.apache.iotdb.db.conf.directories.DirectoryManager;
 import org.apache.iotdb.db.engine.Processor;
 import org.apache.iotdb.db.engine.bufferwrite.Action;
 import org.apache.iotdb.db.engine.bufferwrite.ActionException;
@@ -120,7 +120,7 @@ public class FileNodeProcessor extends Processor implements IStatistic {
   private static final Logger LOGGER = LoggerFactory.getLogger(FileNodeProcessor.class);
   private static final IoTDBConfig TsFileDBConf = IoTDBDescriptor.getInstance().getConfig();
   private static final MManager mManager = MManager.getInstance();
-  private static final Directories directories = Directories.getInstance();
+  private static final DirectoryManager DIRECTORY_MANAGER = DirectoryManager.getInstance();
   private final String statStorageDeltaName;
   private final HashMap<String, AtomicLong> statParamsHashMap = new HashMap<>();
   /**
@@ -545,7 +545,7 @@ public class FileNodeProcessor extends Processor implements IStatistic {
       //params.put(FileNodeConstants.BUFFERWRITE_CLOSE_ACTION, bufferwriteCloseAction);
       params
           .put(FileNodeConstants.FILENODE_PROCESSOR_FLUSH_ACTION, fileNodeFlushAction);
-      String baseDir = directories.getNextFolderForTsfile();
+      String baseDir = DIRECTORY_MANAGER.getNextFolderForTsfile();
       LOGGER.info("Allocate folder {} for the new bufferwrite processor.", baseDir);
       // construct processor or restore
       try {
@@ -924,7 +924,7 @@ public class FileNodeProcessor extends Processor implements IStatistic {
                 + Constans.BACK_UP_DIRECTORY_NAME
                 + File.separatorChar + tsFileResource.getRelativePath();
         File newFile = new File(
-            Directories.getInstance().getTsFileFolder(tsFileResource.getBaseDirIndex()),
+            DirectoryManager.getInstance().getTsFileFolder(tsFileResource.getBaseDirIndex()),
             relativeFilePath);
         if (!newFile.getParentFile().exists()) {
           newFile.getParentFile().mkdirs();
@@ -1378,7 +1378,7 @@ public class FileNodeProcessor extends Processor implements IStatistic {
         // delete the all files which are in the newFileNodes
         // notice: the last restore file of the interval file
 
-        List<String> bufferwriteDirPathList = directories.getAllTsFileFolders();
+        List<String> bufferwriteDirPathList = DIRECTORY_MANAGER.getAllTsFileFolders();
         List<File> bufferwriteDirList = new ArrayList<>();
         collectBufferWriteDirs(bufferwriteDirPathList, bufferwriteDirList);
 
@@ -1577,7 +1577,7 @@ public class FileNodeProcessor extends Processor implements IStatistic {
         numOfChunk++;
         TimeValuePair timeValuePair = seriesReader.next();
         if (mergeFileWriter == null) {
-          mergeBaseDir = directories.getNextFolderForTsfile();
+          mergeBaseDir = DIRECTORY_MANAGER.getNextFolderForTsfile();
           mergeFileName = timeValuePair.getTimestamp()
               + FileNodeConstants.BUFFERWRITE_FILE_SEPARATOR + System.currentTimeMillis();
           mergeOutputPath = constructOutputFilePath(mergeBaseDir, getProcessorName(),
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/engine/filenode/TsFileResource.java b/iotdb/src/main/java/org/apache/iotdb/db/engine/filenode/TsFileResource.java
index 01e768f..142223d 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/engine/filenode/TsFileResource.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/engine/filenode/TsFileResource.java
@@ -30,8 +30,7 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Objects;
 import java.util.Set;
-import java.util.concurrent.locks.ReadWriteLock;
-import org.apache.iotdb.db.conf.directories.Directories;
+import org.apache.iotdb.db.conf.directories.DirectoryManager;
 import org.apache.iotdb.db.engine.bufferwrite.RestorableTsFileIOWriter;
 import org.apache.iotdb.db.engine.modification.ModificationFile;
 import org.apache.iotdb.tsfile.common.conf.TSFileConfig;
@@ -48,7 +47,7 @@ public class TsFileResource {
 
   private OverflowChangeType overflowChangeType;
 
-  //the file index of `settled` folder in the Directories.
+  //the file index of `settled` folder in the DirectoryManager.
   private int baseDirIndex;
   private File file;
   private Map<String, Long> startTimeMap;
@@ -114,7 +113,7 @@ public class TsFileResource {
 
     this.overflowChangeType = type;
     if (file != null) {
-      this.baseDirIndex = Directories.getInstance()
+      this.baseDirIndex = DirectoryManager.getInstance()
           .getTsFileFolderIndex(file.getParentFile().getParent());
       this.modFile = new ModificationFile(file.getAbsolutePath() + ModificationFile.FILE_SUFFIX);
     }
@@ -157,7 +156,7 @@ public class TsFileResource {
     File file = null;
     if (hasRelativePath) {
       String relativePath = ReadWriteIOUtils.readString(inputStream);
-      file = new File(Directories.getInstance().getTsFileFolder(baseDirIndex), relativePath);
+      file = new File(DirectoryManager.getInstance().getTsFileFolder(baseDirIndex), relativePath);
     }
     int size = ReadWriteIOUtils.readInt(inputStream);
     Map<String, Long> startTimes = new HashMap<>();
@@ -375,7 +374,7 @@ public class TsFileResource {
   public void setFile(File file) throws IOException {
     this.file = file;
     if (file != null) {
-      this.baseDirIndex = Directories.getInstance()
+      this.baseDirIndex = DirectoryManager.getInstance()
           .getTsFileFolderIndex(file.getParentFile().getParent());
       if (this.modFile != null) {
         this.modFile.close();
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/engine/filenodeV2/FileNodeManagerV2.java b/iotdb/src/main/java/org/apache/iotdb/db/engine/filenodeV2/FileNodeManagerV2.java
index dcd3924..2d63912 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/engine/filenodeV2/FileNodeManagerV2.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/engine/filenodeV2/FileNodeManagerV2.java
@@ -36,6 +36,7 @@ import org.apache.iotdb.db.exception.FileNodeProcessorException;
 import org.apache.iotdb.db.exception.PathErrorException;
 import org.apache.iotdb.db.exception.StartupException;
 import org.apache.iotdb.db.metadata.MManager;
+import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
 import org.apache.iotdb.db.query.context.QueryContext;
 import org.apache.iotdb.db.service.IService;
 import org.apache.iotdb.db.service.ServiceType;
@@ -157,22 +158,22 @@ public class FileNodeManagerV2 implements IService {
   /**
    * insert TsRecord into storage group.
    *
-   * @param tsRecord input Data
+   * @param insertPlan physical plan of insertion
   * @return an int value that represents the insert type, 0: failed; 1: overflow; 2: bufferwrite
    */
-  public int insert(TSRecord tsRecord) throws FileNodeManagerException {
+  public int insert(InsertPlan insertPlan) throws FileNodeManagerException {
 
     FileNodeProcessorV2 fileNodeProcessor;
     try {
-      fileNodeProcessor = getProcessor(tsRecord.deviceId);
+      fileNodeProcessor = getProcessor(insertPlan.getDeviceId());
     } catch (Exception e) {
-      LOGGER.warn("get FileNodeProcessor of device {} failed, because {}", tsRecord.deviceId,
+      LOGGER.warn("get FileNodeProcessor of device {} failed, because {}", insertPlan.getDeviceId(),
           e.getMessage(), e);
       throw new FileNodeManagerException(e);
     }
 
     // TODO monitor: update statistics
-    return fileNodeProcessor.insert(tsRecord);
+    return fileNodeProcessor.insert(insertPlan);
   }
 
 
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/engine/filenodeV2/FileNodeProcessorV2.java b/iotdb/src/main/java/org/apache/iotdb/db/engine/filenodeV2/FileNodeProcessorV2.java
index 0b03327..2ae30c5 100755
--- a/iotdb/src/main/java/org/apache/iotdb/db/engine/filenodeV2/FileNodeProcessorV2.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/engine/filenodeV2/FileNodeProcessorV2.java
@@ -22,6 +22,7 @@ import java.io.File;
 import java.io.IOException;
 import java.nio.file.Paths;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -31,21 +32,23 @@ import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.function.Supplier;
 import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.conf.directories.Directories;
+import org.apache.iotdb.db.conf.directories.DirectoryManager;
 import org.apache.iotdb.db.engine.filenode.CopyOnReadLinkedList;
 import org.apache.iotdb.db.engine.querycontext.QueryDataSourceV2;
 import org.apache.iotdb.db.engine.querycontext.ReadOnlyMemChunk;
 import org.apache.iotdb.db.engine.version.SimpleFileVersionController;
 import org.apache.iotdb.db.engine.version.VersionController;
 import org.apache.iotdb.db.exception.FileNodeProcessorException;
+import org.apache.iotdb.db.exception.ProcessorException;
 import org.apache.iotdb.db.metadata.MManager;
+import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
+import org.apache.iotdb.db.writelog.recover.SeqTsFileRecoverPerformer;
 import org.apache.iotdb.tsfile.file.metadata.ChunkMetaData;
 import org.apache.iotdb.tsfile.file.metadata.enums.CompressionType;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding;
 import org.apache.iotdb.tsfile.read.common.Path;
 import org.apache.iotdb.tsfile.utils.Pair;
-import org.apache.iotdb.tsfile.write.record.TSRecord;
 import org.apache.iotdb.tsfile.write.schema.FileSchema;
 import org.apache.iotdb.tsfile.write.schema.MeasurementSchema;
 import org.slf4j.Logger;
@@ -56,7 +59,7 @@ public class FileNodeProcessorV2 {
   private static final Logger LOGGER = LoggerFactory.getLogger(FileNodeProcessorV2.class);
 
   private static final MManager mManager = MManager.getInstance();
-  private static final Directories directories = Directories.getInstance();
+  private static final DirectoryManager directoryManager = DirectoryManager.getInstance();
 
   private FileSchema fileSchema;
 
@@ -119,10 +122,43 @@ public class FileNodeProcessorV2 {
     this.fileSchema = constructFileSchema(storageGroupName);
   }
 
-  // TODO: Jiang Tian
-  private void recovery(){
+  private void recovery() throws ProcessorException {
+    List<String> tsfiles = new ArrayList<>();
+    List<String> fileFolders = directoryManager.getAllTsFileFolders();
+    for (String baseDir: fileFolders) {
+      File fileFolder = new File(baseDir, storageGroupName);
+      if (!fileFolder.exists()) {
+        continue;
+      }
+      for (File tsfile: fileFolder.listFiles()) {
+        tsfiles.add(tsfile.getPath());
+      }
+    }
+
+    // file names are <createTimeMillis>-<version>, so natural String order
+    // approximates creation order; a dedicated comparator may be preferable here
+    Collections.sort(tsfiles);
+
+    for (String tsfile: tsfiles) {
+      TsFileResourceV2 tsFileResource = new TsFileResourceV2(new File(tsfile));
+      SeqTsFileRecoverPerformer recoverPerformer = new SeqTsFileRecoverPerformer(storageGroupName + "-", fileSchema, versionController, tsFileResource);
+      recoverPerformer.recover();
+    }
+
+    tsfiles.clear();
+    String unseqFileFolder = IoTDBDescriptor.getInstance().getConfig().getOverflowDataDir();
+    File fileFolder = new File(unseqFileFolder, storageGroupName);
+    if (!fileFolder.exists()) {
+      return;
+    }
+    for (File unseqFile: fileFolder.listFiles()) {
+      tsfiles.add(unseqFile.getPath());
+    }
+
+    // replay the unsequence files the same way; a dedicated unsequence recover
+    // performer could be substituted here
+    for (String unseqTsFile : tsfiles) {
+      TsFileResourceV2 unseqResource = new TsFileResourceV2(new File(unseqTsFile));
+      SeqTsFileRecoverPerformer recoverPerformer = new SeqTsFileRecoverPerformer(
+          storageGroupName + "-", fileSchema, versionController, unseqResource);
+      recoverPerformer.recover();
+    }
   }
 
+
+
   private FileSchema constructFileSchema(String storageGroupName) {
     List<MeasurementSchema> columnSchemaList;
     columnSchemaList = mManager.getSchemaForFileName(storageGroupName);
@@ -151,11 +187,9 @@ public class FileNodeProcessorV2 {
   }
 
   /**
-   *
-   * @param tsRecord
    * @return -1: failed, 1: Overflow, 2: Bufferwrite
    */
-  public int insert(TSRecord tsRecord) {
+  public int insert(InsertPlan insertPlan) {
     lock.writeLock().lock();
     int insertResult;
 
@@ -164,16 +198,16 @@ public class FileNodeProcessorV2 {
         return -1;
       }
       // init map
-      latestTimeForEachDevice.putIfAbsent(tsRecord.deviceId, Long.MIN_VALUE);
-      latestFlushedTimeForEachDevice.putIfAbsent(tsRecord.deviceId, Long.MIN_VALUE);
+      latestTimeForEachDevice.putIfAbsent(insertPlan.getDeviceId(), Long.MIN_VALUE);
+      latestFlushedTimeForEachDevice.putIfAbsent(insertPlan.getDeviceId(), Long.MIN_VALUE);
 
       boolean result;
       // insert to sequence or unSequence file
-      if (tsRecord.time > latestFlushedTimeForEachDevice.get(tsRecord.deviceId)) {
-        result = insertUnsealedDataFile(tsRecord, true);
+      if (insertPlan.getTime() > latestFlushedTimeForEachDevice.get(insertPlan.getDeviceId())) {
+        result = insertUnsealedDataFile(insertPlan, true);
         insertResult = result ? 1 : -1;
       } else {
-        result = insertUnsealedDataFile(tsRecord, false);
+        result = insertUnsealedDataFile(insertPlan, false);
         insertResult = result ? 2 : -1;
       }
     } catch (Exception e) {
@@ -186,7 +220,7 @@ public class FileNodeProcessorV2 {
     return insertResult;
   }
 
-  private boolean insertUnsealedDataFile(TSRecord tsRecord, boolean sequence) throws IOException {
+  private boolean insertUnsealedDataFile(InsertPlan insertPlan, boolean sequence) throws IOException {
     lock.writeLock().lock();
     UnsealedTsFileProcessorV2 unsealedTsFileProcessor;
     try {
@@ -195,8 +229,8 @@ public class FileNodeProcessorV2 {
       if (sequence) {
         if (workSequenceTsFileProcessor == null) {
 
-          // TODO directories add method getAndCreateNextFolderTsfile
-          String baseDir = directories.getNextFolderForTsfile();
+          // TODO directoryManager add method getAndCreateNextFolderTsfile
+          String baseDir = directoryManager.getNextFolderForTsfile();
           String filePath = Paths.get(baseDir, storageGroupName, System.currentTimeMillis() + "-" + versionController.nextVersion()).toString();
 
           new File(baseDir, storageGroupName).mkdirs();
@@ -208,7 +242,7 @@ public class FileNodeProcessorV2 {
         unsealedTsFileProcessor = workSequenceTsFileProcessor;
       } else {
         if (workUnSequenceTsFileProcessor == null) {
-          // TODO check if the disk is full
+          // TODO check if the disk is full, move this
           String baseDir = IoTDBDescriptor.getInstance().getConfig().getOverflowDataDir();
           new File(baseDir, storageGroupName).mkdirs();
+          String filePath = Paths.get(baseDir, storageGroupName, System.currentTimeMillis() + "-" + versionController.nextVersion()).toString();
@@ -222,11 +256,11 @@ public class FileNodeProcessorV2 {
       }
 
       // insert BufferWrite
-      result = unsealedTsFileProcessor.insert(tsRecord);
+      result = unsealedTsFileProcessor.insert(insertPlan);
 
       // try to update the latest time of the device of this tsRecord
-      if (result && latestTimeForEachDevice.get(tsRecord.deviceId) < tsRecord.time) {
-        latestTimeForEachDevice.put(tsRecord.deviceId, tsRecord.time);
+      if (result && latestTimeForEachDevice.get(insertPlan.getDeviceId()) < insertPlan.getTime()) {
+        latestTimeForEachDevice.put(insertPlan.getDeviceId(), insertPlan.getTime());
       }
 
       // check memtable size and may asyncFlush the workMemtable
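
To summarize the new insert path in FileNodeProcessorV2: a point is routed by comparing its timestamp with the last flushed timestamp of its device. A condensed sketch of that decision, simplified from the hunk above rather than copied verbatim:

    // inside FileNodeProcessorV2.insert(InsertPlan), after the time maps are initialized
    long lastFlushed = latestFlushedTimeForEachDevice.get(insertPlan.getDeviceId());
    if (insertPlan.getTime() > lastFlushed) {
      // newer than everything already flushed: sequence (bufferwrite) file
      insertUnsealedDataFile(insertPlan, true);
    } else {
      // out-of-order point: unsequence (overflow) file
      insertUnsealedDataFile(insertPlan, false);
    }
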
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/engine/filenodeV2/TsFileResourceV2.java b/iotdb/src/main/java/org/apache/iotdb/db/engine/filenodeV2/TsFileResourceV2.java
index 159ebc2..0fd2e31 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/engine/filenodeV2/TsFileResourceV2.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/engine/filenodeV2/TsFileResourceV2.java
@@ -22,6 +22,7 @@ import java.io.File;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import org.apache.iotdb.db.engine.filenode.TsFileResource;
 import org.apache.iotdb.db.engine.modification.ModificationFile;
 import org.apache.iotdb.db.engine.querycontext.ReadOnlyMemChunk;
 import org.apache.iotdb.tsfile.file.metadata.ChunkMetaData;
@@ -58,6 +59,13 @@ public class TsFileResourceV2 {
    */
   private ReadOnlyMemChunk readOnlyMemChunk;
 
+  public TsFileResourceV2(File file) {
+    this.file = file;
+    this.startTimeMap = new HashMap<>();
+    this.endTimeMap = new HashMap<>();
+    this.closed = true;
+  }
+
   public TsFileResourceV2(File file, UnsealedTsFileProcessorV2 processor) {
     this.file = file;
     this.startTimeMap = new HashMap<>();
@@ -124,4 +132,12 @@ public class TsFileResourceV2 {
   public UnsealedTsFileProcessorV2 getUnsealedFileProcessor() {
     return processor;
   }
+
+  public void updateTime(String deviceId, long time) {
+    startTimeMap.putIfAbsent(deviceId, time);
+    Long endTime = endTimeMap.get(deviceId);
+    if (endTime == null || endTime < time) {
+      endTimeMap.put(deviceId, time);
+    }
+  }
 }
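
The new updateTime helper keeps per-device time bounds for a file: the first call for a device fixes its start time, and later calls only push the end time forward. An illustrative sequence (the file name is made up):

    TsFileResourceV2 resource = new TsFileResourceV2(new File("1561190351000-1"));
    resource.updateTime("root.vehicle.d0", 100);  // start = 100, end = 100
    resource.updateTime("root.vehicle.d0", 300);  // start = 100, end = 300
    resource.updateTime("root.vehicle.d0", 200);  // start = 100, end = 300 (end never moves backwards)
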
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/engine/filenodeV2/UnsealedTsFileProcessorV2.java b/iotdb/src/main/java/org/apache/iotdb/db/engine/filenodeV2/UnsealedTsFileProcessorV2.java
index cad3b29..2a72e07 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/engine/filenodeV2/UnsealedTsFileProcessorV2.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/engine/filenodeV2/UnsealedTsFileProcessorV2.java
@@ -37,6 +37,9 @@ import org.apache.iotdb.db.engine.memtable.MemTablePool;
 import org.apache.iotdb.db.engine.querycontext.ReadOnlyMemChunk;
 import org.apache.iotdb.db.engine.version.VersionController;
 import org.apache.iotdb.db.qp.constant.DatetimeUtils;
+import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
+import org.apache.iotdb.db.writelog.manager.MultiFileLogNodeManager;
+import org.apache.iotdb.db.writelog.node.WriteLogNode;
 import org.apache.iotdb.tsfile.common.conf.TSFileDescriptor;
 import org.apache.iotdb.tsfile.file.metadata.ChunkMetaData;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
@@ -76,6 +79,8 @@ public class UnsealedTsFileProcessorV2 {
 
   private Supplier flushUpdateLatestFlushTimeCallback;
 
+  private WriteLogNode logNode;
+
   /**
    * sync this object in query() and asyncFlush()
    */
@@ -91,6 +96,7 @@ public class UnsealedTsFileProcessorV2 {
     this.tsFileResource = new TsFileResourceV2(tsfile, this);
     this.versionController = versionController;
     this.writer = new NativeRestorableIOWriter(tsfile);
+    this.logNode = MultiFileLogNodeManager.getInstance().getNode(storageGroupName + "-" + tsfile.getName());
     this.closeUnsealedFileCallback = closeUnsealedFileCallback;
     this.flushUpdateLatestFlushTimeCallback = flushUpdateLatestFlushTimeCallback;
   }
@@ -99,10 +105,10 @@ public class UnsealedTsFileProcessorV2 {
    * insert a TsRecord into the workMemtable. If the memory usage is beyond the memTableThreshold,
    * put it into flushing list.
    *
-   * @param tsRecord data to be written
+   * @param insertPlan physical plan of insertion
    * @return succeed or fail
    */
-  public boolean insert(TSRecord tsRecord) {
+  public boolean insert(InsertPlan insertPlan) {
 
     if (workMemTable == null) {
       // TODO change the impl of getEmptyMemTable to non-blocking
@@ -114,13 +120,18 @@ public class UnsealedTsFileProcessorV2 {
       }
     }
 
-    // TODO insert WAL
+    try {
+      logNode.write(insertPlan);
+    } catch (IOException e) {
+      LOGGER.error("write WAL failed", e);
+      return false;
+    }
 
     // update start time of this memtable
-    tsFileResource.updateStartTime(tsRecord.deviceId, tsRecord.time);
+    tsFileResource.updateStartTime(insertPlan.getDeviceId(), insertPlan.getTime());
 
     // insert tsRecord to work memtable
-    workMemTable.insert(tsRecord);
+    workMemTable.insert(insertPlan);
 
     return true;
   }
@@ -143,10 +154,13 @@ public class UnsealedTsFileProcessorV2 {
       if (workMemTable == null) {
         return;
       }
+      logNode.notifyStartFlush();
       flushingMemTables.addLast(workMemTable);
       FlushManager.getInstance().registerUnsealedTsFileProcessor(this);
       flushUpdateLatestFlushTimeCallback.get();
       workMemTable = null;
+    } catch (IOException e) {
+      LOGGER.error("WAL notify start flush failed", e);
     } finally {
       flushQueryLock.writeLock().unlock();
     }
@@ -203,7 +217,9 @@ public class UnsealedTsFileProcessorV2 {
       MemTableFlushTaskV2 flushTask = new MemTableFlushTaskV2(writer, storageGroupName,
           this::releaseFlushedMemTableCallback);
       flushTask.flushMemTable(fileSchema, memTableToFlush, versionController.nextVersion());
+      logNode.notifyEndFlush();
     }
+
     // for sync flush
     synchronized (memTableToFlush) {
       memTableToFlush.notify();
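
The WAL hooks added above follow a write-ahead ordering: the InsertPlan is appended to the log node before it is applied to the work memtable, notifyStartFlush is called when a memtable is handed to the flushing queue, and notifyEndFlush is called once the flush task has written it out, presumably so the corresponding log entries can eventually be discarded. A rough, self-contained sketch of that sequencing (the class and interface names here are hypothetical and only illustrate the order of operations, not the real MultiFileLogNodeManager API):

  import java.io.IOException;
  import java.util.ArrayDeque;
  import java.util.Deque;

  class WalOrderingSketch {
    interface Wal {
      void write(Object plan) throws IOException;
      void notifyStartFlush() throws IOException;
      void notifyEndFlush() throws IOException;
    }

    private final Wal wal;
    private Deque<Object> workMemTable = new ArrayDeque<>();
    private final Deque<Deque<Object>> flushingMemTables = new ArrayDeque<>();

    WalOrderingSketch(Wal wal) {
      this.wal = wal;
    }

    boolean insert(Object plan) {
      try {
        wal.write(plan);               // 1. log first
      } catch (IOException e) {
        return false;                  // a failed WAL write rejects the insert
      }
      workMemTable.add(plan);          // 2. then apply to the in-memory table
      return true;
    }

    void asyncFlush() throws IOException {
      wal.notifyStartFlush();          // 3. mark the log before queueing the memtable
      flushingMemTables.addLast(workMemTable);
      workMemTable = new ArrayDeque<>();
    }

    void flushOne() throws IOException {
      flushingMemTables.pollFirst();   // ... the real task writes this memtable to the TsFile ...
      wal.notifyEndFlush();            // 4. tell the log that the flushed entries are durable on disk
    }
  }
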
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/engine/memtable/AbstractMemTable.java b/iotdb/src/main/java/org/apache/iotdb/db/engine/memtable/AbstractMemTable.java
index 8a74fc6..a33dcdc 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/engine/memtable/AbstractMemTable.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/engine/memtable/AbstractMemTable.java
@@ -29,6 +29,7 @@ import org.apache.iotdb.db.engine.modification.Modification;
 import org.apache.iotdb.db.engine.querycontext.ReadOnlyMemChunk;
 import org.apache.iotdb.db.monitor.collector.MemTableWriteTimeCost;
 import org.apache.iotdb.db.monitor.collector.MemTableWriteTimeCost.MemTableWriteTimeCostType;
+import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
 import org.apache.iotdb.db.utils.MemUtils;
 import org.apache.iotdb.db.utils.TimeValuePair;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
@@ -80,13 +81,14 @@ public abstract class AbstractMemTable implements IMemTable {
   protected abstract IWritableMemChunk genMemSeries(TSDataType dataType);
 
 
-  public void insert(TSRecord tsRecord) {
-    for (DataPoint dataPoint : tsRecord.dataPointList) {
-      write(tsRecord.deviceId, dataPoint.getMeasurementId(), dataPoint.getType(),
-          tsRecord.time,
-          dataPoint.getValue().toString());
+  @Override
+  public void insert(InsertPlan insertPlan) {
+    for (int i = 0; i < insertPlan.getValues().length; i++) {
+      write(insertPlan.getDeviceId(), insertPlan.getMeasurements()[i],
+          insertPlan.getDataTypes()[i], insertPlan.getTime(), insertPlan.getValues()[i]);
+
     }
-    long recordSizeInByte = MemUtils.getRecordSize(tsRecord);
+    long recordSizeInByte = MemUtils.getRecordSize(insertPlan);
     memSize += recordSizeInByte;
   }
 
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/engine/memtable/IMemTable.java b/iotdb/src/main/java/org/apache/iotdb/db/engine/memtable/IMemTable.java
index 1caa846..e103dd0 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/engine/memtable/IMemTable.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/engine/memtable/IMemTable.java
@@ -21,6 +21,7 @@ package org.apache.iotdb.db.engine.memtable;
 import java.util.Map;
 import org.apache.iotdb.db.engine.modification.Deletion;
 import org.apache.iotdb.db.engine.querycontext.ReadOnlyMemChunk;
+import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
 import org.apache.iotdb.db.utils.MemUtils;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.utils.Binary;
@@ -54,7 +55,7 @@ public interface IMemTable {
    */
   long memSize();
 
-  void insert(TSRecord tsRecord);
+  void insert(InsertPlan insertPlan);
 
   ReadOnlyMemChunk query(String deviceId, String measurement, TSDataType dataType,
       Map<String, String> props);
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/qp/executor/IQueryProcessExecutor.java b/iotdb/src/main/java/org/apache/iotdb/db/qp/executor/IQueryProcessExecutor.java
index 6377f44..f284259 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/qp/executor/IQueryProcessExecutor.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/qp/executor/IQueryProcessExecutor.java
@@ -25,6 +25,7 @@ import org.apache.iotdb.db.exception.FileNodeManagerException;
 import org.apache.iotdb.db.exception.PathErrorException;
 import org.apache.iotdb.db.exception.ProcessorException;
 import org.apache.iotdb.db.qp.physical.PhysicalPlan;
+import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
 import org.apache.iotdb.db.query.context.QueryContext;
 import org.apache.iotdb.db.query.fill.IFill;
 import org.apache.iotdb.tsfile.exception.filter.QueryFilterOptimizationException;
@@ -106,26 +107,11 @@ public interface IQueryProcessExecutor {
   boolean delete(Path path, long deleteTime) throws ProcessorException;
 
   /**
-   * insert a single value. Only used in test
-   *
-   * @param path seriesPath to be inserted
-   * @param insertTime - it's time point but not a range
-   * @param value value to be inserted
-   * @return - Operate Type.
-   */
-  int insert(Path path, long insertTime, String value) throws ProcessorException;
-
-  /**
    * execute insert command and return whether the operator is successful.
    *
-   * @param deviceId deviceId to be inserted
-   * @param insertTime - it's time point but not a range
-   * @param measurementList measurements to be inserted
-   * @param insertValues values to be inserted
    * @return - Operate Type.
    */
-  int multiInsert(String deviceId, long insertTime, String[] measurementList,
-      String[] insertValues) throws ProcessorException;
+  int insert(InsertPlan insertPlan) throws ProcessorException;
 
   boolean judgePathExists(Path fullPath);
 
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/qp/executor/OverflowQPExecutor.java b/iotdb/src/main/java/org/apache/iotdb/db/qp/executor/OverflowQPExecutor.java
index 75f6e6f..17d79d3 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/qp/executor/OverflowQPExecutor.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/qp/executor/OverflowQPExecutor.java
@@ -35,7 +35,7 @@ import org.apache.iotdb.db.auth.entity.PathPrivilege;
 import org.apache.iotdb.db.auth.entity.PrivilegeType;
 import org.apache.iotdb.db.auth.entity.Role;
 import org.apache.iotdb.db.auth.entity.User;
-import org.apache.iotdb.db.engine.filenode.FileNodeManager;
+import org.apache.iotdb.db.engine.filenodeV2.FileNodeManagerV2;
 import org.apache.iotdb.db.exception.ArgsErrorException;
 import org.apache.iotdb.db.exception.FileNodeManagerException;
 import org.apache.iotdb.db.exception.PathErrorException;
@@ -58,7 +58,6 @@ import org.apache.iotdb.db.qp.physical.sys.MetadataPlan;
 import org.apache.iotdb.db.qp.physical.sys.PropertyPlan;
 import org.apache.iotdb.db.query.context.QueryContext;
 import org.apache.iotdb.db.query.dataset.AuthDataSet;
-import org.apache.iotdb.db.query.executor.EngineQueryRouter;
 import org.apache.iotdb.db.query.fill.IFill;
 import org.apache.iotdb.db.utils.AuthUtils;
 import org.apache.iotdb.db.utils.LoadDataUtils;
@@ -73,8 +72,6 @@ import org.apache.iotdb.tsfile.read.expression.IExpression;
 import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet;
 import org.apache.iotdb.tsfile.utils.Binary;
 import org.apache.iotdb.tsfile.utils.Pair;
-import org.apache.iotdb.tsfile.write.record.TSRecord;
-import org.apache.iotdb.tsfile.write.record.datapoint.DataPoint;
 import org.apache.iotdb.tsfile.write.schema.MeasurementSchema;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -83,11 +80,11 @@ public class OverflowQPExecutor extends QueryProcessExecutor {
 
   private static final Logger LOG = LoggerFactory.getLogger(OverflowQPExecutor.class);
 
-  private FileNodeManager fileNodeManager;
+  private FileNodeManagerV2 fileNodeManager;
   private MManager mManager = MManager.getInstance();
 
   public OverflowQPExecutor() {
-    fileNodeManager = FileNodeManager.getInstance();
+    fileNodeManager = FileNodeManagerV2.getInstance();
   }
 
   public static String checkValue(TSDataType dataType, String value) throws ProcessorException {
@@ -126,10 +123,7 @@ public class OverflowQPExecutor extends QueryProcessExecutor {
         }
         return flag;
       case INSERT:
-        InsertPlan insert = (InsertPlan) plan;
-        int result = multiInsert(insert.getDeviceId(), insert.getTime(), insert.getMeasurements(),
-            insert.getValues());
-        return result > 0;
+        return insert((InsertPlan)plan) == 0;
       case CREATE_ROLE:
       case DELETE_ROLE:
       case CREATE_USER:
@@ -230,10 +224,7 @@ public class OverflowQPExecutor extends QueryProcessExecutor {
       fileNodeManager.update(deviceId, measurementId, startTime, endTime, dataType, value);
       return true;
     } catch (PathErrorException e) {
-      throw new ProcessorException(e.getMessage());
-    } catch (FileNodeManagerException e) {
-      e.printStackTrace();
-      throw new ProcessorException(e.getMessage());
+      throw new ProcessorException(e);
     }
   }
 
@@ -254,35 +245,17 @@ public class OverflowQPExecutor extends QueryProcessExecutor {
     }
   }
 
-  @Override
-  // return 0: failed, 1: Overflow, 2:Bufferwrite
-  public int insert(Path path, long timestamp, String value) throws ProcessorException {
-    String deviceId = path.getDevice();
-    String measurementId = path.getMeasurement();
-
-    try {
-      TSDataType type = mManager.getSeriesType(deviceId + "," + measurementId);
-      TSRecord tsRecord = new TSRecord(timestamp, deviceId);
-      DataPoint dataPoint = DataPoint.getDataPoint(type, measurementId, value);
-      tsRecord.addTuple(dataPoint);
-      return fileNodeManager.insert(tsRecord, false);
-
-    } catch (PathErrorException e) {
-      throw new ProcessorException("Error in insert: " + e.getMessage());
-    } catch (FileNodeManagerException e) {
-      e.printStackTrace();
-      throw new ProcessorException(e);
-    }
-  }
 
   @Override
-  public int multiInsert(String deviceId, long insertTime, String[] measurementList,
-      String[] insertValues)
+  public int insert(InsertPlan insertPlan)
       throws ProcessorException {
-    try {
-      TSRecord tsRecord = new TSRecord(insertTime, deviceId);
 
-      MNode node = mManager.getNodeByDeviceIdFromCache(deviceId);
+    try {
+      String[] measurementList = insertPlan.getMeasurements();
+      String deviceId = insertPlan.getDeviceId();
+      MNode node = mManager.getNodeByDeviceIdFromCache(insertPlan.getDeviceId());
+      String[] values = insertPlan.getValues();
+      TSDataType[] dataTypes = new TSDataType[measurementList.length];
 
       for (int i = 0; i < measurementList.length; i++) {
         if (!node.hasChild(measurementList[i])) {
@@ -297,13 +270,11 @@ public class OverflowQPExecutor extends QueryProcessExecutor {
                   measurementList[i]));
         }
 
-        TSDataType dataType = measurementNode.getSchema().getType();
-        String value = insertValues[i];
-        value = checkValue(dataType, value);
-        DataPoint dataPoint = DataPoint.getDataPoint(dataType, measurementList[i], value);
-        tsRecord.addTuple(dataPoint);
+        dataTypes[i] = measurementNode.getSchema().getType();
+        values[i] = checkValue(dataTypes[i], values[i]);
       }
-      return fileNodeManager.insert(tsRecord, false);
+      insertPlan.setDataTypes(dataTypes);
+      return fileNodeManager.insert(insertPlan);
 
     } catch (PathErrorException | FileNodeManagerException e) {
       throw new ProcessorException(e.getMessage());
@@ -539,7 +510,8 @@ public class OverflowQPExecutor extends QueryProcessExecutor {
               fileNodeManager.deleteOneFileNode(deleteFileNode);
             }
             for (String closeFileNode : closeFileNodes) {
-              fileNodeManager.closeOneFileNode(closeFileNode);
+              // TODO add close file node method in FileNodeManager
+//              fileNodeManager.(closeFileNode);
             }
           }
           break;
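
Before the plan reaches FileNodeManagerV2, the executor resolves each measurement's TSDataType from the schema, normalizes the string values with checkValue, and stores the resolved types on the plan. A simplified sketch of that preparation step, using a plain Map in place of MManager (the map, the class name and the method name are illustrative only):

  import java.util.Map;
  import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
  import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;

  class InsertPreparationSketch {
    // schema maps "device.measurement" to its declared type; the real code walks the MManager tree
    static void prepare(InsertPlan plan, Map<String, TSDataType> schema) {
      String[] measurements = plan.getMeasurements();
      TSDataType[] dataTypes = new TSDataType[measurements.length];
      for (int i = 0; i < measurements.length; i++) {
        dataTypes[i] = schema.get(plan.getDeviceId() + "." + measurements[i]);
      }
      plan.setDataTypes(dataTypes);  // the engine and the WAL replayer rely on these resolved types
    }
  }
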
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/qp/physical/crud/InsertPlan.java b/iotdb/src/main/java/org/apache/iotdb/db/qp/physical/crud/InsertPlan.java
index 6d58365..24f2f1b 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/qp/physical/crud/InsertPlan.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/qp/physical/crud/InsertPlan.java
@@ -26,6 +26,7 @@ import java.util.Objects;
 import org.apache.iotdb.db.qp.logical.Operator;
 import org.apache.iotdb.db.qp.logical.Operator.OperatorType;
 import org.apache.iotdb.db.qp.physical.PhysicalPlan;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.read.common.Path;
 
 public class InsertPlan extends PhysicalPlan {
@@ -33,6 +34,7 @@ public class InsertPlan extends PhysicalPlan {
   private static final long serialVersionUID = 6102845312368561515L;
   private String deviceId;
   private String[] measurements;
+  private TSDataType[] dataTypes;
   private String[] values;
   private long time;
 
@@ -75,6 +77,14 @@ public class InsertPlan extends PhysicalPlan {
     this.time = time;
   }
 
+  public TSDataType[] getDataTypes() {
+    return dataTypes;
+  }
+
+  public void setDataTypes(TSDataType[] dataTypes) {
+    this.dataTypes = dataTypes;
+  }
+
   @Override
   public List<Path> getPaths() {
     List<Path> ret = new ArrayList<>();
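
The dataTypes array is the only new state on the plan and is not filled by the parser; it stays null until a schema-aware component (the executor above, or the log replayer later in this patch) calls setDataTypes. Together with the consolidated insert(InsertPlan) entry point, a minimal usage fragment based on the four-argument constructor used in LogicalPlanSmallTest further down (imports and the enclosing class are omitted; executor stands for any IQueryProcessExecutor implementation):

  void insertSketch(IQueryProcessExecutor executor) throws ProcessorException {
    InsertPlan plan = new InsertPlan("root.vehicle.d0", 10L, "s0", "10");
    // getDataTypes() would still be null here; a schema-aware caller resolves and sets the types
    plan.setDataTypes(new TSDataType[]{TSDataType.INT32});
    int operateType = executor.insert(plan);
  }
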
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/sync/receiver/SyncServiceImpl.java b/iotdb/src/main/java/org/apache/iotdb/db/sync/receiver/SyncServiceImpl.java
index 5d06a13..2eb4c46 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/sync/receiver/SyncServiceImpl.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/sync/receiver/SyncServiceImpl.java
@@ -42,7 +42,7 @@ import org.apache.commons.lang3.StringUtils;
 import org.apache.iotdb.db.concurrent.ThreadName;
 import org.apache.iotdb.db.conf.IoTDBConfig;
 import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.conf.directories.Directories;
+import org.apache.iotdb.db.conf.directories.DirectoryManager;
 import org.apache.iotdb.db.engine.filenode.FileNodeManager;
 import org.apache.iotdb.db.engine.filenode.OverflowChangeType;
 import org.apache.iotdb.db.engine.filenode.TsFileResource;
@@ -471,7 +471,7 @@ public class SyncServiceImpl implements SyncService.Iface {
         String relativePath = path.substring(header.length());
         TsFileResource fileNode = new TsFileResource(startTimeMap, endTimeMap,
             OverflowChangeType.NO_CHANGE, new File(
-            Directories.getInstance().getNextFolderIndexForTsFile() + File.separator + relativePath)
+            DirectoryManager.getInstance().getNextFolderIndexForTsFile() + File.separator + relativePath)
         );
         // call interface of load external file
         try {
@@ -557,7 +557,7 @@ public class SyncServiceImpl implements SyncService.Iface {
             }
           }
           if (insertExecutor
-              .multiInsert(deviceId, record.getTimestamp(), measurementList.toArray(new String[]{}),
+              .insert(deviceId, record.getTimestamp(), measurementList.toArray(new String[]{}),
                   insertValues.toArray(new String[]{})) <= 0) {
             throw new IOException("Inserting series data to IoTDB engine has failed.");
           }
@@ -634,7 +634,7 @@ public class SyncServiceImpl implements SyncService.Iface {
           /** If there has no overlap data with the timeseries, inserting all data in the sync file **/
           if (originDataPoints.isEmpty()) {
             for (InsertPlan insertPlan : newDataPoints) {
-              if (insertExecutor.multiInsert(insertPlan.getDeviceId(), insertPlan.getTime(),
+              if (insertExecutor.insert(insertPlan.getDeviceId(), insertPlan.getTime(),
                   insertPlan.getMeasurements(), insertPlan.getValues()) <= 0) {
                 throw new IOException("Inserting series data to IoTDB engine has failed.");
               }
@@ -643,7 +643,7 @@ public class SyncServiceImpl implements SyncService.Iface {
             /** Compare every data to get valid data **/
             for (InsertPlan insertPlan : newDataPoints) {
               if (!originDataPoints.contains(insertPlan)) {
-                if (insertExecutor.multiInsert(insertPlan.getDeviceId(), insertPlan.getTime(),
+                if (insertExecutor.insert(insertPlan.getDeviceId(), insertPlan.getTime(),
                     insertPlan.getMeasurements(), insertPlan.getValues()) <= 0) {
                   throw new IOException("Inserting series data to IoTDB engine has failed.");
                 }
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/utils/MemUtils.java b/iotdb/src/main/java/org/apache/iotdb/db/utils/MemUtils.java
index 65f3188..9d758a4 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/utils/MemUtils.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/utils/MemUtils.java
@@ -19,6 +19,7 @@
 package org.apache.iotdb.db.utils;
 
 import org.apache.iotdb.db.conf.IoTDBConstant;
+import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.utils.Binary;
 import org.apache.iotdb.tsfile.write.record.TSRecord;
@@ -44,56 +45,29 @@ public class MemUtils {
   /**
    * function for getting the record size.
    */
-  public static long getRecordSize(TSRecord record) {
+  public static long getRecordSize(InsertPlan insertPlan) {
     long memSize = 0;
-    for (DataPoint dataPoint : record.dataPointList) {
-      memSize += getPointSize(dataPoint);
+    for (int i = 0; i < insertPlan.getValues().length; i++) {
+      switch (insertPlan.getDataTypes()[i]) {
+        case INT32:
+          memSize += 8L + 4L; break;
+        case INT64:
+          memSize += 8L + 8L; break;
+        case FLOAT:
+          memSize += 8L + 4L; break;
+        case DOUBLE:
+          memSize += 8L + 8L; break;
+        case BOOLEAN:
+          memSize += 8L + 1L; break;
+        case TEXT:
+          memSize += 8L + insertPlan.getValues()[i].length() * 2; break;
+        default:
+          memSize += 8L + 8L;
+      }
     }
     return memSize;
   }
 
-  private static long getPointSize(DataPoint dataPoint) {
-    switch (dataPoint.getType()) {
-      case INT32:
-        return 8L + 4L;
-      case INT64:
-        return 8L + 8L;
-      case FLOAT:
-        return 8L + 4L;
-      case DOUBLE:
-        return 8L + 8L;
-      case BOOLEAN:
-        return 8L + 1L;
-      case TEXT:
-        return 8L + dataPoint.getValue().toString().length() * 2;
-      default:
-        return 8L + 8L;
-    }
-  }
-
-  /**
-   * @param value can be null if the type is not TEXT
-   */
-  public static long getPointSize(TSDataType type, String value) {
-    switch (type) {
-      case INT32:
-        return 8L + 4L;
-      case INT64:
-        return 8L + 8L;
-      case FLOAT:
-        return 8L + 4L;
-      case DOUBLE:
-        return 8L + 8L;
-      case BOOLEAN:
-        return 8L + 1L;
-      case TEXT:
-        return 8L + value.length() * 2;
-      default:
-        return 8L + 8L;
-    }
-  }
-
-
   /**
    * Calculate how much memory will be used if the given record is written to Bufferwrite.
    */
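
The rewritten estimate charges 8 bytes per point for the timestamp plus the width of the value: 4 bytes for INT32 and FLOAT, 8 for INT64 and DOUBLE, 1 for BOOLEAN, and 2 bytes per character for TEXT. For example, an InsertPlan carrying one INT64 value and one TEXT value "abc" is accounted as (8 + 8) + (8 + 3 * 2) = 30 bytes.
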
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/utils/OpenFileNumUtil.java b/iotdb/src/main/java/org/apache/iotdb/db/utils/OpenFileNumUtil.java
index 32d9b47..438226e 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/utils/OpenFileNumUtil.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/utils/OpenFileNumUtil.java
@@ -28,7 +28,7 @@ import java.util.List;
 import org.apache.iotdb.db.conf.IoTDBConfig;
 import org.apache.iotdb.db.conf.IoTDBConstant;
 import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.conf.directories.Directories;
+import org.apache.iotdb.db.conf.directories.DirectoryManager;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -48,7 +48,7 @@ public class OpenFileNumUtil {
   private static final String SEARCH_OPEN_DATA_FILE_BY_PID = "lsof -p %d";
 
   private static IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig();
-  private static Directories directories = Directories.getInstance();
+  private static DirectoryManager directoryManager = DirectoryManager.getInstance();
   private static final String[] COMMAND_TEMPLATE = {"/bin/bash", "-c", ""};
   private static boolean isOutputValid = false;
   private int pid;
@@ -256,7 +256,7 @@ public class OpenFileNumUtil {
   public enum OpenFileNumStatistics {
     TOTAL_OPEN_FILE_NUM(null),
     DATA_OPEN_FILE_NUM(Collections.singletonList(config.getDataDir())),
-    DELTA_OPEN_FILE_NUM(directories.getAllTsFileFolders()),
+    DELTA_OPEN_FILE_NUM(directoryManager.getAllTsFileFolders()),
     OVERFLOW_OPEN_FILE_NUM(Collections.singletonList(config.getOverflowDataDir())),
     WAL_OPEN_FILE_NUM(Collections.singletonList(config.getWalFolder())),
     METADATA_OPEN_FILE_NUM(Collections.singletonList(config.getMetadataDir())),
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/writelog/node/ExclusiveWriteLogNode.java b/iotdb/src/main/java/org/apache/iotdb/db/writelog/node/ExclusiveWriteLogNode.java
index 914a682..23cb319 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/writelog/node/ExclusiveWriteLogNode.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/writelog/node/ExclusiveWriteLogNode.java
@@ -25,7 +25,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
 import org.apache.commons.io.FileUtils;
 import org.apache.iotdb.db.conf.IoTDBConfig;
 import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.conf.directories.Directories;
+import org.apache.iotdb.db.conf.directories.DirectoryManager;
 import org.apache.iotdb.db.qp.physical.PhysicalPlan;
 import org.apache.iotdb.db.qp.physical.transfer.PhysicalPlanLogTransfer;
 import org.apache.iotdb.db.writelog.io.ILogReader;
@@ -66,7 +66,7 @@ public class ExclusiveWriteLogNode implements WriteLogNode, Comparable<Exclusive
   public ExclusiveWriteLogNode(String identifier) {
     this.identifier = identifier;
     this.logDirectory =
-        Directories.getInstance().getWALFolder() + File.separator + this.identifier;
+        DirectoryManager.getInstance().getWALFolder() + File.separator + this.identifier;
     new File(logDirectory).mkdirs();
   }
 
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/writelog/recover/LogReplayer.java b/iotdb/src/main/java/org/apache/iotdb/db/writelog/recover/LogReplayer.java
index 75e65c2..e074a31 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/writelog/recover/LogReplayer.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/writelog/recover/LogReplayer.java
@@ -23,6 +23,7 @@ import java.io.File;
 import java.io.IOException;
 import java.util.List;
 import org.apache.iotdb.db.engine.filenode.TsFileResource;
+import org.apache.iotdb.db.engine.filenodeV2.TsFileResourceV2;
 import org.apache.iotdb.db.engine.memtable.IMemTable;
 import org.apache.iotdb.db.engine.modification.Deletion;
 import org.apache.iotdb.db.engine.modification.ModificationFile;
@@ -51,7 +52,7 @@ public class LogReplayer {
   private String insertFilePath;
   private ModificationFile modFile;
   private VersionController versionController;
-  private TsFileResource currentTsFileResource;
+  private TsFileResourceV2 currentTsFileResource;
   // fileSchema is used to get the measurement data type
   private FileSchema fileSchema;
   private IMemTable recoverMemTable;
@@ -59,7 +60,7 @@ public class LogReplayer {
   public LogReplayer(String logNodePrefix, String insertFilePath,
       ModificationFile modFile,
       VersionController versionController,
-      TsFileResource currentTsFileResource,
+      TsFileResourceV2 currentTsFileResource,
       FileSchema fileSchema, IMemTable memTable) {
     this.logNodePrefix = logNodePrefix;
     this.insertFilePath = insertFilePath;
@@ -110,24 +111,20 @@ public class LogReplayer {
   }
 
   private void replayInsert(InsertPlan insertPlan) {
-    TSRecord tsRecord = new TSRecord(insertPlan.getTime(), insertPlan.getDeviceId());
     if (currentTsFileResource != null) {
       // the last chunk group may contain the same data with the logs, ignore such logs
-      if (currentTsFileResource.getEndTime(insertPlan.getDeviceId()) >= insertPlan.getTime()) {
+      if (currentTsFileResource.getEndTimeMap().get(insertPlan.getDeviceId()) >= insertPlan.getTime()) {
         return;
       }
       currentTsFileResource.updateTime(insertPlan.getDeviceId(), insertPlan.getTime());
     }
     String[] measurementList = insertPlan.getMeasurements();
-    String[] insertValues = insertPlan.getValues();
-
+    TSDataType[] dataTypes = new TSDataType[measurementList.length];
     for (int i = 0; i < measurementList.length; i++) {
-      TSDataType dataType = fileSchema.getMeasurementDataType(measurementList[i]);
-      String value = insertValues[i];
-      DataPoint dataPoint = DataPoint.getDataPoint(dataType, measurementList[i], value);
-      tsRecord.addTuple(dataPoint);
+      dataTypes[i] = fileSchema.getMeasurementDataType(measurementList[i]);
     }
-    recoverMemTable.insert(tsRecord);
+    insertPlan.setDataTypes(dataTypes);
+    recoverMemTable.insert(insertPlan);
   }
 
   private void replayUpdate(UpdatePlan updatePlan) {
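
During recovery the replayer now works on InsertPlans directly: a plan is skipped if its timestamp is not newer than the end time already recorded for its device in the partially written TsFile (those points were flushed before the crash), the data types are restored from the FileSchema because the WAL does not carry them, and the plan is re-applied to the recovery memtable. A condensed restatement of replayInsert using only the accessors visible in this diff; the null check on the end-time lookup is an extra guard added for the sketch, while the committed code assumes the device already has an entry:

  private void replayInsertSketch(InsertPlan plan) {
    if (currentTsFileResource != null) {
      Long endTime = currentTsFileResource.getEndTimeMap().get(plan.getDeviceId());
      // skip data that was already persisted in the last chunk group before the crash
      if (endTime != null && endTime >= plan.getTime()) {
        return;
      }
      currentTsFileResource.updateTime(plan.getDeviceId(), plan.getTime());
    }
    String[] measurements = plan.getMeasurements();
    TSDataType[] dataTypes = new TSDataType[measurements.length];
    for (int i = 0; i < measurements.length; i++) {
      dataTypes[i] = fileSchema.getMeasurementDataType(measurements[i]);  // types are not stored in the WAL
    }
    plan.setDataTypes(dataTypes);
    recoverMemTable.insert(plan);
  }
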
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/writelog/recover/SeqTsFileRecoverPerformer.java b/iotdb/src/main/java/org/apache/iotdb/db/writelog/recover/SeqTsFileRecoverPerformer.java
index 339a39a..ab0736e 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/writelog/recover/SeqTsFileRecoverPerformer.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/writelog/recover/SeqTsFileRecoverPerformer.java
@@ -24,6 +24,7 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import java.nio.channels.FileChannel;
 import org.apache.iotdb.db.engine.filenode.TsFileResource;
+import org.apache.iotdb.db.engine.filenodeV2.TsFileResourceV2;
 import org.apache.iotdb.db.engine.memtable.IMemTable;
 import org.apache.iotdb.db.engine.memtable.MemTableFlushTask;
 import org.apache.iotdb.db.engine.memtable.PrimitiveMemTable;
@@ -46,12 +47,12 @@ public class SeqTsFileRecoverPerformer {
   private FileSchema fileSchema;
   private VersionController versionController;
   private LogReplayer logReplayer;
-  private TsFileResource tsFileResource;
+  private TsFileResourceV2 tsFileResource;
 
   public SeqTsFileRecoverPerformer(String logNodePrefix,
       FileSchema fileSchema, VersionController versionController,
-      TsFileResource currentTsFileResource) {
-    this.insertFilePath = currentTsFileResource.getFilePath();
+      TsFileResourceV2 currentTsFileResource) {
+    this.insertFilePath = currentTsFileResource.getFile().getPath();
     this.logNodePrefix = logNodePrefix;
     this.fileSchema = fileSchema;
     this.versionController = versionController;
diff --git a/iotdb/src/test/java/org/apache/iotdb/db/engine/PathUtils.java b/iotdb/src/test/java/org/apache/iotdb/db/engine/PathUtils.java
index 315586b..3fb8826 100644
--- a/iotdb/src/test/java/org/apache/iotdb/db/engine/PathUtils.java
+++ b/iotdb/src/test/java/org/apache/iotdb/db/engine/PathUtils.java
@@ -21,15 +21,15 @@ package org.apache.iotdb.db.engine;
 import java.io.File;
 import org.apache.iotdb.db.conf.IoTDBConfig;
 import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.conf.directories.Directories;
+import org.apache.iotdb.db.conf.directories.DirectoryManager;
 
 public class PathUtils {
 
   private static IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig();
-  private static Directories directories = Directories.getInstance();
+  private static DirectoryManager directoryManager = DirectoryManager.getInstance();
 
   public static File getBufferWriteDir(String nameSpacePath) {
-    String bufferwriteDirPath = directories.getFolderForTest();
+    String bufferwriteDirPath = directoryManager.getFolderForTest();
     if (bufferwriteDirPath.length() > 0
         && bufferwriteDirPath.charAt(bufferwriteDirPath.length() - 1) != File.separatorChar) {
       bufferwriteDirPath = bufferwriteDirPath + File.separatorChar;
diff --git a/iotdb/src/test/java/org/apache/iotdb/db/engine/bufferwrite/BufferWriteProcessorNewTest.java b/iotdb/src/test/java/org/apache/iotdb/db/engine/bufferwrite/BufferWriteProcessorNewTest.java
index 400a4d8..43930b2 100644
--- a/iotdb/src/test/java/org/apache/iotdb/db/engine/bufferwrite/BufferWriteProcessorNewTest.java
+++ b/iotdb/src/test/java/org/apache/iotdb/db/engine/bufferwrite/BufferWriteProcessorNewTest.java
@@ -31,7 +31,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
-import org.apache.iotdb.db.conf.directories.Directories;
+import org.apache.iotdb.db.conf.directories.DirectoryManager;
 import org.apache.iotdb.db.engine.MetadataManagerHelper;
 import org.apache.iotdb.db.engine.querycontext.ReadOnlyMemChunk;
 import org.apache.iotdb.db.engine.version.SysTimeVersionController;
@@ -93,7 +93,7 @@ public class BufferWriteProcessorNewTest {
   @Test
   public void testWriteAndFlush()
       throws BufferWriteProcessorException, WriteProcessException, IOException, InterruptedException {
-    bufferwrite = new BufferWriteProcessor(Directories.getInstance().getFolderForTest(),
+    bufferwrite = new BufferWriteProcessor(DirectoryManager.getInstance().getFolderForTest(),
         processorName, filename,
         parameters, x->{},  SysTimeVersionController.INSTANCE,
         FileSchemaUtils.constructFileSchema(processorName));
@@ -148,7 +148,7 @@ public class BufferWriteProcessorNewTest {
 
     // test recovery
     BufferWriteProcessor bufferWriteProcessor = new BufferWriteProcessor(
-        Directories.getInstance().getFolderForTest(), processorName, filename, parameters, x->{},
+        DirectoryManager.getInstance().getFolderForTest(), processorName, filename, parameters, x->{},
         SysTimeVersionController.INSTANCE,
         FileSchemaUtils.constructFileSchema(processorName));
     pair = bufferWriteProcessor.queryBufferWriteData(processorName, measurementId, dataType, props);
diff --git a/iotdb/src/test/java/org/apache/iotdb/db/engine/bufferwrite/BufferWriteProcessorTest.java b/iotdb/src/test/java/org/apache/iotdb/db/engine/bufferwrite/BufferWriteProcessorTest.java
index 28ecf56..3fd05a4 100644
--- a/iotdb/src/test/java/org/apache/iotdb/db/engine/bufferwrite/BufferWriteProcessorTest.java
+++ b/iotdb/src/test/java/org/apache/iotdb/db/engine/bufferwrite/BufferWriteProcessorTest.java
@@ -36,7 +36,7 @@ import java.util.Map;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 import java.util.function.Consumer;
-import org.apache.iotdb.db.conf.directories.Directories;
+import org.apache.iotdb.db.conf.directories.DirectoryManager;
 import org.apache.iotdb.db.engine.MetadataManagerHelper;
 import org.apache.iotdb.db.engine.PathUtils;
 import org.apache.iotdb.db.engine.querycontext.ReadOnlyMemChunk;
@@ -92,7 +92,7 @@ public class BufferWriteProcessorTest {
   private TSFileConfig TsFileConf = TSFileDescriptor.getInstance().getConfig();
   private Map<String, Action> parameters = new HashMap<>();
   private BufferWriteProcessor bufferwrite;
-  private Directories directories = Directories.getInstance();
+  private DirectoryManager directoryManager = DirectoryManager.getInstance();
   private String deviceId = "root.vehicle.d0";
   private String measurementId = "s0";
   private TSDataType dataType = TSDataType.INT32;
@@ -124,7 +124,7 @@ public class BufferWriteProcessorTest {
   @Test
   public void testWriteAndAbnormalRecover()
       throws WriteProcessException, InterruptedException, IOException, ProcessorException {
-    bufferwrite = new BufferWriteProcessor(directories.getFolderForTest(), deviceId, insertPath,
+    bufferwrite = new BufferWriteProcessor(directoryManager.getFolderForTest(), deviceId, insertPath,
         parameters, bfcloseConsumer, SysTimeVersionController.INSTANCE,
         FileSchemaUtils.constructFileSchema(deviceId));
     for (int i = 1; i < 100; i++) {
@@ -154,7 +154,7 @@ public class BufferWriteProcessorTest {
     }
     file.renameTo(restoreFile);
     BufferWriteProcessor bufferWriteProcessor = new BufferWriteProcessor(
-        directories.getFolderForTest(), deviceId,
+        directoryManager.getFolderForTest(), deviceId,
         insertPath, parameters, bfcloseConsumer, SysTimeVersionController.INSTANCE,
         FileSchemaUtils.constructFileSchema(deviceId));
     assertTrue(insertFile.exists());
@@ -178,7 +178,7 @@ public class BufferWriteProcessorTest {
   @Test
   public void testWriteAndNormalRecover()
       throws WriteProcessException, ProcessorException, InterruptedException {
-    bufferwrite = new BufferWriteProcessor(directories.getFolderForTest(), deviceId, insertPath,
+    bufferwrite = new BufferWriteProcessor(directoryManager.getFolderForTest(), deviceId, insertPath,
         parameters, bfcloseConsumer, SysTimeVersionController.INSTANCE,
         FileSchemaUtils.constructFileSchema(deviceId));
     for (int i = 1; i < 100; i++) {
@@ -192,7 +192,7 @@ public class BufferWriteProcessorTest {
     File restoreFile = new File(dataFile, restoreFilePath);
     assertTrue(restoreFile.exists());
     BufferWriteProcessor bufferWriteProcessor = new BufferWriteProcessor(
-        directories.getFolderForTest(), deviceId,
+        directoryManager.getFolderForTest(), deviceId,
         insertPath, parameters, bfcloseConsumer, SysTimeVersionController.INSTANCE,
         FileSchemaUtils.constructFileSchema(deviceId));
     Pair<ReadOnlyMemChunk, List<ChunkMetaData>> pair = bufferWriteProcessor
@@ -215,7 +215,7 @@ public class BufferWriteProcessorTest {
   @Test
   public void testWriteAndQuery()
       throws WriteProcessException, InterruptedException, ProcessorException {
-    bufferwrite = new BufferWriteProcessor(directories.getFolderForTest(), deviceId, insertPath,
+    bufferwrite = new BufferWriteProcessor(directoryManager.getFolderForTest(), deviceId, insertPath,
         parameters, bfcloseConsumer, SysTimeVersionController.INSTANCE,
         FileSchemaUtils.constructFileSchema(deviceId));
     assertTrue(bufferwrite.canBeClosed());
diff --git a/iotdb/src/test/java/org/apache/iotdb/db/engine/memcontrol/BufferwriteFileSizeControlTest.java b/iotdb/src/test/java/org/apache/iotdb/db/engine/memcontrol/BufferwriteFileSizeControlTest.java
index 227bc7b..89fa997 100644
--- a/iotdb/src/test/java/org/apache/iotdb/db/engine/memcontrol/BufferwriteFileSizeControlTest.java
+++ b/iotdb/src/test/java/org/apache/iotdb/db/engine/memcontrol/BufferwriteFileSizeControlTest.java
@@ -28,7 +28,7 @@ import java.util.Map;
 import java.util.function.Consumer;
 import org.apache.iotdb.db.conf.IoTDBConfig;
 import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.conf.directories.Directories;
+import org.apache.iotdb.db.conf.directories.DirectoryManager;
 import org.apache.iotdb.db.engine.MetadataManagerHelper;
 import org.apache.iotdb.db.engine.PathUtils;
 import org.apache.iotdb.db.engine.bufferwrite.Action;
@@ -137,7 +137,7 @@ public class BufferwriteFileSizeControlTest {
     parameters.put(FileNodeConstants.FILENODE_PROCESSOR_FLUSH_ACTION, fnflushaction);
 
     try {
-      processor = new BufferWriteProcessor(Directories.getInstance().getFolderForTest(), nsp,
+      processor = new BufferWriteProcessor(DirectoryManager.getInstance().getFolderForTest(), nsp,
           filename,
           parameters, bfcloseConsumer, SysTimeVersionController.INSTANCE,
           FileSchemaUtils.constructFileSchema(nsp));
diff --git a/iotdb/src/test/java/org/apache/iotdb/db/engine/memcontrol/BufferwriteMetaSizeControlTest.java b/iotdb/src/test/java/org/apache/iotdb/db/engine/memcontrol/BufferwriteMetaSizeControlTest.java
index 9ec6019..35c466f 100644
--- a/iotdb/src/test/java/org/apache/iotdb/db/engine/memcontrol/BufferwriteMetaSizeControlTest.java
+++ b/iotdb/src/test/java/org/apache/iotdb/db/engine/memcontrol/BufferwriteMetaSizeControlTest.java
@@ -27,7 +27,7 @@ import java.util.HashMap;
 import java.util.Map;
 import org.apache.iotdb.db.conf.IoTDBConfig;
 import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.conf.directories.Directories;
+import org.apache.iotdb.db.conf.directories.DirectoryManager;
 import org.apache.iotdb.db.engine.MetadataManagerHelper;
 import org.apache.iotdb.db.engine.PathUtils;
 import org.apache.iotdb.db.engine.bufferwrite.Action;
@@ -135,7 +135,7 @@ public class BufferwriteMetaSizeControlTest {
     parameters.put(FileNodeConstants.FILENODE_PROCESSOR_FLUSH_ACTION, fnflushaction);
 
     try {
-      processor = new BufferWriteProcessor(Directories.getInstance().getFolderForTest(), nsp,
+      processor = new BufferWriteProcessor(DirectoryManager.getInstance().getFolderForTest(), nsp,
           filename,
           parameters, x->{}, SysTimeVersionController.INSTANCE, FileSchemaUtils.constructFileSchema(nsp));
     } catch (BufferWriteProcessorException e) {
diff --git a/iotdb/src/test/java/org/apache/iotdb/db/engine/modification/DeletionFileNodeTest.java b/iotdb/src/test/java/org/apache/iotdb/db/engine/modification/DeletionFileNodeTest.java
index 03d3e57..55f505f 100644
--- a/iotdb/src/test/java/org/apache/iotdb/db/engine/modification/DeletionFileNodeTest.java
+++ b/iotdb/src/test/java/org/apache/iotdb/db/engine/modification/DeletionFileNodeTest.java
@@ -30,7 +30,7 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.Iterator;
 import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.conf.directories.Directories;
+import org.apache.iotdb.db.conf.directories.DirectoryManager;
 import org.apache.iotdb.db.engine.filenode.FileNodeManager;
 import org.apache.iotdb.db.engine.modification.io.LocalTextModificationAccessor;
 import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
@@ -144,7 +144,7 @@ public class DeletionFileNodeTest {
         new Deletion(processorName + "." + measurements[3], 104, 30),
     };
 
-    String fileNodePath = Directories.getInstance().getTsFileFolder(0) + File.separator
+    String fileNodePath = DirectoryManager.getInstance().getTsFileFolder(0) + File.separator
         + processorName;
     File fileNodeDir = new File(fileNodePath);
     File[] modFiles = fileNodeDir.listFiles((dir, name)
diff --git a/iotdb/src/test/java/org/apache/iotdb/db/qp/plan/LogicalPlanSmallTest.java b/iotdb/src/test/java/org/apache/iotdb/db/qp/plan/LogicalPlanSmallTest.java
index f8786a9..1740df1 100644
--- a/iotdb/src/test/java/org/apache/iotdb/db/qp/plan/LogicalPlanSmallTest.java
+++ b/iotdb/src/test/java/org/apache/iotdb/db/qp/plan/LogicalPlanSmallTest.java
@@ -28,6 +28,7 @@ import org.apache.iotdb.db.exception.qp.QueryProcessorException;
 import org.apache.iotdb.db.qp.logical.RootOperator;
 import org.apache.iotdb.db.qp.logical.crud.QueryOperator;
 import org.apache.iotdb.db.qp.logical.crud.SFWOperator;
+import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
 import org.apache.iotdb.db.qp.strategy.LogicalGenerator;
 import org.apache.iotdb.db.qp.strategy.optimizer.ConcatPathOptimizer;
 import org.apache.iotdb.db.qp.utils.MemIntQpExecutor;
@@ -146,10 +147,10 @@ public class LogicalPlanSmallTest {
     Path path4 = new Path(
         new StringContainer(new String[]{"root", "vehicle", "d4", "s1"},
             SystemConstant.PATH_SEPARATOR));
-    executor.insert(path1, 10, "10");
-    executor.insert(path2, 10, "10");
-    executor.insert(path3, 10, "10");
-    executor.insert(path4, 10, "10");
+    executor.insert(new InsertPlan(path1.getDevice(), 10, path1.getMeasurement(), "10"));
+    executor.insert(new InsertPlan(path2.getDevice(), 10, path2.getMeasurement(), "10"));
+    executor.insert(new InsertPlan(path3.getDevice(), 10, path3.getMeasurement(), "10"));
+    executor.insert(new InsertPlan(path4.getDevice(), 10, path4.getMeasurement(), "10"));
     ConcatPathOptimizer concatPathOptimizer = new ConcatPathOptimizer(executor);
     operator = (SFWOperator) concatPathOptimizer.transform(operator);
     // expected to throw LogicalOptimizeException: Wrong use of SLIMIT: SLIMIT is not allowed to be used with
diff --git a/iotdb/src/test/java/org/apache/iotdb/db/qp/utils/MemIntQpExecutor.java b/iotdb/src/test/java/org/apache/iotdb/db/qp/utils/MemIntQpExecutor.java
index 7afced3..d512baf 100644
--- a/iotdb/src/test/java/org/apache/iotdb/db/qp/utils/MemIntQpExecutor.java
+++ b/iotdb/src/test/java/org/apache/iotdb/db/qp/utils/MemIntQpExecutor.java
@@ -27,6 +27,7 @@ import java.util.Map.Entry;
 import java.util.TreeMap;
 import java.util.TreeSet;
 
+import org.apache.iotdb.db.conf.IoTDBConstant;
 import org.apache.iotdb.db.exception.FileNodeManagerException;
 import org.apache.iotdb.db.exception.PathErrorException;
 import org.apache.iotdb.db.exception.ProcessorException;
@@ -38,7 +39,6 @@ import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
 import org.apache.iotdb.db.qp.physical.crud.UpdatePlan;
 import org.apache.iotdb.db.qp.physical.sys.AuthorPlan;
 import org.apache.iotdb.db.query.context.QueryContext;
-import org.apache.iotdb.db.query.executor.EngineQueryRouter;
 import org.apache.iotdb.db.query.fill.IFill;
 import org.apache.iotdb.tsfile.exception.filter.QueryFilterOptimizationException;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
@@ -101,10 +101,7 @@ public class MemIntQpExecutor extends QueryProcessExecutor {
         }
         return flag;
       case INSERT:
-        InsertPlan insert = (InsertPlan) plan;
-        int result = multiInsert(insert.getDeviceId(), insert.getTime(), insert.getMeasurements(),
-            insert.getValues());
-        return result == 0;
+        return insert((InsertPlan) plan) == 0;
       default:
         throw new UnsupportedOperationException();
     }
@@ -181,18 +178,6 @@ public class MemIntQpExecutor extends QueryProcessExecutor {
   }
 
   @Override
-  public int insert(Path path, long insertTime, String value) {
-    String strPath = path.toString();
-    if (!demoMemDataBase.containsKey(strPath)) {
-      demoMemDataBase.put(strPath, new TestSeries());
-    }
-    demoMemDataBase.get(strPath).data.put(insertTime, Integer.valueOf(value));
-    timeStampUnion.add(insertTime);
-    LOG.info("insert into {}:<{},{}>", path, insertTime, value);
-    return 0;
-  }
-
-  @Override
   public List<String> getAllPaths(String fullPath) {
     return fakeAllPaths != null ? fakeAllPaths.get(fullPath) : new ArrayList<String>() {
       {
@@ -202,8 +187,15 @@ public class MemIntQpExecutor extends QueryProcessExecutor {
   }
 
   @Override
-  public int multiInsert(String deviceId, long insertTime, String[] measurementList,
-      String[] insertValues) {
+  public int insert(InsertPlan insertPlan) {
+    for (int i = 0; i < insertPlan.getMeasurements().length; i++) {
+      String strPath = insertPlan.getDeviceId() + IoTDBConstant.PATH_SEPARATOR + insertPlan.getMeasurements()[i];
+      if (!demoMemDataBase.containsKey(strPath)) {
+        demoMemDataBase.put(strPath, new TestSeries());
+      }
+      demoMemDataBase.get(strPath).data.put(insertPlan.getTime(), Integer.valueOf(insertPlan.getValues()[i]));
+      timeStampUnion.add(insertPlan.getTime());
+    }
     return 0;
   }
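
For a plan on device root.vehicle.d0 with measurements s0 and s1, this in-memory test executor now stores the values under the series keys root.vehicle.d0.s0 and root.vehicle.d0.s1 (assuming IoTDBConstant.PATH_SEPARATOR is the usual dot) and records the shared timestamp once in timeStampUnion.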
 
diff --git a/iotdb/src/test/java/org/apache/iotdb/db/utils/EnvironmentUtils.java b/iotdb/src/test/java/org/apache/iotdb/db/utils/EnvironmentUtils.java
index edb6a20..e757b6c 100644
--- a/iotdb/src/test/java/org/apache/iotdb/db/utils/EnvironmentUtils.java
+++ b/iotdb/src/test/java/org/apache/iotdb/db/utils/EnvironmentUtils.java
@@ -25,7 +25,7 @@ import org.apache.iotdb.db.auth.authorizer.IAuthorizer;
 import org.apache.iotdb.db.auth.authorizer.LocalFileAuthorizer;
 import org.apache.iotdb.db.conf.IoTDBConfig;
 import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.conf.directories.Directories;
+import org.apache.iotdb.db.conf.directories.DirectoryManager;
 import org.apache.iotdb.db.engine.cache.RowGroupBlockMetaDataCache;
 import org.apache.iotdb.db.engine.cache.TsFileMetaDataCache;
 import org.apache.iotdb.db.engine.filenode.FileNodeManager;
@@ -57,7 +57,7 @@ public class EnvironmentUtils {
   private static final Logger LOGGER = LoggerFactory.getLogger(EnvironmentUtils.class);
 
   private static IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig();
-  private static Directories directories = Directories.getInstance();
+  private static DirectoryManager directoryManager = DirectoryManager.getInstance();
   private static TSFileConfig tsfileConfig = TSFileDescriptor.getInstance().getConfig();
 
   public static long TEST_QUERY_JOB_ID = QueryResourceManager.getInstance().assignJobId();
@@ -99,7 +99,7 @@ public class EnvironmentUtils {
 
   private static void cleanAllDir() throws IOException {
     // delete bufferwrite
-    for (String path : directories.getAllTsFileFolders()) {
+    for (String path : directoryManager.getAllTsFileFolders()) {
       cleanDir(path);
     }
     // delete overflow
@@ -179,7 +179,7 @@ public class EnvironmentUtils {
 
   private static void createAllDir() throws IOException {
     // create bufferwrite
-    for (String path : directories.getAllTsFileFolders()) {
+    for (String path : directoryManager.getAllTsFileFolders()) {
       createDir(path);
     }
     // create overflow


[incubator-iotdb] 02/03: add unseq doc

Posted by qi...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

qiaojialin pushed a commit to branch feature_async_close_tsfile
in repository https://gitbox.apache.org/repos/asf/incubator-iotdb.git

commit 9e4acf84086a8bcf7314daa9dc2f3b3e5e899d6f
Author: qiaojialin <64...@qq.com>
AuthorDate: Sat Jun 22 16:54:27 2019 +0800

    add unseq doc
---
 iotdb/iotdb/conf/iotdb-engine.properties                | 17 +++++++++++++++--
 .../main/java/org/apache/iotdb/db/conf/IoTDBConfig.java |  1 +
 .../iotdb/db/engine/filenodeV2/FileNodeProcessorV2.java |  2 +-
 3 files changed, 17 insertions(+), 3 deletions(-)

diff --git a/iotdb/iotdb/conf/iotdb-engine.properties b/iotdb/iotdb/conf/iotdb-engine.properties
index 6752691..5f04f08 100644
--- a/iotdb/iotdb/conf/iotdb-engine.properties
+++ b/iotdb/iotdb/conf/iotdb-engine.properties
@@ -61,8 +61,8 @@ force_wal_period_in_ms=10
 # If its prefix is "/", then the path is absolute. Otherwise, it is relative.
 # data_dir=/path/iotdb/data/data
 
-# tsfile dir
-# For this property, multiple directories should be set, and all directories should be separated by ",". All TsFiles will be allocated separately in all these directories. Moreover, setting absolute directories is suggested.
+# sequence tsfile dir
+# For this property, multiple directories can be set, and all directories should be separated by ",". All TsFiles will be allocated separately in all these directories. Moreover, setting absolute directories is suggested.
 # If this property is unset, system will save the TsFiles in the default relative path directory under the data_dir folder(i.e., %IOTDB_HOME%/data/data/settled).
 # If some are absolute, system will save the data in exact location they point to.
 # If some are relative, system will save the data in the relative path directory they indicate under the data_dir folder.
@@ -74,6 +74,19 @@ force_wal_period_in_ms=10
 # If its prefix is "/", then the path is absolute. Otherwise, it is relative.
 # tsfile_dir=/path/iotdb/data/data,data/data
 
+# unSequence tsfile dir
+# For this property, multiple directories can be set, and all directories should be separated by ",". All Overflow Files will be allocated separately in all these directories. Moreover, setting absolute directories is suggested.
+# If this property is unset, system will save the Overflow Files in the default relative path directory under the data_dir folder(i.e., %IOTDB_HOME%/data/data/overflow).
+# If some are absolute, system will save the data in exact location they point to.
+# If some are relative, system will save the data in the relative path directory they indicate under the data_dir folder.
+# Note: If some are assigned an empty string(i.e.,zero-length), they will be handled as a relative path.
+# For windows platform
+# If its prefix is a drive specifier followed by "\\", or if its prefix is "\\\\", then the path is absolute. Otherwise, it is relative.
+# overflow_dir=D:\\iotdb\\data\\unseq,overflow2
+# For Linux platform
+# If its prefix is "/", then the path is absolute. Otherwise, it is relative.
+# overflow_dir=/path/iotdb/data/unseq1,overflow2
+
 # mult_dir_strategy
 # The strategy is used to choose a directory from tsfile_dir for the system to store a new tsfile.
 # System provides three strategies to choose from, or user can create his own strategy by extending org.apache.iotdb.db.conf.directories.strategy.DirectoryStrategy.
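
Putting the two settings together, a configuration that spreads sequence and unsequence files over two disks might look like the following (the paths are placeholders, not shipped defaults):

  tsfile_dir=/disk1/iotdb/data/seq,/disk2/iotdb/data/seq
  overflow_dir=/disk1/iotdb/data/unseq,/disk2/iotdb/data/unseq
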
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java b/iotdb/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java
index a2ce8ae..991b5ae 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java
@@ -34,6 +34,7 @@ public class IoTDBConfig {
   public static final String DEFAULT_DATA_DIR = "data";
   public static final String DEFAULT_SYS_DIR = "system";
   public static final String DEFAULT_TSFILE_DIR = "settled";
+  public static final String DEFAULT_OVERFLOW_DIR = "unorder";
   public static final String MULT_DIR_STRATEGY_PREFIX =
       "org.apache.iotdb.db.conf.directories.strategy.";
   public static final String DEFAULT_MULT_DIR_STRATEGY = "MaxDiskUsableSpaceFirstStrategy";
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/engine/filenodeV2/FileNodeProcessorV2.java b/iotdb/src/main/java/org/apache/iotdb/db/engine/filenodeV2/FileNodeProcessorV2.java
index 2ae30c5..12fa2d9 100755
--- a/iotdb/src/main/java/org/apache/iotdb/db/engine/filenodeV2/FileNodeProcessorV2.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/engine/filenodeV2/FileNodeProcessorV2.java
@@ -135,7 +135,7 @@ public class FileNodeProcessorV2 {
       }
     }
 
-    Collections.sort(tsfiles, );
+//    Collections.sort(tsfiles, );
 
     for (String tsfile: tsfiles) {
       TsFileResourceV2 tsFileResource = new TsFileResourceV2(new File(tsfile));
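
The ordering of the recovered TsFile paths is left as a TODO in this commit: the comparator argument of Collections.sort was never supplied, so the call above is commented out to keep the class compiling. One possible completion, stated purely as an assumption about the intent and not taken from the commit, is to order the paths by file name, which yields chronological order if the names start with a monotonically increasing prefix such as a creation timestamp:

  // hypothetical completion of the TODO above; the ordering criterion is assumed, not part of the commit
  tsfiles.sort(java.util.Comparator.comparing((String path) -> new java.io.File(path).getName()));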