Posted to commits@iotdb.apache.org by hx...@apache.org on 2019/04/20 15:43:40 UTC

[incubator-iotdb] branch refactor_bufferwrite updated: remove unused files

This is an automated email from the ASF dual-hosted git repository.

hxd pushed a commit to branch refactor_bufferwrite
in repository https://gitbox.apache.org/repos/asf/incubator-iotdb.git


The following commit(s) were added to refs/heads/refactor_bufferwrite by this push:
     new 9f94c15  remove unused files
9f94c15 is described below

commit 9f94c154fe7b7801cf718f3492328e3814545531
Author: xiangdong huang <sa...@gmail.com>
AuthorDate: Sat Apr 20 23:43:23 2019 +0800

    remove unused files
---
 .../engine/bufferwrite/BufferWriteProcessor.java   |   2 -
 .../iotdb/db/engine/filenode/FileNodeManager.java  |   6 +-
 .../db/engine/overflow/OverflowProcessor.java      |  29 -
 .../db/engine/overflow/io/OverflowProcessor2.java  | 707 ---------------------
 .../iotdb/db/engine/tsfiledata/TsFileResource.java | 305 ---------
 .../org/apache/iotdb/db/experiments/PerfTest.java  | 135 ----
 .../apache/iotdb/db/integration/IoTDBDaemonIT.java |   1 -
 7 files changed, 4 insertions(+), 1181 deletions(-)

diff --git a/iotdb/src/main/java/org/apache/iotdb/db/engine/bufferwrite/BufferWriteProcessor.java b/iotdb/src/main/java/org/apache/iotdb/db/engine/bufferwrite/BufferWriteProcessor.java
index 05cffc4..5e6935c 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/engine/bufferwrite/BufferWriteProcessor.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/engine/bufferwrite/BufferWriteProcessor.java
@@ -81,8 +81,6 @@ public class BufferWriteProcessor extends Processor {
   private String insertFilePath;
   private String bufferWriteRelativePath;
 
-  private List<TsFileResource> tsFileResources;
-
   private WriteLogNode logNode;
   private VersionController versionController;
 
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/engine/filenode/FileNodeManager.java b/iotdb/src/main/java/org/apache/iotdb/db/engine/filenode/FileNodeManager.java
index 048eebf..340e0f0 100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/engine/filenode/FileNodeManager.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/engine/filenode/FileNodeManager.java
@@ -570,14 +570,16 @@ public class FileNodeManager implements IStatistic, IService {
       Iterator<Map.Entry<String, FileNodeProcessor>> processorIterator)
       throws FileNodeManagerException {
     if (!processorMap.containsKey(processorName)) {
+      // TODO: do we need to call processorIterator.remove()?
       LOGGER.warn("The processorMap doesn't contain the filenode processor {}.", processorName);
       return;
     }
     LOGGER.info("Try to delete the filenode processor {}.", processorName);
     FileNodeProcessor processor = processorMap.get(processorName);
     if (!processor.tryWriteLock()) {
-      LOGGER.warn("Can't get the write lock of the filenode processor {}.", processorName);
-      return;
+      throw new FileNodeManagerException(String
+          .format("Cannot delete the filenode processor %s because the write lock cannot be acquired.",
+              processorName));
     }
 
     try {
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/engine/overflow/OverflowProcessor.java b/iotdb/src/main/java/org/apache/iotdb/db/engine/overflow/OverflowProcessor.java
deleted file mode 100644
index fdccb80..0000000
--- a/iotdb/src/main/java/org/apache/iotdb/db/engine/overflow/OverflowProcessor.java
+++ /dev/null
@@ -1,29 +0,0 @@
-package org.apache.iotdb.db.engine.overflow;
-
-import java.io.IOException;
-import org.apache.iotdb.db.engine.bufferwrite.Action;
-import org.apache.iotdb.db.engine.tsfiledata.TsFileProcessor;
-import org.apache.iotdb.db.engine.version.VersionController;
-import org.apache.iotdb.db.exception.BufferWriteProcessorException;
-import org.apache.iotdb.tsfile.write.schema.FileSchema;
-
-public class OverflowProcessor extends TsFileProcessor {
-
-  /**
-   * constructor of BufferWriteProcessor. data will be stored in baseDir/processorName/ folder.
-   *
-   * @param processorName processor name
-   * @param fileSchemaRef file schema
-   * @throws BufferWriteProcessorException BufferWriteProcessorException
-   */
-  public OverflowProcessor(String processorName,
-      Action beforeFlushAction,
-      Action afterFlushAction,
-      Action afterCloseAction,
-      VersionController versionController,
-      FileSchema fileSchemaRef)
-      throws BufferWriteProcessorException, IOException {
-    super(processorName, beforeFlushAction, afterFlushAction, afterCloseAction, versionController,
-        fileSchemaRef);
-  }
-}
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/engine/overflow/io/OverflowProcessor2.java b/iotdb/src/main/java/org/apache/iotdb/db/engine/overflow/io/OverflowProcessor2.java
deleted file mode 100644
index 7cd5b5c..0000000
--- a/iotdb/src/main/java/org/apache/iotdb/db/engine/overflow/io/OverflowProcessor2.java
+++ /dev/null
@@ -1,707 +0,0 @@
-///**
-// * Licensed to the Apache Software Foundation (ASF) under one
-// * or more contributor license agreements.  See the NOTICE file
-// * distributed with this work for additional information
-// * regarding copyright ownership.  The ASF licenses this file
-// * to you under the Apache License, Version 2.0 (the
-// * "License"); you may not use this file except in compliance
-// * with the License.  You may obtain a copy of the License at
-// *
-// *      http://www.apache.org/licenses/LICENSE-2.0
-// *
-// * Unless required by applicable law or agreed to in writing,
-// * software distributed under the License is distributed on an
-// * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// * KIND, either express or implied.  See the License for the
-// * specific language governing permissions and limitations
-// * under the License.
-// */
-//package org.apache.iotdb.db.engine.overflow.io;
-//
-//import java.io.File;
-//import java.io.IOException;
-//import java.time.Instant;
-//import java.time.ZonedDateTime;
-//import java.util.ArrayList;
-//import java.util.Arrays;
-//import java.util.Collections;
-//import java.util.List;
-//import java.util.Map;
-//import java.util.Objects;
-//import java.util.concurrent.ExecutionException;
-//import java.util.concurrent.Future;
-//import java.util.concurrent.atomic.AtomicLong;
-//import java.util.concurrent.locks.ReentrantLock;
-//import org.apache.iotdb.db.conf.IoTDBConfig;
-//import org.apache.iotdb.db.conf.IoTDBConstant;
-//import org.apache.iotdb.db.conf.IoTDBDescriptor;
-//import org.apache.iotdb.db.engine.Processor;
-//import org.apache.iotdb.db.engine.bufferwrite.Action;
-//import org.apache.iotdb.db.engine.bufferwrite.FileNodeConstants;
-//import org.apache.iotdb.db.engine.bufferwrite.RestorableTsFileIOWriter;
-//import org.apache.iotdb.db.engine.filenode.FileNodeManager;
-//import org.apache.iotdb.db.engine.memcontrol.BasicMemController;
-//import org.apache.iotdb.db.engine.memtable.MemSeriesLazyMerger;
-//import org.apache.iotdb.db.engine.modification.Modification;
-//import org.apache.iotdb.db.engine.modification.ModificationFile;
-//import org.apache.iotdb.db.engine.pool.FlushManager;
-//import org.apache.iotdb.db.engine.querycontext.MergeSeriesDataSource;
-//import org.apache.iotdb.db.engine.querycontext.OverflowInsertFile;
-//import org.apache.iotdb.db.engine.querycontext.OverflowSeriesDataSource;
-//import org.apache.iotdb.db.engine.querycontext.ReadOnlyMemChunk;
-//import org.apache.iotdb.db.engine.version.VersionController;
-//import org.apache.iotdb.db.exception.OverflowProcessorException;
-//import org.apache.iotdb.db.qp.constant.DatetimeUtils;
-//import org.apache.iotdb.db.query.context.QueryContext;
-//import org.apache.iotdb.db.utils.ImmediateFuture;
-//import org.apache.iotdb.db.utils.MemUtils;
-//import org.apache.iotdb.db.utils.QueryUtils;
-//import org.apache.iotdb.db.writelog.manager.MultiFileLogNodeManager;
-//import org.apache.iotdb.db.writelog.node.WriteLogNode;
-//import org.apache.iotdb.tsfile.common.conf.TSFileConfig;
-//import org.apache.iotdb.tsfile.file.metadata.ChunkMetaData;
-//import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
-//import org.apache.iotdb.tsfile.read.common.Path;
-//import org.apache.iotdb.tsfile.utils.BytesUtils;
-//import org.apache.iotdb.tsfile.utils.Pair;
-//import org.apache.iotdb.tsfile.write.record.TSRecord;
-//import org.apache.iotdb.tsfile.write.schema.FileSchema;
-//import org.slf4j.Logger;
-//import org.slf4j.LoggerFactory;
-//
-//public class OverflowProcessor extends Processor {
-//  private static final String OVERFLOW_FILENAME = "unseqTsFile";
-//  private static final Logger LOGGER = LoggerFactory.getLogger(OverflowProcessor.class);
-//  private static final IoTDBConfig TsFileDBConf = IoTDBDescriptor.getInstance().getConfig();
-//  private RestorableTsFileIOWriter workResource;
-//  private RestorableTsFileIOWriter mergeResource;
-//
-//  private OverflowMemtable workMemtable;
-//  private OverflowMemtable flushMemtable;
-//
-//  private volatile Future<Boolean> flushFuture = new ImmediateFuture<>(true);
-//  private volatile boolean isMerge;
-//  private int valueCount;
-//  private String parentPath;
-//  private long lastFlushTime = -1;
-//  private AtomicLong dataPathCount = new AtomicLong();
-//  private ReentrantLock queryFlushLock = new ReentrantLock();
-//
-//  private Action overflowFlushAction;
-//  private Action filenodeFlushAction;
-//  private FileSchema fileSchema;
-//
-//  private long memThreshold = TSFileConfig.groupSizeInByte;
-//  private AtomicLong memSize = new AtomicLong();
-//
-//  private WriteLogNode logNode;
-//  private VersionController versionController;
-//
-//  public OverflowProcessor(String processorName, Map<String, Action> parameters,
-//      FileSchema fileSchema, VersionController versionController)
-//      throws IOException {
-//    super(processorName);
-//    this.fileSchema = fileSchema;
-//    this.versionController = versionController;
-//
-//    File processorDataDir = new File(TsFileDBConf.getOverflowDataDir(), processorName);
-//    if (!processorDataDir.exists()) {
-//      processorDataDir.mkdirs();
-//    }
-//    // recover file
-//    recovery(processorName, processorDataDir);
-//    // memory
-//    workMemtable = new OverflowMemtable();
-//    overflowFlushAction = parameters.get(FileNodeConstants.OVERFLOW_FLUSH_ACTION);
-//    filenodeFlushAction = parameters
-//        .get(FileNodeConstants.FILENODE_PROCESSOR_FLUSH_ACTION);
-//
-//    if (IoTDBDescriptor.getInstance().getConfig().isEnableWal()) {
-//      logNode = MultiFileLogNodeManager.getInstance().getNode(
-//          processorName + IoTDBConstant.OVERFLOW_LOG_NODE_SUFFIX,
-//          getOverflowRestoreFile(),
-//          FileNodeManager.getInstance().getRestoreFilePath(processorName));
-//    }
-//  }
-//
-//  private void recovery(String processorName, File processorDataDir) throws IOException {
-//    //remove those folders whose names are not number
-//    String[] subFolderPaths = clearFile(processorDataDir.list());
-//    if (subFolderPaths.length == 0) {
-//      //no overflow file
-//      File folder = new File(processorDataDir, String.valueOf(dataPathCount.getAndIncrement()));
-//      folder.mkdirs();
-//      workResource = new RestorableTsFileIOWriter(processorName,
-//          new File(folder, OVERFLOW_FILENAME).getAbsolutePath());
-//    } else if (subFolderPaths.length == 1) {
-//      //one overflow file exists
-//      long count = Long.parseLong(subFolderPaths[0]);
-//      dataPathCount.addAndGet(count + 1);
-//      File folder = new File(processorDataDir, String.valueOf(count));
-//      workResource = new RestorableTsFileIOWriter(processorName,
-//          new File(folder, OVERFLOW_FILENAME).getAbsolutePath());
-//      LOGGER.info("The overflow processor {} recover from work status.", getProcessorName());
-//    } else {
-//      long count1 = Long.parseLong(subFolderPaths[0]);
-//      long count2 = Long.parseLong(subFolderPaths[1]);
-//      if (count1 > count2) {
-//        long temp = count1;
-//        count1 = count2;
-//        count2 = temp;
-//      }
-//      dataPathCount.addAndGet(count2 + 1);
-//      // work dir > merge dir
-//      String workPath = new File(new File(processorDataDir, String.valueOf(count2)),
-//          OVERFLOW_FILENAME).getAbsolutePath();
-//      String mergePath = new File(new File(processorDataDir, String.valueOf(count1)),
-//          OVERFLOW_FILENAME).getAbsolutePath();
-//      workResource = new RestorableTsFileIOWriter(processorName, workPath);
-//      mergeResource = new RestorableTsFileIOWriter(processorName, mergePath);
-//      LOGGER.info("The overflow processor {} recover from merge status.", getProcessorName());
-//    }
-//  }
-//
-//  private String[] clearFile(String[] subFilePaths) {
-//    // just clear the files whose name are number.
-//    List<String> files = new ArrayList<>();
-//    for (String file : subFilePaths) {
-//      try {
-//        Long.valueOf(file);
-//        files.add(file);
-//      } catch (NumberFormatException e) {
-//        // ignore the exception, if the name of file is not a number.
-//
-//      }
-//    }
-//    return files.toArray(new String[files.size()]);
-//  }
-//
-//  /**
-//   * insert one time-series record
-//   */
-//  public void insert(TSRecord tsRecord) throws IOException {
-//    // memory control
-//    long memUage = MemUtils.getRecordSize(tsRecord);
-//    BasicMemController.getInstance().reportUse(this, memUage);
-//    // write data
-//    workMemtable.insert(tsRecord);
-//    valueCount++;
-//    // check flush
-//    memUage = memSize.addAndGet(memUage);
-//    if (memUage > memThreshold) {
-//      LOGGER.warn("The usage of memory {} in overflow processor {} reaches the threshold {}",
-//          MemUtils.bytesCntToStr(memUage), getProcessorName(),
-//          MemUtils.bytesCntToStr(memThreshold));
-//      flush();
-//    }
-//  }
-//
-//  /**
-//   * @deprecated update one time-series data which time range is from startTime from endTime.
-//   */
-//  @Deprecated
-//  public void update(String deviceId, String measurementId, long startTime, long endTime,
-//      TSDataType type, byte[] value) {
-//    workMemtable.update(deviceId, measurementId, startTime, endTime, type, value);
-//    valueCount++;
-//  }
-//
-//  /**
-//   * @deprecated this function need to be re-implemented.
-//   */
-//  @Deprecated
-//  public void update(String deviceId, String measurementId, long startTime, long endTime,
-//      TSDataType type, String value) {
-//    workMemtable.update(deviceId, measurementId, startTime, endTime, type,
-//        convertStringToBytes(type, value));
-//    valueCount++;
-//  }
-//
-//  private byte[] convertStringToBytes(TSDataType type, String o) {
-//    switch (type) {
-//      case INT32:
-//        return BytesUtils.intToBytes(Integer.valueOf(o));
-//      case INT64:
-//        return BytesUtils.longToBytes(Long.valueOf(o));
-//      case BOOLEAN:
-//        return BytesUtils.boolToBytes(Boolean.valueOf(o));
-//      case FLOAT:
-//        return BytesUtils.floatToBytes(Float.valueOf(o));
-//      case DOUBLE:
-//        return BytesUtils.doubleToBytes(Double.valueOf(o));
-//      case TEXT:
-//        return BytesUtils.stringToBytes(o);
-//      default:
-//        LOGGER.error("Unsupport data type: {}", type);
-//        throw new UnsupportedOperationException("Unsupport data type:" + type);
-//    }
-//  }
-//
-//  /**
-//   * Delete data of a timeseries whose time ranges from 0 to timestamp.
-//   *
-//   * @param deviceId the deviceId of the timeseries.
-//   * @param measurementId the measurementId of the timeseries.
-//   * @param timestamp the upper-bound of deletion time.
-//   * @param version the version number of this deletion.
-//   * @param updatedModFiles add successfully updated Modification files to the list, and abort them
-//   * when exception is raised
-//   */
-//  public void delete(String deviceId, String measurementId, long timestamp, long version,
-//      List<ModificationFile> updatedModFiles) throws IOException {
-//    workResource.delete(deviceId, measurementId, timestamp, version, updatedModFiles);
-//    workMemtable.delete(deviceId, measurementId, timestamp, false);
-//    if (isFlush()) {
-//      mergeResource.delete(deviceId, measurementId, timestamp, version, updatedModFiles);
-//      flushMemtable.delete(deviceId, measurementId, timestamp, true);
-//    }
-//  }
-//
-//  /**
-//   * query all overflow data which contain insert data in memory, insert data in file, update/delete
-//   * data in memory, update/delete data in file.
-//   *
-//   * @return OverflowSeriesDataSource
-//   */
-//  public OverflowSeriesDataSource query(String deviceId, String measurementId,
-//      TSDataType dataType, Map<String, String> props, QueryContext context)
-//      throws IOException {
-//    queryFlushLock.lock();
-//    try {
-//      // query insert data in memory and unseqTsFiles
-//      // memory
-//      ReadOnlyMemChunk insertInMem = queryOverflowInsertInMemory(deviceId, measurementId,
-//          dataType, props);
-//      List<OverflowInsertFile> overflowInsertFileList = new ArrayList<>();
-//      // work file
-//      Pair<String, List<ChunkMetaData>> insertInDiskWork = queryWorkDataInOverflowInsert(deviceId,
-//          measurementId,
-//          dataType, context);
-//      if (insertInDiskWork.left != null) {
-//        overflowInsertFileList
-//            .add(0, new OverflowInsertFile(insertInDiskWork.left,
-//                insertInDiskWork.right));
-//      }
-//      // merge file
-//      Pair<String, List<ChunkMetaData>> insertInDiskMerge = queryMergeDataInOverflowInsert(deviceId,
-//          measurementId, dataType, context);
-//      if (insertInDiskMerge.left != null) {
-//        overflowInsertFileList
-//            .add(0, new OverflowInsertFile(insertInDiskMerge.left
-//                , insertInDiskMerge.right));
-//      }
-//      // work file
-//      return new OverflowSeriesDataSource(new Path(deviceId + "." + measurementId), dataType,
-//          overflowInsertFileList, insertInMem);
-//    } finally {
-//      queryFlushLock.unlock();
-//    }
-//  }
-//
-//  /**
-//   * query insert data in memory table. while flushing, merge the work memory table with flush
-//   * memory table.
-//   *
-//   * @return insert data in SeriesChunkInMemTable
-//   */
-//  private ReadOnlyMemChunk queryOverflowInsertInMemory(String deviceId, String measurementId,
-//      TSDataType dataType, Map<String, String> props) {
-//
-//    MemSeriesLazyMerger memSeriesLazyMerger = new MemSeriesLazyMerger();
-//    queryFlushLock.lock();
-//    try {
-//      if (flushMemtable != null && isFlush()) {
-//        memSeriesLazyMerger
-//            .addMemSeries(
-//                flushMemtable.queryOverflowInsertInMemory(deviceId, measurementId, dataType, props));
-//      }
-//      memSeriesLazyMerger
-//          .addMemSeries(workMemtable.queryOverflowInsertInMemory(deviceId, measurementId,
-//              dataType, props));
-//      // memSeriesLazyMerger has handled the props,
-//      // so we do not need to handle it again in the following readOnlyMemChunk
-//      return new ReadOnlyMemChunk(dataType, memSeriesLazyMerger, Collections.emptyMap());
-//    } finally {
-//      queryFlushLock.unlock();
-//    }
-//  }
-//
-//  /**
-//   * Get the insert data which is WORK in unseqTsFile.
-//   *
-//   * @param deviceId deviceId of the target time-series
-//   * @param measurementId measurementId of the target time-series
-//   * @param dataType data type of the target time-series
-//   * @return the seriesPath of unseqTsFile, List of TimeSeriesChunkMetaData for the special
-//   * time-series.
-//   */
-//  private Pair<String, List<ChunkMetaData>> queryWorkDataInOverflowInsert(String deviceId,
-//      String measurementId, TSDataType dataType, QueryContext context) {
-//    return new Pair<>(
-//        workResource.getInsertFilePath(),
-//        getMetadatas(deviceId, measurementId, dataType, context));
-//  }
-//
-//  private List<ChunkMetaData> getMetadatas(String deviceId, String measurementId, TSDataType dataType, QueryContext context) {
-//    List<ChunkMetaData> result = workResource.getMetadatas(deviceId, measurementId, dataType);
-//    try {
-//      List<Modification> modifications = context.getPathModifications(modificationFile,
-//          deviceId + IoTDBConstant.PATH_SEPARATOR + measurementId);
-//      QueryUtils.modifyChunkMetaData(result, modifications);
-//    } catch (IOException e) {
-//      LOGGER.error("Cannot access the modification file of Overflow {}, because:", parentPath,
-//          e);
-//    }
-//    return result;
-//  }
-//
-//  /**
-//   * Get the all merge data in unseqTsFile and overflowFile.
-//   *
-//   * @return MergeSeriesDataSource
-//   */
-//  public MergeSeriesDataSource queryMerge(String deviceId, String measurementId,
-//      TSDataType dataType, QueryContext context) {
-//    Pair<String, List<ChunkMetaData>> mergeInsert = queryMergeDataInOverflowInsert(deviceId,
-//        measurementId,
-//        dataType, context);
-//    return new MergeSeriesDataSource(new OverflowInsertFile(mergeInsert.left, mergeInsert.right));
-//  }
-//
-//  public OverflowSeriesDataSource queryMerge(String deviceId, String measurementId,
-//      TSDataType dataType, boolean isMerge, QueryContext context) {
-//    Pair<String, List<ChunkMetaData>> mergeInsert = queryMergeDataInOverflowInsert(deviceId,
-//        measurementId,
-//        dataType, context);
-//    OverflowSeriesDataSource overflowSeriesDataSource = new OverflowSeriesDataSource(
-//        new Path(deviceId + "." + measurementId));
-//    overflowSeriesDataSource.setReadableMemChunk(null);
-//    overflowSeriesDataSource
-//        .setOverflowInsertFileList(
-//            Arrays.asList(new OverflowInsertFile(mergeInsert.left, mergeInsert.right)));
-//    return overflowSeriesDataSource;
-//  }
-//
-//  /**
-//   * Get the insert data which is MERGE in unseqTsFile
-//   *
-//   * @return the seriesPath of unseqTsFile, List of TimeSeriesChunkMetaData for the special
-//   * time-series.
-//   **/
-//  private Pair<String, List<ChunkMetaData>> queryMergeDataInOverflowInsert(String deviceId,
-//      String measurementId, TSDataType dataType, QueryContext context) {
-//    if (!isMerge) {
-//      return new Pair<>(null, null);
-//    }
-//    return new Pair<>(
-//        mergeResource.getInsertFilePath(),
-//        getMetadatas(deviceId, measurementId, dataType, context));
-//  }
-//
-//  private void switchWorkToFlush() {
-//    queryFlushLock.lock();
-//    try {
-//      flushMemtable = workMemtable;
-//      workMemtable = new OverflowSupport();
-//    } finally {
-//      queryFlushLock.unlock();
-//    }
-//  }
-//
-//  private void switchFlushToWork() {
-//    queryFlushLock.lock();
-//    try {
-//      flushMemtable.clear();
-//      workResource.appendMetadatas();
-//      flushMemtable = null;
-//    } finally {
-//      queryFlushLock.unlock();
-//    }
-//  }
-//
-//  public void switchWorkToMerge() throws IOException {
-//    if (mergeResource == null) {
-//      mergeResource = workResource;
-//      workResource = new OverflowResource(parentPath,
-//          String.valueOf(dataPathCount.getAndIncrement()), versionController);
-//    }
-//    isMerge = true;
-//    LOGGER.info("The overflow processor {} switch from WORK to MERGE", getProcessorName());
-//  }
-//
-//  public void switchMergeToWork() throws IOException {
-//    if (mergeResource != null) {
-//      mergeResource.close();
-//      mergeResource.deleteResource();
-//      mergeResource = null;
-//    }
-//    isMerge = false;
-//    LOGGER.info("The overflow processor {} switch from MERGE to WORK", getProcessorName());
-//  }
-//
-//  public boolean isMerge() {
-//    return isMerge;
-//  }
-//
-//  public boolean isFlush() {
-//    //see BufferWriteProcess.isFlush()
-//    return  flushMemtable != null;
-//  }
-//
-//  private boolean flushTask(String displayMessage) {
-//    boolean result;
-//    long flushStartTime = System.currentTimeMillis();
-//    try {
-//      LOGGER.info("The overflow processor {} starts flushing {}.", getProcessorName(),
-//                  displayMessage);
-//      // flush data
-//      workResource
-//          .flush(fileSchema, flushMemtable.getMemTabale(),
-//              getProcessorName());
-//      filenodeFlushAction.act();
-//      // write-ahead log
-//      if (IoTDBDescriptor.getInstance().getConfig().isEnableWal()) {
-//        logNode.notifyEndFlush(null);
-//      }
-//      result = true;
-//    } catch (IOException e) {
-//      LOGGER.error("Flush overflow processor {} rowgroup to file error in {}. Thread {} exits.",
-//          getProcessorName(), displayMessage, Thread.currentThread().getName(), e);
-//      result = false;
-//    } catch (Exception e) {
-//      LOGGER.error("FilenodeFlushAction action failed. Thread {} exits.",
-//          Thread.currentThread().getName(), e);
-//      result = false;
-//    } finally {
-//        // switch from flush to work.
-//        switchFlushToWork();
-//    }
-//    // log flush time
-//    if (LOGGER.isInfoEnabled()) {
-//      LOGGER
-//          .info("The overflow processor {} ends flushing {}.", getProcessorName(), displayMessage);
-//      long flushEndTime = System.currentTimeMillis();
-//      LOGGER.info(
-//          "The overflow processor {} flush {}, start time is {}, flush end time is {}," +
-//              " time consumption is {}ms",
-//          getProcessorName(), displayMessage,
-//          DatetimeUtils.convertMillsecondToZonedDateTime(flushStartTime),
-//          DatetimeUtils.convertMillsecondToZonedDateTime(flushEndTime),
-//          flushEndTime - flushStartTime);
-//    }
-//    return result;
-//  }
-//
-//  @Override
-//  public synchronized Future<Boolean> flush() throws IOException {
-//    // statistic information for flush
-//    if (lastFlushTime > 0) {
-//      long thisFLushTime = System.currentTimeMillis();
-//      ZonedDateTime lastDateTime = ZonedDateTime.ofInstant(Instant.ofEpochMilli(lastFlushTime),
-//          IoTDBDescriptor.getInstance().getConfig().getZoneID());
-//      ZonedDateTime thisDateTime = ZonedDateTime.ofInstant(Instant.ofEpochMilli(thisFLushTime),
-//          IoTDBDescriptor.getInstance().getConfig().getZoneID());
-//      LOGGER.info(
-//          "The overflow processor {} last flush time is {}, this flush time is {},"
-//              + " flush time interval is {}s",
-//          getProcessorName(), lastDateTime, thisDateTime,
-//          (thisFLushTime - lastFlushTime) / 1000);
-//    }
-//    lastFlushTime = System.currentTimeMillis();
-//    // value count
-//    if (valueCount > 0) {
-//      try {
-//        flushFuture.get();
-//      } catch (InterruptedException | ExecutionException e) {
-//        LOGGER.error("Encounter an interrupt error when waitting for the flushing, "
-//                + "the bufferwrite processor is {}.",
-//            getProcessorName(), e);
-//        Thread.currentThread().interrupt();
-//      }
-//      try {
-//        // backup newIntervalFile list and emptyIntervalFileNode
-//        overflowFlushAction.act();
-//      } catch (Exception e) {
-//        LOGGER.error("Flush the overflow rowGroup to file faied, when overflowFlushAction act");
-//        throw new IOException(e);
-//      }
-//
-//      if (IoTDBDescriptor.getInstance().getConfig().isEnableWal()) {
-//        try {
-//          logNode.notifyStartFlush();
-//        } catch (IOException e) {
-//          LOGGER.error("Overflow processor {} encountered an error when notifying log node, {}",
-//              getProcessorName(), e);
-//        }
-//      }
-//      BasicMemController.getInstance().reportFree(this, memSize.get());
-//      memSize.set(0);
-//      valueCount = 0;
-//      // switch from work to flush
-//      switchWorkToFlush();
-//      flushFuture = FlushManager.getInstance().submit( () ->
-//          flushTask("asynchronously"));
-//    } else {
-//      flushFuture = new ImmediateFuture(true);
-//    }
-//    return flushFuture;
-//
-//  }
-//
-//  @Override
-//  public void close() throws OverflowProcessorException {
-//    LOGGER.info("The overflow processor {} starts close operation.", getProcessorName());
-//    long closeStartTime = System.currentTimeMillis();
-//    // flush data
-//    try {
-//      flush().get();
-//    } catch (InterruptedException | ExecutionException e) {
-//      LOGGER.error("Encounter an interrupt error when waitting for the flushing, "
-//              + "the bufferwrite processor is {}.",
-//          getProcessorName(), e);
-//      Thread.currentThread().interrupt();
-//    } catch (IOException e) {
-//      throw new OverflowProcessorException(e);
-//    }
-//    if (LOGGER.isInfoEnabled()) {
-//      LOGGER.info("The overflow processor {} ends close operation.", getProcessorName());
-//      // log close time
-//      long closeEndTime = System.currentTimeMillis();
-//      LOGGER.info(
-//          "The close operation of overflow processor {} starts at {} and ends at {}."
-//              + " It comsumes {}ms.",
-//          getProcessorName(), DatetimeUtils.convertMillsecondToZonedDateTime(closeStartTime),
-//          DatetimeUtils.convertMillsecondToZonedDateTime(closeEndTime),
-//          closeEndTime - closeStartTime);
-//    }
-//  }
-//
-//  public void clear() throws IOException {
-//    if (workResource != null) {
-//      workResource.close();
-//    }
-//    if (mergeResource != null) {
-//      mergeResource.close();
-//    }
-//  }
-//
-//  @Override
-//  public boolean canBeClosed() {
-//    // TODO: consider merge
-//    return !isMerge;
-//  }
-//
-//  @Override
-//  public long memoryUsage() {
-//    return memSize.get();
-//  }
-//
-//  public String getOverflowRestoreFile() {
-//    return workResource.getPositionFilePath();
-//  }
-//
-//  /**
-//   * @return The sum of all timeseries's metadata size within this file.
-//   */
-//  public long getMetaSize() {
-//    // TODO : [MemControl] implement this
-//    return 0;
-//  }
-//
-//  /**
-//   * @return The size of overflow file corresponding to this processor.
-//   */
-//  public long getFileSize() {
-//    return workResource.getInsertFile().length() + memoryUsage();
-//  }
-//
-//  /**
-//   * Check whether current overflow file contains too many metadata or size of current overflow file
-//   * is too large If true, close current file and open a new one.
-//   */
-//  private boolean checkSize() {
-//    IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig();
-//    long metaSize = getMetaSize();
-//    long fileSize = getFileSize();
-//    LOGGER.info(
-//        "The overflow processor {}, the size of metadata reaches {},"
-//            + " the size of file reaches {}.",
-//        getProcessorName(), MemUtils.bytesCntToStr(metaSize), MemUtils.bytesCntToStr(fileSize));
-//    if (metaSize >= config.getOverflowMetaSizeThreshold()
-//        || fileSize >= config.getOverflowFileSizeThreshold()) {
-//      LOGGER.info(
-//          "The overflow processor {}, size({}) of the file {} reaches threshold {},"
-//              + " size({}) of metadata reaches threshold {}.",
-//          getProcessorName(), MemUtils.bytesCntToStr(fileSize), workResource.getInsertFilePath(),
-//          MemUtils.bytesCntToStr(config.getOverflowMetaSizeThreshold()),
-//          MemUtils.bytesCntToStr(metaSize),
-//          MemUtils.bytesCntToStr(config.getOverflowMetaSizeThreshold()));
-//      return true;
-//    } else {
-//      return false;
-//    }
-//  }
-//
-//  public WriteLogNode getLogNode() {
-//    return logNode;
-//  }
-//
-//  public OverflowResource getWorkResource() {
-//    return workResource;
-//  }
-//
-//  @Override
-//  public boolean equals(Object o) {
-//    if (this == o) {
-//      return true;
-//    }
-//    if (o == null || getClass() != o.getClass()) {
-//      return false;
-//    }
-//    if (!super.equals(o)) {
-//      return false;
-//    }
-//    OverflowProcessor that = (OverflowProcessor) o;
-//    return isMerge == that.isMerge &&
-//            valueCount == that.valueCount &&
-//            lastFlushTime == that.lastFlushTime &&
-//            memThreshold == that.memThreshold &&
-//            Objects.equals(workResource, that.workResource) &&
-//            Objects.equals(mergeResource, that.mergeResource) &&
-//            Objects.equals(workMemtable, that.workMemtable) &&
-//            Objects.equals(flushMemtable, that.flushMemtable) &&
-//            Objects.equals(flushFuture, that.flushFuture) &&
-//            Objects.equals(parentPath, that.parentPath) &&
-//            Objects.equals(dataPathCount, that.dataPathCount) &&
-//            Objects.equals(queryFlushLock, that.queryFlushLock) &&
-//            Objects.equals(overflowFlushAction, that.overflowFlushAction) &&
-//            Objects.equals(filenodeFlushAction, that.filenodeFlushAction) &&
-//            Objects.equals(fileSchema, that.fileSchema) &&
-//            Objects.equals(memSize, that.memSize) &&
-//            Objects.equals(logNode, that.logNode);
-//  }
-//
-//  @Override
-//  public int hashCode() {
-//    return Objects.hash(super.hashCode(), workResource, mergeResource, workMemtable,
-//        flushMemtable, flushFuture, isMerge, valueCount, parentPath, lastFlushTime,
-//            dataPathCount, queryFlushLock, overflowFlushAction, filenodeFlushAction, fileSchema,
-//            memThreshold, memSize, logNode, flushFuture);
-//  }
-//
-//  /**
-//   * used for test. We can block to wait for finishing flushing.
-//   * @return the future of the flush() task.
-//   */
-//  public Future<Boolean> getFlushFuture() {
-//    return flushFuture;
-//  }
-//
-//  /**
-//   * used for test. We can know when the flush() is called.
-//   * @return the last flush() time.
-//   */
-//  public long getFileNamePrefix() {
-//    return lastFlushTime;
-//  }
-//}
\ No newline at end of file
diff --git a/iotdb/src/main/java/org/apache/iotdb/db/engine/tsfiledata/TsFileResource.java b/iotdb/src/main/java/org/apache/iotdb/db/engine/tsfiledata/TsFileResource.java
deleted file mode 100644
index cc61e3f..0000000
--- a/iotdb/src/main/java/org/apache/iotdb/db/engine/tsfiledata/TsFileResource.java
+++ /dev/null
@@ -1,305 +0,0 @@
-///**
-// * Licensed to the Apache Software Foundation (ASF) under one
-// * or more contributor license agreements.  See the NOTICE file
-// * distributed with this work for additional information
-// * regarding copyright ownership.  The ASF licenses this file
-// * to you under the Apache License, Version 2.0 (the
-// * "License"); you may not use this file except in compliance
-// * with the License.  You may obtain a copy of the License at
-// *
-// *      http://www.apache.org/licenses/LICENSE-2.0
-// *
-// * Unless required by applicable law or agreed to in writing,
-// * software distributed under the License is distributed on an
-// * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// * KIND, either express or implied.  See the License for the
-// * specific language governing permissions and limitations
-// * under the License.
-// */
-//package org.apache.iotdb.db.engine.tsfiledata;
-//
-//import java.io.File;
-//import java.io.IOException;
-//import java.io.Serializable;
-//import java.util.ArrayList;
-//import java.util.HashMap;
-//import java.util.HashSet;
-//import java.util.List;
-//import java.util.Map;
-//import java.util.Objects;
-//import java.util.Set;
-//import org.apache.iotdb.db.conf.directories.Directories;
-//import org.apache.iotdb.db.engine.bufferwrite.RestorableTsFileIOWriter;
-//import org.apache.iotdb.db.engine.filenode.FileNodeProcessorStatus;
-//import org.apache.iotdb.db.engine.filenode.OverflowChangeType;
-//import org.apache.iotdb.db.engine.modification.ModificationFile;
-//import org.apache.iotdb.tsfile.common.conf.TSFileConfig;
-//import org.apache.iotdb.tsfile.file.metadata.ChunkGroupMetaData;
-//import org.apache.iotdb.tsfile.file.metadata.ChunkMetaData;
-//import org.apache.iotdb.tsfile.file.metadata.TsDeviceMetadataIndex;
-//import org.apache.iotdb.tsfile.read.TsFileSequenceReader;
-//
-///**
-// * This class is used to store one bufferwrite file status.<br>
-// */
-//public class TsFileResource implements Serializable {
-//
-//  private static final long serialVersionUID = -4309683416067212549L;
-//
-//  private OverflowChangeType overflowChangeType;
-//
-//  //the file index of `settled` folder in the Directories.
-//  private int baseDirIndex;
-//  private File file;
-//  private Map<String, Long> startTimeMap;
-//  private Map<String, Long> endTimeMap;
-//  private Set<String> mergeChanged = new HashSet<>();
-//
-//  private transient ModificationFile modFile;
-//
-//  /**
-//   * @param autoRead whether read the file to initialize startTimeMap and endTimeMap
-//   *
-//   */
-//  public TsFileResource(File file, boolean autoRead) throws IOException {
-//    this(new HashMap<>(), new HashMap<>(), OverflowChangeType.NO_CHANGE, file);
-//    if (autoRead) {
-//      //init startTime and endTime
-//      try (TsFileSequenceReader reader = new TsFileSequenceReader(file.getAbsolutePath())) {
-//        if (reader.readTailMagic().equals(TSFileConfig.MAGIC_STRING)) {
-//          //this is a complete tsfile, and we can read the metadata directly.
-//          for (Map.Entry<String, TsDeviceMetadataIndex> deviceEntry : reader.readFileMetadata()
-//              .getDeviceMap().entrySet()) {
-//            startTimeMap.put(deviceEntry.getKey(), deviceEntry.getValue().getStartTime());
-//            endTimeMap.put(deviceEntry.getKey(), deviceEntry.getValue().getEndTime());
-//          }
-//        } else {
-//          //sadly, this is not a complete tsfile. we have to repair it bytes by bytes
-//          //TODO will implement it
-//          List<ChunkGroupMetaData> metaDataList = new ArrayList<>();
-//          reader.selfCheck(null, metaDataList, false);
-//          initTimeMapFromChunGroupMetaDatas(metaDataList);
-//        }
-//      }
-//    }
-//  }
-//
-//  /**
-//   * @param writer an unclosed TsFile Writer
-//   */
-//  public TsFileResource(File file, RestorableTsFileIOWriter writer) {
-//    this(new HashMap<>(), new HashMap<>(), OverflowChangeType.NO_CHANGE, file);
-//    initTimeMapFromChunGroupMetaDatas(writer.getChunkGroupMetaDatas());
-//  }
-//
-//  private void initTimeMapFromChunGroupMetaDatas(List<ChunkGroupMetaData> metaDataList) {
-//    for (ChunkGroupMetaData metaData : metaDataList) {
-//      long startTime = startTimeMap.getOrDefault(metaData.getDeviceID(), Long.MAX_VALUE);
-//      long endTime = endTimeMap.getOrDefault(metaData.getDeviceID(), Long.MIN_VALUE);
-//      for (ChunkMetaData chunk : metaData.getChunkMetaDataList()) {
-//        if (chunk.getStartTime() < startTime) {
-//          startTime = chunk.getStartTime();
-//        }
-//        if (chunk.getEndTime() > endTime) {
-//          endTime = chunk.getEndTime();
-//        }
-//      }
-//      startTimeMap.put(metaData.getDeviceID(), startTime);
-//      endTimeMap.put(metaData.getDeviceID(), endTime);
-//    }
-//  }
-//
-//
-//  public TsFileResource(Map<String, Long> startTimeMap, Map<String, Long> endTimeMap,
-//      OverflowChangeType type, File file) {
-//
-//    this.overflowChangeType = type;
-//    this.baseDirIndex = Directories.getInstance().getTsFileFolderIndex(file.getParentFile().getParent());
-//    this.file = file;
-//
-//    this.startTimeMap = startTimeMap;
-//    this.endTimeMap = endTimeMap;
-//    this.modFile = new ModificationFile(file.getAbsolutePath() + ModificationFile.FILE_SUFFIX);
-//  }
-//
-//
-//  public void setStartTime(String deviceId, long startTime) {
-//
-//    startTimeMap.put(deviceId, startTime);
-//  }
-//
-//  public long getStartTime(String deviceId) {
-//
-//    if (startTimeMap.containsKey(deviceId)) {
-//      return startTimeMap.get(deviceId);
-//    } else {
-//      return -1;
-//    }
-//  }
-//
-//  public Map<String, Long> getStartTimeMap() {
-//
-//    return startTimeMap;
-//  }
-//
-//  public void setStartTimeMap(Map<String, Long> startTimeMap) {
-//
-//    this.startTimeMap = startTimeMap;
-//  }
-//
-//  public void setEndTime(String deviceId, long timestamp) {
-//
-//    this.endTimeMap.put(deviceId, timestamp);
-//  }
-//
-//  public long getEndTime(String deviceId) {
-//
-//    if (endTimeMap.get(deviceId) == null) {
-//      return -1;
-//    }
-//    return endTimeMap.get(deviceId);
-//  }
-//
-//  public Map<String, Long> getEndTimeMap() {
-//
-//    return endTimeMap;
-//  }
-//
-//  public void setEndTimeMap(Map<String, Long> endTimeMap) {
-//
-//    this.endTimeMap = endTimeMap;
-//  }
-//
-//  public void removeTime(String deviceId) {
-//
-//    startTimeMap.remove(deviceId);
-//    endTimeMap.remove(deviceId);
-//  }
-//
-//  public File getFile() {
-//    return file;
-//  }
-//
-//  public int getBaseDirIndex() {
-//    return baseDirIndex;
-//  }
-//
-//  public void setBaseDirIndex(int baseDirIndex) {
-//    this.baseDirIndex = baseDirIndex;
-//  }
-//
-//
-//  public boolean checkEmpty() {
-//
-//    return startTimeMap.isEmpty() && endTimeMap.isEmpty();
-//  }
-//
-//  public void clear() {
-//    startTimeMap.clear();
-//    endTimeMap.clear();
-//    mergeChanged.clear();
-//    overflowChangeType = OverflowChangeType.NO_CHANGE;
-//  }
-//
-//  public void changeTypeToChanged(FileNodeProcessorStatus fileNodeProcessorState) {
-//
-//    if (fileNodeProcessorState == FileNodeProcessorStatus.MERGING_WRITE) {
-//      overflowChangeType = OverflowChangeType.MERGING_CHANGE;
-//    } else {
-//      overflowChangeType = OverflowChangeType.CHANGED;
-//    }
-//  }
-//
-//  public void addMergeChanged(String deviceId) {
-//
-//    mergeChanged.add(deviceId);
-//  }
-//
-//  public Set<String> getMergeChanged() {
-//
-//    return mergeChanged;
-//  }
-//
-//  public void clearMergeChanged() {
-//
-//    mergeChanged.clear();
-//  }
-//
-//  public boolean isClosed() {
-//
-//    return !endTimeMap.isEmpty();
-//
-//  }
-//
-//  public TsFileResource backUp() {
-//
-//    Map<String, Long> startTimeMapCopy = new HashMap<>(this.startTimeMap);
-//    Map<String, Long> endTimeMapCopy = new HashMap<>(this.endTimeMap);
-//    return new TsFileResource(startTimeMapCopy, endTimeMapCopy, overflowChangeType, file);
-//  }
-//
-//  public Set<String> getDevices() {
-//    return this.startTimeMap.keySet();
-//  }
-//
-//  @Override
-//  public int hashCode() {
-//
-//    final int prime = 31;
-//    int result = 1;
-//    result = prime * result + ((endTimeMap == null) ? 0 : endTimeMap.hashCode());
-//    result = prime * result + ((file == null) ? 0 : file.hashCode());
-//    result = prime * result + ((overflowChangeType == null) ? 0 : overflowChangeType.hashCode());
-//    result = prime * result + ((startTimeMap == null) ? 0 : startTimeMap.hashCode());
-//    return result;
-//  }
-//
-//  @Override
-//  public boolean equals(Object o) {
-//    if (this == o) return true;
-//    if (o == null || getClass() != o.getClass()) return false;
-//    TsFileResource fileNode = (TsFileResource) o;
-//    return baseDirIndex == fileNode.baseDirIndex &&
-//            overflowChangeType == fileNode.overflowChangeType &&
-//            Objects.equals(file, fileNode.file) &&
-//            Objects.equals(startTimeMap, fileNode.startTimeMap) &&
-//            Objects.equals(endTimeMap, fileNode.endTimeMap) &&
-//            Objects.equals(mergeChanged, fileNode.mergeChanged);
-//  }
-//
-//  @Override
-//  public String toString() {
-//
-//    return String.format(
-//            "TsFileResource [file=%s,overflowChangeType=%s, startTimeMap=%s,"
-//                    + " endTimeMap=%s, mergeChanged=%s]",
-//            file.getAbsolutePath(), overflowChangeType, startTimeMap, endTimeMap, mergeChanged);
-//  }
-//
-//  public OverflowChangeType getOverflowChangeType() {
-//    return overflowChangeType;
-//  }
-//
-//  public void setOverflowChangeType(OverflowChangeType overflowChangeType) {
-//    this.overflowChangeType = overflowChangeType;
-//  }
-//
-//  public synchronized ModificationFile getModFile() {
-//    if (modFile == null) {
-//      modFile = new ModificationFile(file.getAbsolutePath() + ModificationFile.FILE_SUFFIX);
-//    }
-//    return modFile;
-//  }
-//
-//  public boolean containsDevice(String deviceId) {
-//    return startTimeMap.containsKey(deviceId);
-//  }
-//
-//  public void setModFile(ModificationFile modFile) {
-//    this.modFile = modFile;
-//  }
-//
-//  public void close() throws IOException {
-//    modFile.close();
-//  }
-//}
diff --git a/iotdb/src/test/java/org/apache/iotdb/db/experiments/PerfTest.java b/iotdb/src/test/java/org/apache/iotdb/db/experiments/PerfTest.java
deleted file mode 100644
index f7ca287..0000000
--- a/iotdb/src/test/java/org/apache/iotdb/db/experiments/PerfTest.java
+++ /dev/null
@@ -1,135 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.iotdb.db.experiments;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import org.apache.iotdb.db.engine.memtable.IMemTable;
-import org.apache.iotdb.db.engine.memtable.IWritableMemChunk;
-import org.apache.iotdb.db.engine.memtable.PrimitiveMemTable;
-import org.apache.iotdb.db.engine.querycontext.ReadOnlyMemChunk;
-import org.apache.iotdb.db.utils.TimeValuePair;
-import org.apache.iotdb.tsfile.file.footer.ChunkGroupFooter;
-import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
-import org.apache.iotdb.tsfile.utils.Pair;
-import org.apache.iotdb.tsfile.write.chunk.ChunkBuffer;
-import org.apache.iotdb.tsfile.write.chunk.ChunkWriterImpl;
-import org.apache.iotdb.tsfile.write.chunk.IChunkWriter;
-import org.apache.iotdb.tsfile.write.record.datapoint.DataPoint;
-import org.apache.iotdb.tsfile.write.record.datapoint.FloatDataPoint;
-import org.apache.iotdb.tsfile.write.schema.MeasurementSchema;
-import org.junit.Test;
-
-public class PerfTest {
-  int fullSize = 128*1024*1024/16;
-  int deviceSize = 1000;
-  int sensorSize = 1000;
-  long time = 100L;
-  Random random = new Random(System.currentTimeMillis());
-  Map<String, Long> maxTimeStamp = new HashMap<>();
-  IMemTable table = new PrimitiveMemTable();
-  @Test
-  public void test2(){
-
-    long start = System.currentTimeMillis();
-    for ( int i =0 ; i< fullSize; i++ ) {
-      time = random.nextInt(10) - 5 <0 ? time + random.nextInt(10) : time - random.nextInt(10);
-      String device = "d" + random.nextInt(deviceSize);
-      String sensor = "s" + random.nextInt(sensorSize);
-      table.write(device, sensor, TSDataType.FLOAT, time, 5.0f);
-      if (!maxTimeStamp.containsKey(device) || maxTimeStamp.get(device) < time) {
-        maxTimeStamp.put(device, time);
-      }
-    }
-    for (String deviceId : table.getMemTableMap().keySet()) {
-      for (String measurementId : table.getMemTableMap().get(deviceId).keySet()) {
-        IWritableMemChunk series = table.getMemTableMap().get(deviceId).get(measurementId);
-        series.getSortedTimeValuePairList();
-      }
-    }
-    System.out.println("origin total time cost: " + (System.currentTimeMillis() - start));
-  }
-
-  @Test
-  public void test(){
-    long start = System.currentTimeMillis();
-
-    for ( int i =0 ; i< fullSize; i++ ) {
-      time = random.nextInt(10) - 5 <0 ? time + random.nextInt(10) : time - random.nextInt(10);
-      String device = "d" + random.nextInt(deviceSize);
-      String sensor = "s" + random.nextInt(sensorSize);
-      table.write(device, sensor, TSDataType.FLOAT, time, 5.0f);
-
-    }
-    for (String deviceId : table.getMemTableMap().keySet()) {
-      long minTime = Long.MAX_VALUE;
-      long maxTime = Long.MIN_VALUE;
-      for (String measurementId : table.getMemTableMap().get(deviceId).keySet()) {
-        IWritableMemChunk series = table.getMemTableMap().get(deviceId).get(measurementId);
-
-          List<TimeValuePair> sortedTimeValuePairs = series.getSortedTimeValuePairList();
-          if (sortedTimeValuePairs.get(sortedTimeValuePairs.size() - 1).getTimestamp() > maxTime) {
-            maxTime = sortedTimeValuePairs.get(sortedTimeValuePairs.size() - 1).getTimestamp();
-          }
-
-      }
-      maxTimeStamp.put(deviceId, maxTime);
-    }
-    System.out.println("modified total time cost: " + (System.currentTimeMillis() - start));
-  }
-
-//  @Test
-//  public void test3() {
-//    IMemTable table = new PrimitiveMemTable();
-//    table.write("d1", "s1", TSDataType.FLOAT, 10, 5.0f);
-//    table.write("d1", "s1", TSDataType.FLOAT, 1, 5.0f);
-//    ReadOnlyMemChunk chunk = table.query("d1", "s1", TSDataType.FLOAT, Collections.emptyMap());
-//    chunk.getSortedTimeValuePairList().forEach( x -> System.out.println(x.getTimestamp()));
-//  }
-//
-//
-//  private void scan(IMemTable imemTable) {
-//    Map<String, Pair<Long, Long>> result = new HashMap<>();
-//    for (String deviceId : imemTable.getMemTableMap().keySet()) {
-//      int seriesNumber = imemTable.getMemTableMap().get(deviceId).size();
-//      long minTime = Long.MAX_VALUE;
-//      long maxTime = Long.MIN_VALUE;
-//      for (String measurementId : imemTable.getMemTableMap().get(deviceId).keySet()) {
-//        // TODO if we can not use TSFileIO writer, then we have to redesign the class of TSFileIO.
-//        IWritableMemChunk series = imemTable.getMemTableMap().get(deviceId).get(measurementId);
-//        List<TimeValuePair> sortedTimeValuePairs = series.getSortedTimeValuePairList();
-//        if (sortedTimeValuePairs.get(0).getTimestamp() < minTime) {
-//          minTime = sortedTimeValuePairs.get(0).getTimestamp();
-//        }
-//        if (sortedTimeValuePairs.get(sortedTimeValuePairs.size() - 1).getTimestamp() > maxTime) {
-//          maxTime = sortedTimeValuePairs.get(sortedTimeValuePairs.size() - 1).getTimestamp();
-//        }
-//      }
-//      long memSize = tsFileIoWriter.getPos() - startPos;
-//      ChunkGroupFooter footer = new ChunkGroupFooter(deviceId, memSize, seriesNumber);
-//      tsFileIoWriter.endChunkGroup(footer, version);
-//      result.put(deviceId, new Pair<>(minTime, maxTime));
-//    }
-//  }
-}
diff --git a/iotdb/src/test/java/org/apache/iotdb/db/integration/IoTDBDaemonIT.java b/iotdb/src/test/java/org/apache/iotdb/db/integration/IoTDBDaemonIT.java
index 1c35566..ff5e9e9 100644
--- a/iotdb/src/test/java/org/apache/iotdb/db/integration/IoTDBDaemonIT.java
+++ b/iotdb/src/test/java/org/apache/iotdb/db/integration/IoTDBDaemonIT.java
@@ -33,7 +33,6 @@ import java.sql.DriverManager;
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
-import org.apache.iotdb.db.conf.IoTDBDescriptor;
 import org.apache.iotdb.db.service.IoTDB;
 import org.apache.iotdb.db.utils.EnvironmentUtils;
 import org.apache.iotdb.jdbc.Config;