Posted to commits@iotdb.apache.org by qi...@apache.org on 2019/06/22 09:32:58 UTC
[incubator-iotdb] 03/03: remove FileNodeManager
This is an automated email from the ASF dual-hosted git repository.
qiaojialin pushed a commit to branch feature_async_close_tsfile
in repository https://gitbox.apache.org/repos/asf/incubator-iotdb.git
commit ce6770c0278a011b489b8ef453b757daa2ad4969
Merge: 9e4acf8 0e52c55
Author: qiaojialin <64...@qq.com>
AuthorDate: Sat Jun 22 17:32:35 2019 +0800
remove FileNodeManager
.../java/org/apache/iotdb/db/engine/Processor.java | 2 -
.../engine/bufferwrite/BufferWriteProcessor.java | 3 +-
.../iotdb/db/engine/filenode/FileNodeManager.java | 2630 ++++++------
.../db/engine/filenode/FileNodeProcessor.java | 4232 ++++++++++----------
.../db/engine/filenodeV2/FileNodeManagerV2.java | 10 +-
.../db/engine/filenodeV2/FileNodeProcessorV2.java | 3 +-
.../db/engine/memcontrol/FlushPartialPolicy.java | 9 +-
.../db/engine/memcontrol/ForceFLushAllPolicy.java | 11 +-
.../db/engine/overflow/io/OverflowMemtable.java | 111 -
.../db/engine/overflow/io/OverflowProcessor.java | 820 ----
.../org/apache/iotdb/db/monitor/StatMonitor.java | 9 +-
.../iotdb/db/monitor/collector/FileSize.java | 9 +-
.../iotdb/db/qp/physical/crud/InsertPlan.java | 15 +
.../db/query/control/QueryResourceManager.java | 46 +-
.../apache/iotdb/db/query/dataset/AuthDataSet.java | 6 +-
.../dataset/EngineDataSetWithTimeGenerator.java | 2 -
.../groupby/GroupByWithOnlyTimeFilterDataSet.java | 29 +-
.../groupby/GroupByWithValueFilterDataSet.java | 8 +-
.../AbstractExecutorWithoutTimeGenerator.java | 84 -
.../AbstractExecutorWithoutTimeGeneratorV2.java | 85 -
.../db/query/executor/AggregateEngineExecutor.java | 39 +-
.../executor/EngineExecutorWithTimeGenerator.java | 16 +-
.../EngineExecutorWithoutTimeGenerator.java | 29 +-
.../iotdb/db/query/executor/EngineQueryRouter.java | 8 +-
.../db/query/executor/FillEngineExecutor.java | 7 +-
.../db/query/factory/ISeriesReaderFactory.java | 28 +-
.../db/query/factory/SeriesReaderFactory.java | 73 +-
.../db/query/factory/SeriesReaderFactoryImpl.java | 125 +-
.../java/org/apache/iotdb/db/query/fill/IFill.java | 26 +-
.../org/apache/iotdb/db/query/fill/LinearFill.java | 8 +-
.../apache/iotdb/db/query/fill/PreviousFill.java | 8 +-
.../reader/sequence/SequenceDataReaderV2.java | 13 +-
.../timegenerator/AbstractNodeConstructor.java | 34 -
.../query/timegenerator/EngineNodeConstructor.java | 10 +-
.../apache/iotdb/db/service/CloseMergeService.java | 6 +-
.../java/org/apache/iotdb/db/service/IoTDB.java | 13 +-
.../org/apache/iotdb/db/service/TSServiceImpl.java | 7 +-
.../iotdb/db/sync/receiver/SyncServiceImpl.java | 15 +-
.../org/apache/iotdb/db/utils/LoadDataUtils.java | 9 +-
.../iotdb/db/writelog/recover/LogReplayer.java | 11 +-
.../recover/SeqTsFileRecoverPerformer.java | 2 +-
...ormer.java => UnSeqTsFileRecoverPerformer.java} | 84 +-
.../recover/UnseqTsFileRecoverPerformer.java | 75 -
.../filenode/FileNodeProcessorStoreTest.java | 91 -
.../db/engine/filenode/FileNodeProcessorTest.java | 134 -
.../db/engine/filenode/TsFileResourceTest.java | 98 -
.../filenodeV2/FileNodeManagerBenchmark.java | 5 +-
.../engine/filenodeV2/FileNodeProcessorV2Test.java | 7 +-
.../filenodeV2/UnsealedTsFileProcessorV2Test.java | 7 +-
.../memcontrol/OverflowFileSizeControlTest.java | 145 -
.../memcontrol/OverflowMetaSizeControlTest.java | 146 -
.../engine/modification/DeletionFileNodeTest.java | 69 +-
.../db/engine/modification/DeletionQueryTest.java | 92 +-
.../db/engine/overflow/io/OverflowIOTest.java | 65 -
.../engine/overflow/io/OverflowMemtableTest.java | 100 -
.../overflow/io/OverflowProcessorBenchmark.java | 124 -
.../engine/overflow/io/OverflowProcessorTest.java | 212 -
.../engine/overflow/io/OverflowResourceTest.java | 99 -
.../db/engine/overflow/io/OverflowTestUtils.java | 79 -
.../overflow/metadata/OFFileMetadataTest.java | 90 -
.../metadata/OFRowGroupListMetadataTest.java | 93 -
.../metadata/OFSeriesListMetadataTest.java | 88 -
.../overflow/metadata/OverflowTestHelper.java | 84 -
.../db/engine/overflow/metadata/OverflowUtils.java | 138 -
.../org/apache/iotdb/db/monitor/MonitorTest.java | 294 +-
.../apache/iotdb/db/qp/plan/PhysicalPlanTest.java | 9 +-
.../apache/iotdb/db/utils/EnvironmentUtils.java | 10 +-
.../iotdb/db/writelog/recover/LogReplayerTest.java | 9 +-
.../db/writelog/recover/SeqTsFileRecoverTest.java | 5 +-
.../writelog/recover/UnseqTsFileRecoverTest.java | 153 -
70 files changed, 4066 insertions(+), 7150 deletions(-)
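
A note on the pattern retired here: the commented-out FileNodeManager below constructs one FileNodeProcessor per storage group lazily, guarding construction with a ConcurrentHashMap plus per-key synchronization on the interned storage-group name (see getProcessor further down in this diff). A minimal standalone sketch of that pattern, using hypothetical stand-in names (ProcessorRegistry, plain Object) rather than the real IoTDB classes:

    import java.util.concurrent.ConcurrentHashMap;

    // Sketch only: ProcessorRegistry stands in for FileNodeManager,
    // Object stands in for FileNodeProcessor.
    public class ProcessorRegistry {

      private final ConcurrentHashMap<String, Object> processorMap = new ConcurrentHashMap<>();

      public Object getProcessor(String storageGroupName) {
        Object processor = processorMap.get(storageGroupName);
        if (processor != null) {
          return processor;
        }
        // intern() yields one canonical String per storage-group name, so the
        // synchronized block serializes construction per key, not globally.
        String key = storageGroupName.intern();
        synchronized (key) {
          processor = processorMap.get(key); // re-check under the per-key lock
          if (processor == null) {
            processor = new Object(); // stand-in for new FileNodeProcessor(baseDir, key)
            processorMap.put(key, processor);
          }
        }
        return processor;
      }
    }

On Java 8+ the same per-key atomicity is available via processorMap.computeIfAbsent(storageGroupName, k -> ...), without relying on String.intern().
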
diff --cc iotdb/src/main/java/org/apache/iotdb/db/engine/Processor.java
index 76b9704,76b9704..1453c25
--- a/iotdb/src/main/java/org/apache/iotdb/db/engine/Processor.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/engine/Processor.java
@@@ -23,7 -23,7 +23,6 @@@ import java.util.concurrent.Future
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.iotdb.db.engine.bufferwrite.BufferWriteProcessor;
--import org.apache.iotdb.db.engine.filenode.FileNodeProcessor;
import org.apache.iotdb.db.exception.ProcessorException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@@ -32,7 -32,7 +31,6 @@@
* Processor is used for implementing different processor with different operation.<br>
*
* @see BufferWriteProcessor
-- * @see FileNodeProcessor
*/
public abstract class Processor {
diff --cc iotdb/src/main/java/org/apache/iotdb/db/engine/bufferwrite/BufferWriteProcessor.java
index 71495b0,71495b0..e0d5ab8
--- a/iotdb/src/main/java/org/apache/iotdb/db/engine/bufferwrite/BufferWriteProcessor.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/engine/bufferwrite/BufferWriteProcessor.java
@@@ -52,6 -52,6 +52,7 @@@ import org.apache.iotdb.db.exception.Bu
import org.apache.iotdb.db.monitor.collector.MemTableWriteTimeCost;
import org.apache.iotdb.db.monitor.collector.MemTableWriteTimeCost.MemTableWriteTimeCostType;
import org.apache.iotdb.db.qp.constant.DatetimeUtils;
++import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
import org.apache.iotdb.db.utils.ImmediateFuture;
import org.apache.iotdb.db.utils.MemUtils;
import org.apache.iotdb.db.writelog.manager.MultiFileLogNodeManager;
@@@ -198,7 -198,7 +199,7 @@@ public class BufferWriteProcessor exten
public boolean write(TSRecord tsRecord) throws BufferWriteProcessorException {
MemTableWriteTimeCost.getInstance().init();
long start1 = System.currentTimeMillis();
-- long memUsage = MemUtils.getRecordSize(tsRecord);
++ long memUsage = MemUtils.getRecordSize(new InsertPlan(tsRecord));
BasicMemController.UsageLevel level = BasicMemController.getInstance()
.acquireUsage(this, memUsage);
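
The functional change in this hunk is that the memory estimate is now computed from an InsertPlan wrapped around the TSRecord rather than from the record directly (the 15 lines added to qp/physical/crud/InsertPlan.java in the stats above presumably supply that constructor). The conversion it implies is the same flattening into parallel measurement/value arrays that writeLog() performs in the FileNodeManager code below; a self-contained sketch with stand-in types, modeling only the fields the conversion touches:

    import java.util.ArrayList;
    import java.util.List;

    // Stand-ins for the IoTDB types, not the real classes.
    class DataPoint {
      final String measurementId;
      final Object value;

      DataPoint(String measurementId, Object value) {
        this.measurementId = measurementId;
        this.value = value;
      }
    }

    class TSRecord {
      final long time;
      final String deviceId;
      final List<DataPoint> dataPointList = new ArrayList<>();

      TSRecord(long time, String deviceId) {
        this.time = time;
        this.deviceId = deviceId;
      }
    }

    class InsertPlan {
      final String deviceId;
      final long time;
      final String[] measurementList;
      final String[] insertValues;

      // Mirrors the flattening in writeLog() below: one parallel
      // measurement/value pair per DataPoint in the record.
      InsertPlan(TSRecord record) {
        this.deviceId = record.deviceId;
        this.time = record.time;
        this.measurementList = new String[record.dataPointList.size()];
        this.insertValues = new String[record.dataPointList.size()];
        int i = 0;
        for (DataPoint dp : record.dataPointList) {
          measurementList[i] = dp.measurementId;
          insertValues[i] = dp.value.toString();
          i++;
        }
      }
    }
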
diff --cc iotdb/src/main/java/org/apache/iotdb/db/engine/filenode/FileNodeManager.java
index cd0d5de,13f2086..ff7d187
--- a/iotdb/src/main/java/org/apache/iotdb/db/engine/filenode/FileNodeManager.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/engine/filenode/FileNodeManager.java
@@@ -1,1317 -1,1317 +1,1317 @@@
--/**
-- * Licensed to the Apache Software Foundation (ASF) under one
-- * or more contributor license agreements. See the NOTICE file
-- * distributed with this work for additional information
-- * regarding copyright ownership. The ASF licenses this file
-- * to you under the Apache License, Version 2.0 (the
-- * "License"); you may not use this file except in compliance
-- * with the License. You may obtain a copy of the License at
-- *
-- * http://www.apache.org/licenses/LICENSE-2.0
-- *
-- * Unless required by applicable law or agreed to in writing,
-- * software distributed under the License is distributed on an
-- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-- * KIND, either express or implied. See the License for the
-- * specific language governing permissions and limitations
-- * under the License.
-- */
--
--package org.apache.iotdb.db.engine.filenode;
--
--import java.io.File;
--import java.io.IOException;
--import java.util.ArrayList;
--import java.util.HashMap;
--import java.util.Iterator;
--import java.util.List;
--import java.util.Map;
--import java.util.concurrent.ConcurrentHashMap;
--import java.util.concurrent.ExecutionException;
--import java.util.concurrent.Future;
--import java.util.concurrent.ScheduledExecutorService;
--import java.util.concurrent.TimeUnit;
--import java.util.concurrent.atomic.AtomicLong;
--import org.apache.commons.io.FileUtils;
--import org.apache.iotdb.db.concurrent.IoTDBThreadPoolFactory;
--import org.apache.iotdb.db.conf.IoTDBConfig;
--import org.apache.iotdb.db.conf.IoTDBConstant;
--import org.apache.iotdb.db.conf.IoTDBDescriptor;
- import org.apache.iotdb.db.conf.directories.DirectoryManager;
-import org.apache.iotdb.db.conf.directories.Directories;
--import org.apache.iotdb.db.engine.Processor;
--import org.apache.iotdb.db.engine.bufferwrite.BufferWriteProcessor;
--import org.apache.iotdb.db.engine.memcontrol.BasicMemController;
--import org.apache.iotdb.db.engine.overflow.io.OverflowProcessor;
--import org.apache.iotdb.db.engine.pool.FlushPoolManager;
--import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
--import org.apache.iotdb.db.exception.BufferWriteProcessorException;
--import org.apache.iotdb.db.exception.FileNodeManagerException;
--import org.apache.iotdb.db.exception.FileNodeProcessorException;
--import org.apache.iotdb.db.exception.PathErrorException;
--import org.apache.iotdb.db.exception.ProcessorException;
--import org.apache.iotdb.db.metadata.MManager;
--import org.apache.iotdb.db.monitor.IStatistic;
--import org.apache.iotdb.db.monitor.MonitorConstants;
--import org.apache.iotdb.db.monitor.StatMonitor;
--import org.apache.iotdb.db.qp.physical.crud.DeletePlan;
--import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
--import org.apache.iotdb.db.qp.physical.crud.UpdatePlan;
--import org.apache.iotdb.db.query.context.QueryContext;
--import org.apache.iotdb.db.query.control.FileReaderManager;
--import org.apache.iotdb.db.service.IService;
--import org.apache.iotdb.db.service.ServiceType;
--import org.apache.iotdb.db.utils.MemUtils;
--import org.apache.iotdb.db.writelog.manager.MultiFileLogNodeManager;
--import org.apache.iotdb.db.writelog.node.WriteLogNode;
--import org.apache.iotdb.tsfile.common.conf.TSFileConfig;
--import org.apache.iotdb.tsfile.file.metadata.enums.CompressionType;
--import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
--import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding;
--import org.apache.iotdb.tsfile.read.common.Path;
--import org.apache.iotdb.tsfile.read.expression.impl.SingleSeriesExpression;
--import org.apache.iotdb.tsfile.write.record.TSRecord;
--import org.apache.iotdb.tsfile.write.record.datapoint.DataPoint;
--import org.slf4j.Logger;
--import org.slf4j.LoggerFactory;
--
--public class FileNodeManager implements IStatistic, IService {
--
-- private static final Logger LOGGER = LoggerFactory.getLogger(FileNodeManager.class);
-- private static final IoTDBConfig TsFileDBConf = IoTDBDescriptor.getInstance().getConfig();
- private static final DirectoryManager DIRECTORY_MANAGER = DirectoryManager.getInstance();
- private static final Directories directories = Directories.getInstance();
-- /**
-- * a folder that persists FileNodeProcessorStore classes. Each storage group will have a subfolder;
-- * by default, it is system/info
-- */
-- private final String baseDir;
--
-- /**
-- * This map is used to manage all filenode processors;<br> the key is the filenode name, which is the
-- * storage group seriesPath.
-- */
-- private ConcurrentHashMap<String, FileNodeProcessor> processorMap;
-- /**
-- * This set is used to store overflowed filenode names.<br> The overflowed filenodes will be merged.
-- */
-- private volatile FileNodeManagerStatus fileNodeManagerStatus = FileNodeManagerStatus.NONE;
-- // There is no need to add concurrently
-- private HashMap<String, AtomicLong> statParamsHashMap;
--
-- ScheduledExecutorService closedProcessorCleaner = IoTDBThreadPoolFactory.newScheduledThreadPool(1,
-- "Closed FileNodeProcessors Cleaner");
--
-- private FileNodeManager(String baseDir) {
-- processorMap = new ConcurrentHashMap<>();
-- statParamsHashMap = new HashMap<>();
-- //label: A
-- for (MonitorConstants.FileNodeManagerStatConstants fileNodeManagerStatConstant :
-- MonitorConstants.FileNodeManagerStatConstants.values()) {
-- statParamsHashMap.put(fileNodeManagerStatConstant.name(), new AtomicLong(0));
-- }
--
-- String normalizedBaseDir = baseDir;
-- if (normalizedBaseDir.charAt(normalizedBaseDir.length() - 1) != File.separatorChar) {
-- normalizedBaseDir += Character.toString(File.separatorChar);
-- }
-- this.baseDir = normalizedBaseDir;
-- File dir = new File(normalizedBaseDir);
-- if (dir.mkdirs()) {
-- LOGGER.info("{} dir home doesn't exist, create it", dir.getPath());
-- }
-- //TODO merge this with label A
-- if (TsFileDBConf.isEnableStatMonitor()) {
-- StatMonitor statMonitor = StatMonitor.getInstance();
-- registerStatMetadata();
-- statMonitor.registerStatistics(MonitorConstants.STAT_STORAGE_DELTA_NAME, this);
-- }
--
-- closedProcessorCleaner.scheduleWithFixedDelay(()->{
-- int size = 0;
-- for (FileNodeProcessor fileNodeProcessor : processorMap.values()) {
-- size += fileNodeProcessor.getClosingBufferWriteProcessor().size();
-- }
-- if (size > 5) {
-- LOGGER.info("Current closing processor number is {}", size);
-- }
++///**
++// * Licensed to the Apache Software Foundation (ASF) under one
++// * or more contributor license agreements. See the NOTICE file
++// * distributed with this work for additional information
++// * regarding copyright ownership. The ASF licenses this file
++// * to you under the Apache License, Version 2.0 (the
++// * "License"); you may not use this file except in compliance
++// * with the License. You may obtain a copy of the License at
++// *
++// * http://www.apache.org/licenses/LICENSE-2.0
++// *
++// * Unless required by applicable law or agreed to in writing,
++// * software distributed under the License is distributed on an
++// * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++// * KIND, either express or implied. See the License for the
++// * specific language governing permissions and limitations
++// * under the License.
++// */
++//
++//package org.apache.iotdb.db.engine.filenode;
++//
++//import java.io.File;
++//import java.io.IOException;
++//import java.util.ArrayList;
++//import java.util.HashMap;
++//import java.util.Iterator;
++//import java.util.List;
++//import java.util.Map;
++//import java.util.concurrent.ConcurrentHashMap;
++//import java.util.concurrent.ExecutionException;
++//import java.util.concurrent.Future;
++//import java.util.concurrent.ScheduledExecutorService;
++//import java.util.concurrent.TimeUnit;
++//import java.util.concurrent.atomic.AtomicLong;
++//import org.apache.commons.io.FileUtils;
++//import org.apache.iotdb.db.concurrent.IoTDBThreadPoolFactory;
++//import org.apache.iotdb.db.conf.IoTDBConfig;
++//import org.apache.iotdb.db.conf.IoTDBConstant;
++//import org.apache.iotdb.db.conf.IoTDBDescriptor;
++//import org.apache.iotdb.db.conf.directories.DirectoryManager;
++//import org.apache.iotdb.db.engine.Processor;
++//import org.apache.iotdb.db.engine.bufferwrite.BufferWriteProcessor;
++//import org.apache.iotdb.db.engine.memcontrol.BasicMemController;
++//import org.apache.iotdb.db.engine.overflow.io.OverflowProcessor;
++//import org.apache.iotdb.db.engine.pool.FlushPoolManager;
++//import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
++//import org.apache.iotdb.db.exception.BufferWriteProcessorException;
++//import org.apache.iotdb.db.exception.FileNodeManagerException;
++//import org.apache.iotdb.db.exception.FileNodeProcessorException;
++//import org.apache.iotdb.db.exception.PathErrorException;
++//import org.apache.iotdb.db.exception.ProcessorException;
++//import org.apache.iotdb.db.metadata.MManager;
++//import org.apache.iotdb.db.monitor.IStatistic;
++//import org.apache.iotdb.db.monitor.MonitorConstants;
++//import org.apache.iotdb.db.monitor.StatMonitor;
++//import org.apache.iotdb.db.qp.physical.crud.DeletePlan;
++//import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
++//import org.apache.iotdb.db.qp.physical.crud.UpdatePlan;
++//import org.apache.iotdb.db.query.context.QueryContext;
++//import org.apache.iotdb.db.query.control.FileReaderManager;
++//import org.apache.iotdb.db.service.IService;
++//import org.apache.iotdb.db.service.ServiceType;
++//import org.apache.iotdb.db.utils.MemUtils;
++//import org.apache.iotdb.db.writelog.manager.MultiFileLogNodeManager;
++//import org.apache.iotdb.db.writelog.node.WriteLogNode;
++//import org.apache.iotdb.tsfile.common.conf.TSFileConfig;
++//import org.apache.iotdb.tsfile.file.metadata.enums.CompressionType;
++//import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
++//import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding;
++//import org.apache.iotdb.tsfile.read.common.Path;
++//import org.apache.iotdb.tsfile.read.expression.impl.SingleSeriesExpression;
++//import org.apache.iotdb.tsfile.write.record.TSRecord;
++//import org.apache.iotdb.tsfile.write.record.datapoint.DataPoint;
++//import org.slf4j.Logger;
++//import org.slf4j.LoggerFactory;
++//
++//public class FileNodeManager implements IStatistic, IService {
++//
++// private static final Logger LOGGER = LoggerFactory.getLogger(FileNodeManagerV2.class);
++// private static final IoTDBConfig TsFileDBConf = IoTDBDescriptor.getInstance().getConfig();
++// private static final DirectoryManager DIRECTORY_MANAGER = DirectoryManager.getInstance();
++// /**
++// * a folder that persists FileNodeProcessorStore classes. Each storage group will have a subfolder;
++// * by default, it is system/info
++// */
++// private final String baseDir;
++//
++// /**
++// * This map is used to manage all filenode processors;<br> the key is the filenode name, which is the
++// * storage group seriesPath.
++// */
++// private ConcurrentHashMap<String, FileNodeProcessor> processorMap;
++// /**
++// * This set is used to store overflowed filenode names.<br> The overflowed filenodes will be merged.
++// */
++// private volatile FileNodeManagerStatus fileNodeManagerStatus = FileNodeManagerStatus.NONE;
++// // There is no need to add concurrently
++// private HashMap<String, AtomicLong> statParamsHashMap;
++//
++// ScheduledExecutorService closedProcessorCleaner = IoTDBThreadPoolFactory.newScheduledThreadPool(1,
++// "Closed FileNodeProcessors Cleaner");
++//
++// private FileNodeManager(String baseDir) {
++// processorMap = new ConcurrentHashMap<>();
++// statParamsHashMap = new HashMap<>();
++// //label: A
++// for (MonitorConstants.FileNodeManagerStatConstants fileNodeManagerStatConstant :
++// MonitorConstants.FileNodeManagerStatConstants.values()) {
++// statParamsHashMap.put(fileNodeManagerStatConstant.name(), new AtomicLong(0));
++// }
++//
++// String normalizedBaseDir = baseDir;
++// if (normalizedBaseDir.charAt(normalizedBaseDir.length() - 1) != File.separatorChar) {
++// normalizedBaseDir += Character.toString(File.separatorChar);
++// }
++// this.baseDir = normalizedBaseDir;
++// File dir = new File(normalizedBaseDir);
++// if (dir.mkdirs()) {
++// LOGGER.info("{} dir home doesn't exist, create it", dir.getPath());
++// }
++// //TODO merge this with label A
++// if (TsFileDBConf.isEnableStatMonitor()) {
++// StatMonitor statMonitor = StatMonitor.getInstance();
++// registerStatMetadata();
++// statMonitor.registerStatistics(MonitorConstants.STAT_STORAGE_DELTA_NAME, this);
++// }
++//
++// closedProcessorCleaner.scheduleWithFixedDelay(()->{
++// int size = 0;
++// for (FileNodeProcessor fileNodeProcessor : processorMap.values()) {
++// size += fileNodeProcessor.getClosingBufferWriteProcessor().size();
++// }
++// if (size > 5) {
++// LOGGER.info("Current closing processor number is {}", size);
++// }
++//// for (FileNodeProcessor fileNodeProcessor : processorMap.values()) {
++//// fileNodeProcessor.checkAllClosingProcessors();
++//// }
++// }, 0, 30000, TimeUnit.MILLISECONDS);
++//
++// }
++//
++// public static FileNodeManager getInstance() {
++// return FileNodeManagerHolder.INSTANCE;
++// }
++//
++// private void updateStatHashMapWhenFail(TSRecord tsRecord) {
++// statParamsHashMap.get(MonitorConstants.FileNodeManagerStatConstants.TOTAL_REQ_FAIL.name())
++// .incrementAndGet();
++// statParamsHashMap.get(MonitorConstants.FileNodeManagerStatConstants.TOTAL_POINTS_FAIL.name())
++// .addAndGet(tsRecord.dataPointList.size());
++// }
++//
++// /**
++// * get stats parameter hash map.
++// *
++// * @return the key represents the params' name, values is AtomicLong type
++// */
++// @Override
++// public Map<String, AtomicLong> getStatParamsHashMap() {
++// return statParamsHashMap;
++// }
++//
++// @Override
++// public List<String> getAllPathForStatistic() {
++// List<String> list = new ArrayList<>();
++// for (MonitorConstants.FileNodeManagerStatConstants statConstant :
++// MonitorConstants.FileNodeManagerStatConstants.values()) {
++// list.add(MonitorConstants.STAT_STORAGE_DELTA_NAME + MonitorConstants.MONITOR_PATH_SEPARATOR
++// + statConstant.name());
++// }
++// return list;
++// }
++//
++// @Override
++// public Map<String, TSRecord> getAllStatisticsValue() {
++// long curTime = System.currentTimeMillis();
++// TSRecord tsRecord = StatMonitor
++// .convertToTSRecord(getStatParamsHashMap(), MonitorConstants.STAT_STORAGE_DELTA_NAME,
++// curTime);
++// HashMap<String, TSRecord> ret = new HashMap<>();
++// ret.put(MonitorConstants.STAT_STORAGE_DELTA_NAME, tsRecord);
++// return ret;
++// }
++//
++// /**
++// * Init Stat Metadata.
++// */
++// @Override
++// public void registerStatMetadata() {
++// Map<String, String> hashMap = new HashMap<>();
++// for (MonitorConstants.FileNodeManagerStatConstants statConstant :
++// MonitorConstants.FileNodeManagerStatConstants.values()) {
++// hashMap
++// .put(MonitorConstants.STAT_STORAGE_DELTA_NAME + MonitorConstants.MONITOR_PATH_SEPARATOR
++// + statConstant.name(), MonitorConstants.DATA_TYPE_INT64);
++// }
++// StatMonitor.getInstance().registerStatStorageGroup(hashMap);
++// }
++//
++// /**
++// * This function is just for unit test.
++// */
++// public synchronized void resetFileNodeManager() {
++// for (String key : statParamsHashMap.keySet()) {
++// statParamsHashMap.put(key, new AtomicLong());
++// }
++// processorMap.clear();
++// }
++//
++// /**
++// * @param filenodeName storage name, e.g., root.a.b
++// */
++// private FileNodeProcessor constructNewProcessor(String filenodeName)
++// throws FileNodeManagerException {
++// try {
++// return new FileNodeProcessor(baseDir, filenodeName);
++// } catch (FileNodeProcessorException e) {
++// LOGGER.error("Can't construct the FileNodeProcessor, the filenode is {}", filenodeName, e);
++// throw new FileNodeManagerException(e);
++// }
++// }
++//
++// private FileNodeProcessor getProcessor(String path, boolean isWriteLock)
++// throws FileNodeManagerException {
++// String filenodeName;
++// try {
++// // return the storage name
++// filenodeName = MManager.getInstance().getFileNameByPath(path);
++// } catch (PathErrorException e) {
++// LOGGER.error("MManager get filenode name error, seriesPath is {}", path);
++// throw new FileNodeManagerException(e);
++// }
++// FileNodeProcessor processor;
++// processor = processorMap.get(filenodeName);
++// if (processor != null) {
++// processor.lock(isWriteLock);
++// } else {
++// filenodeName = filenodeName.intern();
++// // calculate the value with same key synchronously
++// synchronized (filenodeName) {
++// processor = processorMap.get(filenodeName);
++// if (processor != null) {
++// processor.lock(isWriteLock);
++// } else {
++// // calculate the value with the key monitor
++// LOGGER.debug("construct a processor instance, the filenode is {}, Thread is {}",
++// filenodeName, Thread.currentThread().getId());
++// processor = constructNewProcessor(filenodeName);
++// processor.lock(isWriteLock);
++// processorMap.put(filenodeName, processor);
++// }
++// }
++// }
++// return processor;
++// }
++//
++// /**
++// * recovery the filenode processor.
++// */
++// public void recovery() {
++// List<String> filenodeNames = null;
++// try {
++// filenodeNames = MManager.getInstance().getAllFileNames();
++// } catch (PathErrorException e) {
++// LOGGER.error("Restoring all FileNodes failed.", e);
++// return;
++// }
++// for (String filenodeName : filenodeNames) {
++// FileNodeProcessor fileNodeProcessor = null;
++// try {
++// // recover in initialization
++// fileNodeProcessor = getProcessor(filenodeName, true);
++// } catch (FileNodeManagerException e) {
++// LOGGER.error("Restoring fileNode {} failed.", filenodeName, e);
++// } finally {
++// if (fileNodeProcessor != null) {
++// fileNodeProcessor.writeUnlock();
++// }
++// }
++// // add index check sum
++// }
++// }
++//
++// /**
++// * insert TsRecord into storage group.
++// *
++// * @param tsRecord input Data
++// * @param isMonitor if true, the insertion is done by StatMonitor and the statistic Info will not
++// * be recorded. if false, the statParamsHashMap will be updated.
++// * @return an int value represents the insert type, 0: failed; 1: overflow; 2: bufferwrite
++// */
++// public int insert(TSRecord tsRecord, boolean isMonitor) throws FileNodeManagerException {
++// long timestamp = tsRecord.time;
++//
++// String deviceId = tsRecord.deviceId;
++// checkTimestamp(tsRecord);
++//// //if memory is dangerous, directly reject
++//// long memUsage = MemUtils.getRecordSize(tsRecord);
++//// BasicMemController.UsageLevel level = BasicMemController.getInstance()
++//// .acquireUsage(this, memUsage);
++//// if (level == UsageLevel.DANGEROUS) {
++//// return 0;
++//// }
++//
++// updateStat(isMonitor, tsRecord);
++//
++// FileNodeProcessor fileNodeProcessor = getProcessor(deviceId, true);
++// int insertType;
++//
++// try {
++// long lastUpdateTime = fileNodeProcessor.getFlushLastUpdateTime(deviceId);
++// if (timestamp < lastUpdateTime) {
++//
++// long startOverflow = System.currentTimeMillis();
++//
++// insertOverflow(fileNodeProcessor, timestamp, tsRecord, isMonitor, deviceId);
++//
++// startOverflow = System.currentTimeMillis() - startOverflow;
++// if (startOverflow > 1000) {
++// LOGGER.info("has overflow data, insert cost: {}", startOverflow);
++// }
++//
++// insertType = 1;
++// } else {
++// insertBufferWrite(fileNodeProcessor, timestamp, isMonitor, tsRecord, deviceId);
++// insertType = 2;
++// }
++// } catch (FileNodeProcessorException e) {
++// LOGGER.error(String.format("Encounter an error when closing the buffer insert processor %s.",
++// fileNodeProcessor.getProcessorName()), e);
++// throw new FileNodeManagerException(e);
++// } finally {
++// fileNodeProcessor.writeUnlock();
++// }
++// // Modify the insert
++// if (!isMonitor) {
++// fileNodeProcessor.getStatParamsHashMap()
++// .get(MonitorConstants.FileNodeProcessorStatConstants.TOTAL_POINTS_SUCCESS.name())
++// .addAndGet(tsRecord.dataPointList.size());
++// fileNodeProcessor.getStatParamsHashMap()
++// .get(MonitorConstants.FileNodeProcessorStatConstants.TOTAL_REQ_SUCCESS.name())
++// .incrementAndGet();
++// statParamsHashMap.get(MonitorConstants.FileNodeManagerStatConstants.TOTAL_REQ_SUCCESS.name())
++// .incrementAndGet();
++// statParamsHashMap
++// .get(MonitorConstants.FileNodeManagerStatConstants.TOTAL_POINTS_SUCCESS.name())
++// .addAndGet(tsRecord.dataPointList.size());
++// }
++// return insertType;
++// }
++//
++// private void writeLog(TSRecord tsRecord, boolean isMonitor, WriteLogNode logNode)
++// throws FileNodeManagerException {
++// try {
++// if (IoTDBDescriptor.getInstance().getConfig().isEnableWal()) {
++// String[] measurementList = new String[tsRecord.dataPointList.size()];
++// String[] insertValues = new String[tsRecord.dataPointList.size()];
++// int i=0;
++// for (DataPoint dp : tsRecord.dataPointList) {
++// measurementList[i] = dp.getMeasurementId();
++// insertValues[i] = dp.getValue().toString();
++// i++;
++// }
++// logNode.write(new InsertPlan(2, tsRecord.deviceId, tsRecord.time, measurementList,
++// insertValues));
++// }
++// } catch (IOException e) {
++// if (!isMonitor) {
++// updateStatHashMapWhenFail(tsRecord);
++// }
++// throw new FileNodeManagerException(e);
++// }
++// }
++//
++// private void checkTimestamp(TSRecord tsRecord) throws FileNodeManagerException {
++// if (tsRecord.time < 0) {
++// LOGGER.error("The insert time lt 0, {}.", tsRecord);
++// throw new FileNodeManagerException("The insert time lt 0, the tsrecord is " + tsRecord);
++// }
++// }
++//
++// private void updateStat(boolean isMonitor, TSRecord tsRecord) {
++// if (!isMonitor) {
++// statParamsHashMap.get(MonitorConstants.FileNodeManagerStatConstants.TOTAL_POINTS.name())
++// .addAndGet(tsRecord.dataPointList.size());
++// }
++// }
++//
++// private void insertOverflow(FileNodeProcessor fileNodeProcessor, long timestamp,
++// TSRecord tsRecord, boolean isMonitor, String deviceId)
++// throws FileNodeManagerException {
++// // get overflow processor
++// OverflowProcessor overflowProcessor;
++// String filenodeName = fileNodeProcessor.getProcessorName();
++// try {
++// overflowProcessor = fileNodeProcessor.getOverflowProcessor(filenodeName);
++// } catch (ProcessorException e) {
++// LOGGER.error("Get the overflow processor failed, the filenode is {}, insert time is {}",
++// filenodeName, timestamp);
++// if (!isMonitor) {
++// updateStatHashMapWhenFail(tsRecord);
++// }
++// throw new FileNodeManagerException(e);
++// }
++// // insert wal
++// try {
++// writeLog(tsRecord, isMonitor, overflowProcessor.getLogNode());
++// } catch (IOException e) {
++// throw new FileNodeManagerException(e);
++// }
++// // insert overflow data
++// try {
++// overflowProcessor.insert(tsRecord);
++// fileNodeProcessor.changeTypeToChanged(deviceId, timestamp);
++// fileNodeProcessor.setOverflowed(true);
++// } catch (IOException e) {
++// LOGGER.error("Insert into overflow error, the reason is {}", e);
++// if (!isMonitor) {
++// updateStatHashMapWhenFail(tsRecord);
++// }
++// throw new FileNodeManagerException(e);
++// }
++// }
++//
++// private void insertBufferWrite(FileNodeProcessor fileNodeProcessor, long timestamp,
++// boolean isMonitor, TSRecord tsRecord, String deviceId)
++// throws FileNodeManagerException, FileNodeProcessorException {
++//
++// long start1 = System.currentTimeMillis();
++// // get bufferwrite processor
++// BufferWriteProcessor bufferWriteProcessor;
++// String filenodeName = fileNodeProcessor.getProcessorName();
++// try {
++// bufferWriteProcessor = fileNodeProcessor.getBufferWriteProcessor(filenodeName, timestamp);
++//
++// } catch (FileNodeProcessorException e) {
++// LOGGER.error("Get the bufferwrite processor failed, the filenode is {}, insert time is {}",
++// filenodeName, timestamp);
++// if (!isMonitor) {
++// updateStatHashMapWhenFail(tsRecord);
++// }
++// throw new FileNodeManagerException(e);
++// } finally {
++// long start1_1 = System.currentTimeMillis() - start1;
++// if (start1_1 > 1000) {
++// LOGGER.info("FileNodeManagerV2.insertBufferWrite step-1-1, cost: {}", start1_1);
++// }
++// }
++//
++// long start1_2 = System.currentTimeMillis();
++// // Add a new interval file to newfilelist
++// if (bufferWriteProcessor.isNewProcessor()) {
++// bufferWriteProcessor.setNewProcessor(false);
++// String bufferwriteBaseDir = bufferWriteProcessor.getBaseDir();
++// String bufferwriteRelativePath = bufferWriteProcessor.getFileRelativePath();
++// try {
++// bufferWriteProcessor.setCurrentTsFileResource(new TsFileResource(new File(new File(bufferwriteBaseDir), bufferwriteRelativePath), false));
++// fileNodeProcessor.addIntervalFileNode(bufferWriteProcessor.getCurrentTsFileResource());
++// } catch (Exception e) {
++// if (!isMonitor) {
++// updateStatHashMapWhenFail(tsRecord);
++// }
++// throw new FileNodeManagerException(e);
++// }
++// }
++// start1_2 = System.currentTimeMillis() - start1_2;
++// if (start1_2 > 1000) {
++// LOGGER.info("FileNodeManagerV2.insertBufferWrite step-1-2, cost: {}", start1_2);
++// }
++//
++// start1 = System.currentTimeMillis() - start1;
++// if (start1 > 1000) {
++// LOGGER.info("FileNodeManagerV2.insertBufferWrite step-1, cost: {}", start1);
++// }
++//
++// long start2 = System.currentTimeMillis();
++//
++// long start2_1 = start2;
++// // insert wal
++// try {
++// writeLog(tsRecord, isMonitor, bufferWriteProcessor.getLogNode());
++// } catch (IOException e) {
++// throw new FileNodeManagerException(e);
++// }
++// start2_1 = System.currentTimeMillis() - start2_1;
++// if (start2_1 > 1000) {
++// LOGGER.info("FileNodeManagerV2.insertBufferWrite step2-1 cost: {}", start2_1);
++// }
++//
++// long start2_2 = System.currentTimeMillis();
++// // Write data
++// long prevStartTime = fileNodeProcessor.getIntervalFileNodeStartTime(deviceId);
++// long prevUpdateTime = fileNodeProcessor.getLastUpdateTime(deviceId);
++//
++// fileNodeProcessor.setIntervalFileNodeStartTime(deviceId);
++// fileNodeProcessor.setLastUpdateTime(deviceId, timestamp);
++//
++// start2_2 = System.currentTimeMillis() - start2_2;
++// if (start2_2 > 1000) {
++// LOGGER.info("FileNodeManagerV2.insertBufferWrite step2-2 cost: {}", start2_2);
++// }
++// try {
++// long start2_3 = System.currentTimeMillis();
++//
++// // insert tsrecord and check flushMetadata
++// if (!bufferWriteProcessor.write(tsRecord)) {
++// start2_3 = System.currentTimeMillis() - start2_3;
++// if (start2_3 > 1000) {
++// LOGGER.info("FileNodeManagerV2.insertBufferWrite step2-3 cost: {}", start2_3);
++// }
++//
++// long start2_4 = System.currentTimeMillis();
++// // undo time update
++// fileNodeProcessor.setIntervalFileNodeStartTime(deviceId, prevStartTime);
++// fileNodeProcessor.setLastUpdateTime(deviceId, prevUpdateTime);
++// start2_4 = System.currentTimeMillis() - start2_4;
++// if (start2_4 > 1000) {
++// LOGGER.info("FileNodeManagerV2.insertBufferWrite step2-4 cost: {}", start2_4);
++// }
++// }
++// } catch (BufferWriteProcessorException e) {
++// if (!isMonitor) {
++// updateStatHashMapWhenFail(tsRecord);
++// }
++// throw new FileNodeManagerException(e);
++// }
++// start2 = System.currentTimeMillis() - start2;
++// if (start2 > 1000) {
++// LOGGER.info("FileNodeManagerV2.insertBufferWrite step-2, cost: {}", start2);
++// }
++//
++// long start3 = System.currentTimeMillis();
++//
++// // check if the file should be closed
++// if (bufferWriteProcessor
++// .getFileSize() > IoTDBDescriptor.getInstance()
++// .getConfig().getBufferwriteFileSizeThreshold()) {
++// if (LOGGER.isInfoEnabled()) {
++// LOGGER.info(
++// "The filenode processor {} will setCloseMark the bufferwrite processor, "
++// + "because the size[{}] of tsfile {} reaches the threshold {}",
++// filenodeName, MemUtils.bytesCntToStr(bufferWriteProcessor.getFileSize()),
++// bufferWriteProcessor.getInsertFilePath(), MemUtils.bytesCntToStr(
++// IoTDBDescriptor.getInstance().getConfig().getBufferwriteFileSizeThreshold()));
++// }
++//
++// fileNodeProcessor.closeBufferWrite();
++// start3 = System.currentTimeMillis() - start3;
++// if (start3 > 1000) {
++// LOGGER.info("FileNodeManagerV2.insertBufferWrite step-3, setCloseMark buffer insert cost: {}", start3);
++// }
++// }
++// }
++//
++// /**
++// * update data.
++// */
++// public void update(String deviceId, String measurementId, long startTime, long endTime,
++// TSDataType type, String v)
++// throws FileNodeManagerException {
++//
++// FileNodeProcessor fileNodeProcessor = getProcessor(deviceId, true);
++// try {
++//
++// long lastUpdateTime = fileNodeProcessor.getLastUpdateTime(deviceId);
++// if (startTime > lastUpdateTime) {
++// LOGGER.warn("The update range is error, startTime {} is great than lastUpdateTime {}",
++// startTime,
++// lastUpdateTime);
++// return;
++// }
++// long finalEndTime = endTime > lastUpdateTime ? lastUpdateTime : endTime;
++//
++// String filenodeName = fileNodeProcessor.getProcessorName();
++// // get overflow processor
++// OverflowProcessor overflowProcessor;
++// try {
++// overflowProcessor = fileNodeProcessor.getOverflowProcessor(filenodeName);
++// } catch (ProcessorException e) {
++// LOGGER.error(
++// "Get the overflow processor failed, the filenode is {}, "
++// + "insert time range is from {} to {}",
++// filenodeName, startTime, finalEndTime);
++// throw new FileNodeManagerException(e);
++// }
++// overflowProcessor.update(deviceId, measurementId, startTime, finalEndTime, type, v);
++// // change the type of tsfile to overflowed
++// fileNodeProcessor.changeTypeToChanged(deviceId, startTime, finalEndTime);
++// fileNodeProcessor.setOverflowed(true);
++//
++// // insert wal
++// try {
++// if (IoTDBDescriptor.getInstance().getConfig().isEnableWal()) {
++// overflowProcessor.getLogNode().write(
++// new UpdatePlan(startTime, finalEndTime, v, new Path(deviceId
++// + "." + measurementId)));
++// }
++// } catch (IOException e) {
++// throw new FileNodeManagerException(e);
++// }
++// } finally {
++// fileNodeProcessor.writeUnlock();
++// }
++// }
++//
++// /**
++// * delete data.
++// */
++// public void delete(String deviceId, String measurementId, long timestamp)
++// throws FileNodeManagerException {
++//
++// FileNodeProcessor fileNodeProcessor = getProcessor(deviceId, true);
++// try {
++// long lastUpdateTime = fileNodeProcessor.getLastUpdateTime(deviceId);
++// // no tsfile data, the delete operation is invalid
++// if (lastUpdateTime == -1) {
++// LOGGER.warn("The last update time is -1, delete overflow is invalid, "
++// + "the filenode processor is {}",
++// fileNodeProcessor.getProcessorName());
++// } else {
++// // insert wal
++// if (IoTDBDescriptor.getInstance().getConfig().isEnableWal()) {
++// // get processors for wal
++// String filenodeName = fileNodeProcessor.getProcessorName();
++// OverflowProcessor overflowProcessor;
++// BufferWriteProcessor bufferWriteProcessor;
++// try {
++// overflowProcessor = fileNodeProcessor.getOverflowProcessor(filenodeName);
++// // in case that no BufferWriteProcessor is available, a new BufferWriteProcessor is
++// // needed to access LogNode.
++// // TODO this may make the time range of the next TsFile a little wider
++// bufferWriteProcessor = fileNodeProcessor.getBufferWriteProcessor(filenodeName,
++// lastUpdateTime + 1);
++// } catch (ProcessorException e) {
++// LOGGER.error("Getting the processor failed, the filenode is {}, delete time is {}.",
++// filenodeName, timestamp);
++// throw new FileNodeManagerException(e);
++// }
++// try {
++// overflowProcessor.getLogNode().write(new DeletePlan(timestamp,
++// new Path(deviceId + "." + measurementId)));
++// bufferWriteProcessor.getLogNode().write(new DeletePlan(timestamp,
++// new Path(deviceId + "." + measurementId)));
++// } catch (IOException e) {
++// throw new FileNodeManagerException(e);
++// }
++// }
++//
++// try {
++// fileNodeProcessor.delete(deviceId, measurementId, timestamp);
++// } catch (IOException e) {
++// throw new FileNodeManagerException(e);
++// }
++// // change the type of tsfile to overflowed
++// fileNodeProcessor.changeTypeToChangedForDelete(deviceId, timestamp);
++// fileNodeProcessor.setOverflowed(true);
++//
++// }
++// } finally {
++// fileNodeProcessor.writeUnlock();
++// }
++// }
++//
++// private void delete(String processorName,
++// Iterator<Map.Entry<String, FileNodeProcessor>> processorIterator)
++// throws FileNodeManagerException {
++// if (!processorMap.containsKey(processorName)) {
++// //TODO do we need to call processorIterator.remove() ?
++// LOGGER.warn("The processorMap doesn't contain the filenode processor {}.", processorName);
++// return;
++// }
++// LOGGER.info("Try to delete the filenode processor {}.", processorName);
++// FileNodeProcessor processor = processorMap.get(processorName);
++// if (!processor.tryWriteLock()) {
++// throw new FileNodeManagerException(String
++// .format("Can't delete the filenode processor %s because Can't get the insert lock.",
++// processorName));
++// }
++//
++// try {
++// if (!processor.canBeClosed()) {
++// LOGGER.warn("The filenode processor {} can't be deleted.", processorName);
++// return;
++// }
++//
++// try {
++// LOGGER.info("Delete the filenode processor {}.", processorName);
++// processor.delete();
++// processorIterator.remove();
++// } catch (ProcessorException e) {
++// LOGGER.error("Delete the filenode processor {} by iterator error.", processorName, e);
++// throw new FileNodeManagerException(e);
++// }
++// } finally {
++// processor.writeUnlock();
++// }
++// }
++//
++// /**
++// * Similar to delete(), but only deletes data in BufferWrite. Only used by WAL recovery.
++// */
++// public void deleteBufferWrite(String deviceId, String measurementId, long timestamp)
++// throws FileNodeManagerException {
++// FileNodeProcessor fileNodeProcessor = getProcessor(deviceId, true);
++// try {
++// fileNodeProcessor.deleteBufferWrite(deviceId, measurementId, timestamp);
++// } catch (BufferWriteProcessorException | IOException e) {
++// throw new FileNodeManagerException(e);
++// } finally {
++// fileNodeProcessor.writeUnlock();
++// }
++// // change the type of tsfile to overflowed
++// fileNodeProcessor.changeTypeToChangedForDelete(deviceId, timestamp);
++// fileNodeProcessor.setOverflowed(true);
++// }
++//
++// /**
++// * Similar to delete(), but only deletes data in Overflow. Only used by WAL recovery.
++// */
++// public void deleteOverflow(String deviceId, String measurementId, long timestamp)
++// throws FileNodeManagerException {
++// FileNodeProcessor fileNodeProcessor = getProcessor(deviceId, true);
++// try {
++// fileNodeProcessor.deleteOverflow(deviceId, measurementId, timestamp);
++// } catch (ProcessorException e) {
++// throw new FileNodeManagerException(e);
++// } finally {
++// fileNodeProcessor.writeUnlock();
++// }
++// // change the type of tsfile to overflowed
++// fileNodeProcessor.changeTypeToChangedForDelete(deviceId, timestamp);
++// fileNodeProcessor.setOverflowed(true);
++// }
++//
++// /**
++// * begin query.
++// *
++// * @param deviceId queried deviceId
++// * @return a query token for the device.
++// */
++// public int beginQuery(String deviceId) throws FileNodeManagerException {
++// FileNodeProcessor fileNodeProcessor = getProcessor(deviceId, true);
++// try {
++// LOGGER.debug("Get the FileNodeProcessor: filenode is {}, begin query.",
++// fileNodeProcessor.getProcessorName());
++// return fileNodeProcessor.addMultiPassCount();
++// } finally {
++// fileNodeProcessor.writeUnlock();
++// }
++// }
++//
++// /**
++// * query data.
++// */
++// public QueryDataSource query(SingleSeriesExpression seriesExpression, QueryContext context)
++// throws FileNodeManagerException {
++// String deviceId = seriesExpression.getSeriesPath().getDevice();
++// String measurementId = seriesExpression.getSeriesPath().getMeasurement();
++// FileNodeProcessor fileNodeProcessor = getProcessor(deviceId, false);
++// LOGGER.debug("Get the FileNodeProcessor: filenode is {}, query.",
++// fileNodeProcessor.getProcessorName());
++// try {
++// QueryDataSource queryDataSource;
++// // query operation must have overflow processor
++// if (!fileNodeProcessor.hasOverflowProcessor()) {
++// try {
++// fileNodeProcessor.getOverflowProcessor(fileNodeProcessor.getProcessorName());
++// } catch (ProcessorException e) {
++// LOGGER.error("Get the overflow processor failed, the filenode is {}, query is {},{}",
++// fileNodeProcessor.getProcessorName(), deviceId, measurementId);
++// throw new FileNodeManagerException(e);
++// }
++// }
++// try {
++// queryDataSource = fileNodeProcessor.query(deviceId, measurementId, context);
++// } catch (FileNodeProcessorException e) {
++// LOGGER.error("Query error: the deviceId {}, the measurementId {}", deviceId, measurementId,
++// e);
++// throw new FileNodeManagerException(e);
++// }
++// // return query structure
++// return queryDataSource;
++// } finally {
++// fileNodeProcessor.readUnlock();
++// }
++// }
++//
++// /**
++// * end query.
++// */
++// public void endQuery(String deviceId, int token) throws FileNodeManagerException {
++//
++// FileNodeProcessor fileNodeProcessor = getProcessor(deviceId, true);
++// try {
++// LOGGER.debug("Get the FileNodeProcessor: {} end query.",
++// fileNodeProcessor.getProcessorName());
++// fileNodeProcessor.decreaseMultiPassCount(token);
++// } catch (FileNodeProcessorException e) {
++// LOGGER.error("Failed to end query: the deviceId {}, token {}.", deviceId, token, e);
++// throw new FileNodeManagerException(e);
++// } finally {
++// fileNodeProcessor.writeUnlock();
++// }
++// }
++//
++// /**
++// * Append one specified tsfile to the storage group. <b>This method is only provided for
++// * transmission module</b>
++// *
++// * @param fileNodeName the seriesPath of storage group
++// * @param appendFile the appended tsfile information
++// */
++// public boolean appendFileToFileNode(String fileNodeName, TsFileResource appendFile,
++// String appendFilePath) throws FileNodeManagerException {
++// FileNodeProcessor fileNodeProcessor = getProcessor(fileNodeName, true);
++// try {
++// // check append file
++// for (Map.Entry<String, Long> entry : appendFile.getStartTimeMap().entrySet()) {
++// if (fileNodeProcessor.getLastUpdateTime(entry.getKey()) >= entry.getValue()) {
++// return false;
++// }
++// }
++// // setCloseMark bufferwrite file
++// fileNodeProcessor.closeBufferWrite();
++// // append file to storage group.
++// fileNodeProcessor.appendFile(appendFile, appendFilePath);
++// } catch (FileNodeProcessorException e) {
++// LOGGER.error("Cannot append the file {} to {}", appendFile.getFile().getAbsolutePath(), fileNodeName, e);
++// throw new FileNodeManagerException(e);
++// } finally {
++// fileNodeProcessor.writeUnlock();
++// }
++// return true;
++// }
++//
++// /**
++// * get all overlap tsfiles which are conflict with the appendFile.
++// *
++// * @param fileNodeName the seriesPath of storage group
++// * @param appendFile the appended tsfile information
++// */
++// public List<String> getOverlapFilesFromFileNode(String fileNodeName, TsFileResource appendFile,
++// String uuid) throws FileNodeManagerException {
++// FileNodeProcessor fileNodeProcessor = getProcessor(fileNodeName, true);
++// List<String> overlapFiles;
++// try {
++// overlapFiles = fileNodeProcessor.getOverlapFiles(appendFile, uuid);
++// } catch (FileNodeProcessorException e) {
++// throw new FileNodeManagerException(e);
++// } finally {
++// fileNodeProcessor.writeUnlock();
++// }
++// return overlapFiles;
++// }
++//
++// /**
++// * merge all overflowed filenode.
++// *
++// * @throws FileNodeManagerException FileNodeManagerException
++// */
++// public void mergeAll() throws FileNodeManagerException {
++// if (fileNodeManagerStatus != FileNodeManagerStatus.NONE) {
++// LOGGER.warn("Failed to merge all overflowed filenode, because filenode manager status is {}",
++// fileNodeManagerStatus);
++// return;
++// }
++//
++// fileNodeManagerStatus = FileNodeManagerStatus.MERGE;
++// LOGGER.info("Start to merge all overflowed filenode");
++// List<String> allFileNodeNames;
++// try {
++// allFileNodeNames = MManager.getInstance().getAllFileNames();
++// } catch (PathErrorException e) {
++// LOGGER.error("Get all storage group seriesPath error,", e);
++// throw new FileNodeManagerException(e);
++// }
++// List<Future<?>> futureTasks = new ArrayList<>();
++// for (String fileNodeName : allFileNodeNames) {
++// FileNodeProcessor fileNodeProcessor = getProcessor(fileNodeName, true);
++// try {
++// Future<?> task = fileNodeProcessor.submitToMerge();
++// if (task != null) {
++// LOGGER.info("Submit the filenode {} to the merge pool", fileNodeName);
++// futureTasks.add(task);
++// }
++// } finally {
++// fileNodeProcessor.writeUnlock();
++// }
++// }
++// long totalTime = 0;
++// // loop waiting for merge to end, the longest waiting time is
++// // 60s.
++// int time = 2;
++// List<Exception> mergeException = new ArrayList<>();
++// for (Future<?> task : futureTasks) {
++// while (!task.isDone()) {
++// try {
++// LOGGER.info(
++// "Waiting for the end of merge, already waiting for {}s, "
++// + "continue to wait anothor {}s",
++// totalTime, time);
++// TimeUnit.SECONDS.sleep(time);
++// totalTime += time;
++// time = updateWaitTime(time);
++// } catch (InterruptedException e) {
++// LOGGER.error("Unexpected interruption {}", e);
++// Thread.currentThread().interrupt();
++// }
++// }
++// try {
++// task.get();
++// } catch (InterruptedException e) {
++// LOGGER.error("Unexpected interruption {}", e);
++// } catch (ExecutionException e) {
++// mergeException.add(e);
++// LOGGER.error("The exception for merge: {}", e);
++// }
++// }
++// if (!mergeException.isEmpty()) {
++// // just throw the first exception
++// throw new FileNodeManagerException(mergeException.get(0));
++// }
++// fileNodeManagerStatus = FileNodeManagerStatus.NONE;
++// LOGGER.info("End to merge all overflowed filenode");
++// }
++//
++// private int updateWaitTime(int time) {
++// return time < 32 ? time * 2 : 60;
++// }
++//
++// /**
++// * delete one filenode.
++// */
++// public void deleteOneFileNode(String processorName) throws FileNodeManagerException {
++// if (fileNodeManagerStatus != FileNodeManagerStatus.NONE) {
++// return;
++// }
++//
++// fileNodeManagerStatus = FileNodeManagerStatus.CLOSE;
++// try {
++// if (processorMap.containsKey(processorName)) {
++// deleteFileNodeBlocked(processorName);
++// }
++// String fileNodePath = TsFileDBConf.getFileNodeDir();
++// fileNodePath = standardizeDir(fileNodePath) + processorName;
++// FileUtils.deleteDirectory(new File(fileNodePath));
++//
++// cleanBufferWrite(processorName);
++//
++// MultiFileLogNodeManager.getInstance()
++// .deleteNode(processorName + IoTDBConstant.BUFFERWRITE_LOG_NODE_SUFFIX);
++// MultiFileLogNodeManager.getInstance()
++// .deleteNode(processorName + IoTDBConstant.OVERFLOW_LOG_NODE_SUFFIX);
++// } catch (IOException e) {
++// LOGGER.error("Delete the filenode processor {} error.", processorName, e);
++// throw new FileNodeManagerException(e);
++// } finally {
++// fileNodeManagerStatus = FileNodeManagerStatus.NONE;
++// }
++// }
++//
++// private void cleanBufferWrite(String processorName) throws IOException {
++// List<String> bufferwritePathList = DIRECTORY_MANAGER.getAllTsFileFolders();
++// for (String bufferwritePath : bufferwritePathList) {
++// bufferwritePath = standardizeDir(bufferwritePath) + processorName;
++// File bufferDir = new File(bufferwritePath);
++// // free and setCloseMark the streams under this bufferwrite directory
++// if (!bufferDir.exists()) {
++// continue;
++// }
++// File[] bufferFiles = bufferDir.listFiles();
++// if (bufferFiles != null) {
++// for (File bufferFile : bufferFiles) {
++// FileReaderManager.getInstance().closeFileAndRemoveReader(bufferFile.getPath());
++// }
++// }
++// FileUtils.deleteDirectory(new File(bufferwritePath));
++// }
++// }
++//
++// private void deleteFileNodeBlocked(String processorName) throws FileNodeManagerException {
++// LOGGER.info("Forced to delete the filenode processor {}", processorName);
++// FileNodeProcessor processor = processorMap.get(processorName);
++// while (true) {
++// if (processor.tryWriteLock()) {
++// try {
++// if (processor.canBeClosed()) {
++// LOGGER.info("Delete the filenode processor {}.", processorName);
++// processor.delete();
++// processorMap.remove(processorName);
++// break;
++// } else {
++// LOGGER.info(
++// "Can't delete the filenode processor {}, "
++// + "because the filenode processor can't be closed."
++// + " Wait 100ms to retry");
++// }
++// } catch (ProcessorException e) {
++// LOGGER.error("Delete the filenode processor {} error.", processorName, e);
++// throw new FileNodeManagerException(e);
++// } finally {
++// processor.writeUnlock();
++// }
++// } else {
++// LOGGER.info(
++// "Can't delete the filenode processor {}, because it can't get the insert lock."
++// + " Wait 100ms to retry", processorName);
++// }
++// try {
++// TimeUnit.MILLISECONDS.sleep(100);
++// } catch (InterruptedException e) {
++// LOGGER.error(e.getMessage());
++// Thread.currentThread().interrupt();
++// }
++// }
++// }
++//
++// private String standardizeDir(String originalPath) {
++// String res = originalPath;
++// if ((originalPath.length() > 0
++// && originalPath.charAt(originalPath.length() - 1) != File.separatorChar)
++// || originalPath.length() == 0) {
++// res = originalPath + File.separatorChar;
++// }
++// return res;
++// }
++//
++// /**
++// * add time series.
++// */
++// public void addTimeSeries(Path path, TSDataType dataType, TSEncoding encoding,
++// CompressionType compressor,
++// Map<String, String> props) throws FileNodeManagerException {
++// FileNodeProcessor fileNodeProcessor = getProcessor(path.getFullPath(), true);
++// try {
++// fileNodeProcessor.addTimeSeries(path.getMeasurement(), dataType, encoding, compressor, props);
++// } finally {
++// fileNodeProcessor.writeUnlock();
++// }
++// }
++//
++//
++// /**
++// * Force to setCloseMark the filenode processor.
++// */
++// public void closeOneFileNode(String processorName) throws FileNodeManagerException {
++// if (fileNodeManagerStatus != FileNodeManagerStatus.NONE) {
++// return;
++// }
++//
++// fileNodeManagerStatus = FileNodeManagerStatus.CLOSE;
++// try {
++// LOGGER.info("Force to setCloseMark the filenode processor {}.", processorName);
++// while (!closeOneProcessor(processorName)) {
++// try {
++// LOGGER.info("Can't force to setCloseMark the filenode processor {}, wait 100ms to retry",
++// processorName);
++// TimeUnit.MILLISECONDS.sleep(100);
++// } catch (InterruptedException e) {
++// // ignore the interrupted exception
++// LOGGER.error("Unexpected interruption {}", e);
++// Thread.currentThread().interrupt();
++// }
++// }
++// } finally {
++// fileNodeManagerStatus = FileNodeManagerStatus.NONE;
++// }
++// }
++//
++//
++// /**
++// * try to setCloseMark the filenode processor. The name of filenode processor is processorName
++// * notice: this method has the same function with close()
++// */
++// private boolean closeOneProcessor(String processorName) throws FileNodeManagerException {
++// if (!processorMap.containsKey(processorName)) {
++// return true;
++// }
++//
++// Processor processor = processorMap.get(processorName);
++// if (processor.tryWriteLock()) {
++// try {
++// if (processor.canBeClosed()) {
++// processor.close();
++// return true;
++// } else {
++// return false;
++// }
++// } catch (ProcessorException e) {
++// LOGGER.error("Close the filenode processor {} error.", processorName, e);
++// throw new FileNodeManagerException(e);
++// } finally {
++// processor.writeUnlock();
++// }
++// } else {
++// return false;
++// }
++// }
++//
++// /**
++// * try to setCloseMark the filenode processor.
++// * notice: This method has the same function with closeOneProcessor()
++// */
++// private void close(String processorName) throws FileNodeManagerException {
++// if (!processorMap.containsKey(processorName)) {
++// LOGGER.warn("The processorMap doesn't contain the filenode processor {}.", processorName);
++// return;
++// }
++// LOGGER.info("Try to setCloseMark the filenode processor {}.", processorName);
++// FileNodeProcessor processor = processorMap.get(processorName);
++// if (!processor.tryWriteLock()) {
++// LOGGER.warn("Can't get the insert lock of the filenode processor {}.", processorName);
++// return;
++// }
++// try {
++// if (processor.canBeClosed()) {
++// try {
++// LOGGER.info("Close the filenode processor {}.", processorName);
++// processor.close();
++// } catch (ProcessorException e) {
++// LOGGER.error("Close the filenode processor {} error.", processorName, e);
++// throw new FileNodeManagerException(e);
++// }
++// } else {
++// LOGGER.warn("The filenode processor {} can't be closed.", processorName);
++// }
++// } finally {
++// processor.writeUnlock();
++// }
++// }
++//
++// /**
++// * delete all filenode.
++// */
++// public synchronized boolean deleteAll() throws FileNodeManagerException {
++// LOGGER.info("Start deleting all filenode");
++// if (fileNodeManagerStatus != FileNodeManagerStatus.NONE) {
++// LOGGER.info("Failed to delete all filenode processor because of merge operation");
++// return false;
++// }
++//
++// fileNodeManagerStatus = FileNodeManagerStatus.CLOSE;
++// try {
++// Iterator<Map.Entry<String, FileNodeProcessor>> processorIterator = processorMap.entrySet()
++// .iterator();
++// while (processorIterator.hasNext()) {
++// Map.Entry<String, FileNodeProcessor> processorEntry = processorIterator.next();
++// delete(processorEntry.getKey(), processorIterator);
++// }
++// return processorMap.isEmpty();
++// } finally {
++// LOGGER.info("Deleting all FileNodeProcessors ends");
++// fileNodeManagerStatus = FileNodeManagerStatus.NONE;
++// }
++// }
++//
++// /**
++//  * Try to setCloseMark all filenode processors.
++// */
++// public void closeAll() throws FileNodeManagerException {
++// LOGGER.info("Start closing all filenode processor");
++// if (fileNodeManagerStatus != FileNodeManagerStatus.NONE) {
++// LOGGER.info("Failed to setCloseMark all filenode processor because of merge operation");
++// return;
++// }
++// fileNodeManagerStatus = FileNodeManagerStatus.CLOSE;
++// try {
++// for (Map.Entry<String, FileNodeProcessor> processorEntry : processorMap.entrySet()) {
++// close(processorEntry.getKey());
++// }
++// } finally {
++// LOGGER.info("Close all FileNodeProcessors ends");
++// fileNodeManagerStatus = FileNodeManagerStatus.NONE;
++// }
++// }
++//
++// /**
++// * force flushMetadata to control memory usage.
++// */
++// public void forceFlush(BasicMemController.UsageLevel level) {
++//    // you may add some more fine-grained processing like below,
++//    // or you could provide multiple methods for different urgency levels
++// switch (level) {
++// // only select the most urgent (most active or biggest in size)
++// // processors to flushMetadata
++// // only select top 10% active memory user to flushMetadata
++// case WARNING:
++// try {
++// flushTop(0.1f);
++// } catch (IOException e) {
++//          LOGGER.error("force flushMetadata memory data error", e);
++// }
++// break;
++// // force all processors to flushMetadata
++// case DANGEROUS:
++// try {
++// flushAll();
++// } catch (IOException e) {
++//          LOGGER.error("force flushMetadata memory data error", e);
++// }
++// break;
++//      // if the flushMetadata thread pool is not full (or half full), start a new
++// // flushMetadata task
++// case SAFE:
++// if (FlushPoolManager.getInstance().getActiveCnt() < 0.5 * FlushPoolManager.getInstance()
++// .getThreadCnt()) {
++// try {
++// flushTop(0.01f);
++// } catch (IOException e) {
++//            LOGGER.error("force flushMetadata memory data error", e);
++// }
++// }
++// break;
++// default:
++// }
++// }
++//
++// private void flushAll() throws IOException {
++// for (FileNodeProcessor processor : processorMap.values()) {
++// if (!processor.tryLock(true)) {
++// continue;
++// }
++// try {
++// boolean isMerge = processor.flush().isHasOverflowFlushTask();
++// if (isMerge) {
++// processor.submitToMerge();
++// }
++// } finally {
++// processor.unlock(true);
++// }
++// }
++// }
++//
++// private void flushTop(float percentage) throws IOException {
++// List<FileNodeProcessor> tempProcessors = new ArrayList<>(processorMap.values());
++//    // sort the tempProcessors in descending order of memory usage
++// tempProcessors.sort((o1, o2) -> (int) (o2.memoryUsage() - o1.memoryUsage()));
++// int flushNum =
++// (int) (tempProcessors.size() * percentage) > 1
++// ? (int) (tempProcessors.size() * percentage)
++// : 1;
++// for (int i = 0; i < flushNum && i < tempProcessors.size(); i++) {
++// FileNodeProcessor processor = tempProcessors.get(i);
++//      // skip processors whose memory usage is at most half of groupSizeInByte (64 MB under the default configuration)
++// if (processor.memoryUsage() <= TSFileConfig.groupSizeInByte / 2) {
++// continue;
++// }
++// long start = System.currentTimeMillis();
++// processor.writeLock();
++// try {
++// boolean isMerge = processor.flush().isHasOverflowFlushTask();
++// if (isMerge) {
++// processor.submitToMerge();
++// }
++// } finally {
++// processor.writeUnlock();
++// }
++// start = System.currentTimeMillis() - start;
++// LOGGER.info("flushMetadata Top cost: {}", start);
++// }
++// }
++//
++// @Override
++// public void start() {
++//    // do nothing
++// }
++//
++// @Override
++// public void stop() {
++// try {
++// closeAll();
++// } catch (FileNodeManagerException e) {
++//      LOGGER.error("Failed to setCloseMark the file node manager.", e);
++// }
++//
++// boolean notFinished = true;
++// while (notFinished) {
++// int size = 0;
// for (FileNodeProcessor fileNodeProcessor : processorMap.values()) {
--// fileNodeProcessor.checkAllClosingProcessors();
++// size += fileNodeProcessor.getClosingBufferWriteProcessor().size();
// }
-- }, 0, 30000, TimeUnit.MILLISECONDS);
--
-- }
--
-- public static FileNodeManager getInstance() {
-- return FileNodeManagerHolder.INSTANCE;
-- }
--
-- private void updateStatHashMapWhenFail(TSRecord tsRecord) {
-- statParamsHashMap.get(MonitorConstants.FileNodeManagerStatConstants.TOTAL_REQ_FAIL.name())
-- .incrementAndGet();
-- statParamsHashMap.get(MonitorConstants.FileNodeManagerStatConstants.TOTAL_POINTS_FAIL.name())
-- .addAndGet(tsRecord.dataPointList.size());
-- }
--
-- /**
-- * get stats parameter hash map.
-- *
--   * @return a map whose keys are the parameter names and whose values are AtomicLong counters
-- */
-- @Override
-- public Map<String, AtomicLong> getStatParamsHashMap() {
-- return statParamsHashMap;
-- }
--
-- @Override
-- public List<String> getAllPathForStatistic() {
-- List<String> list = new ArrayList<>();
-- for (MonitorConstants.FileNodeManagerStatConstants statConstant :
-- MonitorConstants.FileNodeManagerStatConstants.values()) {
-- list.add(MonitorConstants.STAT_STORAGE_DELTA_NAME + MonitorConstants.MONITOR_PATH_SEPARATOR
-- + statConstant.name());
-- }
-- return list;
-- }
--
-- @Override
-- public Map<String, TSRecord> getAllStatisticsValue() {
-- long curTime = System.currentTimeMillis();
-- TSRecord tsRecord = StatMonitor
-- .convertToTSRecord(getStatParamsHashMap(), MonitorConstants.STAT_STORAGE_DELTA_NAME,
-- curTime);
-- HashMap<String, TSRecord> ret = new HashMap<>();
-- ret.put(MonitorConstants.STAT_STORAGE_DELTA_NAME, tsRecord);
-- return ret;
-- }
--
-- /**
--   * Init Stat Metadata.
-- */
-- @Override
-- public void registerStatMetadata() {
-- Map<String, String> hashMap = new HashMap<>();
-- for (MonitorConstants.FileNodeManagerStatConstants statConstant :
-- MonitorConstants.FileNodeManagerStatConstants.values()) {
-- hashMap
-- .put(MonitorConstants.STAT_STORAGE_DELTA_NAME + MonitorConstants.MONITOR_PATH_SEPARATOR
-- + statConstant.name(), MonitorConstants.DATA_TYPE_INT64);
-- }
-- StatMonitor.getInstance().registerStatStorageGroup(hashMap);
-- }
--
-- /**
-- * This function is just for unit test.
-- */
-- public synchronized void resetFileNodeManager() {
-- for (String key : statParamsHashMap.keySet()) {
-- statParamsHashMap.put(key, new AtomicLong());
-- }
-- processorMap.clear();
-- }
--
-- /**
-- * @param filenodeName storage name, e.g., root.a.b
--   * @param filenodeName storage group name, e.g., root.a.b
-- private FileNodeProcessor constructNewProcessor(String filenodeName)
-- throws FileNodeManagerException {
-- try {
-- return new FileNodeProcessor(baseDir, filenodeName);
-- } catch (FileNodeProcessorException e) {
-- LOGGER.error("Can't construct the FileNodeProcessor, the filenode is {}", filenodeName, e);
-- throw new FileNodeManagerException(e);
-- }
-- }
--
-- private FileNodeProcessor getProcessor(String path, boolean isWriteLock)
-- throws FileNodeManagerException {
-- String filenodeName;
-- try {
--      // return the storage group name
-- filenodeName = MManager.getInstance().getFileNameByPath(path);
-- } catch (PathErrorException e) {
-- LOGGER.error("MManager get filenode name error, seriesPath is {}", path);
-- throw new FileNodeManagerException(e);
-- }
-- FileNodeProcessor processor;
-- processor = processorMap.get(filenodeName);
-- if (processor != null) {
-- processor.lock(isWriteLock);
-- } else {
-- filenodeName = filenodeName.intern();
-- // calculate the value with same key synchronously
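--        // intern() returns the canonical String instance for this name, so every
--        // thread creating a processor for the same storage group locks on the same monitor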
-- synchronized (filenodeName) {
-- processor = processorMap.get(filenodeName);
-- if (processor != null) {
-- processor.lock(isWriteLock);
-- } else {
-- // calculate the value with the key monitor
-- LOGGER.debug("construct a processor instance, the filenode is {}, Thread is {}",
-- filenodeName, Thread.currentThread().getId());
-- processor = constructNewProcessor(filenodeName);
-- processor.lock(isWriteLock);
-- processorMap.put(filenodeName, processor);
-- }
-- }
-- }
-- return processor;
-- }
--
-- /**
-- * recovery the filenode processor.
-- */
-- public void recovery() {
-- List<String> filenodeNames = null;
-- try {
-- filenodeNames = MManager.getInstance().getAllFileNames();
-- } catch (PathErrorException e) {
-- LOGGER.error("Restoring all FileNodes failed.", e);
-- return;
-- }
-- for (String filenodeName : filenodeNames) {
-- FileNodeProcessor fileNodeProcessor = null;
-- try {
-- // recover in initialization
-- fileNodeProcessor = getProcessor(filenodeName, true);
-- } catch (FileNodeManagerException e) {
-- LOGGER.error("Restoring fileNode {} failed.", filenodeName, e);
-- } finally {
-- if (fileNodeProcessor != null) {
-- fileNodeProcessor.writeUnlock();
-- }
-- }
-- // add index check sum
-- }
-- }
--
-- /**
-- * insert TsRecord into storage group.
-- *
-- * @param tsRecord input Data
--   * @param isMonitor if true, the insertion is done by StatMonitor and the statistic info will not
--   *        be recorded; if false, the statParamsHashMap will be updated.
--   * @return an int value representing the insert type: 0: failed; 1: overflow; 2: bufferwrite
-- */
-- public int insert(TSRecord tsRecord, boolean isMonitor) throws FileNodeManagerException {
-- long timestamp = tsRecord.time;
--
-- String deviceId = tsRecord.deviceId;
-- checkTimestamp(tsRecord);
--// //if memory is dangerous, directly reject
--// long memUsage = MemUtils.getRecordSize(tsRecord);
--// BasicMemController.UsageLevel level = BasicMemController.getInstance()
--// .acquireUsage(this, memUsage);
--// if (level == UsageLevel.DANGEROUS) {
--// return 0;
--// }
--
-- updateStat(isMonitor, tsRecord);
--
-- FileNodeProcessor fileNodeProcessor = getProcessor(deviceId, true);
-- int insertType;
--
-- try {
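--      // a timestamp below the last flushed update time is out-of-order data and is
--      // routed to the overflow processor; otherwise it is appended through bufferwrite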
-- long lastUpdateTime = fileNodeProcessor.getFlushLastUpdateTime(deviceId);
-- if (timestamp < lastUpdateTime) {
--
-- long startOverflow = System.currentTimeMillis();
--
-- insertOverflow(fileNodeProcessor, timestamp, tsRecord, isMonitor, deviceId);
--
-- startOverflow = System.currentTimeMillis() - startOverflow;
-- if (startOverflow > 1000) {
-- LOGGER.info("has overflow data, insert cost: {}", startOverflow);
-- }
--
-- insertType = 1;
-- } else {
-- insertBufferWrite(fileNodeProcessor, timestamp, isMonitor, tsRecord, deviceId);
-- insertType = 2;
-- }
-- } catch (FileNodeProcessorException e) {
-- LOGGER.error(String.format("Encounter an error when closing the buffer insert processor %s.",
-- fileNodeProcessor.getProcessorName()), e);
-- throw new FileNodeManagerException(e);
-- } finally {
-- fileNodeProcessor.writeUnlock();
-- }
-- // Modify the insert
-- if (!isMonitor) {
-- fileNodeProcessor.getStatParamsHashMap()
-- .get(MonitorConstants.FileNodeProcessorStatConstants.TOTAL_POINTS_SUCCESS.name())
-- .addAndGet(tsRecord.dataPointList.size());
-- fileNodeProcessor.getStatParamsHashMap()
-- .get(MonitorConstants.FileNodeProcessorStatConstants.TOTAL_REQ_SUCCESS.name())
-- .incrementAndGet();
-- statParamsHashMap.get(MonitorConstants.FileNodeManagerStatConstants.TOTAL_REQ_SUCCESS.name())
-- .incrementAndGet();
-- statParamsHashMap
-- .get(MonitorConstants.FileNodeManagerStatConstants.TOTAL_POINTS_SUCCESS.name())
-- .addAndGet(tsRecord.dataPointList.size());
-- }
-- return insertType;
-- }
--
-- private void writeLog(TSRecord tsRecord, boolean isMonitor, WriteLogNode logNode)
-- throws FileNodeManagerException {
-- try {
-- if (IoTDBDescriptor.getInstance().getConfig().isEnableWal()) {
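--        // flatten the TSRecord into measurement and value arrays so that it can be
--        // written to the WAL as an InsertPlan and replayed during recovery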
-- String[] measurementList = new String[tsRecord.dataPointList.size()];
-- String[] insertValues = new String[tsRecord.dataPointList.size()];
--        int i = 0;
-- for (DataPoint dp : tsRecord.dataPointList) {
-- measurementList[i] = dp.getMeasurementId();
-- insertValues[i] = dp.getValue().toString();
-- i++;
-- }
-- logNode.write(new InsertPlan(2, tsRecord.deviceId, tsRecord.time, measurementList,
-- insertValues));
-- }
-- } catch (IOException e) {
-- if (!isMonitor) {
-- updateStatHashMapWhenFail(tsRecord);
-- }
-- throw new FileNodeManagerException(e);
-- }
-- }
--
-- private void checkTimestamp(TSRecord tsRecord) throws FileNodeManagerException {
-- if (tsRecord.time < 0) {
--      LOGGER.error("The insert time is less than 0, {}.", tsRecord);
--      throw new FileNodeManagerException("The insert time is less than 0, the tsrecord is " + tsRecord);
-- }
-- }
--
-- private void updateStat(boolean isMonitor, TSRecord tsRecord) {
-- if (!isMonitor) {
-- statParamsHashMap.get(MonitorConstants.FileNodeManagerStatConstants.TOTAL_POINTS.name())
-- .addAndGet(tsRecord.dataPointList.size());
-- }
-- }
--
-- private void insertOverflow(FileNodeProcessor fileNodeProcessor, long timestamp,
-- TSRecord tsRecord, boolean isMonitor, String deviceId)
-- throws FileNodeManagerException {
-- // get overflow processor
-- OverflowProcessor overflowProcessor;
-- String filenodeName = fileNodeProcessor.getProcessorName();
-- try {
-- overflowProcessor = fileNodeProcessor.getOverflowProcessor(filenodeName);
-- } catch (ProcessorException e) {
-- LOGGER.error("Get the overflow processor failed, the filenode is {}, insert time is {}",
-- filenodeName, timestamp);
-- if (!isMonitor) {
-- updateStatHashMapWhenFail(tsRecord);
-- }
-- throw new FileNodeManagerException(e);
-- }
-- // insert wal
-- try {
-- writeLog(tsRecord, isMonitor, overflowProcessor.getLogNode());
-- } catch (IOException e) {
-- throw new FileNodeManagerException(e);
-- }
-- // insert overflow data
-- try {
-- overflowProcessor.insert(tsRecord);
-- fileNodeProcessor.changeTypeToChanged(deviceId, timestamp);
-- fileNodeProcessor.setOverflowed(true);
-- } catch (IOException e) {
--      LOGGER.error("Insert into overflow error.", e);
-- if (!isMonitor) {
-- updateStatHashMapWhenFail(tsRecord);
-- }
-- throw new FileNodeManagerException(e);
-- }
-- }
--
-- private void insertBufferWrite(FileNodeProcessor fileNodeProcessor, long timestamp,
-- boolean isMonitor, TSRecord tsRecord, String deviceId)
-- throws FileNodeManagerException, FileNodeProcessorException {
--
-- long start1 = System.currentTimeMillis();
-- // get bufferwrite processor
-- BufferWriteProcessor bufferWriteProcessor;
-- String filenodeName = fileNodeProcessor.getProcessorName();
-- try {
-- bufferWriteProcessor = fileNodeProcessor.getBufferWriteProcessor(filenodeName, timestamp);
--
-- } catch (FileNodeProcessorException e) {
-- LOGGER.error("Get the bufferwrite processor failed, the filenode is {}, insert time is {}",
-- filenodeName, timestamp);
-- if (!isMonitor) {
-- updateStatHashMapWhenFail(tsRecord);
-- }
-- throw new FileNodeManagerException(e);
-- } finally {
-- long start1_1 = System.currentTimeMillis() - start1;
-- if (start1_1 > 1000) {
-- LOGGER.info("FileNodeManager.insertBufferWrite step-1-1, cost: {}", start1_1);
-- }
-- }
--
-- long start1_2 = System.currentTimeMillis();
-- // Add a new interval file to newfilelist
-- if (bufferWriteProcessor.isNewProcessor()) {
-- bufferWriteProcessor.setNewProcessor(false);
-- String bufferwriteBaseDir = bufferWriteProcessor.getBaseDir();
-- String bufferwriteRelativePath = bufferWriteProcessor.getFileRelativePath();
-- try {
-- bufferWriteProcessor.setCurrentTsFileResource(new TsFileResource(new File(new File(bufferwriteBaseDir), bufferwriteRelativePath), false));
-- fileNodeProcessor.addIntervalFileNode(bufferWriteProcessor.getCurrentTsFileResource());
-- } catch (Exception e) {
-- if (!isMonitor) {
-- updateStatHashMapWhenFail(tsRecord);
-- }
-- throw new FileNodeManagerException(e);
-- }
-- }
-- start1_2 = System.currentTimeMillis() - start1_2;
-- if (start1_2 > 1000) {
-- LOGGER.info("FileNodeManager.insertBufferWrite step-1-2, cost: {}", start1_2);
-- }
--
-- start1 = System.currentTimeMillis() - start1;
-- if (start1 > 1000) {
-- LOGGER.info("FileNodeManager.insertBufferWrite step-1, cost: {}", start1);
-- }
--
-- long start2 = System.currentTimeMillis();
--
-- long start2_1 = start2;
-- // insert wal
-- try {
-- writeLog(tsRecord, isMonitor, bufferWriteProcessor.getLogNode());
-- } catch (IOException e) {
-- throw new FileNodeManagerException(e);
-- }
-- start2_1 = System.currentTimeMillis() - start2_1;
-- if (start2_1 > 1000) {
-- LOGGER.info("FileNodeManager.insertBufferWrite step2-1 cost: {}", start2_1);
-- }
--
-- long start2_2 = System.currentTimeMillis();
-- // Write data
-- long prevStartTime = fileNodeProcessor.getIntervalFileNodeStartTime(deviceId);
-- long prevUpdateTime = fileNodeProcessor.getLastUpdateTime(deviceId);
--
-- fileNodeProcessor.setIntervalFileNodeStartTime(deviceId);
-- fileNodeProcessor.setLastUpdateTime(deviceId, timestamp);
--
-- start2_2 = System.currentTimeMillis() - start2_2;
-- if (start2_2 > 1000) {
-- LOGGER.info("FileNodeManager.insertBufferWrite step2-2 cost: {}", start2_2);
-- }
-- try {
-- long start2_3 = System.currentTimeMillis();
--
-- // insert tsrecord and check flushMetadata
-- if (!bufferWriteProcessor.write(tsRecord)) {
-- start2_3 = System.currentTimeMillis() - start2_3;
-- if (start2_3 > 1000) {
-- LOGGER.info("FileNodeManager.insertBufferWrite step2-3 cost: {}", start2_3);
-- }
--
-- long start2_4 = System.currentTimeMillis();
-- // undo time update
-- fileNodeProcessor.setIntervalFileNodeStartTime(deviceId, prevStartTime);
-- fileNodeProcessor.setLastUpdateTime(deviceId, prevUpdateTime);
-- start2_4 = System.currentTimeMillis() - start2_4;
-- if (start2_4 > 1000) {
-- LOGGER.info("FileNodeManager.insertBufferWrite step2-4 cost: {}", start2_4);
-- }
-- }
-- } catch (BufferWriteProcessorException e) {
-- if (!isMonitor) {
-- updateStatHashMapWhenFail(tsRecord);
-- }
-- throw new FileNodeManagerException(e);
-- }
-- start2 = System.currentTimeMillis() - start2;
-- if (start2 > 1000) {
-- LOGGER.info("FileNodeManager.insertBufferWrite step-2, cost: {}", start2);
-- }
--
-- long start3 = System.currentTimeMillis();
--
-- // check if the file should be closed
-- if (bufferWriteProcessor
-- .getFileSize() > IoTDBDescriptor.getInstance()
-- .getConfig().getBufferwriteFileSizeThreshold()) {
-- if (LOGGER.isInfoEnabled()) {
-- LOGGER.info(
-- "The filenode processor {} will setCloseMark the bufferwrite processor, "
-- + "because the size[{}] of tsfile {} reaches the threshold {}",
-- filenodeName, MemUtils.bytesCntToStr(bufferWriteProcessor.getFileSize()),
-- bufferWriteProcessor.getInsertFilePath(), MemUtils.bytesCntToStr(
-- IoTDBDescriptor.getInstance().getConfig().getBufferwriteFileSizeThreshold()));
-- }
--
-- fileNodeProcessor.closeBufferWrite();
-- start3 = System.currentTimeMillis() - start3;
-- if (start3 > 1000) {
-- LOGGER.info("FileNodeManager.insertBufferWrite step-3, setCloseMark buffer insert cost: {}", start3);
-- }
-- }
-- }
--
-- /**
-- * update data.
-- */
-- public void update(String deviceId, String measurementId, long startTime, long endTime,
-- TSDataType type, String v)
-- throws FileNodeManagerException {
--
-- FileNodeProcessor fileNodeProcessor = getProcessor(deviceId, true);
-- try {
--
-- long lastUpdateTime = fileNodeProcessor.getLastUpdateTime(deviceId);
-- if (startTime > lastUpdateTime) {
--        LOGGER.warn("The update range is invalid, startTime {} is greater than lastUpdateTime {}",
-- startTime,
-- lastUpdateTime);
-- return;
-- }
-- long finalEndTime = endTime > lastUpdateTime ? lastUpdateTime : endTime;
--
-- String filenodeName = fileNodeProcessor.getProcessorName();
-- // get overflow processor
-- OverflowProcessor overflowProcessor;
-- try {
-- overflowProcessor = fileNodeProcessor.getOverflowProcessor(filenodeName);
-- } catch (ProcessorException e) {
-- LOGGER.error(
-- "Get the overflow processor failed, the filenode is {}, "
-- + "insert time range is from {} to {}",
-- filenodeName, startTime, finalEndTime);
-- throw new FileNodeManagerException(e);
-- }
-- overflowProcessor.update(deviceId, measurementId, startTime, finalEndTime, type, v);
-- // change the type of tsfile to overflowed
-- fileNodeProcessor.changeTypeToChanged(deviceId, startTime, finalEndTime);
-- fileNodeProcessor.setOverflowed(true);
--
-- // insert wal
-- try {
-- if (IoTDBDescriptor.getInstance().getConfig().isEnableWal()) {
-- overflowProcessor.getLogNode().write(
-- new UpdatePlan(startTime, finalEndTime, v, new Path(deviceId
-- + "." + measurementId)));
-- }
-- } catch (IOException e) {
-- throw new FileNodeManagerException(e);
-- }
-- } finally {
-- fileNodeProcessor.writeUnlock();
-- }
-- }
--
-- /**
-- * delete data.
-- */
-- public void delete(String deviceId, String measurementId, long timestamp)
-- throws FileNodeManagerException {
--
-- FileNodeProcessor fileNodeProcessor = getProcessor(deviceId, true);
-- try {
-- long lastUpdateTime = fileNodeProcessor.getLastUpdateTime(deviceId);
-- // no tsfile data, the delete operation is invalid
-- if (lastUpdateTime == -1) {
--        LOGGER.warn("The last update time is -1, so the delete operation is invalid, "
-- + "the filenode processor is {}",
-- fileNodeProcessor.getProcessorName());
-- } else {
-- // insert wal
-- if (IoTDBDescriptor.getInstance().getConfig().isEnableWal()) {
-- // get processors for wal
-- String filenodeName = fileNodeProcessor.getProcessorName();
-- OverflowProcessor overflowProcessor;
-- BufferWriteProcessor bufferWriteProcessor;
-- try {
-- overflowProcessor = fileNodeProcessor.getOverflowProcessor(filenodeName);
-- // in case that no BufferWriteProcessor is available, a new BufferWriteProcessor is
-- // needed to access LogNode.
-- // TODO this may make the time range of the next TsFile a little wider
-- bufferWriteProcessor = fileNodeProcessor.getBufferWriteProcessor(filenodeName,
-- lastUpdateTime + 1);
-- } catch (ProcessorException e) {
-- LOGGER.error("Getting the processor failed, the filenode is {}, delete time is {}.",
-- filenodeName, timestamp);
-- throw new FileNodeManagerException(e);
-- }
-- try {
-- overflowProcessor.getLogNode().write(new DeletePlan(timestamp,
-- new Path(deviceId + "." + measurementId)));
-- bufferWriteProcessor.getLogNode().write(new DeletePlan(timestamp,
-- new Path(deviceId + "." + measurementId)));
-- } catch (IOException e) {
-- throw new FileNodeManagerException(e);
-- }
-- }
--
-- try {
-- fileNodeProcessor.delete(deviceId, measurementId, timestamp);
-- } catch (IOException e) {
-- throw new FileNodeManagerException(e);
-- }
-- // change the type of tsfile to overflowed
-- fileNodeProcessor.changeTypeToChangedForDelete(deviceId, timestamp);
-- fileNodeProcessor.setOverflowed(true);
--
-- }
-- } finally {
-- fileNodeProcessor.writeUnlock();
-- }
-- }
--
-- private void delete(String processorName,
-- Iterator<Map.Entry<String, FileNodeProcessor>> processorIterator)
-- throws FileNodeManagerException {
-- if (!processorMap.containsKey(processorName)) {
-- //TODO do we need to call processorIterator.remove() ?
-- LOGGER.warn("The processorMap doesn't contain the filenode processor {}.", processorName);
-- return;
-- }
-- LOGGER.info("Try to delete the filenode processor {}.", processorName);
-- FileNodeProcessor processor = processorMap.get(processorName);
-- if (!processor.tryWriteLock()) {
-- throw new FileNodeManagerException(String
--          .format("Can't delete the filenode processor %s because the insert lock can't be acquired.",
-- processorName));
-- }
--
-- try {
-- if (!processor.canBeClosed()) {
-- LOGGER.warn("The filenode processor {} can't be deleted.", processorName);
-- return;
-- }
--
-- try {
-- LOGGER.info("Delete the filenode processor {}.", processorName);
-- processor.delete();
-- processorIterator.remove();
-- } catch (ProcessorException e) {
-- LOGGER.error("Delete the filenode processor {} by iterator error.", processorName, e);
-- throw new FileNodeManagerException(e);
-- }
-- } finally {
-- processor.writeUnlock();
-- }
-- }
--
-- /**
-- * Similar to delete(), but only deletes data in BufferWrite. Only used by WAL recovery.
-- */
-- public void deleteBufferWrite(String deviceId, String measurementId, long timestamp)
-- throws FileNodeManagerException {
-- FileNodeProcessor fileNodeProcessor = getProcessor(deviceId, true);
-- try {
-- fileNodeProcessor.deleteBufferWrite(deviceId, measurementId, timestamp);
-- } catch (BufferWriteProcessorException | IOException e) {
-- throw new FileNodeManagerException(e);
-- } finally {
-- fileNodeProcessor.writeUnlock();
-- }
-- // change the type of tsfile to overflowed
-- fileNodeProcessor.changeTypeToChangedForDelete(deviceId, timestamp);
-- fileNodeProcessor.setOverflowed(true);
-- }
--
-- /**
-- * Similar to delete(), but only deletes data in Overflow. Only used by WAL recovery.
-- */
-- public void deleteOverflow(String deviceId, String measurementId, long timestamp)
-- throws FileNodeManagerException {
-- FileNodeProcessor fileNodeProcessor = getProcessor(deviceId, true);
-- try {
-- fileNodeProcessor.deleteOverflow(deviceId, measurementId, timestamp);
-- } catch (ProcessorException e) {
-- throw new FileNodeManagerException(e);
-- } finally {
-- fileNodeProcessor.writeUnlock();
-- }
-- // change the type of tsfile to overflowed
-- fileNodeProcessor.changeTypeToChangedForDelete(deviceId, timestamp);
-- fileNodeProcessor.setOverflowed(true);
-- }
--
-- /**
-- * begin query.
-- *
-- * @param deviceId queried deviceId
-- * @return a query token for the device.
-- */
-- public int beginQuery(String deviceId) throws FileNodeManagerException {
-- FileNodeProcessor fileNodeProcessor = getProcessor(deviceId, true);
-- try {
-- LOGGER.debug("Get the FileNodeProcessor: filenode is {}, begin query.",
-- fileNodeProcessor.getProcessorName());
-- return fileNodeProcessor.addMultiPassCount();
-- } finally {
-- fileNodeProcessor.writeUnlock();
-- }
-- }
--
-- /**
-- * query data.
-- */
-- public QueryDataSource query(SingleSeriesExpression seriesExpression, QueryContext context)
-- throws FileNodeManagerException {
-- String deviceId = seriesExpression.getSeriesPath().getDevice();
-- String measurementId = seriesExpression.getSeriesPath().getMeasurement();
-- FileNodeProcessor fileNodeProcessor = getProcessor(deviceId, false);
-- LOGGER.debug("Get the FileNodeProcessor: filenode is {}, query.",
-- fileNodeProcessor.getProcessorName());
-- try {
-- QueryDataSource queryDataSource;
-- // query operation must have overflow processor
-- if (!fileNodeProcessor.hasOverflowProcessor()) {
-- try {
-- fileNodeProcessor.getOverflowProcessor(fileNodeProcessor.getProcessorName());
-- } catch (ProcessorException e) {
-- LOGGER.error("Get the overflow processor failed, the filenode is {}, query is {},{}",
-- fileNodeProcessor.getProcessorName(), deviceId, measurementId);
-- throw new FileNodeManagerException(e);
-- }
-- }
-- try {
-- queryDataSource = fileNodeProcessor.query(deviceId, measurementId, context);
-- } catch (FileNodeProcessorException e) {
-- LOGGER.error("Query error: the deviceId {}, the measurementId {}", deviceId, measurementId,
-- e);
-- throw new FileNodeManagerException(e);
-- }
-- // return query structure
-- return queryDataSource;
-- } finally {
-- fileNodeProcessor.readUnlock();
-- }
-- }
--
-- /**
-- * end query.
-- */
-- public void endQuery(String deviceId, int token) throws FileNodeManagerException {
--
-- FileNodeProcessor fileNodeProcessor = getProcessor(deviceId, true);
-- try {
-- LOGGER.debug("Get the FileNodeProcessor: {} end query.",
-- fileNodeProcessor.getProcessorName());
-- fileNodeProcessor.decreaseMultiPassCount(token);
-- } catch (FileNodeProcessorException e) {
-- LOGGER.error("Failed to end query: the deviceId {}, token {}.", deviceId, token, e);
-- throw new FileNodeManagerException(e);
-- } finally {
-- fileNodeProcessor.writeUnlock();
-- }
-- }
--
-- /**
-- * Append one specified tsfile to the storage group. <b>This method is only provided for
--   * the transmission module</b>
-- *
-- * @param fileNodeName the seriesPath of storage group
-- * @param appendFile the appended tsfile information
-- */
-- public boolean appendFileToFileNode(String fileNodeName, TsFileResource appendFile,
-- String appendFilePath) throws FileNodeManagerException {
-- FileNodeProcessor fileNodeProcessor = getProcessor(fileNodeName, true);
-- try {
-- // check append file
-- for (Map.Entry<String, Long> entry : appendFile.getStartTimeMap().entrySet()) {
-- if (fileNodeProcessor.getLastUpdateTime(entry.getKey()) >= entry.getValue()) {
-- return false;
-- }
-- }
-- // setCloseMark bufferwrite file
-- fileNodeProcessor.closeBufferWrite();
-- // append file to storage group.
-- fileNodeProcessor.appendFile(appendFile, appendFilePath);
-- } catch (FileNodeProcessorException e) {
-- LOGGER.error("Cannot append the file {} to {}", appendFile.getFile().getAbsolutePath(), fileNodeName, e);
-- throw new FileNodeManagerException(e);
-- } finally {
-- fileNodeProcessor.writeUnlock();
-- }
-- return true;
-- }
--
-- /**
--   * get all overlapping tsfiles that conflict with the appendFile.
-- *
-- * @param fileNodeName the seriesPath of storage group
-- * @param appendFile the appended tsfile information
-- */
-- public List<String> getOverlapFilesFromFileNode(String fileNodeName, TsFileResource appendFile,
-- String uuid) throws FileNodeManagerException {
-- FileNodeProcessor fileNodeProcessor = getProcessor(fileNodeName, true);
-- List<String> overlapFiles;
-- try {
-- overlapFiles = fileNodeProcessor.getOverlapFiles(appendFile, uuid);
-- } catch (FileNodeProcessorException e) {
-- throw new FileNodeManagerException(e);
-- } finally {
-- fileNodeProcessor.writeUnlock();
-- }
-- return overlapFiles;
-- }
--
-- /**
-- * merge all overflowed filenode.
-- *
-- * @throws FileNodeManagerException FileNodeManagerException
-- */
-- public void mergeAll() throws FileNodeManagerException {
-- if (fileNodeManagerStatus != FileNodeManagerStatus.NONE) {
-- LOGGER.warn("Failed to merge all overflowed filenode, because filenode manager status is {}",
-- fileNodeManagerStatus);
-- return;
-- }
--
-- fileNodeManagerStatus = FileNodeManagerStatus.MERGE;
-- LOGGER.info("Start to merge all overflowed filenode");
-- List<String> allFileNodeNames;
-- try {
-- allFileNodeNames = MManager.getInstance().getAllFileNames();
-- } catch (PathErrorException e) {
--      LOGGER.error("Get all storage group seriesPath error.", e);
-- throw new FileNodeManagerException(e);
-- }
-- List<Future<?>> futureTasks = new ArrayList<>();
-- for (String fileNodeName : allFileNodeNames) {
-- FileNodeProcessor fileNodeProcessor = getProcessor(fileNodeName, true);
-- try {
-- Future<?> task = fileNodeProcessor.submitToMerge();
-- if (task != null) {
-- LOGGER.info("Submit the filenode {} to the merge pool", fileNodeName);
-- futureTasks.add(task);
-- }
-- } finally {
-- fileNodeProcessor.writeUnlock();
-- }
-- }
-- long totalTime = 0;
--    // loop waiting for the merge to end; a single wait is at most 60s
-- int time = 2;
-- List<Exception> mergeException = new ArrayList<>();
-- for (Future<?> task : futureTasks) {
-- while (!task.isDone()) {
-- try {
-- LOGGER.info(
-- "Waiting for the end of merge, already waiting for {}s, "
--              + "continue to wait another {}s",
-- totalTime, time);
-- TimeUnit.SECONDS.sleep(time);
-- totalTime += time;
-- time = updateWaitTime(time);
-- } catch (InterruptedException e) {
--          LOGGER.error("Unexpected interruption", e);
-- Thread.currentThread().interrupt();
-- }
-- }
-- try {
-- task.get();
-- } catch (InterruptedException e) {
--        LOGGER.error("Unexpected interruption", e);
-- } catch (ExecutionException e) {
-- mergeException.add(e);
--        LOGGER.error("The exception for merge.", e);
-- }
-- }
-- if (!mergeException.isEmpty()) {
-- // just throw the first exception
-- throw new FileNodeManagerException(mergeException.get(0));
-- }
-- fileNodeManagerStatus = FileNodeManagerStatus.NONE;
--    LOGGER.info("Finish merging all overflowed filenode");
-- }
--
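--  // exponential backoff: 2s, 4s, ..., 32s, then a fixed 60s per retry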
-- private int updateWaitTime(int time) {
-- return time < 32 ? time * 2 : 60;
-- }
--
-- /**
-- * delete one filenode.
-- */
-- public void deleteOneFileNode(String processorName) throws FileNodeManagerException {
-- if (fileNodeManagerStatus != FileNodeManagerStatus.NONE) {
-- return;
-- }
--
-- fileNodeManagerStatus = FileNodeManagerStatus.CLOSE;
-- try {
-- if (processorMap.containsKey(processorName)) {
-- deleteFileNodeBlocked(processorName);
-- }
-- String fileNodePath = TsFileDBConf.getFileNodeDir();
-- fileNodePath = standardizeDir(fileNodePath) + processorName;
-- FileUtils.deleteDirectory(new File(fileNodePath));
--
-- cleanBufferWrite(processorName);
--
-- MultiFileLogNodeManager.getInstance()
-- .deleteNode(processorName + IoTDBConstant.BUFFERWRITE_LOG_NODE_SUFFIX);
-- MultiFileLogNodeManager.getInstance()
-- .deleteNode(processorName + IoTDBConstant.OVERFLOW_LOG_NODE_SUFFIX);
-- } catch (IOException e) {
-- LOGGER.error("Delete the filenode processor {} error.", processorName, e);
-- throw new FileNodeManagerException(e);
-- } finally {
-- fileNodeManagerStatus = FileNodeManagerStatus.NONE;
-- }
-- }
--
-- private void cleanBufferWrite(String processorName) throws IOException {
- List<String> bufferwritePathList = DIRECTORY_MANAGER.getAllTsFileFolders();
- List<String> bufferwritePathList = directories.getAllTsFileFolders();
-- for (String bufferwritePath : bufferwritePathList) {
-- bufferwritePath = standardizeDir(bufferwritePath) + processorName;
-- File bufferDir = new File(bufferwritePath);
-- // free and setCloseMark the streams under this bufferwrite directory
-- if (!bufferDir.exists()) {
-- continue;
-- }
-- File[] bufferFiles = bufferDir.listFiles();
-- if (bufferFiles != null) {
-- for (File bufferFile : bufferFiles) {
-- FileReaderManager.getInstance().closeFileAndRemoveReader(bufferFile.getPath());
-- }
-- }
-- FileUtils.deleteDirectory(new File(bufferwritePath));
-- }
-- }
--
-- private void deleteFileNodeBlocked(String processorName) throws FileNodeManagerException {
-- LOGGER.info("Forced to delete the filenode processor {}", processorName);
-- FileNodeProcessor processor = processorMap.get(processorName);
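--    // spin until the write lock is acquired and the processor can be closed, retrying every 100ms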
-- while (true) {
-- if (processor.tryWriteLock()) {
-- try {
-- if (processor.canBeClosed()) {
-- LOGGER.info("Delete the filenode processor {}.", processorName);
-- processor.delete();
-- processorMap.remove(processorName);
-- break;
-- } else {
-- LOGGER.info(
-- "Can't delete the filenode processor {}, "
-- + "because the filenode processor can't be closed."
--                  + " Wait 100ms to retry", processorName);
-- }
-- } catch (ProcessorException e) {
-- LOGGER.error("Delete the filenode processor {} error.", processorName, e);
-- throw new FileNodeManagerException(e);
-- } finally {
-- processor.writeUnlock();
-- }
-- } else {
-- LOGGER.info(
-- "Can't delete the filenode processor {}, because it can't get the insert lock."
-- + " Wait 100ms to retry", processorName);
-- }
-- try {
-- TimeUnit.MILLISECONDS.sleep(100);
-- } catch (InterruptedException e) {
--        LOGGER.error("Unexpected interruption.", e);
-- Thread.currentThread().interrupt();
-- }
-- }
-- }
--
-- private String standardizeDir(String originalPath) {
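--    // append a trailing File.separator when the path is empty or does not already end with one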
-- String res = originalPath;
-- if ((originalPath.length() > 0
-- && originalPath.charAt(originalPath.length() - 1) != File.separatorChar)
-- || originalPath.length() == 0) {
-- res = originalPath + File.separatorChar;
-- }
-- return res;
-- }
--
-- /**
-- * add time series.
-- */
-- public void addTimeSeries(Path path, TSDataType dataType, TSEncoding encoding,
-- CompressionType compressor,
-- Map<String, String> props) throws FileNodeManagerException {
-- FileNodeProcessor fileNodeProcessor = getProcessor(path.getFullPath(), true);
-- try {
-- fileNodeProcessor.addTimeSeries(path.getMeasurement(), dataType, encoding, compressor, props);
-- } finally {
-- fileNodeProcessor.writeUnlock();
-- }
-- }
--
--
-- /**
-- * Force to setCloseMark the filenode processor.
-- */
-- public void closeOneFileNode(String processorName) throws FileNodeManagerException {
-- if (fileNodeManagerStatus != FileNodeManagerStatus.NONE) {
-- return;
-- }
--
-- fileNodeManagerStatus = FileNodeManagerStatus.CLOSE;
-- try {
-- LOGGER.info("Force to setCloseMark the filenode processor {}.", processorName);
-- while (!closeOneProcessor(processorName)) {
-- try {
-- LOGGER.info("Can't force to setCloseMark the filenode processor {}, wait 100ms to retry",
-- processorName);
-- TimeUnit.MILLISECONDS.sleep(100);
-- } catch (InterruptedException e) {
-- // ignore the interrupted exception
--          LOGGER.error("Unexpected interruption", e);
-- Thread.currentThread().interrupt();
-- }
-- }
-- } finally {
-- fileNodeManagerStatus = FileNodeManagerStatus.NONE;
-- }
-- }
--
--
-- /**
--   * try to setCloseMark the filenode processor whose name is processorName.
--   * notice: this method has the same function as close()
-- */
-- private boolean closeOneProcessor(String processorName) throws FileNodeManagerException {
-- if (!processorMap.containsKey(processorName)) {
-- return true;
-- }
--
-- Processor processor = processorMap.get(processorName);
-- if (processor.tryWriteLock()) {
-- try {
-- if (processor.canBeClosed()) {
-- processor.close();
-- return true;
-- } else {
-- return false;
-- }
-- } catch (ProcessorException e) {
-- LOGGER.error("Close the filenode processor {} error.", processorName, e);
-- throw new FileNodeManagerException(e);
-- } finally {
-- processor.writeUnlock();
-- }
-- } else {
-- return false;
-- }
-- }
--
-- /**
-- * try to setCloseMark the filenode processor.
--   * notice: This method has the same function as closeOneProcessor()
-- */
-- private void close(String processorName) throws FileNodeManagerException {
-- if (!processorMap.containsKey(processorName)) {
-- LOGGER.warn("The processorMap doesn't contain the filenode processor {}.", processorName);
-- return;
-- }
-- LOGGER.info("Try to setCloseMark the filenode processor {}.", processorName);
-- FileNodeProcessor processor = processorMap.get(processorName);
-- if (!processor.tryWriteLock()) {
-- LOGGER.warn("Can't get the insert lock of the filenode processor {}.", processorName);
-- return;
-- }
-- try {
-- if (processor.canBeClosed()) {
-- try {
-- LOGGER.info("Close the filenode processor {}.", processorName);
-- processor.close();
-- } catch (ProcessorException e) {
-- LOGGER.error("Close the filenode processor {} error.", processorName, e);
-- throw new FileNodeManagerException(e);
-- }
-- } else {
-- LOGGER.warn("The filenode processor {} can't be closed.", processorName);
-- }
-- } finally {
-- processor.writeUnlock();
-- }
-- }
--
-- /**
-- * delete all filenode.
-- */
-- public synchronized boolean deleteAll() throws FileNodeManagerException {
-- LOGGER.info("Start deleting all filenode");
-- if (fileNodeManagerStatus != FileNodeManagerStatus.NONE) {
-- LOGGER.info("Failed to delete all filenode processor because of merge operation");
-- return false;
-- }
--
-- fileNodeManagerStatus = FileNodeManagerStatus.CLOSE;
-- try {
-- Iterator<Map.Entry<String, FileNodeProcessor>> processorIterator = processorMap.entrySet()
-- .iterator();
-- while (processorIterator.hasNext()) {
-- Map.Entry<String, FileNodeProcessor> processorEntry = processorIterator.next();
-- delete(processorEntry.getKey(), processorIterator);
-- }
-- return processorMap.isEmpty();
-- } finally {
-- LOGGER.info("Deleting all FileNodeProcessors ends");
-- fileNodeManagerStatus = FileNodeManagerStatus.NONE;
-- }
-- }
--
-- /**
--   * Try to setCloseMark all filenode processors.
-- */
-- public void closeAll() throws FileNodeManagerException {
-- LOGGER.info("Start closing all filenode processor");
-- if (fileNodeManagerStatus != FileNodeManagerStatus.NONE) {
-- LOGGER.info("Failed to setCloseMark all filenode processor because of merge operation");
-- return;
-- }
-- fileNodeManagerStatus = FileNodeManagerStatus.CLOSE;
-- try {
-- for (Map.Entry<String, FileNodeProcessor> processorEntry : processorMap.entrySet()) {
-- close(processorEntry.getKey());
-- }
-- } finally {
-- LOGGER.info("Close all FileNodeProcessors ends");
-- fileNodeManagerStatus = FileNodeManagerStatus.NONE;
-- }
-- }
--
-- /**
-- * force flushMetadata to control memory usage.
-- */
-- public void forceFlush(BasicMemController.UsageLevel level) {
--    // you may add some more fine-grained processing like below,
--    // or you could provide multiple methods for different urgency levels
-- switch (level) {
-- // only select the most urgent (most active or biggest in size)
-- // processors to flushMetadata
-- // only select top 10% active memory user to flushMetadata
-- case WARNING:
-- try {
-- flushTop(0.1f);
-- } catch (IOException e) {
--          LOGGER.error("force flushMetadata memory data error", e);
-- }
-- break;
-- // force all processors to flushMetadata
-- case DANGEROUS:
-- try {
-- flushAll();
-- } catch (IOException e) {
--          LOGGER.error("force flushMetadata memory data error", e);
-- }
-- break;
--      // if the flushMetadata thread pool is not full (or half full), start a new
-- // flushMetadata task
-- case SAFE:
-- if (FlushPoolManager.getInstance().getActiveCnt() < 0.5 * FlushPoolManager.getInstance()
-- .getThreadCnt()) {
-- try {
-- flushTop(0.01f);
-- } catch (IOException e) {
--            LOGGER.error("force flushMetadata memory data error", e);
-- }
-- }
-- break;
-- default:
-- }
-- }
--
-- private void flushAll() throws IOException {
-- for (FileNodeProcessor processor : processorMap.values()) {
-- if (!processor.tryLock(true)) {
-- continue;
-- }
-- try {
-- boolean isMerge = processor.flush().isHasOverflowFlushTask();
-- if (isMerge) {
-- processor.submitToMerge();
-- }
-- } finally {
-- processor.unlock(true);
-- }
-- }
-- }
--
-- private void flushTop(float percentage) throws IOException {
-- List<FileNodeProcessor> tempProcessors = new ArrayList<>(processorMap.values());
--    // sort the tempProcessors in descending order of memory usage
-- tempProcessors.sort((o1, o2) -> (int) (o2.memoryUsage() - o1.memoryUsage()));
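--    // flushNum is at least 1, so one processor is flushed even when the percentage rounds down to zero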
-- int flushNum =
-- (int) (tempProcessors.size() * percentage) > 1
-- ? (int) (tempProcessors.size() * percentage)
-- : 1;
-- for (int i = 0; i < flushNum && i < tempProcessors.size(); i++) {
-- FileNodeProcessor processor = tempProcessors.get(i);
--      // skip processors whose memory usage is at most half of groupSizeInByte (64 MB under the default configuration)
-- if (processor.memoryUsage() <= TSFileConfig.groupSizeInByte / 2) {
-- continue;
-- }
-- long start = System.currentTimeMillis();
-- processor.writeLock();
-- try {
-- boolean isMerge = processor.flush().isHasOverflowFlushTask();
-- if (isMerge) {
-- processor.submitToMerge();
-- }
-- } finally {
-- processor.writeUnlock();
-- }
-- start = System.currentTimeMillis() - start;
-- LOGGER.info("flushMetadata Top cost: {}", start);
-- }
-- }
--
-- @Override
-- public void start() {
--    // do nothing
-- }
--
-- @Override
-- public void stop() {
-- try {
-- closeAll();
-- } catch (FileNodeManagerException e) {
--      LOGGER.error("Failed to setCloseMark the file node manager.", e);
-- }
--
-- boolean notFinished = true;
-- while (notFinished) {
-- int size = 0;
-- for (FileNodeProcessor fileNodeProcessor : processorMap.values()) {
-- size += fileNodeProcessor.getClosingBufferWriteProcessor().size();
-- }
-- if (size == 0) {
-- notFinished = false;
-- } else {
-- try {
-- Thread.sleep(10);
-- } catch (InterruptedException e) {
-- LOGGER.error("File node Manager Stop process is interrupted", e);
-- }
-- }
-- }
-- closedProcessorCleaner.shutdownNow();
-- }
--
-- @Override
-- public ServiceType getID() {
-- return ServiceType.FILE_NODE_SERVICE;
-- }
--
-- private enum FileNodeManagerStatus {
-- NONE, MERGE, CLOSE
-- }
--
-- private static class FileNodeManagerHolder {
--
-- private FileNodeManagerHolder() {
-- }
--
-- private static final FileNodeManager INSTANCE = new FileNodeManager(
-- TsFileDBConf.getFileNodeDir());
-- }
--
--}
--
--
--
++// if (size == 0) {
++// notFinished = false;
++// } else {
++// try {
++// Thread.sleep(10);
++// } catch (InterruptedException e) {
++// LOGGER.error("File node Manager Stop process is interrupted", e);
++// }
++// }
++// }
++// closedProcessorCleaner.shutdownNow();
++// }
++//
++// @Override
++// public ServiceType getID() {
++// return ServiceType.FILE_NODE_SERVICE;
++// }
++//
++// private enum FileNodeManagerStatus {
++// NONE, MERGE, CLOSE
++// }
++//
++// private static class FileNodeManagerHolder {
++//
++// private FileNodeManagerHolder() {
++// }
++//
++// private static final FileNodeManager INSTANCE = new FileNodeManager(
++// TsFileDBConf.getFileNodeDir());
++// }
++//
++//}
++//
++//
++//
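
For reference, the FileNodeManagerHolder removed above follows the standard
initialization-on-demand holder idiom for lazy, thread-safe singletons. A
minimal, self-contained sketch of the idiom (the Manager class below is
illustrative only, not part of the IoTDB codebase):

    public class Manager {

      private Manager() {
        // runs exactly once, on the first access of Holder.INSTANCE
      }

      // the JVM class-loading contract guarantees that Holder is initialized
      // lazily and atomically, so no explicit locking or volatile field is needed
      private static class Holder {
        private static final Manager INSTANCE = new Manager();
      }

      public static Manager getInstance() {
        return Holder.INSTANCE;
      }
    }
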
diff --cc iotdb/src/main/java/org/apache/iotdb/db/engine/filenode/FileNodeProcessor.java
index 1869aad,a61edc9..7eca2ce
--- a/iotdb/src/main/java/org/apache/iotdb/db/engine/filenode/FileNodeProcessor.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/engine/filenode/FileNodeProcessor.java
@@@ -1,2130 -1,2130 +1,2130 @@@
--/**
-- * Licensed to the Apache Software Foundation (ASF) under one
-- * or more contributor license agreements. See the NOTICE file
-- * distributed with this work for additional information
-- * regarding copyright ownership. The ASF licenses this file
-- * to you under the Apache License, Version 2.0 (the
-- * "License"); you may not use this file except in compliance
-- * with the License. You may obtain a copy of the License at
-- *
-- * http://www.apache.org/licenses/LICENSE-2.0
-- *
-- * Unless required by applicable law or agreed to in writing,
-- * software distributed under the License is distributed on an
-- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-- * KIND, either express or implied. See the License for the
-- * specific language governing permissions and limitations
-- * under the License.
-- */
--package org.apache.iotdb.db.engine.filenode;
--
--import static java.time.ZonedDateTime.ofInstant;
--
--import java.io.File;
--import java.io.FileInputStream;
--import java.io.FileOutputStream;
--import java.io.IOException;
--import java.nio.file.FileSystems;
--import java.nio.file.Files;
--import java.time.Instant;
--import java.time.ZoneId;
--import java.time.ZonedDateTime;
--import java.util.ArrayList;
--import java.util.HashMap;
--import java.util.HashSet;
--import java.util.Iterator;
--import java.util.List;
--import java.util.Map;
--import java.util.Map.Entry;
--import java.util.Objects;
--import java.util.Set;
--import java.util.concurrent.CountDownLatch;
--import java.util.concurrent.ExecutionException;
--import java.util.concurrent.Future;
--import java.util.concurrent.TimeUnit;
--import java.util.concurrent.TimeoutException;
--import java.util.concurrent.atomic.AtomicInteger;
--import java.util.concurrent.atomic.AtomicLong;
--import java.util.concurrent.locks.ReentrantLock;
--import java.util.function.Consumer;
--import org.apache.iotdb.db.conf.IoTDBConfig;
--import org.apache.iotdb.db.conf.IoTDBConstant;
--import org.apache.iotdb.db.conf.IoTDBDescriptor;
- import org.apache.iotdb.db.conf.directories.DirectoryManager;
-import org.apache.iotdb.db.conf.directories.Directories;
--import org.apache.iotdb.db.engine.Processor;
--import org.apache.iotdb.db.engine.bufferwrite.Action;
--import org.apache.iotdb.db.engine.bufferwrite.ActionException;
--import org.apache.iotdb.db.engine.bufferwrite.BufferWriteProcessor;
--import org.apache.iotdb.db.engine.bufferwrite.FileNodeConstants;
--import org.apache.iotdb.db.engine.modification.Deletion;
--import org.apache.iotdb.db.engine.modification.Modification;
--import org.apache.iotdb.db.engine.modification.ModificationFile;
--import org.apache.iotdb.db.engine.overflow.io.OverflowProcessor;
--import org.apache.iotdb.db.engine.pool.MergePoolManager;
--import org.apache.iotdb.db.engine.querycontext.GlobalSortedSeriesDataSource;
--import org.apache.iotdb.db.engine.querycontext.OverflowInsertFile;
--import org.apache.iotdb.db.engine.querycontext.OverflowSeriesDataSource;
--import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
--import org.apache.iotdb.db.engine.querycontext.ReadOnlyMemChunk;
--import org.apache.iotdb.db.engine.querycontext.UnsealedTsFile;
--import org.apache.iotdb.db.engine.version.SimpleFileVersionController;
--import org.apache.iotdb.db.engine.version.VersionController;
--import org.apache.iotdb.db.exception.BufferWriteProcessorException;
--import org.apache.iotdb.db.exception.ErrorDebugException;
--import org.apache.iotdb.db.exception.FileNodeProcessorException;
--import org.apache.iotdb.db.exception.OverflowProcessorException;
--import org.apache.iotdb.db.exception.PathErrorException;
--import org.apache.iotdb.db.exception.ProcessorException;
--import org.apache.iotdb.db.metadata.MManager;
--import org.apache.iotdb.db.monitor.IStatistic;
--import org.apache.iotdb.db.monitor.MonitorConstants;
--import org.apache.iotdb.db.monitor.StatMonitor;
--import org.apache.iotdb.db.query.context.QueryContext;
--import org.apache.iotdb.db.query.control.FileReaderManager;
--import org.apache.iotdb.db.query.factory.SeriesReaderFactory;
--import org.apache.iotdb.db.query.reader.IReader;
--import org.apache.iotdb.db.sync.conf.Constans;
--import org.apache.iotdb.db.utils.ImmediateFuture;
--import org.apache.iotdb.db.utils.MemUtils;
--import org.apache.iotdb.db.utils.QueryUtils;
--import org.apache.iotdb.db.utils.TimeValuePair;
--import org.apache.iotdb.db.writelog.recover.SeqTsFileRecoverPerformer;
--import org.apache.iotdb.tsfile.common.conf.TSFileConfig;
--import org.apache.iotdb.tsfile.exception.write.WriteProcessException;
--import org.apache.iotdb.tsfile.file.footer.ChunkGroupFooter;
--import org.apache.iotdb.tsfile.file.metadata.ChunkMetaData;
--import org.apache.iotdb.tsfile.file.metadata.enums.CompressionType;
--import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
--import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding;
--import org.apache.iotdb.tsfile.read.common.Path;
--import org.apache.iotdb.tsfile.read.expression.impl.SingleSeriesExpression;
--import org.apache.iotdb.tsfile.read.filter.TimeFilter;
--import org.apache.iotdb.tsfile.read.filter.basic.Filter;
--import org.apache.iotdb.tsfile.read.filter.factory.FilterFactory;
--import org.apache.iotdb.tsfile.utils.Pair;
--import org.apache.iotdb.tsfile.write.chunk.ChunkBuffer;
--import org.apache.iotdb.tsfile.write.chunk.ChunkWriterImpl;
--import org.apache.iotdb.tsfile.write.record.TSRecord;
--import org.apache.iotdb.tsfile.write.record.datapoint.LongDataPoint;
--import org.apache.iotdb.tsfile.write.schema.FileSchema;
--import org.apache.iotdb.tsfile.write.schema.MeasurementSchema;
--import org.apache.iotdb.tsfile.write.writer.TsFileIOWriter;
--import org.slf4j.Logger;
--import org.slf4j.LoggerFactory;
--
--public class FileNodeProcessor extends Processor implements IStatistic {
--
-- private static final String WARN_NO_SUCH_OVERFLOWED_FILE = "Can not find any tsfile which"
-- + " will be overflowed in the filenode processor {}, ";
-- public static final String RESTORE_FILE_SUFFIX = ".restore";
-- private static final Logger LOGGER = LoggerFactory.getLogger(FileNodeProcessor.class);
-- private static final IoTDBConfig TsFileDBConf = IoTDBDescriptor.getInstance().getConfig();
-- private static final MManager mManager = MManager.getInstance();
- private static final DirectoryManager DIRECTORY_MANAGER = DirectoryManager.getInstance();
- private static final Directories directories = Directories.getInstance();
-- private final String statStorageDeltaName;
-- private final HashMap<String, AtomicLong> statParamsHashMap = new HashMap<>();
-- /**
-- * Used to keep the oldest timestamp for each deviceId. The key is deviceId.
-- */
-- private volatile boolean isOverflowed;
-- private Map<String, Long> lastUpdateTimeMap;
-- private Map<String, Long> flushLastUpdateTimeMap;
-- private Map<String, List<TsFileResource>> invertedIndexOfFiles;
-- private TsFileResource emptyTsFileResource;
--// private TsFileResourceV2 currentTsFileResource;
-- private List<TsFileResource> newFileNodes;
-- private FileNodeProcessorStatus isMerging;
--
-- /**
-- * this is used when work->merge operation
-- */
-- private int numOfMergeFile;
-- private FileNodeProcessorStore fileNodeProcessorStore;
-- private String fileNodeRestoreFilePath;
-- private final Object fileNodeRestoreLock = new Object();
--
-- /**
-- * last merge time
-- */
-- private long lastMergeTime = -1;
-- private BufferWriteProcessor bufferWriteProcessor = null;
--
--  // the bufferwrite processors that are closing. (Because they are not fully closed,
--  // their memtables are not yet released and we still have to query data from them.)
-- //private ConcurrentSkipListSet<BufferWriteProcessor> closingBufferWriteProcessor = new ConcurrentSkipListSet<>();
-- private CopyOnReadLinkedList<BufferWriteProcessor> closingBufferWriteProcessor = new CopyOnReadLinkedList<>();
--
-- private OverflowProcessor overflowProcessor = null;
-- private Set<Integer> oldMultiPassTokenSet = null;
-- private Set<Integer> newMultiPassTokenSet = new HashSet<>();
--
-- /**
--   * Represents the number of old queries that have not ended.
--   * This parameter only decreases and never increases.
-- */
-- private CountDownLatch oldMultiPassCount = null;
--
-- /**
--   * Represents the number of new queries that have not ended.
-- */
-- private AtomicInteger newMultiPassCount = new AtomicInteger(0);
--
-- /**
-- * statistic monitor parameters
-- */
-- private Map<String, Action> parameters;
-- private FileSchema fileSchema;
--
-- private Action fileNodeFlushAction = () -> {
-- synchronized (fileNodeProcessorStore) {
-- try {
-- writeStoreToDisk(fileNodeProcessorStore);
-- } catch (FileNodeProcessorException e) {
-- throw new ActionException(e);
-- }
-- }
-- };
--
-- private Action bufferwriteFlushAction = () -> {
--    // update the lastUpdateTime. Notice: thread-safe
-- synchronized (fileNodeProcessorStore) {
-- // deep copy
-- Map<String, Long> tempLastUpdateMap = new HashMap<>(lastUpdateTimeMap);
-- // update flushLastUpdateTimeMap
-- for (Entry<String, Long> entry : lastUpdateTimeMap.entrySet()) {
-- flushLastUpdateTimeMap.put(entry.getKey(), entry.getValue() + 1);
-- }
-- fileNodeProcessorStore.setLastUpdateTimeMap(tempLastUpdateMap);
-- }
-- };
--
--// private Action bufferwriteCloseAction = new Action() {
++///**
++// * Licensed to the Apache Software Foundation (ASF) under one
++// * or more contributor license agreements. See the NOTICE file
++// * distributed with this work for additional information
++// * regarding copyright ownership. The ASF licenses this file
++// * to you under the Apache License, Version 2.0 (the
++// * "License"); you may not use this file except in compliance
++// * with the License. You may obtain a copy of the License at
++// *
++// * http://www.apache.org/licenses/LICENSE-2.0
++// *
++// * Unless required by applicable law or agreed to in writing,
++// * software distributed under the License is distributed on an
++// * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++// * KIND, either express or implied. See the License for the
++// * specific language governing permissions and limitations
++// * under the License.
++// */
++//package org.apache.iotdb.db.engine.filenode;
//
--// @Override
--// public void act() {
--// synchronized (fileNodeProcessorStore) {
--// fileNodeProcessorStore.setLatestFlushTimeForEachDevice(lastUpdateTimeMap);
--// addLastTimeToIntervalFile();
--// fileNodeProcessorStore.setSequenceFileList(newFileNodes);
++//import static java.time.ZonedDateTime.ofInstant;
++//
++//import java.io.File;
++//import java.io.FileInputStream;
++//import java.io.FileOutputStream;
++//import java.io.IOException;
++//import java.nio.file.FileSystems;
++//import java.nio.file.Files;
++//import java.time.Instant;
++//import java.time.ZoneId;
++//import java.time.ZonedDateTime;
++//import java.util.ArrayList;
++//import java.util.HashMap;
++//import java.util.HashSet;
++//import java.util.Iterator;
++//import java.util.List;
++//import java.util.Map;
++//import java.util.Map.Entry;
++//import java.util.Objects;
++//import java.util.Set;
++//import java.util.concurrent.CountDownLatch;
++//import java.util.concurrent.ExecutionException;
++//import java.util.concurrent.Future;
++//import java.util.concurrent.TimeUnit;
++//import java.util.concurrent.TimeoutException;
++//import java.util.concurrent.atomic.AtomicInteger;
++//import java.util.concurrent.atomic.AtomicLong;
++//import java.util.concurrent.locks.ReentrantLock;
++//import java.util.function.Consumer;
++//import org.apache.iotdb.db.conf.IoTDBConfig;
++//import org.apache.iotdb.db.conf.IoTDBConstant;
++//import org.apache.iotdb.db.conf.IoTDBDescriptor;
++//import org.apache.iotdb.db.conf.directories.DirectoryManager;
++//import org.apache.iotdb.db.engine.Processor;
++//import org.apache.iotdb.db.engine.bufferwrite.Action;
++//import org.apache.iotdb.db.engine.bufferwrite.ActionException;
++//import org.apache.iotdb.db.engine.bufferwrite.BufferWriteProcessor;
++//import org.apache.iotdb.db.engine.bufferwrite.FileNodeConstants;
++//import org.apache.iotdb.db.engine.modification.Deletion;
++//import org.apache.iotdb.db.engine.modification.Modification;
++//import org.apache.iotdb.db.engine.modification.ModificationFile;
++//import org.apache.iotdb.db.engine.overflow.io.OverflowProcessor;
++//import org.apache.iotdb.db.engine.pool.MergePoolManager;
++//import org.apache.iotdb.db.engine.querycontext.GlobalSortedSeriesDataSource;
++//import org.apache.iotdb.db.engine.querycontext.OverflowInsertFile;
++//import org.apache.iotdb.db.engine.querycontext.OverflowSeriesDataSource;
++//import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
++//import org.apache.iotdb.db.engine.querycontext.ReadOnlyMemChunk;
++//import org.apache.iotdb.db.engine.querycontext.UnsealedTsFile;
++//import org.apache.iotdb.db.engine.version.SimpleFileVersionController;
++//import org.apache.iotdb.db.engine.version.VersionController;
++//import org.apache.iotdb.db.exception.BufferWriteProcessorException;
++//import org.apache.iotdb.db.exception.ErrorDebugException;
++//import org.apache.iotdb.db.exception.FileNodeProcessorException;
++//import org.apache.iotdb.db.exception.OverflowProcessorException;
++//import org.apache.iotdb.db.exception.PathErrorException;
++//import org.apache.iotdb.db.exception.ProcessorException;
++//import org.apache.iotdb.db.metadata.MManager;
++//import org.apache.iotdb.db.monitor.IStatistic;
++//import org.apache.iotdb.db.monitor.MonitorConstants;
++//import org.apache.iotdb.db.monitor.StatMonitor;
++//import org.apache.iotdb.db.query.context.QueryContext;
++//import org.apache.iotdb.db.query.control.FileReaderManager;
++//import org.apache.iotdb.db.query.factory.SeriesReaderFactory;
++//import org.apache.iotdb.db.query.reader.IReader;
++//import org.apache.iotdb.db.sync.conf.Constans;
++//import org.apache.iotdb.db.utils.ImmediateFuture;
++//import org.apache.iotdb.db.utils.MemUtils;
++//import org.apache.iotdb.db.utils.QueryUtils;
++//import org.apache.iotdb.db.utils.TimeValuePair;
++//import org.apache.iotdb.db.writelog.recover.SeqTsFileRecoverPerformer;
++//import org.apache.iotdb.tsfile.common.conf.TSFileConfig;
++//import org.apache.iotdb.tsfile.exception.write.WriteProcessException;
++//import org.apache.iotdb.tsfile.file.footer.ChunkGroupFooter;
++//import org.apache.iotdb.tsfile.file.metadata.ChunkMetaData;
++//import org.apache.iotdb.tsfile.file.metadata.enums.CompressionType;
++//import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
++//import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding;
++//import org.apache.iotdb.tsfile.read.common.Path;
++//import org.apache.iotdb.tsfile.read.expression.impl.SingleSeriesExpression;
++//import org.apache.iotdb.tsfile.read.filter.TimeFilter;
++//import org.apache.iotdb.tsfile.read.filter.basic.Filter;
++//import org.apache.iotdb.tsfile.read.filter.factory.FilterFactory;
++//import org.apache.iotdb.tsfile.utils.Pair;
++//import org.apache.iotdb.tsfile.write.chunk.ChunkBuffer;
++//import org.apache.iotdb.tsfile.write.chunk.ChunkWriterImpl;
++//import org.apache.iotdb.tsfile.write.record.TSRecord;
++//import org.apache.iotdb.tsfile.write.record.datapoint.LongDataPoint;
++//import org.apache.iotdb.tsfile.write.schema.FileSchema;
++//import org.apache.iotdb.tsfile.write.schema.MeasurementSchema;
++//import org.apache.iotdb.tsfile.write.writer.TsFileIOWriter;
++//import org.slf4j.Logger;
++//import org.slf4j.LoggerFactory;
++//
++//public class FileNodeProcessor extends Processor implements IStatistic {
++//
++// private static final String WARN_NO_SUCH_OVERFLOWED_FILE = "Cannot find any tsfile that"
++// + " will be overflowed in the filenode processor {}, ";
++// public static final String RESTORE_FILE_SUFFIX = ".restore";
++// private static final Logger LOGGER = LoggerFactory.getLogger(FileNodeProcessor.class);
++// private static final IoTDBConfig TsFileDBConf = IoTDBDescriptor.getInstance().getConfig();
++// private static final MManager mManager = MManager.getInstance();
++// private static final DirectoryManager DIRECTORY_MANAGER = DirectoryManager.getInstance();
++// private final String statStorageDeltaName;
++// private final HashMap<String, AtomicLong> statParamsHashMap = new HashMap<>();
++// private volatile boolean isOverflowed;
++// /**
++// * Used to keep the latest update timestamp for each deviceId. The key is deviceId.
++// */
++// private Map<String, Long> lastUpdateTimeMap;
++// private Map<String, Long> flushLastUpdateTimeMap;
++// private Map<String, List<TsFileResource>> invertedIndexOfFiles;
++// private TsFileResource emptyTsFileResource;
++//// private TsFileResourceV2 currentTsFileResource;
++// private List<TsFileResource> newFileNodes;
++// private FileNodeProcessorStatus isMerging;
++//
++// /**
++// * this is used when work->merge operation
++// */
++// private int numOfMergeFile;
++// private FileNodeProcessorStore fileNodeProcessorStore;
++// private String fileNodeRestoreFilePath;
++// private final Object fileNodeRestoreLock = new Object();
++//
++// /**
++// * last merge time
++// */
++// private long lastMergeTime = -1;
++// private BufferWriteProcessor bufferWriteProcessor = null;
++//
++// //the BufferWrite processors that are closing. (Because they are not fully closed yet,
++// // their memtables have not been released and we still have to query data from them.)
++// //private ConcurrentSkipListSet<BufferWriteProcessor> closingBufferWriteProcessor = new ConcurrentSkipListSet<>();
++// private CopyOnReadLinkedList<BufferWriteProcessor> closingBufferWriteProcessor = new CopyOnReadLinkedList<>();
++//
++// private OverflowProcessor overflowProcessor = null;
++// private Set<Integer> oldMultiPassTokenSet = null;
++// private Set<Integer> newMultiPassTokenSet = new HashSet<>();
++//
++// /**
++// * Represents the number of old queries that have not ended.
++// * This counter only decreases and never increases.
++// */
++// private CountDownLatch oldMultiPassCount = null;
++//
++// /**
++// * Represents the number of new queries that have not ended.
++// */
++// private AtomicInteger newMultiPassCount = new AtomicInteger(0);
++//
++// /**
++// * statistic monitor parameters
++// */
++// private Map<String, Action> parameters;
++// private FileSchema fileSchema;
++//
++// private Action fileNodeFlushAction = () -> {
++// synchronized (fileNodeProcessorStore) {
++// try {
++// writeStoreToDisk(fileNodeProcessorStore);
++// } catch (FileNodeProcessorException e) {
++// throw new ActionException(e);
++// }
++// }
++// };
++//
++// private Action bufferwriteFlushAction = () -> {
++// // update the lastUpdateTime. Notice: thread safe.
++// synchronized (fileNodeProcessorStore) {
++// // deep copy
++// Map<String, Long> tempLastUpdateMap = new HashMap<>(lastUpdateTimeMap);
++// // update flushLastUpdateTimeMap
++// for (Entry<String, Long> entry : lastUpdateTimeMap.entrySet()) {
++// flushLastUpdateTimeMap.put(entry.getKey(), entry.getValue() + 1);
// }
++// fileNodeProcessorStore.setLastUpdateTimeMap(tempLastUpdateMap);
// }
++// };
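The flush action above publishes a snapshot of lastUpdateTimeMap to the store under the store's monitor, so that later in-place updates cannot mutate what was persisted. A minimal standalone sketch of this copy-on-publish pattern (class and field names are illustrative, not from the IoTDB sources):

    import java.util.HashMap;
    import java.util.Map;

    public class SnapshotPublishSketch {
      private final Object storeLock = new Object();
      private final Map<String, Long> lastUpdateTime = new HashMap<>();
      private Map<String, Long> publishedSnapshot = new HashMap<>();

      void update(String deviceId, long time) {
        synchronized (storeLock) {
          lastUpdateTime.merge(deviceId, time, Math::max);
        }
      }

      void flush() {
        synchronized (storeLock) {
          // deep copy before publishing so later updates cannot leak into the snapshot
          publishedSnapshot = new HashMap<>(lastUpdateTime);
        }
      }
    }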
//
--// private void addLastTimeToIntervalFile() {
++//// private Action bufferwriteCloseAction = new Action() {
++////
++//// @Override
++//// public void act() {
++//// synchronized (fileNodeProcessorStore) {
++//// fileNodeProcessorStore.setLatestFlushTimeForEachDevice(lastUpdateTimeMap);
++//// addLastTimeToIntervalFile();
++//// fileNodeProcessorStore.setSequenceFileList(newFileNodes);
++//// }
++//// }
++////
++//// private void addLastTimeToIntervalFile() {
++////
++//// if (!newFileNodes.isEmpty()) {
++//// // end time with one start time
++//// Map<String, Long> endTimeMap = new HashMap<>();
++//// for (Entry<String, Long> startTime : currentTsFileResource.getStartTimeMap().entrySet()) {
++//// String deviceId = startTime.getKey();
++//// endTimeMap.put(deviceId, lastUpdateTimeMap.get(deviceId));
++//// }
++//// currentTsFileResource.setEndTimeMap(endTimeMap);
++//// }
++//// }
++//// };
++//
++// private Consumer<BufferWriteProcessor> bufferwriteCloseConsumer = (bwProcessor) -> {
++// synchronized (fileNodeProcessorStore) {
++// fileNodeProcessorStore.setLastUpdateTimeMap(lastUpdateTimeMap);
//
// if (!newFileNodes.isEmpty()) {
// // end time with one start time
// Map<String, Long> endTimeMap = new HashMap<>();
--// for (Entry<String, Long> startTime : currentTsFileResource.getStartTimeMap().entrySet()) {
++// TsFileResource resource = bwProcessor.getCurrentTsFileResource();
++// for (Entry<String, Long> startTime : resource.getStartTimeMap().entrySet()) {
// String deviceId = startTime.getKey();
// endTimeMap.put(deviceId, lastUpdateTimeMap.get(deviceId));
// }
--// currentTsFileResource.setEndTimeMap(endTimeMap);
++// resource.setEndTimeMap(endTimeMap);
// }
++// fileNodeProcessorStore.setNewFileNodes(newFileNodes);
++// }
++// };
++//
++//
++// private Action overflowFlushAction = () -> {
++//
++// // update the new TsFileResourceV2 List and emptyIntervalFile.
++// // Notice: thread safe
++// synchronized (fileNodeProcessorStore) {
++// fileNodeProcessorStore.setOverflowed(isOverflowed);
++// fileNodeProcessorStore.setEmptyTsFileResource(emptyTsFileResource);
++// fileNodeProcessorStore.setNewFileNodes(newFileNodes);
// }
// };
--
-- private Consumer<BufferWriteProcessor> bufferwriteCloseConsumer = (bwProcessor) -> {
-- synchronized (fileNodeProcessorStore) {
-- fileNodeProcessorStore.setLastUpdateTimeMap(lastUpdateTimeMap);
--
-- if (!newFileNodes.isEmpty()) {
-- // end time with one start time
-- Map<String, Long> endTimeMap = new HashMap<>();
-- TsFileResource resource = bwProcessor.getCurrentTsFileResource();
-- for (Entry<String, Long> startTime : resource.getStartTimeMap().entrySet()) {
-- String deviceId = startTime.getKey();
-- endTimeMap.put(deviceId, lastUpdateTimeMap.get(deviceId));
-- }
-- resource.setEndTimeMap(endTimeMap);
-- }
-- fileNodeProcessorStore.setNewFileNodes(newFileNodes);
-- }
-- };
--
--
-- private Action overflowFlushAction = () -> {
--
-- // update the new TsFileResourceV2 List and emptyIntervalFile.
-- // Notice: thread safe
-- synchronized (fileNodeProcessorStore) {
-- fileNodeProcessorStore.setOverflowed(isOverflowed);
-- fileNodeProcessorStore.setEmptyTsFileResource(emptyTsFileResource);
-- fileNodeProcessorStore.setNewFileNodes(newFileNodes);
-- }
-- };
-- // Token used to identify multi-pass queries.
-- private int multiPassLockToken = 0;
-- private VersionController versionController;
-- private ReentrantLock mergeDeleteLock = new ReentrantLock();
--
-- /**
-- * This is the modification file of the result of the current merge.
-- */
-- private ModificationFile mergingModification;
--
-- private TsFileIOWriter mergeFileWriter = null;
-- private String mergeOutputPath = null;
-- private String mergeBaseDir = null;
-- private String mergeFileName = null;
-- private boolean mergeIsChunkGroupHasData = false;
-- private long mergeStartPos;
--
-- /**
-- * constructor of FileNodeProcessor.
-- */
-- FileNodeProcessor(String fileNodeDirPath, String processorName)
-- throws FileNodeProcessorException {
-- super(processorName);
-- for (MonitorConstants.FileNodeProcessorStatConstants statConstant :
-- MonitorConstants.FileNodeProcessorStatConstants.values()) {
-- statParamsHashMap.put(statConstant.name(), new AtomicLong(0));
-- }
-- statStorageDeltaName =
-- MonitorConstants.STAT_STORAGE_GROUP_PREFIX + MonitorConstants.MONITOR_PATH_SEPARATOR
-- + MonitorConstants.FILE_NODE_PATH + MonitorConstants.MONITOR_PATH_SEPARATOR
-- + processorName.replaceAll("\\.", "_");
--
-- this.parameters = new HashMap<>();
-- String dirPath = fileNodeDirPath;
-- if (dirPath.length() > 0
-- && dirPath.charAt(dirPath.length() - 1) != File.separatorChar) {
-- dirPath = dirPath + File.separatorChar;
-- }
--
-- File restoreFolder = new File(dirPath + processorName);
-- if (!restoreFolder.exists()) {
-- restoreFolder.mkdirs();
-- LOGGER.info(
-- "The restore directory of the filenode processor {} doesn't exist. Create new " +
-- "directory {}",
-- getProcessorName(), restoreFolder.getAbsolutePath());
-- }
-- fileNodeRestoreFilePath = new File(restoreFolder, processorName + RESTORE_FILE_SUFFIX)
-- .getPath();
-- try {
-- fileNodeProcessorStore = readStoreFromDisk();
-- } catch (FileNodeProcessorException e) {
-- LOGGER.error(
-- "The fileNode processor {} encountered an error when recoverying restore " +
-- "information.", processorName);
-- throw new FileNodeProcessorException(e);
-- }
-- // TODO deep clone the lastupdate time
-- emptyTsFileResource = fileNodeProcessorStore.getEmptyTsFileResource();
-- newFileNodes = fileNodeProcessorStore.getNewFileNodes();
-- isMerging = fileNodeProcessorStore.getFileNodeProcessorStatus();
-- numOfMergeFile = fileNodeProcessorStore.getNumOfMergeFile();
-- invertedIndexOfFiles = new HashMap<>();
--
-- // construct the fileschema
-- try {
-- this.fileSchema = constructFileSchema(processorName);
-- } catch (WriteProcessException e) {
-- throw new FileNodeProcessorException(e);
-- }
--
-- recover();
--
-- // register the stat service
-- if (TsFileDBConf.isEnableStatMonitor()) {
-- StatMonitor statMonitor = StatMonitor.getInstance();
-- registerStatMetadata();
-- statMonitor.registerStatistics(statStorageDeltaName, this);
-- }
-- try {
-- versionController = new SimpleFileVersionController(restoreFolder.getPath());
-- } catch (IOException e) {
-- throw new FileNodeProcessorException(e);
-- }
-- }
--
-- @Override
-- public Map<String, AtomicLong> getStatParamsHashMap() {
-- return statParamsHashMap;
-- }
--
-- @Override
-- public void registerStatMetadata() {
-- Map<String, String> hashMap = new HashMap<>();
-- for (MonitorConstants.FileNodeProcessorStatConstants statConstant :
-- MonitorConstants.FileNodeProcessorStatConstants.values()) {
-- hashMap
-- .put(statStorageDeltaName + MonitorConstants.MONITOR_PATH_SEPARATOR + statConstant.name(),
-- MonitorConstants.DATA_TYPE_INT64);
-- }
-- StatMonitor.getInstance().registerStatStorageGroup(hashMap);
-- }
--
-- @Override
-- public List<String> getAllPathForStatistic() {
-- List<String> list = new ArrayList<>();
-- for (MonitorConstants.FileNodeProcessorStatConstants statConstant :
-- MonitorConstants.FileNodeProcessorStatConstants.values()) {
-- list.add(
-- statStorageDeltaName + MonitorConstants.MONITOR_PATH_SEPARATOR + statConstant.name());
-- }
-- return list;
-- }
--
-- @Override
-- public Map<String, TSRecord> getAllStatisticsValue() {
-- Long curTime = System.currentTimeMillis();
-- HashMap<String, TSRecord> tsRecordHashMap = new HashMap<>();
-- TSRecord tsRecord = new TSRecord(curTime, statStorageDeltaName);
--
-- Map<String, AtomicLong> hashMap = getStatParamsHashMap();
-- tsRecord.dataPointList = new ArrayList<>();
-- for (Map.Entry<String, AtomicLong> entry : hashMap.entrySet()) {
-- tsRecord.dataPointList.add(new LongDataPoint(entry.getKey(), entry.getValue().get()));
-- }
--
-- tsRecordHashMap.put(statStorageDeltaName, tsRecord);
-- return tsRecordHashMap;
-- }
--
-- /**
-- * add interval FileNode.
-- */
-- void addIntervalFileNode(TsFileResource tsFileResource) throws ActionException {
-- newFileNodes.add(tsFileResource);
-- fileNodeProcessorStore.setNewFileNodes(newFileNodes);
-- fileNodeFlushAction.act();
-- }
--
-- /**
-- * set interval filenode start time.
-- *
-- * @param deviceId device ID
-- */
-- void setIntervalFileNodeStartTime(String deviceId) {
-- if (getBufferWriteProcessor().getCurrentTsFileResource().getStartTime(deviceId) == -1) {
-- getBufferWriteProcessor().getCurrentTsFileResource().setStartTime(deviceId,
-- flushLastUpdateTimeMap.get(deviceId));
-- if (!invertedIndexOfFiles.containsKey(deviceId)) {
-- invertedIndexOfFiles.put(deviceId, new ArrayList<>());
-- }
-- invertedIndexOfFiles.get(deviceId).add(getBufferWriteProcessor().getCurrentTsFileResource());
-- }
-- }
--
-- void setIntervalFileNodeStartTime(String deviceId, long time) {
-- if (time != -1) {
-- getBufferWriteProcessor().getCurrentTsFileResource().setStartTime(deviceId, time);
-- } else {
-- getBufferWriteProcessor().getCurrentTsFileResource().removeTime(deviceId);
-- invertedIndexOfFiles.get(deviceId).remove(getBufferWriteProcessor().getCurrentTsFileResource());
-- }
-- }
--
-- long getIntervalFileNodeStartTime(String deviceId) {
-- return getBufferWriteProcessor().getCurrentTsFileResource().getStartTime(deviceId);
-- }
--
-- private void addAllFileIntoIndex(List<TsFileResource> fileList) {
-- // clear map
-- invertedIndexOfFiles.clear();
-- // add all file to index
-- for (TsFileResource fileNode : fileList) {
-- if (fileNode.getStartTimeMap().isEmpty()) {
-- continue;
-- }
-- for (String deviceId : fileNode.getStartTimeMap().keySet()) {
-- if (!invertedIndexOfFiles.containsKey(deviceId)) {
-- invertedIndexOfFiles.put(deviceId, new ArrayList<>());
-- }
-- invertedIndexOfFiles.get(deviceId).add(fileNode);
-- }
-- }
-- }
--
-- public boolean isOverflowed() {
-- return isOverflowed;
-- }
--
-- /**
-- * if an overflow insert, update, or delete is written into this filenode processor, set
-- * <code>isOverflowed</code> to true.
-- */
-- public void setOverflowed(boolean isOverflowed) {
-- if (this.isOverflowed != isOverflowed) {
-- this.isOverflowed = isOverflowed;
-- }
-- }
--
-- public FileNodeProcessorStatus getFileNodeProcessorStatus() {
-- return isMerging;
-- }
--
-- /**
-- * execute filenode recovery.
-- */
-- public void recover() throws FileNodeProcessorException {
-- // restore sequential files
-- parameters.put(FileNodeConstants.BUFFERWRITE_FLUSH_ACTION, bufferwriteFlushAction);
-- //parameters.put(FileNodeConstants.BUFFERWRITE_CLOSE_ACTION, bufferwriteCloseAction);
-- parameters
-- .put(FileNodeConstants.FILENODE_PROCESSOR_FLUSH_ACTION, fileNodeFlushAction);
-- parameters.put(FileNodeConstants.OVERFLOW_FLUSH_ACTION, overflowFlushAction);
--
-- for (int i = 0; i < newFileNodes.size(); i++) {
-- TsFileResource tsFile = newFileNodes.get(i);
-- try {
-- String filePath = tsFile.getFilePath();
-- String logNodePrefix = BufferWriteProcessor.logNodePrefix(processorName);
-- SeqTsFileRecoverPerformer recoverPerformer =
-- new SeqTsFileRecoverPerformer(logNodePrefix,
-- fileSchema, versionController, tsFile);
-- recoverPerformer.recover();
-- } catch (ProcessorException e) {
-- LOGGER.error(
-- "The filenode processor {} failed to recover the bufferwrite processor, "
-- + "the last bufferwrite file is {}.",
-- getProcessorName(), tsFile.getFile().getName());
-- throw new FileNodeProcessorException(e);
-- }
-- }
-- recoverUpdateTimeMap();
--
-- // restore the overflow processor
-- LOGGER.info("The filenode processor {} will recover the overflow processor.",
-- getProcessorName());
--
-- try {
-- overflowProcessor = new OverflowProcessor(getProcessorName(), parameters, fileSchema,
-- versionController);
-- } catch (ProcessorException e) {
-- LOGGER.error("The filenode processor {} failed to recovery the overflow processor.",
-- getProcessorName());
-- throw new FileNodeProcessorException(e);
-- }
--
-- if (isMerging == FileNodeProcessorStatus.MERGING_WRITE) {
-- // re-merge all file
-- // if bufferwrite processor is not null, and setCloseMark
-- LOGGER.info("The filenode processor {} is recovering, the filenode status is {}.",
-- getProcessorName(), isMerging);
-- merge();
-- } else if (isMerging == FileNodeProcessorStatus.WAITING) {
-- LOGGER.info("The filenode processor {} is recovering, the filenode status is {}.",
-- getProcessorName(), isMerging);
-- switchWaitingToWorking();
-- }
-- // add file into index of file
-- addAllFileIntoIndex(newFileNodes);
-- }
--
-- private void recoverUpdateTimeMap() {
-- lastUpdateTimeMap = new HashMap<>();
-- flushLastUpdateTimeMap = new HashMap<>();
-- for (TsFileResource tsFileResource : newFileNodes) {
-- Map<String, Long> endTimeMap = tsFileResource.getEndTimeMap();
-- endTimeMap.forEach((key, value) -> {
-- Long lastTime = lastUpdateTimeMap.get(key);
-- if (lastTime == null || lastTime < value) {
-- lastUpdateTimeMap.put(key, value);
-- flushLastUpdateTimeMap.put(key, value);
-- }
-- });
-- }
-- }
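recoverUpdateTimeMap() above rebuilds the per-device latest timestamps by taking the maximum end time over all sealed files. A condensed sketch of that max-merge over plain Map<String, Long> end-time maps (names illustrative; Java 9+ for List.of/Map.of):

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class RecoverTimesSketch {
      static Map<String, Long> recover(List<Map<String, Long>> endTimeMaps) {
        Map<String, Long> lastUpdate = new HashMap<>();
        for (Map<String, Long> endTimeMap : endTimeMaps) {
          // keep the largest end time seen so far for each device
          endTimeMap.forEach((device, time) -> lastUpdate.merge(device, time, Math::max));
        }
        return lastUpdate;
      }

      public static void main(String[] args) {
        System.out.println(recover(List.of(
            Map.of("root.d1", 100L),
            Map.of("root.d1", 250L, "root.d2", 50L))));
        // prints {root.d1=250, root.d2=50} (map order may vary)
      }
    }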
--
-- //when calling this method, the bufferWriteProcessor must not be null
-- private BufferWriteProcessor getBufferWriteProcessor() {
-- return bufferWriteProcessor;
-- }
--
-- /**
-- * get bufferwrite processor by processor name and insert time.
-- */
-- public BufferWriteProcessor getBufferWriteProcessor(String processorName, long insertTime)
-- throws FileNodeProcessorException {
-- if (bufferWriteProcessor == null) {
-- Map<String, Action> params = new HashMap<>();
-- params.put(FileNodeConstants.BUFFERWRITE_FLUSH_ACTION, bufferwriteFlushAction);
-- //params.put(FileNodeConstants.BUFFERWRITE_CLOSE_ACTION, bufferwriteCloseAction);
-- params
-- .put(FileNodeConstants.FILENODE_PROCESSOR_FLUSH_ACTION, fileNodeFlushAction);
- String baseDir = DIRECTORY_MANAGER.getNextFolderForTsfile();
- String baseDir = directories.getNextFolderForTsfile();
-- LOGGER.info("Allocate folder {} for the new bufferwrite processor.", baseDir);
-- // construct processor or restore
-- try {
-- bufferWriteProcessor = new BufferWriteProcessor(baseDir, processorName,
-- insertTime + FileNodeConstants.BUFFERWRITE_FILE_SEPARATOR
-- + System.currentTimeMillis(),
-- params, bufferwriteCloseConsumer, versionController, fileSchema);
-- } catch (BufferWriteProcessorException e) {
-- throw new FileNodeProcessorException(String
-- .format("The filenode processor %s failed to get the bufferwrite processor.",
-- processorName), e);
-- }
-- }
-- return bufferWriteProcessor;
-- }
--
-- /**
-- * get overflow processor by processor name.
-- */
-- public OverflowProcessor getOverflowProcessor(String processorName) throws ProcessorException {
-- if (overflowProcessor == null) {
-- Map<String, Action> params = new HashMap<>();
-- // construct processor or restore
-- params.put(FileNodeConstants.OVERFLOW_FLUSH_ACTION, overflowFlushAction);
-- params
-- .put(FileNodeConstants.FILENODE_PROCESSOR_FLUSH_ACTION, fileNodeFlushAction);
-- overflowProcessor = new OverflowProcessor(processorName, params, fileSchema,
-- versionController);
-- } else if (overflowProcessor.isClosed()) {
-- overflowProcessor.reopen();
-- }
-- return overflowProcessor;
-- }
--
-- /**
-- * get overflow processor.
-- */
-- public OverflowProcessor getOverflowProcessor() {
-- if (overflowProcessor == null || overflowProcessor.isClosed()) {
-- LOGGER.error("The overflow processor is null when getting the overflowProcessor");
-- }
-- return overflowProcessor;
-- }
--
-- public boolean hasOverflowProcessor() {
-- return overflowProcessor != null && !overflowProcessor.isClosed();
-- }
--
-- public void setBufferwriteProcessroToClosed() {
--
-- bufferWriteProcessor = null;
-- }
--
-- public boolean hasBufferwriteProcessor() {
--
-- return bufferWriteProcessor != null;
-- }
--
-- /**
-- * set last update time.
-- */
-- public void setLastUpdateTime(String deviceId, long timestamp) {
-- if (!lastUpdateTimeMap.containsKey(deviceId) || lastUpdateTimeMap.get(deviceId) < timestamp) {
-- lastUpdateTimeMap.put(deviceId, timestamp);
-- }
-- if (timestamp == -1) {
-- lastUpdateTimeMap.remove(deviceId);
-- }
-- }
--
-- /**
-- * get last update time.
-- */
-- public long getLastUpdateTime(String deviceId) {
--
-- if (lastUpdateTimeMap.containsKey(deviceId)) {
-- return lastUpdateTimeMap.get(deviceId);
-- } else {
-- return -1;
-- }
-- }
--
-- /**
-- * get the last update time recorded at flush.
-- */
-- public long getFlushLastUpdateTime(String deviceId) {
-- if (!flushLastUpdateTimeMap.containsKey(deviceId)) {
-- flushLastUpdateTimeMap.put(deviceId, 0L);
-- }
-- return flushLastUpdateTimeMap.get(deviceId);
-- }
--
-- public Map<String, Long> getLastUpdateTimeMap() {
-- return lastUpdateTimeMap;
-- }
--
-- /**
-- * For overflow insert.
-- */
-- public void changeTypeToChanged(String deviceId, long timestamp) {
-- if (!invertedIndexOfFiles.containsKey(deviceId)) {
-- LOGGER.warn(
-- WARN_NO_SUCH_OVERFLOWED_FILE
-- + "the data is [device:{},time:{}]",
-- getProcessorName(), deviceId, timestamp);
-- emptyTsFileResource.setStartTime(deviceId, 0L);
-- emptyTsFileResource.setEndTime(deviceId, getLastUpdateTime(deviceId));
-- emptyTsFileResource.changeTypeToChanged(isMerging);
-- } else {
-- List<TsFileResource> temp = invertedIndexOfFiles.get(deviceId);
-- int index = searchIndexNodeByTimestamp(deviceId, timestamp, temp);
-- changeTypeToChanged(temp.get(index), deviceId);
-- }
-- }
--
-- private void changeTypeToChanged(TsFileResource fileNode, String deviceId) {
-- fileNode.changeTypeToChanged(isMerging);
-- if (isMerging == FileNodeProcessorStatus.MERGING_WRITE) {
-- fileNode.addMergeChanged(deviceId);
-- }
-- }
--
-- /**
-- * For overflow update.
-- */
-- public void changeTypeToChanged(String deviceId, long startTime, long endTime) {
-- if (!invertedIndexOfFiles.containsKey(deviceId)) {
-- LOGGER.warn(
-- WARN_NO_SUCH_OVERFLOWED_FILE
-- + "the data is [device:{}, start time:{}, end time:{}]",
-- getProcessorName(), deviceId, startTime, endTime);
-- emptyTsFileResource.setStartTime(deviceId, 0L);
-- emptyTsFileResource.setEndTime(deviceId, getLastUpdateTime(deviceId));
-- emptyTsFileResource.changeTypeToChanged(isMerging);
-- } else {
-- List<TsFileResource> temp = invertedIndexOfFiles.get(deviceId);
-- int left = searchIndexNodeByTimestamp(deviceId, startTime, temp);
-- int right = searchIndexNodeByTimestamp(deviceId, endTime, temp);
-- for (int i = left; i <= right; i++) {
-- changeTypeToChanged(temp.get(i), deviceId);
-- }
-- }
-- }
--
-- /**
-- * For overflow delete.
-- */
-- public void changeTypeToChangedForDelete(String deviceId, long timestamp) {
-- if (!invertedIndexOfFiles.containsKey(deviceId)) {
-- LOGGER.warn(
-- WARN_NO_SUCH_OVERFLOWED_FILE
-- + "the data is [device:{}, delete time:{}]",
-- getProcessorName(), deviceId, timestamp);
-- emptyTsFileResource.setStartTime(deviceId, 0L);
-- emptyTsFileResource.setEndTime(deviceId, getLastUpdateTime(deviceId));
-- emptyTsFileResource.changeTypeToChanged(isMerging);
-- } else {
-- List<TsFileResource> temp = invertedIndexOfFiles.get(deviceId);
-- int index = searchIndexNodeByTimestamp(deviceId, timestamp, temp);
-- for (int i = 0; i <= index; i++) {
-- temp.get(i).changeTypeToChanged(isMerging);
-- if (isMerging == FileNodeProcessorStatus.MERGING_WRITE) {
-- temp.get(i).addMergeChanged(deviceId);
-- }
-- }
-- }
-- }
--
-- /**
-- * Search the index of the interval by the timestamp.
-- *
-- * @return index of interval
-- */
-- private int searchIndexNodeByTimestamp(String deviceId, long timestamp,
-- List<TsFileResource> fileList) {
-- int index = 1;
-- while (index < fileList.size()) {
-- if (timestamp < fileList.get(index).getStartTime(deviceId)) {
-- break;
-- } else {
-- index++;
-- }
-- }
-- return index - 1;
-- }
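The scan above returns the index of the last file whose start time for the device is not greater than the queried timestamp, falling back to index 0 when the timestamp precedes the second file. A standalone sketch of the same boundary rule over a sorted long[] of start times (illustrative only):

    public class SearchIndexSketch {
      // startTimes must be sorted ascending, one entry per file for a device
      static int searchIndexByTimestamp(long[] startTimes, long timestamp) {
        int index = 1;
        while (index < startTimes.length && timestamp >= startTimes[index]) {
          index++;
        }
        return index - 1;
      }

      public static void main(String[] args) {
        long[] starts = {0L, 100L, 200L};
        System.out.println(searchIndexByTimestamp(starts, 150L)); // 1
        System.out.println(searchIndexByTimestamp(starts, 99L));  // 0
        System.out.println(searchIndexByTimestamp(starts, 300L)); // 2
      }
    }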
--
-- /**
-- * add multiple pass lock.
-- */
-- public int addMultiPassCount() {
-- LOGGER.debug("Add MultiPassCount: cloneList lock newMultiPassCount.");
-- newMultiPassCount.incrementAndGet();
-- while (newMultiPassTokenSet.contains(multiPassLockToken)) {
-- multiPassLockToken++;
-- }
-- newMultiPassTokenSet.add(multiPassLockToken);
-- LOGGER.debug("Add multi token:{}, nsPath:{}.", multiPassLockToken, getProcessorName());
-- return multiPassLockToken;
-- }
--
-- /**
-- * decrease multiple pass count. TODO: use the return value or remove it.
-- */
-- public boolean decreaseMultiPassCount(int token) throws FileNodeProcessorException {
-- if (newMultiPassTokenSet.contains(token)) {
-- int newMultiPassCountValue = newMultiPassCount.decrementAndGet();
-- if (newMultiPassCountValue < 0) {
-- throw new FileNodeProcessorException(String
-- .format("Remove MultiPassCount error, newMultiPassCount:%d", newMultiPassCountValue));
-- }
-- newMultiPassTokenSet.remove(token);
-- LOGGER.debug("Remove multi token:{}, nspath:{}, new set:{}, count:{}", token,
-- getProcessorName(),
-- newMultiPassTokenSet, newMultiPassCount);
-- return true;
-- } else if (oldMultiPassTokenSet != null && oldMultiPassTokenSet.contains(token)) {
-- // remove token first, then unlock
-- oldMultiPassTokenSet.remove(token);
-- oldMultiPassCount.countDown();
-- long oldMultiPassCountValue = oldMultiPassCount.getCount();
-- if (oldMultiPassCountValue < 0) {
-- throw new FileNodeProcessorException(String
-- .format("Remove MultiPassCount error, oldMultiPassCount:%d", oldMultiPassCountValue));
-- }
-- LOGGER.debug("Remove multi token:{}, old set:{}, count:{}", token, oldMultiPassTokenSet,
-- oldMultiPassCount.getCount());
-- return true;
-- } else {
-- LOGGER.error("remove token error:{},new set:{}, old set:{}", token, newMultiPassTokenSet,
-- oldMultiPassTokenSet);
-- // TODO: should throw an exception here
-- return false;
-- }
-- }
--
-- /**
-- * query data.
-- */
-- public <T extends Comparable<T>> QueryDataSource query(String deviceId, String measurementId,
-- QueryContext context) throws FileNodeProcessorException {
-- // query overflow data
-- MeasurementSchema mSchema;
-- TSDataType dataType;
--
-- //mSchema = mManager.getSchemaForOnePath(deviceId + "." + measurementId);
-- mSchema = fileSchema.getMeasurementSchema(measurementId);
-- dataType = mSchema.getType();
--
-- OverflowSeriesDataSource overflowSeriesDataSource;
-- try {
-- overflowSeriesDataSource = overflowProcessor.query(deviceId, measurementId, dataType,
-- mSchema.getProps(), context);
-- } catch (IOException e) {
-- throw new FileNodeProcessorException(e);
-- }
-- // tsfile data
-- List<TsFileResource> bufferwriteDataInFiles = new ArrayList<>();
-- for (TsFileResource tsFileResource : newFileNodes) {
-- // add the same tsFileResource, but not the same reference
-- if (tsFileResource.isClosed()) {
-- bufferwriteDataInFiles.add(tsFileResource.backUp());
-- }
-- }
-- Pair<ReadOnlyMemChunk, List<ChunkMetaData>> bufferwritedata = new Pair<>(null, null);
-- // bufferwrite data
-- UnsealedTsFile unsealedTsFile = null;
--
-- if (!newFileNodes.isEmpty() && !newFileNodes.get(newFileNodes.size() - 1).isClosed()
-- && !newFileNodes.get(newFileNodes.size() - 1).getStartTimeMap().isEmpty()) {
-- unsealedTsFile = new UnsealedTsFile();
-- unsealedTsFile.setFilePath(newFileNodes.get(newFileNodes.size() - 1).getFile().getAbsolutePath());
-- if (bufferWriteProcessor == null) {
-- throw new FileNodeProcessorException(String.format(
-- "The last of tsfile %s in filenode processor %s is not closed, "
-- + "but the bufferwrite processor is null.",
-- newFileNodes.get(newFileNodes.size() - 1).getFile().getAbsolutePath(), getProcessorName()));
-- }
-- bufferwritedata = bufferWriteProcessor
-- .queryBufferWriteData(deviceId, measurementId, dataType, mSchema.getProps());
--
-- try {
-- List<Modification> pathModifications = context.getPathModifications(
-- bufferWriteProcessor.getCurrentTsFileResource().getModFile(), deviceId
-- + IoTDBConstant.PATH_SEPARATOR + measurementId
-- );
-- if (!pathModifications.isEmpty()) {
-- QueryUtils.modifyChunkMetaData(bufferwritedata.right, pathModifications);
-- }
-- } catch (IOException e) {
-- throw new FileNodeProcessorException(e);
-- }
--
-- unsealedTsFile.setTimeSeriesChunkMetaDatas(bufferwritedata.right);
-- }
-- GlobalSortedSeriesDataSource globalSortedSeriesDataSource = new GlobalSortedSeriesDataSource(
-- new Path(deviceId + "." + measurementId), bufferwriteDataInFiles, unsealedTsFile,
-- bufferwritedata.left);
-- return new QueryDataSource(globalSortedSeriesDataSource, overflowSeriesDataSource);
--
-- }
--
-- /**
-- * append one specified tsfile to this filenode processor.
-- *
-- * @param appendFile the appended tsfile information
-- * @param appendFilePath the path of the appended file
-- */
-- public void appendFile(TsFileResource appendFile, String appendFilePath)
-- throws FileNodeProcessorException {
-- try {
-- if (!appendFile.getFile().getParentFile().exists()) {
-- appendFile.getFile().getParentFile().mkdirs();
-- }
-- // move file
-- File originFile = new File(appendFilePath);
-- File targetFile = appendFile.getFile();
-- if (!originFile.exists()) {
-- throw new FileNodeProcessorException(
-- String.format("The appended file %s does not exist.", appendFilePath));
-- }
-- if (targetFile.exists()) {
-- throw new FileNodeProcessorException(
-- String.format("The appended target file %s already exists.",
-- appendFile.getFile().getAbsolutePath()));
-- }
-- if (!originFile.renameTo(targetFile)) {
-- LOGGER.warn("File renaming failed when appending new file. Origin: {}, Target: {}",
-- originFile.getPath(), targetFile.getPath());
-- }
-- // append the new tsfile
-- this.newFileNodes.add(appendFile);
-- // update the lastUpdateTime
-- for (Entry<String, Long> entry : appendFile.getEndTimeMap().entrySet()) {
-- lastUpdateTimeMap.put(entry.getKey(), entry.getValue());
-- }
-- bufferwriteFlushAction.act();
-- fileNodeProcessorStore.setNewFileNodes(newFileNodes);
-- // reconstruct the inverted index of the newFileNodes
-- fileNodeFlushAction.act();
-- addAllFileIntoIndex(newFileNodes);
-- } catch (Exception e) {
-- LOGGER.error("Failed to append the tsfile {} to filenode processor {}.", appendFile,
-- getProcessorName());
-- throw new FileNodeProcessorException(e);
-- }
-- }
--
-- /**
-- * get overlap tsfiles which are conflict with the appendFile.
-- *
-- * @param appendFile the appended tsfile information
-- */
-- public List<String> getOverlapFiles(TsFileResource appendFile, String uuid)
-- throws FileNodeProcessorException {
-- List<String> overlapFiles = new ArrayList<>();
-- try {
-- for (TsFileResource tsFileResource : newFileNodes) {
-- getOverlapFiles(appendFile, tsFileResource, uuid, overlapFiles);
-- }
-- } catch (IOException e) {
-- LOGGER.error("Failed to get overlap tsfiles which conflict with the appendFile.");
-- throw new FileNodeProcessorException(e);
-- }
-- return overlapFiles;
-- }
--
-- private void getOverlapFiles(TsFileResource appendFile, TsFileResource tsFileResource,
-- String uuid, List<String> overlapFiles) throws IOException {
-- for (Entry<String, Long> entry : appendFile.getStartTimeMap().entrySet()) {
-- if (tsFileResource.getStartTimeMap().containsKey(entry.getKey()) &&
-- tsFileResource.getEndTime(entry.getKey()) >= entry.getValue()
-- && tsFileResource.getStartTime(entry.getKey()) <= appendFile
-- .getEndTime(entry.getKey())) {
-- String relativeFilePath =
-- Constans.SYNC_SERVER + File.separatorChar + uuid + File.separatorChar
-- + Constans.BACK_UP_DIRECTORY_NAME
-- + File.separatorChar + tsFileResource.getRelativePath();
-- File newFile = new File(
- DirectoryManager.getInstance().getTsFileFolder(tsFileResource.getBaseDirIndex()),
- Directories.getInstance().getTsFileFolder(tsFileResource.getBaseDirIndex()),
-- relativeFilePath);
-- if (!newFile.getParentFile().exists()) {
-- newFile.getParentFile().mkdirs();
-- }
-- java.nio.file.Path link = FileSystems.getDefault().getPath(newFile.getPath());
-- java.nio.file.Path target = FileSystems.getDefault()
-- .getPath(tsFileResource.getFile().getAbsolutePath());
-- Files.createLink(link, target);
-- overlapFiles.add(newFile.getPath());
-- break;
-- }
-- }
-- }
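The condition in getOverlapFiles is the standard closed-interval intersection test: [s1, e1] and [s2, e2] overlap exactly when s1 <= e2 and s2 <= e1. A tiny sketch of that predicate, assuming inclusive start/end times as used by TsFileResource:

    public class OverlapSketch {
      // closed intervals [startA, endA] and [startB, endB] intersect
      // iff each starts no later than the other ends
      static boolean overlaps(long startA, long endA, long startB, long endB) {
        return startA <= endB && startB <= endA;
      }

      public static void main(String[] args) {
        System.out.println(overlaps(0, 10, 10, 20)); // true: they touch at t=10
        System.out.println(overlaps(0, 10, 11, 20)); // false
      }
    }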
--
-- /**
-- * add time series.
-- */
-- public void addTimeSeries(String measurementId, TSDataType dataType, TSEncoding encoding,
-- CompressionType compressor, Map<String, String> props) {
-- fileSchema.registerMeasurement(new MeasurementSchema(measurementId, dataType, encoding,
-- compressor, props));
-- }
--
-- /**
-- * submit the merge task to the <code>MergePool</code>.
-- *
-- * @return null if the merge task cannot be submitted, because this filenode is not overflowed
-- * or it is merging now; otherwise the Future of the submitted merge task.
-- */
-- Future submitToMerge() {
-- ZoneId zoneId = IoTDBDescriptor.getInstance().getConfig().getZoneID();
-- if (lastMergeTime > 0) {
-- long thisMergeTime = System.currentTimeMillis();
-- long mergeTimeInterval = thisMergeTime - lastMergeTime;
-- ZonedDateTime lastDateTime = ofInstant(Instant.ofEpochMilli(lastMergeTime),
-- zoneId);
-- ZonedDateTime thisDateTime = ofInstant(Instant.ofEpochMilli(thisMergeTime),
-- zoneId);
-- LOGGER.info(
-- "The filenode {} last merge time is {}, this merge time is {}, "
-- + "merge time interval is {}s",
-- getProcessorName(), lastDateTime, thisDateTime, mergeTimeInterval / 1000);
-- }
-- lastMergeTime = System.currentTimeMillis();
--
-- if (overflowProcessor != null && !overflowProcessor.isClosed()) {
-- if (overflowProcessor.getFileSize() < IoTDBDescriptor.getInstance()
-- .getConfig().getOverflowFileSizeThreshold()) {
-- if (LOGGER.isInfoEnabled()) {
-- LOGGER.info(
-- "Skip this merge taks submission, because the size{} of overflow processor {} "
-- + "does not reaches the threshold {}.",
-- MemUtils.bytesCntToStr(overflowProcessor.getFileSize()), getProcessorName(),
-- MemUtils.bytesCntToStr(
-- IoTDBDescriptor.getInstance().getConfig().getOverflowFileSizeThreshold()));
-- }
-- return null;
-- }
-- } else {
-- LOGGER.info(
-- "Skip this merge taks submission, because the filenode processor {} "
-- + "has no overflow processor.",
-- getProcessorName());
-- return null;
-- }
-- if (isOverflowed && isMerging == FileNodeProcessorStatus.NONE) {
-- Runnable mergeThread;
-- mergeThread = new MergeRunnale();
-- LOGGER.info("Submit the merge task, the merge filenode is {}", getProcessorName());
-- return MergePoolManager.getInstance().submit(mergeThread);
-- } else {
-- if (!isOverflowed) {
-- LOGGER.info(
-- "Skip this merge taks submission, because the filenode processor {} is not " +
-- "overflowed.",
-- getProcessorName());
-- } else {
-- LOGGER.warn(
-- "Skip this merge task submission, because last merge task is not over yet, "
-- + "the merge filenode processor is {}",
-- getProcessorName());
-- }
-- }
-- return null;
-- }
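submitToMerge() above stacks three guards before handing a task to the merge pool: the overflow file must reach a size threshold, the filenode must be overflowed, and no merge may already be running. A compact sketch of the same gating against a plain ExecutorService (all names and the threshold parameter are illustrative):

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;

    public class MergeGateSketch {
      enum Status { NONE, MERGING_WRITE, WAITING }

      private final ExecutorService mergePool = Executors.newSingleThreadExecutor();
      private volatile boolean overflowed;
      private volatile Status status = Status.NONE;

      Future<?> submitToMerge(long overflowFileSize, long threshold, Runnable mergeTask) {
        if (overflowFileSize < threshold) {
          return null; // not enough overflow data to justify a merge
        }
        if (!overflowed || status != Status.NONE) {
          return null; // nothing to merge, or a merge is already in progress
        }
        return mergePool.submit(mergeTask);
      }
    }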
--
-- /**
-- * Prepare for merge: close the bufferwrite and overflow processors.
-- */
-- private void prepareForMerge() {
-- try {
-- LOGGER.info("The filenode processor {} prepares for merge, closes the bufferwrite processor",
-- getProcessorName());
-- Future<Boolean> future = closeBufferWrite();
-- future.get();
-- LOGGER.info("The bufferwrite processor {} is closed successfully",
-- getProcessorName());
-- // try to get overflow processor
-- getOverflowProcessor(getProcessorName());
-- // the overflow processor must be closed
-- while (!getOverflowProcessor().canBeClosed()) {
-- waitForClosing();
-- }
-- LOGGER.info("The filenode processor {} prepares for merge, closes the overflow processor",
-- getProcessorName());
-- getOverflowProcessor().close();
-- } catch (ProcessorException | InterruptedException | ExecutionException e) {
-- LOGGER.error("The filenode processor {} prepares for merge error.", getProcessorName());
-- writeUnlock();
-- throw new ErrorDebugException(e);
-- }
-- }
--
-- private void waitForClosing() {
-- try {
-- LOGGER.info(
-- "The filenode processor {} prepares for merge, the overflow {} can't be closed, "
-- + "wait 100ms,",
-- getProcessorName(), getProcessorName());
-- TimeUnit.MILLISECONDS.sleep(100);
-- } catch (InterruptedException e) {
-- Thread.currentThread().interrupt();
-- }
-- }
--
-- /**
-- * Merge this storage group, merge the tsfile data with overflow data.
-- */
-- public void merge() throws FileNodeProcessorException {
-- // close bufferwrite and overflow, prepare for merge
-- LOGGER.info("The filenode processor {} begins to merge.", getProcessorName());
-- writeLock();
-- prepareForMerge();
-- // change status from overflowed to no overflowed
-- isOverflowed = false;
-- // change status from work to merge
-- isMerging = FileNodeProcessorStatus.MERGING_WRITE;
-- // check the empty file
-- Map<String, Long> startTimeMap = emptyTsFileResource.getStartTimeMap();
-- mergeCheckEmptyFile(startTimeMap);
--
-- for (TsFileResource tsFileResource : newFileNodes) {
-- if (tsFileResource.getOverflowChangeType() != OverflowChangeType.NO_CHANGE) {
-- tsFileResource.setOverflowChangeType(OverflowChangeType.CHANGED);
-- }
-- }
--
-- addAllFileIntoIndex(newFileNodes);
-- synchronized (fileNodeProcessorStore) {
-- fileNodeProcessorStore.setOverflowed(isOverflowed);
-- fileNodeProcessorStore.setFileNodeProcessorStatus(isMerging);
-- fileNodeProcessorStore.setNewFileNodes(newFileNodes);
-- fileNodeProcessorStore.setEmptyTsFileResource(emptyTsFileResource);
-- // flush this filenode information to disk
-- try {
-- writeStoreToDisk(fileNodeProcessorStore);
-- } catch (FileNodeProcessorException e) {
-- LOGGER.error("The filenode processor {} writes restore information error when merging.",
-- getProcessorName(), e);
-- writeUnlock();
-- throw new FileNodeProcessorException(e);
-- }
-- }
-- // add numOfMergeFile to control the number of the merge file
-- List<TsFileResource> backupIntervalFiles;
--
-- backupIntervalFiles = switchFileNodeToMerge();
-- //
-- // clear empty file
-- //
-- boolean needEmpty = false;
-- if (emptyTsFileResource.getOverflowChangeType() != OverflowChangeType.NO_CHANGE) {
-- needEmpty = true;
-- }
-- emptyTsFileResource.clear();
-- // attention
-- try {
-- if (overflowProcessor.isClosed()) {
-- overflowProcessor.reopen();
-- }
-- overflowProcessor.switchWorkToMerge();
-- } catch (ProcessorException | IOException e) {
-- LOGGER.error("The filenode processor {} can't switch overflow processor from work to merge.",
-- getProcessorName(), e);
-- writeUnlock();
-- throw new FileNodeProcessorException(e);
-- }
-- LOGGER.info("The filenode processor {} switches from {} to {}.", getProcessorName(),
-- FileNodeProcessorStatus.NONE, FileNodeProcessorStatus.MERGING_WRITE);
-- writeUnlock();
--
-- // query tsfile data and overflow data, and merge them
-- int numOfMergeFiles = 0;
-- int allNeedMergeFiles = backupIntervalFiles.size();
-- for (TsFileResource backupIntervalFile : backupIntervalFiles) {
-- numOfMergeFiles++;
-- if (backupIntervalFile.getOverflowChangeType() == OverflowChangeType.CHANGED) {
-- // query data and merge
-- String filePathBeforeMerge = backupIntervalFile.getRelativePath();
-- try {
-- LOGGER.info(
-- "The filenode processor {} begins merging the {}/{} tsfile[{}] with "
-- + "overflow file, the process is {}%",
-- getProcessorName(), numOfMergeFiles, allNeedMergeFiles, filePathBeforeMerge,
-- (int) (((numOfMergeFiles - 1) / (float) allNeedMergeFiles) * 100));
-- long startTime = System.currentTimeMillis();
-- String newFile = queryAndWriteDataForMerge(backupIntervalFile);
-- long endTime = System.currentTimeMillis();
-- long timeConsume = endTime - startTime;
-- ZoneId zoneId = IoTDBDescriptor.getInstance().getConfig().getZoneID();
-- LOGGER.info(
-- "The fileNode processor {} has merged the {}/{} tsfile[{}->{}] over, "
-- + "start time of merge is {}, end time of merge is {}, "
-- + "time consumption is {}ms,"
-- + " the process is {}%",
-- getProcessorName(), numOfMergeFiles, allNeedMergeFiles, filePathBeforeMerge,
-- newFile, ofInstant(Instant.ofEpochMilli(startTime),
-- zoneId), ofInstant(Instant.ofEpochMilli(endTime), zoneId), timeConsume,
-- numOfMergeFiles / (float) allNeedMergeFiles * 100);
-- } catch (IOException | PathErrorException e) {
-- LOGGER.error("Merge: query and insert data error.", e);
-- throw new FileNodeProcessorException(e);
-- }
-- } else if (backupIntervalFile.getOverflowChangeType() == OverflowChangeType.MERGING_CHANGE) {
-- LOGGER.error("The overflowChangeType of backupIntervalFile must not be {}",
-- OverflowChangeType.MERGING_CHANGE);
-- // handle this error, throw one runtime exception
-- throw new FileNodeProcessorException(
-- "The overflowChangeType of backupIntervalFile must not be "
-- + OverflowChangeType.MERGING_CHANGE);
-- } else {
-- LOGGER.debug(
-- "The filenode processor {} is merging, the interval file {} doesn't "
-- + "need to be merged.",
-- getProcessorName(), backupIntervalFile.getRelativePath());
-- }
-- }
--
-- // change status from merge to wait
-- switchMergeToWaiting(backupIntervalFiles, needEmpty);
--
-- // change status from wait to work
-- switchWaitingToWorking();
-- }
--
-- private void mergeCheckEmptyFile(Map<String, Long> startTimeMap) {
-- if (emptyTsFileResource.getOverflowChangeType() == OverflowChangeType.NO_CHANGE) {
-- return;
-- }
-- Iterator<Entry<String, Long>> iterator = emptyTsFileResource.getEndTimeMap().entrySet()
-- .iterator();
-- while (iterator.hasNext()) {
-- Entry<String, Long> entry = iterator.next();
-- String deviceId = entry.getKey();
-- if (invertedIndexOfFiles.containsKey(deviceId)) {
-- invertedIndexOfFiles.get(deviceId).get(0).setOverflowChangeType(OverflowChangeType.CHANGED);
-- startTimeMap.remove(deviceId);
-- iterator.remove();
-- }
-- }
-- if (emptyTsFileResource.checkEmpty()) {
-- emptyTsFileResource.clear();
-- } else {
-- if (!newFileNodes.isEmpty()) {
-- TsFileResource first = newFileNodes.get(0);
-- for (String deviceId : emptyTsFileResource.getStartTimeMap().keySet()) {
-- first.setStartTime(deviceId, emptyTsFileResource.getStartTime(deviceId));
-- first.setEndTime(deviceId, emptyTsFileResource.getEndTime(deviceId));
-- first.setOverflowChangeType(OverflowChangeType.CHANGED);
-- }
-- emptyTsFileResource.clear();
-- } else {
-- emptyTsFileResource.setOverflowChangeType(OverflowChangeType.CHANGED);
-- }
-- }
-- }
--
-- private List<TsFileResource> switchFileNodeToMerge() throws FileNodeProcessorException {
-- List<TsFileResource> result = new ArrayList<>();
-- if (emptyTsFileResource.getOverflowChangeType() != OverflowChangeType.NO_CHANGE) {
-- // add empty
-- result.add(emptyTsFileResource.backUp());
-- if (!newFileNodes.isEmpty()) {
-- throw new FileNodeProcessorException(
-- String.format("The status of empty file is %s, but the new file list is not empty",
-- emptyTsFileResource.getOverflowChangeType()));
-- }
-- return result;
-- }
-- if (newFileNodes.isEmpty()) {
-- LOGGER.error("No file was changed when merging, the filenode is {}", getProcessorName());
-- throw new FileNodeProcessorException(
-- "No file was changed when merging, the filenode is " + getProcessorName());
-- }
-- for (TsFileResource tsFileResource : newFileNodes) {
-- updateFileNode(tsFileResource, result);
-- }
-- return result;
-- }
--
-- private void updateFileNode(TsFileResource tsFileResource, List<TsFileResource> result) {
-- if (tsFileResource.getOverflowChangeType() == OverflowChangeType.NO_CHANGE) {
-- result.add(tsFileResource.backUp());
-- } else {
-- Map<String, Long> startTimeMap = new HashMap<>();
-- Map<String, Long> endTimeMap = new HashMap<>();
-- for (String deviceId : tsFileResource.getEndTimeMap().keySet()) {
-- List<TsFileResource> temp = invertedIndexOfFiles.get(deviceId);
-- int index = temp.indexOf(tsFileResource);
-- int size = temp.size();
-- // start time
-- if (index == 0) {
-- startTimeMap.put(deviceId, 0L);
-- } else {
-- startTimeMap.put(deviceId, tsFileResource.getStartTime(deviceId));
-- }
-- // end time
-- if (index < size - 1) {
-- endTimeMap.put(deviceId, temp.get(index + 1).getStartTime(deviceId) - 1);
-- } else {
-- endTimeMap.put(deviceId, tsFileResource.getEndTime(deviceId));
-- }
-- }
-- TsFileResource node = new TsFileResource(startTimeMap, endTimeMap,
-- tsFileResource.getOverflowChangeType(), tsFileResource.getFile());
-- result.add(node);
-- }
-- }
--
-- private void switchMergeToWaiting(List<TsFileResource> backupIntervalFiles, boolean needEmpty)
-- throws FileNodeProcessorException {
-- LOGGER.info("The status of filenode processor {} switches from {} to {}.", getProcessorName(),
-- FileNodeProcessorStatus.MERGING_WRITE, FileNodeProcessorStatus.WAITING);
-- writeLock();
-- try {
-- oldMultiPassTokenSet = newMultiPassTokenSet;
-- oldMultiPassCount = new CountDownLatch(newMultiPassCount.get());
-- newMultiPassTokenSet = new HashSet<>();
-- newMultiPassCount = new AtomicInteger(0);
-- List<TsFileResource> result = new ArrayList<>();
-- int beginIndex = 0;
-- if (needEmpty) {
-- TsFileResource empty = backupIntervalFiles.get(0);
-- if (!empty.checkEmpty()) {
-- updateEmpty(empty, result);
-- beginIndex++;
-- }
-- }
-- // reconstruct the file index
-- addAllFileIntoIndex(backupIntervalFiles);
-- // check the merge changed file
-- for (int i = beginIndex; i < backupIntervalFiles.size(); i++) {
-- TsFileResource newFile = newFileNodes.get(i - beginIndex);
-- TsFileResource temp = backupIntervalFiles.get(i);
-- if (newFile.getOverflowChangeType() == OverflowChangeType.MERGING_CHANGE) {
-- updateMergeChanged(newFile, temp);
-- }
-- if (!temp.checkEmpty()) {
-- result.add(temp);
-- }
-- }
-- // add new file when merge
-- for (int i = backupIntervalFiles.size() - beginIndex; i < newFileNodes.size(); i++) {
-- TsFileResource fileNode = newFileNodes.get(i);
-- if (fileNode.isClosed()) {
-- result.add(fileNode.backUp());
-- } else {
-- result.add(fileNode);
-- }
-- }
--
-- isMerging = FileNodeProcessorStatus.WAITING;
-- newFileNodes = result;
-- // reconstruct the index
-- addAllFileIntoIndex(newFileNodes);
-- // clear merge changed
-- for (TsFileResource fileNode : newFileNodes) {
-- fileNode.clearMergeChanged();
-- }
--
-- synchronized (fileNodeProcessorStore) {
-- fileNodeProcessorStore.setFileNodeProcessorStatus(isMerging);
-- fileNodeProcessorStore.setEmptyTsFileResource(emptyTsFileResource);
-- fileNodeProcessorStore.setNewFileNodes(newFileNodes);
-- try {
-- writeStoreToDisk(fileNodeProcessorStore);
-- } catch (FileNodeProcessorException e) {
-- LOGGER.error(
-- "Merge: failed to insert filenode information to revocery file, the filenode is " +
-- "{}.",
-- getProcessorName(), e);
-- throw new FileNodeProcessorException(
-- "Merge: insert filenode information to revocery file failed, the filenode is "
-- + getProcessorName());
-- }
-- }
-- } finally {
-- writeUnlock();
-- }
-- }
--
-- private void updateEmpty(TsFileResource empty, List<TsFileResource> result) {
-- for (String deviceId : empty.getStartTimeMap().keySet()) {
-- if (invertedIndexOfFiles.containsKey(deviceId)) {
-- TsFileResource temp = invertedIndexOfFiles.get(deviceId).get(0);
-- if (temp.getMergeChanged().contains(deviceId)) {
-- empty.setOverflowChangeType(OverflowChangeType.CHANGED);
-- break;
-- }
-- }
-- }
-- empty.clearMergeChanged();
-- result.add(empty.backUp());
-- }
--
-- private void updateMergeChanged(TsFileResource newFile, TsFileResource temp) {
-- for (String deviceId : newFile.getMergeChanged()) {
-- if (temp.getStartTimeMap().containsKey(deviceId)) {
-- temp.setOverflowChangeType(OverflowChangeType.CHANGED);
-- } else {
-- changeTypeToChanged(deviceId, newFile.getStartTime(deviceId),
-- newFile.getEndTime(deviceId));
-- }
-- }
-- }
--
--
-- private void switchWaitingToWorking()
-- throws FileNodeProcessorException {
--
-- LOGGER.info("The status of filenode processor {} switches from {} to {}.", getProcessorName(),
-- FileNodeProcessorStatus.WAITING, FileNodeProcessorStatus.NONE);
--
-- if (oldMultiPassCount != null) {
-- LOGGER.info("The old Multiple Pass Token set is {}, the old Multiple Pass Count is {}",
-- oldMultiPassTokenSet,
-- oldMultiPassCount);
-- try {
-- oldMultiPassCount.await();
-- } catch (InterruptedException e) {
-- LOGGER.info(
-- "The filenode processor {} encountered an error when it waits for all old queries over.",
-- getProcessorName());
-- throw new FileNodeProcessorException(e);
-- }
-- }
--
-- try {
-- writeLock();
-- try {
-- // delete the all files which are in the newFileNodes
-- // notice: the last restore file of the interval file
--
- List<String> bufferwriteDirPathList = DIRECTORY_MANAGER.getAllTsFileFolders();
- List<String> bufferwriteDirPathList = directories.getAllTsFileFolders();
-- List<File> bufferwriteDirList = new ArrayList<>();
-- collectBufferWriteDirs(bufferwriteDirPathList, bufferwriteDirList);
--
-- Set<String> bufferFiles = new HashSet<>();
-- collectBufferWriteFiles(bufferFiles);
--
-- // add the restore file, if the last file is not closed
-- if (!newFileNodes.isEmpty() && !newFileNodes.get(newFileNodes.size() - 1).isClosed()) {
-- String bufferFileRestorePath =
-- newFileNodes.get(newFileNodes.size() - 1).getFile().getAbsolutePath() + RESTORE_FILE_SUFFIX;
-- bufferFiles.add(bufferFileRestorePath);
-- }
--
-- deleteBufferWriteFiles(bufferwriteDirList, bufferFiles);
--
-- // merge switch
-- changeFileNodes();
--
-- // overflow switch from merge to work
-- overflowProcessor.switchMergeToWork();
--
-- // write the status to the restore file
-- isMerging = FileNodeProcessorStatus.NONE;
-- synchronized (fileNodeProcessorStore) {
-- fileNodeProcessorStore.setFileNodeProcessorStatus(isMerging);
-- fileNodeProcessorStore.setNewFileNodes(newFileNodes);
-- fileNodeProcessorStore.setEmptyTsFileResource(emptyTsFileResource);
-- writeStoreToDisk(fileNodeProcessorStore);
-- }
-- } catch (IOException e) {
-- LOGGER.info(
-- "The filenode processor {} encountered an error when its "
-- + "status switched from {} to {}.",
-- getProcessorName(), FileNodeProcessorStatus.WAITING,
-- FileNodeProcessorStatus.NONE);
-- throw new FileNodeProcessorException(e);
-- } finally {
-- writeUnlock();
-- }
-- } finally {
-- oldMultiPassTokenSet = null;
-- oldMultiPassCount = null;
-- }
--
-- }
--
-- private void collectBufferWriteDirs(List<String> bufferwriteDirPathList,
-- List<File> bufferwriteDirList) {
-- for (String bufferwriteDirPath : bufferwriteDirPathList) {
-- if (bufferwriteDirPath.length() > 0
-- && bufferwriteDirPath.charAt(bufferwriteDirPath.length() - 1)
-- != File.separatorChar) {
-- bufferwriteDirPath = bufferwriteDirPath + File.separatorChar;
-- }
-- bufferwriteDirPath = bufferwriteDirPath + getProcessorName();
-- File bufferwriteDir = new File(bufferwriteDirPath);
-- bufferwriteDirList.add(bufferwriteDir);
-- if (!bufferwriteDir.exists()) {
-- bufferwriteDir.mkdirs();
-- }
-- }
-- }
--
-- private void collectBufferWriteFiles(Set<String> bufferFiles) {
-- for (TsFileResource bufferFileNode : newFileNodes) {
-- String bufferFilePath = bufferFileNode.getFile().getAbsolutePath();
-- if (bufferFilePath != null) {
-- bufferFiles.add(bufferFilePath);
-- }
-- }
-- }
--
-- private void deleteBufferWriteFiles(List<File> bufferwriteDirList, Set<String> bufferFiles)
-- throws IOException {
-- for (File bufferwriteDir : bufferwriteDirList) {
-- File[] files = bufferwriteDir.listFiles();
-- if (files == null) {
-- continue;
-- }
-- for (File file : files) {
-- if (!bufferFiles.contains(file.getPath())) {
-- FileReaderManager.getInstance().closeFileAndRemoveReader(file.getPath());
-- if (!file.delete()) {
-- LOGGER.warn("Cannot delete BufferWrite file {}", file.getPath());
-- }
-- }
-- }
-- }
-- }
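deleteBufferWriteFiles above keeps a whitelist of live file paths and deletes everything else found in the bufferwrite directories. A minimal sketch of that keep-set sweep, without the reader-manager bookkeeping (names illustrative):

    import java.io.File;
    import java.util.Set;

    public class SweepSketch {
      static void sweep(File dir, Set<String> keep) {
        File[] files = dir.listFiles();
        if (files == null) {
          return; // not a directory, or an I/O error
        }
        for (File file : files) {
          if (!keep.contains(file.getPath()) && !file.delete()) {
            System.err.println("Cannot delete file " + file.getPath());
          }
        }
      }
    }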
--
-- private void changeFileNodes() {
-- for (TsFileResource fileNode : newFileNodes) {
-- if (fileNode.getOverflowChangeType() != OverflowChangeType.NO_CHANGE) {
-- fileNode.setOverflowChangeType(OverflowChangeType.CHANGED);
-- }
-- }
-- }
--
-- private String queryAndWriteDataForMerge(TsFileResource backupIntervalFile)
-- throws IOException, FileNodeProcessorException, PathErrorException {
-- Map<String, Long> startTimeMap = new HashMap<>();
-- Map<String, Long> endTimeMap = new HashMap<>();
--
-- mergeFileWriter = null;
-- mergeOutputPath = null;
-- mergeBaseDir = null;
-- mergeFileName = null;
-- // modifications are blocked before mergingModification is created to avoid
-- // losing some modification.
-- mergeDeleteLock.lock();
-- QueryContext context = new QueryContext();
-- try {
-- FileReaderManager.getInstance().increaseFileReaderReference(backupIntervalFile.getFilePath(),
-- true);
-- for (String deviceId : backupIntervalFile.getStartTimeMap().keySet()) {
-- // query one deviceId
-- List<Path> pathList = new ArrayList<>();
-- mergeIsChunkGroupHasData = false;
-- mergeStartPos = -1;
-- ChunkGroupFooter footer;
-- int numOfChunk = 0;
-- try {
-- List<String> pathStrings = mManager.getLeafNodePathInNextLevel(deviceId);
-- for (String string : pathStrings) {
-- pathList.add(new Path(string));
-- }
-- } catch (PathErrorException e) {
-- LOGGER.error("Can't get all the paths from MManager, the deviceId is {}", deviceId);
-- throw new FileNodeProcessorException(e);
-- }
-- if (pathList.isEmpty()) {
-- continue;
-- }
-- for (Path path : pathList) {
-- // query one measurement in the special deviceId
-- String measurementId = path.getMeasurement();
-- TSDataType dataType = mManager.getSeriesType(path.getFullPath());
-- OverflowSeriesDataSource overflowSeriesDataSource = overflowProcessor.queryMerge(deviceId,
-- measurementId, dataType, true, context);
-- Filter timeFilter = FilterFactory
-- .and(TimeFilter.gtEq(backupIntervalFile.getStartTime(deviceId)),
-- TimeFilter.ltEq(backupIntervalFile.getEndTime(deviceId)));
-- SingleSeriesExpression seriesFilter = new SingleSeriesExpression(path, timeFilter);
--
-- for (OverflowInsertFile overflowInsertFile : overflowSeriesDataSource
-- .getOverflowInsertFileList()) {
-- FileReaderManager.getInstance()
-- .increaseFileReaderReference(overflowInsertFile.getFilePath(),
-- false);
-- }
--
-- IReader seriesReader = SeriesReaderFactory.getInstance()
-- .createSeriesReaderForMerge(backupIntervalFile,
-- overflowSeriesDataSource, seriesFilter, context);
-- numOfChunk += queryAndWriteSeries(seriesReader, path, seriesFilter, dataType,
-- startTimeMap, endTimeMap, overflowSeriesDataSource);
-- }
-- if (mergeIsChunkGroupHasData) {
-- // end the new rowGroupMetadata
-- mergeFileWriter.endChunkGroup(0);
-- }
-- }
-- } finally {
-- FileReaderManager.getInstance().decreaseFileReaderReference(backupIntervalFile.getFilePath(),
-- true);
--
-- if (mergeDeleteLock.isLocked()) {
-- mergeDeleteLock.unlock();
-- }
-- }
--
-- if (mergeFileWriter != null) {
-- mergeFileWriter.endFile(fileSchema);
-- }
-- backupIntervalFile.setFile(new File(mergeBaseDir + File.separator + mergeFileName));
-- backupIntervalFile.setOverflowChangeType(OverflowChangeType.NO_CHANGE);
-- backupIntervalFile.setStartTimeMap(startTimeMap);
-- backupIntervalFile.setEndTimeMap(endTimeMap);
-- backupIntervalFile.setModFile(mergingModification);
-- mergingModification = null;
-- return mergeFileName;
-- }
--
-- private int queryAndWriteSeries(IReader seriesReader, Path path,
-- SingleSeriesExpression seriesFilter, TSDataType dataType,
-- Map<String, Long> startTimeMap, Map<String, Long> endTimeMap,
-- OverflowSeriesDataSource overflowSeriesDataSource)
-- throws IOException {
-- int numOfChunk = 0;
-- try {
-- if (!seriesReader.hasNext()) {
-- LOGGER.debug(
-- "The time-series {} has no data with the filter {} in the filenode processor {}",
-- path, seriesFilter, getProcessorName());
-- } else {
-- numOfChunk++;
-- TimeValuePair timeValuePair = seriesReader.next();
-- if (mergeFileWriter == null) {
- mergeBaseDir = DIRECTORY_MANAGER.getNextFolderForTsfile();
- mergeBaseDir = directories.getNextFolderForTsfile();
-- mergeFileName = timeValuePair.getTimestamp()
-- + FileNodeConstants.BUFFERWRITE_FILE_SEPARATOR + System.currentTimeMillis();
-- mergeOutputPath = constructOutputFilePath(mergeBaseDir, getProcessorName(),
-- mergeFileName);
-- mergeFileName = getProcessorName() + File.separatorChar + mergeFileName;
-- mergeFileWriter = new TsFileIOWriter(new File(mergeOutputPath));
-- mergingModification = new ModificationFile(mergeOutputPath
-- + ModificationFile.FILE_SUFFIX);
-- mergeDeleteLock.unlock();
-- }
-- if (!mergeIsChunkGroupHasData) {
-- // start a new rowGroupMetadata
-- mergeIsChunkGroupHasData = true;
-- // the data size and numOfChunk recorded here are placeholders;
-- // the accurate values are known only after all data of this device is inserted.
-- mergeFileWriter.startFlushChunkGroup(path.getDevice());// TODO please check me.
-- mergeStartPos = mergeFileWriter.getPos();
-- }
-- // init the seriesWriterImpl
-- MeasurementSchema measurementSchema = fileSchema
-- .getMeasurementSchema(path.getMeasurement());
-- ChunkBuffer pageWriter = new ChunkBuffer(measurementSchema);
-- int pageSizeThreshold = TSFileConfig.pageSizeInByte;
-- ChunkWriterImpl seriesWriterImpl = new ChunkWriterImpl(measurementSchema, pageWriter,
-- pageSizeThreshold);
-- // insert the series data
-- writeOneSeries(path.getDevice(), seriesWriterImpl, dataType,
-- seriesReader,
-- startTimeMap, endTimeMap, timeValuePair);
-- // flush the series data
-- seriesWriterImpl.writeToFileWriter(mergeFileWriter);
-- }
-- } finally {
-- for (OverflowInsertFile overflowInsertFile : overflowSeriesDataSource
-- .getOverflowInsertFileList()) {
-- FileReaderManager.getInstance()
-- .decreaseFileReaderReference(overflowInsertFile.getFilePath(),
-- false);
-- }
-- }
-- return numOfChunk;
-- }
--
--
-- private void writeOneSeries(String deviceId, ChunkWriterImpl seriesWriterImpl,
-- TSDataType dataType, IReader seriesReader, Map<String, Long> startTimeMap,
-- Map<String, Long> endTimeMap, TimeValuePair firstTVPair) throws IOException {
-- long startTime;
-- long endTime;
-- TimeValuePair localTV = firstTVPair;
-- writeTVPair(seriesWriterImpl, dataType, localTV);
-- startTime = endTime = localTV.getTimestamp();
-- if (!startTimeMap.containsKey(deviceId) || startTimeMap.get(deviceId) > startTime) {
-- startTimeMap.put(deviceId, startTime);
-- }
-- if (!endTimeMap.containsKey(deviceId) || endTimeMap.get(deviceId) < endTime) {
-- endTimeMap.put(deviceId, endTime);
-- }
-- while (seriesReader.hasNext()) {
-- localTV = seriesReader.next();
-- endTime = localTV.getTimestamp();
-- writeTVPair(seriesWriterImpl, dataType, localTV);
-- }
-- if (!endTimeMap.containsKey(deviceId) || endTimeMap.get(deviceId) < endTime) {
-- endTimeMap.put(deviceId, endTime);
-- }
-- }
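--
-- // Worked example for writeOneSeries (illustrative, not in the original source):
-- // for pairs at t = 3, 5, 9 the first pair sets startTime = endTime = 3, the
-- // loop advances endTime to 9, and the final check persists it, so the maps
-- // record [3, 9] for this deviceId unless an earlier file already recorded a
-- // wider range.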
--
-- private void writeTVPair(ChunkWriterImpl seriesWriterImpl, TSDataType dataType,
-- TimeValuePair timeValuePair) throws IOException {
-- switch (dataType) {
-- case BOOLEAN:
-- seriesWriterImpl.write(timeValuePair.getTimestamp(), timeValuePair.getValue().getBoolean());
-- break;
-- case INT32:
-- seriesWriterImpl.write(timeValuePair.getTimestamp(), timeValuePair.getValue().getInt());
-- break;
-- case INT64:
-- seriesWriterImpl.write(timeValuePair.getTimestamp(), timeValuePair.getValue().getLong());
-- break;
-- case FLOAT:
-- seriesWriterImpl.write(timeValuePair.getTimestamp(), timeValuePair.getValue().getFloat());
-- break;
-- case DOUBLE:
-- seriesWriterImpl.write(timeValuePair.getTimestamp(), timeValuePair.getValue().getDouble());
-- break;
-- case TEXT:
-- seriesWriterImpl.write(timeValuePair.getTimestamp(), timeValuePair.getValue().getBinary());
-- break;
-- default:
-- LOGGER.error("Not support data type: {}", dataType);
-- break;
-- }
-- }
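--
-- // Illustration (not in the original source): writeTVPair only selects the
-- // typed overload of ChunkWriterImpl#write. For an INT64 pair with timestamp
-- // 42 and value 7L it effectively calls
-- //   seriesWriterImpl.write(42, 7L);
-- // while TEXT passes the Binary value instead.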
--
--
-- private String constructOutputFilePath(String baseDir, String processorName, String fileName) {
--
-- String localBaseDir = baseDir;
-- if (localBaseDir.charAt(localBaseDir.length() - 1) != File.separatorChar) {
-- localBaseDir = localBaseDir + File.separatorChar + processorName;
-- }
-- File dataDir = new File(localBaseDir);
-- if (!dataDir.exists()) {
-- LOGGER.warn("The bufferwrite processor data dir doesn't exists, create new directory {}",
-- localBaseDir);
-- dataDir.mkdirs();
-- }
-- File outputFile = new File(dataDir, fileName);
-- return outputFile.getPath();
-- }
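--
-- // Example (hypothetical paths, not in the original source): with baseDir
-- // "/data/tsfiles" (no trailing separator) and processorName "root.vehicle",
-- // the output path becomes "/data/tsfiles/root.vehicle/<fileName>". Note that
-- // when baseDir already ends with a separator, the processorName directory is
-- // not appended.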
--
-- private FileSchema constructFileSchema(String processorName) throws WriteProcessException {
--
-- List<MeasurementSchema> columnSchemaList;
-- columnSchemaList = mManager.getSchemaForFileName(processorName);
--
-- FileSchema schema = new FileSchema();
-- for (MeasurementSchema measurementSchema : columnSchemaList) {
-- schema.registerMeasurement(measurementSchema);
-- }
-- return schema;
--
-- }
--
-- @Override
-- public boolean canBeClosed() {
-- if (isMerging != FileNodeProcessorStatus.NONE) {
-- LOGGER.info("The filenode {} can't be closed, because the filenode status is {}",
-- getProcessorName(),
-- isMerging);
-- return false;
-- }
-- if (newMultiPassCount.get() != 0) {
-- LOGGER.warn("The filenode {} can't be closed, because newMultiPassCount is {}. The newMultiPassTokenSet is {}",
-- getProcessorName(), newMultiPassCount, newMultiPassTokenSet);
-- return false;
-- }
--
-- if (oldMultiPassCount == null) {
-- return true;
-- }
-- if (oldMultiPassCount.getCount() == 0) {
-- return true;
-- } else {
-- LOGGER.info("The filenode {} can't be closed, because oldMultiPassCount is {}",
-- getProcessorName(), oldMultiPassCount.getCount());
-- return false;
-- }
-- }
--
-- @Override
-- public FileNodeFlushFuture flush() throws IOException {
-- Future<Boolean> bufferWriteFlushFuture = null;
-- Future<Boolean> overflowFlushFuture = null;
-- if (bufferWriteProcessor != null) {
-- bufferWriteFlushFuture = bufferWriteProcessor.flush();
-- }
-- if (overflowProcessor != null && !overflowProcessor.isClosed()) {
-- overflowFlushFuture = overflowProcessor.flush();
-- }
-- return new FileNodeFlushFuture(bufferWriteFlushFuture, overflowFlushFuture);
-- }
--
-- /**
-- * Close the bufferwrite processor.
-- */
-- public Future<Boolean> closeBufferWrite() throws FileNodeProcessorException {
-- if (bufferWriteProcessor == null) {
-- return new ImmediateFuture<>(true);
-- }
-- try {
-- while (!bufferWriteProcessor.canBeClosed()) {
-- waitForBufferWriteClose();
-- }
-- bufferWriteProcessor.close();
-- Future<Boolean> result = bufferWriteProcessor.getCloseFuture();
-- closingBufferWriteProcessor.add(bufferWriteProcessor);
-- bufferWriteProcessor = null;
-- return result;
-- } catch (BufferWriteProcessorException e) {
-- throw new FileNodeProcessorException(e);
-- }
-- }
--
--
--
-- private void waitForBufferWriteClose() {
-- try {
-- LOGGER.info("The bufferwrite {} can't be closed, wait 100ms",
-- bufferWriteProcessor.getProcessorName());
-- TimeUnit.MILLISECONDS.sleep(100);
-- } catch (InterruptedException e) {
-- LOGGER.error("Unexpected interruption", e);
-- Thread.currentThread().interrupt();
-- }
-- }
--
-- /**
-- * Close the overflow processor.
-- */
-- public void closeOverflow() throws FileNodeProcessorException {
-- if (overflowProcessor == null || overflowProcessor.isClosed()) {
-- return;
-- }
-- try {
-- while (!overflowProcessor.canBeClosed()) {
-- waitForOverflowClose();
-- }
-- overflowProcessor.close();
-- } catch (OverflowProcessorException e) {
-- throw new FileNodeProcessorException(e);
-- }
-- }
--
-- private void waitForOverflowClose() {
-- try {
-- LOGGER.info("The overflow {} can't be closed, wait 100ms",
-- overflowProcessor.getProcessorName());
-- TimeUnit.MILLISECONDS.sleep(100);
-- } catch (InterruptedException e) {
-- LOGGER.error("Unexpected interruption", e);
-- Thread.currentThread().interrupt();
-- }
-- }
--
-- @Override
-- public void close() throws FileNodeProcessorException {
-- LOGGER.info("Will setCloseMark FileNode Processor {}.", getProcessorName());
-- Future<Boolean> result = closeBufferWrite();
-- try {
-- result.get();
-- } catch (InterruptedException | ExecutionException e) {
-- throw new FileNodeProcessorException(e);
-- }
-- closeOverflow();
-- for (TsFileResource fileNode : newFileNodes) {
-- if (fileNode.getModFile() != null) {
-- try {
-- fileNode.getModFile().close();
-- } catch (IOException e) {
-- throw new FileNodeProcessorException(e);
-- }
-- }
-- }
-- }
--
-- /**
-- * deregister the filenode processor.
-- */
-- public void delete() throws ProcessorException {
-- if (TsFileDBConf.isEnableStatMonitor()) {
-- // remove the monitor
-- LOGGER.info("Deregister the filenode processor: {} from monitor.", getProcessorName());
-- StatMonitor.getInstance().deregisterStatistics(statStorageDeltaName);
-- }
-- closeBufferWrite();
-- closeOverflow();
-- for (TsFileResource fileNode : newFileNodes) {
-- if (fileNode.getModFile() != null) {
-- try {
-- fileNode.getModFile().close();
-- } catch (IOException e) {
-- throw new FileNodeProcessorException(e);
-- }
-- }
-- }
-- }
--
-- @Override
-- public long memoryUsage() {
-- long memSize = 0;
-- if (bufferWriteProcessor != null) {
-- memSize += bufferWriteProcessor.memoryUsage();
-- }
-- if (overflowProcessor != null) {
-- memSize += overflowProcessor.memoryUsage();
-- }
-- return memSize;
-- }
--
-- private void writeStoreToDisk(FileNodeProcessorStore fileNodeProcessorStore)
-- throws FileNodeProcessorException {
--
-- synchronized (fileNodeRestoreLock) {
-- try (FileOutputStream fileOutputStream = new FileOutputStream(fileNodeRestoreFilePath)) {
-- fileNodeProcessorStore.serialize(fileOutputStream);
-- LOGGER.debug("The filenode processor {} writes restore information to the restore file",
-- getProcessorName());
-- } catch (IOException e) {
-- throw new FileNodeProcessorException(e);
-- }
-- }
-- }
--
-- private FileNodeProcessorStore readStoreFromDisk() throws FileNodeProcessorException {
--
-- synchronized (fileNodeRestoreLock) {
-- File restoreFile = new File(fileNodeRestoreFilePath);
-- if (!restoreFile.exists() || restoreFile.length() == 0) {
-- try {
-- return new FileNodeProcessorStore(false, new HashMap<>(),
-- new TsFileResource(null, false),
-- new ArrayList<>(), FileNodeProcessorStatus.NONE, 0);
-- } catch (IOException e) {
-- throw new FileNodeProcessorException(e);
-- }
-- }
-- try (FileInputStream inputStream = new FileInputStream(fileNodeRestoreFilePath)) {
-- return FileNodeProcessorStore.deSerialize(inputStream);
-- } catch (IOException e) {
-- LOGGER
-- .error("Failed to deserialize the FileNodeRestoreFile {}, {}", fileNodeRestoreFilePath,
-- e);
-- throw new FileNodeProcessorException(e);
-- }
-- }
-- }
--
-- String getFileNodeRestoreFilePath() {
-- return fileNodeRestoreFilePath;
-- }
--
-- /**
-- * Delete data whose timestamp <= 'timestamp' and belongs to timeseries deviceId.measurementId.
-- *
-- * @param deviceId the deviceId of the timeseries to be deleted.
-- * @param measurementId the measurementId of the timeseries to be deleted.
-- * @param timestamp the delete range is (0, timestamp].
-- */
-- public void delete(String deviceId, String measurementId, long timestamp) throws IOException {
-- // TODO: how to avoid partial deletion?
-- mergeDeleteLock.lock();
-- long version = versionController.nextVersion();
--
-- // record what files are updated so we can roll back them in case of exception
-- List<ModificationFile> updatedModFiles = new ArrayList<>();
--
-- try {
-- String fullPath = deviceId +
-- IoTDBConstant.PATH_SEPARATOR + measurementId;
-- Deletion deletion = new Deletion(fullPath, version, timestamp);
-- if (mergingModification != null) {
-- mergingModification.write(deletion);
-- updatedModFiles.add(mergingModification);
-- }
-- deleteBufferWriteFiles(deviceId, deletion, updatedModFiles);
-- // delete data in memory
-- OverflowProcessor ofProcessor = getOverflowProcessor(getProcessorName());
-- ofProcessor.delete(deviceId, measurementId, timestamp, version, updatedModFiles);
-- if (bufferWriteProcessor != null) {
-- bufferWriteProcessor.delete(deviceId, measurementId, timestamp);
-- }
-- } catch (Exception e) {
-- // roll back
-- for (ModificationFile modFile : updatedModFiles) {
-- modFile.abort();
-- }
-- throw new IOException(e);
-- } finally {
-- mergeDeleteLock.unlock();
-- }
-- }
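--
-- // Usage sketch (hypothetical call, not in the original source):
-- //   processor.delete("root.vehicle.d0", "s0", 100);
-- // writes Deletion("root.vehicle.d0.s0", version, 100) into every affected
-- // modification file, so readers subsequently filter out all points of that
-- // timeseries with timestamp <= 100.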
--
-- private void deleteBufferWriteFiles(String deviceId, Deletion deletion,
-- List<ModificationFile> updatedModFiles) throws IOException {
-- BufferWriteProcessor bufferWriteProcessor = getBufferWriteProcessor();
-- TsFileResource resource = null;
-- if (bufferWriteProcessor != null) {
-- //bufferWriteProcessor == null means the bufferWriteProcessor is closed now.
-- resource = bufferWriteProcessor.getCurrentTsFileResource();
-- if (resource != null && resource.containsDevice(deviceId)) {
-- resource.getModFile().write(deletion);
-- updatedModFiles.add(resource.getModFile());
-- }
-- }
--
-- for (TsFileResource fileNode : newFileNodes) {
-- if (fileNode != resource && fileNode.containsDevice(deviceId)
-- && fileNode.getStartTime(deviceId) <= deletion.getTimestamp()) {
-- fileNode.getModFile().write(deletion);
-- updatedModFiles.add(fileNode.getModFile());
-- }
-- }
-- }
--
-- /**
-- * Similar to delete(), but only deletes data in BufferWrite. Only used by WAL recovery.
-- */
-- public void deleteBufferWrite(String deviceId, String measurementId, long timestamp)
-- throws IOException, BufferWriteProcessorException {
-- String fullPath = deviceId +
-- IoTDBConstant.PATH_SEPARATOR + measurementId;
-- long version = versionController.nextVersion();
-- Deletion deletion = new Deletion(fullPath, version, timestamp);
--
-- List<ModificationFile> updatedModFiles = new ArrayList<>();
-- try {
-- deleteBufferWriteFiles(deviceId, deletion, updatedModFiles);
-- } catch (IOException e) {
-- for (ModificationFile modificationFile : updatedModFiles) {
-- modificationFile.abort();
-- }
-- throw e;
-- }
-- if (bufferWriteProcessor != null) {
-- try {
-- bufferWriteProcessor.delete(deviceId, measurementId, timestamp);
-- } catch (BufferWriteProcessorException e) {
-- throw new IOException(e);
-- }
-- }
-- }
--
-- /**
-- * Similar to delete(), but only deletes data in Overflow. Only used by WAL recovery.
-- */
-- public void deleteOverflow(String deviceId, String measurementId, long timestamp)
-- throws ProcessorException {
-- long version = versionController.nextVersion();
--
-- OverflowProcessor overflowProcessor = getOverflowProcessor(getProcessorName());
-- List<ModificationFile> updatedModFiles = new ArrayList<>();
-- try {
-- overflowProcessor.delete(deviceId, measurementId, timestamp, version, updatedModFiles);
-- } catch (IOException e) {
-- for (ModificationFile modificationFile : updatedModFiles) {
-- try {
-- modificationFile.abort();
-- } catch (IOException e1) {
-- throw new ProcessorException(e);
-- }
-- }
-- throw new ProcessorException(e);
-- }
-- }
--
-- public CopyOnReadLinkedList<BufferWriteProcessor> getClosingBufferWriteProcessor() {
-- for (BufferWriteProcessor processor: closingBufferWriteProcessor.cloneList()) {
-- if (processor.isClosed()) {
-- closingBufferWriteProcessor.remove(processor);
-- }
-- }
-- closingBufferWriteProcessor.reset();
-- return closingBufferWriteProcessor;
-- }
--
-- @Override
-- public boolean equals(Object o) {
-- if (this == o) {
-- return true;
-- }
-- if (o == null || getClass() != o.getClass()) {
-- return false;
-- }
-- if (!super.equals(o)) {
-- return false;
-- }
-- FileNodeProcessor that = (FileNodeProcessor) o;
-- return isOverflowed == that.isOverflowed &&
-- numOfMergeFile == that.numOfMergeFile &&
-- lastMergeTime == that.lastMergeTime &&
-- multiPassLockToken == that.multiPassLockToken &&
-- Objects.equals(statStorageDeltaName, that.statStorageDeltaName) &&
-- Objects.equals(statParamsHashMap, that.statParamsHashMap) &&
-- Objects.equals(lastUpdateTimeMap, that.lastUpdateTimeMap) &&
-- Objects.equals(flushLastUpdateTimeMap, that.flushLastUpdateTimeMap) &&
-- Objects.equals(invertedIndexOfFiles, that.invertedIndexOfFiles) &&
-- Objects.equals(emptyTsFileResource, that.emptyTsFileResource) &&
-- Objects.equals(newFileNodes, that.newFileNodes) &&
-- isMerging == that.isMerging &&
-- Objects.equals(fileNodeProcessorStore, that.fileNodeProcessorStore) &&
-- Objects.equals(fileNodeRestoreFilePath, that.fileNodeRestoreFilePath) &&
-- Objects.equals(bufferWriteProcessor, that.bufferWriteProcessor) &&
-- Objects.equals(overflowProcessor, that.overflowProcessor) &&
-- Objects.equals(oldMultiPassTokenSet, that.oldMultiPassTokenSet) &&
-- Objects.equals(newMultiPassTokenSet, that.newMultiPassTokenSet) &&
-- Objects.equals(oldMultiPassCount, that.oldMultiPassCount) &&
-- Objects.equals(newMultiPassCount, that.newMultiPassCount) &&
-- Objects.equals(parameters, that.parameters) &&
-- Objects.equals(fileSchema, that.fileSchema) &&
-- Objects.equals(fileNodeFlushAction, that.fileNodeFlushAction) &&
-- Objects.equals(bufferwriteFlushAction, that.bufferwriteFlushAction) &&
-- Objects.equals(overflowFlushAction, that.overflowFlushAction);
-- }
--
-- @Override
-- public int hashCode() {
-- return processorName.hashCode();
-- }
--
-- public class MergeRunnale implements Runnable {
--
-- @Override
-- public void run() {
-- try {
-- ZoneId zoneId = IoTDBDescriptor.getInstance().getConfig().getZoneID();
-- long mergeStartTime = System.currentTimeMillis();
-- merge();
-- long mergeEndTime = System.currentTimeMillis();
-- long intervalTime = mergeEndTime - mergeStartTime;
-- LOGGER.info(
-- "The filenode processor {} merge start time is {}, "
-- + "merge end time is {}, merge consumes {}ms.",
-- getProcessorName(), ofInstant(Instant.ofEpochMilli(mergeStartTime),
-- zoneId), ofInstant(Instant.ofEpochMilli(mergeEndTime),
-- zoneId), intervalTime);
-- } catch (FileNodeProcessorException e) {
-- LOGGER.error("The filenode processor {} encountered an error when merging.",
-- getProcessorName(), e);
-- throw new ErrorDebugException(e);
-- }
-- }
-- }
--
-- /**
-- * wait for all closing processors to finish their tasks
-- */
-- public void waitforAllClosed() throws FileNodeProcessorException {
-- close();
-- while (getClosingBufferWriteProcessor().size() != 0) {
-- checkAllClosingProcessors();
-- try {
-- Thread.sleep(10);
-- } catch (InterruptedException e) {
-- LOGGER.error("Filenode Processor {} is interrupted when waiting for all closed.", processorName, e);
-- }
-- }
-- }
--
--
-- void checkAllClosingProcessors() {
-- Iterator<BufferWriteProcessor> iterator =
-- this.getClosingBufferWriteProcessor().iterator();
-- while (iterator.hasNext()) {
-- BufferWriteProcessor processor = iterator.next();
-- try {
-- if (processor.getCloseFuture().get(10, TimeUnit.MILLISECONDS)) {
-- //if finished, we can remove it.
-- iterator.remove();
-- }
-- } catch (InterruptedException | ExecutionException e) {
-- LOGGER.error("Close bufferwrite processor {} failed.", processor.getProcessorName(), e);
-- } catch (TimeoutException e) {
-- //do nothing.
-- }
-- }
-- this.getClosingBufferWriteProcessor().reset();
-- }
--}
++// // Token for query which used to
++// private int multiPassLockToken = 0;
++// private VersionController versionController;
++// private ReentrantLock mergeDeleteLock = new ReentrantLock();
++//
++// /**
++// * This is the modification file of the result of the current merge.
++// */
++// private ModificationFile mergingModification;
++//
++// private TsFileIOWriter mergeFileWriter = null;
++// private String mergeOutputPath = null;
++// private String mergeBaseDir = null;
++// private String mergeFileName = null;
++// private boolean mergeIsChunkGroupHasData = false;
++// private long mergeStartPos;
++//
++// /**
++// * constructor of FileNodeProcessor.
++// */
++// FileNodeProcessor(String fileNodeDirPath, String processorName)
++// throws FileNodeProcessorException {
++// super(processorName);
++// for (MonitorConstants.FileNodeProcessorStatConstants statConstant :
++// MonitorConstants.FileNodeProcessorStatConstants.values()) {
++// statParamsHashMap.put(statConstant.name(), new AtomicLong(0));
++// }
++// statStorageDeltaName =
++// MonitorConstants.STAT_STORAGE_GROUP_PREFIX + MonitorConstants.MONITOR_PATH_SEPARATOR
++// + MonitorConstants.FILE_NODE_PATH + MonitorConstants.MONITOR_PATH_SEPARATOR
++// + processorName.replaceAll("\\.", "_");
++//
++// this.parameters = new HashMap<>();
++// String dirPath = fileNodeDirPath;
++// if (dirPath.length() > 0
++// && dirPath.charAt(dirPath.length() - 1) != File.separatorChar) {
++// dirPath = dirPath + File.separatorChar;
++// }
++//
++// File restoreFolder = new File(dirPath + processorName);
++// if (!restoreFolder.exists()) {
++// restoreFolder.mkdirs();
++// LOGGER.info(
++// "The restore directory of the filenode processor {} doesn't exist. Create new " +
++// "directory {}",
++// getProcessorName(), restoreFolder.getAbsolutePath());
++// }
++// fileNodeRestoreFilePath = new File(restoreFolder, processorName + RESTORE_FILE_SUFFIX)
++// .getPath();
++// try {
++// fileNodeProcessorStore = readStoreFromDisk();
++// } catch (FileNodeProcessorException e) {
++// LOGGER.error(
++// "The fileNode processor {} encountered an error when recovering restore " +
++// "information.", processorName);
++// throw new FileNodeProcessorException(e);
++// }
++// // TODO deep clone the lastupdate time
++// emptyTsFileResource = fileNodeProcessorStore.getEmptyTsFileResource();
++// newFileNodes = fileNodeProcessorStore.getNewFileNodes();
++// isMerging = fileNodeProcessorStore.getFileNodeProcessorStatus();
++// numOfMergeFile = fileNodeProcessorStore.getNumOfMergeFile();
++// invertedIndexOfFiles = new HashMap<>();
++//
++// // construct the fileschema
++// try {
++// this.fileSchema = constructFileSchema(processorName);
++// } catch (WriteProcessException e) {
++// throw new FileNodeProcessorException(e);
++// }
++//
++// recover();
++//
++// // register stat service
++// if (TsFileDBConf.isEnableStatMonitor()) {
++// StatMonitor statMonitor = StatMonitor.getInstance();
++// registerStatMetadata();
++// statMonitor.registerStatistics(statStorageDeltaName, this);
++// }
++// try {
++// versionController = new SimpleFileVersionController(restoreFolder.getPath());
++// } catch (IOException e) {
++// throw new FileNodeProcessorException(e);
++// }
++// }
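++//
++// // Example (illustrative, not in the original source): for processorName
++// // "root.vehicle.d0" the stat storage name ends with "root_vehicle_d0",
++// // because dots in the processor name are replaced with underscores so they
++// // cannot be confused with the monitor path separator.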
++//
++// @Override
++// public Map<String, AtomicLong> getStatParamsHashMap() {
++// return statParamsHashMap;
++// }
++//
++// @Override
++// public void registerStatMetadata() {
++// Map<String, String> hashMap = new HashMap<>();
++// for (MonitorConstants.FileNodeProcessorStatConstants statConstant :
++// MonitorConstants.FileNodeProcessorStatConstants.values()) {
++// hashMap
++// .put(statStorageDeltaName + MonitorConstants.MONITOR_PATH_SEPARATOR + statConstant.name(),
++// MonitorConstants.DATA_TYPE_INT64);
++// }
++// StatMonitor.getInstance().registerStatStorageGroup(hashMap);
++// }
++//
++// @Override
++// public List<String> getAllPathForStatistic() {
++// List<String> list = new ArrayList<>();
++// for (MonitorConstants.FileNodeProcessorStatConstants statConstant :
++// MonitorConstants.FileNodeProcessorStatConstants.values()) {
++// list.add(
++// statStorageDeltaName + MonitorConstants.MONITOR_PATH_SEPARATOR + statConstant.name());
++// }
++// return list;
++// }
++//
++// @Override
++// public Map<String, TSRecord> getAllStatisticsValue() {
++// Long curTime = System.currentTimeMillis();
++// HashMap<String, TSRecord> tsRecordHashMap = new HashMap<>();
++// TSRecord tsRecord = new TSRecord(curTime, statStorageDeltaName);
++//
++// Map<String, AtomicLong> hashMap = getStatParamsHashMap();
++// tsRecord.dataPointList = new ArrayList<>();
++// for (Map.Entry<String, AtomicLong> entry : hashMap.entrySet()) {
++// tsRecord.dataPointList.add(new LongDataPoint(entry.getKey(), entry.getValue().get()));
++// }
++//
++// tsRecordHashMap.put(statStorageDeltaName, tsRecord);
++// return tsRecordHashMap;
++// }
++//
++// /**
++// * add interval FileNode.
++// */
++// void addIntervalFileNode(TsFileResource tsFileResource) throws ActionException {
++// newFileNodes.add(tsFileResource);
++// fileNodeProcessorStore.setNewFileNodes(newFileNodes);
++// fileNodeFlushAction.act();
++// }
++//
++// /**
++// * set interval filenode start time.
++// *
++// * @param deviceId device ID
++// */
++// void setIntervalFileNodeStartTime(String deviceId) {
++// if (getBufferWriteProcessor().getCurrentTsFileResource().getStartTime(deviceId) == -1) {
++// getBufferWriteProcessor().getCurrentTsFileResource().setStartTime(deviceId,
++// flushLastUpdateTimeMap.get(deviceId));
++// if (!invertedIndexOfFiles.containsKey(deviceId)) {
++// invertedIndexOfFiles.put(deviceId, new ArrayList<>());
++// }
++// invertedIndexOfFiles.get(deviceId).add(getBufferWriteProcessor().getCurrentTsFileResource());
++// }
++// }
++//
++// void setIntervalFileNodeStartTime(String deviceId, long time) {
++// if (time != -1) {
++// getBufferWriteProcessor().getCurrentTsFileResource().setStartTime(deviceId, time);
++// } else {
++// getBufferWriteProcessor().getCurrentTsFileResource().removeTime(deviceId);
++// invertedIndexOfFiles.get(deviceId).remove(getBufferWriteProcessor().getCurrentTsFileResource());
++// }
++// }
++//
++// long getIntervalFileNodeStartTime(String deviceId) {
++// return getBufferWriteProcessor().getCurrentTsFileResource().getStartTime(deviceId);
++// }
++//
++// private void addAllFileIntoIndex(List<TsFileResource> fileList) {
++// // clear map
++// invertedIndexOfFiles.clear();
++// // add all file to index
++// for (TsFileResource fileNode : fileList) {
++// if (fileNode.getStartTimeMap().isEmpty()) {
++// continue;
++// }
++// for (String deviceId : fileNode.getStartTimeMap().keySet()) {
++// if (!invertedIndexOfFiles.containsKey(deviceId)) {
++// invertedIndexOfFiles.put(deviceId, new ArrayList<>());
++// }
++// invertedIndexOfFiles.get(deviceId).add(fileNode);
++// }
++// }
++// }
++//
++// public boolean isOverflowed() {
++// return isOverflowed;
++// }
++//
++// /**
++// * if an overflow insert, update, or delete enters this filenode processor, set
++// * <code>isOverflowed</code> to true.
++// */
++// public void setOverflowed(boolean isOverflowed) {
++// if (this.isOverflowed != isOverflowed) {
++// this.isOverflowed = isOverflowed;
++// }
++// }
++//
++// public FileNodeProcessorStatus getFileNodeProcessorStatus() {
++// return isMerging;
++// }
++//
++// /**
++// * execute filenode recovery.
++// */
++// public void recover() throws FileNodeProcessorException {
++// // restore sequential files
++// parameters.put(FileNodeConstants.BUFFERWRITE_FLUSH_ACTION, bufferwriteFlushAction);
++// //parameters.put(FileNodeConstants.BUFFERWRITE_CLOSE_ACTION, bufferwriteCloseAction);
++// parameters
++// .put(FileNodeConstants.FILENODE_PROCESSOR_FLUSH_ACTION, fileNodeFlushAction);
++// parameters.put(FileNodeConstants.OVERFLOW_FLUSH_ACTION, overflowFlushAction);
++// parameters.put(FileNodeConstants.FILENODE_PROCESSOR_FLUSH_ACTION, fileNodeFlushAction);
++//
++// for (int i = 0; i < newFileNodes.size(); i++) {
++// TsFileResource tsFile = newFileNodes.get(i);
++//// try {
++//// String filePath = tsFile.getFilePath();
++//// String logNodePrefix = BufferWriteProcessor.logNodePrefix(processorName);
++//// SeqTsFileRecoverPerformer recoverPerformer =
++//// new SeqTsFileRecoverPerformer(logNodePrefix,
++//// fileSchema, versionController, tsFile);
++//// recoverPerformer.recover();
++//// } catch (ProcessorException e) {
++//// LOGGER.error(
++//// "The filenode processor {} failed to recover the bufferwrite processor, "
++//// + "the last bufferwrite file is {}.",
++//// getProcessorName(), tsFile.getFile().getName());
++//// throw new FileNodeProcessorException(e);
++//// }
++// }
++// recoverUpdateTimeMap();
++//
++// // restore the overflow processor
++// LOGGER.info("The filenode processor {} will recover the overflow processor.",
++// getProcessorName());
++//
++// try {
++// overflowProcessor = new OverflowProcessor(getProcessorName(), parameters, fileSchema,
++// versionController);
++// } catch (ProcessorException e) {
++// LOGGER.error("The filenode processor {} failed to recover the overflow processor.",
++// getProcessorName());
++// throw new FileNodeProcessorException(e);
++// }
++//
++// if (isMerging == FileNodeProcessorStatus.MERGING_WRITE) {
++// // re-merge all file
++// // if bufferwrite processor is not null, and setCloseMark
++// LOGGER.info("The filenode processor {} is recovering, the filenode status is {}.",
++// getProcessorName(), isMerging);
++// merge();
++// } else if (isMerging == FileNodeProcessorStatus.WAITING) {
++// LOGGER.info("The filenode processor {} is recovering, the filenode status is {}.",
++// getProcessorName(), isMerging);
++// switchWaitingToWorking();
++// }
++// // add file into index of file
++// addAllFileIntoIndex(newFileNodes);
++// }
++//
++// private void recoverUpdateTimeMap() {
++// lastUpdateTimeMap = new HashMap<>();
++// flushLastUpdateTimeMap = new HashMap<>();
++// for (TsFileResource tsFileResource : newFileNodes) {
++// Map<String, Long> endTimeMap = tsFileResource.getEndTimeMap();
++// endTimeMap.forEach((key, value) -> {
++// Long lastTime = lastUpdateTimeMap.get(key);
++// if (lastTime == null || lastTime < value) {
++// lastUpdateTimeMap.put(key, value);
++// flushLastUpdateTimeMap.put(key, value);
++// }
++// });
++// }
++// }
++//
++// //when calling this method, the bufferWriteProcessor must not be null
++// private BufferWriteProcessor getBufferWriteProcessor() {
++// return bufferWriteProcessor;
++// }
++//
++// /**
++// * get bufferwrite processor by processor name and insert time.
++// */
++// public BufferWriteProcessor getBufferWriteProcessor(String processorName, long insertTime)
++// throws FileNodeProcessorException {
++// if (bufferWriteProcessor == null) {
++// Map<String, Action> params = new HashMap<>();
++// params.put(FileNodeConstants.BUFFERWRITE_FLUSH_ACTION, bufferwriteFlushAction);
++// //params.put(FileNodeConstants.BUFFERWRITE_CLOSE_ACTION, bufferwriteCloseAction);
++// params
++// .put(FileNodeConstants.FILENODE_PROCESSOR_FLUSH_ACTION, fileNodeFlushAction);
++// String baseDir = DIRECTORY_MANAGER.getNextFolderForTsfile();
++// LOGGER.info("Allocate folder {} for the new bufferwrite processor.", baseDir);
++// // construct processor or restore
++// try {
++// bufferWriteProcessor = new BufferWriteProcessor(baseDir, processorName,
++// insertTime + FileNodeConstants.BUFFERWRITE_FILE_SEPARATOR
++// + System.currentTimeMillis(),
++// params, bufferwriteCloseConsumer, versionController, fileSchema);
++// } catch (BufferWriteProcessorException e) {
++// throw new FileNodeProcessorException(String
++// .format("The filenode processor %s failed to get the bufferwrite processor.",
++// processorName), e);
++// }
++// }
++// return bufferWriteProcessor;
++// }
++//
++// /**
++// * get overflow processor by processor name.
++// */
++// public OverflowProcessor getOverflowProcessor(String processorName) throws ProcessorException {
++// if (overflowProcessor == null) {
++// Map<String, Action> params = new HashMap<>();
++// // construct processor or restore
++// params.put(FileNodeConstants.OVERFLOW_FLUSH_ACTION, overflowFlushAction);
++// params
++// .put(FileNodeConstants.FILENODE_PROCESSOR_FLUSH_ACTION, fileNodeFlushAction);
++// overflowProcessor = new OverflowProcessor(processorName, params, fileSchema,
++// versionController);
++// } else if (overflowProcessor.isClosed()) {
++// overflowProcessor.reopen();
++// }
++// return overflowProcessor;
++// }
++//
++// /**
++// * get overflow processor.
++// */
++// public OverflowProcessor getOverflowProcessor() {
++// if (overflowProcessor == null || overflowProcessor.isClosed()) {
++// LOGGER.error("The overflow processor is null when getting the overflowProcessor");
++// }
++// return overflowProcessor;
++// }
++//
++// public boolean hasOverflowProcessor() {
++// return overflowProcessor != null && !overflowProcessor.isClosed();
++// }
++//
++// public void setBufferwriteProcessroToClosed() {
++//
++// bufferWriteProcessor = null;
++// }
++//
++// public boolean hasBufferwriteProcessor() {
++//
++// return bufferWriteProcessor != null;
++// }
++//
++// /**
++// * set last update time.
++// */
++// public void setLastUpdateTime(String deviceId, long timestamp) {
++// if (!lastUpdateTimeMap.containsKey(deviceId) || lastUpdateTimeMap.get(deviceId) < timestamp) {
++// lastUpdateTimeMap.put(deviceId, timestamp);
++// }
++// if (timestamp == -1) {
++// lastUpdateTimeMap.remove(deviceId);
++// }
++// }
++//
++// /**
++// * get last update time.
++// */
++// public long getLastUpdateTime(String deviceId) {
++//
++// if (lastUpdateTimeMap.containsKey(deviceId)) {
++// return lastUpdateTimeMap.get(deviceId);
++// } else {
++// return -1;
++// }
++// }
++//
++// /**
++// * get flush last update time.
++// */
++// public long getFlushLastUpdateTime(String deviceId) {
++// if (!flushLastUpdateTimeMap.containsKey(deviceId)) {
++// flushLastUpdateTimeMap.put(deviceId, 0L);
++// }
++// return flushLastUpdateTimeMap.get(deviceId);
++// }
++//
++// public Map<String, Long> getLastUpdateTimeMap() {
++// return lastUpdateTimeMap;
++// }
++//
++// /**
++// * For insert overflow.
++// */
++// public void changeTypeToChanged(String deviceId, long timestamp) {
++// if (!invertedIndexOfFiles.containsKey(deviceId)) {
++// LOGGER.warn(
++// WARN_NO_SUCH_OVERFLOWED_FILE
++// + "the data is [device:{},time:{}]",
++// getProcessorName(), deviceId, timestamp);
++// emptyTsFileResource.setStartTime(deviceId, 0L);
++// emptyTsFileResource.setEndTime(deviceId, getLastUpdateTime(deviceId));
++// emptyTsFileResource.changeTypeToChanged(isMerging);
++// } else {
++// List<TsFileResource> temp = invertedIndexOfFiles.get(deviceId);
++// int index = searchIndexNodeByTimestamp(deviceId, timestamp, temp);
++// changeTypeToChanged(temp.get(index), deviceId);
++// }
++// }
++//
++// private void changeTypeToChanged(TsFileResource fileNode, String deviceId) {
++// fileNode.changeTypeToChanged(isMerging);
++// if (isMerging == FileNodeProcessorStatus.MERGING_WRITE) {
++// fileNode.addMergeChanged(deviceId);
++// }
++// }
++//
++// /**
++// * For update overflow.
++// */
++// public void changeTypeToChanged(String deviceId, long startTime, long endTime) {
++// if (!invertedIndexOfFiles.containsKey(deviceId)) {
++// LOGGER.warn(
++// WARN_NO_SUCH_OVERFLOWED_FILE
++// + "the data is [device:{}, start time:{}, end time:{}]",
++// getProcessorName(), deviceId, startTime, endTime);
++// emptyTsFileResource.setStartTime(deviceId, 0L);
++// emptyTsFileResource.setEndTime(deviceId, getLastUpdateTime(deviceId));
++// emptyTsFileResource.changeTypeToChanged(isMerging);
++// } else {
++// List<TsFileResource> temp = invertedIndexOfFiles.get(deviceId);
++// int left = searchIndexNodeByTimestamp(deviceId, startTime, temp);
++// int right = searchIndexNodeByTimestamp(deviceId, endTime, temp);
++// for (int i = left; i <= right; i++) {
++// changeTypeToChanged(temp.get(i), deviceId);
++// }
++// }
++// }
++//
++// /**
++// * For delete overflow.
++// */
++// public void changeTypeToChangedForDelete(String deviceId, long timestamp) {
++// if (!invertedIndexOfFiles.containsKey(deviceId)) {
++// LOGGER.warn(
++// WARN_NO_SUCH_OVERFLOWED_FILE
++// + "the data is [device:{}, delete time:{}]",
++// getProcessorName(), deviceId, timestamp);
++// emptyTsFileResource.setStartTime(deviceId, 0L);
++// emptyTsFileResource.setEndTime(deviceId, getLastUpdateTime(deviceId));
++// emptyTsFileResource.changeTypeToChanged(isMerging);
++// } else {
++// List<TsFileResource> temp = invertedIndexOfFiles.get(deviceId);
++// int index = searchIndexNodeByTimestamp(deviceId, timestamp, temp);
++// for (int i = 0; i <= index; i++) {
++// temp.get(i).changeTypeToChanged(isMerging);
++// if (isMerging == FileNodeProcessorStatus.MERGING_WRITE) {
++// temp.get(i).addMergeChanged(deviceId);
++// }
++// }
++// }
++// }
++//
++// /**
++// * Search the index of the interval by the timestamp.
++// *
++// * @return index of interval
++// */
++// private int searchIndexNodeByTimestamp(String deviceId, long timestamp,
++// List<TsFileResource> fileList) {
++// int index = 1;
++// while (index < fileList.size()) {
++// if (timestamp < fileList.get(index).getStartTime(deviceId)) {
++// break;
++// } else {
++// index++;
++// }
++// }
++// return index - 1;
++// }
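++//
++// // Worked example (illustrative, not in the original source): with files
++// // starting at times [0, 100, 200] for a device, a timestamp of 150 stops
++// // the scan at index 2 (200 > 150) and returns 2 - 1 = 1, i.e. the file
++// // whose interval covers time 150.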
++//
++// /**
++// * add multiple pass lock.
++// */
++// public int addMultiPassCount() {
++// LOGGER.debug("Add MultiPassCount: allocate a token and increment newMultiPassCount.");
++// newMultiPassCount.incrementAndGet();
++// while (newMultiPassTokenSet.contains(multiPassLockToken)) {
++// multiPassLockToken++;
++// }
++// newMultiPassTokenSet.add(multiPassLockToken);
++// LOGGER.debug("Add multi token:{}, nsPath:{}.", multiPassLockToken, getProcessorName());
++// return multiPassLockToken;
++// }
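++//
++// // Illustrative note (not in the original source): tokens are allocated
++// // densely, so if newMultiPassTokenSet already holds {0, 1}, the loop
++// // advances multiPassLockToken to 2 before registering and returning it;
++// // the token must later be passed to decreaseMultiPassCount() to release
++// // the query reference.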
++//
++// /**
++// * decrease multiple pass count. TODO: use the return value or remove it.
++// */
++// public boolean decreaseMultiPassCount(int token) throws FileNodeProcessorException {
++// if (newMultiPassTokenSet.contains(token)) {
++// int newMultiPassCountValue = newMultiPassCount.decrementAndGet();
++// if (newMultiPassCountValue < 0) {
++// throw new FileNodeProcessorException(String
++// .format("Remove MultiPassCount error, newMultiPassCount:%d", newMultiPassCountValue));
++// }
++// newMultiPassTokenSet.remove(token);
++// LOGGER.debug("Remove multi token:{}, nspath:{}, new set:{}, count:{}", token,
++// getProcessorName(),
++// newMultiPassTokenSet, newMultiPassCount);
++// return true;
++// } else if (oldMultiPassTokenSet != null && oldMultiPassTokenSet.contains(token)) {
++// // remove token first, then unlock
++// oldMultiPassTokenSet.remove(token);
++// oldMultiPassCount.countDown();
++// long oldMultiPassCountValue = oldMultiPassCount.getCount();
++// if (oldMultiPassCountValue < 0) {
++// throw new FileNodeProcessorException(String
++// .format("Remove MultiPassCount error, oldMultiPassCount:%d", oldMultiPassCountValue));
++// }
++// LOGGER.debug("Remove multi token:{}, old set:{}, count:{}", token, oldMultiPassTokenSet,
++// oldMultiPassCount.getCount());
++// return true;
++// } else {
++// LOGGER.error("remove token error:{},new set:{}, old set:{}", token, newMultiPassTokenSet,
++// oldMultiPassTokenSet);
++// // should add throw exception
++// return false;
++// }
++// }
++//
++// /**
++// * query data.
++// */
++// public <T extends Comparable<T>> QueryDataSource query(String deviceId, String measurementId,
++// QueryContext context) throws FileNodeProcessorException {
++// // query overflow data
++// MeasurementSchema mSchema;
++// TSDataType dataType;
++//
++// //mSchema = mManager.getSchemaForOnePath(deviceId + "." + measurementId);
++// mSchema = fileSchema.getMeasurementSchema(measurementId);
++// dataType = mSchema.getType();
++//
++// OverflowSeriesDataSource overflowSeriesDataSource;
++// try {
++// overflowSeriesDataSource = overflowProcessor.query(deviceId, measurementId, dataType,
++// mSchema.getProps(), context);
++// } catch (IOException e) {
++// throw new FileNodeProcessorException(e);
++// }
++// // tsfile data
++// List<TsFileResource> bufferwriteDataInFiles = new ArrayList<>();
++// for (TsFileResource tsFileResource : newFileNodes) {
++// // add the same tsFileResource, but not the same reference
++// if (tsFileResource.isClosed()) {
++// bufferwriteDataInFiles.add(tsFileResource.backUp());
++// }
++// }
++// Pair<ReadOnlyMemChunk, List<ChunkMetaData>> bufferwritedata = new Pair<>(null, null);
++// // bufferwrite data
++// UnsealedTsFile unsealedTsFile = null;
++//
++// if (!newFileNodes.isEmpty() && !newFileNodes.get(newFileNodes.size() - 1).isClosed()
++// && !newFileNodes.get(newFileNodes.size() - 1).getStartTimeMap().isEmpty()) {
++// unsealedTsFile = new UnsealedTsFile();
++// unsealedTsFile.setFilePath(newFileNodes.get(newFileNodes.size() - 1).getFile().getAbsolutePath());
++// if (bufferWriteProcessor == null) {
++// throw new FileNodeProcessorException(String.format(
++// "The last tsfile %s in filenode processor %s is not closed, "
++// + "but the bufferwrite processor is null.",
++// newFileNodes.get(newFileNodes.size() - 1).getFile().getAbsolutePath(), getProcessorName()));
++// }
++// bufferwritedata = bufferWriteProcessor
++// .queryBufferWriteData(deviceId, measurementId, dataType, mSchema.getProps());
++//
++// try {
++// List<Modification> pathModifications = context.getPathModifications(
++// bufferWriteProcessor.getCurrentTsFileResource().getModFile(), deviceId
++// + IoTDBConstant.PATH_SEPARATOR + measurementId
++// );
++// if (!pathModifications.isEmpty()) {
++// QueryUtils.modifyChunkMetaData(bufferwritedata.right, pathModifications);
++// }
++// } catch (IOException e) {
++// throw new FileNodeProcessorException(e);
++// }
++//
++// unsealedTsFile.setTimeSeriesChunkMetaDatas(bufferwritedata.right);
++// }
++// GlobalSortedSeriesDataSource globalSortedSeriesDataSource = new GlobalSortedSeriesDataSource(
++// new Path(deviceId + "." + measurementId), bufferwriteDataInFiles, unsealedTsFile,
++// bufferwritedata.left);
++// return new QueryDataSource(globalSortedSeriesDataSource, overflowSeriesDataSource);
++//
++// }
++//
++// /**
++// * append one specified tsfile to this filenode processor.
++// *
++// * @param appendFile the appended tsfile information
++// * @param appendFilePath the path of the appended file
++// */
++// public void appendFile(TsFileResource appendFile, String appendFilePath)
++// throws FileNodeProcessorException {
++// try {
++// if (!appendFile.getFile().getParentFile().exists()) {
++// appendFile.getFile().getParentFile().mkdirs();
++// }
++// // move file
++// File originFile = new File(appendFilePath);
++// File targetFile = appendFile.getFile();
++// if (!originFile.exists()) {
++// throw new FileNodeProcessorException(
++// String.format("The appended file %s does not exist.", appendFilePath));
++// }
++// if (targetFile.exists()) {
++// throw new FileNodeProcessorException(
++// String.format("The appended target file %s already exists.",
++// appendFile.getFile().getAbsolutePath()));
++// }
++// if (!originFile.renameTo(targetFile)) {
++// LOGGER.warn("File renaming failed when appending new file. Origin: {}, Target: {}",
++// originFile.getPath(), targetFile.getPath());
++// }
++// // append the new tsfile
++// this.newFileNodes.add(appendFile);
++// // update the lastUpdateTime
++// for (Entry<String, Long> entry : appendFile.getEndTimeMap().entrySet()) {
++// lastUpdateTimeMap.put(entry.getKey(), entry.getValue());
++// }
++// bufferwriteFlushAction.act();
++// fileNodeProcessorStore.setNewFileNodes(newFileNodes);
++// // reconstruct the inverted index of the newFileNodes
++// fileNodeFlushAction.act();
++// addAllFileIntoIndex(newFileNodes);
++// } catch (Exception e) {
++// LOGGER.error("Failed to append the tsfile {} to filenode processor {}.", appendFile,
++// getProcessorName());
++// throw new FileNodeProcessorException(e);
++// }
++// }
++//
++// /**
++// * get overlapping tsfiles which conflict with the appendFile.
++// *
++// * @param appendFile the appended tsfile information
++// */
++// public List<String> getOverlapFiles(TsFileResource appendFile, String uuid)
++// throws FileNodeProcessorException {
++// List<String> overlapFiles = new ArrayList<>();
++// try {
++// for (TsFileResource tsFileResource : newFileNodes) {
++// getOverlapFiles(appendFile, tsFileResource, uuid, overlapFiles);
++// }
++// } catch (IOException e) {
++// LOGGER.error("Failed to get overlap tsfiles which conflict with the appendFile.");
++// throw new FileNodeProcessorException(e);
++// }
++// return overlapFiles;
++// }
++//
++// private void getOverlapFiles(TsFileResource appendFile, TsFileResource tsFileResource,
++// String uuid, List<String> overlapFiles) throws IOException {
++// for (Entry<String, Long> entry : appendFile.getStartTimeMap().entrySet()) {
++// if (tsFileResource.getStartTimeMap().containsKey(entry.getKey()) &&
++// tsFileResource.getEndTime(entry.getKey()) >= entry.getValue()
++// && tsFileResource.getStartTime(entry.getKey()) <= appendFile
++// .getEndTime(entry.getKey())) {
++// String relativeFilePath =
++// Constans.SYNC_SERVER + File.separatorChar + uuid + File.separatorChar
++// + Constans.BACK_UP_DIRECTORY_NAME
++// + File.separatorChar + tsFileResource.getRelativePath();
++// File newFile = new File(
++// DirectoryManager.getInstance().getTsFileFolder(tsFileResource.getBaseDirIndex()),
++// relativeFilePath);
++// if (!newFile.getParentFile().exists()) {
++// newFile.getParentFile().mkdirs();
++// }
++// java.nio.file.Path link = FileSystems.getDefault().getPath(newFile.getPath());
++// java.nio.file.Path target = FileSystems.getDefault()
++// .getPath(tsFileResource.getFile().getAbsolutePath());
++// Files.createLink(link, target);
++// overlapFiles.add(newFile.getPath());
++// break;
++// }
++// }
++// }
++//
++// /**
++// * add time series.
++// */
++// public void addTimeSeries(String measurementId, TSDataType dataType, TSEncoding encoding,
++// CompressionType compressor, Map<String, String> props) {
++// fileSchema.registerMeasurement(new MeasurementSchema(measurementId, dataType, encoding,
++// compressor, props));
++// }
++//
++// /**
++// * submit the merge task to the <code>MergePool</code>.
++// *
++// * @return null - can't submit the merge task, because this filenode is not overflowed or it is
++// * merging now; Future - the merge task was submitted successfully.
++// */
++// Future submitToMerge() {
++// ZoneId zoneId = IoTDBDescriptor.getInstance().getConfig().getZoneID();
++// if (lastMergeTime > 0) {
++// long thisMergeTime = System.currentTimeMillis();
++// long mergeTimeInterval = thisMergeTime - lastMergeTime;
++// ZonedDateTime lastDateTime = ofInstant(Instant.ofEpochMilli(lastMergeTime),
++// zoneId);
++// ZonedDateTime thisDateTime = ofInstant(Instant.ofEpochMilli(thisMergeTime),
++// zoneId);
++// LOGGER.info(
++// "The filenode {} last merge time is {}, this merge time is {}, "
++// + "merge time interval is {}s",
++// getProcessorName(), lastDateTime, thisDateTime, mergeTimeInterval / 1000);
++// }
++// lastMergeTime = System.currentTimeMillis();
++//
++// if (overflowProcessor != null && !overflowProcessor.isClosed()) {
++// if (overflowProcessor.getFileSize() < IoTDBDescriptor.getInstance()
++// .getConfig().getOverflowFileSizeThreshold()) {
++// if (LOGGER.isInfoEnabled()) {
++// LOGGER.info(
++// "Skip this merge task submission, because the size {} of overflow processor {} "
++// + "does not reach the threshold {}.",
++// MemUtils.bytesCntToStr(overflowProcessor.getFileSize()), getProcessorName(),
++// MemUtils.bytesCntToStr(
++// IoTDBDescriptor.getInstance().getConfig().getOverflowFileSizeThreshold()));
++// }
++// return null;
++// }
++// } else {
++// LOGGER.info(
++// "Skip this merge task submission, because the filenode processor {} "
++// + "has no overflow processor.",
++// getProcessorName());
++// return null;
++// }
++// if (isOverflowed && isMerging == FileNodeProcessorStatus.NONE) {
++// Runnable mergeThread;
++// mergeThread = new MergeRunnale();
++// LOGGER.info("Submit the merge task, the merge filenode is {}", getProcessorName());
++// return MergePoolManager.getInstance().submit(mergeThread);
++// } else {
++// if (!isOverflowed) {
++// LOGGER.info(
++// "Skip this merge task submission, because the filenode processor {} is not " +
++// "overflowed.",
++// getProcessorName());
++// } else {
++// LOGGER.warn(
++// "Skip this merge task submission, because last merge task is not over yet, "
++// + "the merge filenode processor is {}",
++// getProcessorName());
++// }
++// }
++// return null;
++// }
++//
++// /**
++// * Prepare for merge: close the bufferwrite and overflow processors.
++// */
++// private void prepareForMerge() {
++// try {
++// LOGGER.info("The filenode processor {} prepares for merge, closes the bufferwrite processor",
++// getProcessorName());
++// Future<Boolean> future = closeBufferWrite();
++// future.get();
++// LOGGER.info("The bufferwrite processor {} is closed successfully",
++// getProcessorName());
++// // try to get overflow processor
++// getOverflowProcessor(getProcessorName());
++// // must close the overflow processor
++// while (!getOverflowProcessor().canBeClosed()) {
++// waitForClosing();
++// }
++// LOGGER.info("The filenode processor {} prepares for merge, closes the overflow processor",
++// getProcessorName());
++// getOverflowProcessor().close();
++// } catch (ProcessorException | InterruptedException | ExecutionException e) {
++// LOGGER.error("The filenode processor {} prepares for merge error.", getProcessorName());
++// writeUnlock();
++// throw new ErrorDebugException(e);
++// }
++// }
++//
++// private void waitForClosing() {
++// try {
++// LOGGER.info(
++// "The filenode processor {} prepares for merge, the overflow {} can't be closed, "
++// + "wait 100ms",
++// getProcessorName(), getProcessorName());
++// TimeUnit.MILLISECONDS.sleep(100);
++// } catch (InterruptedException e) {
++// Thread.currentThread().interrupt();
++// }
++// }
++//
++// /**
++// * Merge this storage group, merge the tsfile data with overflow data.
++// */
++// public void merge() throws FileNodeProcessorException {
++// // close bufferwrite and overflow, prepare for merge
++// LOGGER.info("The filenode processor {} begins to merge.", getProcessorName());
++// writeLock();
++// prepareForMerge();
++// // change status from overflowed to not overflowed
++// isOverflowed = false;
++// // change status from work to merge
++// isMerging = FileNodeProcessorStatus.MERGING_WRITE;
++// // check the empty file
++// Map<String, Long> startTimeMap = emptyTsFileResource.getStartTimeMap();
++// mergeCheckEmptyFile(startTimeMap);
++//
++// for (TsFileResource tsFileResource : newFileNodes) {
++// if (tsFileResource.getOverflowChangeType() != OverflowChangeType.NO_CHANGE) {
++// tsFileResource.setOverflowChangeType(OverflowChangeType.CHANGED);
++// }
++// }
++//
++// addAllFileIntoIndex(newFileNodes);
++// synchronized (fileNodeProcessorStore) {
++// fileNodeProcessorStore.setOverflowed(isOverflowed);
++// fileNodeProcessorStore.setFileNodeProcessorStatus(isMerging);
++// fileNodeProcessorStore.setNewFileNodes(newFileNodes);
++// fileNodeProcessorStore.setEmptyTsFileResource(emptyTsFileResource);
++// // flush this filenode information
++// try {
++// writeStoreToDisk(fileNodeProcessorStore);
++// } catch (FileNodeProcessorException e) {
++// LOGGER.error("The filenode processor {} writes restore information error when merging.",
++// getProcessorName(), e);
++// writeUnlock();
++// throw new FileNodeProcessorException(e);
++// }
++// }
++// // add numOfMergeFile to control the number of the merge file
++// List<TsFileResource> backupIntervalFiles;
++//
++// backupIntervalFiles = switchFileNodeToMerge();
++// //
++// // clear empty file
++// //
++// boolean needEmtpy = false;
++// if (emptyTsFileResource.getOverflowChangeType() != OverflowChangeType.NO_CHANGE) {
++// needEmtpy = true;
++// }
++// emptyTsFileResource.clear();
++// // attention
++// try {
++// if (overflowProcessor.isClosed()) {
++// overflowProcessor.reopen();
++// }
++// overflowProcessor.switchWorkToMerge();
++// } catch (ProcessorException | IOException e) {
++// LOGGER.error("The filenode processor {} can't switch overflow processor from work to merge.",
++// getProcessorName(), e);
++// writeUnlock();
++// throw new FileNodeProcessorException(e);
++// }
++// LOGGER.info("The filenode processor {} switches from {} to {}.", getProcessorName(),
++// FileNodeProcessorStatus.NONE, FileNodeProcessorStatus.MERGING_WRITE);
++// writeUnlock();
++//
++// // query tsfile data and overflow data, and merge them
++// int numOfMergeFiles = 0;
++// int allNeedMergeFiles = backupIntervalFiles.size();
++// for (TsFileResource backupIntervalFile : backupIntervalFiles) {
++// numOfMergeFiles++;
++// if (backupIntervalFile.getOverflowChangeType() == OverflowChangeType.CHANGED) {
++// // query data and merge
++// String filePathBeforeMerge = backupIntervalFile.getRelativePath();
++// try {
++// LOGGER.info(
++// "The filenode processor {} begins merging the {}/{} tsfile[{}] with "
++// + "overflow file, the process is {}%",
++// getProcessorName(), numOfMergeFiles, allNeedMergeFiles, filePathBeforeMerge,
++// (int) (((numOfMergeFiles - 1) / (float) allNeedMergeFiles) * 100));
++// long startTime = System.currentTimeMillis();
++// String newFile = queryAndWriteDataForMerge(backupIntervalFile);
++// long endTime = System.currentTimeMillis();
++// long timeConsume = endTime - startTime;
++// ZoneId zoneId = IoTDBDescriptor.getInstance().getConfig().getZoneID();
++// LOGGER.info(
++// "The fileNode processor {} has merged the {}/{} tsfile[{}->{}] over, "
++// + "start time of merge is {}, end time of merge is {}, "
++// + "time consumption is {}ms,"
++// + " the process is {}%",
++// getProcessorName(), numOfMergeFiles, allNeedMergeFiles, filePathBeforeMerge,
++// newFile, ofInstant(Instant.ofEpochMilli(startTime),
++// zoneId), ofInstant(Instant.ofEpochMilli(endTime), zoneId), timeConsume,
++// numOfMergeFiles / (float) allNeedMergeFiles * 100);
++// } catch (IOException | PathErrorException e) {
++// LOGGER.error("Merge: query and write data error.", e);
++// throw new FileNodeProcessorException(e);
++// }
++// } else if (backupIntervalFile.getOverflowChangeType() == OverflowChangeType.MERGING_CHANGE) {
++// LOGGER.error("The overflowChangeType of backupIntervalFile must not be {}",
++// OverflowChangeType.MERGING_CHANGE);
++// // handle this error, throw one runtime exception
++// throw new FileNodeProcessorException(
++// "The overflowChangeType of backupIntervalFile must not be "
++// + OverflowChangeType.MERGING_CHANGE);
++// } else {
++// LOGGER.debug(
++// "The filenode processor {} is merging, the interval file {} doesn't "
++// + "need to be merged.",
++// getProcessorName(), backupIntervalFile.getRelativePath());
++// }
++// }
++//
++// // change status from merge to wait
++// switchMergeToWaiting(backupIntervalFiles, needEmtpy);
++//
++// // change status from wait to work
++// switchWaitingToWorking();
++// }
++//
++// private void mergeCheckEmptyFile(Map<String, Long> startTimeMap) {
++// if (emptyTsFileResource.getOverflowChangeType() == OverflowChangeType.NO_CHANGE) {
++// return;
++// }
++// Iterator<Entry<String, Long>> iterator = emptyTsFileResource.getEndTimeMap().entrySet()
++// .iterator();
++// while (iterator.hasNext()) {
++// Entry<String, Long> entry = iterator.next();
++// String deviceId = entry.getKey();
++// if (invertedIndexOfFiles.containsKey(deviceId)) {
++// invertedIndexOfFiles.get(deviceId).get(0).setOverflowChangeType(OverflowChangeType.CHANGED);
++// startTimeMap.remove(deviceId);
++// iterator.remove();
++// }
++// }
++// if (emptyTsFileResource.checkEmpty()) {
++// emptyTsFileResource.clear();
++// } else {
++// if (!newFileNodes.isEmpty()) {
++// TsFileResource first = newFileNodes.get(0);
++// for (String deviceId : emptyTsFileResource.getStartTimeMap().keySet()) {
++// first.setStartTime(deviceId, emptyTsFileResource.getStartTime(deviceId));
++// first.setEndTime(deviceId, emptyTsFileResource.getEndTime(deviceId));
++// first.setOverflowChangeType(OverflowChangeType.CHANGED);
++// }
++// emptyTsFileResource.clear();
++// } else {
++// emptyTsFileResource.setOverflowChangeType(OverflowChangeType.CHANGED);
++// }
++// }
++// }
++//
++// private List<TsFileResource> switchFileNodeToMerge() throws FileNodeProcessorException {
++// List<TsFileResource> result = new ArrayList<>();
++// if (emptyTsFileResource.getOverflowChangeType() != OverflowChangeType.NO_CHANGE) {
++// // add empty
++// result.add(emptyTsFileResource.backUp());
++// if (!newFileNodes.isEmpty()) {
++// throw new FileNodeProcessorException(
++// String.format("The status of empty file is %s, but the new file list is not empty",
++// emptyTsFileResource.getOverflowChangeType()));
++// }
++// return result;
++// }
++// if (newFileNodes.isEmpty()) {
++// LOGGER.error("No file was changed when merging, the filenode is {}", getProcessorName());
++// throw new FileNodeProcessorException(
++// "No file was changed when merging, the filenode is " + getProcessorName());
++// }
++// for (TsFileResource tsFileResource : newFileNodes) {
++// updateFileNode(tsFileResource, result);
++// }
++// return result;
++// }
++//
++// private void updateFileNode(TsFileResource tsFileResource, List<TsFileResource> result) {
++// if (tsFileResource.getOverflowChangeType() == OverflowChangeType.NO_CHANGE) {
++// result.add(tsFileResource.backUp());
++// } else {
++// Map<String, Long> startTimeMap = new HashMap<>();
++// Map<String, Long> endTimeMap = new HashMap<>();
++// for (String deviceId : tsFileResource.getEndTimeMap().keySet()) {
++// List<TsFileResource> temp = invertedIndexOfFiles.get(deviceId);
++// int index = temp.indexOf(tsFileResource);
++// int size = temp.size();
++// // start time
++// if (index == 0) {
++// startTimeMap.put(deviceId, 0L);
++// } else {
++// startTimeMap.put(deviceId, tsFileResource.getStartTime(deviceId));
++// }
++// // end time
++// if (index < size - 1) {
++// endTimeMap.put(deviceId, temp.get(index + 1).getStartTime(deviceId) - 1);
++// } else {
++// endTimeMap.put(deviceId, tsFileResource.getEndTime(deviceId));
++// }
++// }
++// TsFileResource node = new TsFileResource(startTimeMap, endTimeMap,
++// tsFileResource.getOverflowChangeType(), tsFileResource.getFile());
++// result.add(node);
++// }
++// }
++//
++// private void switchMergeToWaiting(List<TsFileResource> backupIntervalFiles, boolean needEmpty)
++// throws FileNodeProcessorException {
++// LOGGER.info("The status of filenode processor {} switches from {} to {}.", getProcessorName(),
++// FileNodeProcessorStatus.MERGING_WRITE, FileNodeProcessorStatus.WAITING);
++// writeLock();
++// try {
++// oldMultiPassTokenSet = newMultiPassTokenSet;
++// oldMultiPassCount = new CountDownLatch(newMultiPassCount.get());
++// newMultiPassTokenSet = new HashSet<>();
++// newMultiPassCount = new AtomicInteger(0);
++// List<TsFileResource> result = new ArrayList<>();
++// int beginIndex = 0;
++// if (needEmpty) {
++// TsFileResource empty = backupIntervalFiles.get(0);
++// if (!empty.checkEmpty()) {
++// updateEmpty(empty, result);
++// beginIndex++;
++// }
++// }
++// // reconstruct the file index
++// addAllFileIntoIndex(backupIntervalFiles);
++// // check the merge changed file
++// for (int i = beginIndex; i < backupIntervalFiles.size(); i++) {
++// TsFileResource newFile = newFileNodes.get(i - beginIndex);
++// TsFileResource temp = backupIntervalFiles.get(i);
++// if (newFile.getOverflowChangeType() == OverflowChangeType.MERGING_CHANGE) {
++// updateMergeChanged(newFile, temp);
++// }
++// if (!temp.checkEmpty()) {
++// result.add(temp);
++// }
++// }
++// // add new file when merge
++// for (int i = backupIntervalFiles.size() - beginIndex; i < newFileNodes.size(); i++) {
++// TsFileResource fileNode = newFileNodes.get(i);
++// if (fileNode.isClosed()) {
++// result.add(fileNode.backUp());
++// } else {
++// result.add(fileNode);
++// }
++// }
++//
++// isMerging = FileNodeProcessorStatus.WAITING;
++// newFileNodes = result;
++// // reconstruct the index
++// addAllFileIntoIndex(newFileNodes);
++// // clear merge changed
++// for (TsFileResource fileNode : newFileNodes) {
++// fileNode.clearMergeChanged();
++// }
++//
++// synchronized (fileNodeProcessorStore) {
++// fileNodeProcessorStore.setFileNodeProcessorStatus(isMerging);
++// fileNodeProcessorStore.setEmptyTsFileResource(emptyTsFileResource);
++// fileNodeProcessorStore.setNewFileNodes(newFileNodes);
++// try {
++// writeStoreToDisk(fileNodeProcessorStore);
++// } catch (FileNodeProcessorException e) {
++// LOGGER.error(
++// "Merge: failed to insert filenode information to revocery file, the filenode is " +
++// "{}.",
++// getProcessorName(), e);
++// throw new FileNodeProcessorException(
++// "Merge: insert filenode information to revocery file failed, the filenode is "
++// + getProcessorName());
++// }
++// }
++// } finally {
++// writeUnlock();
++// }
++// }
++//
++// private void updateEmpty(TsFileResource empty, List<TsFileResource> result) {
++// for (String deviceId : empty.getStartTimeMap().keySet()) {
++// if (invertedIndexOfFiles.containsKey(deviceId)) {
++// TsFileResource temp = invertedIndexOfFiles.get(deviceId).get(0);
++// if (temp.getMergeChanged().contains(deviceId)) {
++// empty.setOverflowChangeType(OverflowChangeType.CHANGED);
++// break;
++// }
++// }
++// }
++// empty.clearMergeChanged();
++// result.add(empty.backUp());
++// }
++//
++// private void updateMergeChanged(TsFileResource newFile, TsFileResource temp) {
++// for (String deviceId : newFile.getMergeChanged()) {
++// if (temp.getStartTimeMap().containsKey(deviceId)) {
++// temp.setOverflowChangeType(OverflowChangeType.CHANGED);
++// } else {
++// changeTypeToChanged(deviceId, newFile.getStartTime(deviceId),
++// newFile.getEndTime(deviceId));
++// }
++// }
++// }
++//
++//
++// private void switchWaitingToWorking()
++// throws FileNodeProcessorException {
++//
++// LOGGER.info("The status of filenode processor {} switches from {} to {}.", getProcessorName(),
++// FileNodeProcessorStatus.WAITING, FileNodeProcessorStatus.NONE);
++//
++// if (oldMultiPassCount != null) {
++// LOGGER.info("The old Multiple Pass Token set is {}, the old Multiple Pass Count is {}",
++// oldMultiPassTokenSet,
++// oldMultiPassCount);
++// try {
++// oldMultiPassCount.await();
++// } catch (InterruptedException e) {
++// LOGGER.error(
++// "The filenode processor {} was interrupted while waiting for all old queries to finish.",
++// getProcessorName());
++// throw new FileNodeProcessorException(e);
++// }
++// }
++//
++// try {
++// writeLock();
++// try {
++// // delete all bufferwrite files that are no longer referenced by newFileNodes
++// // notice: keep the restore file of the last interval file if it is not closed
++//
++// List<String> bufferwriteDirPathList = DIRECTORY_MANAGER.getAllTsFileFolders();
++// List<File> bufferwriteDirList = new ArrayList<>();
++// collectBufferWriteDirs(bufferwriteDirPathList, bufferwriteDirList);
++//
++// Set<String> bufferFiles = new HashSet<>();
++// collectBufferWriteFiles(bufferFiles);
++//
++// // add the restore file, if the last file is not closed
++// if (!newFileNodes.isEmpty() && !newFileNodes.get(newFileNodes.size() - 1).isClosed()) {
++// String bufferFileRestorePath =
++// newFileNodes.get(newFileNodes.size() - 1).getFile().getAbsolutePath() + RESTORE_FILE_SUFFIX;
++// bufferFiles.add(bufferFileRestorePath);
++// }
++//
++// deleteBufferWriteFiles(bufferwriteDirList, bufferFiles);
++//
++// // merge switch
++// changeFileNodes();
++//
++// // overflow switch from merge to work
++// overflowProcessor.switchMergeToWork();
++//
++// // write status to file
++// isMerging = FileNodeProcessorStatus.NONE;
++// synchronized (fileNodeProcessorStore) {
++// fileNodeProcessorStore.setFileNodeProcessorStatus(isMerging);
++// fileNodeProcessorStore.setNewFileNodes(newFileNodes);
++// fileNodeProcessorStore.setEmptyTsFileResource(emptyTsFileResource);
++// writeStoreToDisk(fileNodeProcessorStore);
++// }
++// } catch (IOException e) {
++// LOGGER.error(
++// "The filenode processor {} encountered an error when its "
++// + "status switched from {} to {}.",
++// getProcessorName(), FileNodeProcessorStatus.WAITING,
++// FileNodeProcessorStatus.NONE);
++// throw new FileNodeProcessorException(e);
++// } finally {
++// writeUnlock();
++// }
++// } finally {
++// oldMultiPassTokenSet = null;
++// oldMultiPassCount = null;
++// }
++//
++// }
++//
++// private void collectBufferWriteDirs(List<String> bufferwriteDirPathList,
++// List<File> bufferwriteDirList) {
++// for (String bufferwriteDirPath : bufferwriteDirPathList) {
++// if (bufferwriteDirPath.length() > 0
++// && bufferwriteDirPath.charAt(bufferwriteDirPath.length() - 1)
++// != File.separatorChar) {
++// bufferwriteDirPath = bufferwriteDirPath + File.separatorChar;
++// }
++// bufferwriteDirPath = bufferwriteDirPath + getProcessorName();
++// File bufferwriteDir = new File(bufferwriteDirPath);
++// bufferwriteDirList.add(bufferwriteDir);
++// if (!bufferwriteDir.exists()) {
++// bufferwriteDir.mkdirs();
++// }
++// }
++// }
++//
++// private void collectBufferWriteFiles(Set<String> bufferFiles) {
++// for (TsFileResource bufferFileNode : newFileNodes) {
++// String bufferFilePath = bufferFileNode.getFile().getAbsolutePath();
++// if (bufferFilePath != null) {
++// bufferFiles.add(bufferFilePath);
++// }
++// }
++// }
++//
++// private void deleteBufferWriteFiles(List<File> bufferwriteDirList, Set<String> bufferFiles)
++// throws IOException {
++// for (File bufferwriteDir : bufferwriteDirList) {
++// File[] files = bufferwriteDir.listFiles();
++// if (files == null) {
++// continue;
++// }
++// for (File file : files) {
++// if (!bufferFiles.contains(file.getPath())) {
++// FileReaderManager.getInstance().closeFileAndRemoveReader(file.getPath());
++// if (!file.delete()) {
++// LOGGER.warn("Cannot delete BufferWrite file {}", file.getPath());
++// }
++// }
++// }
++// }
++// }
++//
++// private void changeFileNodes() {
++// for (TsFileResource fileNode : newFileNodes) {
++// if (fileNode.getOverflowChangeType() != OverflowChangeType.NO_CHANGE) {
++// fileNode.setOverflowChangeType(OverflowChangeType.CHANGED);
++// }
++// }
++// }
++//
++// private String queryAndWriteDataForMerge(TsFileResource backupIntervalFile)
++// throws IOException, FileNodeProcessorException, PathErrorException {
++// Map<String, Long> startTimeMap = new HashMap<>();
++// Map<String, Long> endTimeMap = new HashMap<>();
++//
++// mergeFileWriter = null;
++// mergeOutputPath = null;
++// mergeBaseDir = null;
++// mergeFileName = null;
++// // modifications are blocked until mergeModification is created, to avoid
++// // losing modifications.
++// mergeDeleteLock.lock();
++// QueryContext context = new QueryContext();
++// try {
++// FileReaderManager.getInstance().increaseFileReaderReference(backupIntervalFile.getFilePath(),
++// true);
++// for (String deviceId : backupIntervalFile.getStartTimeMap().keySet()) {
++// // query one deviceId
++// List<Path> pathList = new ArrayList<>();
++// mergeIsChunkGroupHasData = false;
++// mergeStartPos = -1;
++// ChunkGroupFooter footer;
++// int numOfChunk = 0;
++// try {
++// List<String> pathStrings = mManager.getLeafNodePathInNextLevel(deviceId);
++// for (String string : pathStrings) {
++// pathList.add(new Path(string));
++// }
++// } catch (PathErrorException e) {
++// LOGGER.error("Can't get all the paths from MManager, the deviceId is {}", deviceId);
++// throw new FileNodeProcessorException(e);
++// }
++// if (pathList.isEmpty()) {
++// continue;
++// }
++// for (Path path : pathList) {
++// // query one measurement of this deviceId
++// String measurementId = path.getMeasurement();
++// TSDataType dataType = mManager.getSeriesType(path.getFullPath());
++// OverflowSeriesDataSource overflowSeriesDataSource = overflowProcessor.queryMerge(deviceId,
++// measurementId, dataType, true, context);
++// Filter timeFilter = FilterFactory
++// .and(TimeFilter.gtEq(backupIntervalFile.getStartTime(deviceId)),
++// TimeFilter.ltEq(backupIntervalFile.getEndTime(deviceId)));
++// SingleSeriesExpression seriesFilter = new SingleSeriesExpression(path, timeFilter);
++//
++// for (OverflowInsertFile overflowInsertFile : overflowSeriesDataSource
++// .getOverflowInsertFileList()) {
++// FileReaderManager.getInstance()
++// .increaseFileReaderReference(overflowInsertFile.getFilePath(),
++// false);
++// }
++//
++// IReader seriesReader = SeriesReaderFactory.getInstance()
++// .createSeriesReaderForMerge(backupIntervalFile,
++// overflowSeriesDataSource, seriesFilter, context);
++// numOfChunk += queryAndWriteSeries(seriesReader, path, seriesFilter, dataType,
++// startTimeMap, endTimeMap, overflowSeriesDataSource);
++// }
++// if (mergeIsChunkGroupHasData) {
++// // end the new rowGroupMetadata
++// mergeFileWriter.endChunkGroup(0);
++// }
++// }
++// } finally {
++// FileReaderManager.getInstance().decreaseFileReaderReference(backupIntervalFile.getFilePath(),
++// true);
++//
++// if (mergeDeleteLock.isLocked()) {
++// mergeDeleteLock.unlock();
++// }
++// }
++//
++// if (mergeFileWriter != null) {
++// mergeFileWriter.endFile(fileSchema);
++// }
++// backupIntervalFile.setFile(new File(mergeBaseDir + File.separator + mergeFileName));
++// backupIntervalFile.setOverflowChangeType(OverflowChangeType.NO_CHANGE);
++// backupIntervalFile.setStartTimeMap(startTimeMap);
++// backupIntervalFile.setEndTimeMap(endTimeMap);
++// backupIntervalFile.setModFile(mergingModification);
++// mergingModification = null;
++// return mergeFileName;
++// }
++//
++// private int queryAndWriteSeries(IReader seriesReader, Path path,
++// SingleSeriesExpression seriesFilter, TSDataType dataType,
++// Map<String, Long> startTimeMap, Map<String, Long> endTimeMap,
++// OverflowSeriesDataSource overflowSeriesDataSource)
++// throws IOException {
++// int numOfChunk = 0;
++// try {
++// if (!seriesReader.hasNext()) {
++// LOGGER.debug(
++// "The time-series {} has no data with the filter {} in the filenode processor {}",
++// path, seriesFilter, getProcessorName());
++// } else {
++// numOfChunk++;
++// TimeValuePair timeValuePair = seriesReader.next();
++// if (mergeFileWriter == null) {
++// mergeBaseDir = DIRECTORY_MANAGER.getNextFolderForTsfile();
++// mergeFileName = timeValuePair.getTimestamp()
++// + FileNodeConstants.BUFFERWRITE_FILE_SEPARATOR + System.currentTimeMillis();
++// mergeOutputPath = constructOutputFilePath(mergeBaseDir, getProcessorName(),
++// mergeFileName);
++// mergeFileName = getProcessorName() + File.separatorChar + mergeFileName;
++// mergeFileWriter = new TsFileIOWriter(new File(mergeOutputPath));
++// mergingModification = new ModificationFile(mergeOutputPath
++// + ModificationFile.FILE_SUFFIX);
++// mergeDeleteLock.unlock();
++// }
++// if (!mergeIsChunkGroupHasData) {
++// // start a new rowGroupMetadata
++// mergeIsChunkGroupHasData = true;
++// // the dataSize and numOfChunk here are placeholders;
++// // the accurate dataSize and numOfChunk are only known after all data of this device is written.
++// mergeFileWriter.startFlushChunkGroup(path.getDevice());// TODO please check me.
++// mergeStartPos = mergeFileWriter.getPos();
++// }
++// // init the seriesWriterImpl
++// MeasurementSchema measurementSchema = fileSchema
++// .getMeasurementSchema(path.getMeasurement());
++// ChunkBuffer pageWriter = new ChunkBuffer(measurementSchema);
++// int pageSizeThreshold = TSFileConfig.pageSizeInByte;
++// ChunkWriterImpl seriesWriterImpl = new ChunkWriterImpl(measurementSchema, pageWriter,
++// pageSizeThreshold);
++// // write the series data
++// writeOneSeries(path.getDevice(), seriesWriterImpl, dataType,
++// seriesReader,
++// startTimeMap, endTimeMap, timeValuePair);
++// // flush the series data
++// seriesWriterImpl.writeToFileWriter(mergeFileWriter);
++// }
++// } finally {
++// for (OverflowInsertFile overflowInsertFile : overflowSeriesDataSource
++// .getOverflowInsertFileList()) {
++// FileReaderManager.getInstance()
++// .decreaseFileReaderReference(overflowInsertFile.getFilePath(),
++// false);
++// }
++// }
++// return numOfChunk;
++// }
++//
++//
++// private void writeOneSeries(String deviceId, ChunkWriterImpl seriesWriterImpl,
++// TSDataType dataType, IReader seriesReader, Map<String, Long> startTimeMap,
++// Map<String, Long> endTimeMap, TimeValuePair firstTVPair) throws IOException {
++// long startTime;
++// long endTime;
++// TimeValuePair localTV = firstTVPair;
++// writeTVPair(seriesWriterImpl, dataType, localTV);
++// startTime = endTime = localTV.getTimestamp();
++// if (!startTimeMap.containsKey(deviceId) || startTimeMap.get(deviceId) > startTime) {
++// startTimeMap.put(deviceId, startTime);
++// }
++// if (!endTimeMap.containsKey(deviceId) || endTimeMap.get(deviceId) < endTime) {
++// endTimeMap.put(deviceId, endTime);
++// }
++// while (seriesReader.hasNext()) {
++// localTV = seriesReader.next();
++// endTime = localTV.getTimestamp();
++// writeTVPair(seriesWriterImpl, dataType, localTV);
++// }
++// if (!endTimeMap.containsKey(deviceId) || endTimeMap.get(deviceId) < endTime) {
++// endTimeMap.put(deviceId, endTime);
++// }
++// }
++//
++// private void writeTVPair(ChunkWriterImpl seriesWriterImpl, TSDataType dataType,
++// TimeValuePair timeValuePair) throws IOException {
++// switch (dataType) {
++// case BOOLEAN:
++// seriesWriterImpl.write(timeValuePair.getTimestamp(), timeValuePair.getValue().getBoolean());
++// break;
++// case INT32:
++// seriesWriterImpl.write(timeValuePair.getTimestamp(), timeValuePair.getValue().getInt());
++// break;
++// case INT64:
++// seriesWriterImpl.write(timeValuePair.getTimestamp(), timeValuePair.getValue().getLong());
++// break;
++// case FLOAT:
++// seriesWriterImpl.write(timeValuePair.getTimestamp(), timeValuePair.getValue().getFloat());
++// break;
++// case DOUBLE:
++// seriesWriterImpl.write(timeValuePair.getTimestamp(), timeValuePair.getValue().getDouble());
++// break;
++// case TEXT:
++// seriesWriterImpl.write(timeValuePair.getTimestamp(), timeValuePair.getValue().getBinary());
++// break;
++// default:
++// LOGGER.error("Not support data type: {}", dataType);
++// break;
++// }
++// }
++//
++//
++// private String constructOutputFilePath(String baseDir, String processorName, String fileName) {
++//
++// String localBaseDir = baseDir;
++// if (localBaseDir.charAt(localBaseDir.length() - 1) != File.separatorChar) {
++// localBaseDir = localBaseDir + File.separatorChar + processorName;
++// }
++// File dataDir = new File(localBaseDir);
++// if (!dataDir.exists()) {
++// LOGGER.warn("The bufferwrite processor data dir doesn't exists, create new directory {}",
++// localBaseDir);
++// dataDir.mkdirs();
++// }
++// File outputFile = new File(dataDir, fileName);
++// return outputFile.getPath();
++// }
++//
++// private FileSchema constructFileSchema(String processorName) throws WriteProcessException {
++//
++// List<MeasurementSchema> columnSchemaList;
++// columnSchemaList = mManager.getSchemaForFileName(processorName);
++//
++// FileSchema schema = new FileSchema();
++// for (MeasurementSchema measurementSchema : columnSchemaList) {
++// schema.registerMeasurement(measurementSchema);
++// }
++// return schema;
++//
++// }
++//
++// @Override
++// public boolean canBeClosed() {
++// if (isMerging != FileNodeProcessorStatus.NONE) {
++// LOGGER.info("The filenode {} can't be closed, because the filenode status is {}",
++// getProcessorName(),
++// isMerging);
++// return false;
++// }
++// if (newMultiPassCount.get() != 0) {
++// LOGGER.warn("The filenode {} can't be closed, because newMultiPassCount is {}. The newMultiPassTokenSet is {}",
++// getProcessorName(), newMultiPassCount, newMultiPassTokenSet);
++// return false;
++// }
++//
++// if (oldMultiPassCount == null) {
++// return true;
++// }
++// if (oldMultiPassCount.getCount() == 0) {
++// return true;
++// } else {
++// LOGGER.info("The filenode {} can't be closed, because oldMultiPassCount is {}",
++// getProcessorName(), oldMultiPassCount.getCount());
++// return false;
++// }
++// }
++//
++// @Override
++// public FileNodeFlushFuture flush() throws IOException {
++// Future<Boolean> bufferWriteFlushFuture = null;
++// Future<Boolean> overflowFlushFuture = null;
++// if (bufferWriteProcessor != null) {
++// bufferWriteFlushFuture = bufferWriteProcessor.flush();
++// }
++// if (overflowProcessor != null && !overflowProcessor.isClosed()) {
++// overflowFlushFuture = overflowProcessor.flush();
++// }
++// return new FileNodeFlushFuture(bufferWriteFlushFuture, overflowFlushFuture);
++// }
++//
++// /**
++// * Close the bufferwrite processor.
++// */
++// public Future<Boolean> closeBufferWrite() throws FileNodeProcessorException {
++// if (bufferWriteProcessor == null) {
++// return new ImmediateFuture<>(true);
++// }
++// try {
++// while (!bufferWriteProcessor.canBeClosed()) {
++// waitForBufferWriteClose();
++// }
++// bufferWriteProcessor.close();
++// Future<Boolean> result = bufferWriteProcessor.getCloseFuture();
++// closingBufferWriteProcessor.add(bufferWriteProcessor);
++// bufferWriteProcessor = null;
++// return result;
++// } catch (BufferWriteProcessorException e) {
++// throw new FileNodeProcessorException(e);
++// }
++// }
++//
++//
++//
++// private void waitForBufferWriteClose() {
++// try {
++// LOGGER.info("The bufferwrite {} can't be closed, wait 100ms",
++// bufferWriteProcessor.getProcessorName());
++// TimeUnit.MILLISECONDS.sleep(100);
++// } catch (InterruptedException e) {
++// LOGGER.error("Unexpected interruption", e);
++// Thread.currentThread().interrupt();
++// }
++// }
++//
++// /**
++// * Close the overflow processor.
++// */
++// public void closeOverflow() throws FileNodeProcessorException {
++// if (overflowProcessor == null || overflowProcessor.isClosed()) {
++// return;
++// }
++// try {
++// while (!overflowProcessor.canBeClosed()) {
++// waitForOverflowClose();
++// }
++// overflowProcessor.close();
++// } catch (OverflowProcessorException e) {
++// throw new FileNodeProcessorException(e);
++// }
++// }
++//
++// private void waitForOverflowClose() {
++// try {
++// LOGGER.info("The overflow {} can't be closed, wait 100ms",
++// overflowProcessor.getProcessorName());
++// TimeUnit.MILLISECONDS.sleep(100);
++// } catch (InterruptedException e) {
++// LOGGER.error("Unexpected interruption", e);
++// Thread.currentThread().interrupt();
++// }
++// }
++//
++// @Override
++// public void close() throws FileNodeProcessorException {
++// LOGGER.info("Will setCloseMark FileNode Processor {}.", getProcessorName());
++// Future<Boolean> result = closeBufferWrite();
++// try {
++// result.get();
++// } catch (InterruptedException | ExecutionException e) {
++// throw new FileNodeProcessorException(e);
++// }
++// closeOverflow();
++// for (TsFileResource fileNode : newFileNodes) {
++// if (fileNode.getModFile() != null) {
++// try {
++// fileNode.getModFile().close();
++// } catch (IOException e) {
++// throw new FileNodeProcessorException(e);
++// }
++// }
++// }
++// }
++//
++// /**
++// * Deregister the filenode processor.
++// */
++// public void delete() throws ProcessorException {
++// if (TsFileDBConf.isEnableStatMonitor()) {
++// // remove the monitor
++// LOGGER.info("Deregister the filenode processor: {} from monitor.", getProcessorName());
++// StatMonitor.getInstance().deregisterStatistics(statStorageDeltaName);
++// }
++// closeBufferWrite();
++// closeOverflow();
++// for (TsFileResource fileNode : newFileNodes) {
++// if (fileNode.getModFile() != null) {
++// try {
++// fileNode.getModFile().close();
++// } catch (IOException e) {
++// throw new FileNodeProcessorException(e);
++// }
++// }
++// }
++// }
++//
++// @Override
++// public long memoryUsage() {
++// long memSize = 0;
++// if (bufferWriteProcessor != null) {
++// memSize += bufferWriteProcessor.memoryUsage();
++// }
++// if (overflowProcessor != null) {
++// memSize += overflowProcessor.memoryUsage();
++// }
++// return memSize;
++// }
++//
++// private void writeStoreToDisk(FileNodeProcessorStore fileNodeProcessorStore)
++// throws FileNodeProcessorException {
++//
++// synchronized (fileNodeRestoreLock) {
++// try (FileOutputStream fileOutputStream = new FileOutputStream(fileNodeRestoreFilePath)) {
++// fileNodeProcessorStore.serialize(fileOutputStream);
++// LOGGER.debug("The filenode processor {} writes restore information to the restore file",
++// getProcessorName());
++// } catch (IOException e) {
++// throw new FileNodeProcessorException(e);
++// }
++// }
++// }
++//
++// private FileNodeProcessorStore readStoreFromDisk() throws FileNodeProcessorException {
++//
++// synchronized (fileNodeRestoreLock) {
++// File restoreFile = new File(fileNodeRestoreFilePath);
++// if (!restoreFile.exists() || restoreFile.length() == 0) {
++// try {
++// return new FileNodeProcessorStore(false, new HashMap<>(),
++// new TsFileResource(null, false),
++// new ArrayList<>(), FileNodeProcessorStatus.NONE, 0);
++// } catch (IOException e) {
++// throw new FileNodeProcessorException(e);
++// }
++// }
++// try (FileInputStream inputStream = new FileInputStream(fileNodeRestoreFilePath)) {
++// return FileNodeProcessorStore.deSerialize(inputStream);
++// } catch (IOException e) {
++// LOGGER
++// .error("Failed to deserialize the FileNodeRestoreFile {}, {}", fileNodeRestoreFilePath,
++// e);
++// throw new FileNodeProcessorException(e);
++// }
++// }
++// }
++//
++// String getFileNodeRestoreFilePath() {
++// return fileNodeRestoreFilePath;
++// }
++//
++// /**
++// * Delete data whose timestamp <= 'timestamp' and belongs to the timeseries deviceId.measurementId.
++// *
++// * @param deviceId the deviceId of the timeseries to be deleted.
++// * @param measurementId the measurementId of the timeseries to be deleted.
++// * @param timestamp the delete range is (0, timestamp].
++// */
++// public void delete(String deviceId, String measurementId, long timestamp) throws IOException {
++// // TODO: how to avoid partial deletion?
++// mergeDeleteLock.lock();
++// long version = versionController.nextVersion();
++//
++// // record what files are updated so we can roll back them in case of exception
++// List<ModificationFile> updatedModFiles = new ArrayList<>();
++//
++// try {
++// String fullPath = deviceId +
++// IoTDBConstant.PATH_SEPARATOR + measurementId;
++// Deletion deletion = new Deletion(fullPath, version, timestamp);
++// if (mergingModification != null) {
++// mergingModification.write(deletion);
++// updatedModFiles.add(mergingModification);
++// }
++// deleteBufferWriteFiles(deviceId, deletion, updatedModFiles);
++// // delete data in memory
++// OverflowProcessor ofProcessor = getOverflowProcessor(getProcessorName());
++// ofProcessor.delete(deviceId, measurementId, timestamp, version, updatedModFiles);
++// if (bufferWriteProcessor != null) {
++// bufferWriteProcessor.delete(deviceId, measurementId, timestamp);
++// }
++// } catch (Exception e) {
++// // roll back
++// for (ModificationFile modFile : updatedModFiles) {
++// modFile.abort();
++// }
++// throw new IOException(e);
++// } finally {
++// mergeDeleteLock.unlock();
++// }
++// }
++//
++// private void deleteBufferWriteFiles(String deviceId, Deletion deletion,
++// List<ModificationFile> updatedModFiles) throws IOException {
++// BufferWriteProcessor bufferWriteProcessor = getBufferWriteProcessor();
++// TsFileResource resource = null;
++// if (bufferWriteProcessor != null) {
++// // a null bufferWriteProcessor means it has been closed, so there is nothing in memory to update.
++// resource = bufferWriteProcessor.getCurrentTsFileResource();
++// if (resource != null && resource.containsDevice(deviceId)) {
++// resource.getModFile().write(deletion);
++// updatedModFiles.add(resource.getModFile());
++// }
++// }
++//
++// for (TsFileResource fileNode : newFileNodes) {
++// if (fileNode != resource && fileNode.containsDevice(deviceId)
++// && fileNode.getStartTime(deviceId) <= deletion.getTimestamp()) {
++// fileNode.getModFile().write(deletion);
++// updatedModFiles.add(fileNode.getModFile());
++// }
++// }
++// }
++//
++// /**
++// * Similar to delete(), but only deletes data in BufferWrite. Only used by WAL recovery.
++// */
++// public void deleteBufferWrite(String deviceId, String measurementId, long timestamp)
++// throws IOException, BufferWriteProcessorException {
++// String fullPath = deviceId +
++// IoTDBConstant.PATH_SEPARATOR + measurementId;
++// long version = versionController.nextVersion();
++// Deletion deletion = new Deletion(fullPath, version, timestamp);
++//
++// List<ModificationFile> updatedModFiles = new ArrayList<>();
++// try {
++// deleteBufferWriteFiles(deviceId, deletion, updatedModFiles);
++// } catch (IOException e) {
++// for (ModificationFile modificationFile : updatedModFiles) {
++// modificationFile.abort();
++// }
++// throw e;
++// }
++// if (bufferWriteProcessor != null) {
++// try {
++// bufferWriteProcessor.delete(deviceId, measurementId, timestamp);
++// } catch (BufferWriteProcessorException e) {
++// throw new IOException(e);
++// }
++// }
++// }
++//
++// /**
++// * Similar to delete(), but only deletes data in Overflow. Only used by WAL recovery.
++// */
++// public void deleteOverflow(String deviceId, String measurementId, long timestamp)
++// throws ProcessorException {
++// long version = versionController.nextVersion();
++//
++// OverflowProcessor overflowProcessor = getOverflowProcessor(getProcessorName());
++// List<ModificationFile> updatedModFiles = new ArrayList<>();
++// try {
++// overflowProcessor.delete(deviceId, measurementId, timestamp, version, updatedModFiles);
++// } catch (IOException e) {
++// for (ModificationFile modificationFile : updatedModFiles) {
++// try {
++// modificationFile.abort();
++// } catch (IOException e1) {
++// throw new ProcessorException(e);
++// }
++// }
++// throw new ProcessorException(e);
++// }
++// }
++//
++// public CopyOnReadLinkedList<BufferWriteProcessor> getClosingBufferWriteProcessor() {
++// for (BufferWriteProcessor processor: closingBufferWriteProcessor.cloneList()) {
++// if (processor.isClosed()) {
++// closingBufferWriteProcessor.remove(processor);
++// }
++// }
++// closingBufferWriteProcessor.reset();
++// return closingBufferWriteProcessor;
++// }
++//
++// @Override
++// public boolean equals(Object o) {
++// if (this == o) {
++// return true;
++// }
++// if (o == null || getClass() != o.getClass()) {
++// return false;
++// }
++// if (!super.equals(o)) {
++// return false;
++// }
++// FileNodeProcessor that = (FileNodeProcessor) o;
++// return isOverflowed == that.isOverflowed &&
++// numOfMergeFile == that.numOfMergeFile &&
++// lastMergeTime == that.lastMergeTime &&
++// multiPassLockToken == that.multiPassLockToken &&
++// Objects.equals(statStorageDeltaName, that.statStorageDeltaName) &&
++// Objects.equals(statParamsHashMap, that.statParamsHashMap) &&
++// Objects.equals(lastUpdateTimeMap, that.lastUpdateTimeMap) &&
++// Objects.equals(flushLastUpdateTimeMap, that.flushLastUpdateTimeMap) &&
++// Objects.equals(invertedIndexOfFiles, that.invertedIndexOfFiles) &&
++// Objects.equals(emptyTsFileResource, that.emptyTsFileResource) &&
++// Objects.equals(newFileNodes, that.newFileNodes) &&
++// isMerging == that.isMerging &&
++// Objects.equals(fileNodeProcessorStore, that.fileNodeProcessorStore) &&
++// Objects.equals(fileNodeRestoreFilePath, that.fileNodeRestoreFilePath) &&
++// Objects.equals(bufferWriteProcessor, that.bufferWriteProcessor) &&
++// Objects.equals(overflowProcessor, that.overflowProcessor) &&
++// Objects.equals(oldMultiPassTokenSet, that.oldMultiPassTokenSet) &&
++// Objects.equals(newMultiPassTokenSet, that.newMultiPassTokenSet) &&
++// Objects.equals(oldMultiPassCount, that.oldMultiPassCount) &&
++// Objects.equals(newMultiPassCount, that.newMultiPassCount) &&
++// Objects.equals(parameters, that.parameters) &&
++// Objects.equals(fileSchema, that.fileSchema) &&
++// Objects.equals(fileNodeFlushAction, that.fileNodeFlushAction) &&
++// Objects.equals(bufferwriteFlushAction, that.bufferwriteFlushAction) &&
++// Objects.equals(overflowFlushAction, that.overflowFlushAction);
++// }
++//
++// @Override
++// public int hashCode() {
++// return processorName.hashCode();
++// }
++//
++// public class MergeRunnale implements Runnable {
++//
++// @Override
++// public void run() {
++// try {
++// ZoneId zoneId = IoTDBDescriptor.getInstance().getConfig().getZoneID();
++// long mergeStartTime = System.currentTimeMillis();
++// merge();
++// long mergeEndTime = System.currentTimeMillis();
++// long intervalTime = mergeEndTime - mergeStartTime;
++// LOGGER.info(
++// "The filenode processor {} merge start time is {}, "
++// + "merge end time is {}, merge consumes {}ms.",
++// getProcessorName(), ofInstant(Instant.ofEpochMilli(mergeStartTime),
++// zoneId), ofInstant(Instant.ofEpochMilli(mergeEndTime),
++// zoneId), intervalTime);
++// } catch (FileNodeProcessorException e) {
++// LOGGER.error("The filenode processor {} encountered an error when merging.",
++// getProcessorName(), e);
++// throw new ErrorDebugException(e);
++// }
++// }
++// }
++//
++// /**
++// * Wait for all closing processors to finish their tasks.
++// */
++// public void waitforAllClosed() throws FileNodeProcessorException {
++// close();
++// while (getClosingBufferWriteProcessor().size() != 0) {
++// checkAllClosingProcessors();
++// try {
++// Thread.sleep(10);
++// } catch (InterruptedException e) {
++// LOGGER.error("Filenode Processor {} is interrupted when waiting for all closed.", processorName, e);
++// }
++// }
++// }
++//
++//
++// void checkAllClosingProcessors() {
++// Iterator<BufferWriteProcessor> iterator =
++// this.getClosingBufferWriteProcessor().iterator();
++// while (iterator.hasNext()) {
++// BufferWriteProcessor processor = iterator.next();
++// try {
++// if (processor.getCloseFuture().get(10, TimeUnit.MILLISECONDS)) {
++// //if finished, we can remove it.
++// iterator.remove();
++// }
++// } catch (InterruptedException | ExecutionException e) {
++// LOGGER.error("Close bufferwrite processor {} failed.", processor.getProcessorName(), e);
++// } catch (TimeoutException e) {
++// //do nothing.
++// }
++// }
++// this.getClosingBufferWriteProcessor().reset();
++// }
++//}
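Note on the retired block above: every merge status transition in the old FileNodeProcessor went through one crash-safety pattern — serialize the whole FileNodeProcessorStore to a restore file under a lock, and on startup either deserialize that file or fall back to an empty store. A minimal sketch of that pattern, reusing only the serialize/deSerialize API visible above; the class and field names here are illustrative, not part of this commit:

    import java.io.File;
    import java.io.FileInputStream;
    import java.io.FileOutputStream;
    import java.io.IOException;

    class RestoreFileSketch {

      private final Object restoreLock = new Object();
      private final String restorePath;

      RestoreFileSketch(String restorePath) {
        this.restorePath = restorePath;
      }

      // Called on every status transition: persist the full store before proceeding.
      void writeStore(FileNodeProcessorStore store) throws IOException {
        synchronized (restoreLock) {
          try (FileOutputStream out = new FileOutputStream(restorePath)) {
            store.serialize(out);
          }
        }
      }

      // Called on startup: an absent or empty restore file means "start fresh".
      FileNodeProcessorStore readStore() throws IOException {
        synchronized (restoreLock) {
          File f = new File(restorePath);
          if (!f.exists() || f.length() == 0) {
            return null; // the real readStoreFromDisk() builds a default store here
          }
          try (FileInputStream in = new FileInputStream(f)) {
            return FileNodeProcessorStore.deSerialize(in);
          }
        }
      }
    }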
diff --cc iotdb/src/main/java/org/apache/iotdb/db/engine/filenodeV2/FileNodeManagerV2.java
index 2d63912,dcd3924..541ad15
--- a/iotdb/src/main/java/org/apache/iotdb/db/engine/filenodeV2/FileNodeManagerV2.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/engine/filenodeV2/FileNodeManagerV2.java
@@@ -28,15 -28,14 +28,15 @@@ import java.util.concurrent.ConcurrentH
import org.apache.commons.io.FileUtils;
import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
--import org.apache.iotdb.db.engine.filenode.FileNodeProcessor;
import org.apache.iotdb.db.engine.filenode.TsFileResource;
import org.apache.iotdb.db.engine.querycontext.QueryDataSourceV2;
import org.apache.iotdb.db.exception.FileNodeManagerException;
import org.apache.iotdb.db.exception.FileNodeProcessorException;
import org.apache.iotdb.db.exception.PathErrorException;
++import org.apache.iotdb.db.exception.ProcessorException;
import org.apache.iotdb.db.exception.StartupException;
import org.apache.iotdb.db.metadata.MManager;
+import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
import org.apache.iotdb.db.query.context.QueryContext;
import org.apache.iotdb.db.service.IService;
import org.apache.iotdb.db.service.ServiceType;
@@@ -115,7 -114,7 +115,7 @@@ public class FileNodeManagerV2 implemen
private FileNodeProcessorV2 getProcessor(String devicePath)
-- throws FileNodeManagerException {
++ throws FileNodeManagerException, ProcessorException {
String storageGroup = "";
try {
// return the storage group name
@@@ -217,7 -216,7 +217,7 @@@
}
private void delete(String processorName,
-- Iterator<Entry<String, FileNodeProcessor>> processorIterator)
++ Iterator<Entry<String, FileNodeProcessorV2>> processorIterator)
throws FileNodeManagerException {
// TODO
}
@@@ -245,7 -244,7 +245,7 @@@
* query data.
*/
public QueryDataSourceV2 query(SingleSeriesExpression seriesExpression, QueryContext context)
-- throws FileNodeManagerException {
++ throws FileNodeManagerException, ProcessorException {
String deviceId = seriesExpression.getSeriesPath().getDevice();
String measurementId = seriesExpression.getSeriesPath().getMeasurement();
FileNodeProcessorV2 fileNodeProcessor = getProcessor(deviceId);
@@@ -340,7 -339,7 +340,7 @@@
*/
public void addTimeSeries(Path path, TSDataType dataType, TSEncoding encoding,
CompressionType compressor,
-- Map<String, String> props) throws FileNodeManagerException {
++ Map<String, String> props) throws FileNodeManagerException, ProcessorException {
FileNodeProcessorV2 fileNodeProcessor = getProcessor(path.getFullPath());
fileNodeProcessor.addTimeSeries(path.getMeasurement(), dataType, encoding, compressor, props);
}
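Since getProcessor(), query() and addTimeSeries() now also declare ProcessorException, every call site has to widen its catch or throws clause. A hedged sketch of an adapted caller; the wrapper class and its error policy are assumptions, and only the import paths visible in this diff are certain (the SingleSeriesExpression path is assumed):

    import org.apache.iotdb.db.engine.filenodeV2.FileNodeManagerV2;
    import org.apache.iotdb.db.engine.querycontext.QueryDataSourceV2;
    import org.apache.iotdb.db.exception.FileNodeManagerException;
    import org.apache.iotdb.db.exception.ProcessorException;
    import org.apache.iotdb.db.query.context.QueryContext;
    import org.apache.iotdb.tsfile.read.expression.impl.SingleSeriesExpression;

    class QueryCallerSketch {

      QueryDataSourceV2 queryOrNull(SingleSeriesExpression expression, QueryContext context) {
        try {
          return FileNodeManagerV2.getInstance().query(expression, context);
        } catch (FileNodeManagerException | ProcessorException e) {
          // both are checked exceptions after this commit; real callers will
          // usually rethrow them wrapped in their own exception type
          return null;
        }
      }
    }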
diff --cc iotdb/src/main/java/org/apache/iotdb/db/engine/filenodeV2/FileNodeProcessorV2.java
index 12fa2d9,0b03327..f958c1c
--- a/iotdb/src/main/java/org/apache/iotdb/db/engine/filenodeV2/FileNodeProcessorV2.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/engine/filenodeV2/FileNodeProcessorV2.java
@@@ -96,7 -93,7 +96,7 @@@ public class FileNodeProcessorV2
private VersionController versionController;
-- public FileNodeProcessorV2(String baseDir, String storageGroupName) throws FileNodeProcessorException {
++ public FileNodeProcessorV2(String baseDir, String storageGroupName) throws ProcessorException {
this.storageGroupName = storageGroupName;
lock = new ReentrantReadWriteLock();
closeFileNodeCondition = lock.writeLock().newCondition();
@@@ -122,43 -119,10 +122,42 @@@
this.fileSchema = constructFileSchema(storageGroupName);
}
- // TODO: Jiang Tian
- private void recovery(){
+ private void recovery() throws ProcessorException {
+ List<String> tsfiles = new ArrayList<>();
+ List<String> fileFolders = directoryManager.getAllTsFileFolders();
+ for (String baseDir: fileFolders) {
+ File fileFolder = new File(baseDir, storageGroupName);
+ if (!fileFolder.exists()) {
+ continue;
+ }
+ for (File tsfile: fileFolder.listFiles()) {
+ tsfiles.add(tsfile.getPath());
+ }
+ }
+
+// Collections.sort(tsfiles, );
+
+ for (String tsfile: tsfiles) {
+ TsFileResourceV2 tsFileResource = new TsFileResourceV2(new File(tsfile));
+ SeqTsFileRecoverPerformer recoverPerformer = new SeqTsFileRecoverPerformer(storageGroupName + "-", fileSchema, versionController, tsFileResource);
+ recoverPerformer.recover();
+ }
+
+ tsfiles.clear();
+ String unseqFileFolder = IoTDBDescriptor.getInstance().getConfig().getOverflowDataDir();
+ File fileFolder = new File(unseqFileFolder, storageGroupName);
+ if (!fileFolder.exists()) {
+ return;
+ }
+ for (File unseqFile: fileFolder.listFiles()) {
+ tsfiles.add(unseqFile.getPath());
+ }
+
- for
+
}
+
+
private FileSchema constructFileSchema(String storageGroupName) {
List<MeasurementSchema> columnSchemaList;
columnSchemaList = mManager.getSchemaForFileName(storageGroupName);
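The new recovery() above leaves the TsFile ordering open ("Collections.sort(tsfiles, );" is commented out), and the unsequence branch stops after collecting file paths. If the sequence files keep the "<createTime><separator><suffix>" naming that the retired merge code produced, one plausible comparator looks like the sketch below; that naming assumption, and the helper itself, are not part of this commit:

    import java.io.File;
    import java.util.List;
    import org.apache.iotdb.db.engine.bufferwrite.FileNodeConstants;

    class TsFileOrderSketch {

      // Sort recovered TsFile paths by the numeric create-time prefix of their names.
      static void sortByCreateTime(List<String> tsfiles) {
        tsfiles.sort((a, b) -> Long.compare(createTime(a), createTime(b)));
      }

      private static long createTime(String path) {
        String name = new File(path).getName();
        // assumes "<createTime><BUFFERWRITE_FILE_SEPARATOR><suffix>" and that the
        // separator is safe to use as a split() regex
        return Long.parseLong(name.split(FileNodeConstants.BUFFERWRITE_FILE_SEPARATOR)[0]);
      }
    }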
diff --cc iotdb/src/main/java/org/apache/iotdb/db/engine/memcontrol/FlushPartialPolicy.java
index 5bef4b5,5bef4b5..54ccba2
--- a/iotdb/src/main/java/org/apache/iotdb/db/engine/memcontrol/FlushPartialPolicy.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/engine/memcontrol/FlushPartialPolicy.java
@@@ -20,7 -20,7 +20,8 @@@ package org.apache.iotdb.db.engine.memc
import org.apache.iotdb.db.concurrent.ThreadName;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
--import org.apache.iotdb.db.engine.filenode.FileNodeManager;
++import org.apache.iotdb.db.engine.filenodeV2.FileNodeManagerV2;
++import org.apache.iotdb.db.exception.FileNodeManagerException;
import org.apache.iotdb.db.utils.MemUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@@ -60,7 -60,7 +61,11 @@@ public class FlushPartialPolicy impleme
private Thread createWorkerThread() {
return new Thread(() -> {
-- FileNodeManager.getInstance().forceFlush(BasicMemController.UsageLevel.SAFE);
++ try {
++ FileNodeManagerV2.getInstance().syncCloseAllProcessor();
++ } catch (FileNodeManagerException e) {
++ LOGGER.error("sync close all file node processor failed", e);
++ }
try {
Thread.sleep(sleepInterval);
} catch (InterruptedException e) {
diff --cc iotdb/src/main/java/org/apache/iotdb/db/engine/memcontrol/ForceFLushAllPolicy.java
index 14cc637,14cc637..423e63c
--- a/iotdb/src/main/java/org/apache/iotdb/db/engine/memcontrol/ForceFLushAllPolicy.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/engine/memcontrol/ForceFLushAllPolicy.java
@@@ -19,7 -19,7 +19,8 @@@
package org.apache.iotdb.db.engine.memcontrol;
import org.apache.iotdb.db.concurrent.ThreadName;
--import org.apache.iotdb.db.engine.filenode.FileNodeManager;
++import org.apache.iotdb.db.engine.filenodeV2.FileNodeManagerV2;
++import org.apache.iotdb.db.exception.FileNodeManagerException;
import org.apache.iotdb.db.utils.MemUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@@ -55,7 -55,7 +56,13 @@@ public class ForceFLushAllPolicy implem
private Thread createWorkerThread() {
return new Thread(() ->
-- FileNodeManager.getInstance().forceFlush(BasicMemController.UsageLevel.DANGEROUS),
++ {
++ try {
++ FileNodeManagerV2.getInstance().syncCloseAllProcessor();
++ } catch (FileNodeManagerException e) {
++ logger.error("sync close all file node processor failed", e);
++ }
++ },
ThreadName.FORCE_FLUSH_ALL_POLICY.getName());
}
}
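FlushPartialPolicy and ForceFLushAllPolicy now wrap the identical try/catch around FileNodeManagerV2.getInstance().syncCloseAllProcessor(). If more policies follow, that body could live in one shared helper; a sketch of such a helper, which does not exist in this commit:

    import org.apache.iotdb.db.engine.filenodeV2.FileNodeManagerV2;
    import org.apache.iotdb.db.exception.FileNodeManagerException;
    import org.slf4j.Logger;

    final class FlushPolicySketch {

      private FlushPolicySketch() {
      }

      // The body both worker threads share: closing every processor also flushes it.
      static void syncCloseAll(Logger logger) {
        try {
          FileNodeManagerV2.getInstance().syncCloseAllProcessor();
        } catch (FileNodeManagerException e) {
          logger.error("sync close all file node processor failed", e);
        }
      }
    }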
diff --cc iotdb/src/main/java/org/apache/iotdb/db/engine/overflow/io/OverflowMemtable.java
index dc83333,dc83333..0000000
deleted file mode 100644,100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/engine/overflow/io/OverflowMemtable.java
+++ /dev/null
@@@ -1,111 -1,111 +1,0 @@@
--/**
-- * Licensed to the Apache Software Foundation (ASF) under one
-- * or more contributor license agreements. See the NOTICE file
-- * distributed with this work for additional information
-- * regarding copyright ownership. The ASF licenses this file
-- * to you under the Apache License, Version 2.0 (the
-- * "License"); you may not use this file except in compliance
-- * with the License. You may obtain a copy of the License at
-- *
-- * http://www.apache.org/licenses/LICENSE-2.0
-- *
-- * Unless required by applicable law or agreed to in writing,
-- * software distributed under the License is distributed on an
-- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-- * KIND, either express or implied. See the License for the
-- * specific language governing permissions and limitations
-- * under the License.
-- */
--package org.apache.iotdb.db.engine.overflow.io;
--
--import java.util.HashMap;
--import java.util.Map;
--import org.apache.iotdb.db.engine.memtable.IMemTable;
--import org.apache.iotdb.db.engine.memtable.PrimitiveMemTable;
--import org.apache.iotdb.db.engine.querycontext.ReadOnlyMemChunk;
--import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
--import org.apache.iotdb.tsfile.file.metadata.statistics.LongStatistics;
--import org.apache.iotdb.tsfile.write.record.TSRecord;
--import org.apache.iotdb.tsfile.write.record.datapoint.DataPoint;
--
--/**
-- * This class is used to store and query all overflow data in memory.<br>
-- */
--public class OverflowMemtable extends PrimitiveMemTable {
--
-- /**
-- * store update and delete data
-- */
-- private Map<String, Map<String, LongStatistics>> indexTrees;
--
-- /**
-- * store insert data
-- */
-- private IMemTable memTable;
--
-- public OverflowMemtable() {
-- indexTrees = new HashMap<>();
-- memTable = new PrimitiveMemTable();
-- }
--
-- @Override
-- public void insert(TSRecord tsRecord) {
-- for (DataPoint dataPoint : tsRecord.dataPointList) {
-- memTable.write(tsRecord.deviceId, dataPoint.getMeasurementId(), dataPoint.getType(),
-- tsRecord.time,
-- dataPoint.getValue().toString());
-- }
-- }
--
-- /**
-- * @deprecated update time series data
-- */
-- @Deprecated
-- public void update(String deviceId, String measurementId, long startTime, long endTime,
-- TSDataType dataType,
-- byte[] value) {
-- if (!indexTrees.containsKey(deviceId)) {
-- indexTrees.put(deviceId, new HashMap<>());
-- }
-- if (!indexTrees.get(deviceId).containsKey(measurementId)) {
-- indexTrees.get(deviceId).put(measurementId, new LongStatistics());
-- }
-- indexTrees.get(deviceId).get(measurementId).updateStats(startTime, endTime);
-- }
--
-- public void delete(String deviceId, String measurementId, long timestamp, boolean isFlushing) {
-- super.delete(deviceId, measurementId, timestamp);
-- }
--
-- public ReadOnlyMemChunk queryOverflowInsertInMemory(String deviceId, String measurementId,
-- TSDataType dataType, Map<String, String> props) {
-- return super.query(deviceId, measurementId, dataType, props);
-- }
--
-- public boolean isEmptyOfOverflowSeriesMap() {
-- return super.isEmpty();
-- }
--
--// public Map<String, Map<String, LongStatistics>> getOverflowSeriesMap() {
--// return super;
--// }
--
-- public boolean isEmptyOfMemTable() {
-- return memTable.isEmpty();
-- }
--
-- public IMemTable getMemTabale() {
-- return memTable;
-- }
--
-- public long getSize() {
-- // TODO: calculate the size of this overflow support
-- return 0;
-- }
--
-- @Override
-- public void clear() {
--// indexTrees.clear();
-- super.clear();
-- }
--}
diff --cc iotdb/src/main/java/org/apache/iotdb/db/engine/overflow/io/OverflowProcessor.java
index 9bc42d0,ebbcfc4..0000000
deleted file mode 100644,100644
--- a/iotdb/src/main/java/org/apache/iotdb/db/engine/overflow/io/OverflowProcessor.java
+++ /dev/null
@@@ -1,820 -1,819 +1,0 @@@
--/**
-- * Licensed to the Apache Software Foundation (ASF) under one
-- * or more contributor license agreements. See the NOTICE file
-- * distributed with this work for additional information
-- * regarding copyright ownership. The ASF licenses this file
-- * to you under the Apache License, Version 2.0 (the
-- * "License"); you may not use this file except in compliance
-- * with the License. You may obtain a copy of the License at
-- *
-- * http://www.apache.org/licenses/LICENSE-2.0
-- *
-- * Unless required by applicable law or agreed to in writing,
-- * software distributed under the License is distributed on an
-- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-- * KIND, either express or implied. See the License for the
-- * specific language governing permissions and limitations
-- * under the License.
-- */
--package org.apache.iotdb.db.engine.overflow.io;
--
--import static org.apache.iotdb.db.conf.IoTDBConstant.PATH_SEPARATOR;
--
--import java.io.File;
--import java.io.IOException;
--import java.time.Instant;
--import java.time.ZonedDateTime;
--import java.util.ArrayList;
--import java.util.Arrays;
--import java.util.Collections;
--import java.util.List;
--import java.util.Map;
--import java.util.Objects;
--import java.util.concurrent.ExecutionException;
--import java.util.concurrent.Future;
--import java.util.concurrent.atomic.AtomicLong;
--import java.util.concurrent.locks.ReentrantLock;
--import org.apache.iotdb.db.conf.IoTDBConfig;
--import org.apache.iotdb.db.conf.IoTDBDescriptor;
--import org.apache.iotdb.db.engine.Processor;
--import org.apache.iotdb.db.engine.bufferwrite.Action;
--import org.apache.iotdb.db.engine.bufferwrite.FileNodeConstants;
--import org.apache.iotdb.db.engine.memcontrol.BasicMemController;
--import org.apache.iotdb.db.engine.memcontrol.BasicMemController.UsageLevel;
--import org.apache.iotdb.db.engine.memtable.IMemTable;
--import org.apache.iotdb.db.engine.memtable.MemSeriesLazyMerger;
--import org.apache.iotdb.db.engine.memtable.MemTableFlushCallBack;
--import org.apache.iotdb.db.engine.memtable.MemTablePool;
--import org.apache.iotdb.db.engine.memtable.PrimitiveMemTable;
--import org.apache.iotdb.db.engine.modification.Deletion;
--import org.apache.iotdb.db.engine.modification.ModificationFile;
--import org.apache.iotdb.db.engine.pool.FlushPoolManager;
--import org.apache.iotdb.db.engine.querycontext.MergeSeriesDataSource;
--import org.apache.iotdb.db.engine.querycontext.OverflowInsertFile;
--import org.apache.iotdb.db.engine.querycontext.OverflowSeriesDataSource;
--import org.apache.iotdb.db.engine.querycontext.ReadOnlyMemChunk;
--import org.apache.iotdb.db.engine.version.VersionController;
--import org.apache.iotdb.db.exception.OverflowProcessorException;
--import org.apache.iotdb.db.exception.ProcessorException;
--import org.apache.iotdb.db.monitor.collector.MemTableWriteTimeCost;
--import org.apache.iotdb.db.qp.constant.DatetimeUtils;
--import org.apache.iotdb.db.query.context.QueryContext;
--import org.apache.iotdb.db.query.control.FileReaderManager;
--import org.apache.iotdb.db.utils.ImmediateFuture;
--import org.apache.iotdb.db.utils.MemUtils;
--import org.apache.iotdb.db.writelog.node.WriteLogNode;
- import org.apache.iotdb.db.writelog.recover.UnseqTsFileRecoverPerformer;
--import org.apache.iotdb.tsfile.common.conf.TSFileConfig;
--import org.apache.iotdb.tsfile.file.metadata.ChunkMetaData;
--import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
--import org.apache.iotdb.tsfile.read.common.Path;
--import org.apache.iotdb.tsfile.utils.BytesUtils;
--import org.apache.iotdb.tsfile.utils.Pair;
--import org.apache.iotdb.tsfile.write.record.TSRecord;
--import org.apache.iotdb.tsfile.write.schema.FileSchema;
--import org.apache.iotdb.tsfile.write.writer.TsFileIOWriter;
--import org.slf4j.Logger;
--import org.slf4j.LoggerFactory;
--
--public class OverflowProcessor extends Processor {
--
-- private static final Logger LOGGER = LoggerFactory.getLogger(OverflowProcessor.class);
-- private static final IoTDBConfig TsFileDBConf = IoTDBDescriptor.getInstance().getConfig();
-- private OverflowResource workResource;
-- private OverflowResource mergeResource;
--
-- private List<IMemTable> overflowFlushMemTables = new ArrayList<>();
-- private IMemTable workSupport;
--// private OverflowMemtable flushSupport;
-- private long flushId = -1;
-- private volatile Future<Boolean> flushFuture = new ImmediateFuture<>(true);
-- private volatile boolean isMerge;
-- private int valueCount;
-- private String parentPath;
-- private long lastFlushTime = -1;
-- private AtomicLong dataPathCount = new AtomicLong();
-- private ReentrantLock queryFlushLock = new ReentrantLock();
--
-- private Action overflowFlushAction;
-- private Action filenodeFlushAction;
-- private FileSchema fileSchema;
--
-- private long memThreshold = TSFileConfig.groupSizeInByte;
-- private AtomicLong memSize = new AtomicLong();
--
-- private VersionController versionController;
--
-- private boolean isClosed = true;
-- private boolean isFlush = false;
--
-- public OverflowProcessor(String processorName, Map<String, Action> parameters,
-- FileSchema fileSchema, VersionController versionController)
-- throws ProcessorException {
-- super(processorName);
-- this.fileSchema = fileSchema;
-- this.versionController = versionController;
-- String overflowDirPath = TsFileDBConf.getOverflowDataDir();
-- if (overflowDirPath.length() > 0
-- && overflowDirPath.charAt(overflowDirPath.length() - 1) != File.separatorChar) {
-- overflowDirPath = overflowDirPath + File.separatorChar;
-- }
-- this.parentPath = overflowDirPath + processorName;
--
-- overflowFlushAction = parameters.get(FileNodeConstants.OVERFLOW_FLUSH_ACTION);
-- filenodeFlushAction = parameters
-- .get(FileNodeConstants.FILENODE_PROCESSOR_FLUSH_ACTION);
-- reopen();
-- try {
-- getLogNode();
-- } catch (IOException e) {
-- throw new ProcessorException(e);
-- }
-- }
--
-- public void reopen() throws ProcessorException {
-- if (!isClosed) {
-- return;
-- }
-- // recover file
-- File processorDataDir = new File(parentPath);
-- if (!processorDataDir.exists()) {
-- processorDataDir.mkdirs();
-- }
-- recover(processorDataDir);
--
-- // memory
-- if (workSupport == null) {
-- workSupport = new PrimitiveMemTable();
-- } else {
-- workSupport.clear();
-- }
-- isClosed = false;
-- isFlush = false;
-- }
--
-- public void checkOpen() throws OverflowProcessorException {
-- if (isClosed) {
-- throw new OverflowProcessorException("OverflowProcessor already closed");
-- }
-- }
--
--
-- private void recover(File parentFile) throws ProcessorException {
-- String[] subFilePaths = clearFile(parentFile.list());
--
-- try {
-- if (subFilePaths.length == 0) {
-- workResource = new OverflowResource(parentPath,
-- String.valueOf(dataPathCount.getAndIncrement()), versionController, processorName);
-- } else if (subFilePaths.length == 1) {
-- long count = Long.parseLong(subFilePaths[0]);
-- dataPathCount.addAndGet(count + 1);
-- workResource = new OverflowResource(parentPath, String.valueOf(count), versionController,
-- processorName);
-- LOGGER.info("The overflow processor {} recover from work status.", getProcessorName());
-- } else {
-- long count1 = Long.parseLong(subFilePaths[0]);
-- long count2 = Long.parseLong(subFilePaths[1]);
-- if (count1 > count2) {
-- long temp = count1;
-- count1 = count2;
-- count2 = temp;
-- }
-- dataPathCount.addAndGet(count2 + 1);
-- // work dir > merge dir
-- workResource = new OverflowResource(parentPath, String.valueOf(count2), versionController,
-- processorName);
-- mergeResource = new OverflowResource(parentPath, String.valueOf(count1), versionController,
-- processorName);
-- LOGGER.info("The overflow processor {} recover from merge status.", getProcessorName());
-- }
-- } catch (IOException e) {
-- throw new ProcessorException(e);
-- }
--
- UnseqTsFileRecoverPerformer recoverPerformer =
- new UnseqTsFileRecoverPerformer(workResource, fileSchema);
- recoverPerformer.recover();
-// UnseqTsFileRecoverPerformer recoverPerformer =
-// new UnseqTsFileRecoverPerformer(workResource, fileSchema);
-// recoverPerformer.recover();
-- }
--
-- private String[] clearFile(String[] subFilePaths) {
-- // just clear the files whose name are number.
-- List<String> files = new ArrayList<>();
-- for (String file : subFilePaths) {
-- try {
-- Long.valueOf(file);
-- files.add(file);
-- } catch (NumberFormatException e) {
-- // ignore the exception, if the name of file is not a number.
--
-- }
-- }
-- return files.toArray(new String[files.size()]);
-- }
--
-- /**
-- * insert one time-series record
-- */
-- public void insert(TSRecord tsRecord) throws IOException {
-- MemTableWriteTimeCost.getInstance().init();
-- try {
-- checkOpen();
-- } catch (OverflowProcessorException e) {
-- throw new IOException(e);
-- }
-- // memory control
-- long memUage = MemUtils.getRecordSize(tsRecord);
-- UsageLevel usageLevel = BasicMemController.getInstance().acquireUsage(this, memUage);
-- switch (usageLevel) {
-- case SAFE:
-- // insert data
-- workSupport.insert(tsRecord);
-- valueCount++;
-- // check asyncFlush
-- memUage = memSize.addAndGet(memUage);
-- if (memUage > memThreshold) {
-- if (LOGGER.isWarnEnabled()) {
-- LOGGER.warn("The usage of memory {} in overflow processor {} reaches the threshold {}",
-- MemUtils.bytesCntToStr(memUage), getProcessorName(),
-- MemUtils.bytesCntToStr(memThreshold));
-- }
-- flush();
-- }
-- break;
-- case WARNING:
-- // insert data
-- workSupport.insert(tsRecord);
-- valueCount++;
-- // asyncFlush
-- memSize.addAndGet(memUage);
-- flush();
-- break;
-- case DANGEROUS:
-- throw new IOException("The insertion is rejected because dangerous memory level hit");
-- }
--
--
-- }
--
-- /**
-- * @deprecated update one time-series data which time range is from startTime from endTime.
-- */
-- @Deprecated
-- public void update(String deviceId, String measurementId, long startTime, long endTime,
-- TSDataType type, byte[] value) {
--// workSupport.update(deviceId, measurementId, startTime, endTime, type, value);
--// valueCount++;
-- throw new UnsupportedOperationException("update has been deprecated");
-- }
--
-- /**
-- * @deprecated this function need to be re-implemented.
-- */
-- @Deprecated
-- public void update(String deviceId, String measurementId, long startTime, long endTime,
-- TSDataType type, String value) {
--// workSupport.update(deviceId, measurementId, startTime, endTime, type,
--// convertStringToBytes(type, value));
--// valueCount++;
-- throw new UnsupportedOperationException("update has been deprecated");
-- }
--
-- private byte[] convertStringToBytes(TSDataType type, String o) {
-- switch (type) {
-- case INT32:
-- return BytesUtils.intToBytes(Integer.valueOf(o));
-- case INT64:
-- return BytesUtils.longToBytes(Long.valueOf(o));
-- case BOOLEAN:
-- return BytesUtils.boolToBytes(Boolean.valueOf(o));
-- case FLOAT:
-- return BytesUtils.floatToBytes(Float.valueOf(o));
-- case DOUBLE:
-- return BytesUtils.doubleToBytes(Double.valueOf(o));
-- case TEXT:
-- return BytesUtils.stringToBytes(o);
-- default:
-- LOGGER.error("Unsupport data type: {}", type);
-- throw new UnsupportedOperationException("Unsupport data type:" + type);
-- }
-- }
--
-- /**
-- * Delete data of a timeseries whose time ranges from 0 to timestamp.
-- *
-- * @param deviceId the deviceId of the timeseries.
-- * @param measurementId the measurementId of the timeseries.
-- * @param timestamp the upper-bound of deletion time.
-- * @param version the version number of this deletion.
-- * @param updatedModFiles add successfully updated Modification files to the list, and abort them
-- * when exception is raised
-- */
-- public void delete(String deviceId, String measurementId, long timestamp, long version,
-- List<ModificationFile> updatedModFiles) throws IOException {
-- try {
-- checkOpen();
-- } catch (OverflowProcessorException e) {
-- throw new IOException(e);
-- }
-- workResource.delete(deviceId, measurementId, timestamp, version, updatedModFiles);
-- workSupport.delete(deviceId, measurementId, timestamp);
-- if (isFlush()) {
-- mergeResource.delete(deviceId, measurementId, timestamp, version, updatedModFiles);
-- for (IMemTable memTable : overflowFlushMemTables) {
-- if (memTable.containSeries(deviceId, measurementId)) {
-- memTable.delete(new Deletion(deviceId + PATH_SEPARATOR + measurementId, 0, timestamp));
-- }
-- }
-- }
-- }
--
-- /**
-- * query all overflow data, including insert data in memory, insert data in files, and
-- * update/delete data in memory and in files.
-- *
-- * @return OverflowSeriesDataSource
-- */
-- public OverflowSeriesDataSource query(String deviceId, String measurementId,
-- TSDataType dataType, Map<String, String> props, QueryContext context)
-- throws IOException {
-- try {
-- checkOpen();
-- } catch (OverflowProcessorException e) {
-- throw new IOException(e);
-- }
-- queryFlushLock.lock();
-- try {
-- // query insert data in memory and unseqTsFiles
-- // memory
-- ReadOnlyMemChunk insertInMem = queryOverflowInsertInMemory(deviceId, measurementId,
-- dataType, props);
-- List<OverflowInsertFile> overflowInsertFileList = new ArrayList<>();
-- // work file
-- Pair<String, List<ChunkMetaData>> insertInDiskWork = queryWorkDataInOverflowInsert(deviceId,
-- measurementId,
-- dataType, context);
-- if (insertInDiskWork.left != null) {
-- overflowInsertFileList
-- .add(0, new OverflowInsertFile(insertInDiskWork.left,
-- insertInDiskWork.right));
-- }
-- // merge file
-- Pair<String, List<ChunkMetaData>> insertInDiskMerge = queryMergeDataInOverflowInsert(deviceId,
-- measurementId, dataType, context);
-- if (insertInDiskMerge.left != null) {
-- overflowInsertFileList
-- .add(0, new OverflowInsertFile(insertInDiskMerge.left
-- , insertInDiskMerge.right));
-- }
-- // assemble the data source from memory and file data
-- return new OverflowSeriesDataSource(new Path(deviceId + "." + measurementId), dataType,
-- overflowInsertFileList, insertInMem);
-- } finally {
-- queryFlushLock.unlock();
-- }
-- }
--
-- /**
-- * query insert data in the memory table. While flushing, merge the work memory table with the
-- * flushing memory tables.
-- *
-- * @return insert data in SeriesChunkInMemTable
-- */
-- private ReadOnlyMemChunk queryOverflowInsertInMemory(String deviceId, String measurementId,
-- TSDataType dataType, Map<String, String> props) {
--
-- MemSeriesLazyMerger memSeriesLazyMerger = new MemSeriesLazyMerger();
-- queryFlushLock.lock();
-- try {
-- if (!overflowFlushMemTables.isEmpty() && isFlush()) {
-- for (int i = overflowFlushMemTables.size() - 1; i >= 0; i--) {
-- memSeriesLazyMerger.addMemSeries(
-- overflowFlushMemTables.get(i).query(deviceId, measurementId, dataType, props));
-- }
-- }
-- memSeriesLazyMerger
-- .addMemSeries(workSupport.query(deviceId, measurementId, dataType, props));
-- // memSeriesLazyMerger has handled the props,
-- // so we do not need to handle it again in the following readOnlyMemChunk
-- return new ReadOnlyMemChunk(dataType, memSeriesLazyMerger, Collections.emptyMap());
-- } finally {
-- queryFlushLock.unlock();
-- }
-- }
--
-- /**
-- * Get the insert data with WORK status in the unseqTsFile.
-- *
-- * @param deviceId deviceId of the target time-series
-- * @param measurementId measurementId of the target time-series
-- * @param dataType data type of the target time-series
-- * @return the seriesPath of unseqTsFile, List of TimeSeriesChunkMetaData for the specified
-- * time-series.
-- */
-- private Pair<String, List<ChunkMetaData>> queryWorkDataInOverflowInsert(String deviceId,
-- String measurementId, TSDataType dataType, QueryContext context) {
-- return new Pair<>(
-- workResource.getInsertFilePath(),
-- workResource.getInsertMetadatas(deviceId, measurementId, dataType, context));
-- }
--
-- /**
-- * Get all merge data in the unseqTsFile and overflowFile.
-- *
-- * @return MergeSeriesDataSource
-- */
-- public MergeSeriesDataSource queryMerge(String deviceId, String measurementId,
-- TSDataType dataType, QueryContext context) {
-- Pair<String, List<ChunkMetaData>> mergeInsert = queryMergeDataInOverflowInsert(deviceId,
-- measurementId,
-- dataType, context);
-- return new MergeSeriesDataSource(new OverflowInsertFile(mergeInsert.left, mergeInsert.right));
-- }
--
-- public OverflowSeriesDataSource queryMerge(String deviceId, String measurementId,
-- TSDataType dataType, boolean isMerge, QueryContext context) {
-- Pair<String, List<ChunkMetaData>> mergeInsert = queryMergeDataInOverflowInsert(deviceId,
-- measurementId,
-- dataType, context);
-- OverflowSeriesDataSource overflowSeriesDataSource = new OverflowSeriesDataSource(
-- new Path(deviceId + "." + measurementId));
-- overflowSeriesDataSource.setReadableMemChunk(null);
-- overflowSeriesDataSource
-- .setOverflowInsertFileList(
-- Arrays.asList(new OverflowInsertFile(mergeInsert.left, mergeInsert.right)));
-- return overflowSeriesDataSource;
-- }
--
-- /**
-- * Get the insert data with MERGE status in the unseqTsFile
-- *
-- * @return the seriesPath of unseqTsFile, List of TimeSeriesChunkMetaData for the specified
-- * time-series.
-- **/
-- private Pair<String, List<ChunkMetaData>> queryMergeDataInOverflowInsert(String deviceId,
-- String measurementId, TSDataType dataType, QueryContext context) {
-- if (!isMerge) {
-- return new Pair<>(null, null);
-- }
-- return new Pair<>(
-- mergeResource.getInsertFilePath(),
-- mergeResource.getInsertMetadatas(deviceId, measurementId, dataType, context));
-- }
--
--
-- private void switchFlushToWork() {
-- LOGGER.info("Overflow Processor {} try to get flushQueryLock for switchFlushToWork", getProcessorName());
-- queryFlushLock.lock();
-- LOGGER.info("Overflow Processor {} get flushQueryLock for switchFlushToWork", getProcessorName());
-- try {
--// flushSupport.clear();
-- workResource.appendMetadatas();
-- isFlush = false;
-- } finally {
-- queryFlushLock.unlock();
-- }
-- }
--
-- public void switchWorkToMerge() throws IOException {
-- if (mergeResource == null) {
-- mergeResource = workResource;
-- workResource = new OverflowResource(parentPath,
-- String.valueOf(dataPathCount.getAndIncrement()), versionController, processorName);
-- }
-- isMerge = true;
-- LOGGER.info("The overflow processor {} switch from WORK to MERGE", getProcessorName());
-- }
--
-- public void switchMergeToWork() throws IOException {
-- if (mergeResource != null) {
-- FileReaderManager.getInstance().closeFileAndRemoveReader(mergeResource.getInsertFilePath());
-- mergeResource.close();
-- mergeResource.deleteResource();
-- mergeResource = null;
-- }
-- isMerge = false;
-- LOGGER.info("The overflow processor {} switch from MERGE to WORK", getProcessorName());
-- }
--
-- public boolean isMerge() {
-- return isMerge;
-- }
--
-- public boolean isFlush() {
-- return isFlush;
-- }
--
-- private void removeFlushedMemTable(IMemTable memTable, TsFileIOWriter overflowIOWriter) {
-- this.writeLock();
-- //TODO check this implementation in BufferWriteProcessor
-- try {
-- overflowFlushMemTables.remove(memTable);
-- } finally {
-- this.writeUnlock();
-- }
-- }
--
-- private boolean flushTask(String displayMessage, IMemTable currentMemTableToFlush,
-- long flushId, MemTableFlushCallBack removeFlushedMemTable) {
-- boolean result;
-- long flushStartTime = System.currentTimeMillis();
-- try {
-- LOGGER.info("The overflow processor {} starts flushing {}.", getProcessorName(),
-- displayMessage);
-- // asyncFlush data
-- workResource
-- .flush(fileSchema, currentMemTableToFlush, getProcessorName(), flushId, removeFlushedMemTable);
-- filenodeFlushAction.act();
-- // write-ahead log
-- if (IoTDBDescriptor.getInstance().getConfig().isEnableWal()) {
-- getLogNode().notifyEndFlush();
-- }
-- result = true;
-- } catch (IOException e) {
-- LOGGER.error("Flush overflow processor {} rowgroup to file error in {}. Thread {} exits.",
-- getProcessorName(), displayMessage, Thread.currentThread().getName(), e);
-- result = false;
-- } catch (Exception e) {
-- LOGGER.error("FilenodeFlushAction action failed. Thread {} exits.",
-- Thread.currentThread().getName(), e);
-- result = false;
-- } finally {
-- // switch from asyncFlush to work.
-- switchFlushToWork();
-- }
-- // log asyncFlush time
-- if (LOGGER.isInfoEnabled()) {
-- LOGGER
-- .info("The overflow processor {} ends flushing {}.", getProcessorName(), displayMessage);
-- long flushEndTime = System.currentTimeMillis();
-- LOGGER.info(
-- "The overflow processor {} asyncFlush {}, start time is {}, asyncFlush end time is {}," +
-- " time consumption is {}ms",
-- getProcessorName(), displayMessage,
-- DatetimeUtils.convertMillsecondToZonedDateTime(flushStartTime),
-- DatetimeUtils.convertMillsecondToZonedDateTime(flushEndTime),
-- flushEndTime - flushStartTime);
-- }
-- return result;
-- }
--
-- @Override
-- public synchronized Future<Boolean> flush() throws IOException {
-- // statistic information for asyncFlush
-- if (lastFlushTime > 0 && LOGGER.isInfoEnabled()) {
-- long thisFlushTime = System.currentTimeMillis();
-- ZonedDateTime lastDateTime = ZonedDateTime.ofInstant(Instant.ofEpochMilli(lastFlushTime),
-- IoTDBDescriptor.getInstance().getConfig().getZoneID());
-- ZonedDateTime thisDateTime = ZonedDateTime.ofInstant(Instant.ofEpochMilli(thisFlushTime),
-- IoTDBDescriptor.getInstance().getConfig().getZoneID());
-- LOGGER.info(
-- "The overflow processor {} last asyncFlush time is {}, this asyncFlush time is {},"
-- + " asyncFlush time interval is {}s",
-- getProcessorName(), lastDateTime, thisDateTime,
-- (thisFlushTime - lastFlushTime) / 1000);
-- }
-- lastFlushTime = System.currentTimeMillis();
--// try {
--// flushFuture.get();
--// } catch (InterruptedException | ExecutionException e) {
--// throw new IOException(e);
--// }
-- if (valueCount > 0) {
-- try {
-- // backup newIntervalFile list and emptyIntervalFileNode
-- overflowFlushAction.act();
-- } catch (Exception e) {
-- LOGGER.error("Flush the overflow rowGroup to file faied, when overflowFlushAction act");
-- throw new IOException(e);
-- }
-- if (IoTDBDescriptor.getInstance().getConfig().isEnableWal()) {
-- try {
-- getLogNode().notifyStartFlush();
-- } catch (IOException e) {
-- LOGGER.error("Overflow processor {} encountered an error when notifying log node, {}",
-- getProcessorName(), e);
-- }
-- }
-- BasicMemController.getInstance().releaseUsage(this, memSize.get());
-- memSize.set(0);
-- valueCount = 0;
--
--// long version = versionController.nextVersion();
-- // add the current work memtable to the flushing memtable list
-- overflowFlushMemTables.add(workSupport);
-- IMemTable tmpMemTableToFlush = workSupport;
-- workSupport = MemTablePool.getInstance().getEmptyMemTable(this);
-- flushId++;
-- flushFuture = FlushPoolManager.getInstance().submit(() -> flushTask("asynchronously",
-- tmpMemTableToFlush, flushId, this::removeFlushedMemTable));
--
-- // switch from work to asyncFlush
--// switchWorkToFlush();
--// flushFuture = FlushPoolManager.getInstance().submit(() ->
--// flushTask("asynchronously", walTaskId));
-- } else {
--// flushFuture = new ImmediateFuture(true);
-- LOGGER.info("Nothing data points to be flushed");
-- }
-- return flushFuture;
--
-- }
--
-- @Override
-- public void close() throws OverflowProcessorException {
-- if (isClosed) {
-- return;
-- }
-- LOGGER.info("The overflow processor {} starts setCloseMark operation.", getProcessorName());
-- long closeStartTime = System.currentTimeMillis();
-- // asyncFlush data
-- try {
-- flush().get();
-- } catch (InterruptedException | ExecutionException e) {
-- LOGGER.error("Encounter an interrupt error when waitting for the flushing, "
-- + "the bufferwrite processor is {}.",
-- getProcessorName(), e);
-- Thread.currentThread().interrupt();
-- } catch (IOException e) {
-- throw new OverflowProcessorException(e);
-- }
-- if (LOGGER.isInfoEnabled()) {
-- LOGGER.info("The overflow processor {} ends setCloseMark operation.", getProcessorName());
-- // log setCloseMark time
-- long closeEndTime = System.currentTimeMillis();
-- LOGGER.info(
-- "The setCloseMark operation of overflow processor {} starts at {} and ends at {}."
-- + " It comsumes {}ms.",
-- getProcessorName(), DatetimeUtils.convertMillsecondToZonedDateTime(closeStartTime),
-- DatetimeUtils.convertMillsecondToZonedDateTime(closeEndTime),
-- closeEndTime - closeStartTime);
-- }
-- try {
-- clear();
-- } catch (IOException e) {
-- throw new OverflowProcessorException(e);
-- }
-- isClosed = true;
-- }
--
-- public void clear() throws IOException {
-- if (workResource != null) {
-- workResource.close();
-- workResource = null;
-- }
-- if (mergeResource != null) {
-- mergeResource.close();
-- mergeResource = null;
-- }
-- }
--
-- @Override
-- public boolean canBeClosed() {
-- // TODO: consider merge
-- return !isMerge;
-- }
--
-- @Override
-- public long memoryUsage() {
-- return memSize.get();
-- }
--
-- public String getOverflowRestoreFile() {
-- return workResource.getPositionFilePath();
-- }
--
-- /**
-- * @return The sum of all timeseries's metadata size within this file.
-- */
-- public long getMetaSize() {
-- // TODO : [MemControl] implement this
-- return 0;
-- }
--
-- /**
-- * @return The size of overflow file corresponding to this processor.
-- */
-- public long getFileSize() {
-- return workResource.getInsertFile().length() + memoryUsage();
-- }
--
-- /**
-- * Check whether the current overflow file contains too much metadata or has grown too large.
-- * If so, close the current file and open a new one.
-- */
-- private boolean checkSize() {
-- IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig();
-- long metaSize = getMetaSize();
-- long fileSize = getFileSize();
-- LOGGER.info(
-- "The overflow processor {}, the size of metadata reaches {},"
-- + " the size of file reaches {}.",
-- getProcessorName(), MemUtils.bytesCntToStr(metaSize), MemUtils.bytesCntToStr(fileSize));
-- if (metaSize >= config.getOverflowMetaSizeThreshold()
-- || fileSize >= config.getOverflowFileSizeThreshold()) {
-- LOGGER.info(
-- "The overflow processor {}, size({}) of the file {} reaches threshold {},"
-- + " size({}) of metadata reaches threshold {}.",
-- getProcessorName(), MemUtils.bytesCntToStr(fileSize), workResource.getInsertFilePath(),
-- MemUtils.bytesCntToStr(config.getOverflowFileSizeThreshold()),
-- MemUtils.bytesCntToStr(metaSize),
-- MemUtils.bytesCntToStr(config.getOverflowMetaSizeThreshold()));
-- return true;
-- } else {
-- return false;
-- }
-- }
--
-- public WriteLogNode getLogNode() throws IOException {
-- return workResource.getLogNode();
-- }
--
-- public OverflowResource getWorkResource() {
-- return workResource;
-- }
--
-- @Override
-- public boolean equals(Object o) {
-- return this == o;
-- }
--
-- @Override
-- public int hashCode() {
-- return Objects.hash(super.hashCode());
-- }
--
-- /**
-- * Used for tests. Callers can block on this future to wait for the flush to finish.
-- *
-- * @return the future of the asyncFlush() task.
-- */
-- public Future<Boolean> getFlushFuture() {
-- return flushFuture;
-- }
--
-- /**
-- * Used for tests. Tells when asyncFlush() was last called.
-- *
-- * @return the last asyncFlush() time.
-- */
-- public long getLastFlushTime() {
-- return lastFlushTime;
-- }
--
-- @Override
-- public String toString() {
-- return "OverflowProcessor in " + parentPath;
-- }
--
-- public boolean isClosed() {
-- return isClosed;
-- }
--
--
--// private void switchWorkToFlush() {
--// queryFlushLock.lock();
--// try {
--// Pair<> workSupport;
--// workSupport = new OverflowMemtable();
--// if(isFlushing){
--// // isFlushing = true, indicating an AsyncFlushThread has been running, only add Current overflowInfo
--// // into List.
--//
--//
--// }else {
--// isFlushing = true;
--//// flushFuture = FlushPoolManager.getInstance().submit(() ->
--// flushTask("asynchronously", walTaskId));
--// }
--// } finally {
--// queryFlushLock.unlock();
--// }
--// }
--
--// private List<Pair<OverflowMemtable, OverflowResource>> flushTaskList;
--//
--// private class AsyncFlushThread implements Runnable {
--//
--// @Override
--// public void run() {
--// Pair<OverflowMemtable, OverflowResource> flushInfo;
--// while (true) {
--// queryFlushLock.lock();
--// try {
--// if (flushTaskList.isEmpty()) {
--// // flushTaskList is empty, thus all asyncFlush tasks have done and switch
--// OverflowMemtable temp = flushSupport == null ? new OverflowMemtable() : flushSupport;
--// flushSupport = workSupport;
--// workSupport = temp;
--// isFlushing = true;
--// break;
--// }
--// flushInfo = flushTaskList.remove(0);
--// } finally {
--// queryFlushLock.unlock();
--// }
--// asyncFlush(flushInfo);
--// }
--// }
--// }
--}
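The removal above takes with it the overflow write path's three-level memory control: a record is buffered under SAFE, buffered and then flushed immediately under WARNING, and rejected under DANGEROUS. For readers following the refactoring, a condensed, illustrative sketch of that decision follows; the class and helper names are stand-ins, not the real IoTDB API.

    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicLong;

    // Illustrative stand-in for the removed insert() memory-control logic.
    public class MemControlledBuffer {

      enum UsageLevel { SAFE, WARNING, DANGEROUS }

      private final AtomicLong memSize = new AtomicLong();
      private final long memThreshold = 16 * 1024 * 1024; // assumed 16 MB flush threshold

      public void insert(long recordSize) throws IOException {
        switch (acquireUsage(recordSize)) {
          case SAFE:
            buffer(recordSize);
            if (memSize.addAndGet(recordSize) > memThreshold) {
              flush(); // flush only once the local threshold is crossed
            }
            break;
          case WARNING:
            buffer(recordSize);            // still accept the write,
            memSize.addAndGet(recordSize);
            flush();                       // but flush immediately to relieve pressure
            break;
          case DANGEROUS:
          default:
            throw new IOException("insertion rejected: dangerous memory level");
        }
      }

      private UsageLevel acquireUsage(long size) { return UsageLevel.SAFE; } // stub for the global controller
      private void buffer(long size) { /* append to the work memtable */ }
      private void flush() { /* hand the memtable to the flush pool */ }
    }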
diff --cc iotdb/src/main/java/org/apache/iotdb/db/monitor/StatMonitor.java
index 7888b6f,7888b6f..bb5f04d
--- a/iotdb/src/main/java/org/apache/iotdb/db/monitor/StatMonitor.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/monitor/StatMonitor.java
@@@ -31,7 -31,7 +31,7 @@@ import org.apache.iotdb.db.concurrent.I
import org.apache.iotdb.db.concurrent.ThreadName;
import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
--import org.apache.iotdb.db.engine.filenode.FileNodeManager;
++import org.apache.iotdb.db.engine.filenodeV2.FileNodeManagerV2;
import org.apache.iotdb.db.exception.FileNodeManagerException;
import org.apache.iotdb.db.exception.MetadataArgsErrorException;
import org.apache.iotdb.db.exception.PathErrorException;
@@@ -40,6 -40,6 +40,7 @@@ import org.apache.iotdb.db.metadata.MMa
import org.apache.iotdb.db.monitor.MonitorConstants.FileNodeManagerStatConstants;
import org.apache.iotdb.db.monitor.MonitorConstants.FileNodeProcessorStatConstants;
import org.apache.iotdb.db.monitor.collector.FileSize;
++import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
import org.apache.iotdb.db.service.IService;
import org.apache.iotdb.db.service.ServiceType;
import org.apache.iotdb.tsfile.common.conf.TSFileConfig;
@@@ -349,7 -349,7 +350,7 @@@ public class StatMonitor implements ISe
if (seconds >= statMonitorDetectFreqSec) {
runningTimeMillis = currentTimeMillis;
// delete time-series data
-- FileNodeManager fManager = FileNodeManager.getInstance();
++ FileNodeManagerV2 fManager = FileNodeManagerV2.getInstance();
try {
for (Map.Entry<String, IStatistic> entry : statisticMap.entrySet()) {
for (String statParamName : entry.getValue().getStatParamsHashMap().keySet()) {
@@@ -374,11 -374,11 +375,11 @@@
}
public void insert(Map<String, TSRecord> tsRecordHashMap) {
-- FileNodeManager fManager = FileNodeManager.getInstance();
++ FileNodeManagerV2 fManager = FileNodeManagerV2.getInstance();
int pointNum;
for (Map.Entry<String, TSRecord> entry : tsRecordHashMap.entrySet()) {
try {
-- fManager.insert(entry.getValue(), true);
++ fManager.insert(new InsertPlan(entry.getValue()));
numInsert.incrementAndGet();
pointNum = entry.getValue().dataPointList.size();
numPointsInsert.addAndGet(pointNum);
diff --cc iotdb/src/main/java/org/apache/iotdb/db/monitor/collector/FileSize.java
index f3e3ffe,f3e3ffe..e9ecba2
--- a/iotdb/src/main/java/org/apache/iotdb/db/monitor/collector/FileSize.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/monitor/collector/FileSize.java
@@@ -30,8 -30,8 +30,9 @@@ import java.util.concurrent.atomic.Atom
import org.apache.commons.io.FileUtils;
import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
--import org.apache.iotdb.db.engine.filenode.FileNodeManager;
++import org.apache.iotdb.db.engine.filenodeV2.FileNodeManagerV2;
import org.apache.iotdb.db.exception.FileNodeManagerException;
++import org.apache.iotdb.db.exception.ProcessorException;
import org.apache.iotdb.db.monitor.IStatistic;
import org.apache.iotdb.db.monitor.MonitorConstants;
import org.apache.iotdb.db.monitor.MonitorConstants.FileSizeConstants;
@@@ -54,7 -54,7 +55,7 @@@ public class FileSize implements IStati
private static final Logger LOGGER = LoggerFactory.getLogger(FileSize.class);
private static final long ABNORMAL_VALUE = -1L;
private static final long INIT_VALUE_IF_FILE_NOT_EXIST = 0L;
-- private FileNodeManager fileNodeManager;
++ private FileNodeManagerV2 fileNodeManager;
@Override
public Map<String, TSRecord> getAllStatisticsValue() {
@@@ -80,7 -80,7 +81,7 @@@
fileNodeManager.addTimeSeries(path, TSDataType.valueOf(MonitorConstants.DATA_TYPE_INT64),
TSEncoding.valueOf("RLE"), CompressionType.valueOf(TSFileConfig.compressor),
Collections.emptyMap());
-- } catch (FileNodeManagerException e) {
++ } catch (FileNodeManagerException | ProcessorException e) {
LOGGER.error("Register File Size Stats into fileNodeManager Failed.", e);
}
}
@@@ -114,7 -114,7 +115,7 @@@
}
private FileSize() {
-- fileNodeManager = FileNodeManager.getInstance();
++ fileNodeManager = FileNodeManagerV2.getInstance();
if (config.isEnableStatMonitor()) {
StatMonitor statMonitor = StatMonitor.getInstance();
registerStatMetadata();
diff --cc iotdb/src/main/java/org/apache/iotdb/db/qp/physical/crud/InsertPlan.java
index 24f2f1b,6d58365..806b693
--- a/iotdb/src/main/java/org/apache/iotdb/db/qp/physical/crud/InsertPlan.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/qp/physical/crud/InsertPlan.java
@@@ -26,8 -26,7 +26,9 @@@ import java.util.Objects
import org.apache.iotdb.db.qp.logical.Operator;
import org.apache.iotdb.db.qp.logical.Operator.OperatorType;
import org.apache.iotdb.db.qp.physical.PhysicalPlan;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
import org.apache.iotdb.tsfile.read.common.Path;
++import org.apache.iotdb.tsfile.write.record.TSRecord;
public class InsertPlan extends PhysicalPlan {
@@@ -50,6 -48,6 +51,20 @@@
this.values = new String[] {insertValue};
}
++ public InsertPlan(TSRecord tsRecord) {
++ super(false, OperatorType.INSERT);
++ this.deviceId = tsRecord.deviceId;
++ this.time = tsRecord.time;
++ this.measurements = new String[tsRecord.dataPointList.size()];
++ this.dataTypes = new TSDataType[tsRecord.dataPointList.size()];
++ this.values = new String[tsRecord.dataPointList.size()];
++ for (int i = 0; i < tsRecord.dataPointList.size(); i++) {
++ measurements[i] = tsRecord.dataPointList.get(i).getMeasurementId();
++ dataTypes[i] = tsRecord.dataPointList.get(i).getType();
++ values[i] = tsRecord.dataPointList.get(i).getValue().toString();
++ }
++ }
++
public InsertPlan(String deviceId, long insertTime, String[] measurementList,
String[] insertValues) {
super(false, Operator.OperatorType.INSERT);
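The new InsertPlan(TSRecord) constructor above is what lets TSRecord-producing callers (StatMonitor, LoadDataUtils) feed the V2 insert path. A usage sketch, assuming only the TSRecord and DataPoint APIs that appear elsewhere in this commit:

    // Wrap a TSRecord as an InsertPlan for FileNodeManagerV2.insert().
    TSRecord record = new TSRecord(42L, "root.vehicle.d0")
        .addTuple(DataPoint.getDataPoint(TSDataType.INT32, "s0", "1"))
        .addTuple(DataPoint.getDataPoint(TSDataType.FLOAT, "s1", "2.5"));
    InsertPlan plan = new InsertPlan(record); // copies measurement ids, types and stringified values
    FileNodeManagerV2.getInstance().insert(plan);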
diff --cc iotdb/src/main/java/org/apache/iotdb/db/query/control/QueryResourceManager.java
index f1a570d,f1a570d..20c9018
--- a/iotdb/src/main/java/org/apache/iotdb/db/query/control/QueryResourceManager.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/query/control/QueryResourceManager.java
@@@ -18,22 -18,22 +18,18 @@@
*/
package org.apache.iotdb.db.query.control;
--import java.io.IOException;
import java.util.ArrayList;
--import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
--import org.apache.iotdb.db.engine.filenode.FileNodeManager;
import org.apache.iotdb.db.engine.filenodeV2.FileNodeManagerV2;
--import org.apache.iotdb.db.engine.querycontext.GlobalSortedSeriesDataSource;
--import org.apache.iotdb.db.engine.querycontext.OverflowSeriesDataSource;
import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
import org.apache.iotdb.db.engine.querycontext.QueryDataSourceV2;
import org.apache.iotdb.db.exception.FileNodeManagerException;
++import org.apache.iotdb.db.exception.ProcessorException;
import org.apache.iotdb.db.query.context.QueryContext;
import org.apache.iotdb.tsfile.read.common.Path;
import org.apache.iotdb.tsfile.read.expression.ExpressionType;
@@@ -66,29 -66,29 +62,29 @@@ public class QueryResourceManager
* <p>
* For example, during a query process Q1, given a query sql <sql>select device_1.sensor_1,
* device_1.sensor_2, device_2.sensor_1, device_2.sensor_2</sql>, we will invoke
-- * <code>FileNodeManager.getInstance().beginQuery(device_1)</code> and
-- * <code>FileNodeManager.getInstance().beginQuery(device_2)</code> both once. Although there
++ * <code>FileNodeManagerV2.getInstance().beginQuery(device_1)</code> and
++ * <code>FileNodeManagerV2.getInstance().beginQuery(device_2)</code> both once. Although there
 * exist four paths, the unique devices are only `device_1` and `device_2`. When invoking
-- * <code>FileNodeManager.getInstance().beginQuery(device_1)</code>, it returns result token `1`.
++ * <code>FileNodeManagerV2.getInstance().beginQuery(device_1)</code>, it returns result token `1`.
* Similarly,
-- * <code>FileNodeManager.getInstance().beginQuery(device_2)</code> returns result token `2`.
++ * <code>FileNodeManagerV2.getInstance().beginQuery(device_2)</code> returns result token `2`.
*
 * Meanwhile, another query process Q2 triggered by another client arrives, and its sql
-- * statement is the same as Q1's. Although <code>FileNodeManager.getInstance().beginQuery(device_1)
++ * statement is the same as Q1's. Although <code>FileNodeManagerV2.getInstance().beginQuery(device_1)
* </code>
* and
-- * <code>FileNodeManager.getInstance().beginQuery(device_2)</code> will be invoked again, it
++ * <code>FileNodeManagerV2.getInstance().beginQuery(device_2)</code> will be invoked again, it
 * returns result tokens `3` and `4`.
*
-- * <code>FileNodeManager.getInstance().endQueryForGivenJob(device_1, 1)</code> and
-- * <code>FileNodeManager.getInstance().endQueryForGivenJob(device_2, 2)</code> must be invoked no matter how
++ * <code>FileNodeManagerV2.getInstance().endQueryForGivenJob(device_1, 1)</code> and
++ * <code>FileNodeManagerV2.getInstance().endQueryForGivenJob(device_2, 2)</code> must be invoked no matter how
 * query process Q1 exits, normally or abnormally. The same applies to Q2:
-- * <code>FileNodeManager.getInstance().endQueryForGivenJob(device_1, 3)</code> and
-- * <code>FileNodeManager.getInstance().endQueryForGivenJob(device_2, 4)</code> must be invoked
++ * <code>FileNodeManagerV2.getInstance().endQueryForGivenJob(device_1, 3)</code> and
++ * <code>FileNodeManagerV2.getInstance().endQueryForGivenJob(device_2, 4)</code> must be invoked
*
 * Last but not least, to ensure the correctness of the insert and query processes of IoTDB,
-- * <code>FileNodeManager.getInstance().beginQuery()</code> and
-- * <code>FileNodeManager.getInstance().endQueryForGivenJob()</code> must be executed rightly.
++ * <code>FileNodeManagerV2.getInstance().beginQuery()</code> and
++ * <code>FileNodeManagerV2.getInstance().endQueryForGivenJob()</code> must be executed rightly.
* </p>
*/
private ConcurrentHashMap<Long, ConcurrentHashMap<String, List<Integer>>> queryTokensMap;
@@@ -126,7 -126,7 +122,7 @@@
for (String deviceId : deviceIdSet) {
putQueryTokenForCurrentRequestThread(jobId, deviceId,
-- FileNodeManager.getInstance().beginQuery(deviceId));
++ FileNodeManagerV2.getInstance().beginQuery(deviceId));
}
}
@@@ -140,7 -140,7 +136,7 @@@
getUniquePaths(expression, deviceIdSet);
for (String deviceId : deviceIdSet) {
putQueryTokenForCurrentRequestThread(jobId, deviceId,
-- FileNodeManager.getInstance().beginQuery(deviceId));
++ FileNodeManagerV2.getInstance().beginQuery(deviceId));
}
}
@@@ -157,16 -157,16 +153,16 @@@
deviceIdSet.removeAll(remoteDeviceIdSet);
for (String deviceId : deviceIdSet) {
putQueryTokenForCurrentRequestThread(jobId, deviceId,
-- FileNodeManager.getInstance().beginQuery(deviceId));
++ FileNodeManagerV2.getInstance().beginQuery(deviceId));
}
}
-- public QueryDataSource getQueryDataSource(Path selectedPath,
++ public QueryDataSourceV2 getQueryDataSource(Path selectedPath,
QueryContext context)
-- throws FileNodeManagerException {
++ throws FileNodeManagerException, ProcessorException {
SingleSeriesExpression singleSeriesExpression = new SingleSeriesExpression(selectedPath, null);
-- QueryDataSource queryDataSource = FileNodeManager.getInstance()
++ QueryDataSourceV2 queryDataSource = FileNodeManagerV2.getInstance()
.query(singleSeriesExpression, context);
// add used files to current thread request cached map
@@@ -177,7 -177,7 +173,7 @@@
public QueryDataSourceV2 getQueryDataSourceV2(Path selectedPath,
QueryContext context)
-- throws FileNodeManagerException {
++ throws FileNodeManagerException, ProcessorException {
SingleSeriesExpression singleSeriesExpression = new SingleSeriesExpression(selectedPath, null);
QueryDataSourceV2 queryDataSource = FileNodeManagerV2.getInstance().query(singleSeriesExpression, context);
@@@ -199,7 -199,7 +195,7 @@@
}
for (Map.Entry<String, List<Integer>> entry : queryTokensMap.get(jobId).entrySet()) {
for (int token : entry.getValue()) {
-- FileNodeManager.getInstance().endQuery(entry.getKey(), token);
++ FileNodeManagerV2.getInstance().endQuery(entry.getKey(), token);
}
}
queryTokensMap.remove(jobId);
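The javadoc above boils down to per-job token bookkeeping: every beginQuery(device) hands out a token that must later be returned through endQuery, once per device per job. A minimal, self-contained sketch of that bookkeeping, with a plain counter standing in for the FileNodeManagerV2 calls:

    import java.util.List;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.CopyOnWriteArrayList;

    // Illustrative token book: one token list per device per job.
    public class QueryTokenBook {

      private final Map<Long, Map<String, List<Integer>>> queryTokensMap = new ConcurrentHashMap<>();
      private int nextToken = 0;

      public synchronized void begin(long jobId, String deviceId) {
        int token = nextToken++; // stands in for FileNodeManagerV2.getInstance().beginQuery(deviceId)
        queryTokensMap
            .computeIfAbsent(jobId, id -> new ConcurrentHashMap<>())
            .computeIfAbsent(deviceId, d -> new CopyOnWriteArrayList<>())
            .add(token);
      }

      public synchronized void endAll(long jobId) {
        Map<String, List<Integer>> tokens = queryTokensMap.remove(jobId);
        if (tokens == null) {
          return; // job already cleaned up; ending twice must be harmless
        }
        tokens.forEach((device, list) ->
            list.forEach(token -> { /* FileNodeManagerV2.getInstance().endQuery(device, token) */ }));
      }
    }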
diff --cc iotdb/src/main/java/org/apache/iotdb/db/query/executor/EngineQueryRouter.java
index 03c600d,03c600d..9c61dcb
--- a/iotdb/src/main/java/org/apache/iotdb/db/query/executor/EngineQueryRouter.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/query/executor/EngineQueryRouter.java
@@@ -69,13 -69,13 +69,17 @@@ public class EngineQueryRouter implemen
return engineExecutor.execute(context);
}
-- } catch (QueryFilterOptimizationException e) {
++ } catch (QueryFilterOptimizationException | IOException e) {
throw new FileNodeManagerException(e);
}
} else {
EngineExecutorWithoutTimeGenerator engineExecutor = new EngineExecutorWithoutTimeGenerator(
queryExpression);
-- return engineExecutor.execute(context);
++ try {
++ return engineExecutor.execute(context);
++ } catch (IOException e) {
++ throw new FileNodeManagerException(e);
++ }
}
}
diff --cc iotdb/src/main/java/org/apache/iotdb/db/query/factory/SeriesReaderFactory.java
index ffd335b,ffd335b..2fa34f4
--- a/iotdb/src/main/java/org/apache/iotdb/db/query/factory/SeriesReaderFactory.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/query/factory/SeriesReaderFactory.java
@@@ -29,6 -29,6 +29,7 @@@ import org.apache.iotdb.db.engine.query
import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
import org.apache.iotdb.db.engine.querycontext.QueryDataSourceV2;
import org.apache.iotdb.db.exception.FileNodeManagerException;
++import org.apache.iotdb.db.exception.ProcessorException;
import org.apache.iotdb.db.query.context.QueryContext;
import org.apache.iotdb.db.query.control.FileReaderManager;
import org.apache.iotdb.db.query.control.QueryResourceManager;
@@@ -189,40 -189,40 +190,41 @@@ public class SeriesReaderFactory
return new SealedTsFilesReader(seriesInTsFileReader, context);
}
-- /**
-- * construct ByTimestampReader, include sequential data and unsequential data.
-- *
-- * @param paths selected series path
-- * @param context query context
-- * @return the list of EngineReaderByTimeStamp
-- */
-- public static List<EngineReaderByTimeStamp> getByTimestampReadersOfSelectedPaths(
-- List<Path> paths, QueryContext context) throws IOException, FileNodeManagerException {
--
-- List<EngineReaderByTimeStamp> readersOfSelectedSeries = new ArrayList<>();
--
-- for (Path path : paths) {
--
-- QueryDataSource queryDataSource = QueryResourceManager.getInstance().getQueryDataSource(path,
-- context);
--
-- PriorityMergeReaderByTimestamp mergeReaderByTimestamp = new PriorityMergeReaderByTimestamp();
--
-- // reader for sequence data
-- SequenceDataReaderByTimestamp tsFilesReader = new SequenceDataReaderByTimestamp(
-- queryDataSource.getSeqDataSource(), context);
-- mergeReaderByTimestamp.addReaderWithPriority(tsFilesReader, 1);
--
-- // reader for unSequence data
-- PriorityMergeReaderByTimestamp unSeqMergeReader = SeriesReaderFactory.getInstance()
-- .createUnSeqMergeReaderByTimestamp(queryDataSource.getOverflowSeriesDataSource());
-- mergeReaderByTimestamp.addReaderWithPriority(unSeqMergeReader, 2);
--
-- readersOfSelectedSeries.add(mergeReaderByTimestamp);
-- }
--
-- return readersOfSelectedSeries;
-- }
++// /**
++// * construct ByTimestampReader, include sequential data and unsequential data.
++// *
++// * @param paths selected series path
++// * @param context query context
++// * @return the list of EngineReaderByTimeStamp
++// */
++// public static List<EngineReaderByTimeStamp> getByTimestampReadersOfSelectedPaths(
++// List<Path> paths, QueryContext context)
++// throws IOException, FileNodeManagerException, ProcessorException {
++//
++// List<EngineReaderByTimeStamp> readersOfSelectedSeries = new ArrayList<>();
++//
++// for (Path path : paths) {
++//
++// QueryDataSource queryDataSource = QueryResourceManager.getInstance().getQueryDataSource(path,
++// context);
++//
++// PriorityMergeReaderByTimestamp mergeReaderByTimestamp = new PriorityMergeReaderByTimestamp();
++//
++// // reader for sequence data
++// SequenceDataReaderByTimestamp tsFilesReader = new SequenceDataReaderByTimestamp(
++// queryDataSource.getSeqDataSource(), context);
++// mergeReaderByTimestamp.addReaderWithPriority(tsFilesReader, 1);
++//
++// // reader for unSequence data
++// PriorityMergeReaderByTimestamp unSeqMergeReader = SeriesReaderFactory.getInstance()
++// .createUnSeqMergeReaderByTimestamp(queryDataSource.getOverflowSeriesDataSource());
++// mergeReaderByTimestamp.addReaderWithPriority(unSeqMergeReader, 2);
++//
++// readersOfSelectedSeries.add(mergeReaderByTimestamp);
++// }
++//
++// return readersOfSelectedSeries;
++// }
/**
 * Construct ByTimestampReader, including sequential and unsequential data.
@@@ -232,7 -232,7 +234,8 @@@
* @return the list of EngineReaderByTimeStamp
*/
public static List<EngineReaderByTimeStamp> getByTimestampReadersOfSelectedPathsV2(
-- List<Path> paths, QueryContext context) throws IOException, FileNodeManagerException {
++ List<Path> paths, QueryContext context)
++ throws IOException, FileNodeManagerException, ProcessorException {
List<EngineReaderByTimeStamp> readersOfSelectedSeries = new ArrayList<>();
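Both the V1 helper commented out above and the V2 method that follows share one rule: the sequence reader is registered with priority 1 and the unsequence reader with priority 2, so unsequence (overflow) data wins whenever both produce a value for the same timestamp. A generic, self-contained illustration of that rule (deliberately not the IoTDB reader classes):

    import java.util.Map;
    import java.util.TreeMap;

    // Priority merge in miniature: the later, higher-priority write wins per timestamp.
    public class PriorityMergeSketch {
      public static void main(String[] args) {
        Map<Long, String> merged = new TreeMap<>();
        merged.put(1L, "seq-a");    // sequence data, priority 1
        merged.put(2L, "seq-b");    // sequence data, priority 1
        merged.put(2L, "unseq-b'"); // unsequence data, priority 2, overrides timestamp 2
        System.out.println(merged); // {1=seq-a, 2=unseq-b'}
      }
    }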
diff --cc iotdb/src/main/java/org/apache/iotdb/db/query/factory/SeriesReaderFactoryImpl.java
index 455254c,79d63f8..40969f7
--- a/iotdb/src/main/java/org/apache/iotdb/db/query/factory/SeriesReaderFactoryImpl.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/query/factory/SeriesReaderFactoryImpl.java
@@@ -1,30 -19,33 +19,34 @@@
package org.apache.iotdb.db.query.factory;
import java.io.IOException;
+ import java.util.ArrayList;
import java.util.List;
import org.apache.iotdb.db.engine.filenodeV2.TsFileResourceV2;
- import org.apache.iotdb.db.engine.querycontext.GlobalSortedSeriesDataSourceV2;
+ import org.apache.iotdb.db.engine.querycontext.QueryDataSourceV2;
+ import org.apache.iotdb.db.exception.FileNodeManagerException;
++import org.apache.iotdb.db.exception.ProcessorException;
import org.apache.iotdb.db.query.context.QueryContext;
- import org.apache.iotdb.db.query.reader.IAggregateReader;
+ import org.apache.iotdb.db.query.control.QueryResourceManager;
+ import org.apache.iotdb.db.query.reader.AllDataReader;
import org.apache.iotdb.db.query.reader.IPointReader;
import org.apache.iotdb.db.query.reader.merge.EngineReaderByTimeStamp;
+ import org.apache.iotdb.db.query.reader.merge.PriorityMergeReaderByTimestamp;
+ import org.apache.iotdb.db.query.reader.sequence.SequenceDataReaderByTimestampV2;
+ import org.apache.iotdb.db.query.reader.sequence.SequenceDataReaderV2;
import org.apache.iotdb.tsfile.read.common.Path;
- import org.apache.iotdb.tsfile.read.expression.impl.SingleSeriesExpression;
import org.apache.iotdb.tsfile.read.filter.basic.Filter;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
- public class SeriesReaderFactoryImpl implements ISeriesReaderFactory{
+ public class SeriesReaderFactoryImpl implements ISeriesReaderFactory {
- @Override
- public IPointReader createUnSeqReader(GlobalSortedSeriesDataSourceV2 overflowSeriesDataSource,
- Filter filter) throws IOException {
+ private static final Logger logger = LoggerFactory.getLogger(SeriesReaderFactoryImpl.class);
- return null;
+ private SeriesReaderFactoryImpl() {
}
- @Override
- public IAggregateReader createSeqReader(GlobalSortedSeriesDataSourceV2 overflowSeriesDataSource,
- Filter filter) throws IOException {
- return null;
+ public static SeriesReaderFactoryImpl getInstance() {
+ return SeriesReaderFactoryHelper.INSTANCE;
}
@Override
@@@ -36,12 -56,70 +57,80 @@@
@Override
public List<EngineReaderByTimeStamp> createByTimestampReadersOfSelectedPaths(List<Path> paths,
- QueryContext context) {
+ QueryContext context) throws FileNodeManagerException {
+ List<EngineReaderByTimeStamp> readersOfSelectedSeries = new ArrayList<>();
+
+ for (Path path : paths) {
+
- QueryDataSourceV2 queryDataSource = QueryResourceManager.getInstance()
- .getQueryDataSourceV2(path,
- context);
++ QueryDataSourceV2 queryDataSource = null;
++ try {
++ queryDataSource = QueryResourceManager.getInstance()
++ .getQueryDataSourceV2(path,
++ context);
++ } catch (ProcessorException e) {
++ throw new FileNodeManagerException(e);
++ }
+
+ PriorityMergeReaderByTimestamp mergeReaderByTimestamp = new PriorityMergeReaderByTimestamp();
+
+ // reader for sequence data
+ SequenceDataReaderByTimestampV2 tsFilesReader = new SequenceDataReaderByTimestampV2(path,
+ queryDataSource.getSeqResources(), context);
+ mergeReaderByTimestamp.addReaderWithPriority(tsFilesReader, 1);
+
+ // reader for unSequence data
+ //TODO add create unseq reader
+ PriorityMergeReaderByTimestamp unSeqMergeReader = createUnSeqByTimestampReader(
+ queryDataSource.getUnseqResources());
+ mergeReaderByTimestamp.addReaderWithPriority(unSeqMergeReader, 2);
+
+ readersOfSelectedSeries.add(mergeReaderByTimestamp);
+ }
+
+ return readersOfSelectedSeries;
+ }
+
+ private PriorityMergeReaderByTimestamp createUnSeqByTimestampReader(
+ List<TsFileResourceV2> unseqResources) {
return null;
}
@Override
- public List<IPointReader> createReadersOfSelectedPaths(List<Path> paths, QueryContext context) {
- return null;
+ public IPointReader createAllDataReader(Path path, Filter timeFilter, QueryContext context)
+ throws FileNodeManagerException, IOException {
- QueryDataSourceV2 queryDataSource = QueryResourceManager.getInstance()
- .getQueryDataSourceV2(path, context);
++ QueryDataSourceV2 queryDataSource = null;
++ try {
++ queryDataSource = QueryResourceManager.getInstance()
++ .getQueryDataSourceV2(path, context);
++ } catch (ProcessorException e) {
++ throw new FileNodeManagerException(e);
++ }
+
+ // sequence reader for one sealed tsfile
+ SequenceDataReaderV2 tsFilesReader;
+
+ tsFilesReader = new SequenceDataReaderV2(queryDataSource.getSeriesPath(),
+ queryDataSource.getSeqResources(),
+ timeFilter, context);
+
+ // unseq reader for all chunk groups in unSeqFile
+ IPointReader unSeqMergeReader = createUnSeqReader(path, queryDataSource.getUnseqResources(),
+ timeFilter);
+
+ if (!tsFilesReader.hasNext()) {
+ //only have unsequence data.
+ return unSeqMergeReader;
+ } else {
+ //merge sequence data with unsequence data.
+ return new AllDataReader(tsFilesReader, unSeqMergeReader);
+ }
+ }
+
+ private static class SeriesReaderFactoryHelper {
+
+ private static final SeriesReaderFactoryImpl INSTANCE = new SeriesReaderFactoryImpl();
+
+ private SeriesReaderFactoryHelper() {
+ }
}
}
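SeriesReaderFactoryHelper above is the initialization-on-demand holder idiom: the JVM initializes the nested class, and with it INSTANCE, lazily and thread-safely on the first getInstance() call, with no explicit locking. The generic shape of the idiom:

    public class LazySingleton {

      private LazySingleton() { } // no outside instantiation

      public static LazySingleton getInstance() {
        return Holder.INSTANCE;   // triggers Holder's one-time class initialization
      }

      private static class Holder {
        private static final LazySingleton INSTANCE = new LazySingleton();
      }
    }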
diff --cc iotdb/src/main/java/org/apache/iotdb/db/service/CloseMergeService.java
index 5db4a78,5db4a78..70c69a8
--- a/iotdb/src/main/java/org/apache/iotdb/db/service/CloseMergeService.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/service/CloseMergeService.java
@@@ -26,7 -26,7 +26,7 @@@ import org.apache.iotdb.db.concurrent.I
import org.apache.iotdb.db.concurrent.ThreadName;
import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
--import org.apache.iotdb.db.engine.filenode.FileNodeManager;
++import org.apache.iotdb.db.engine.filenodeV2.FileNodeManagerV2;
import org.apache.iotdb.db.exception.StartupException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@@ -176,7 -176,7 +176,7 @@@ public class CloseMergeService implemen
+ "time interval is {}s.", startDateTime, endDateTime, timeInterval / 1000);
mergeAllLastTime = System.currentTimeMillis();
try {
-- FileNodeManager.getInstance().mergeAll();
++ FileNodeManagerV2.getInstance().mergeAll();
} catch (Exception e) {
LOGGER.error("Merge all error.", e);
}
@@@ -202,7 -202,7 +202,7 @@@
+ "time interval is {}s.", startDateTime, endDateTime, timeInterval / 1000);
closeAllLastTime = System.currentTimeMillis();
try {
-- FileNodeManager.getInstance().closeAll();
++ FileNodeManagerV2.getInstance().syncCloseAllProcessor();
} catch (Exception e) {
LOGGER.error("setCloseMark all error.", e);
}
diff --cc iotdb/src/main/java/org/apache/iotdb/db/service/IoTDB.java
index 2e8f626,2e8f626..3e70542
--- a/iotdb/src/main/java/org/apache/iotdb/db/service/IoTDB.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/service/IoTDB.java
@@@ -18,24 -18,24 +18,17 @@@
*/
package org.apache.iotdb.db.service;
--import java.io.IOException;
--import java.util.List;
import org.apache.iotdb.db.concurrent.IoTDBDefaultThreadExceptionHandler;
--import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBConstant;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
--import org.apache.iotdb.db.engine.filenode.FileNodeManager;
++import org.apache.iotdb.db.engine.filenodeV2.FileNodeManagerV2;
import org.apache.iotdb.db.engine.memcontrol.BasicMemController;
import org.apache.iotdb.db.exception.FileNodeManagerException;
--import org.apache.iotdb.db.exception.PathErrorException;
--import org.apache.iotdb.db.exception.RecoverException;
import org.apache.iotdb.db.exception.StartupException;
import org.apache.iotdb.db.exception.builder.ExceptionBuilder;
--import org.apache.iotdb.db.metadata.MManager;
import org.apache.iotdb.db.monitor.StatMonitor;
import org.apache.iotdb.db.sync.receiver.SyncServiceManager;
import org.apache.iotdb.db.writelog.manager.MultiFileLogNodeManager;
--import org.apache.iotdb.db.writelog.manager.WriteLogNodeManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@@ -84,7 -84,7 +77,7 @@@ public class IoTDB implements IoTDBMBea
boolean enableWAL = IoTDBDescriptor.getInstance().getConfig().isEnableWal();
IoTDBDescriptor.getInstance().getConfig().setEnableWal(false);
-- FileNodeManager.getInstance().recovery();
++ FileNodeManagerV2.getInstance().recovery();
IoTDBDescriptor.getInstance().getConfig().setEnableWal(enableWAL);
// When registering statMonitor, we should start recovering some statistics
@@@ -94,7 -94,7 +87,7 @@@
StatMonitor.getInstance().recovery();
}
-- registerManager.register(FileNodeManager.getInstance());
++ registerManager.register(FileNodeManagerV2.getInstance());
registerManager.register(MultiFileLogNodeManager.getInstance());
registerManager.register(JMXService.getInstance());
registerManager.register(JDBCService.getInstance());
diff --cc iotdb/src/main/java/org/apache/iotdb/db/service/TSServiceImpl.java
index 77746e1,77746e1..d0f13de
--- a/iotdb/src/main/java/org/apache/iotdb/db/service/TSServiceImpl.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/service/TSServiceImpl.java
@@@ -39,7 -39,7 +39,7 @@@ import org.apache.iotdb.db.auth.authori
import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBConstant;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
--import org.apache.iotdb.db.engine.filenode.FileNodeManager;
++import org.apache.iotdb.db.engine.filenodeV2.FileNodeManagerV2;
import org.apache.iotdb.db.exception.ArgsErrorException;
import org.apache.iotdb.db.exception.FileNodeManagerException;
import org.apache.iotdb.db.exception.PathErrorException;
@@@ -416,7 -416,7 +416,7 @@@ public class TSServiceImpl implements T
switch (statement) {
case "flushMetadata":
try {
-- FileNodeManager.getInstance().closeAll();
++ FileNodeManagerV2.getInstance().syncCloseAllProcessor();
} catch (FileNodeManagerException e) {
LOGGER.error("meet error while FileNodeManager closing all!", e);
throw new IOException(e);
@@@ -424,7 -424,7 +424,8 @@@
return true;
case "merge":
try {
-- FileNodeManager.getInstance().mergeAll();
++ // TODO change to merge!!!
++ FileNodeManagerV2.getInstance().syncCloseAllProcessor();
} catch (FileNodeManagerException e) {
LOGGER.error("meet error while FileNodeManager merging all!", e);
throw new IOException(e);
diff --cc iotdb/src/main/java/org/apache/iotdb/db/sync/receiver/SyncServiceImpl.java
index 2eb4c46,5d06a13..c98b7d6
--- a/iotdb/src/main/java/org/apache/iotdb/db/sync/receiver/SyncServiceImpl.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/sync/receiver/SyncServiceImpl.java
@@@ -42,10 -42,10 +42,10 @@@ import org.apache.commons.lang3.StringU
import org.apache.iotdb.db.concurrent.ThreadName;
import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.conf.directories.Directories;
-import org.apache.iotdb.db.engine.filenode.FileNodeManager;
+import org.apache.iotdb.db.conf.directories.DirectoryManager;
- import org.apache.iotdb.db.engine.filenode.FileNodeManager;
import org.apache.iotdb.db.engine.filenode.OverflowChangeType;
import org.apache.iotdb.db.engine.filenode.TsFileResource;
++import org.apache.iotdb.db.engine.filenodeV2.FileNodeManagerV2;
import org.apache.iotdb.db.exception.FileNodeManagerException;
import org.apache.iotdb.db.exception.MetadataArgsErrorException;
import org.apache.iotdb.db.exception.PathErrorException;
@@@ -81,7 -81,7 +81,7 @@@ public class SyncServiceImpl implement
private static final Logger logger = LoggerFactory.getLogger(SyncServiceImpl.class);
-- private static final FileNodeManager fileNodeManager = FileNodeManager.getInstance();
++ private static final FileNodeManagerV2 fileNodeManager = FileNodeManagerV2.getInstance();
/**
* Metadata manager
**/
@@@ -556,9 -556,9 +556,8 @@@
}
}
}
-- if (insertExecutor
- .insert(deviceId, record.getTimestamp(), measurementList.toArray(new String[]{}),
- .multiInsert(deviceId, record.getTimestamp(), measurementList.toArray(new String[]{}),
-- insertValues.toArray(new String[]{})) <= 0) {
++ if (insertExecutor.insert(new InsertPlan(deviceId, record.getTimestamp(),
++ measurementList.toArray(new String[0]), insertValues.toArray(new String[0]))) <= 0) {
throw new IOException("Inserting series data to IoTDB engine has failed.");
}
}
@@@ -634,8 -634,8 +633,7 @@@
/** If there is no overlapping data with the timeseries, insert all data from the sync file **/
if (originDataPoints.isEmpty()) {
for (InsertPlan insertPlan : newDataPoints) {
- if (insertExecutor.insert(insertPlan.getDeviceId(), insertPlan.getTime(),
- if (insertExecutor.multiInsert(insertPlan.getDeviceId(), insertPlan.getTime(),
-- insertPlan.getMeasurements(), insertPlan.getValues()) <= 0) {
++ if (insertExecutor.insert(insertPlan) <= 0) {
throw new IOException("Inserting series data to IoTDB engine has failed.");
}
}
@@@ -643,8 -643,8 +641,7 @@@
/** Compare each data point to keep only the valid ones **/
for (InsertPlan insertPlan : newDataPoints) {
if (!originDataPoints.contains(insertPlan)) {
- if (insertExecutor.insert(insertPlan.getDeviceId(), insertPlan.getTime(),
- if (insertExecutor.multiInsert(insertPlan.getDeviceId(), insertPlan.getTime(),
-- insertPlan.getMeasurements(), insertPlan.getValues()) <= 0) {
++ if (insertExecutor.insert(insertPlan) <= 0) {
throw new IOException("Inserting series data to IoTDB engine has failed.");
}
}
diff --cc iotdb/src/main/java/org/apache/iotdb/db/utils/LoadDataUtils.java
index da08382,da08382..7f1e660
--- a/iotdb/src/main/java/org/apache/iotdb/db/utils/LoadDataUtils.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/utils/LoadDataUtils.java
@@@ -31,11 -31,11 +31,12 @@@ import java.util.List
import java.util.Set;
import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
--import org.apache.iotdb.db.engine.filenode.FileNodeManager;
++import org.apache.iotdb.db.engine.filenodeV2.FileNodeManagerV2;
import org.apache.iotdb.db.exception.FileNodeManagerException;
import org.apache.iotdb.db.exception.PathErrorException;
import org.apache.iotdb.db.exception.ProcessorException;
import org.apache.iotdb.db.metadata.MManager;
++import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
import org.apache.iotdb.tsfile.exception.write.WriteProcessException;
import org.apache.iotdb.tsfile.write.record.TSRecord;
import org.apache.iotdb.tsfile.write.schema.FileSchema;
@@@ -57,7 -57,7 +58,7 @@@ public class LoadDataUtils
private int writeInstanceThreshold;
private boolean hasExtra = false;
private long totalPointCount = 0;
-- private FileNodeManager fileNodeManager;
++ private FileNodeManagerV2 fileNodeManager;
private IoTDBConfig conf = IoTDBDescriptor.getInstance().getConfig();
/**
@@@ -65,7 -65,7 +66,7 @@@
*/
public LoadDataUtils() {
writeInstanceMap = new HashSet<>();
-- fileNodeManager = FileNodeManager.getInstance();
++ fileNodeManager = FileNodeManagerV2.getInstance();
writeInstanceThreshold = conf.getWriteInstanceThreshold();
}
@@@ -151,7 -151,7 +152,7 @@@
}
// appeared before, insert directly
try {
-- fileNodeManager.insert(record, false);
++ fileNodeManager.insert(new InsertPlan(record));
} catch (FileNodeManagerException e) {
logger.error("failed when insert into fileNodeManager, record:{}", line, e);
}
diff --cc iotdb/src/main/java/org/apache/iotdb/db/writelog/recover/LogReplayer.java
index e074a31,960b78d..ff40053
--- a/iotdb/src/main/java/org/apache/iotdb/db/writelog/recover/LogReplayer.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/writelog/recover/LogReplayer.java
@@@ -60,8 -62,8 +63,8 @@@ public class LogReplayer
public LogReplayer(String logNodePrefix, String insertFilePath,
ModificationFile modFile,
VersionController versionController,
- TsFileResource currentTsFileResource,
+ TsFileResourceV2 currentTsFileResource,
- FileSchema fileSchema, IMemTable memTable) {
+ FileSchema fileSchema, IMemTable memTable, boolean acceptDuplication) {
this.logNodePrefix = logNodePrefix;
this.insertFilePath = insertFilePath;
this.modFile = modFile;
@@@ -111,9 -114,11 +115,10 @@@
}
private void replayInsert(InsertPlan insertPlan) {
- TSRecord tsRecord = new TSRecord(insertPlan.getTime(), insertPlan.getDeviceId());
if (currentTsFileResource != null) {
- // the last chunk group may contain the same data with the logs, ignore such logs
- if (currentTsFileResource.getEndTimeMap().get(insertPlan.getDeviceId()) >= insertPlan.getTime()) {
+ // the last chunk group may contain the same data with the logs, ignore such logs in seq file
- if (currentTsFileResource.getEndTime(insertPlan.getDeviceId()) >= insertPlan.getTime() &&
- !acceptDuplication) {
++ if (currentTsFileResource.getEndTimeMap().get(insertPlan.getDeviceId()) >= insertPlan.getTime() &&
++ !acceptDuplication) {
return;
}
currentTsFileResource.updateTime(insertPlan.getDeviceId(), insertPlan.getTime());
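The replay guard above skips a logged insert when the recovered file already covers that device up to the entry's timestamp, unless acceptDuplication is set (the unsequence-file case, where overlapping times are legal). The same predicate, sketched with an explicit null check for devices missing from the end-time map (names illustrative):

    import java.util.Map;

    // True when the logged insert should be replayed into the memtable.
    static boolean shouldReplay(Map<String, Long> endTimeMap, String deviceId, long time,
        boolean acceptDuplication) {
      Long endTime = endTimeMap.get(deviceId);
      return acceptDuplication || endTime == null || endTime < time;
    }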
diff --cc iotdb/src/main/java/org/apache/iotdb/db/writelog/recover/UnSeqTsFileRecoverPerformer.java
index ab0736e,756f0a7..116fadf
--- a/iotdb/src/main/java/org/apache/iotdb/db/writelog/recover/UnSeqTsFileRecoverPerformer.java
+++ b/iotdb/src/main/java/org/apache/iotdb/db/writelog/recover/UnSeqTsFileRecoverPerformer.java
@@@ -47,12 -46,12 +47,12 @@@ public class UnSeqTsFileRecoverPerforme
private FileSchema fileSchema;
private VersionController versionController;
private LogReplayer logReplayer;
- private TsFileResource tsFileResource;
+ private TsFileResourceV2 tsFileResource;
- public SeqTsFileRecoverPerformer(String logNodePrefix,
+ public UnSeqTsFileRecoverPerformer(String logNodePrefix,
FileSchema fileSchema, VersionController versionController,
- TsFileResource currentTsFileResource) {
- this.insertFilePath = currentTsFileResource.getFilePath();
+ TsFileResourceV2 currentTsFileResource) {
+ this.insertFilePath = currentTsFileResource.getFile().getPath();
this.logNodePrefix = logNodePrefix;
this.fileSchema = fileSchema;
this.versionController = versionController;
diff --cc iotdb/src/test/java/org/apache/iotdb/db/engine/filenode/FileNodeProcessorStoreTest.java
index 891695d,891695d..0000000
deleted file mode 100644,100644
--- a/iotdb/src/test/java/org/apache/iotdb/db/engine/filenode/FileNodeProcessorStoreTest.java
+++ /dev/null
@@@ -1,91 -1,91 +1,0 @@@
--/**
-- * Licensed to the Apache Software Foundation (ASF) under one
-- * or more contributor license agreements. See the NOTICE file
-- * distributed with this work for additional information
-- * regarding copyright ownership. The ASF licenses this file
-- * to you under the Apache License, Version 2.0 (the
-- * "License"); you may not use this file except in compliance
-- * with the License. You may obtain a copy of the License at
-- *
-- * http://www.apache.org/licenses/LICENSE-2.0
-- *
-- * Unless required by applicable law or agreed to in writing,
-- * software distributed under the License is distributed on an
-- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-- * KIND, either express or implied. See the License for the
-- * specific language governing permissions and limitations
-- * under the License.
-- */
--package org.apache.iotdb.db.engine.filenode;
--
--import static org.junit.Assert.assertEquals;
--
--import java.io.ByteArrayInputStream;
--import java.io.ByteArrayOutputStream;
--import java.util.ArrayList;
--import java.util.HashMap;
--import java.util.List;
--import java.util.Map;
--import org.junit.After;
--import org.junit.Before;
--import org.junit.Test;
--
--public class FileNodeProcessorStoreTest {
--
-- private boolean isOverflowed;
-- private Map<String, Long> lastUpdateTimeMap;
-- private TsFileResource emptyTsFileResource;
-- private List<TsFileResource> newFileNodes;
-- private int numOfMergeFile;
-- private FileNodeProcessorStatus fileNodeProcessorStatus;
--
-- private FileNodeProcessorStore fileNodeProcessorStore;
--
-- @Before
-- public void setUp() throws Exception {
-- isOverflowed = true;
-- lastUpdateTimeMap = new HashMap<>();
-- for (int i = 0; i < 10; i++) {
-- lastUpdateTimeMap.put("d" + i, (long) i);
-- }
-- emptyTsFileResource = TsFileResourceTest.constructTsfileResource();
-- newFileNodes = new ArrayList<>();
-- for (int i = 0; i < 5; i++) {
-- newFileNodes.add(TsFileResourceTest.constructTsfileResource());
-- }
-- numOfMergeFile = 5;
-- fileNodeProcessorStatus = FileNodeProcessorStatus.MERGING_WRITE;
-- fileNodeProcessorStore = new FileNodeProcessorStore(isOverflowed, lastUpdateTimeMap,
-- emptyTsFileResource, newFileNodes, fileNodeProcessorStatus, numOfMergeFile);
-- }
--
-- @After
-- public void tearDown() throws Exception {
--
-- }
--
-- @Test
-- public void testSerDeialize() throws Exception {
-- ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
-- fileNodeProcessorStore.serialize(outputStream);
-- ByteArrayInputStream inputStream = new ByteArrayInputStream(outputStream.toByteArray());
-- FileNodeProcessorStore deFileNodeProcessorStore = FileNodeProcessorStore
-- .deSerialize(inputStream);
--
-- assertEquals(fileNodeProcessorStore.getLastUpdateTimeMap(),
-- deFileNodeProcessorStore.getLastUpdateTimeMap());
-- assertEquals(fileNodeProcessorStore.getNumOfMergeFile(),
-- deFileNodeProcessorStore.getNumOfMergeFile());
-- assertEquals(fileNodeProcessorStore.getFileNodeProcessorStatus(),
-- deFileNodeProcessorStore.getFileNodeProcessorStatus());
-- TsFileResourceTest.assertTsfileRecource(fileNodeProcessorStore.getEmptyTsFileResource(),
-- deFileNodeProcessorStore.getEmptyTsFileResource());
-- assertEquals(fileNodeProcessorStore.getNewFileNodes().size(),
-- deFileNodeProcessorStore.getNewFileNodes().size());
-- for (int i = 0; i < fileNodeProcessorStore.getNewFileNodes().size(); i++) {
-- TsFileResourceTest.assertTsfileRecource(fileNodeProcessorStore.getNewFileNodes().get(i),
-- deFileNodeProcessorStore.getNewFileNodes().get(i));
-- }
-- }
--
--}
diff --cc iotdb/src/test/java/org/apache/iotdb/db/engine/filenode/FileNodeProcessorTest.java
index e135d42,e135d42..0000000
deleted file mode 100644,100644
--- a/iotdb/src/test/java/org/apache/iotdb/db/engine/filenode/FileNodeProcessorTest.java
+++ /dev/null
@@@ -1,134 -1,134 +1,0 @@@
--/**
-- * Licensed to the Apache Software Foundation (ASF) under one
-- * or more contributor license agreements. See the NOTICE file
-- * distributed with this work for additional information
-- * regarding copyright ownership. The ASF licenses this file
-- * to you under the Apache License, Version 2.0 (the
-- * "License"); you may not use this file except in compliance
-- * with the License. You may obtain a copy of the License at
-- *
-- * http://www.apache.org/licenses/LICENSE-2.0
-- *
-- * Unless required by applicable law or agreed to in writing,
-- * software distributed under the License is distributed on an
-- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-- * KIND, either express or implied. See the License for the
-- * specific language governing permissions and limitations
-- * under the License.
-- */
--
--package org.apache.iotdb.db.engine.filenode;
--
--import static org.junit.Assert.assertEquals;
--import static org.junit.Assert.assertTrue;
--
--import java.io.IOException;
--import java.util.concurrent.ExecutionException;
--import org.apache.iotdb.db.conf.IoTDBDescriptor;
--import org.apache.iotdb.db.engine.MetadataManagerHelper;
--import org.apache.iotdb.db.engine.bufferwrite.BufferWriteProcessor;
--import org.apache.iotdb.db.exception.ArgsErrorException;
--import org.apache.iotdb.db.exception.BufferWriteProcessorException;
--import org.apache.iotdb.db.exception.FileNodeManagerException;
--import org.apache.iotdb.db.exception.FileNodeProcessorException;
--import org.apache.iotdb.db.exception.ProcessorException;
--import org.apache.iotdb.db.exception.StartupException;
--import org.apache.iotdb.db.exception.qp.QueryProcessorException;
--import org.apache.iotdb.db.qp.QueryProcessor;
--import org.apache.iotdb.db.qp.executor.OverflowQPExecutor;
--import org.apache.iotdb.db.qp.executor.QueryProcessExecutor;
--import org.apache.iotdb.db.qp.physical.crud.QueryPlan;
--import org.apache.iotdb.db.utils.EnvironmentUtils;
--import org.apache.iotdb.tsfile.exception.filter.QueryFilterOptimizationException;
--import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
--import org.apache.iotdb.tsfile.read.common.Path;
--import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet;
--import org.apache.iotdb.tsfile.write.record.TSRecord;
--import org.apache.iotdb.tsfile.write.record.datapoint.DataPoint;
--import org.junit.After;
--import org.junit.Assert;
--import org.junit.Before;
--import org.junit.Test;
--
--public class FileNodeProcessorTest {
--
-- FileNodeManager fileNodeManager;
-- FileNodeProcessor processor;
-- private QueryProcessExecutor queryExecutor;
-- private QueryProcessor queryProcessor;
-- private String deviceId = "root.vehicle.d0";
-- private String measurementId = "s0";
-- private TSDataType dataType = TSDataType.INT32;
-- private String processName = "root.vehicle";
--
-- @Before
-- public void setUp() throws FileNodeProcessorException, StartupException, IOException {
-- // init metadata
-- EnvironmentUtils.envSetUp();
-- MetadataManagerHelper.initMetadata();
-- fileNodeManager = FileNodeManager.getInstance();
-- processor = new FileNodeProcessor(IoTDBDescriptor.getInstance().getConfig().getFileNodeDir(), processName);
-- queryExecutor = new OverflowQPExecutor();
-- queryProcessor = new QueryProcessor(queryExecutor);
-- }
--
-- @After
-- public void tearDown() throws IOException, FileNodeManagerException {
-- EnvironmentUtils.cleanEnv();
-- }
--
-- @Test
-- public void testAsyncClose()
-- throws FileNodeProcessorException, BufferWriteProcessorException, ExecutionException, InterruptedException {
--
-- BufferWriteProcessor bwProcessor;
-- int i = 1;
-- for (int j = 1; j < 5; j++) {
-- bwProcessor = processor.getBufferWriteProcessor(processName, System.currentTimeMillis());
-- for (; i <= 100 * j; i++) {
-- bwProcessor.write(deviceId, measurementId, i, dataType, String.valueOf(i));
-- }
-- processor.closeBufferWrite();
-- }
-- Assert.assertNotEquals(0, processor.getClosingBufferWriteProcessor().size());
-- processor.waitforAllClosed();
-- Assert.assertEquals(0, processor.getClosingBufferWriteProcessor().size());
--
-- }
--
-- @Test
-- public void testBufferWriteQuery()
-- throws ProcessorException, ArgsErrorException, QueryProcessorException, FileNodeManagerException, QueryFilterOptimizationException, IOException {
--
-- int i = 1;
-- for (int j = 1; j <= 5; j++) {
-- for (; i <= 100 * j; i++) {
-- TSRecord tsRecord = new TSRecord(i, deviceId).addTuple(DataPoint.getDataPoint(dataType, measurementId, String.valueOf(i)));
-- fileNodeManager.insert(tsRecord, false);
-- }
-- fileNodeManager.closeAll();
-- }
-- QueryPlan queryPlan = (QueryPlan) queryProcessor
-- .parseSQLToPhysicalPlan("select " + new Path(deviceId.replace("root.", ""), measurementId).getFullPath() + " from root");
--
-- int count = 0;
-- QueryDataSet dataSet = queryExecutor.processQuery(queryPlan, EnvironmentUtils.TEST_QUERY_CONTEXT);
-- assertTrue(dataSet.hasNext());
-- while (dataSet.hasNext()) {
-- count++;
-- assertEquals(count, dataSet.next().getFields().get(0).getIntV());
-- }
-- assertEquals(500, count);
--
-- processor.waitforAllClosed();
--
-- count = 0;
-- dataSet = queryExecutor.processQuery(queryPlan, EnvironmentUtils.TEST_QUERY_CONTEXT);
-- assertTrue(dataSet.hasNext());
-- while (dataSet.hasNext()) {
-- count++;
-- assertEquals(count, dataSet.next().getFields().get(0).getIntV());
-- }
-- assertEquals(500, count);
-- }
--}
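The removed FileNodeProcessorTest exercised the async-close contract: closeBufferWrite() hands the processor to a background worker, getClosingBufferWriteProcessor() is non-empty while closes are in flight, and waitforAllClosed() blocks until the set drains. A generic sketch of that contract, assuming nothing beyond java.util.concurrent:

    import java.util.List;
    import java.util.concurrent.CopyOnWriteArrayList;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;

    public class AsyncCloseSketch {
      private final ExecutorService pool = Executors.newFixedThreadPool(2);
      private final List<Future<?>> closing = new CopyOnWriteArrayList<>();

      // Analogue of closeBufferWrite(): hand the close off to a worker thread.
      void asyncClose(Runnable closeTask) {
        closing.add(pool.submit(closeTask));
      }

      // Analogue of getClosingBufferWriteProcessor().size().
      int pendingCloses() {
        closing.removeIf(Future::isDone);
        return closing.size();
      }

      // Analogue of waitforAllClosed(): block until every pending close finishes.
      void waitForAllClosed() throws Exception {
        for (Future<?> f : closing) {
          f.get();
        }
        closing.clear();
      }
    }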
diff --cc iotdb/src/test/java/org/apache/iotdb/db/engine/filenode/TsFileResourceTest.java
index a1c9d24,a1c9d24..0000000
deleted file mode 100644,100644
--- a/iotdb/src/test/java/org/apache/iotdb/db/engine/filenode/TsFileResourceTest.java
+++ /dev/null
@@@ -1,98 -1,98 +1,0 @@@
--/**
-- * Licensed to the Apache Software Foundation (ASF) under one
-- * or more contributor license agreements. See the NOTICE file
-- * distributed with this work for additional information
-- * regarding copyright ownership. The ASF licenses this file
-- * to you under the Apache License, Version 2.0 (the
-- * "License"); you may not use this file except in compliance
-- * with the License. You may obtain a copy of the License at
-- *
-- * http://www.apache.org/licenses/LICENSE-2.0
-- *
-- * Unless required by applicable law or agreed to in writing,
-- * software distributed under the License is distributed on an
-- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-- * KIND, either express or implied. See the License for the
-- * specific language governing permissions and limitations
-- * under the License.
-- */
--package org.apache.iotdb.db.engine.filenode;
--
--import static org.junit.Assert.assertEquals;
--
--import java.io.ByteArrayInputStream;
--import java.io.ByteArrayOutputStream;
--import java.io.File;
--import java.io.IOException;
--import java.util.Collections;
--import java.util.HashMap;
--import java.util.Map;
--import org.junit.After;
--import org.junit.Before;
--import org.junit.Test;
--
--public class TsFileResourceTest {
--
-- private TsFileResource tsFileResource;
--
-- public static TsFileResource constructTsfileResource() {
-- TsFileResource tsFileResource;
-- String relativePath = "data/data/settled/b/relativePath";
-- Map<String, Long> startTimes = new HashMap<>();
-- Map<String, Long> endTimes = new HashMap<>();
--
-- tsFileResource = new TsFileResource(Collections.emptyMap(), Collections.emptyMap(),
-- OverflowChangeType.MERGING_CHANGE, new File(relativePath));
-- for (int i = 0; i < 10; i++) {
-- startTimes.put("d" + i, (long) i);
-- }
-- for (int i = 0; i < 10; i++) {
-- endTimes.put("d" + i, (long) (i + 10));
-- }
-- tsFileResource.setStartTimeMap(startTimes);
-- tsFileResource.setEndTimeMap(endTimes);
-- for (int i = 0; i < 5; i++) {
-- tsFileResource.addMergeChanged("d" + i);
-- }
-- return tsFileResource;
-- }
--
-- @Before
-- public void setUp() throws Exception {
-- this.tsFileResource = constructTsfileResource();
-- }
--
-- @After
-- public void tearDown() throws Exception {
--
-- }
--
-- @Test
-- public void testSerDeserialize() throws Exception {
-- ByteArrayOutputStream outputStream = new ByteArrayOutputStream(0);
-- tsFileResource.serialize(outputStream);
-- ByteArrayInputStream inputStream = new ByteArrayInputStream(outputStream.toByteArray());
-- TsFileResource deTsfileResource = TsFileResource.deSerialize(inputStream);
-- assertTsfileResource(tsFileResource, deTsfileResource);
-- }
--
-- @Test
-- public void testSerDeserializeCornerCase() throws IOException {
-- ByteArrayOutputStream outputStream = new ByteArrayOutputStream(0);
-- tsFileResource.setFile(null);
-- tsFileResource.serialize(outputStream);
-- ByteArrayInputStream inputStream = new ByteArrayInputStream(outputStream.toByteArray());
-- TsFileResource deTsfileResource = TsFileResource.deSerialize(inputStream);
-- assertTsfileResource(tsFileResource, deTsfileResource);
-- }
--
-- public static void assertTsfileResource(TsFileResource tsFileResource,
-- TsFileResource deTsfileResource) {
-- assertEquals(tsFileResource.getBaseDirIndex(), deTsfileResource.getBaseDirIndex());
-- assertEquals(tsFileResource.getFile(), deTsfileResource.getFile());
-- assertEquals(tsFileResource.getOverflowChangeType(), deTsfileResource.getOverflowChangeType());
-- assertEquals(tsFileResource.getStartTimeMap(), deTsfileResource.getStartTimeMap());
-- assertEquals(tsFileResource.getEndTimeMap(), deTsfileResource.getEndTimeMap());
-- assertEquals(tsFileResource.getMergeChanged(), deTsfileResource.getMergeChanged());
-- }
--}
diff --cc iotdb/src/test/java/org/apache/iotdb/db/engine/filenodeV2/FileNodeManagerBenchmark.java
index fdd0fe7,fdd0fe7..20e72c3
--- a/iotdb/src/test/java/org/apache/iotdb/db/engine/filenodeV2/FileNodeManagerBenchmark.java
+++ b/iotdb/src/test/java/org/apache/iotdb/db/engine/filenodeV2/FileNodeManagerBenchmark.java
@@@ -21,11 -21,11 +21,12 @@@ package org.apache.iotdb.db.engine.file
import java.io.IOException;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicLong;
--import org.apache.iotdb.db.engine.filenode.FileNodeManager;
++import org.apache.iotdb.db.engine.filenodeV2.FileNodeManagerV2;
import org.apache.iotdb.db.exception.FileNodeManagerException;
import org.apache.iotdb.db.exception.MetadataArgsErrorException;
import org.apache.iotdb.db.exception.PathErrorException;
import org.apache.iotdb.db.metadata.MManager;
++import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
import org.apache.iotdb.db.sync.test.RandomNum;
import org.apache.iotdb.db.utils.EnvironmentUtils;
import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
@@@ -113,7 -113,7 +114,7 @@@ public class FileNodeManagerBenchmark
long time = RandomNum.getRandomLong(1, seed);
String deltaObject = devices[(int) (time % numOfDevice)];
TSRecord tsRecord = getRecord(deltaObject, time);
-- FileNodeManager.getInstance().insert(tsRecord, true);
++ FileNodeManagerV2.getInstance().insert(new InsertPlan(tsRecord));
}
} catch (FileNodeManagerException e) {
e.printStackTrace();
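The benchmark change above is the write-path migration in miniature: instead of FileNodeManager.getInstance().insert(record, isMonitor), the V2 manager takes an InsertPlan wrapping the TSRecord. A sketch of the new call, using only types that appear in this diff:

    import org.apache.iotdb.db.engine.filenodeV2.FileNodeManagerV2;
    import org.apache.iotdb.db.exception.FileNodeManagerException;
    import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
    import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
    import org.apache.iotdb.tsfile.write.record.TSRecord;
    import org.apache.iotdb.tsfile.write.record.datapoint.DataPoint;

    public class InsertMigrationSketch {
      // Old path: FileNodeManager.getInstance().insert(record, /* isMonitor */ true);
      // New path: wrap the record in an InsertPlan and hand it to the V2 manager.
      static void write(String device, long time) throws FileNodeManagerException {
        TSRecord record = new TSRecord(time, device);
        record.addTuple(DataPoint.getDataPoint(TSDataType.INT64, "s0", String.valueOf(time)));
        FileNodeManagerV2.getInstance().insert(new InsertPlan(record));
      }
    }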
diff --cc iotdb/src/test/java/org/apache/iotdb/db/engine/filenodeV2/FileNodeProcessorV2Test.java
index 31796c6,31796c6..8c7b761
--- a/iotdb/src/test/java/org/apache/iotdb/db/engine/filenodeV2/FileNodeProcessorV2Test.java
+++ b/iotdb/src/test/java/org/apache/iotdb/db/engine/filenodeV2/FileNodeProcessorV2Test.java
@@@ -20,6 -20,6 +20,7 @@@ package org.apache.iotdb.db.engine.file
import org.apache.iotdb.db.engine.MetadataManagerHelper;
import org.apache.iotdb.db.engine.querycontext.QueryDataSourceV2;
++import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
import org.apache.iotdb.db.utils.EnvironmentUtils;
import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
import org.apache.iotdb.tsfile.write.record.TSRecord;
@@@ -56,7 -56,7 +57,7 @@@ public class FileNodeProcessorV2Test
for (int j = 1; j <= 100; j++) {
TSRecord record = new TSRecord(j, deviceId);
record.addTuple(DataPoint.getDataPoint(TSDataType.INT32, measurementId, String.valueOf(j)));
-- processor.insert(record);
++ processor.insert(new InsertPlan(record));
processor.asyncForceClose();
}
@@@ -75,7 -75,7 +76,7 @@@
for (int j = 21; j <= 30; j++) {
TSRecord record = new TSRecord(j, deviceId);
record.addTuple(DataPoint.getDataPoint(TSDataType.INT32, measurementId, String.valueOf(j)));
-- processor.insert(record);
++ processor.insert(new InsertPlan(record));
processor.asyncForceClose();
}
processor.syncCloseFileNode();
@@@ -83,7 -83,7 +84,7 @@@
for (int j = 10; j >= 1; j--) {
TSRecord record = new TSRecord(j, deviceId);
record.addTuple(DataPoint.getDataPoint(TSDataType.INT32, measurementId, String.valueOf(j)));
-- processor.insert(record);
++ processor.insert(new InsertPlan(record));
processor.asyncForceClose();
}
diff --cc iotdb/src/test/java/org/apache/iotdb/db/engine/filenodeV2/UnsealedTsFileProcessorV2Test.java
index 7950a64,7950a64..4620781
--- a/iotdb/src/test/java/org/apache/iotdb/db/engine/filenodeV2/UnsealedTsFileProcessorV2Test.java
+++ b/iotdb/src/test/java/org/apache/iotdb/db/engine/filenodeV2/UnsealedTsFileProcessorV2Test.java
@@@ -31,6 -31,6 +31,7 @@@ import java.util.Map.Entry
import org.apache.iotdb.db.engine.MetadataManagerHelper;
import org.apache.iotdb.db.engine.querycontext.ReadOnlyMemChunk;
import org.apache.iotdb.db.engine.version.SysTimeVersionController;
++import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
import org.apache.iotdb.db.utils.EnvironmentUtils;
import org.apache.iotdb.db.utils.FileSchemaUtils;
import org.apache.iotdb.db.utils.TimeValuePair;
@@@ -81,7 -81,7 +82,7 @@@ public class UnsealedTsFileProcessorV2T
for (int i = 1; i <= 100; i++) {
TSRecord record = new TSRecord(i, deviceId);
record.addTuple(DataPoint.getDataPoint(dataType, measurementId, String.valueOf(i)));
-- processor.insert(record);
++ processor.insert(new InsertPlan(record));
}
// query data in memory
@@@ -126,7 -126,7 +127,7 @@@
for (int i = 1; i <= 10; i++) {
TSRecord record = new TSRecord(i, deviceId);
record.addTuple(DataPoint.getDataPoint(dataType, measurementId, String.valueOf(i)));
-- processor.insert(record);
++ processor.insert(new InsertPlan(record));
}
processor.asyncFlush();
}
@@@ -167,7 -167,7 +168,7 @@@
for (int i = 1; i <= 100; i++) {
TSRecord record = new TSRecord(i, deviceId);
record.addTuple(DataPoint.getDataPoint(dataType, measurementId, String.valueOf(i)));
-- processor.insert(record);
++ processor.insert(new InsertPlan(record));
}
// query data in memory
diff --cc iotdb/src/test/java/org/apache/iotdb/db/engine/memcontrol/OverflowFileSizeControlTest.java
index 48557b7,48557b7..0000000
deleted file mode 100644,100644
--- a/iotdb/src/test/java/org/apache/iotdb/db/engine/memcontrol/OverflowFileSizeControlTest.java
+++ /dev/null
@@@ -1,145 -1,145 +1,0 @@@
--/**
-- * Licensed to the Apache Software Foundation (ASF) under one
-- * or more contributor license agreements. See the NOTICE file
-- * distributed with this work for additional information
-- * regarding copyright ownership. The ASF licenses this file
-- * to you under the Apache License, Version 2.0 (the
-- * "License"); you may not use this file except in compliance
-- * with the License. You may obtain a copy of the License at
-- *
-- * http://www.apache.org/licenses/LICENSE-2.0
-- *
-- * Unless required by applicable law or agreed to in writing,
-- * software distributed under the License is distributed on an
-- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-- * KIND, either express or implied. See the License for the
-- * specific language governing permissions and limitations
-- * under the License.
-- */
--package org.apache.iotdb.db.engine.memcontrol;
--
--import static org.junit.Assert.assertTrue;
--import static org.junit.Assert.fail;
--
--import java.io.IOException;
--import java.util.HashMap;
--import java.util.Map;
--import org.apache.iotdb.db.conf.IoTDBConfig;
--import org.apache.iotdb.db.conf.IoTDBDescriptor;
--import org.apache.iotdb.db.engine.MetadataManagerHelper;
--import org.apache.iotdb.db.engine.bufferwrite.Action;
--import org.apache.iotdb.db.engine.bufferwrite.ActionException;
--import org.apache.iotdb.db.engine.bufferwrite.FileNodeConstants;
--import org.apache.iotdb.db.engine.version.SysTimeVersionController;
--import org.apache.iotdb.db.engine.overflow.io.OverflowProcessor;
--import org.apache.iotdb.db.exception.OverflowProcessorException;
--import org.apache.iotdb.db.exception.ProcessorException;
--import org.apache.iotdb.db.utils.EnvironmentUtils;
--import org.apache.iotdb.db.utils.FileSchemaUtils;
--import org.apache.iotdb.db.utils.MemUtils;
--import org.apache.iotdb.tsfile.common.conf.TSFileConfig;
--import org.apache.iotdb.tsfile.common.conf.TSFileDescriptor;
--import org.apache.iotdb.tsfile.exception.write.WriteProcessException;
--import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
--import org.apache.iotdb.tsfile.write.record.TSRecord;
--import org.apache.iotdb.tsfile.write.record.datapoint.DataPoint;
--import org.junit.After;
--import org.junit.Before;
--import org.junit.Test;
--
--public class OverflowFileSizeControlTest {
--
-- private String nameSpacePath = "nsp";
-- private Map<String, Action> parameters = null;
-- private OverflowProcessor ofprocessor = null;
-- private TSFileConfig tsconfig = TSFileDescriptor.getInstance().getConfig();
-- private String deviceId = "root.vehicle.d0";
-- private String[] measurementIds = {"s0", "s1", "s2", "s3", "s4", "s5"};
-- private TSDataType[] dataTypes = {TSDataType.INT32, TSDataType.INT64, TSDataType.FLOAT,
-- TSDataType.DOUBLE,
-- TSDataType.BOOLEAN, TSDataType.TEXT};
--
-- private IoTDBConfig dbConfig = IoTDBDescriptor.getInstance().getConfig();
-- private long overflowFileSize;
-- private int groupSize;
--
-- private boolean skip = true;
--
-- private Action overflowflushaction = new Action() {
--
-- @Override
-- public void act() throws ActionException {
-- }
-- };
--
-- private Action filenodeflushaction = new Action() {
--
-- @Override
-- public void act() throws ActionException {
-- }
-- };
--
-- private Action filenodemanagerbackupaction = new Action() {
--
-- @Override
-- public void act() throws ActionException {
-- }
-- };
--
-- private Action filenodemanagerflushaction = new Action() {
--
-- @Override
-- public void act() throws ActionException {
-- }
-- };
--
-- @Before
-- public void setUp() throws Exception {
-- parameters = new HashMap<>();
-- parameters.put(FileNodeConstants.OVERFLOW_FLUSH_ACTION, overflowflushaction);
-- parameters.put(FileNodeConstants.FILENODE_PROCESSOR_FLUSH_ACTION, filenodeflushaction);
--
-- overflowFileSize = dbConfig.getOverflowFileSizeThreshold();
-- groupSize = tsconfig.groupSizeInByte;
-- dbConfig.setOverflowFileSizeThreshold(10 * 1024 * 1024);
-- tsconfig.groupSizeInByte = 1024 * 1024;
--
-- MetadataManagerHelper.initMetadata();
-- }
--
-- @After
-- public void tearDown() throws Exception {
-- dbConfig.setOverflowFileSizeThreshold(overflowFileSize);
-- tsconfig.groupSizeInByte = groupSize;
-- EnvironmentUtils.cleanEnv();
-- }
--
-- @Test
-- public void testInsert()
-- throws InterruptedException, IOException, WriteProcessException, ProcessorException {
-- if (skip) {
-- return;
-- }
-- // insert one point: int
-- try {
-- ofprocessor = new OverflowProcessor(nameSpacePath, parameters,
-- FileSchemaUtils.constructFileSchema(deviceId), SysTimeVersionController.INSTANCE);
-- for (int i = 1; i < 1000000; i++) {
-- TSRecord record = new TSRecord(i, deviceId);
-- record.addTuple(DataPoint.getDataPoint(dataTypes[0], measurementIds[0], String.valueOf(i)));
-- ofprocessor.insert(record);
-- if (i % 100000 == 0) {
-- System.out.println(i + "," + MemUtils.bytesCntToStr(ofprocessor.getFileSize()));
-- }
-- }
-- // wait to flushMetadata
-- Thread.sleep(1000);
-- ofprocessor.close();
-- assertTrue(ofprocessor.getFileSize() < dbConfig.getOverflowFileSizeThreshold());
-- fail("Method unimplemented");
-- } catch (OverflowProcessorException e) {
-- e.printStackTrace();
-- fail(e.getMessage());
-- }
--
-- }
--}
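Both removed memcontrol tests shared one shape: lower the size threshold, write a large batch, and assert the measured size stays under the threshold (note both were disabled via the skip flag and never actually ran). A self-contained sketch of that shape, with a plain byte buffer standing in for the overflow file:

    import java.io.ByteArrayOutputStream;
    import java.nio.charset.StandardCharsets;

    public class SizeControlSketch {
      public static void main(String[] args) {
        final int threshold = 10 * 1024 * 1024; // analogue of overflowFileSizeThreshold
        ByteArrayOutputStream sink = new ByteArrayOutputStream(); // stands in for the file
        for (int i = 1; i < 1000000; i++) {
          byte[] point = (i + ",42\n").getBytes(StandardCharsets.UTF_8);
          sink.write(point, 0, point.length);
          if (sink.size() >= threshold) {
            sink.reset(); // a real processor would flush to disk here instead
          }
        }
        // The invariant the removed tests asserted: size stays under the threshold.
        if (sink.size() >= threshold) {
          throw new AssertionError("size exceeded threshold");
        }
      }
    }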
diff --cc iotdb/src/test/java/org/apache/iotdb/db/engine/memcontrol/OverflowMetaSizeControlTest.java
index 56afc32,56afc32..0000000
deleted file mode 100644,100644
--- a/iotdb/src/test/java/org/apache/iotdb/db/engine/memcontrol/OverflowMetaSizeControlTest.java
+++ /dev/null
@@@ -1,146 -1,146 +1,0 @@@
--/**
-- * Licensed to the Apache Software Foundation (ASF) under one
-- * or more contributor license agreements. See the NOTICE file
-- * distributed with this work for additional information
-- * regarding copyright ownership. The ASF licenses this file
-- * to you under the Apache License, Version 2.0 (the
-- * "License"); you may not use this file except in compliance
-- * with the License. You may obtain a copy of the License at
-- *
-- * http://www.apache.org/licenses/LICENSE-2.0
-- *
-- * Unless required by applicable law or agreed to in writing,
-- * software distributed under the License is distributed on an
-- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-- * KIND, either express or implied. See the License for the
-- * specific language governing permissions and limitations
-- * under the License.
-- */
--package org.apache.iotdb.db.engine.memcontrol;
--
--import static org.junit.Assert.assertTrue;
--import static org.junit.Assert.fail;
--
--import java.io.IOException;
--import java.util.HashMap;
--import java.util.Map;
--import org.apache.iotdb.db.conf.IoTDBConfig;
--import org.apache.iotdb.db.conf.IoTDBDescriptor;
--import org.apache.iotdb.db.engine.MetadataManagerHelper;
--import org.apache.iotdb.db.engine.bufferwrite.Action;
--import org.apache.iotdb.db.engine.bufferwrite.ActionException;
--import org.apache.iotdb.db.engine.bufferwrite.FileNodeConstants;
--import org.apache.iotdb.db.engine.version.SysTimeVersionController;
--import org.apache.iotdb.db.engine.overflow.io.OverflowProcessor;
--import org.apache.iotdb.db.exception.OverflowProcessorException;
--import org.apache.iotdb.db.exception.ProcessorException;
--import org.apache.iotdb.db.utils.EnvironmentUtils;
--import org.apache.iotdb.db.utils.FileSchemaUtils;
--import org.apache.iotdb.db.utils.MemUtils;
--import org.apache.iotdb.tsfile.common.conf.TSFileConfig;
--import org.apache.iotdb.tsfile.common.conf.TSFileDescriptor;
--import org.apache.iotdb.tsfile.exception.write.WriteProcessException;
--import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
--import org.apache.iotdb.tsfile.write.record.TSRecord;
--import org.apache.iotdb.tsfile.write.record.datapoint.DataPoint;
--import org.junit.After;
--import org.junit.Before;
--import org.junit.Test;
--
--public class OverflowMetaSizeControlTest {
--
-- private String nameSpacePath = "nsp";
-- private Map<String, Action> parameters = null;
-- private OverflowProcessor ofprocessor = null;
-- private TSFileConfig tsconfig = TSFileDescriptor.getInstance().getConfig();
-- private String deviceId = "root.vehicle.d0";
-- private String[] measurementIds = {"s0", "s1", "s2", "s3", "s4", "s5"};
-- private TSDataType[] dataTypes = {TSDataType.INT32, TSDataType.INT64, TSDataType.FLOAT,
-- TSDataType.DOUBLE,
-- TSDataType.BOOLEAN, TSDataType.TEXT};
--
-- private IoTDBConfig dbConfig = IoTDBDescriptor.getInstance().getConfig();
-- private long overflowFileSize;
-- private int groupSize;
--
-- private boolean skip = true;
--
-- private Action overflowflushaction = new Action() {
--
-- @Override
-- public void act() throws ActionException {
-- }
-- };
--
-- private Action filenodeflushaction = new Action() {
--
-- @Override
-- public void act() throws ActionException {
-- }
-- };
--
-- private Action filenodemanagerbackupaction = new Action() {
--
-- @Override
-- public void act() throws ActionException {
-- }
-- };
--
-- private Action filenodemanagerflushaction = new Action() {
--
-- @Override
-- public void act() throws ActionException {
-- }
-- };
--
-- @Before
-- public void setUp() throws Exception {
-- parameters = new HashMap<>();
-- parameters.put(FileNodeConstants.OVERFLOW_FLUSH_ACTION, overflowflushaction);
-- parameters.put(FileNodeConstants.FILENODE_PROCESSOR_FLUSH_ACTION, filenodeflushaction);
--
-- overflowFileSize = dbConfig.getOverflowMetaSizeThreshold();
-- groupSize = tsconfig.groupSizeInByte;
-- dbConfig.setOverflowMetaSizeThreshold(3 * 1024 * 1024);
-- tsconfig.groupSizeInByte = 1024 * 1024;
--
-- MetadataManagerHelper.initMetadata();
-- }
--
-- @After
-- public void tearDown() throws Exception {
-- dbConfig.setOverflowMetaSizeThreshold(overflowFileSize);
-- tsconfig.groupSizeInByte = groupSize;
-- EnvironmentUtils.cleanEnv();
-- }
--
-- @Test
-- public void testInsert()
-- throws InterruptedException, IOException, WriteProcessException, ProcessorException {
-- if (skip) {
-- return;
-- }
-- // insert one point: int
-- try {
-- ofprocessor = new OverflowProcessor(nameSpacePath, parameters,
-- FileSchemaUtils.constructFileSchema(deviceId), SysTimeVersionController.INSTANCE);
-- for (int i = 1; i < 1000000; i++) {
-- TSRecord record = new TSRecord(i, deviceId);
-- record.addTuple(DataPoint.getDataPoint(dataTypes[0], measurementIds[0], String.valueOf(i)));
-- ofprocessor.insert(record);
-- if (i % 100000 == 0) {
-- System.out.println(i + "," + MemUtils.bytesCntToStr(ofprocessor.getMetaSize()));
-- }
-- }
-- // wait to flushMetadata
-- Thread.sleep(1000);
-- assertTrue(ofprocessor.getMetaSize() < dbConfig.getOverflowMetaSizeThreshold());
-- ofprocessor.close();
-- fail("Method unimplemented");
-- } catch (OverflowProcessorException e) {
-- e.printStackTrace();
-- fail(e.getMessage());
-- }
--
-- }
--}
diff --cc iotdb/src/test/java/org/apache/iotdb/db/engine/modification/DeletionFileNodeTest.java
index 55f505f,03d3e57..41a5b7a
--- a/iotdb/src/test/java/org/apache/iotdb/db/engine/modification/DeletionFileNodeTest.java
+++ b/iotdb/src/test/java/org/apache/iotdb/db/engine/modification/DeletionFileNodeTest.java
@@@ -30,15 -30,15 +30,18 @@@ import java.util.Collection
import java.util.Collections;
import java.util.Iterator;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.conf.directories.Directories;
-import org.apache.iotdb.db.engine.filenode.FileNodeManager;
+import org.apache.iotdb.db.conf.directories.DirectoryManager;
- import org.apache.iotdb.db.engine.filenode.FileNodeManager;
++import org.apache.iotdb.db.engine.filenodeV2.FileNodeManagerV2;
import org.apache.iotdb.db.engine.modification.io.LocalTextModificationAccessor;
import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
++import org.apache.iotdb.db.engine.querycontext.QueryDataSourceV2;
import org.apache.iotdb.db.exception.FileNodeManagerException;
import org.apache.iotdb.db.exception.MetadataArgsErrorException;
import org.apache.iotdb.db.exception.PathErrorException;
++import org.apache.iotdb.db.exception.ProcessorException;
import org.apache.iotdb.db.exception.StartupException;
import org.apache.iotdb.db.metadata.MManager;
++import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
import org.apache.iotdb.db.query.control.QueryResourceManager;
import org.apache.iotdb.db.utils.EnvironmentUtils;
import org.apache.iotdb.db.utils.TimeValuePair;
@@@ -70,14 -70,14 +73,14 @@@ public class DeletionFileNodeTest
@Before
public void setup() throws MetadataArgsErrorException,
-- PathErrorException, IOException, FileNodeManagerException, StartupException {
++ PathErrorException, IOException, FileNodeManagerException, StartupException, ProcessorException {
EnvironmentUtils.envSetUp();
MManager.getInstance().setStorageLevelToMTree(processorName);
for (int i = 0; i < 10; i++) {
MManager.getInstance().addPathToMTree(processorName + "." + measurements[i], dataType,
encoding);
-- FileNodeManager.getInstance()
++ FileNodeManagerV2.getInstance()
.addTimeSeries(new Path(processorName, measurements[i]), TSDataType.valueOf(dataType),
TSEncoding.valueOf(encoding), CompressionType.valueOf(TSFileConfig.compressor),
Collections.emptyMap());
@@@ -91,29 -91,29 +94,29 @@@
@Test
public void testDeleteInBufferWriteCache() throws
-- FileNodeManagerException {
++ FileNodeManagerException, ProcessorException {
for (int i = 1; i <= 100; i++) {
TSRecord record = new TSRecord(i, processorName);
for (int j = 0; j < 10; j++) {
record.addTuple(new DoubleDataPoint(measurements[j], i * 1.0));
}
-- FileNodeManager.getInstance().insert(record, false);
++ FileNodeManagerV2.getInstance().insert(new InsertPlan(record));
}
-- FileNodeManager.getInstance().delete(processorName, measurements[3], 50);
-- FileNodeManager.getInstance().delete(processorName, measurements[4], 50);
-- FileNodeManager.getInstance().delete(processorName, measurements[5], 30);
-- FileNodeManager.getInstance().delete(processorName, measurements[5], 50);
++ FileNodeManagerV2.getInstance().delete(processorName, measurements[3], 50);
++ FileNodeManagerV2.getInstance().delete(processorName, measurements[4], 50);
++ FileNodeManagerV2.getInstance().delete(processorName, measurements[5], 30);
++ FileNodeManagerV2.getInstance().delete(processorName, measurements[5], 50);
SingleSeriesExpression expression = new SingleSeriesExpression(new Path(processorName,
measurements[5]), null);
QueryResourceManager.getInstance().beginQueryOfGivenExpression(TEST_QUERY_JOB_ID, expression);
-- QueryDataSource dataSource = QueryResourceManager.getInstance()
++ QueryDataSourceV2 dataSource = QueryResourceManager.getInstance()
.getQueryDataSource(expression.getSeriesPath(), TEST_QUERY_CONTEXT);
Iterator<TimeValuePair> timeValuePairs =
-- dataSource.getSeqDataSource().getReadableChunk().getIterator();
++ dataSource.getSeqResources().get(0).getReadOnlyMemChunk().getIterator();
int count = 0;
while (timeValuePairs.hasNext()) {
timeValuePairs.next();
@@@ -130,13 -130,13 +133,13 @@@
for (int j = 0; j < 10; j++) {
record.addTuple(new DoubleDataPoint(measurements[j], i * 1.0));
}
-- FileNodeManager.getInstance().insert(record, false);
++ FileNodeManagerV2.getInstance().insert(new InsertPlan(record));
}
-- FileNodeManager.getInstance().closeAll();
++ FileNodeManagerV2.getInstance().syncCloseAllProcessor();
-- FileNodeManager.getInstance().delete(processorName, measurements[5], 50);
-- FileNodeManager.getInstance().delete(processorName, measurements[4], 40);
-- FileNodeManager.getInstance().delete(processorName, measurements[3], 30);
++ FileNodeManagerV2.getInstance().delete(processorName, measurements[5], 50);
++ FileNodeManagerV2.getInstance().delete(processorName, measurements[4], 40);
++ FileNodeManagerV2.getInstance().delete(processorName, measurements[3], 30);
Modification[] realModifications = new Modification[]{
new Deletion(processorName + "." + measurements[5], 102, 50),
@@@ -166,16 -166,16 +169,16 @@@
}
@Test
-- public void testDeleteInOverflowCache() throws FileNodeManagerException {
++ public void testDeleteInOverflowCache() throws FileNodeManagerException, ProcessorException {
// insert into BufferWrite
for (int i = 101; i <= 200; i++) {
TSRecord record = new TSRecord(i, processorName);
for (int j = 0; j < 10; j++) {
record.addTuple(new DoubleDataPoint(measurements[j], i * 1.0));
}
-- FileNodeManager.getInstance().insert(record, false);
++ FileNodeManagerV2.getInstance().insert(new InsertPlan(record));
}
-- FileNodeManager.getInstance().closeAll();
++ FileNodeManagerV2.getInstance().syncCloseAllProcessor();
// insert into Overflow
for (int i = 1; i <= 100; i++) {
@@@ -183,23 -183,23 +186,23 @@@
for (int j = 0; j < 10; j++) {
record.addTuple(new DoubleDataPoint(measurements[j], i * 1.0));
}
-- FileNodeManager.getInstance().insert(record, false);
++ FileNodeManagerV2.getInstance().insert(new InsertPlan(record));
}
-- FileNodeManager.getInstance().delete(processorName, measurements[3], 50);
-- FileNodeManager.getInstance().delete(processorName, measurements[4], 50);
-- FileNodeManager.getInstance().delete(processorName, measurements[5], 30);
-- FileNodeManager.getInstance().delete(processorName, measurements[5], 50);
++ FileNodeManagerV2.getInstance().delete(processorName, measurements[3], 50);
++ FileNodeManagerV2.getInstance().delete(processorName, measurements[4], 50);
++ FileNodeManagerV2.getInstance().delete(processorName, measurements[5], 30);
++ FileNodeManagerV2.getInstance().delete(processorName, measurements[5], 50);
SingleSeriesExpression expression = new SingleSeriesExpression(new Path(processorName,
measurements[5]), null);
QueryResourceManager.getInstance().beginQueryOfGivenExpression(TEST_QUERY_JOB_ID, expression);
-- QueryDataSource dataSource = QueryResourceManager.getInstance()
++ QueryDataSourceV2 dataSource = QueryResourceManager.getInstance()
.getQueryDataSource(expression.getSeriesPath(), TEST_QUERY_CONTEXT);
Iterator<TimeValuePair> timeValuePairs =
-- dataSource.getOverflowSeriesDataSource().getReadableMemChunk().getIterator();
++ dataSource.getSeqResources().get(0).getReadOnlyMemChunk().getIterator();
int count = 0;
while (timeValuePairs.hasNext()) {
timeValuePairs.next();
@@@ -218,9 -218,9 +221,9 @@@
for (int j = 0; j < 10; j++) {
record.addTuple(new DoubleDataPoint(measurements[j], i * 1.0));
}
-- FileNodeManager.getInstance().insert(record, false);
++ FileNodeManagerV2.getInstance().insert(new InsertPlan(record));
}
-- FileNodeManager.getInstance().closeAll();
++ FileNodeManagerV2.getInstance().syncCloseAllProcessor();
// insert into Overflow
for (int i = 1; i <= 100; i++) {
@@@ -228,13 -228,13 +231,13 @@@
for (int j = 0; j < 10; j++) {
record.addTuple(new DoubleDataPoint(measurements[j], i * 1.0));
}
-- FileNodeManager.getInstance().insert(record, false);
++ FileNodeManagerV2.getInstance().insert(new InsertPlan(record));
}
-- FileNodeManager.getInstance().closeAll();
++ FileNodeManagerV2.getInstance().syncCloseAllProcessor();
-- FileNodeManager.getInstance().delete(processorName, measurements[5], 50);
-- FileNodeManager.getInstance().delete(processorName, measurements[4], 40);
-- FileNodeManager.getInstance().delete(processorName, measurements[3], 30);
++ FileNodeManagerV2.getInstance().delete(processorName, measurements[5], 50);
++ FileNodeManagerV2.getInstance().delete(processorName, measurements[4], 40);
++ FileNodeManagerV2.getInstance().delete(processorName, measurements[3], 30);
Modification[] realModifications = new Modification[]{
new Deletion(processorName + "." + measurements[5], 103, 50),
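After this commit the deletion tests all follow one sequence against the V2 manager: insert, optionally syncCloseAllProcessor() to seal the working file, then delete up to a timestamp and verify through the query data source. A condensed sketch, with import paths assumed from the surrounding code:

    import org.apache.iotdb.db.engine.filenodeV2.FileNodeManagerV2;
    import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
    import org.apache.iotdb.tsfile.write.record.TSRecord;
    import org.apache.iotdb.tsfile.write.record.datapoint.DoubleDataPoint;

    public class DeletionSketch {
      static void insertThenDelete(String storageGroup, String measurement) throws Exception {
        for (int i = 1; i <= 100; i++) {
          TSRecord record = new TSRecord(i, storageGroup);
          record.addTuple(new DoubleDataPoint(measurement, i * 1.0));
          FileNodeManagerV2.getInstance().insert(new InsertPlan(record));
        }
        // Seal the working file so the deletion lands in a mods file, not the memtable.
        FileNodeManagerV2.getInstance().syncCloseAllProcessor();
        // Remove all points of this measurement with timestamp <= 50.
        FileNodeManagerV2.getInstance().delete(storageGroup, measurement, 50);
      }
    }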
diff --cc iotdb/src/test/java/org/apache/iotdb/db/engine/modification/DeletionQueryTest.java
index 3e9bb99,3e9bb99..c2ff798
--- a/iotdb/src/test/java/org/apache/iotdb/db/engine/modification/DeletionQueryTest.java
+++ b/iotdb/src/test/java/org/apache/iotdb/db/engine/modification/DeletionQueryTest.java
@@@ -26,13 -26,13 +26,15 @@@ import java.io.IOException
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
--import org.apache.iotdb.db.engine.filenode.FileNodeManager;
++import org.apache.iotdb.db.engine.filenodeV2.FileNodeManagerV2;
import org.apache.iotdb.db.engine.memcontrol.BasicMemController.UsageLevel;
import org.apache.iotdb.db.exception.FileNodeManagerException;
import org.apache.iotdb.db.exception.MetadataArgsErrorException;
import org.apache.iotdb.db.exception.PathErrorException;
++import org.apache.iotdb.db.exception.ProcessorException;
import org.apache.iotdb.db.exception.StartupException;
import org.apache.iotdb.db.metadata.MManager;
++import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
import org.apache.iotdb.db.query.executor.EngineQueryRouter;
import org.apache.iotdb.db.utils.EnvironmentUtils;
import org.apache.iotdb.tsfile.common.conf.TSFileConfig;
@@@ -65,14 -65,14 +67,14 @@@ public class DeletionQueryTest
@Before
public void setup() throws MetadataArgsErrorException,
-- PathErrorException, IOException, FileNodeManagerException, StartupException {
++ PathErrorException, IOException, FileNodeManagerException, StartupException, ProcessorException {
EnvironmentUtils.envSetUp();
MManager.getInstance().setStorageLevelToMTree(processorName);
for (int i = 0; i < 10; i++) {
MManager.getInstance().addPathToMTree(processorName + "." + measurements[i], dataType,
encoding);
-- FileNodeManager.getInstance()
++ FileNodeManagerV2.getInstance()
.addTimeSeries(new Path(processorName, measurements[i]), TSDataType.valueOf(dataType),
TSEncoding.valueOf(encoding), CompressionType.valueOf(TSFileConfig.compressor),
Collections.emptyMap());
@@@ -93,13 -93,13 +95,13 @@@
for (int j = 0; j < 10; j++) {
record.addTuple(new DoubleDataPoint(measurements[j], i * 1.0));
}
-- FileNodeManager.getInstance().insert(record, false);
++ FileNodeManagerV2.getInstance().insert(new InsertPlan(record));
}
-- FileNodeManager.getInstance().delete(processorName, measurements[3], 50);
-- FileNodeManager.getInstance().delete(processorName, measurements[4], 50);
-- FileNodeManager.getInstance().delete(processorName, measurements[5], 30);
-- FileNodeManager.getInstance().delete(processorName, measurements[5], 50);
++ FileNodeManagerV2.getInstance().delete(processorName, measurements[3], 50);
++ FileNodeManagerV2.getInstance().delete(processorName, measurements[4], 50);
++ FileNodeManagerV2.getInstance().delete(processorName, measurements[5], 30);
++ FileNodeManagerV2.getInstance().delete(processorName, measurements[5], 50);
List<Path> pathList = new ArrayList<>();
pathList.add(new Path(processorName, measurements[3]));
@@@ -124,13 -124,13 +126,13 @@@
for (int j = 0; j < 10; j++) {
record.addTuple(new DoubleDataPoint(measurements[j], i * 1.0));
}
-- FileNodeManager.getInstance().insert(record, false);
++ FileNodeManagerV2.getInstance().insert(new InsertPlan(record));
}
-- FileNodeManager.getInstance().closeAll();
++ FileNodeManagerV2.getInstance().syncCloseAllProcessor();
-- FileNodeManager.getInstance().delete(processorName, measurements[5], 50);
-- FileNodeManager.getInstance().delete(processorName, measurements[4], 40);
-- FileNodeManager.getInstance().delete(processorName, measurements[3], 30);
++ FileNodeManagerV2.getInstance().delete(processorName, measurements[5], 50);
++ FileNodeManagerV2.getInstance().delete(processorName, measurements[4], 40);
++ FileNodeManagerV2.getInstance().delete(processorName, measurements[3], 30);
List<Path> pathList = new ArrayList<>();
pathList.add(new Path(processorName, measurements[3]));
@@@ -156,9 -156,9 +158,9 @@@
for (int j = 0; j < 10; j++) {
record.addTuple(new DoubleDataPoint(measurements[j], i * 1.0));
}
-- FileNodeManager.getInstance().insert(record, false);
++ FileNodeManagerV2.getInstance().insert(new InsertPlan(record));
}
-- FileNodeManager.getInstance().closeAll();
++ FileNodeManagerV2.getInstance().syncCloseAllProcessor();
// insert into Overflow
for (int i = 1; i <= 100; i++) {
@@@ -166,13 -166,13 +168,13 @@@
for (int j = 0; j < 10; j++) {
record.addTuple(new DoubleDataPoint(measurements[j], i * 1.0));
}
-- FileNodeManager.getInstance().insert(record, false);
++ FileNodeManagerV2.getInstance().insert(new InsertPlan(record));
}
-- FileNodeManager.getInstance().delete(processorName, measurements[3], 50);
-- FileNodeManager.getInstance().delete(processorName, measurements[4], 50);
-- FileNodeManager.getInstance().delete(processorName, measurements[5], 30);
-- FileNodeManager.getInstance().delete(processorName, measurements[5], 50);
++ FileNodeManagerV2.getInstance().delete(processorName, measurements[3], 50);
++ FileNodeManagerV2.getInstance().delete(processorName, measurements[4], 50);
++ FileNodeManagerV2.getInstance().delete(processorName, measurements[5], 30);
++ FileNodeManagerV2.getInstance().delete(processorName, measurements[5], 50);
List<Path> pathList = new ArrayList<>();
pathList.add(new Path(processorName, measurements[3]));
@@@ -198,9 -198,9 +200,9 @@@
for (int j = 0; j < 10; j++) {
record.addTuple(new DoubleDataPoint(measurements[j], i * 1.0));
}
-- FileNodeManager.getInstance().insert(record, false);
++ FileNodeManagerV2.getInstance().insert(new InsertPlan(record));
}
-- FileNodeManager.getInstance().closeAll();
++ FileNodeManagerV2.getInstance().syncCloseAllProcessor();
// insert into Overflow
for (int i = 1; i <= 100; i++) {
@@@ -208,13 -208,13 +210,13 @@@
for (int j = 0; j < 10; j++) {
record.addTuple(new DoubleDataPoint(measurements[j], i * 1.0));
}
-- FileNodeManager.getInstance().insert(record, false);
++ FileNodeManagerV2.getInstance().insert(new InsertPlan(record));
}
-- FileNodeManager.getInstance().closeAll();
++ FileNodeManagerV2.getInstance().syncCloseAllProcessor();
-- FileNodeManager.getInstance().delete(processorName, measurements[5], 50);
-- FileNodeManager.getInstance().delete(processorName, measurements[4], 40);
-- FileNodeManager.getInstance().delete(processorName, measurements[3], 30);
++ FileNodeManagerV2.getInstance().delete(processorName, measurements[5], 50);
++ FileNodeManagerV2.getInstance().delete(processorName, measurements[4], 40);
++ FileNodeManagerV2.getInstance().delete(processorName, measurements[3], 30);
List<Path> pathList = new ArrayList<>();
pathList.add(new Path(processorName, measurements[3]));
@@@ -240,45 -240,45 +242,45 @@@
for (int j = 0; j < 10; j++) {
record.addTuple(new DoubleDataPoint(measurements[j], i * 1.0));
}
-- FileNodeManager.getInstance().insert(record, false);
++ FileNodeManagerV2.getInstance().insert(new InsertPlan(record));
}
-- FileNodeManager.getInstance().delete(processorName, measurements[3], 50);
-- FileNodeManager.getInstance().delete(processorName, measurements[4], 50);
-- FileNodeManager.getInstance().delete(processorName, measurements[5], 30);
-- FileNodeManager.getInstance().delete(processorName, measurements[5], 50);
++ FileNodeManagerV2.getInstance().delete(processorName, measurements[3], 50);
++ FileNodeManagerV2.getInstance().delete(processorName, measurements[4], 50);
++ FileNodeManagerV2.getInstance().delete(processorName, measurements[5], 30);
++ FileNodeManagerV2.getInstance().delete(processorName, measurements[5], 50);
... 1727 lines suppressed ...