You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@iotdb.apache.org by qi...@apache.org on 2022/12/04 14:06:36 UTC
[iotdb] branch master updated: [To rel/1.0] Rename StorageEngineV2 to StorageEngine (#8238)
This is an automated email from the ASF dual-hosted git repository.
qiaojialin pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/iotdb.git
The following commit(s) were added to refs/heads/master by this push:
new fe3c779b8c [To rel/1.0] Rename StorageEngineV2 to StorageEngine (#8238)
fe3c779b8c is described below
commit fe3c779b8c3b63a8b4f3e9a836d4ffb41a10ae26
Author: Haonan <hh...@outlook.com>
AuthorDate: Sun Dec 4 22:06:29 2022 +0800
[To rel/1.0] Rename StorageEngineV2 to StorageEngine (#8238)
---
.../db/integration/IoTDBLoadExternalTsfileIT.java | 1032 ----------
.../IoTDBLoadExternalTsfileWithVirtualSGIT.java | 137 --
.../db/integration/IoTDBNewTsFileCompactionIT.java | 36 +-
.../db/integration/IoTDBRewriteTsFileToolIT.java | 59 +-
.../iotdb/db/integration/IoTDBSnapshotIT.java | 285 ---
.../aligned/IoTDBLoadExternalAlignedTsFileIT.java | 2027 ++++++++++----------
.../org/apache/iotdb/db/conf/IoTDBDescriptor.java | 4 +-
.../db/consensus/DataRegionConsensusImpl.java | 4 +-
.../statemachine/DataRegionStateMachine.java | 4 +-
.../org/apache/iotdb/db/engine/StorageEngine.java | 777 +++++++-
.../apache/iotdb/db/engine/StorageEngineV2.java | 820 --------
.../iotdb/db/engine/snapshot/SnapshotLoader.java | 4 +-
.../iotdb/db/engine/storagegroup/DataRegion.java | 28 +-
.../engine/storagegroup/TimePartitionManager.java | 4 +-
.../db/engine/storagegroup/TsFileProcessor.java | 4 +-
.../storagegroup/timeindex/DeviceTimeIndex.java | 9 +-
.../storagegroup/timeindex/FileTimeIndex.java | 6 +-
.../iotdb/db/localconfignode/LocalConfigNode.java | 4 +-
.../plan/analyze/StandalonePartitionFetcher.java | 4 +-
.../db/mpp/plan/scheduler/StandaloneScheduler.java | 7 +-
.../scheduler/load/LoadTsFileDispatcherImpl.java | 9 +-
.../db/query/control/QueryResourceManager.java | 38 +-
.../groupby/GroupByWithValueFilterDataSet.java | 88 +-
.../groupby/GroupByWithoutValueFilterDataSet.java | 106 +-
.../db/query/executor/AggregationExecutor.java | 124 +-
.../iotdb/db/query/executor/FillQueryExecutor.java | 69 +-
.../iotdb/db/query/executor/LastQueryExecutor.java | 50 +-
.../db/query/executor/RawDataQueryExecutor.java | 138 +-
.../query/timegenerator/ServerTimeGenerator.java | 28 +-
.../java/org/apache/iotdb/db/service/DataNode.java | 11 +-
.../java/org/apache/iotdb/db/service/IoTDB.java | 3 +-
.../apache/iotdb/db/service/IoTDBShutdownHook.java | 4 +-
.../java/org/apache/iotdb/db/service/NewIoTDB.java | 6 +-
.../iotdb/db/service/RegionMigrateService.java | 4 +-
.../impl/DataNodeInternalRPCServiceImpl.java | 8 +-
.../service/thrift/impl/DataNodeRegionManager.java | 4 +-
.../iotdb/db/sync/sender/pipe/TsFilePipe.java | 6 +-
.../sync/transport/client/SyncClientFactory.java | 5 +-
.../iotdb/db/tools/TsFileSplitByPartitionTool.java | 10 +-
.../db/tools/upgrade/TsFileOnlineUpgradeTool.java | 6 +-
.../org/apache/iotdb/db/utils/ThreadUtils.java | 3 +-
.../java/org/apache/iotdb/db/wal/node/WALNode.java | 4 +-
...ageEngineV2Test.java => StorageEngineTest.java} | 14 +-
.../db/engine/storagegroup/DataRegionTest.java | 18 +-
...ocessorV2Test.java => TsFileProcessorTest.java} | 6 +-
.../db/mpp/plan/StandaloneCoordinatorTest.java | 6 +-
.../plan/scheduler/StandaloneSchedulerTest.java | 6 +-
.../iotdb/db/tools/TsFileAndModSettleToolTest.java | 373 ++--
.../apache/iotdb/db/utils/EnvironmentUtils.java | 4 +-
.../apache/iotdb/spark/db/EnvironmentUtils.java | 7 +-
50 files changed, 2114 insertions(+), 4299 deletions(-)
diff --git a/integration/src/test/java/org/apache/iotdb/db/integration/IoTDBLoadExternalTsfileIT.java b/integration/src/test/java/org/apache/iotdb/db/integration/IoTDBLoadExternalTsfileIT.java
deleted file mode 100644
index 68a9097cbf..0000000000
--- a/integration/src/test/java/org/apache/iotdb/db/integration/IoTDBLoadExternalTsfileIT.java
+++ /dev/null
@@ -1,1032 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.iotdb.db.integration;
-
-import org.apache.iotdb.commons.exception.IllegalPathException;
-import org.apache.iotdb.commons.path.PartialPath;
-import org.apache.iotdb.db.conf.IoTDBConfig;
-import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.engine.StorageEngine;
-import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
-import org.apache.iotdb.db.engine.storagegroup.timeindex.TimeIndexLevel;
-import org.apache.iotdb.db.exception.StorageEngineException;
-import org.apache.iotdb.db.utils.EnvironmentUtils;
-import org.apache.iotdb.itbase.category.LocalStandaloneTest;
-import org.apache.iotdb.jdbc.Config;
-
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-@Category({LocalStandaloneTest.class})
-public class IoTDBLoadExternalTsfileIT {
-
- private static final Logger LOGGER = LoggerFactory.getLogger(IoTDBLoadExternalTsfileIT.class);
-
- private static final IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig();
-
- protected static String[] insertSequenceSqls =
- new String[] {
- "CREATE DATABASE root.vehicle",
- "CREATE DATABASE root.test",
- "CREATE TIMESERIES root.vehicle.d0.s0 WITH DATATYPE=INT32, ENCODING=RLE",
- "CREATE TIMESERIES root.vehicle.d0.s1 WITH DATATYPE=TEXT, ENCODING=PLAIN",
- "CREATE TIMESERIES root.vehicle.d1.s2 WITH DATATYPE=FLOAT, ENCODING=RLE",
- "CREATE TIMESERIES root.vehicle.d1.s3 WITH DATATYPE=BOOLEAN, ENCODING=PLAIN",
- "CREATE TIMESERIES root.test.d0.s0 WITH DATATYPE=INT32, ENCODING=RLE",
- "CREATE TIMESERIES root.test.d0.s1 WITH DATATYPE=TEXT, ENCODING=PLAIN",
- "CREATE TIMESERIES root.test.d1.g0.s0 WITH DATATYPE=INT32, ENCODING=RLE",
- "insert into root.vehicle.d0(timestamp,s0) values(10,100)",
- "insert into root.vehicle.d0(timestamp,s0,s1) values(12,101,'102')",
- "insert into root.vehicle.d0(timestamp,s1) values(19,'103')",
- "insert into root.vehicle.d1(timestamp,s2) values(11,104.0)",
- "insert into root.vehicle.d1(timestamp,s2,s3) values(15,105.0,true)",
- "insert into root.vehicle.d1(timestamp,s3) values(17,false)",
- "insert into root.vehicle.d0(timestamp,s0) values(20,1000)",
- "insert into root.test.d0(timestamp,s0) values(10,106)",
- "insert into root.test.d0(timestamp,s0,s1) values(14,107,'108')",
- "insert into root.test.d0(timestamp,s1) values(16,'109')",
- "insert into root.test.d1.g0(timestamp,s0) values(1,110)",
- "insert into root.test.d0(timestamp,s0) values(30,1006)",
- "insert into root.test.d0(timestamp,s0,s1) values(34,1007,'1008')",
- "insert into root.test.d0(timestamp,s1) values(36,'1090')",
- "insert into root.test.d1.g0(timestamp,s0) values(10,1100)",
- "flush",
- "insert into root.test.d0(timestamp,s0) values(150,126)",
- "insert into root.test.d0(timestamp,s0,s1) values(80,127,'128')",
- "insert into root.test.d0(timestamp,s1) values(200,'129')",
- "insert into root.test.d1.g0(timestamp,s0) values(140,430)",
- "insert into root.test.d0(timestamp,s0) values(150,426)",
- "flush"
- };
-
- private static String[] insertUnsequenceSqls =
- new String[] {
- "insert into root.vehicle.d0(timestamp,s0) values(6,120)",
- "insert into root.vehicle.d0(timestamp,s0,s1) values(38,121,'122')",
- "insert into root.vehicle.d0(timestamp,s1) values(9,'123')",
- "insert into root.vehicle.d0(timestamp,s0) values(16,128)",
- "insert into root.vehicle.d0(timestamp,s0,s1) values(18,189,'198')",
- "insert into root.vehicle.d0(timestamp,s1) values(99,'1234')",
- "insert into root.vehicle.d1(timestamp,s2) values(14,1024.0)",
- "insert into root.vehicle.d1(timestamp,s2,s3) values(29,1205.0,true)",
- "insert into root.vehicle.d1(timestamp,s3) values(33,true)",
- "insert into root.test.d0(timestamp,s0) values(45,126)",
- "insert into root.test.d0(timestamp,s0,s1) values(68,127,'128')",
- "insert into root.test.d0(timestamp,s1) values(78,'129')",
- "insert into root.test.d1.g0(timestamp,s0) values(14,430)",
- "flush",
- "insert into root.test.d0(timestamp,s0) values(20,426)",
- "insert into root.test.d0(timestamp,s0,s1) values(13,427,'528')",
- "insert into root.test.d0(timestamp,s1) values(2,'1209')",
- "insert into root.test.d1.g0(timestamp,s0) values(4,330)",
- "flush",
- };
-
- private static String[] deleteTimeseiresSqls =
- new String[] {"delete from root.vehicle.** where time >= 10 and time<=20", "flush"};
-
- private static final String TIMESTAMP_STR = "Time";
- private static final String VEHICLE_D0_S0_STR = "root.vehicle.d0.s0";
- private static final String VEHICLE_D0_S1_STR = "root.vehicle.d0.s1";
- private static final String VEHICLE_D0_S2_STR = "root.vehicle.d1.s2";
- private static final String VEHICLE_D0_S3_STR = "root.vehicle.d1.s3";
- private static final String TEST_D0_S0_STR = "root.test.d0.s0";
- private static final String TEST_D0_S1_STR = "root.test.d0.s1";
- private static final String TEST_D1_STR = "root.test.d1.g0.s0";
-
- protected int prevVirtualPartitionNum;
- protected int prevCompactionThread;
-
- private static String[] deleteSqls =
- new String[] {"DELETE DATABASE root.vehicle", "DELETE DATABASE root.test"};
-
- @Before
- public void setUp() throws Exception {
- prevVirtualPartitionNum = IoTDBDescriptor.getInstance().getConfig().getDataRegionNum();
- IoTDBDescriptor.getInstance().getConfig().setDataRegionNum(1);
- prevCompactionThread = IoTDBDescriptor.getInstance().getConfig().getCompactionThreadCount();
- EnvironmentUtils.envSetUp();
- Class.forName(Config.JDBC_DRIVER_NAME);
- prepareData(insertSequenceSqls);
- }
-
- @After
- public void tearDown() throws Exception {
- EnvironmentUtils.cleanEnv();
- IoTDBDescriptor.getInstance().getConfig().setCompactionThreadCount(prevCompactionThread);
- IoTDBDescriptor.getInstance().getConfig().setDataRegionNum(prevVirtualPartitionNum);
- }
-
- @Test
- public void unloadTsfileTest() throws SQLException {
- try (Connection connection =
- DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
- Statement statement = connection.createStatement()) {
-
- // move root.vehicle
- List<TsFileResource> resources =
- new ArrayList<>(
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.vehicle"))
- .getSequenceFileList());
- assertEquals(1, resources.size());
- File tmpDir =
- new File(
- resources.get(0).getTsFile().getParentFile().getParentFile(),
- "tmp" + File.separator + new PartialPath("root.vehicle"));
- if (!tmpDir.exists()) {
- tmpDir.mkdirs();
- }
- for (TsFileResource resource : resources) {
- statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), tmpDir));
- }
- assertEquals(
- 0,
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.vehicle"))
- .getSequenceFileList()
- .size());
- assertNotNull(tmpDir.listFiles());
- assertEquals(1, tmpDir.listFiles().length >> 1);
-
- // move root.test
- resources =
- new ArrayList<>(
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getSequenceFileList());
- assertEquals(2, resources.size());
- tmpDir =
- new File(
- resources.get(0).getTsFile().getParentFile().getParentFile(),
- "tmp" + File.separator + new PartialPath("root.test"));
- if (!tmpDir.exists()) {
- tmpDir.mkdirs();
- }
- for (TsFileResource resource : resources) {
- // test unload using relative path
- statement.execute(
- String.format("unload '%s' '%s'", "./" + resource.getTsFilePath(), tmpDir));
- }
- assertEquals(
- 0,
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getSequenceFileList()
- .size());
- assertNotNull(tmpDir.listFiles());
- assertEquals(2, tmpDir.listFiles().length >> 1);
- } catch (StorageEngineException | IllegalPathException e) {
- Assert.fail();
- }
- }
-
- @Test
- public void loadSequenceTsfileTest() throws SQLException {
- try (Connection connection =
- DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
- Statement statement = connection.createStatement()) {
-
- // move root.vehicle
- List<TsFileResource> resources =
- new ArrayList<>(
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.vehicle"))
- .getSequenceFileList());
- File tmpDir =
- new File(
- resources
- .get(0)
- .getTsFile()
- .getParentFile()
- .getParentFile()
- .getParentFile()
- .getParentFile(),
- "tmp"
- + File.separator
- + new PartialPath("root.vehicle")
- + File.separator
- + "0"
- + File.separator
- + "0");
- if (!tmpDir.exists()) {
- tmpDir.mkdirs();
- }
- for (TsFileResource resource : resources) {
- statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), tmpDir));
- }
-
- // move root.test
- resources =
- new ArrayList<>(
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getSequenceFileList());
- tmpDir =
- new File(
- resources
- .get(0)
- .getTsFile()
- .getParentFile()
- .getParentFile()
- .getParentFile()
- .getParentFile(),
- "tmp"
- + File.separator
- + new PartialPath("root.test")
- + File.separator
- + "0"
- + File.separator
- + "0");
- if (!tmpDir.exists()) {
- tmpDir.mkdirs();
- }
- for (TsFileResource resource : resources) {
- statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), tmpDir));
- }
-
- // load all tsfile in tmp dir
- tmpDir =
- new File(
- resources
- .get(0)
- .getTsFile()
- .getParentFile()
- .getParentFile()
- .getParentFile()
- .getParentFile(),
- "tmp");
- statement.execute(String.format("load '%s'", tmpDir.getAbsolutePath()));
- resources =
- new ArrayList<>(
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.vehicle"))
- .getSequenceFileList());
- assertEquals(1, resources.size());
- resources =
- new ArrayList<>(
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getSequenceFileList());
- assertEquals(2, resources.size());
- assertNotNull(tmpDir.listFiles());
- assertEquals(
- 0,
- new File(
- tmpDir,
- new PartialPath("root.vehicle") + File.separator + "0" + File.separator + "0")
- .listFiles()
- .length);
- assertEquals(
- 0,
- new File(
- tmpDir,
- new PartialPath("root.test") + File.separator + "0" + File.separator + "0")
- .listFiles()
- .length);
- } catch (StorageEngineException | IllegalPathException e) {
- Assert.fail();
- }
- }
-
- @Test
- public void loadUnsequenceTsfileTest() throws SQLException {
- prepareData(insertUnsequenceSqls);
- String[] queryRes =
- new String[] {
- "1,null,null,null,null,null,null,110",
- "2,null,null,null,null,null,1209,null",
- "4,null,null,null,null,null,null,330",
- "6,120,null,null,null,null,null,null",
- "9,null,123,null,null,null,null,null",
- "10,100,null,null,null,106,null,1100",
- "11,null,null,104.0,null,null,null,null",
- "12,101,102,null,null,null,null,null",
- "13,null,null,null,null,427,528,null",
- "14,null,null,1024.0,null,107,108,430",
- "15,null,null,105.0,true,null,null,null",
- "16,128,null,null,null,null,109,null",
- "17,null,null,null,false,null,null,null",
- "18,189,198,null,null,null,null,null",
- "19,null,103,null,null,null,null,null",
- "20,1000,null,null,null,426,null,null",
- "29,null,null,1205.0,true,null,null,null",
- "30,null,null,null,null,1006,null,null",
- "33,null,null,null,true,null,null,null",
- "34,null,null,null,null,1007,1008,null",
- "36,null,null,null,null,null,1090,null",
- "38,121,122,null,null,null,null,null",
- "45,null,null,null,null,126,null,null",
- "68,null,null,null,null,127,128,null",
- "78,null,null,null,null,null,129,null",
- "80,null,null,null,null,127,128,null",
- "99,null,1234,null,null,null,null,null",
- "140,null,null,null,null,null,null,430",
- "150,null,null,null,null,426,null,null",
- "200,null,null,null,null,null,129,null"
- };
- try (Connection connection =
- DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
- Statement statement = connection.createStatement()) {
-
- // check query result
- boolean hasResultSet = statement.execute("SELECT * FROM root.**");
- Assert.assertTrue(hasResultSet);
- try (ResultSet resultSet = statement.getResultSet()) {
- int cnt = 0;
- while (resultSet.next()) {
- String queryString =
- resultSet.getString(TIMESTAMP_STR)
- + ","
- + resultSet.getString(VEHICLE_D0_S0_STR)
- + ","
- + resultSet.getString(VEHICLE_D0_S1_STR)
- + ","
- + resultSet.getString(VEHICLE_D0_S2_STR)
- + ","
- + resultSet.getString(VEHICLE_D0_S3_STR)
- + ","
- + resultSet.getString(TEST_D0_S0_STR)
- + ","
- + resultSet.getString(TEST_D0_S1_STR)
- + ","
- + resultSet.getString(TEST_D1_STR);
- Assert.assertEquals(queryRes[cnt++], queryString);
- }
- }
-
- // move root.vehicle
- List<TsFileResource> resources =
- new ArrayList<>(
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.vehicle"))
- .getSequenceFileList());
- assertEquals(2, resources.size());
- File tmpDir =
- new File(
- resources.get(0).getTsFile().getParentFile().getParentFile().getParentFile(),
- "tmp" + File.separator + new PartialPath("root.vehicle") + File.separator + "0");
- if (!tmpDir.exists()) {
- tmpDir.mkdirs();
- }
- for (TsFileResource resource : resources) {
- statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), tmpDir));
- }
- resources =
- new ArrayList<>(
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.vehicle"))
- .getUnSequenceFileList());
- assertEquals(1, resources.size());
- for (TsFileResource resource : resources) {
- statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), tmpDir));
- }
-
- // move root.test
- resources =
- new ArrayList<>(
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getSequenceFileList());
- assertEquals(2, resources.size());
- tmpDir = new File(tmpDir.getParentFile().getParentFile(), "root.test" + File.separator + "0");
- if (!tmpDir.exists()) {
- tmpDir.mkdirs();
- }
- for (TsFileResource resource : resources) {
- statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), tmpDir));
- }
- resources =
- new ArrayList<>(
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getUnSequenceFileList());
- assertEquals(2, resources.size());
- for (TsFileResource resource : resources) {
- statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), tmpDir));
- }
-
- // load all tsfile in tmp dir
- tmpDir = tmpDir.getParentFile().getParentFile();
- statement.execute(String.format("load '%s'", tmpDir.getAbsolutePath()));
- assertEquals(
- 2,
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.vehicle"))
- .getSequenceFileList()
- .size());
- assertEquals(
- 1,
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.vehicle"))
- .getUnSequenceFileList()
- .size());
- if (config.getTimeIndexLevel().equals(TimeIndexLevel.DEVICE_TIME_INDEX)) {
- if (StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getUnSequenceFileList()
- .size()
- == 1) {
- assertEquals(
- 3,
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getSequenceFileList()
- .size());
- } else {
- assertEquals(
- 2,
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getSequenceFileList()
- .size());
- }
- } else if (config.getTimeIndexLevel().equals(TimeIndexLevel.FILE_TIME_INDEX)) {
- assertEquals(
- 2,
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getUnSequenceFileList()
- .size());
- assertEquals(
- 2,
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getSequenceFileList()
- .size());
- }
- assertNotNull(tmpDir.listFiles());
- assertEquals(
- 0,
- new File(tmpDir, new PartialPath("root.vehicle") + File.separator + "0")
- .listFiles()
- .length);
- assertEquals(
- 0,
- new File(tmpDir, new PartialPath("root.test") + File.separator + "0").listFiles().length);
-
- // check query result
- hasResultSet = statement.execute("SELECT * FROM root.**");
- Assert.assertTrue(hasResultSet);
- try (ResultSet resultSet = statement.getResultSet()) {
- int cnt = 0;
- while (resultSet.next()) {
- String queryString =
- resultSet.getString(TIMESTAMP_STR)
- + ","
- + resultSet.getString(VEHICLE_D0_S0_STR)
- + ","
- + resultSet.getString(VEHICLE_D0_S1_STR)
- + ","
- + resultSet.getString(VEHICLE_D0_S2_STR)
- + ","
- + resultSet.getString(VEHICLE_D0_S3_STR)
- + ","
- + resultSet.getString(TEST_D0_S0_STR)
- + ","
- + resultSet.getString(TEST_D0_S1_STR)
- + ","
- + resultSet.getString(TEST_D1_STR);
- Assert.assertEquals(queryRes[cnt++], queryString);
- }
- }
- } catch (StorageEngineException | IllegalPathException e) {
- Assert.fail();
- }
- }
-
- @Test
- public void loadTsfileWithModsTest() throws SQLException {
- prepareData(insertUnsequenceSqls);
- prepareData(deleteTimeseiresSqls);
- String[] queryRes =
- new String[] {
- "1,null,null,null,null,null,null,110",
- "2,null,null,null,null,null,1209,null",
- "4,null,null,null,null,null,null,330",
- "6,120,null,null,null,null,null,null",
- "9,null,123,null,null,null,null,null",
- "10,null,null,null,null,106,null,1100",
- "13,null,null,null,null,427,528,null",
- "14,null,null,null,null,107,108,430",
- "16,null,null,null,null,null,109,null",
- "20,null,null,null,null,426,null,null",
- "29,null,null,1205.0,true,null,null,null",
- "30,null,null,null,null,1006,null,null",
- "33,null,null,null,true,null,null,null",
- "34,null,null,null,null,1007,1008,null",
- "36,null,null,null,null,null,1090,null",
- "38,121,122,null,null,null,null,null",
- "45,null,null,null,null,126,null,null",
- "68,null,null,null,null,127,128,null",
- "78,null,null,null,null,null,129,null",
- "80,null,null,null,null,127,128,null",
- "99,null,1234,null,null,null,null,null",
- "140,null,null,null,null,null,null,430",
- "150,null,null,null,null,426,null,null",
- "200,null,null,null,null,null,129,null"
- };
- try (Connection connection =
- DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
- Statement statement = connection.createStatement()) {
-
- // check query result
- boolean hasResultSet = statement.execute("SELECT * FROM root.**");
- Assert.assertTrue(hasResultSet);
- try (ResultSet resultSet = statement.getResultSet()) {
- int cnt = 0;
- while (resultSet.next()) {
- String queryString =
- resultSet.getString(TIMESTAMP_STR)
- + ","
- + resultSet.getString(VEHICLE_D0_S0_STR)
- + ","
- + resultSet.getString(VEHICLE_D0_S1_STR)
- + ","
- + resultSet.getString(VEHICLE_D0_S2_STR)
- + ","
- + resultSet.getString(VEHICLE_D0_S3_STR)
- + ","
- + resultSet.getString(TEST_D0_S0_STR)
- + ","
- + resultSet.getString(TEST_D0_S1_STR)
- + ","
- + resultSet.getString(TEST_D1_STR);
- Assert.assertEquals(queryRes[cnt++], queryString);
- }
- }
-
- // move root.vehicle
- List<TsFileResource> resources =
- new ArrayList<>(
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.vehicle"))
- .getSequenceFileList());
- assertEquals(2, resources.size());
- File tmpDir =
- new File(
- resources.get(0).getTsFile().getParentFile().getParentFile().getParentFile(),
- "tmp" + File.separator + new PartialPath("root.vehicle") + File.separator + "0");
- if (!tmpDir.exists()) {
- tmpDir.mkdirs();
- }
- for (TsFileResource resource : resources) {
- statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), tmpDir));
- }
- resources =
- new ArrayList<>(
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.vehicle"))
- .getUnSequenceFileList());
- assertEquals(1, resources.size());
- for (TsFileResource resource : resources) {
- statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), tmpDir));
- }
-
- // move root.test
- resources =
- new ArrayList<>(
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getSequenceFileList());
- assertEquals(2, resources.size());
- tmpDir = new File(tmpDir.getParentFile().getParentFile(), "root.test" + File.separator + "0");
- if (!tmpDir.exists()) {
- tmpDir.mkdirs();
- }
- for (TsFileResource resource : resources) {
- statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), tmpDir));
- }
- resources =
- new ArrayList<>(
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getUnSequenceFileList());
- assertEquals(2, resources.size());
- for (TsFileResource resource : resources) {
- statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), tmpDir));
- }
-
- // load all tsfile in tmp dir
- tmpDir = tmpDir.getParentFile().getParentFile();
- statement.execute(String.format("load '%s'", tmpDir.getAbsolutePath()));
- assertEquals(
- 2,
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.vehicle"))
- .getSequenceFileList()
- .size());
- assertEquals(
- 1,
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.vehicle"))
- .getUnSequenceFileList()
- .size());
- if (config.getTimeIndexLevel().equals(TimeIndexLevel.DEVICE_TIME_INDEX)) {
- if (StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getUnSequenceFileList()
- .size()
- == 1) {
- assertEquals(
- 3,
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getSequenceFileList()
- .size());
- } else {
- assertEquals(
- 2,
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getSequenceFileList()
- .size());
- }
- } else if (config.getTimeIndexLevel().equals(TimeIndexLevel.FILE_TIME_INDEX)) {
- assertEquals(
- 2,
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getUnSequenceFileList()
- .size());
- assertEquals(
- 2,
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getSequenceFileList()
- .size());
- }
- assertNotNull(tmpDir.listFiles());
- assertEquals(
- 0,
- new File(tmpDir, new PartialPath("root.vehicle") + File.separator + "0")
- .listFiles()
- .length);
- assertEquals(
- 0,
- new File(tmpDir, new PartialPath("root.test") + File.separator + "0").listFiles().length);
-
- // check query result
- hasResultSet = statement.execute("SELECT * FROM root.**");
- Assert.assertTrue(hasResultSet);
- try (ResultSet resultSet = statement.getResultSet()) {
- int cnt = 0;
- while (resultSet.next()) {
- String queryString =
- resultSet.getString(TIMESTAMP_STR)
- + ","
- + resultSet.getString(VEHICLE_D0_S0_STR)
- + ","
- + resultSet.getString(VEHICLE_D0_S1_STR)
- + ","
- + resultSet.getString(VEHICLE_D0_S2_STR)
- + ","
- + resultSet.getString(VEHICLE_D0_S3_STR)
- + ","
- + resultSet.getString(TEST_D0_S0_STR)
- + ","
- + resultSet.getString(TEST_D0_S1_STR)
- + ","
- + resultSet.getString(TEST_D1_STR);
- Assert.assertEquals(queryRes[cnt++], queryString);
- }
- }
- } catch (StorageEngineException | IllegalPathException e) {
- Assert.fail();
- }
- }
-
- @Test
- public void loadTsFileTestWithAutoCreateSchema() throws SQLException {
- try (Connection connection =
- DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
- Statement statement = connection.createStatement()) {
-
- // move root.vehicle
- List<TsFileResource> resources =
- new ArrayList<>(
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.vehicle"))
- .getSequenceFileList());
-
- File tmpDir =
- new File(
- resources
- .get(0)
- .getTsFile()
- .getParentFile()
- .getParentFile()
- .getParentFile()
- .getParentFile(),
- "tmp"
- + File.separator
- + "root.vehicle"
- + File.separator
- + "0"
- + File.separator
- + "0");
- if (!tmpDir.exists()) {
- tmpDir.mkdirs();
- }
- for (TsFileResource resource : resources) {
- statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), tmpDir));
- }
-
- // move root.test
- resources =
- new ArrayList<>(
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getSequenceFileList());
- tmpDir =
- new File(
- resources
- .get(0)
- .getTsFile()
- .getParentFile()
- .getParentFile()
- .getParentFile()
- .getParentFile(),
- "tmp" + File.separator + "root.test" + File.separator + "0" + File.separator + "0");
- if (!tmpDir.exists()) {
- tmpDir.mkdirs();
- }
- for (TsFileResource resource : resources) {
- statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), tmpDir));
- }
-
- Set<String> expectedSet =
- new HashSet<>(
- Arrays.asList(
- "root.vehicle.d0.s0,root.vehicle,INT32",
- "root.vehicle.d0.s1,root.vehicle,TEXT",
- "root.vehicle.d1.s2,root.vehicle,FLOAT",
- "root.vehicle.d1.s3,root.vehicle,BOOLEAN",
- "root.test.d0.s0,root.test,INT32",
- "root.test.d0.s1,root.test,TEXT",
- "root.test.d1.g0.s0,root.test,INT32"));
-
- boolean hasResultSet = statement.execute("SHOW timeseries");
- Assert.assertTrue(hasResultSet);
- try (ResultSet resultSet = statement.getResultSet()) {
- while (resultSet.next()) {
- Assert.assertTrue(
- expectedSet.contains(
- resultSet.getString(1)
- + ","
- + resultSet.getString(3)
- + ","
- + resultSet.getString(4)));
- }
- }
-
- // remove metadata
- for (String sql : deleteSqls) {
- statement.execute(sql);
- }
-
- // test not load metadata automatically, it will occur errors.
- // UPDATE: load grammar is updated in 0.14, change this into load metadata automatically
- boolean hasError = false;
- try {
- statement.execute(String.format("load '%s' sglevel=1", tmpDir.getAbsolutePath()));
- } catch (Exception e) {
- hasError = true;
- }
- Assert.assertFalse(hasError);
-
- // test load metadata automatically, it will succeed.
- tmpDir = tmpDir.getParentFile().getParentFile().getParentFile();
- statement.execute(String.format("load '%s' sglevel=1", tmpDir.getAbsolutePath()));
- resources =
- new ArrayList<>(
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.vehicle"))
- .getSequenceFileList());
- assertEquals(1, resources.size());
- resources =
- new ArrayList<>(
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getSequenceFileList());
- assertEquals(2, resources.size());
- assertEquals(2, tmpDir.listFiles().length);
- for (File dir : tmpDir.listFiles()) {
- assertEquals(0, dir.listFiles()[0].listFiles()[0].listFiles().length);
- }
- } catch (StorageEngineException | IllegalPathException e) {
- e.printStackTrace();
- Assert.fail();
- }
- }
-
- @Test
- public void loadTsFileTestWithVerifyMetadata() throws Exception {
- try (Connection connection =
- DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
- Statement statement = connection.createStatement()) {
- List<TsFileResource> resources =
- new ArrayList<>(
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.vehicle"))
- .getSequenceFileList());
- assertEquals(1, resources.size());
- File vehicleTmpDir =
- new File(
- resources
- .get(0)
- .getTsFile()
- .getParentFile()
- .getParentFile()
- .getParentFile()
- .getParentFile(),
- "tmp" + File.separator + "root.vehicle");
- if (!vehicleTmpDir.exists()) {
- vehicleTmpDir.mkdirs();
- }
-
- for (TsFileResource resource : resources) {
- statement.execute(
- String.format("unload '%s' '%s'", resource.getTsFilePath(), vehicleTmpDir));
- }
-
- resources =
- new ArrayList<>(
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getSequenceFileList());
- assertEquals(2, resources.size());
-
- File testTmpDir = new File(vehicleTmpDir.getParentFile(), "root.test");
- if (!testTmpDir.exists()) {
- testTmpDir.mkdirs();
- }
-
- for (TsFileResource resource : resources) {
- statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), testTmpDir));
- }
-
- for (String sql : deleteSqls) {
- statement.execute(sql);
- }
-
- List<String> metaDataSqls =
- new ArrayList<>(
- Arrays.asList(
- "CREATE DATABASE root.vehicle",
- "CREATE DATABASE root.test",
- "CREATE TIMESERIES root.vehicle.d0.s0 WITH DATATYPE=INT64, ENCODING=RLE",
- "CREATE TIMESERIES root.vehicle.d0.s1 WITH DATATYPE=TEXT, ENCODING=PLAIN",
- "CREATE TIMESERIES root.vehicle.d1.s2 WITH DATATYPE=FLOAT, ENCODING=RLE",
- "CREATE TIMESERIES root.vehicle.d1.s3 WITH DATATYPE=BOOLEAN, ENCODING=PLAIN",
- "CREATE TIMESERIES root.test.d0.s0 WITH DATATYPE=FLOAT, ENCODING=RLE",
- "CREATE TIMESERIES root.test.d0.s1 WITH DATATYPE=TEXT, ENCODING=PLAIN",
- "CREATE TIMESERIES root.test.d1.g0.s0 WITH DATATYPE=INT32, ENCODING=RLE"));
-
- for (String sql : metaDataSqls) {
- statement.execute(sql);
- }
-
- // load vehicle
- boolean hasError = false;
- try {
- statement.execute(String.format("load '%s'", vehicleTmpDir));
- } catch (Exception e) {
- hasError = true;
- assertTrue(
- e.getMessage()
- .contains(
- "because root.vehicle.d0.s0 is INT32 in the loading TsFile but is INT64 in IoTDB."));
- }
- assertTrue(hasError);
-
- statement.execute(String.format("load '%s' verify=false", vehicleTmpDir));
- assertEquals(
- 1,
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.vehicle"))
- .getSequenceFileList()
- .size());
-
- // load test
- hasError = false;
- try {
- statement.execute(String.format("load '%s'", testTmpDir));
- } catch (Exception e) {
- hasError = true;
- assertTrue(
- e.getMessage()
- .contains(
- "because root.test.d0.s0 is INT32 in the loading TsFile but is FLOAT in IoTDB."));
- }
- assertTrue(hasError);
-
- statement.execute(String.format("load '%s' verify=false", testTmpDir));
- assertEquals(
- 2,
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getSequenceFileList()
- .size());
-
- } catch (Exception e) {
- e.printStackTrace();
- Assert.fail();
- }
- }
-
- @Test
- public void removeTsFileTest() throws SQLException {
- try (Connection connection =
- DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
- Statement statement = connection.createStatement()) {
- List<TsFileResource> resources =
- new ArrayList<>(
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.vehicle"))
- .getSequenceFileList());
- assertEquals(1, resources.size());
- for (TsFileResource resource : resources) {
- statement.execute(String.format("remove '%s'", resource.getTsFilePath()));
- }
- assertEquals(
- 0,
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.vehicle"))
- .getSequenceFileList()
- .size());
-
- resources =
- new ArrayList<>(
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getSequenceFileList());
- assertEquals(2, resources.size());
- for (TsFileResource resource : resources) {
- statement.execute(String.format("remove '%s'", resource.getTsFilePath()));
- }
- assertEquals(
- 0,
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getSequenceFileList()
- .size());
- } catch (StorageEngineException | IllegalPathException e) {
- Assert.fail();
- }
- }
-
- protected void prepareData(String[] sqls) {
- try (Connection connection =
- DriverManager.getConnection(
- Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root", "root");
- Statement statement = connection.createStatement()) {
-
- for (String sql : sqls) {
- statement.execute(sql);
- }
-
- } catch (Exception e) {
- LOGGER.error("Can not execute sql.", e);
- fail();
- }
- }
-}
diff --git a/integration/src/test/java/org/apache/iotdb/db/integration/IoTDBLoadExternalTsfileWithVirtualSGIT.java b/integration/src/test/java/org/apache/iotdb/db/integration/IoTDBLoadExternalTsfileWithVirtualSGIT.java
deleted file mode 100644
index 2ece7adee3..0000000000
--- a/integration/src/test/java/org/apache/iotdb/db/integration/IoTDBLoadExternalTsfileWithVirtualSGIT.java
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.iotdb.db.integration;
-
-import org.apache.iotdb.commons.conf.IoTDBConstant;
-import org.apache.iotdb.commons.exception.IllegalPathException;
-import org.apache.iotdb.commons.path.PartialPath;
-import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.engine.StorageEngineV2;
-import org.apache.iotdb.db.integration.sync.SyncTestUtil;
-import org.apache.iotdb.db.utils.EnvironmentUtils;
-import org.apache.iotdb.itbase.category.LocalStandaloneTest;
-import org.apache.iotdb.jdbc.Config;
-
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import java.io.File;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.List;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-
-@Category({LocalStandaloneTest.class})
-public class IoTDBLoadExternalTsfileWithVirtualSGIT extends IoTDBLoadExternalTsfileIT {
- @Before
- public void setUp() throws Exception {
- prevVirtualPartitionNum = IoTDBDescriptor.getInstance().getConfig().getDataRegionNum();
- IoTDBDescriptor.getInstance().getConfig().setDataRegionNum(2);
- prevCompactionThread = IoTDBDescriptor.getInstance().getConfig().getCompactionThreadCount();
- EnvironmentUtils.envSetUp();
- StorageEngineV2.getInstance().reset();
- Class.forName(Config.JDBC_DRIVER_NAME);
- prepareData(insertSequenceSqls);
- }
-
- @Test
- public void unloadTsfileWithVSGTest() throws SQLException {
- try (Connection connection =
- DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
- Statement statement = connection.createStatement()) {
-
- // move root.vehicle
- File vehicleDir =
- new File(
- IoTDBDescriptor.getInstance().getConfig().getDataDirs()[0],
- IoTDBConstant.SEQUENCE_FLODER_NAME + File.separator + "root.vehicle");
- List<File> vehicleFiles = SyncTestUtil.getTsFilePaths(vehicleDir);
- File tmpDir =
- new File(
- IoTDBDescriptor.getInstance().getConfig().getDataDirs()[0],
- "tmp" + File.separator + new PartialPath("root.vehicle"));
- if (!tmpDir.exists()) {
- tmpDir.mkdirs();
- }
- for (File tsFile : vehicleFiles) {
- statement.execute(String.format("unload \"%s\" \"%s\"", tsFile.getAbsolutePath(), tmpDir));
- }
- assertEquals(0, SyncTestUtil.getTsFilePaths(vehicleDir).size());
- assertNotNull(tmpDir.listFiles());
- assertEquals(2, tmpDir.listFiles().length >> 1);
-
- // move root.test
- File testDir =
- new File(
- IoTDBDescriptor.getInstance().getConfig().getDataDirs()[0],
- IoTDBConstant.SEQUENCE_FLODER_NAME + File.separator + "root.test");
- List<File> testFiles = SyncTestUtil.getTsFilePaths(testDir);
- tmpDir =
- new File(
- IoTDBDescriptor.getInstance().getConfig().getDataDirs()[0],
- "tmp" + File.separator + new PartialPath("root.test"));
- if (!tmpDir.exists()) {
- tmpDir.mkdirs();
- }
- for (File tsFile : testFiles) {
- statement.execute(String.format("unload \"%s\" \"%s\"", tsFile.getAbsolutePath(), tmpDir));
- }
- assertEquals(0, SyncTestUtil.getTsFilePaths(testDir).size());
- assertNotNull(tmpDir.listFiles());
- assertEquals(2, tmpDir.listFiles().length >> 1);
- } catch (IllegalPathException e) {
- Assert.fail();
- }
- }
-
- @Test
- public void removeTsfileWithVSGTest() throws SQLException {
- try (Connection connection =
- DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
- Statement statement = connection.createStatement()) {
-
- // remove root.vehicle
- File vehicleDir =
- new File(
- IoTDBDescriptor.getInstance().getConfig().getDataDirs()[0],
- IoTDBConstant.SEQUENCE_FLODER_NAME + File.separator + "root.vehicle");
- List<File> vehicleFiles = SyncTestUtil.getTsFilePaths(vehicleDir);
- for (File tsFile : vehicleFiles) {
- statement.execute(String.format("remove \"%s\"", tsFile.getAbsolutePath()));
- }
- assertEquals(0, SyncTestUtil.getTsFilePaths(vehicleDir).size());
- // remove root.test
- File testDir =
- new File(
- IoTDBDescriptor.getInstance().getConfig().getDataDirs()[0],
- IoTDBConstant.SEQUENCE_FLODER_NAME + File.separator + "root.test");
- List<File> testFiles = SyncTestUtil.getTsFilePaths(testDir);
- for (File tsFile : testFiles) {
- statement.execute(String.format("remove \"%s\"", tsFile.getAbsolutePath()));
- }
- assertEquals(0, SyncTestUtil.getTsFilePaths(testDir).size());
- }
- }
-}
diff --git a/integration/src/test/java/org/apache/iotdb/db/integration/IoTDBNewTsFileCompactionIT.java b/integration/src/test/java/org/apache/iotdb/db/integration/IoTDBNewTsFileCompactionIT.java
index 452ff1108b..7f0d6ab18b 100644
--- a/integration/src/test/java/org/apache/iotdb/db/integration/IoTDBNewTsFileCompactionIT.java
+++ b/integration/src/test/java/org/apache/iotdb/db/integration/IoTDBNewTsFileCompactionIT.java
@@ -20,10 +20,6 @@ package org.apache.iotdb.db.integration;
import org.apache.iotdb.commons.path.PartialPath;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.engine.StorageEngine;
-import org.apache.iotdb.db.engine.compaction.CompactionTaskManager;
-import org.apache.iotdb.db.engine.storagegroup.DataRegion;
-import org.apache.iotdb.db.engine.storagegroup.TsFileManager;
import org.apache.iotdb.db.exception.StorageEngineException;
import org.apache.iotdb.db.utils.EnvironmentUtils;
import org.apache.iotdb.itbase.category.LocalStandaloneTest;
@@ -32,6 +28,7 @@ import org.apache.iotdb.tsfile.common.conf.TSFileDescriptor;
import org.junit.After;
import org.junit.Before;
+import org.junit.Ignore;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@@ -40,12 +37,12 @@ import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
-import java.util.concurrent.TimeUnit;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
+@Ignore
@Category({LocalStandaloneTest.class})
public class IoTDBNewTsFileCompactionIT {
@@ -1062,19 +1059,20 @@ public class IoTDBNewTsFileCompactionIT {
/** wait until merge is finished */
private boolean waitForMergeFinish() throws StorageEngineException, InterruptedException {
- DataRegion dataRegion = StorageEngine.getInstance().getProcessor(storageGroupPath);
- TsFileManager resourceManager = dataRegion.getTsFileResourceManager();
-
- long startTime = System.nanoTime();
- TimeUnit.MILLISECONDS.sleep(500);
- // get the size of level 1's tsfile list to judge whether merge is finished
- while (CompactionTaskManager.getInstance().getExecutingTaskCount() != 0) {
- TimeUnit.MILLISECONDS.sleep(100);
- // wait too long, just break
- if ((System.nanoTime() - startTime) >= MAX_WAIT_TIME_FOR_MERGE) {
- break;
- }
- }
- return resourceManager.getTsFileList(true).size() == 1;
+ // DataRegion dataRegion = StorageEngine.getInstance().getProcessor(storageGroupPath);
+ // TsFileManager resourceManager = dataRegion.getTsFileResourceManager();
+ //
+ // long startTime = System.nanoTime();
+ // TimeUnit.MILLISECONDS.sleep(500);
+ // // get the size of level 1's tsfile list to judge whether merge is finished
+ // while (CompactionTaskManager.getInstance().getExecutingTaskCount() != 0) {
+ // TimeUnit.MILLISECONDS.sleep(100);
+ // // wait too long, just break
+ // if ((System.nanoTime() - startTime) >= MAX_WAIT_TIME_FOR_MERGE) {
+ // break;
+ // }
+ // }
+ // return resourceManager.getTsFileList(true).size() == 1;
+ return false;
}
}
diff --git a/integration/src/test/java/org/apache/iotdb/db/integration/IoTDBRewriteTsFileToolIT.java b/integration/src/test/java/org/apache/iotdb/db/integration/IoTDBRewriteTsFileToolIT.java
index 25f23a0fee..24a19a674f 100644
--- a/integration/src/test/java/org/apache/iotdb/db/integration/IoTDBRewriteTsFileToolIT.java
+++ b/integration/src/test/java/org/apache/iotdb/db/integration/IoTDBRewriteTsFileToolIT.java
@@ -20,12 +20,9 @@ package org.apache.iotdb.db.integration;
import org.apache.iotdb.RewriteTsFileTool;
import org.apache.iotdb.commons.exception.IllegalPathException;
-import org.apache.iotdb.commons.path.PartialPath;
import org.apache.iotdb.commons.utils.FileUtils;
import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.engine.StorageEngine;
-import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
import org.apache.iotdb.db.exception.StorageEngineException;
import org.apache.iotdb.db.utils.EnvironmentUtils;
import org.apache.iotdb.jdbc.Config;
@@ -66,34 +63,34 @@ public class IoTDBRewriteTsFileToolIT {
public void unload(Statement statement)
throws IllegalPathException, SQLException, StorageEngineException {
- for (TsFileResource resource :
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.sg"))
- .getSequenceFileList()) {
- if (tmpDir == null) {
- tmpDir =
- resource
- .getTsFile()
- .getParentFile()
- .getParentFile()
- .getParentFile()
- .getParentFile()
- .getParent()
- + File.separator
- + "tmp";
- File tmpFile = new File(tmpDir);
- if (!tmpFile.exists()) {
- tmpFile.mkdirs();
- }
- }
- statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), tmpDir));
- }
- for (TsFileResource resource :
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.sg"))
- .getUnSequenceFileList()) {
- statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), tmpDir));
- }
+ // for (TsFileResource resource :
+ // StorageEngine.getInstance()
+ // .getProcessor(new PartialPath("root.sg"))
+ // .getSequenceFileList()) {
+ // if (tmpDir == null) {
+ // tmpDir =
+ // resource
+ // .getTsFile()
+ // .getParentFile()
+ // .getParentFile()
+ // .getParentFile()
+ // .getParentFile()
+ // .getParent()
+ // + File.separator
+ // + "tmp";
+ // File tmpFile = new File(tmpDir);
+ // if (!tmpFile.exists()) {
+ // tmpFile.mkdirs();
+ // }
+ // }
+ // statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), tmpDir));
+ // }
+ // for (TsFileResource resource :
+ // StorageEngine.getInstance()
+ // .getProcessor(new PartialPath("root.sg"))
+ // .getUnSequenceFileList()) {
+ // statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), tmpDir));
+ // }
}
public void prepareTsFiles() throws Exception {
diff --git a/integration/src/test/java/org/apache/iotdb/db/integration/IoTDBSnapshotIT.java b/integration/src/test/java/org/apache/iotdb/db/integration/IoTDBSnapshotIT.java
deleted file mode 100644
index 414d99a92e..0000000000
--- a/integration/src/test/java/org/apache/iotdb/db/integration/IoTDBSnapshotIT.java
+++ /dev/null
@@ -1,285 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.iotdb.db.integration;
-
-import org.apache.iotdb.commons.consensus.DataRegionId;
-import org.apache.iotdb.commons.exception.IllegalPathException;
-import org.apache.iotdb.commons.exception.MetadataException;
-import org.apache.iotdb.commons.path.PartialPath;
-import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.constant.TestConstant;
-import org.apache.iotdb.db.engine.StorageEngine;
-import org.apache.iotdb.db.engine.StorageEngineV2;
-import org.apache.iotdb.db.engine.cache.ChunkCache;
-import org.apache.iotdb.db.engine.cache.TimeSeriesMetadataCache;
-import org.apache.iotdb.db.engine.snapshot.SnapshotLoader;
-import org.apache.iotdb.db.engine.snapshot.SnapshotTaker;
-import org.apache.iotdb.db.engine.snapshot.exception.DirectoryNotLegalException;
-import org.apache.iotdb.db.engine.storagegroup.DataRegion;
-import org.apache.iotdb.db.exception.DataRegionException;
-import org.apache.iotdb.db.exception.StorageEngineException;
-import org.apache.iotdb.integration.env.EnvFactory;
-
-import org.apache.commons.io.FileUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.io.File;
-import java.io.IOException;
-import java.sql.Connection;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.HashMap;
-import java.util.Map;
-
-public class IoTDBSnapshotIT {
- final String SG_NAME = "root.snapshotTest";
-
- @Before
- public void setUp() throws Exception {
- EnvFactory.getEnv().initBeforeTest();
- IoTDBDescriptor.getInstance().getConfig().setEnableCrossSpaceCompaction(false);
- }
-
- @After
- public void tearDown() throws Exception {
- EnvFactory.getEnv().cleanAfterTest();
- IoTDBDescriptor.getInstance().getConfig().setEnableCrossSpaceCompaction(true);
- }
-
- @Test
- public void testTakeSnapshot()
- throws SQLException, IllegalPathException, StorageEngineException, IOException,
- DirectoryNotLegalException, DataRegionException {
- try (Connection connection = EnvFactory.getEnv().getConnection();
- Statement statement = connection.createStatement()) {
- statement.execute("CREATE DATABASE " + SG_NAME);
- for (int i = 0; i < 10; ++i) {
- for (int j = 0; j < 10; ++j) {
- statement.execute(
- String.format("insert into %s.d%d(time, s) values (%d, %d)", SG_NAME, i, j, j));
- }
- statement.execute("flush");
- for (int j = 0; j < 10; ++j) {
- statement.execute(
- String.format("insert into %s.d%d(time, s) values (%d, %d)", SG_NAME, i, j, j + 1));
- }
- statement.execute("flush");
- }
-
- DataRegion region =
- StorageEngine.getInstance().getProcessor(new PartialPath(SG_NAME + ".d0.s"));
- File snapshotDir = new File(TestConstant.OUTPUT_DATA_DIR, "snapshot");
- if (snapshotDir.exists()) {
- FileUtils.forceDelete(snapshotDir);
- }
-
- new SnapshotTaker(region).takeFullSnapshot(snapshotDir.getAbsolutePath(), true);
-
- Assert.assertTrue(snapshotDir.exists());
- Assert.assertTrue(snapshotDir.isDirectory());
- File[] seqTsfiles =
- new File(
- snapshotDir.getAbsolutePath()
- + File.separator
- + "snapshot"
- + File.separator
- + "unsequence"
- + File.separator
- + "root.snapshotTest"
- + File.separator
- + "0"
- + File.separator
- + "0")
- .listFiles();
- File[] unseqTsfiles =
- new File(
- snapshotDir.getAbsolutePath()
- + File.separator
- + "snapshot"
- + File.separator
- + "sequence"
- + File.separator
- + "root.snapshotTest"
- + File.separator
- + "0"
- + File.separator
- + "0")
- .listFiles();
- Assert.assertNotNull(seqTsfiles);
- Assert.assertNotNull(unseqTsfiles);
- Assert.assertEquals(20, seqTsfiles.length);
- Assert.assertEquals(20, unseqTsfiles.length);
- }
- }
-
- @Test(expected = DirectoryNotLegalException.class)
- public void testTakeSnapshotInNotEmptyDir()
- throws SQLException, IOException, IllegalPathException, StorageEngineException,
- DirectoryNotLegalException, DataRegionException {
- try (Connection connection = EnvFactory.getEnv().getConnection();
- Statement statement = connection.createStatement()) {
- statement.execute("CREATE DATABASE " + SG_NAME);
- for (int i = 0; i < 10; ++i) {
- for (int j = 0; j < 10; ++j) {
- statement.execute(
- String.format("insert into %s.d%d(time, s) values (%d, %d)", SG_NAME, i, j, j));
- }
- statement.execute("flush");
- for (int j = 0; j < 10; ++j) {
- statement.execute(
- String.format("insert into %s.d%d(time, s) values (%d, %d)", SG_NAME, i, j, j + 1));
- }
- statement.execute("flush");
- }
-
- DataRegion region =
- StorageEngine.getInstance().getProcessor(new PartialPath(SG_NAME + ".d0.s"));
- File snapshotDir = new File(TestConstant.OUTPUT_DATA_DIR, "snapshot");
- if (!snapshotDir.exists()) {
- snapshotDir.mkdirs();
- }
-
- File tmpFile = new File(snapshotDir, "test");
- tmpFile.createNewFile();
-
- new SnapshotTaker(region).takeFullSnapshot(snapshotDir.getAbsolutePath(), true);
- }
- }
-
- @Test
- public void testLoadSnapshot()
- throws SQLException, MetadataException, StorageEngineException, DirectoryNotLegalException,
- IOException, DataRegionException {
- try (Connection connection = EnvFactory.getEnv().getConnection();
- Statement statement = connection.createStatement()) {
- Map<String, Integer> resultMap = new HashMap<>();
- statement.execute("CREATE DATABASE " + SG_NAME);
- for (int i = 0; i < 10; ++i) {
- for (int j = 0; j < 10; ++j) {
- statement.execute(
- String.format("insert into %s.d%d(time, s) values (%d, %d)", SG_NAME, i, j, j));
- }
- statement.execute("flush");
- for (int j = 0; j < 10; ++j) {
- statement.execute(
- String.format("insert into %s.d%d(time, s) values (%d, %d)", SG_NAME, i, j, j + 1));
- }
- statement.execute("flush");
- }
- ResultSet resultSet = statement.executeQuery("select ** from root");
- while (resultSet.next()) {
- long time = resultSet.getLong("Time");
- for (int i = 0; i < 10; ++i) {
- String measurment = SG_NAME + ".d" + i + ".s";
- int res = resultSet.getInt(SG_NAME + ".d" + i + ".s");
- resultMap.put(time + measurment, res);
- }
- }
-
- DataRegion region =
- StorageEngine.getInstance().getProcessor(new PartialPath(SG_NAME + ".d0.s"));
- File snapshotDir = new File(TestConstant.OUTPUT_DATA_DIR, "snapshot");
- if (!snapshotDir.exists()) {
- snapshotDir.mkdirs();
- }
- new SnapshotTaker(region).takeFullSnapshot(snapshotDir.getAbsolutePath(), true);
- StorageEngineV2.getInstance()
- .setDataRegion(
- new DataRegionId(0),
- new SnapshotLoader(snapshotDir.getAbsolutePath(), SG_NAME, "0")
- .loadSnapshotForStateMachine());
-
- ChunkCache.getInstance().clear();
- TimeSeriesMetadataCache.getInstance().clear();
- resultSet = statement.executeQuery("select ** from root");
- while (resultSet.next()) {
- long time = resultSet.getLong("Time");
- for (int i = 0; i < 10; ++i) {
- String measurment = SG_NAME + ".d" + i + ".s";
- int res = resultSet.getInt(SG_NAME + ".d" + i + ".s");
- Assert.assertEquals(resultMap.get(time + measurment).intValue(), res);
- }
- }
- }
- }
-
- @Test
- public void testTakeAndLoadSnapshotWhenCompaction()
- throws SQLException, MetadataException, StorageEngineException, InterruptedException,
- DirectoryNotLegalException, IOException, DataRegionException {
- try (Connection connection = EnvFactory.getEnv().getConnection();
- Statement statement = connection.createStatement()) {
- Map<String, Integer> resultMap = new HashMap<>();
- statement.execute("CREATE DATABASE " + SG_NAME);
- for (int i = 0; i < 10; ++i) {
- for (int j = 0; j < 10; ++j) {
- statement.execute(
- String.format("insert into %s.d%d(time, s) values (%d, %d)", SG_NAME, i, j, j));
- }
- statement.execute("flush");
- for (int j = 0; j < 10; ++j) {
- statement.execute(
- String.format("insert into %s.d%d(time, s) values (%d, %d)", SG_NAME, i, j, j + 1));
- }
- statement.execute("flush");
- }
-
- ResultSet resultSet = statement.executeQuery("select ** from root");
- while (resultSet.next()) {
- long time = resultSet.getLong("Time");
- for (int i = 0; i < 10; ++i) {
- String measurment = SG_NAME + ".d" + i + ".s";
- int res = resultSet.getInt(SG_NAME + ".d" + i + ".s");
- resultMap.put(time + measurment, res);
- }
- }
-
- File snapshotDir = new File(TestConstant.OUTPUT_DATA_DIR, "snapshot");
- if (!snapshotDir.exists()) {
- snapshotDir.mkdirs();
- }
- IoTDBDescriptor.getInstance().getConfig().setEnableCrossSpaceCompaction(true);
- statement.execute("merge");
- DataRegion region =
- StorageEngine.getInstance().getProcessor(new PartialPath(SG_NAME + ".d0.s"));
- new SnapshotTaker(region).takeFullSnapshot(snapshotDir.getAbsolutePath(), true);
- region.abortCompaction();
- StorageEngineV2.getInstance()
- .setDataRegion(
- new DataRegionId(0),
- new SnapshotLoader(snapshotDir.getAbsolutePath(), SG_NAME, "0")
- .loadSnapshotForStateMachine());
- ChunkCache.getInstance().clear();
- TimeSeriesMetadataCache.getInstance().clear();
- resultSet = statement.executeQuery("select ** from root");
- while (resultSet.next()) {
- long time = resultSet.getLong("Time");
- for (int i = 0; i < 10; ++i) {
- String measurment = SG_NAME + ".d" + i + ".s";
- int res = resultSet.getInt(SG_NAME + ".d" + i + ".s");
- Assert.assertEquals(resultMap.get(time + measurment).intValue(), res);
- }
- }
- }
- }
-}
diff --git a/integration/src/test/java/org/apache/iotdb/db/integration/aligned/IoTDBLoadExternalAlignedTsFileIT.java b/integration/src/test/java/org/apache/iotdb/db/integration/aligned/IoTDBLoadExternalAlignedTsFileIT.java
index cd65e4f2d1..29a345280c 100644
--- a/integration/src/test/java/org/apache/iotdb/db/integration/aligned/IoTDBLoadExternalAlignedTsFileIT.java
+++ b/integration/src/test/java/org/apache/iotdb/db/integration/aligned/IoTDBLoadExternalAlignedTsFileIT.java
@@ -17,1012 +17,1021 @@
* under the License.
*/
package org.apache.iotdb.db.integration.aligned;
-
-import org.apache.iotdb.commons.exception.IllegalPathException;
-import org.apache.iotdb.commons.path.PartialPath;
-import org.apache.iotdb.db.conf.IoTDBConfig;
-import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.engine.StorageEngine;
-import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
-import org.apache.iotdb.db.engine.storagegroup.timeindex.TimeIndexLevel;
-import org.apache.iotdb.db.exception.StorageEngineException;
-import org.apache.iotdb.db.utils.EnvironmentUtils;
-import org.apache.iotdb.itbase.category.LocalStandaloneTest;
-import org.apache.iotdb.jdbc.Config;
-
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-@Category({LocalStandaloneTest.class})
-public class IoTDBLoadExternalAlignedTsFileIT {
-
- private static final Logger LOGGER =
- LoggerFactory.getLogger(IoTDBLoadExternalAlignedTsFileIT.class);
-
- private static final IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig();
-
- private static String[] insertSequenceSqls =
- new String[] {
- "CREATE DATABASE root.vehicle",
- "CREATE DATABASE root.test",
- "CREATE ALIGNED TIMESERIES root.vehicle.d0(s0 INT32 encoding=RLE, s1 TEXT encoding=PLAIN)",
- "CREATE ALIGNED TIMESERIES root.vehicle.d1(s2 FLOAT encoding=RLE, s3 BOOLEAN encoding=PLAIN)",
- "CREATE ALIGNED TIMESERIES root.test.d0(s0 INT32 encoding=RLE, s1 TEXT encoding=PLAIN)",
- "CREATE ALIGNED TIMESERIES root.test.d1.g0(s0 INT32 encoding=RLE)",
- "insert into root.vehicle.d0(timestamp,s0) aligned values(10,100)",
- "insert into root.vehicle.d0(timestamp,s0,s1) aligned values(12,101,'102')",
- "insert into root.vehicle.d0(timestamp,s1) aligned values(19,'103')",
- "insert into root.vehicle.d1(timestamp,s2) aligned values(11,104.0)",
- "insert into root.vehicle.d1(timestamp,s2,s3) aligned values(15,105.0,true)",
- "insert into root.vehicle.d1(timestamp,s3) aligned values(17,false)",
- "insert into root.vehicle.d0(timestamp,s0) aligned values(20,1000)",
- "insert into root.test.d0(timestamp,s0) aligned values(10,106)",
- "insert into root.test.d0(timestamp,s0,s1) aligned values(14,107,'108')",
- "insert into root.test.d0(timestamp,s1) aligned values(16,'109')",
- "insert into root.test.d1.g0(timestamp,s0) aligned values(1,110)",
- "insert into root.test.d0(timestamp,s0) aligned values(30,1006)",
- "insert into root.test.d0(timestamp,s0,s1) aligned values(34,1007,'1008')",
- "insert into root.test.d0(timestamp,s1) aligned values(36,'1090')",
- "insert into root.test.d1.g0(timestamp,s0) aligned values(10,1100)",
- "flush",
- "insert into root.test.d0(timestamp,s0) aligned values(150,126)",
- "insert into root.test.d0(timestamp,s0,s1) aligned values(80,127,'128')",
- "insert into root.test.d0(timestamp,s1) aligned values(200,'129')",
- "insert into root.test.d1.g0(timestamp,s0) aligned values(140,430)",
- "insert into root.test.d0(timestamp,s0) aligned values(150,426)",
- "flush"
- };
-
- private static String[] insertUnsequenceSqls =
- new String[] {
- "insert into root.vehicle.d0(timestamp,s0) aligned values(6,120)",
- "insert into root.vehicle.d0(timestamp,s0,s1) aligned values(38,121,'122')",
- "insert into root.vehicle.d0(timestamp,s1) aligned values(9,'123')",
- "insert into root.vehicle.d0(timestamp,s0) aligned values(16,128)",
- "insert into root.vehicle.d0(timestamp,s0,s1) aligned values(18,189,'198')",
- "insert into root.vehicle.d0(timestamp,s1) aligned values(99,'1234')",
- "insert into root.vehicle.d1(timestamp,s2) aligned values(14,1024.0)",
- "insert into root.vehicle.d1(timestamp,s2,s3) aligned values(29,1205.0,true)",
- "insert into root.vehicle.d1(timestamp,s3) aligned values(33,true)",
- "insert into root.test.d0(timestamp,s0) aligned values(45,126)",
- "insert into root.test.d0(timestamp,s0,s1) aligned values(68,127,'128')",
- "insert into root.test.d0(timestamp,s1) aligned values(78,'129')",
- "insert into root.test.d1.g0(timestamp,s0) aligned values(14,430)",
- "flush",
- "insert into root.test.d0(timestamp,s0) aligned values(20,426)",
- "insert into root.test.d0(timestamp,s0,s1) aligned values(13,427,'528')",
- "insert into root.test.d0(timestamp,s1) aligned values(2,'1209')",
- "insert into root.test.d1.g0(timestamp,s0) aligned values(4,330)",
- "flush",
- };
-
- private static String[] deleteTimeseiresSqls =
- new String[] {"delete from root.vehicle.** where time >= 10 and time<=20", "flush"};
-
- private static final String TIMESTAMP_STR = "Time";
- private static final String VEHICLE_D0_S0_STR = "root.vehicle.d0.s0";
- private static final String VEHICLE_D0_S1_STR = "root.vehicle.d0.s1";
- private static final String VEHICLE_D0_S2_STR = "root.vehicle.d1.s2";
- private static final String VEHICLE_D0_S3_STR = "root.vehicle.d1.s3";
- private static final String TEST_D0_S0_STR = "root.test.d0.s0";
- private static final String TEST_D0_S1_STR = "root.test.d0.s1";
- private static final String TEST_D1_STR = "root.test.d1.g0.s0";
-
- private int prevVirtualPartitionNum;
- private int prevCompactionThread;
-
- private static String[] deleteSqls =
- new String[] {"DELETE DATABASE root.vehicle", "DELETE DATABASE root.test"};
-
- @Before
- public void setUp() throws Exception {
- prevVirtualPartitionNum = IoTDBDescriptor.getInstance().getConfig().getDataRegionNum();
- IoTDBDescriptor.getInstance().getConfig().setDataRegionNum(1);
- prevCompactionThread = IoTDBDescriptor.getInstance().getConfig().getCompactionThreadCount();
- EnvironmentUtils.envSetUp();
- Class.forName(Config.JDBC_DRIVER_NAME);
- prepareData(insertSequenceSqls);
- }
-
- @After
- public void tearDown() throws Exception {
- EnvironmentUtils.cleanEnv();
- IoTDBDescriptor.getInstance().getConfig().setCompactionThreadCount(prevCompactionThread);
- IoTDBDescriptor.getInstance().getConfig().setDataRegionNum(prevVirtualPartitionNum);
- }
-
- @Test
- public void unloadTsfileTest() throws SQLException {
- try (Connection connection =
- DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
- Statement statement = connection.createStatement()) {
-
- // move root.vehicle
- List<TsFileResource> resources =
- new ArrayList<>(
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.vehicle"))
- .getSequenceFileList());
- assertEquals(1, resources.size());
- File tmpDir =
- new File(
- resources.get(0).getTsFile().getParentFile().getParentFile(),
- "tmp" + File.separator + new PartialPath("root.vehicle"));
- if (!tmpDir.exists()) {
- tmpDir.mkdirs();
- }
- for (TsFileResource resource : resources) {
- statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), tmpDir));
- }
- assertEquals(
- 0,
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.vehicle"))
- .getSequenceFileList()
- .size());
- assertNotNull(tmpDir.listFiles());
- assertEquals(1, tmpDir.listFiles().length >> 1);
-
- // move root.test
- resources =
- new ArrayList<>(
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getSequenceFileList());
- assertEquals(2, resources.size());
- tmpDir =
- new File(
- resources.get(0).getTsFile().getParentFile().getParentFile(),
- "tmp" + File.separator + new PartialPath("root.test"));
- if (!tmpDir.exists()) {
- tmpDir.mkdirs();
- }
- for (TsFileResource resource : resources) {
- // test unload using relative path
- statement.execute(
- String.format("unload '%s' '%s'", "./" + resource.getTsFilePath(), tmpDir));
- }
- assertEquals(
- 0,
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getSequenceFileList()
- .size());
- assertNotNull(tmpDir.listFiles());
- assertEquals(2, tmpDir.listFiles().length >> 1);
- } catch (StorageEngineException | IllegalPathException e) {
- Assert.fail();
- }
- }
-
- @Test
- public void loadSequenceTsfileTest() throws SQLException {
- try (Connection connection =
- DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
- Statement statement = connection.createStatement()) {
-
- // move root.vehicle
- List<TsFileResource> resources =
- new ArrayList<>(
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.vehicle"))
- .getSequenceFileList());
- File tmpDir =
- new File(
- resources
- .get(0)
- .getTsFile()
- .getParentFile()
- .getParentFile()
- .getParentFile()
- .getParentFile(),
- "tmp"
- + File.separator
- + new PartialPath("root.vehicle")
- + File.separator
- + "0"
- + File.separator
- + "0");
- if (!tmpDir.exists()) {
- tmpDir.mkdirs();
- }
- for (TsFileResource resource : resources) {
- statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), tmpDir));
- }
-
- // move root.test
- resources =
- new ArrayList<>(
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getSequenceFileList());
- tmpDir =
- new File(
- resources
- .get(0)
- .getTsFile()
- .getParentFile()
- .getParentFile()
- .getParentFile()
- .getParentFile(),
- "tmp"
- + File.separator
- + new PartialPath("root.test")
- + File.separator
- + "0"
- + File.separator
- + "0");
- if (!tmpDir.exists()) {
- tmpDir.mkdirs();
- }
- for (TsFileResource resource : resources) {
- statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), tmpDir));
- }
-
- // load all tsfile in tmp dir
- tmpDir =
- new File(
- resources
- .get(0)
- .getTsFile()
- .getParentFile()
- .getParentFile()
- .getParentFile()
- .getParentFile(),
- "tmp");
- statement.execute(String.format("load '%s'", tmpDir.getAbsolutePath()));
- resources =
- new ArrayList<>(
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.vehicle"))
- .getSequenceFileList());
- assertEquals(1, resources.size());
- resources =
- new ArrayList<>(
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getSequenceFileList());
- assertEquals(2, resources.size());
- assertNotNull(tmpDir.listFiles());
- assertEquals(
- 0,
- new File(
- tmpDir,
- new PartialPath("root.vehicle") + File.separator + "0" + File.separator + "0")
- .listFiles()
- .length);
- assertEquals(
- 0,
- new File(
- tmpDir,
- new PartialPath("root.test") + File.separator + "0" + File.separator + "0")
- .listFiles()
- .length);
- } catch (StorageEngineException | IllegalPathException e) {
- Assert.fail();
- }
- }
-
- @Test
- public void loadUnsequenceTsfileTest() throws SQLException {
- prepareData(insertUnsequenceSqls);
- String[] queryRes =
- new String[] {
- "1,null,null,null,null,null,null,110",
- "2,null,null,null,null,null,1209,null",
- "4,null,null,null,null,null,null,330",
- "6,120,null,null,null,null,null,null",
- "9,null,123,null,null,null,null,null",
- "10,100,null,null,null,106,null,1100",
- "11,null,null,104.0,null,null,null,null",
- "12,101,102,null,null,null,null,null",
- "13,null,null,null,null,427,528,null",
- "14,null,null,1024.0,null,107,108,430",
- "15,null,null,105.0,true,null,null,null",
- "16,128,null,null,null,null,109,null",
- "17,null,null,null,false,null,null,null",
- "18,189,198,null,null,null,null,null",
- "19,null,103,null,null,null,null,null",
- "20,1000,null,null,null,426,null,null",
- "29,null,null,1205.0,true,null,null,null",
- "30,null,null,null,null,1006,null,null",
- "33,null,null,null,true,null,null,null",
- "34,null,null,null,null,1007,1008,null",
- "36,null,null,null,null,null,1090,null",
- "38,121,122,null,null,null,null,null",
- "45,null,null,null,null,126,null,null",
- "68,null,null,null,null,127,128,null",
- "78,null,null,null,null,null,129,null",
- "80,null,null,null,null,127,128,null",
- "99,null,1234,null,null,null,null,null",
- "140,null,null,null,null,null,null,430",
- "150,null,null,null,null,426,null,null",
- "200,null,null,null,null,null,129,null"
- };
- try (Connection connection =
- DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
- Statement statement = connection.createStatement()) {
-
- // check query result
- boolean hasResultSet = statement.execute("SELECT * FROM root.**");
- Assert.assertTrue(hasResultSet);
- try (ResultSet resultSet = statement.getResultSet()) {
- int cnt = 0;
- while (resultSet.next()) {
- String queryString =
- resultSet.getString(TIMESTAMP_STR)
- + ","
- + resultSet.getString(VEHICLE_D0_S0_STR)
- + ","
- + resultSet.getString(VEHICLE_D0_S1_STR)
- + ","
- + resultSet.getString(VEHICLE_D0_S2_STR)
- + ","
- + resultSet.getString(VEHICLE_D0_S3_STR)
- + ","
- + resultSet.getString(TEST_D0_S0_STR)
- + ","
- + resultSet.getString(TEST_D0_S1_STR)
- + ","
- + resultSet.getString(TEST_D1_STR);
- Assert.assertEquals(queryRes[cnt++], queryString);
- }
- }
-
- // move root.vehicle
- List<TsFileResource> resources =
- new ArrayList<>(
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.vehicle"))
- .getSequenceFileList());
- assertEquals(2, resources.size());
- File tmpDir =
- new File(
- resources.get(0).getTsFile().getParentFile().getParentFile().getParentFile(),
- "tmp" + File.separator + new PartialPath("root.vehicle") + File.separator + "0");
- if (!tmpDir.exists()) {
- tmpDir.mkdirs();
- }
- for (TsFileResource resource : resources) {
- statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), tmpDir));
- }
- resources =
- new ArrayList<>(
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.vehicle"))
- .getUnSequenceFileList());
- assertEquals(1, resources.size());
- for (TsFileResource resource : resources) {
- statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), tmpDir));
- }
-
- // move root.test
- resources =
- new ArrayList<>(
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getSequenceFileList());
- assertEquals(2, resources.size());
- tmpDir = new File(tmpDir.getParentFile().getParentFile(), "root.test" + File.separator + "0");
- if (!tmpDir.exists()) {
- tmpDir.mkdirs();
- }
- for (TsFileResource resource : resources) {
- statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), tmpDir));
- }
- resources =
- new ArrayList<>(
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getUnSequenceFileList());
- assertEquals(2, resources.size());
- for (TsFileResource resource : resources) {
- statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), tmpDir));
- }
-
- // load all tsfile in tmp dir
- tmpDir = tmpDir.getParentFile().getParentFile();
- statement.execute(String.format("load '%s'", tmpDir.getAbsolutePath()));
- assertEquals(
- 2,
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.vehicle"))
- .getSequenceFileList()
- .size());
- assertEquals(
- 1,
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.vehicle"))
- .getUnSequenceFileList()
- .size());
- if (config.getTimeIndexLevel().equals(TimeIndexLevel.DEVICE_TIME_INDEX)) {
- if (StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getUnSequenceFileList()
- .size()
- == 1) {
- assertEquals(
- 3,
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getSequenceFileList()
- .size());
- } else {
- assertEquals(
- 2,
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getSequenceFileList()
- .size());
- }
- } else if (config.getTimeIndexLevel().equals(TimeIndexLevel.FILE_TIME_INDEX)) {
- assertEquals(
- 2,
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getUnSequenceFileList()
- .size());
- assertEquals(
- 2,
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getSequenceFileList()
- .size());
- }
- assertNotNull(tmpDir.listFiles());
- assertEquals(
- 0,
- new File(tmpDir, new PartialPath("root.vehicle") + File.separator + "0")
- .listFiles()
- .length);
- assertEquals(
- 0,
- new File(tmpDir, new PartialPath("root.test") + File.separator + "0").listFiles().length);
-
- // check query result
- hasResultSet = statement.execute("SELECT * FROM root.**");
- Assert.assertTrue(hasResultSet);
- try (ResultSet resultSet = statement.getResultSet()) {
- int cnt = 0;
- while (resultSet.next()) {
- String queryString =
- resultSet.getString(TIMESTAMP_STR)
- + ","
- + resultSet.getString(VEHICLE_D0_S0_STR)
- + ","
- + resultSet.getString(VEHICLE_D0_S1_STR)
- + ","
- + resultSet.getString(VEHICLE_D0_S2_STR)
- + ","
- + resultSet.getString(VEHICLE_D0_S3_STR)
- + ","
- + resultSet.getString(TEST_D0_S0_STR)
- + ","
- + resultSet.getString(TEST_D0_S1_STR)
- + ","
- + resultSet.getString(TEST_D1_STR);
- Assert.assertEquals(queryRes[cnt++], queryString);
- }
- }
- } catch (StorageEngineException | IllegalPathException e) {
- Assert.fail();
- }
- }
-
- @Test
- public void loadTsfileWithModsTest() throws SQLException {
- prepareData(insertUnsequenceSqls);
- prepareData(deleteTimeseiresSqls);
- String[] queryRes =
- new String[] {
- "1,null,null,null,null,null,null,110",
- "2,null,null,null,null,null,1209,null",
- "4,null,null,null,null,null,null,330",
- "6,120,null,null,null,null,null,null",
- "9,null,123,null,null,null,null,null",
- "10,null,null,null,null,106,null,1100",
- "13,null,null,null,null,427,528,null",
- "14,null,null,null,null,107,108,430",
- "16,null,null,null,null,null,109,null",
- "20,null,null,null,null,426,null,null",
- "29,null,null,1205.0,true,null,null,null",
- "30,null,null,null,null,1006,null,null",
- "33,null,null,null,true,null,null,null",
- "34,null,null,null,null,1007,1008,null",
- "36,null,null,null,null,null,1090,null",
- "38,121,122,null,null,null,null,null",
- "45,null,null,null,null,126,null,null",
- "68,null,null,null,null,127,128,null",
- "78,null,null,null,null,null,129,null",
- "80,null,null,null,null,127,128,null",
- "99,null,1234,null,null,null,null,null",
- "140,null,null,null,null,null,null,430",
- "150,null,null,null,null,426,null,null",
- "200,null,null,null,null,null,129,null"
- };
- try (Connection connection =
- DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
- Statement statement = connection.createStatement()) {
-
- // check query result
- boolean hasResultSet = statement.execute("SELECT * FROM root.**");
- Assert.assertTrue(hasResultSet);
- try (ResultSet resultSet = statement.getResultSet()) {
- int cnt = 0;
- while (resultSet.next()) {
- String queryString =
- resultSet.getString(TIMESTAMP_STR)
- + ","
- + resultSet.getString(VEHICLE_D0_S0_STR)
- + ","
- + resultSet.getString(VEHICLE_D0_S1_STR)
- + ","
- + resultSet.getString(VEHICLE_D0_S2_STR)
- + ","
- + resultSet.getString(VEHICLE_D0_S3_STR)
- + ","
- + resultSet.getString(TEST_D0_S0_STR)
- + ","
- + resultSet.getString(TEST_D0_S1_STR)
- + ","
- + resultSet.getString(TEST_D1_STR);
- Assert.assertEquals(queryRes[cnt++], queryString);
- }
- }
-
- // move root.vehicle
- List<TsFileResource> resources =
- new ArrayList<>(
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.vehicle"))
- .getSequenceFileList());
- assertEquals(2, resources.size());
- File tmpDir =
- new File(
- resources.get(0).getTsFile().getParentFile().getParentFile().getParentFile(),
- "tmp" + File.separator + new PartialPath("root.vehicle") + File.separator + "0");
- if (!tmpDir.exists()) {
- tmpDir.mkdirs();
- }
- for (TsFileResource resource : resources) {
- statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), tmpDir));
- }
- resources =
- new ArrayList<>(
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.vehicle"))
- .getUnSequenceFileList());
- assertEquals(1, resources.size());
- for (TsFileResource resource : resources) {
- statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), tmpDir));
- }
-
- // move root.test
- resources =
- new ArrayList<>(
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getSequenceFileList());
- assertEquals(2, resources.size());
- tmpDir = new File(tmpDir.getParentFile().getParentFile(), "root.test" + File.separator + "0");
- if (!tmpDir.exists()) {
- tmpDir.mkdirs();
- }
- for (TsFileResource resource : resources) {
- statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), tmpDir));
- }
- resources =
- new ArrayList<>(
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getUnSequenceFileList());
- assertEquals(2, resources.size());
- for (TsFileResource resource : resources) {
- statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), tmpDir));
- }
-
- // load all tsfile in tmp dir
- tmpDir = tmpDir.getParentFile().getParentFile();
- statement.execute(String.format("load '%s'", tmpDir.getAbsolutePath()));
- assertEquals(
- 2,
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.vehicle"))
- .getSequenceFileList()
- .size());
- assertEquals(
- 1,
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.vehicle"))
- .getUnSequenceFileList()
- .size());
- if (config.getTimeIndexLevel().equals(TimeIndexLevel.DEVICE_TIME_INDEX)) {
- if (StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getUnSequenceFileList()
- .size()
- == 1) {
- assertEquals(
- 3,
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getSequenceFileList()
- .size());
- } else {
- assertEquals(
- 2,
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getSequenceFileList()
- .size());
- }
- } else if (config.getTimeIndexLevel().equals(TimeIndexLevel.FILE_TIME_INDEX)) {
- assertEquals(
- 2,
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getUnSequenceFileList()
- .size());
- assertEquals(
- 2,
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getSequenceFileList()
- .size());
- }
- assertNotNull(tmpDir.listFiles());
- assertEquals(
- 0,
- new File(tmpDir, new PartialPath("root.vehicle") + File.separator + "0")
- .listFiles()
- .length);
- assertEquals(
- 0,
- new File(tmpDir, new PartialPath("root.test") + File.separator + "0").listFiles().length);
-
- // check query result
- hasResultSet = statement.execute("SELECT * FROM root.**");
- Assert.assertTrue(hasResultSet);
- try (ResultSet resultSet = statement.getResultSet()) {
- int cnt = 0;
- while (resultSet.next()) {
- String queryString =
- resultSet.getString(TIMESTAMP_STR)
- + ","
- + resultSet.getString(VEHICLE_D0_S0_STR)
- + ","
- + resultSet.getString(VEHICLE_D0_S1_STR)
- + ","
- + resultSet.getString(VEHICLE_D0_S2_STR)
- + ","
- + resultSet.getString(VEHICLE_D0_S3_STR)
- + ","
- + resultSet.getString(TEST_D0_S0_STR)
- + ","
- + resultSet.getString(TEST_D0_S1_STR)
- + ","
- + resultSet.getString(TEST_D1_STR);
- Assert.assertEquals(queryRes[cnt++], queryString);
- }
- }
- } catch (StorageEngineException | IllegalPathException e) {
- Assert.fail();
- }
- }
-
- @Test
- public void loadTsFileTestWithAutoCreateSchema() throws SQLException {
- try (Connection connection =
- DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
- Statement statement = connection.createStatement()) {
-
- // move root.vehicle
- List<TsFileResource> resources =
- new ArrayList<>(
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.vehicle"))
- .getSequenceFileList());
-
- File tmpDir =
- new File(
- resources
- .get(0)
- .getTsFile()
- .getParentFile()
- .getParentFile()
- .getParentFile()
- .getParentFile(),
- "tmp"
- + File.separator
- + "root.vehicle"
- + File.separator
- + "0"
- + File.separator
- + "0");
- if (!tmpDir.exists()) {
- tmpDir.mkdirs();
- }
- for (TsFileResource resource : resources) {
- statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), tmpDir));
- }
-
- // move root.test
- resources =
- new ArrayList<>(
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getSequenceFileList());
- tmpDir =
- new File(
- resources
- .get(0)
- .getTsFile()
- .getParentFile()
- .getParentFile()
- .getParentFile()
- .getParentFile(),
- "tmp" + File.separator + "root.test" + File.separator + "0" + File.separator + "0");
- if (!tmpDir.exists()) {
- tmpDir.mkdirs();
- }
- for (TsFileResource resource : resources) {
- statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), tmpDir));
- }
-
- Set<String> expectedSet =
- new HashSet<>(
- Arrays.asList(
- "root.vehicle.d0.s0,root.vehicle,INT32",
- "root.vehicle.d0.s1,root.vehicle,TEXT",
- "root.vehicle.d1.s2,root.vehicle,FLOAT",
- "root.vehicle.d1.s3,root.vehicle,BOOLEAN",
- "root.test.d0.s0,root.test,INT32",
- "root.test.d0.s1,root.test,TEXT",
- "root.test.d1.g0.s0,root.test,INT32"));
-
- boolean hasResultSet = statement.execute("SHOW timeseries");
- Assert.assertTrue(hasResultSet);
- try (ResultSet resultSet = statement.getResultSet()) {
- while (resultSet.next()) {
- Assert.assertTrue(
- expectedSet.contains(
- resultSet.getString(1)
- + ","
- + resultSet.getString(3)
- + ","
- + resultSet.getString(4)));
- }
- }
-
- // remove metadata
- for (String sql : deleteSqls) {
- statement.execute(sql);
- }
-
- // test not load metadata automatically, it will occur errors.
- // UPDATE: load grammar is updated in 0.14, change this into load metadata automatically
- boolean hasError = false;
- try {
- statement.execute(String.format("load '%s' sglevel=1", tmpDir.getAbsolutePath()));
- } catch (Exception e) {
- hasError = true;
- }
- Assert.assertFalse(hasError);
-
- // test load metadata automatically, it will succeed.
- tmpDir = tmpDir.getParentFile().getParentFile().getParentFile();
- statement.execute(String.format("load '%s' sglevel=1", tmpDir.getAbsolutePath()));
- resources =
- new ArrayList<>(
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.vehicle"))
- .getSequenceFileList());
- assertEquals(1, resources.size());
- resources =
- new ArrayList<>(
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getSequenceFileList());
- assertEquals(2, resources.size());
- assertEquals(2, tmpDir.listFiles().length);
- for (File dir : tmpDir.listFiles()) {
- assertEquals(0, dir.listFiles()[0].listFiles()[0].listFiles().length);
- }
- } catch (StorageEngineException | IllegalPathException e) {
- e.printStackTrace();
- Assert.fail();
- }
- }
-
- @Test
- public void loadTsFileTestWithVerifyMetadata() throws Exception {
- try (Connection connection =
- DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
- Statement statement = connection.createStatement()) {
- List<TsFileResource> resources =
- new ArrayList<>(
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.vehicle"))
- .getSequenceFileList());
- assertEquals(1, resources.size());
- File vehicleTmpDir =
- new File(
- resources
- .get(0)
- .getTsFile()
- .getParentFile()
- .getParentFile()
- .getParentFile()
- .getParentFile(),
- "tmp" + File.separator + "root.vehicle");
- if (!vehicleTmpDir.exists()) {
- vehicleTmpDir.mkdirs();
- }
-
- for (TsFileResource resource : resources) {
- statement.execute(
- String.format("unload '%s' '%s'", resource.getTsFilePath(), vehicleTmpDir));
- }
-
- resources =
- new ArrayList<>(
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getSequenceFileList());
- assertEquals(2, resources.size());
-
- File testTmpDir = new File(vehicleTmpDir.getParentFile(), "root.test");
- if (!testTmpDir.exists()) {
- testTmpDir.mkdirs();
- }
-
- for (TsFileResource resource : resources) {
- statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), testTmpDir));
- }
-
- for (String sql : deleteSqls) {
- statement.execute(sql);
- }
-
- List<String> metaDataSqls =
- new ArrayList<>(
- Arrays.asList(
- "CREATE DATABASE root.vehicle",
- "CREATE DATABASE root.test",
- "CREATE TIMESERIES root.vehicle.d0.s0 WITH DATATYPE=INT64, ENCODING=RLE",
- "CREATE TIMESERIES root.vehicle.d0.s1 WITH DATATYPE=TEXT, ENCODING=PLAIN",
- "CREATE TIMESERIES root.vehicle.d1.s2 WITH DATATYPE=FLOAT, ENCODING=RLE",
- "CREATE TIMESERIES root.vehicle.d1.s3 WITH DATATYPE=BOOLEAN, ENCODING=PLAIN",
- "CREATE ALIGNED TIMESERIES root.test.d0(s0 FLOAT encoding=RLE, s1 TEXT encoding=PLAIN)",
- "CREATE ALIGNED TIMESERIES root.test.d1.g0(s0 INT32 encoding=RLE)"));
-
- for (String sql : metaDataSqls) {
- statement.execute(sql);
- }
-
- // load vehicle
- boolean hasError = false;
- try {
- statement.execute(String.format("load '%s'", vehicleTmpDir));
- } catch (Exception e) {
- hasError = true;
- assertTrue(
- e.getMessage()
- .contains("is aligned in the loading TsFile but is not aligned in IoTDB."));
- }
- assertTrue(hasError);
-
- statement.execute(String.format("load '%s' verify=false", vehicleTmpDir));
- assertEquals(
- 1,
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.vehicle"))
- .getSequenceFileList()
- .size());
-
- // load test
- hasError = false;
- try {
- statement.execute(String.format("load '%s'", testTmpDir));
- } catch (Exception e) {
- hasError = true;
- assertTrue(
- e.getMessage()
- .contains(
- "because root.test.d0.s0 is INT32 in the loading TsFile but is FLOAT in IoTDB."));
- }
- assertTrue(hasError);
-
- statement.execute(String.format("load '%s' verify=false", testTmpDir));
- assertEquals(
- 2,
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getSequenceFileList()
- .size());
-
- } catch (Exception e) {
- e.printStackTrace();
- Assert.fail();
- }
- }
-
- @Test
- public void removeTsFileTest() throws SQLException {
- try (Connection connection =
- DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
- Statement statement = connection.createStatement()) {
- List<TsFileResource> resources =
- new ArrayList<>(
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.vehicle"))
- .getSequenceFileList());
- assertEquals(1, resources.size());
- for (TsFileResource resource : resources) {
- statement.execute(String.format("remove '%s'", resource.getTsFilePath()));
- }
- assertEquals(
- 0,
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.vehicle"))
- .getSequenceFileList()
- .size());
-
- resources =
- new ArrayList<>(
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getSequenceFileList());
- assertEquals(2, resources.size());
- for (TsFileResource resource : resources) {
- statement.execute(String.format("remove '%s'", resource.getTsFilePath()));
- }
- assertEquals(
- 0,
- StorageEngine.getInstance()
- .getProcessor(new PartialPath("root.test"))
- .getSequenceFileList()
- .size());
- } catch (StorageEngineException | IllegalPathException e) {
- Assert.fail();
- }
- }
-
- private void prepareData(String[] sqls) {
- try (Connection connection =
- DriverManager.getConnection(
- Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root", "root");
- Statement statement = connection.createStatement()) {
-
- for (String sql : sqls) {
- statement.execute(sql);
- }
-
- } catch (Exception e) {
- LOGGER.error("Can not execute sql.", e);
- fail();
- }
- }
-}
+//
+// import org.apache.iotdb.commons.exception.IllegalPathException;
+// import org.apache.iotdb.commons.path.PartialPath;
+// import org.apache.iotdb.db.conf.IoTDBConfig;
+// import org.apache.iotdb.db.conf.IoTDBDescriptor;
+// import org.apache.iotdb.db.engine.StorageEngine;
+// import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
+// import org.apache.iotdb.db.engine.storagegroup.timeindex.TimeIndexLevel;
+// import org.apache.iotdb.db.exception.StorageEngineException;
+// import org.apache.iotdb.db.utils.EnvironmentUtils;
+// import org.apache.iotdb.itbase.category.LocalStandaloneTest;
+// import org.apache.iotdb.jdbc.Config;
+//
+// import org.junit.After;
+// import org.junit.Assert;
+// import org.junit.Before;
+// import org.junit.Test;
+// import org.junit.experimental.categories.Category;
+// import org.slf4j.Logger;
+// import org.slf4j.LoggerFactory;
+//
+// import java.io.File;
+// import java.sql.Connection;
+// import java.sql.DriverManager;
+// import java.sql.ResultSet;
+// import java.sql.SQLException;
+// import java.sql.Statement;
+// import java.util.ArrayList;
+// import java.util.Arrays;
+// import java.util.HashSet;
+// import java.util.List;
+// import java.util.Set;
+//
+// import static org.junit.Assert.assertEquals;
+// import static org.junit.Assert.assertNotNull;
+// import static org.junit.Assert.assertTrue;
+// import static org.junit.Assert.fail;
+//
+// @Category({LocalStandaloneTest.class})
+// public class IoTDBLoadExternalAlignedTsFileIT {
+//
+// private static final Logger LOGGER =
+// LoggerFactory.getLogger(IoTDBLoadExternalAlignedTsFileIT.class);
+//
+// private static final IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig();
+//
+// private static String[] insertSequenceSqls =
+// new String[] {
+// "CREATE DATABASE root.vehicle",
+// "CREATE DATABASE root.test",
+// "CREATE ALIGNED TIMESERIES root.vehicle.d0(s0 INT32 encoding=RLE, s1 TEXT
+// encoding=PLAIN)",
+// "CREATE ALIGNED TIMESERIES root.vehicle.d1(s2 FLOAT encoding=RLE, s3 BOOLEAN
+// encoding=PLAIN)",
+// "CREATE ALIGNED TIMESERIES root.test.d0(s0 INT32 encoding=RLE, s1 TEXT encoding=PLAIN)",
+// "CREATE ALIGNED TIMESERIES root.test.d1.g0(s0 INT32 encoding=RLE)",
+// "insert into root.vehicle.d0(timestamp,s0) aligned values(10,100)",
+// "insert into root.vehicle.d0(timestamp,s0,s1) aligned values(12,101,'102')",
+// "insert into root.vehicle.d0(timestamp,s1) aligned values(19,'103')",
+// "insert into root.vehicle.d1(timestamp,s2) aligned values(11,104.0)",
+// "insert into root.vehicle.d1(timestamp,s2,s3) aligned values(15,105.0,true)",
+// "insert into root.vehicle.d1(timestamp,s3) aligned values(17,false)",
+// "insert into root.vehicle.d0(timestamp,s0) aligned values(20,1000)",
+// "insert into root.test.d0(timestamp,s0) aligned values(10,106)",
+// "insert into root.test.d0(timestamp,s0,s1) aligned values(14,107,'108')",
+// "insert into root.test.d0(timestamp,s1) aligned values(16,'109')",
+// "insert into root.test.d1.g0(timestamp,s0) aligned values(1,110)",
+// "insert into root.test.d0(timestamp,s0) aligned values(30,1006)",
+// "insert into root.test.d0(timestamp,s0,s1) aligned values(34,1007,'1008')",
+// "insert into root.test.d0(timestamp,s1) aligned values(36,'1090')",
+// "insert into root.test.d1.g0(timestamp,s0) aligned values(10,1100)",
+// "flush",
+// "insert into root.test.d0(timestamp,s0) aligned values(150,126)",
+// "insert into root.test.d0(timestamp,s0,s1) aligned values(80,127,'128')",
+// "insert into root.test.d0(timestamp,s1) aligned values(200,'129')",
+// "insert into root.test.d1.g0(timestamp,s0) aligned values(140,430)",
+// "insert into root.test.d0(timestamp,s0) aligned values(150,426)",
+// "flush"
+// };
+//
+// private static String[] insertUnsequenceSqls =
+// new String[] {
+// "insert into root.vehicle.d0(timestamp,s0) aligned values(6,120)",
+// "insert into root.vehicle.d0(timestamp,s0,s1) aligned values(38,121,'122')",
+// "insert into root.vehicle.d0(timestamp,s1) aligned values(9,'123')",
+// "insert into root.vehicle.d0(timestamp,s0) aligned values(16,128)",
+// "insert into root.vehicle.d0(timestamp,s0,s1) aligned values(18,189,'198')",
+// "insert into root.vehicle.d0(timestamp,s1) aligned values(99,'1234')",
+// "insert into root.vehicle.d1(timestamp,s2) aligned values(14,1024.0)",
+// "insert into root.vehicle.d1(timestamp,s2,s3) aligned values(29,1205.0,true)",
+// "insert into root.vehicle.d1(timestamp,s3) aligned values(33,true)",
+// "insert into root.test.d0(timestamp,s0) aligned values(45,126)",
+// "insert into root.test.d0(timestamp,s0,s1) aligned values(68,127,'128')",
+// "insert into root.test.d0(timestamp,s1) aligned values(78,'129')",
+// "insert into root.test.d1.g0(timestamp,s0) aligned values(14,430)",
+// "flush",
+// "insert into root.test.d0(timestamp,s0) aligned values(20,426)",
+// "insert into root.test.d0(timestamp,s0,s1) aligned values(13,427,'528')",
+// "insert into root.test.d0(timestamp,s1) aligned values(2,'1209')",
+// "insert into root.test.d1.g0(timestamp,s0) aligned values(4,330)",
+// "flush",
+// };
+//
+// private static String[] deleteTimeseiresSqls =
+// new String[] {"delete from root.vehicle.** where time >= 10 and time<=20", "flush"};
+//
+// private static final String TIMESTAMP_STR = "Time";
+// private static final String VEHICLE_D0_S0_STR = "root.vehicle.d0.s0";
+// private static final String VEHICLE_D0_S1_STR = "root.vehicle.d0.s1";
+// private static final String VEHICLE_D0_S2_STR = "root.vehicle.d1.s2";
+// private static final String VEHICLE_D0_S3_STR = "root.vehicle.d1.s3";
+// private static final String TEST_D0_S0_STR = "root.test.d0.s0";
+// private static final String TEST_D0_S1_STR = "root.test.d0.s1";
+// private static final String TEST_D1_STR = "root.test.d1.g0.s0";
+//
+// private int prevVirtualPartitionNum;
+// private int prevCompactionThread;
+//
+// private static String[] deleteSqls =
+// new String[] {"DELETE DATABASE root.vehicle", "DELETE DATABASE root.test"};
+//
+// @Before
+// public void setUp() throws Exception {
+// prevVirtualPartitionNum = IoTDBDescriptor.getInstance().getConfig().getDataRegionNum();
+// IoTDBDescriptor.getInstance().getConfig().setDataRegionNum(1);
+// prevCompactionThread = IoTDBDescriptor.getInstance().getConfig().getCompactionThreadCount();
+// EnvironmentUtils.envSetUp();
+// Class.forName(Config.JDBC_DRIVER_NAME);
+// prepareData(insertSequenceSqls);
+// }
+//
+// @After
+// public void tearDown() throws Exception {
+// EnvironmentUtils.cleanEnv();
+// IoTDBDescriptor.getInstance().getConfig().setCompactionThreadCount(prevCompactionThread);
+// IoTDBDescriptor.getInstance().getConfig().setDataRegionNum(prevVirtualPartitionNum);
+// }
+//
+// @Test
+// public void unloadTsfileTest() throws SQLException {
+// try (Connection connection =
+// DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
+// Statement statement = connection.createStatement()) {
+//
+// // move root.vehicle
+// List<TsFileResource> resources =
+// new ArrayList<>(
+// StorageEngine.getInstance()
+// .getProcessor(new PartialPath("root.vehicle"))
+// .getSequenceFileList());
+// assertEquals(1, resources.size());
+// File tmpDir =
+// new File(
+// resources.get(0).getTsFile().getParentFile().getParentFile(),
+// "tmp" + File.separator + new PartialPath("root.vehicle"));
+// if (!tmpDir.exists()) {
+// tmpDir.mkdirs();
+// }
+// for (TsFileResource resource : resources) {
+// statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), tmpDir));
+// }
+// assertEquals(
+// 0,
+// StorageEngine.getInstance()
+// .getProcessor(new PartialPath("root.vehicle"))
+// .getSequenceFileList()
+// .size());
+// assertNotNull(tmpDir.listFiles());
+// assertEquals(1, tmpDir.listFiles().length >> 1);
+//
+// // move root.test
+// resources =
+// new ArrayList<>(
+// StorageEngine.getInstance()
+// .getProcessor(new PartialPath("root.test"))
+// .getSequenceFileList());
+// assertEquals(2, resources.size());
+// tmpDir =
+// new File(
+// resources.get(0).getTsFile().getParentFile().getParentFile(),
+// "tmp" + File.separator + new PartialPath("root.test"));
+// if (!tmpDir.exists()) {
+// tmpDir.mkdirs();
+// }
+// for (TsFileResource resource : resources) {
+// // test unload using relative path
+// statement.execute(
+// String.format("unload '%s' '%s'", "./" + resource.getTsFilePath(), tmpDir));
+// }
+// assertEquals(
+// 0,
+// StorageEngine.getInstance()
+// .getProcessor(new PartialPath("root.test"))
+// .getSequenceFileList()
+// .size());
+// assertNotNull(tmpDir.listFiles());
+// assertEquals(2, tmpDir.listFiles().length >> 1);
+// } catch (StorageEngineException | IllegalPathException e) {
+// Assert.fail();
+// }
+// }
+//
+// @Test
+// public void loadSequenceTsfileTest() throws SQLException {
+// try (Connection connection =
+// DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
+// Statement statement = connection.createStatement()) {
+//
+// // move root.vehicle
+// List<TsFileResource> resources =
+// new ArrayList<>(
+// StorageEngine.getInstance()
+// .getProcessor(new PartialPath("root.vehicle"))
+// .getSequenceFileList());
+// File tmpDir =
+// new File(
+// resources
+// .get(0)
+// .getTsFile()
+// .getParentFile()
+// .getParentFile()
+// .getParentFile()
+// .getParentFile(),
+// "tmp"
+// + File.separator
+// + new PartialPath("root.vehicle")
+// + File.separator
+// + "0"
+// + File.separator
+// + "0");
+// if (!tmpDir.exists()) {
+// tmpDir.mkdirs();
+// }
+// for (TsFileResource resource : resources) {
+// statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), tmpDir));
+// }
+//
+// // move root.test
+// resources =
+// new ArrayList<>(
+// StorageEngine.getInstance()
+// .getProcessor(new PartialPath("root.test"))
+// .getSequenceFileList());
+// tmpDir =
+// new File(
+// resources
+// .get(0)
+// .getTsFile()
+// .getParentFile()
+// .getParentFile()
+// .getParentFile()
+// .getParentFile(),
+// "tmp"
+// + File.separator
+// + new PartialPath("root.test")
+// + File.separator
+// + "0"
+// + File.separator
+// + "0");
+// if (!tmpDir.exists()) {
+// tmpDir.mkdirs();
+// }
+// for (TsFileResource resource : resources) {
+// statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), tmpDir));
+// }
+//
+// // load all tsfile in tmp dir
+// tmpDir =
+// new File(
+// resources
+// .get(0)
+// .getTsFile()
+// .getParentFile()
+// .getParentFile()
+// .getParentFile()
+// .getParentFile(),
+// "tmp");
+// statement.execute(String.format("load '%s'", tmpDir.getAbsolutePath()));
+// resources =
+// new ArrayList<>(
+// StorageEngine.getInstance()
+// .getProcessor(new PartialPath("root.vehicle"))
+// .getSequenceFileList());
+// assertEquals(1, resources.size());
+// resources =
+// new ArrayList<>(
+// StorageEngine.getInstance()
+// .getProcessor(new PartialPath("root.test"))
+// .getSequenceFileList());
+// assertEquals(2, resources.size());
+// assertNotNull(tmpDir.listFiles());
+// assertEquals(
+// 0,
+// new File(
+// tmpDir,
+// new PartialPath("root.vehicle") + File.separator + "0" + File.separator + "0")
+// .listFiles()
+// .length);
+// assertEquals(
+// 0,
+// new File(
+// tmpDir,
+// new PartialPath("root.test") + File.separator + "0" + File.separator + "0")
+// .listFiles()
+// .length);
+// } catch (StorageEngineException | IllegalPathException e) {
+// Assert.fail();
+// }
+// }
+//
+// @Test
+// public void loadUnsequenceTsfileTest() throws SQLException {
+// prepareData(insertUnsequenceSqls);
+// String[] queryRes =
+// new String[] {
+// "1,null,null,null,null,null,null,110",
+// "2,null,null,null,null,null,1209,null",
+// "4,null,null,null,null,null,null,330",
+// "6,120,null,null,null,null,null,null",
+// "9,null,123,null,null,null,null,null",
+// "10,100,null,null,null,106,null,1100",
+// "11,null,null,104.0,null,null,null,null",
+// "12,101,102,null,null,null,null,null",
+// "13,null,null,null,null,427,528,null",
+// "14,null,null,1024.0,null,107,108,430",
+// "15,null,null,105.0,true,null,null,null",
+// "16,128,null,null,null,null,109,null",
+// "17,null,null,null,false,null,null,null",
+// "18,189,198,null,null,null,null,null",
+// "19,null,103,null,null,null,null,null",
+// "20,1000,null,null,null,426,null,null",
+// "29,null,null,1205.0,true,null,null,null",
+// "30,null,null,null,null,1006,null,null",
+// "33,null,null,null,true,null,null,null",
+// "34,null,null,null,null,1007,1008,null",
+// "36,null,null,null,null,null,1090,null",
+// "38,121,122,null,null,null,null,null",
+// "45,null,null,null,null,126,null,null",
+// "68,null,null,null,null,127,128,null",
+// "78,null,null,null,null,null,129,null",
+// "80,null,null,null,null,127,128,null",
+// "99,null,1234,null,null,null,null,null",
+// "140,null,null,null,null,null,null,430",
+// "150,null,null,null,null,426,null,null",
+// "200,null,null,null,null,null,129,null"
+// };
+// try (Connection connection =
+// DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
+// Statement statement = connection.createStatement()) {
+//
+// // check query result
+// boolean hasResultSet = statement.execute("SELECT * FROM root.**");
+// Assert.assertTrue(hasResultSet);
+// try (ResultSet resultSet = statement.getResultSet()) {
+// int cnt = 0;
+// while (resultSet.next()) {
+// String queryString =
+// resultSet.getString(TIMESTAMP_STR)
+// + ","
+// + resultSet.getString(VEHICLE_D0_S0_STR)
+// + ","
+// + resultSet.getString(VEHICLE_D0_S1_STR)
+// + ","
+// + resultSet.getString(VEHICLE_D0_S2_STR)
+// + ","
+// + resultSet.getString(VEHICLE_D0_S3_STR)
+// + ","
+// + resultSet.getString(TEST_D0_S0_STR)
+// + ","
+// + resultSet.getString(TEST_D0_S1_STR)
+// + ","
+// + resultSet.getString(TEST_D1_STR);
+// Assert.assertEquals(queryRes[cnt++], queryString);
+// }
+// }
+//
+// // move root.vehicle
+// List<TsFileResource> resources =
+// new ArrayList<>(
+// StorageEngine.getInstance()
+// .getProcessor(new PartialPath("root.vehicle"))
+// .getSequenceFileList());
+// assertEquals(2, resources.size());
+// File tmpDir =
+// new File(
+// resources.get(0).getTsFile().getParentFile().getParentFile().getParentFile(),
+// "tmp" + File.separator + new PartialPath("root.vehicle") + File.separator + "0");
+// if (!tmpDir.exists()) {
+// tmpDir.mkdirs();
+// }
+// for (TsFileResource resource : resources) {
+// statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), tmpDir));
+// }
+// resources =
+// new ArrayList<>(
+// StorageEngine.getInstance()
+// .getProcessor(new PartialPath("root.vehicle"))
+// .getUnSequenceFileList());
+// assertEquals(1, resources.size());
+// for (TsFileResource resource : resources) {
+// statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), tmpDir));
+// }
+//
+// // move root.test
+// resources =
+// new ArrayList<>(
+// StorageEngine.getInstance()
+// .getProcessor(new PartialPath("root.test"))
+// .getSequenceFileList());
+// assertEquals(2, resources.size());
+// tmpDir = new File(tmpDir.getParentFile().getParentFile(), "root.test" + File.separator +
+// "0");
+// if (!tmpDir.exists()) {
+// tmpDir.mkdirs();
+// }
+// for (TsFileResource resource : resources) {
+// statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), tmpDir));
+// }
+// resources =
+// new ArrayList<>(
+// StorageEngine.getInstance()
+// .getProcessor(new PartialPath("root.test"))
+// .getUnSequenceFileList());
+// assertEquals(2, resources.size());
+// for (TsFileResource resource : resources) {
+// statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), tmpDir));
+// }
+//
+// // load all tsfile in tmp dir
+// tmpDir = tmpDir.getParentFile().getParentFile();
+// statement.execute(String.format("load '%s'", tmpDir.getAbsolutePath()));
+// assertEquals(
+// 2,
+// StorageEngine.getInstance()
+// .getProcessor(new PartialPath("root.vehicle"))
+// .getSequenceFileList()
+// .size());
+// assertEquals(
+// 1,
+// StorageEngine.getInstance()
+// .getProcessor(new PartialPath("root.vehicle"))
+// .getUnSequenceFileList()
+// .size());
+// if (config.getTimeIndexLevel().equals(TimeIndexLevel.DEVICE_TIME_INDEX)) {
+// if (StorageEngine.getInstance()
+// .getProcessor(new PartialPath("root.test"))
+// .getUnSequenceFileList()
+// .size()
+// == 1) {
+// assertEquals(
+// 3,
+// StorageEngine.getInstance()
+// .getProcessor(new PartialPath("root.test"))
+// .getSequenceFileList()
+// .size());
+// } else {
+// assertEquals(
+// 2,
+// StorageEngine.getInstance()
+// .getProcessor(new PartialPath("root.test"))
+// .getSequenceFileList()
+// .size());
+// }
+// } else if (config.getTimeIndexLevel().equals(TimeIndexLevel.FILE_TIME_INDEX)) {
+// assertEquals(
+// 2,
+// StorageEngine.getInstance()
+// .getProcessor(new PartialPath("root.test"))
+// .getUnSequenceFileList()
+// .size());
+// assertEquals(
+// 2,
+// StorageEngine.getInstance()
+// .getProcessor(new PartialPath("root.test"))
+// .getSequenceFileList()
+// .size());
+// }
+// assertNotNull(tmpDir.listFiles());
+// assertEquals(
+// 0,
+// new File(tmpDir, new PartialPath("root.vehicle") + File.separator + "0")
+// .listFiles()
+// .length);
+// assertEquals(
+// 0,
+// new File(tmpDir, new PartialPath("root.test") + File.separator +
+// "0").listFiles().length);
+//
+// // check query result
+// hasResultSet = statement.execute("SELECT * FROM root.**");
+// Assert.assertTrue(hasResultSet);
+// try (ResultSet resultSet = statement.getResultSet()) {
+// int cnt = 0;
+// while (resultSet.next()) {
+// String queryString =
+// resultSet.getString(TIMESTAMP_STR)
+// + ","
+// + resultSet.getString(VEHICLE_D0_S0_STR)
+// + ","
+// + resultSet.getString(VEHICLE_D0_S1_STR)
+// + ","
+// + resultSet.getString(VEHICLE_D0_S2_STR)
+// + ","
+// + resultSet.getString(VEHICLE_D0_S3_STR)
+// + ","
+// + resultSet.getString(TEST_D0_S0_STR)
+// + ","
+// + resultSet.getString(TEST_D0_S1_STR)
+// + ","
+// + resultSet.getString(TEST_D1_STR);
+// Assert.assertEquals(queryRes[cnt++], queryString);
+// }
+// }
+// } catch (StorageEngineException | IllegalPathException e) {
+// Assert.fail();
+// }
+// }
+//
+// @Test
+// public void loadTsfileWithModsTest() throws SQLException {
+// prepareData(insertUnsequenceSqls);
+// prepareData(deleteTimeseiresSqls);
+// String[] queryRes =
+// new String[] {
+// "1,null,null,null,null,null,null,110",
+// "2,null,null,null,null,null,1209,null",
+// "4,null,null,null,null,null,null,330",
+// "6,120,null,null,null,null,null,null",
+// "9,null,123,null,null,null,null,null",
+// "10,null,null,null,null,106,null,1100",
+// "13,null,null,null,null,427,528,null",
+// "14,null,null,null,null,107,108,430",
+// "16,null,null,null,null,null,109,null",
+// "20,null,null,null,null,426,null,null",
+// "29,null,null,1205.0,true,null,null,null",
+// "30,null,null,null,null,1006,null,null",
+// "33,null,null,null,true,null,null,null",
+// "34,null,null,null,null,1007,1008,null",
+// "36,null,null,null,null,null,1090,null",
+// "38,121,122,null,null,null,null,null",
+// "45,null,null,null,null,126,null,null",
+// "68,null,null,null,null,127,128,null",
+// "78,null,null,null,null,null,129,null",
+// "80,null,null,null,null,127,128,null",
+// "99,null,1234,null,null,null,null,null",
+// "140,null,null,null,null,null,null,430",
+// "150,null,null,null,null,426,null,null",
+// "200,null,null,null,null,null,129,null"
+// };
+// try (Connection connection =
+// DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
+// Statement statement = connection.createStatement()) {
+//
+// // check query result
+// boolean hasResultSet = statement.execute("SELECT * FROM root.**");
+// Assert.assertTrue(hasResultSet);
+// try (ResultSet resultSet = statement.getResultSet()) {
+// int cnt = 0;
+// while (resultSet.next()) {
+// String queryString =
+// resultSet.getString(TIMESTAMP_STR)
+// + ","
+// + resultSet.getString(VEHICLE_D0_S0_STR)
+// + ","
+// + resultSet.getString(VEHICLE_D0_S1_STR)
+// + ","
+// + resultSet.getString(VEHICLE_D0_S2_STR)
+// + ","
+// + resultSet.getString(VEHICLE_D0_S3_STR)
+// + ","
+// + resultSet.getString(TEST_D0_S0_STR)
+// + ","
+// + resultSet.getString(TEST_D0_S1_STR)
+// + ","
+// + resultSet.getString(TEST_D1_STR);
+// Assert.assertEquals(queryRes[cnt++], queryString);
+// }
+// }
+//
+// // move root.vehicle
+// List<TsFileResource> resources =
+// new ArrayList<>(
+// StorageEngine.getInstance()
+// .getProcessor(new PartialPath("root.vehicle"))
+// .getSequenceFileList());
+// assertEquals(2, resources.size());
+// File tmpDir =
+// new File(
+// resources.get(0).getTsFile().getParentFile().getParentFile().getParentFile(),
+// "tmp" + File.separator + new PartialPath("root.vehicle") + File.separator + "0");
+// if (!tmpDir.exists()) {
+// tmpDir.mkdirs();
+// }
+// for (TsFileResource resource : resources) {
+// statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), tmpDir));
+// }
+// resources =
+// new ArrayList<>(
+// StorageEngine.getInstance()
+// .getProcessor(new PartialPath("root.vehicle"))
+// .getUnSequenceFileList());
+// assertEquals(1, resources.size());
+// for (TsFileResource resource : resources) {
+// statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), tmpDir));
+// }
+//
+// // move root.test
+// resources =
+// new ArrayList<>(
+// StorageEngine.getInstance()
+// .getProcessor(new PartialPath("root.test"))
+// .getSequenceFileList());
+// assertEquals(2, resources.size());
+// tmpDir = new File(tmpDir.getParentFile().getParentFile(), "root.test" + File.separator +
+// "0");
+// if (!tmpDir.exists()) {
+// tmpDir.mkdirs();
+// }
+// for (TsFileResource resource : resources) {
+// statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), tmpDir));
+// }
+// resources =
+// new ArrayList<>(
+// StorageEngine.getInstance()
+// .getProcessor(new PartialPath("root.test"))
+// .getUnSequenceFileList());
+// assertEquals(2, resources.size());
+// for (TsFileResource resource : resources) {
+// statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), tmpDir));
+// }
+//
+// // load all tsfile in tmp dir
+// tmpDir = tmpDir.getParentFile().getParentFile();
+// statement.execute(String.format("load '%s'", tmpDir.getAbsolutePath()));
+// assertEquals(
+// 2,
+// StorageEngine.getInstance()
+// .getProcessor(new PartialPath("root.vehicle"))
+// .getSequenceFileList()
+// .size());
+// assertEquals(
+// 1,
+// StorageEngine.getInstance()
+// .getProcessor(new PartialPath("root.vehicle"))
+// .getUnSequenceFileList()
+// .size());
+// if (config.getTimeIndexLevel().equals(TimeIndexLevel.DEVICE_TIME_INDEX)) {
+// if (StorageEngine.getInstance()
+// .getProcessor(new PartialPath("root.test"))
+// .getUnSequenceFileList()
+// .size()
+// == 1) {
+// assertEquals(
+// 3,
+// StorageEngine.getInstance()
+// .getProcessor(new PartialPath("root.test"))
+// .getSequenceFileList()
+// .size());
+// } else {
+// assertEquals(
+// 2,
+// StorageEngine.getInstance()
+// .getProcessor(new PartialPath("root.test"))
+// .getSequenceFileList()
+// .size());
+// }
+// } else if (config.getTimeIndexLevel().equals(TimeIndexLevel.FILE_TIME_INDEX)) {
+// assertEquals(
+// 2,
+// StorageEngine.getInstance()
+// .getProcessor(new PartialPath("root.test"))
+// .getUnSequenceFileList()
+// .size());
+// assertEquals(
+// 2,
+// StorageEngine.getInstance()
+// .getProcessor(new PartialPath("root.test"))
+// .getSequenceFileList()
+// .size());
+// }
+// assertNotNull(tmpDir.listFiles());
+// assertEquals(
+// 0,
+// new File(tmpDir, new PartialPath("root.vehicle") + File.separator + "0")
+// .listFiles()
+// .length);
+// assertEquals(
+// 0,
+// new File(tmpDir, new PartialPath("root.test") + File.separator +
+// "0").listFiles().length);
+//
+// // check query result
+// hasResultSet = statement.execute("SELECT * FROM root.**");
+// Assert.assertTrue(hasResultSet);
+// try (ResultSet resultSet = statement.getResultSet()) {
+// int cnt = 0;
+// while (resultSet.next()) {
+// String queryString =
+// resultSet.getString(TIMESTAMP_STR)
+// + ","
+// + resultSet.getString(VEHICLE_D0_S0_STR)
+// + ","
+// + resultSet.getString(VEHICLE_D0_S1_STR)
+// + ","
+// + resultSet.getString(VEHICLE_D0_S2_STR)
+// + ","
+// + resultSet.getString(VEHICLE_D0_S3_STR)
+// + ","
+// + resultSet.getString(TEST_D0_S0_STR)
+// + ","
+// + resultSet.getString(TEST_D0_S1_STR)
+// + ","
+// + resultSet.getString(TEST_D1_STR);
+// Assert.assertEquals(queryRes[cnt++], queryString);
+// }
+// }
+// } catch (StorageEngineException | IllegalPathException e) {
+// Assert.fail();
+// }
+// }
+//
+// @Test
+// public void loadTsFileTestWithAutoCreateSchema() throws SQLException {
+// try (Connection connection =
+// DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
+// Statement statement = connection.createStatement()) {
+//
+// // move root.vehicle
+// List<TsFileResource> resources =
+// new ArrayList<>(
+// StorageEngine.getInstance()
+// .getProcessor(new PartialPath("root.vehicle"))
+// .getSequenceFileList());
+//
+// File tmpDir =
+// new File(
+// resources
+// .get(0)
+// .getTsFile()
+// .getParentFile()
+// .getParentFile()
+// .getParentFile()
+// .getParentFile(),
+// "tmp"
+// + File.separator
+// + "root.vehicle"
+// + File.separator
+// + "0"
+// + File.separator
+// + "0");
+// if (!tmpDir.exists()) {
+// tmpDir.mkdirs();
+// }
+// for (TsFileResource resource : resources) {
+// statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), tmpDir));
+// }
+//
+// // move root.test
+// resources =
+// new ArrayList<>(
+// StorageEngine.getInstance()
+// .getProcessor(new PartialPath("root.test"))
+// .getSequenceFileList());
+// tmpDir =
+// new File(
+// resources
+// .get(0)
+// .getTsFile()
+// .getParentFile()
+// .getParentFile()
+// .getParentFile()
+// .getParentFile(),
+// "tmp" + File.separator + "root.test" + File.separator + "0" + File.separator + "0");
+// if (!tmpDir.exists()) {
+// tmpDir.mkdirs();
+// }
+// for (TsFileResource resource : resources) {
+// statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(), tmpDir));
+// }
+//
+// Set<String> expectedSet =
+// new HashSet<>(
+// Arrays.asList(
+// "root.vehicle.d0.s0,root.vehicle,INT32",
+// "root.vehicle.d0.s1,root.vehicle,TEXT",
+// "root.vehicle.d1.s2,root.vehicle,FLOAT",
+// "root.vehicle.d1.s3,root.vehicle,BOOLEAN",
+// "root.test.d0.s0,root.test,INT32",
+// "root.test.d0.s1,root.test,TEXT",
+// "root.test.d1.g0.s0,root.test,INT32"));
+//
+// boolean hasResultSet = statement.execute("SHOW timeseries");
+// Assert.assertTrue(hasResultSet);
+// try (ResultSet resultSet = statement.getResultSet()) {
+// while (resultSet.next()) {
+// Assert.assertTrue(
+// expectedSet.contains(
+// resultSet.getString(1)
+// + ","
+// + resultSet.getString(3)
+// + ","
+// + resultSet.getString(4)));
+// }
+// }
+//
+// // remove metadata
+// for (String sql : deleteSqls) {
+// statement.execute(sql);
+// }
+//
+// // test not load metadata automatically, it will occur errors.
+// // UPDATE: load grammar is updated in 0.14, change this into load metadata automatically
+// boolean hasError = false;
+// try {
+// statement.execute(String.format("load '%s' sglevel=1", tmpDir.getAbsolutePath()));
+// } catch (Exception e) {
+// hasError = true;
+// }
+// Assert.assertFalse(hasError);
+//
+// // test load metadata automatically, it will succeed.
+// tmpDir = tmpDir.getParentFile().getParentFile().getParentFile();
+// statement.execute(String.format("load '%s' sglevel=1", tmpDir.getAbsolutePath()));
+// resources =
+// new ArrayList<>(
+// StorageEngine.getInstance()
+// .getProcessor(new PartialPath("root.vehicle"))
+// .getSequenceFileList());
+// assertEquals(1, resources.size());
+// resources =
+// new ArrayList<>(
+// StorageEngine.getInstance()
+// .getProcessor(new PartialPath("root.test"))
+// .getSequenceFileList());
+// assertEquals(2, resources.size());
+// assertEquals(2, tmpDir.listFiles().length);
+// for (File dir : tmpDir.listFiles()) {
+// assertEquals(0, dir.listFiles()[0].listFiles()[0].listFiles().length);
+// }
+// } catch (StorageEngineException | IllegalPathException e) {
+// e.printStackTrace();
+// Assert.fail();
+// }
+// }
+//
+// @Test
+// public void loadTsFileTestWithVerifyMetadata() throws Exception {
+// try (Connection connection =
+// DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
+// Statement statement = connection.createStatement()) {
+// List<TsFileResource> resources =
+// new ArrayList<>(
+// StorageEngine.getInstance()
+// .getProcessor(new PartialPath("root.vehicle"))
+// .getSequenceFileList());
+// assertEquals(1, resources.size());
+// File vehicleTmpDir =
+// new File(
+// resources
+// .get(0)
+// .getTsFile()
+// .getParentFile()
+// .getParentFile()
+// .getParentFile()
+// .getParentFile(),
+// "tmp" + File.separator + "root.vehicle");
+// if (!vehicleTmpDir.exists()) {
+// vehicleTmpDir.mkdirs();
+// }
+//
+// for (TsFileResource resource : resources) {
+// statement.execute(
+// String.format("unload '%s' '%s'", resource.getTsFilePath(), vehicleTmpDir));
+// }
+//
+// resources =
+// new ArrayList<>(
+// StorageEngine.getInstance()
+// .getProcessor(new PartialPath("root.test"))
+// .getSequenceFileList());
+// assertEquals(2, resources.size());
+//
+// File testTmpDir = new File(vehicleTmpDir.getParentFile(), "root.test");
+// if (!testTmpDir.exists()) {
+// testTmpDir.mkdirs();
+// }
+//
+// for (TsFileResource resource : resources) {
+// statement.execute(String.format("unload '%s' '%s'", resource.getTsFilePath(),
+// testTmpDir));
+// }
+//
+// for (String sql : deleteSqls) {
+// statement.execute(sql);
+// }
+//
+// List<String> metaDataSqls =
+// new ArrayList<>(
+// Arrays.asList(
+// "CREATE DATABASE root.vehicle",
+// "CREATE DATABASE root.test",
+// "CREATE TIMESERIES root.vehicle.d0.s0 WITH DATATYPE=INT64, ENCODING=RLE",
+// "CREATE TIMESERIES root.vehicle.d0.s1 WITH DATATYPE=TEXT, ENCODING=PLAIN",
+// "CREATE TIMESERIES root.vehicle.d1.s2 WITH DATATYPE=FLOAT, ENCODING=RLE",
+// "CREATE TIMESERIES root.vehicle.d1.s3 WITH DATATYPE=BOOLEAN, ENCODING=PLAIN",
+// "CREATE ALIGNED TIMESERIES root.test.d0(s0 FLOAT encoding=RLE, s1 TEXT
+// encoding=PLAIN)",
+// "CREATE ALIGNED TIMESERIES root.test.d1.g0(s0 INT32 encoding=RLE)"));
+//
+// for (String sql : metaDataSqls) {
+// statement.execute(sql);
+// }
+//
+// // load vehicle
+// boolean hasError = false;
+// try {
+// statement.execute(String.format("load '%s'", vehicleTmpDir));
+// } catch (Exception e) {
+// hasError = true;
+// assertTrue(
+// e.getMessage()
+// .contains("is aligned in the loading TsFile but is not aligned in IoTDB."));
+// }
+// assertTrue(hasError);
+//
+// statement.execute(String.format("load '%s' verify=false", vehicleTmpDir));
+// assertEquals(
+// 1,
+// StorageEngine.getInstance()
+// .getProcessor(new PartialPath("root.vehicle"))
+// .getSequenceFileList()
+// .size());
+//
+// // load test
+// hasError = false;
+// try {
+// statement.execute(String.format("load '%s'", testTmpDir));
+// } catch (Exception e) {
+// hasError = true;
+// assertTrue(
+// e.getMessage()
+// .contains(
+// "because root.test.d0.s0 is INT32 in the loading TsFile but is FLOAT in
+// IoTDB."));
+// }
+// assertTrue(hasError);
+//
+// statement.execute(String.format("load '%s' verify=false", testTmpDir));
+// assertEquals(
+// 2,
+// StorageEngine.getInstance()
+// .getProcessor(new PartialPath("root.test"))
+// .getSequenceFileList()
+// .size());
+//
+// } catch (Exception e) {
+// e.printStackTrace();
+// Assert.fail();
+// }
+// }
+//
+// @Test
+// public void removeTsFileTest() throws SQLException {
+// try (Connection connection =
+// DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
+// Statement statement = connection.createStatement()) {
+// List<TsFileResource> resources =
+// new ArrayList<>(
+// StorageEngine.getInstance()
+// .getProcessor(new PartialPath("root.vehicle"))
+// .getSequenceFileList());
+// assertEquals(1, resources.size());
+// for (TsFileResource resource : resources) {
+// statement.execute(String.format("remove '%s'", resource.getTsFilePath()));
+// }
+// assertEquals(
+// 0,
+// StorageEngine.getInstance()
+// .getProcessor(new PartialPath("root.vehicle"))
+// .getSequenceFileList()
+// .size());
+//
+// resources =
+// new ArrayList<>(
+// StorageEngine.getInstance()
+// .getProcessor(new PartialPath("root.test"))
+// .getSequenceFileList());
+// assertEquals(2, resources.size());
+// for (TsFileResource resource : resources) {
+// statement.execute(String.format("remove '%s'", resource.getTsFilePath()));
+// }
+// assertEquals(
+// 0,
+// StorageEngine.getInstance()
+// .getProcessor(new PartialPath("root.test"))
+// .getSequenceFileList()
+// .size());
+// } catch (StorageEngineException | IllegalPathException e) {
+// Assert.fail();
+// }
+// }
+//
+// private void prepareData(String[] sqls) {
+// try (Connection connection =
+// DriverManager.getConnection(
+// Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root", "root");
+// Statement statement = connection.createStatement()) {
+//
+// for (String sql : sqls) {
+// statement.execute(sql);
+// }
+//
+// } catch (Exception e) {
+// LOGGER.error("Can not execute sql.", e);
+// fail();
+// }
+// }
+// }
diff --git a/server/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java b/server/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java
index bba0dd92c5..85fcf857e3 100644
--- a/server/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java
+++ b/server/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java
@@ -29,7 +29,7 @@ import org.apache.iotdb.confignode.rpc.thrift.TCQConfig;
import org.apache.iotdb.confignode.rpc.thrift.TGlobalConfig;
import org.apache.iotdb.confignode.rpc.thrift.TRatisConfig;
import org.apache.iotdb.db.conf.directories.DirectoryManager;
-import org.apache.iotdb.db.engine.StorageEngineV2;
+import org.apache.iotdb.db.engine.StorageEngine;
import org.apache.iotdb.db.engine.compaction.constant.CompactionPriority;
import org.apache.iotdb.db.engine.compaction.constant.CrossCompactionPerformer;
import org.apache.iotdb.db.engine.compaction.constant.CrossCompactionSelector;
@@ -1431,7 +1431,7 @@ public class IoTDBDescriptor {
// update timed flush & close conf
loadTimedService(properties);
- StorageEngineV2.getInstance().rebootTimedService();
+ StorageEngine.getInstance().rebootTimedService();
long memTableSizeThreshold =
Long.parseLong(
diff --git a/server/src/main/java/org/apache/iotdb/db/consensus/DataRegionConsensusImpl.java b/server/src/main/java/org/apache/iotdb/db/consensus/DataRegionConsensusImpl.java
index af866ae6fc..f578631392 100644
--- a/server/src/main/java/org/apache/iotdb/db/consensus/DataRegionConsensusImpl.java
+++ b/server/src/main/java/org/apache/iotdb/db/consensus/DataRegionConsensusImpl.java
@@ -31,7 +31,7 @@ import org.apache.iotdb.consensus.config.RatisConfig.Snapshot;
import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.consensus.statemachine.DataRegionStateMachine;
-import org.apache.iotdb.db.engine.StorageEngineV2;
+import org.apache.iotdb.db.engine.StorageEngine;
import org.apache.ratis.util.SizeInBytes;
import org.apache.ratis.util.TimeDuration;
@@ -164,7 +164,7 @@ public class DataRegionConsensusImpl {
.build(),
gid ->
new DataRegionStateMachine(
- StorageEngineV2.getInstance().getDataRegion((DataRegionId) gid)))
+ StorageEngine.getInstance().getDataRegion((DataRegionId) gid)))
.orElseThrow(
() ->
new IllegalArgumentException(
diff --git a/server/src/main/java/org/apache/iotdb/db/consensus/statemachine/DataRegionStateMachine.java b/server/src/main/java/org/apache/iotdb/db/consensus/statemachine/DataRegionStateMachine.java
index b40154288e..6bd5fab3d1 100644
--- a/server/src/main/java/org/apache/iotdb/db/consensus/statemachine/DataRegionStateMachine.java
+++ b/server/src/main/java/org/apache/iotdb/db/consensus/statemachine/DataRegionStateMachine.java
@@ -30,7 +30,7 @@ import org.apache.iotdb.consensus.common.request.IndexedConsensusRequest;
import org.apache.iotdb.consensus.iot.wal.GetConsensusReqReaderPlan;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.consensus.statemachine.visitor.DataExecutionVisitor;
-import org.apache.iotdb.db.engine.StorageEngineV2;
+import org.apache.iotdb.db.engine.StorageEngine;
import org.apache.iotdb.db.engine.cache.BloomFilterCache;
import org.apache.iotdb.db.engine.cache.ChunkCache;
import org.apache.iotdb.db.engine.cache.TimeSeriesMetadataCache;
@@ -141,7 +141,7 @@ public class DataRegionStateMachine extends BaseStateMachine {
}
this.region = newRegion;
try {
- StorageEngineV2.getInstance()
+ StorageEngine.getInstance()
.setDataRegion(new DataRegionId(Integer.parseInt(region.getDataRegionId())), region);
ChunkCache.getInstance().clear();
TimeSeriesMetadataCache.getInstance().clear();
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/StorageEngine.java b/server/src/main/java/org/apache/iotdb/db/engine/StorageEngine.java
index 426595fdcb..2323e720d4 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/StorageEngine.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/StorageEngine.java
@@ -18,34 +18,125 @@
*/
package org.apache.iotdb.db.engine;
-import org.apache.iotdb.commons.exception.MetadataException;
+import org.apache.iotdb.common.rpc.thrift.TFlushReq;
+import org.apache.iotdb.common.rpc.thrift.TSStatus;
+import org.apache.iotdb.common.rpc.thrift.TSetTTLReq;
+import org.apache.iotdb.commons.concurrent.IoTDBThreadPoolFactory;
+import org.apache.iotdb.commons.concurrent.ThreadName;
+import org.apache.iotdb.commons.concurrent.threadpool.ScheduledExecutorUtil;
+import org.apache.iotdb.commons.conf.CommonDescriptor;
+import org.apache.iotdb.commons.consensus.DataRegionId;
import org.apache.iotdb.commons.exception.ShutdownException;
-import org.apache.iotdb.commons.path.PartialPath;
+import org.apache.iotdb.commons.file.SystemFileFactory;
import org.apache.iotdb.commons.service.IService;
import org.apache.iotdb.commons.service.ServiceType;
+import org.apache.iotdb.commons.utils.TestOnly;
+import org.apache.iotdb.consensus.ConsensusFactory;
import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
+import org.apache.iotdb.db.consensus.statemachine.visitor.DataExecutionVisitor;
+import org.apache.iotdb.db.engine.flush.CloseFileListener;
+import org.apache.iotdb.db.engine.flush.FlushListener;
+import org.apache.iotdb.db.engine.flush.TsFileFlushPolicy;
+import org.apache.iotdb.db.engine.flush.TsFileFlushPolicy.DirectFlushPolicy;
+import org.apache.iotdb.db.engine.load.LoadTsFileManager;
import org.apache.iotdb.db.engine.storagegroup.DataRegion;
+import org.apache.iotdb.db.engine.storagegroup.TsFileProcessor;
import org.apache.iotdb.db.exception.DataRegionException;
+import org.apache.iotdb.db.exception.LoadFileException;
import org.apache.iotdb.db.exception.StorageEngineException;
-import org.apache.iotdb.db.metadata.mnode.IStorageGroupMNode;
-import org.apache.iotdb.db.service.IoTDB;
-import org.apache.iotdb.tsfile.utils.Pair;
+import org.apache.iotdb.db.exception.TsFileProcessorException;
+import org.apache.iotdb.db.exception.WriteProcessRejectException;
+import org.apache.iotdb.db.exception.runtime.StorageEngineFailureException;
+import org.apache.iotdb.db.mpp.plan.planner.plan.node.PlanNode;
+import org.apache.iotdb.db.mpp.plan.planner.plan.node.load.LoadTsFilePieceNode;
+import org.apache.iotdb.db.mpp.plan.scheduler.load.LoadTsFileScheduler;
+import org.apache.iotdb.db.rescon.SystemInfo;
+import org.apache.iotdb.db.sync.SyncService;
+import org.apache.iotdb.db.utils.ThreadUtils;
+import org.apache.iotdb.db.utils.UpgradeUtils;
+import org.apache.iotdb.db.wal.WALManager;
+import org.apache.iotdb.db.wal.exception.WALException;
+import org.apache.iotdb.db.wal.recover.WALRecoverManager;
+import org.apache.iotdb.rpc.RpcUtils;
+import org.apache.iotdb.rpc.TSStatusCode;
+import org.apache.iotdb.tsfile.exception.write.PageException;
+import org.apache.iotdb.tsfile.utils.FilePathUtils;
+import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils;
+import org.apache.commons.io.FileUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import java.io.File;
+import java.io.IOException;
+import java.nio.ByteBuffer;
import java.util.ArrayList;
-import java.util.Comparator;
+import java.util.ConcurrentModificationException;
import java.util.HashMap;
+import java.util.LinkedList;
import java.util.List;
import java.util.Map;
-import java.util.stream.Collectors;
+import java.util.Objects;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.apache.iotdb.commons.conf.IoTDBConstant.FILE_NAME_SEPARATOR;
public class StorageEngine implements IService {
private static final Logger logger = LoggerFactory.getLogger(StorageEngine.class);
private static final IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig();
+ private static final long TTL_CHECK_INTERVAL = 60 * 1000L;
+
+ /** Time range for dividing database, the time unit is the same with IoTDB's TimestampPrecision */
+ private static long timePartitionInterval = -1;
+
+ /**
+ * a folder (system/databases/ by default) that persist system info. Each database will have a
+ * subfolder under the systemDir.
+ */
+ private final String systemDir =
+ FilePathUtils.regularizePath(config.getSystemDir()) + "databases";
+
+ /** DataRegionId -> DataRegion */
+ private final ConcurrentHashMap<DataRegionId, DataRegion> dataRegionMap =
+ new ConcurrentHashMap<>();
+
+ /** DataRegionId -> DataRegion which is being deleted */
+ private final ConcurrentHashMap<DataRegionId, DataRegion> deletingDataRegionMap =
+ new ConcurrentHashMap<>();
+
+ /** Database name -> ttl, for region recovery only */
+ private final Map<String, Long> ttlMapForRecover = new ConcurrentHashMap<>();
+
+ /** number of ready data region */
+ private AtomicInteger readyDataRegionNum;
+
+ private AtomicBoolean isAllSgReady = new AtomicBoolean(false);
+
+ private ScheduledExecutorService ttlCheckThread;
+ private ScheduledExecutorService seqMemtableTimedFlushCheckThread;
+ private ScheduledExecutorService unseqMemtableTimedFlushCheckThread;
+
+ private TsFileFlushPolicy fileFlushPolicy = new DirectFlushPolicy();
+ /** used to do short-lived asynchronous tasks */
+ private ExecutorService cachedThreadPool;
+ // add customized listeners here for flush and close events
+ private List<CloseFileListener> customCloseFileListeners = new ArrayList<>();
+ private List<FlushListener> customFlushListeners = new ArrayList<>();
+ private int recoverDataRegionNum = 0;
+
+ private LoadTsFileManager loadTsFileManager = new LoadTsFileManager();
private StorageEngine() {}
@@ -53,14 +144,291 @@ public class StorageEngine implements IService {
return InstanceHolder.INSTANCE;
}
+ private static void initTimePartition() {
+ timePartitionInterval = IoTDBDescriptor.getInstance().getConfig().getTimePartitionInterval();
+ }
+
+ public static long getTimePartitionInterval() {
+ if (timePartitionInterval == -1) {
+ initTimePartition();
+ }
+ return timePartitionInterval;
+ }
+
+ public static long getTimePartition(long time) {
+ if (timePartitionInterval == -1) {
+ initTimePartition();
+ }
+ return time / timePartitionInterval;
+ }
+
+ /** block insertion if the insertion is rejected by memory control */
+ public static void blockInsertionIfReject(TsFileProcessor tsFileProcessor)
+ throws WriteProcessRejectException {
+ long startTime = System.currentTimeMillis();
+ while (SystemInfo.getInstance().isRejected()) {
+ if (tsFileProcessor != null && tsFileProcessor.shouldFlush()) {
+ break;
+ }
+ try {
+ TimeUnit.MILLISECONDS.sleep(config.getCheckPeriodWhenInsertBlocked());
+ if (System.currentTimeMillis() - startTime > config.getMaxWaitingTimeWhenInsertBlocked()) {
+ throw new WriteProcessRejectException(
+ "System rejected over " + (System.currentTimeMillis() - startTime) + "ms");
+ }
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ }
+ }
+ }
+
+ public void updateTTLInfo(byte[] allTTLInformation) {
+ if (allTTLInformation == null) {
+ return;
+ }
+ ByteBuffer buffer = ByteBuffer.wrap(allTTLInformation);
+ int mapSize = ReadWriteIOUtils.readInt(buffer);
+ for (int i = 0; i < mapSize; i++) {
+ ttlMapForRecover.put(
+ Objects.requireNonNull(ReadWriteIOUtils.readString(buffer)),
+ ReadWriteIOUtils.readLong(buffer));
+ }
+ }
+
+ public boolean isAllSgReady() {
+ return isAllSgReady.get();
+ }
+
+ public void setAllSgReady(boolean allSgReady) {
+ isAllSgReady.set(allSgReady);
+ }
+
+ public void recover() {
+ setAllSgReady(false);
+ cachedThreadPool =
+ IoTDBThreadPoolFactory.newCachedThreadPool(
+ ThreadName.STORAGE_ENGINE_CACHED_SERVICE.getName());
+
+ List<Future<Void>> futures = new LinkedList<>();
+ asyncRecover(futures);
+
+ // wait until wal is recovered
+ if (!config.isClusterMode()
+ || !config.getDataRegionConsensusProtocolClass().equals(ConsensusFactory.RATIS_CONSENSUS)) {
+ try {
+ WALRecoverManager.getInstance().recover();
+ } catch (WALException e) {
+ logger.error("Fail to recover wal.", e);
+ }
+ }
+
+ // operations after all data regions are recovered
+ Thread recoverEndTrigger =
+ new Thread(
+ () -> {
+ checkResults(futures, "StorageEngine failed to recover.");
+ setAllSgReady(true);
+ ttlMapForRecover.clear();
+ });
+ recoverEndTrigger.start();
+ }
+
+ private void asyncRecover(List<Future<Void>> futures) {
+ Map<String, List<DataRegionId>> localDataRegionInfo = getLocalDataRegionInfo();
+ localDataRegionInfo.values().forEach(list -> recoverDataRegionNum += list.size());
+ readyDataRegionNum = new AtomicInteger(0);
+ // init wal recover manager
+ WALRecoverManager.getInstance()
+ .setAllDataRegionScannedLatch(new CountDownLatch(recoverDataRegionNum));
+ for (Map.Entry<String, List<DataRegionId>> entry : localDataRegionInfo.entrySet()) {
+ String sgName = entry.getKey();
+ for (DataRegionId dataRegionId : entry.getValue()) {
+ Callable<Void> recoverDataRegionTask =
+ () -> {
+ DataRegion dataRegion = null;
+ try {
+ dataRegion =
+ buildNewDataRegion(
+ sgName,
+ dataRegionId,
+ ttlMapForRecover.getOrDefault(sgName, Long.MAX_VALUE));
+ } catch (DataRegionException e) {
+ logger.error(
+ "Failed to recover data region {}[{}]", sgName, dataRegionId.getId(), e);
+ }
+ dataRegionMap.put(dataRegionId, dataRegion);
+ logger.info(
+ "Data regions have been recovered {}/{}",
+ readyDataRegionNum.incrementAndGet(),
+ recoverDataRegionNum);
+ return null;
+ };
+ futures.add(cachedThreadPool.submit(recoverDataRegionTask));
+ }
+ }
+ }
+
+ /** get StorageGroup -> DataRegionIdList map from data/system directory. */
+ public Map<String, List<DataRegionId>> getLocalDataRegionInfo() {
+ File system = SystemFileFactory.INSTANCE.getFile(systemDir);
+ File[] sgDirs = system.listFiles();
+ Map<String, List<DataRegionId>> localDataRegionInfo = new HashMap<>();
+ if (sgDirs == null) {
+ return localDataRegionInfo;
+ }
+ for (File sgDir : sgDirs) {
+ if (!sgDir.isDirectory()) {
+ continue;
+ }
+ String sgName = sgDir.getName();
+ List<DataRegionId> dataRegionIdList = new ArrayList<>();
+ for (File dataRegionDir : sgDir.listFiles()) {
+ if (!dataRegionDir.isDirectory()) {
+ continue;
+ }
+ dataRegionIdList.add(new DataRegionId(Integer.parseInt(dataRegionDir.getName())));
+ }
+ localDataRegionInfo.put(sgName, dataRegionIdList);
+ }
+ return localDataRegionInfo;
+ }
+
@Override
- public void start() {}
+ public void start() {
+ // build time Interval to divide time partition
+ initTimePartition();
+ // create systemDir
+ try {
+ FileUtils.forceMkdir(SystemFileFactory.INSTANCE.getFile(systemDir));
+ } catch (IOException e) {
+ throw new StorageEngineFailureException(e);
+ }
+
+ // recover upgrade process
+ UpgradeUtils.recoverUpgrade();
+
+ recover();
+
+ ttlCheckThread = IoTDBThreadPoolFactory.newSingleThreadScheduledExecutor("TTL-Check");
+ ScheduledExecutorUtil.safelyScheduleAtFixedRate(
+ ttlCheckThread,
+ this::checkTTL,
+ TTL_CHECK_INTERVAL,
+ TTL_CHECK_INTERVAL,
+ TimeUnit.MILLISECONDS);
+ logger.info("start ttl check thread successfully.");
+
+ startTimedService();
+ }
+
+ private void checkTTL() {
+ try {
+ for (DataRegion dataRegion : dataRegionMap.values()) {
+ if (dataRegion != null) {
+ dataRegion.checkFilesTTL();
+ }
+ }
+ } catch (ConcurrentModificationException e) {
+ // ignore
+ } catch (Exception e) {
+ logger.error("An error occurred when checking TTL", e);
+ }
+ }
+
+ private void startTimedService() {
+ // timed flush sequence memtable
+ if (config.isEnableTimedFlushSeqMemtable()) {
+ seqMemtableTimedFlushCheckThread =
+ IoTDBThreadPoolFactory.newSingleThreadScheduledExecutor(
+ ThreadName.TIMED_FlUSH_SEQ_MEMTABLE.getName());
+ ScheduledExecutorUtil.safelyScheduleAtFixedRate(
+ seqMemtableTimedFlushCheckThread,
+ this::timedFlushSeqMemTable,
+ config.getSeqMemtableFlushCheckInterval(),
+ config.getSeqMemtableFlushCheckInterval(),
+ TimeUnit.MILLISECONDS);
+ logger.info("start sequence memtable timed flush check thread successfully.");
+ }
+ // timed flush unsequence memtable
+ if (config.isEnableTimedFlushUnseqMemtable()) {
+ unseqMemtableTimedFlushCheckThread =
+ IoTDBThreadPoolFactory.newSingleThreadScheduledExecutor(
+ ThreadName.TIMED_FlUSH_UNSEQ_MEMTABLE.getName());
+ ScheduledExecutorUtil.safelyScheduleAtFixedRate(
+ unseqMemtableTimedFlushCheckThread,
+ this::timedFlushUnseqMemTable,
+ config.getUnseqMemtableFlushCheckInterval(),
+ config.getUnseqMemtableFlushCheckInterval(),
+ TimeUnit.MILLISECONDS);
+ logger.info("start unsequence memtable timed flush check thread successfully.");
+ }
+ }
+
+ private void timedFlushSeqMemTable() {
+ for (DataRegion dataRegion : dataRegionMap.values()) {
+ if (dataRegion != null) {
+ dataRegion.timedFlushSeqMemTable();
+ }
+ }
+ }
+
+ private void timedFlushUnseqMemTable() {
+ for (DataRegion dataRegion : dataRegionMap.values()) {
+ if (dataRegion != null) {
+ dataRegion.timedFlushUnseqMemTable();
+ }
+ }
+ }
@Override
- public void stop() {}
+ public void stop() {
+ for (DataRegion dataRegion : dataRegionMap.values()) {
+ if (dataRegion != null) {
+ ThreadUtils.stopThreadPool(
+ dataRegion.getTimedCompactionScheduleTask(), ThreadName.COMPACTION_SCHEDULE);
+ }
+ }
+ syncCloseAllProcessor();
+ ThreadUtils.stopThreadPool(ttlCheckThread, ThreadName.TTL_CHECK_SERVICE);
+ ThreadUtils.stopThreadPool(
+ seqMemtableTimedFlushCheckThread, ThreadName.TIMED_FlUSH_SEQ_MEMTABLE);
+ ThreadUtils.stopThreadPool(
+ unseqMemtableTimedFlushCheckThread, ThreadName.TIMED_FlUSH_UNSEQ_MEMTABLE);
+ if (cachedThreadPool != null) {
+ cachedThreadPool.shutdownNow();
+ }
+ dataRegionMap.clear();
+ }
@Override
- public void shutdown(long milliseconds) throws ShutdownException {}
+ public void shutdown(long milliseconds) throws ShutdownException {
+ try {
+ for (DataRegion dataRegion : dataRegionMap.values()) {
+ ThreadUtils.stopThreadPool(
+ dataRegion.getTimedCompactionScheduleTask(), ThreadName.COMPACTION_SCHEDULE);
+ }
+ forceCloseAllProcessor();
+ } catch (TsFileProcessorException e) {
+ throw new ShutdownException(e);
+ }
+ shutdownTimedService(ttlCheckThread, "TTlCheckThread");
+ shutdownTimedService(seqMemtableTimedFlushCheckThread, "SeqMemtableTimedFlushCheckThread");
+ shutdownTimedService(unseqMemtableTimedFlushCheckThread, "UnseqMemtableTimedFlushCheckThread");
+ cachedThreadPool.shutdownNow();
+ dataRegionMap.clear();
+ }
+
+ private void shutdownTimedService(ScheduledExecutorService pool, String poolName) {
+ if (pool != null) {
+ pool.shutdownNow();
+ try {
+ pool.awaitTermination(30, TimeUnit.SECONDS);
+ } catch (InterruptedException e) {
+ logger.warn("{} still doesn't exit after 30s", poolName);
+ Thread.currentThread().interrupt();
+ }
+ }
+ }
@Override
public ServiceType getID() {
@@ -68,56 +436,377 @@ public class StorageEngine implements IService {
}
/**
- * This method is for insert and query or sth like them, this may get a virtual database
+ * build a new data region
*
- * @param path device path
- * @return database processor
+ * @param dataRegionId data region id e.g. 1
+ * @param logicalStorageGroupName database name e.g. root.sg1
*/
- public DataRegion getProcessor(PartialPath path) throws StorageEngineException {
- try {
- IStorageGroupMNode storageGroupMNode = IoTDB.schemaProcessor.getStorageGroupNodeByPath(path);
- return getStorageGroupProcessorByPath(path, storageGroupMNode);
- } catch (DataRegionException | MetadataException e) {
- throw new StorageEngineException(e);
+ public DataRegion buildNewDataRegion(
+ String logicalStorageGroupName, DataRegionId dataRegionId, long ttl)
+ throws DataRegionException {
+ DataRegion dataRegion;
+ logger.info(
+ "construct a data region instance, the database is {}, Thread is {}",
+ logicalStorageGroupName,
+ Thread.currentThread().getId());
+ dataRegion =
+ new DataRegion(
+ systemDir + File.separator + logicalStorageGroupName,
+ String.valueOf(dataRegionId.getId()),
+ fileFlushPolicy,
+ logicalStorageGroupName);
+ dataRegion.setDataTTLWithTimePrecisionCheck(ttl);
+ dataRegion.setCustomFlushListeners(customFlushListeners);
+ dataRegion.setCustomCloseFileListeners(customCloseFileListeners);
+ return dataRegion;
+ }
+
+ /** Write data into DataRegion. For standalone mode only. */
+ public TSStatus write(DataRegionId groupId, PlanNode planNode) {
+ return planNode.accept(new DataExecutionVisitor(), dataRegionMap.get(groupId));
+ }
+
+ /** This function is just for unit test. */
+ @TestOnly
+ public synchronized void reset() {
+ dataRegionMap.clear();
+ }
+
+ /** flush command Sync asyncCloseOneProcessor all file node processors. */
+ public void syncCloseAllProcessor() {
+ logger.info("Start closing all database processor");
+ List<Future<Void>> tasks = new ArrayList<>();
+ for (DataRegion dataRegion : dataRegionMap.values()) {
+ if (dataRegion != null) {
+ tasks.add(
+ cachedThreadPool.submit(
+ () -> {
+ dataRegion.syncCloseAllWorkingTsFileProcessors();
+ return null;
+ }));
+ }
+ }
+ checkResults(tasks, "Failed to sync close processor.");
+ }
+
+ public void forceCloseAllProcessor() throws TsFileProcessorException {
+ logger.info("Start force closing all database processor");
+ List<Future<Void>> tasks = new ArrayList<>();
+ for (DataRegion dataRegion : dataRegionMap.values()) {
+ if (dataRegion != null) {
+ tasks.add(
+ cachedThreadPool.submit(
+ () -> {
+ dataRegion.forceCloseAllWorkingTsFileProcessors();
+ return null;
+ }));
+ }
+ }
+ checkResults(tasks, "Failed to force close processor.");
+ }
+
+ public void closeStorageGroupProcessor(String storageGroupPath, boolean isSeq) {
+ List<Future<Void>> tasks = new ArrayList<>();
+ for (DataRegion dataRegion : dataRegionMap.values()) {
+ if (dataRegion.getStorageGroupName().equals(storageGroupPath)) {
+ if (isSeq) {
+ for (TsFileProcessor tsFileProcessor : dataRegion.getWorkSequenceTsFileProcessors()) {
+ tasks.add(
+ cachedThreadPool.submit(
+ () -> {
+ dataRegion.syncCloseOneTsFileProcessor(isSeq, tsFileProcessor);
+ return null;
+ }));
+ }
+ } else {
+ for (TsFileProcessor tsFileProcessor : dataRegion.getWorkUnsequenceTsFileProcessors()) {
+ tasks.add(
+ cachedThreadPool.submit(
+ () -> {
+ dataRegion.syncCloseOneTsFileProcessor(isSeq, tsFileProcessor);
+ return null;
+ }));
+ }
+ }
+ }
+ }
+ checkResults(tasks, "Failed to close database processor.");
+ }
+
+ private <V> void checkResults(List<Future<V>> tasks, String errorMsg) {
+ for (Future<V> task : tasks) {
+ try {
+ task.get();
+ } catch (ExecutionException e) {
+ throw new StorageEngineFailureException(errorMsg, e);
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ throw new StorageEngineFailureException(errorMsg, e);
+ }
}
}
/**
- * get database processor by device path
+ * merge all databases.
*
- * @param devicePath path of the device
- * @param storageGroupMNode mnode of the storage group, we need synchronize this to avoid
- * modification in mtree
- * @return found or new storage group processor
+ * @throws StorageEngineException StorageEngineException
*/
- private DataRegion getStorageGroupProcessorByPath(
- PartialPath devicePath, IStorageGroupMNode storageGroupMNode) throws DataRegionException {
- return null;
+ public void mergeAll() throws StorageEngineException {
+ if (CommonDescriptor.getInstance().getConfig().isReadOnly()) {
+ throw new StorageEngineException("Current system mode is read only, does not support merge");
+ }
+ dataRegionMap.values().forEach(DataRegion::compact);
+ }
+
+ public TSStatus operateFlush(TFlushReq req) {
+ if (req.storageGroups == null) {
+ StorageEngine.getInstance().syncCloseAllProcessor();
+ WALManager.getInstance().deleteOutdatedWALFiles();
+ } else {
+ for (String storageGroup : req.storageGroups) {
+ if (req.isSeq == null) {
+ StorageEngine.getInstance().closeStorageGroupProcessor(storageGroup, true);
+ StorageEngine.getInstance().closeStorageGroupProcessor(storageGroup, false);
+ } else {
+ StorageEngine.getInstance()
+ .closeStorageGroupProcessor(storageGroup, Boolean.parseBoolean(req.isSeq));
+ }
+ }
+ }
+ return RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS);
+ }
+
+ public void setTTL(List<DataRegionId> dataRegionIdList, long dataTTL) {
+ for (DataRegionId dataRegionId : dataRegionIdList) {
+ DataRegion dataRegion = dataRegionMap.get(dataRegionId);
+ if (dataRegion != null) {
+ dataRegion.setDataTTLWithTimePrecisionCheck(dataTTL);
+ }
+ }
+ }
+
+ /**
+ * Add a listener to listen flush start/end events. Notice that this addition only applies to
+ * TsFileProcessors created afterwards.
+ *
+ * @param listener
+ */
+ public void registerFlushListener(FlushListener listener) {
+ customFlushListeners.add(listener);
+ }
+
+ /**
+ * Add a listener to listen file close events. Notice that this addition only applies to
+ * TsFileProcessors created afterwards.
+ *
+ * @param listener
+ */
+ public void registerCloseFileListener(CloseFileListener listener) {
+ customCloseFileListeners.add(listener);
+ }
+
+ private void makeSureNoOldRegion(DataRegionId regionId) {
+ while (deletingDataRegionMap.containsKey(regionId)) {
+ DataRegion oldRegion = deletingDataRegionMap.get(regionId);
+ if (oldRegion != null) {
+ oldRegion.waitForDeleted();
+ }
+ }
+ }
+
+ // When registering a new region, the coordinator needs to register the corresponding region with
+ // the local engine before adding the corresponding consensusGroup to the consensus layer
+ public DataRegion createDataRegion(DataRegionId regionId, String sg, long ttl)
+ throws DataRegionException {
+ makeSureNoOldRegion(regionId);
+ AtomicReference<DataRegionException> exceptionAtomicReference = new AtomicReference<>(null);
+ DataRegion dataRegion =
+ dataRegionMap.computeIfAbsent(
+ regionId,
+ x -> {
+ try {
+ return buildNewDataRegion(sg, x, ttl);
+ } catch (DataRegionException e) {
+ exceptionAtomicReference.set(e);
+ }
+ return null;
+ });
+ if (exceptionAtomicReference.get() != null) {
+ throw exceptionAtomicReference.get();
+ }
+ return dataRegion;
+ }
+
+ public void deleteDataRegion(DataRegionId regionId) {
+ if (!dataRegionMap.containsKey(regionId) || deletingDataRegionMap.containsKey(regionId)) {
+ return;
+ }
+ DataRegion region =
+ deletingDataRegionMap.computeIfAbsent(regionId, k -> dataRegionMap.remove(regionId));
+ if (region != null) {
+ region.markDeleted();
+ try {
+ region.abortCompaction();
+ region.syncDeleteDataFiles();
+ region.deleteFolder(systemDir);
+ if (config.isClusterMode()
+ && config
+ .getDataRegionConsensusProtocolClass()
+ .equals(ConsensusFactory.IOT_CONSENSUS)) {
+ WALManager.getInstance()
+ .deleteWALNode(
+ region.getStorageGroupName() + FILE_NAME_SEPARATOR + region.getDataRegionId());
+ }
+ SyncService.getInstance().unregisterDataRegion(region.getDataRegionId());
+ } catch (Exception e) {
+ logger.error(
+ "Error occurs when deleting data region {}-{}",
+ region.getStorageGroupName(),
+ region.getDataRegionId(),
+ e);
+ } finally {
+ deletingDataRegionMap.remove(regionId);
+ }
+ }
+ }
+
+ public DataRegion getDataRegion(DataRegionId regionId) {
+ return dataRegionMap.get(regionId);
+ }
+
+ public List<DataRegion> getAllDataRegions() {
+ return new ArrayList<>(dataRegionMap.values());
+ }
+
+ public List<DataRegionId> getAllDataRegionIds() {
+ return new ArrayList<>(dataRegionMap.keySet());
+ }
+
+ /** This method is not thread-safe */
+ public void setDataRegion(DataRegionId regionId, DataRegion newRegion) {
+ if (dataRegionMap.containsKey(regionId)) {
+ DataRegion oldRegion = dataRegionMap.get(regionId);
+ oldRegion.syncCloseAllWorkingTsFileProcessors();
+ oldRegion.abortCompaction();
+ }
+ dataRegionMap.put(regionId, newRegion);
}
- /** get all merge lock of the storage group processor related to the query */
- public Pair<List<DataRegion>, Map<DataRegion, List<PartialPath>>> mergeLock(
- List<PartialPath> pathList) throws StorageEngineException {
- Map<DataRegion, List<PartialPath>> map = new HashMap<>();
- for (PartialPath path : pathList) {
- map.computeIfAbsent(getProcessor(path.getDevicePath()), key -> new ArrayList<>()).add(path);
+ public TSStatus setTTL(TSetTTLReq req) {
+ Map<String, List<DataRegionId>> localDataRegionInfo =
+ StorageEngine.getInstance().getLocalDataRegionInfo();
+ List<DataRegionId> dataRegionIdList = new ArrayList<>();
+ req.storageGroupPathPattern.forEach(
+ storageGroup -> dataRegionIdList.addAll(localDataRegionInfo.get(storageGroup)));
+ for (DataRegionId dataRegionId : dataRegionIdList) {
+ DataRegion dataRegion = dataRegionMap.get(dataRegionId);
+ if (dataRegion != null) {
+ dataRegion.setDataTTLWithTimePrecisionCheck(req.TTL);
+ }
}
- List<DataRegion> list =
- map.keySet().stream()
- .sorted(Comparator.comparing(DataRegion::getDataRegionId))
- .collect(Collectors.toList());
- list.forEach(DataRegion::readLock);
+ return RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS);
+ }
+
+ public TsFileFlushPolicy getFileFlushPolicy() {
+ return fileFlushPolicy;
+ }
+
+ public TSStatus writeLoadTsFileNode(
+ DataRegionId dataRegionId, LoadTsFilePieceNode pieceNode, String uuid) {
+ TSStatus status = new TSStatus();
+
+ try {
+ loadTsFileManager.writeToDataRegion(getDataRegion(dataRegionId), pieceNode, uuid);
+ } catch (PageException e) {
+ logger.error(
+ String.format(
+ "Parse Page error when writing piece node of TsFile %s to DataRegion %s.",
+ pieceNode.getTsFile(), dataRegionId),
+ e);
+ status.setCode(TSStatusCode.LOAD_PIECE_OF_TSFILE_ERROR.getStatusCode());
+ status.setMessage(e.getMessage());
+ return status;
+ } catch (IOException e) {
+ logger.error(
+ String.format(
+ "IO error when writing piece node of TsFile %s to DataRegion %s.",
+ pieceNode.getTsFile(), dataRegionId),
+ e);
+ status.setCode(TSStatusCode.LOAD_FILE_ERROR.getStatusCode());
+ status.setMessage(e.getMessage());
+ return status;
+ }
+
+ return RpcUtils.SUCCESS_STATUS;
+ }
- return new Pair<>(list, map);
+ public TSStatus executeLoadCommand(LoadTsFileScheduler.LoadCommand loadCommand, String uuid) {
+ TSStatus status = new TSStatus();
+
+ try {
+ switch (loadCommand) {
+ case EXECUTE:
+ if (loadTsFileManager.loadAll(uuid)) {
+ status = RpcUtils.SUCCESS_STATUS;
+ } else {
+ status.setCode(TSStatusCode.LOAD_FILE_ERROR.getStatusCode());
+ status.setMessage(
+ String.format(
+ "No load TsFile uuid %s recorded for execute load command %s.",
+ uuid, loadCommand));
+ }
+ break;
+ case ROLLBACK:
+ if (loadTsFileManager.deleteAll(uuid)) {
+ status = RpcUtils.SUCCESS_STATUS;
+ } else {
+ status.setCode(TSStatusCode.LOAD_FILE_ERROR.getStatusCode());
+ status.setMessage(
+ String.format(
+ "No load TsFile uuid %s recorded for execute load command %s.",
+ uuid, loadCommand));
+ }
+ break;
+ default:
+ status.setCode(TSStatusCode.ILLEGAL_PARAMETER.getStatusCode());
+ status.setMessage(String.format("Wrong load command %s.", loadCommand));
+ }
+ } catch (IOException | LoadFileException e) {
+ logger.error(String.format("Execute load command %s error.", loadCommand), e);
+ status.setCode(TSStatusCode.LOAD_FILE_ERROR.getStatusCode());
+ status.setMessage(e.getMessage());
+ }
+
+ return status;
}
- /** unlock all merge lock of the storage group processor related to the query */
- public void mergeUnLock(List<DataRegion> list) {
- list.forEach(DataRegion::readUnlock);
+ /** reboot timed flush sequence/unsequence memetable thread */
+ public void rebootTimedService() throws ShutdownException {
+ logger.info("Start rebooting all timed service.");
+
+ // exclude ttl check thread
+ stopTimedServiceAndThrow(seqMemtableTimedFlushCheckThread, "SeqMemtableTimedFlushCheckThread");
+ stopTimedServiceAndThrow(
+ unseqMemtableTimedFlushCheckThread, "UnseqMemtableTimedFlushCheckThread");
+
+ logger.info("Stop all timed service successfully, and now restart them.");
+
+ startTimedService();
+
+ logger.info("Reboot all timed service successfully");
}
- public String getStorageGroupPath(PartialPath selectedPath) {
- return null;
+ private void stopTimedServiceAndThrow(ScheduledExecutorService pool, String poolName)
+ throws ShutdownException {
+ if (pool != null) {
+ pool.shutdownNow();
+ try {
+ pool.awaitTermination(30, TimeUnit.SECONDS);
+ } catch (InterruptedException e) {
+ logger.warn("{} still doesn't exit after 30s", poolName);
+ throw new ShutdownException(e);
+ }
+ }
}
static class InstanceHolder {
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/StorageEngineV2.java b/server/src/main/java/org/apache/iotdb/db/engine/StorageEngineV2.java
deleted file mode 100644
index e5aa77d06b..0000000000
--- a/server/src/main/java/org/apache/iotdb/db/engine/StorageEngineV2.java
+++ /dev/null
@@ -1,820 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.iotdb.db.engine;
-
-import org.apache.iotdb.common.rpc.thrift.TFlushReq;
-import org.apache.iotdb.common.rpc.thrift.TSStatus;
-import org.apache.iotdb.common.rpc.thrift.TSetTTLReq;
-import org.apache.iotdb.commons.concurrent.IoTDBThreadPoolFactory;
-import org.apache.iotdb.commons.concurrent.ThreadName;
-import org.apache.iotdb.commons.concurrent.threadpool.ScheduledExecutorUtil;
-import org.apache.iotdb.commons.conf.CommonDescriptor;
-import org.apache.iotdb.commons.consensus.DataRegionId;
-import org.apache.iotdb.commons.exception.ShutdownException;
-import org.apache.iotdb.commons.file.SystemFileFactory;
-import org.apache.iotdb.commons.service.IService;
-import org.apache.iotdb.commons.service.ServiceType;
-import org.apache.iotdb.commons.utils.TestOnly;
-import org.apache.iotdb.consensus.ConsensusFactory;
-import org.apache.iotdb.db.conf.IoTDBConfig;
-import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.consensus.statemachine.visitor.DataExecutionVisitor;
-import org.apache.iotdb.db.engine.flush.CloseFileListener;
-import org.apache.iotdb.db.engine.flush.FlushListener;
-import org.apache.iotdb.db.engine.flush.TsFileFlushPolicy;
-import org.apache.iotdb.db.engine.flush.TsFileFlushPolicy.DirectFlushPolicy;
-import org.apache.iotdb.db.engine.load.LoadTsFileManager;
-import org.apache.iotdb.db.engine.storagegroup.DataRegion;
-import org.apache.iotdb.db.engine.storagegroup.TsFileProcessor;
-import org.apache.iotdb.db.exception.DataRegionException;
-import org.apache.iotdb.db.exception.LoadFileException;
-import org.apache.iotdb.db.exception.StorageEngineException;
-import org.apache.iotdb.db.exception.TsFileProcessorException;
-import org.apache.iotdb.db.exception.WriteProcessRejectException;
-import org.apache.iotdb.db.exception.runtime.StorageEngineFailureException;
-import org.apache.iotdb.db.mpp.plan.planner.plan.node.PlanNode;
-import org.apache.iotdb.db.mpp.plan.planner.plan.node.load.LoadTsFilePieceNode;
-import org.apache.iotdb.db.mpp.plan.scheduler.load.LoadTsFileScheduler;
-import org.apache.iotdb.db.rescon.SystemInfo;
-import org.apache.iotdb.db.sync.SyncService;
-import org.apache.iotdb.db.utils.ThreadUtils;
-import org.apache.iotdb.db.utils.UpgradeUtils;
-import org.apache.iotdb.db.wal.WALManager;
-import org.apache.iotdb.db.wal.exception.WALException;
-import org.apache.iotdb.db.wal.recover.WALRecoverManager;
-import org.apache.iotdb.rpc.RpcUtils;
-import org.apache.iotdb.rpc.TSStatusCode;
-import org.apache.iotdb.tsfile.exception.write.PageException;
-import org.apache.iotdb.tsfile.utils.FilePathUtils;
-import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils;
-
-import org.apache.commons.io.FileUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.ConcurrentModificationException;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Future;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicReference;
-
-import static org.apache.iotdb.commons.conf.IoTDBConstant.FILE_NAME_SEPARATOR;
-
-public class StorageEngineV2 implements IService {
- private static final Logger logger = LoggerFactory.getLogger(StorageEngineV2.class);
-
- private static final IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig();
- private static final long TTL_CHECK_INTERVAL = 60 * 1000L;
-
- /** Time range for dividing database, the time unit is the same with IoTDB's TimestampPrecision */
- private static long timePartitionInterval = -1;
-
- /**
- * a folder (system/databases/ by default) that persist system info. Each database will have a
- * subfolder under the systemDir.
- */
- private final String systemDir =
- FilePathUtils.regularizePath(config.getSystemDir()) + "databases";
-
- /** DataRegionId -> DataRegion */
- private final ConcurrentHashMap<DataRegionId, DataRegion> dataRegionMap =
- new ConcurrentHashMap<>();
-
- /** DataRegionId -> DataRegion which is being deleted */
- private final ConcurrentHashMap<DataRegionId, DataRegion> deletingDataRegionMap =
- new ConcurrentHashMap<>();
-
- /** Database name -> ttl, for region recovery only */
- private final Map<String, Long> ttlMapForRecover = new ConcurrentHashMap<>();
-
- /** number of ready data region */
- private AtomicInteger readyDataRegionNum;
-
- private AtomicBoolean isAllSgReady = new AtomicBoolean(false);
-
- private ScheduledExecutorService ttlCheckThread;
- private ScheduledExecutorService seqMemtableTimedFlushCheckThread;
- private ScheduledExecutorService unseqMemtableTimedFlushCheckThread;
-
- private TsFileFlushPolicy fileFlushPolicy = new DirectFlushPolicy();
- /** used to do short-lived asynchronous tasks */
- private ExecutorService cachedThreadPool;
- // add customized listeners here for flush and close events
- private List<CloseFileListener> customCloseFileListeners = new ArrayList<>();
- private List<FlushListener> customFlushListeners = new ArrayList<>();
- private int recoverDataRegionNum = 0;
-
- private LoadTsFileManager loadTsFileManager = new LoadTsFileManager();
-
- private StorageEngineV2() {}
-
- public static StorageEngineV2 getInstance() {
- return InstanceHolder.INSTANCE;
- }
-
- private static void initTimePartition() {
- timePartitionInterval = IoTDBDescriptor.getInstance().getConfig().getTimePartitionInterval();
- }
-
- public static long getTimePartitionInterval() {
- if (timePartitionInterval == -1) {
- initTimePartition();
- }
- return timePartitionInterval;
- }
-
- public static long getTimePartition(long time) {
- if (timePartitionInterval == -1) {
- initTimePartition();
- }
- return time / timePartitionInterval;
- }
-
- /** block insertion if the insertion is rejected by memory control */
- public static void blockInsertionIfReject(TsFileProcessor tsFileProcessor)
- throws WriteProcessRejectException {
- long startTime = System.currentTimeMillis();
- while (SystemInfo.getInstance().isRejected()) {
- if (tsFileProcessor != null && tsFileProcessor.shouldFlush()) {
- break;
- }
- try {
- TimeUnit.MILLISECONDS.sleep(config.getCheckPeriodWhenInsertBlocked());
- if (System.currentTimeMillis() - startTime > config.getMaxWaitingTimeWhenInsertBlocked()) {
- throw new WriteProcessRejectException(
- "System rejected over " + (System.currentTimeMillis() - startTime) + "ms");
- }
- } catch (InterruptedException e) {
- Thread.currentThread().interrupt();
- }
- }
- }
-
- public void updateTTLInfo(byte[] allTTLInformation) {
- if (allTTLInformation == null) {
- return;
- }
- ByteBuffer buffer = ByteBuffer.wrap(allTTLInformation);
- int mapSize = ReadWriteIOUtils.readInt(buffer);
- for (int i = 0; i < mapSize; i++) {
- ttlMapForRecover.put(
- Objects.requireNonNull(ReadWriteIOUtils.readString(buffer)),
- ReadWriteIOUtils.readLong(buffer));
- }
- }
-
- public boolean isAllSgReady() {
- return isAllSgReady.get();
- }
-
- public void setAllSgReady(boolean allSgReady) {
- isAllSgReady.set(allSgReady);
- }
-
- public void recover() {
- setAllSgReady(false);
- cachedThreadPool =
- IoTDBThreadPoolFactory.newCachedThreadPool(
- ThreadName.STORAGE_ENGINE_CACHED_SERVICE.getName());
-
- List<Future<Void>> futures = new LinkedList<>();
- asyncRecover(futures);
-
- // wait until wal is recovered
- if (!config.isClusterMode()
- || !config.getDataRegionConsensusProtocolClass().equals(ConsensusFactory.RATIS_CONSENSUS)) {
- try {
- WALRecoverManager.getInstance().recover();
- } catch (WALException e) {
- logger.error("Fail to recover wal.", e);
- }
- }
-
- // operations after all data regions are recovered
- Thread recoverEndTrigger =
- new Thread(
- () -> {
- checkResults(futures, "StorageEngine failed to recover.");
- setAllSgReady(true);
- ttlMapForRecover.clear();
- });
- recoverEndTrigger.start();
- }
-
- private void asyncRecover(List<Future<Void>> futures) {
- Map<String, List<DataRegionId>> localDataRegionInfo = getLocalDataRegionInfo();
- localDataRegionInfo.values().forEach(list -> recoverDataRegionNum += list.size());
- readyDataRegionNum = new AtomicInteger(0);
- // init wal recover manager
- WALRecoverManager.getInstance()
- .setAllDataRegionScannedLatch(new CountDownLatch(recoverDataRegionNum));
- for (Map.Entry<String, List<DataRegionId>> entry : localDataRegionInfo.entrySet()) {
- String sgName = entry.getKey();
- for (DataRegionId dataRegionId : entry.getValue()) {
- Callable<Void> recoverDataRegionTask =
- () -> {
- DataRegion dataRegion = null;
- try {
- dataRegion =
- buildNewDataRegion(
- sgName,
- dataRegionId,
- ttlMapForRecover.getOrDefault(sgName, Long.MAX_VALUE));
- } catch (DataRegionException e) {
- logger.error(
- "Failed to recover data region {}[{}]", sgName, dataRegionId.getId(), e);
- }
- dataRegionMap.put(dataRegionId, dataRegion);
- logger.info(
- "Data regions have been recovered {}/{}",
- readyDataRegionNum.incrementAndGet(),
- recoverDataRegionNum);
- return null;
- };
- futures.add(cachedThreadPool.submit(recoverDataRegionTask));
- }
- }
- }
-
- /** get StorageGroup -> DataRegionIdList map from data/system directory. */
- public Map<String, List<DataRegionId>> getLocalDataRegionInfo() {
- File system = SystemFileFactory.INSTANCE.getFile(systemDir);
- File[] sgDirs = system.listFiles();
- Map<String, List<DataRegionId>> localDataRegionInfo = new HashMap<>();
- if (sgDirs == null) {
- return localDataRegionInfo;
- }
- for (File sgDir : sgDirs) {
- if (!sgDir.isDirectory()) {
- continue;
- }
- String sgName = sgDir.getName();
- List<DataRegionId> dataRegionIdList = new ArrayList<>();
- for (File dataRegionDir : sgDir.listFiles()) {
- if (!dataRegionDir.isDirectory()) {
- continue;
- }
- dataRegionIdList.add(new DataRegionId(Integer.parseInt(dataRegionDir.getName())));
- }
- localDataRegionInfo.put(sgName, dataRegionIdList);
- }
- return localDataRegionInfo;
- }
-
- @Override
- public void start() {
- // build time Interval to divide time partition
- initTimePartition();
- // create systemDir
- try {
- FileUtils.forceMkdir(SystemFileFactory.INSTANCE.getFile(systemDir));
- } catch (IOException e) {
- throw new StorageEngineFailureException(e);
- }
-
- // recover upgrade process
- UpgradeUtils.recoverUpgrade();
-
- recover();
-
- ttlCheckThread = IoTDBThreadPoolFactory.newSingleThreadScheduledExecutor("TTL-Check");
- ScheduledExecutorUtil.safelyScheduleAtFixedRate(
- ttlCheckThread,
- this::checkTTL,
- TTL_CHECK_INTERVAL,
- TTL_CHECK_INTERVAL,
- TimeUnit.MILLISECONDS);
- logger.info("start ttl check thread successfully.");
-
- startTimedService();
- }
-
- private void checkTTL() {
- try {
- for (DataRegion dataRegion : dataRegionMap.values()) {
- if (dataRegion != null) {
- dataRegion.checkFilesTTL();
- }
- }
- } catch (ConcurrentModificationException e) {
- // ignore
- } catch (Exception e) {
- logger.error("An error occurred when checking TTL", e);
- }
- }
-
- private void startTimedService() {
- // timed flush sequence memtable
- if (config.isEnableTimedFlushSeqMemtable()) {
- seqMemtableTimedFlushCheckThread =
- IoTDBThreadPoolFactory.newSingleThreadScheduledExecutor(
- ThreadName.TIMED_FlUSH_SEQ_MEMTABLE.getName());
- ScheduledExecutorUtil.safelyScheduleAtFixedRate(
- seqMemtableTimedFlushCheckThread,
- this::timedFlushSeqMemTable,
- config.getSeqMemtableFlushCheckInterval(),
- config.getSeqMemtableFlushCheckInterval(),
- TimeUnit.MILLISECONDS);
- logger.info("start sequence memtable timed flush check thread successfully.");
- }
- // timed flush unsequence memtable
- if (config.isEnableTimedFlushUnseqMemtable()) {
- unseqMemtableTimedFlushCheckThread =
- IoTDBThreadPoolFactory.newSingleThreadScheduledExecutor(
- ThreadName.TIMED_FlUSH_UNSEQ_MEMTABLE.getName());
- ScheduledExecutorUtil.safelyScheduleAtFixedRate(
- unseqMemtableTimedFlushCheckThread,
- this::timedFlushUnseqMemTable,
- config.getUnseqMemtableFlushCheckInterval(),
- config.getUnseqMemtableFlushCheckInterval(),
- TimeUnit.MILLISECONDS);
- logger.info("start unsequence memtable timed flush check thread successfully.");
- }
- }
-
- private void timedFlushSeqMemTable() {
- for (DataRegion dataRegion : dataRegionMap.values()) {
- if (dataRegion != null) {
- dataRegion.timedFlushSeqMemTable();
- }
- }
- }
-
- private void timedFlushUnseqMemTable() {
- for (DataRegion dataRegion : dataRegionMap.values()) {
- if (dataRegion != null) {
- dataRegion.timedFlushUnseqMemTable();
- }
- }
- }
-
- @Override
- public void stop() {
- for (DataRegion dataRegion : dataRegionMap.values()) {
- if (dataRegion != null) {
- ThreadUtils.stopThreadPool(
- dataRegion.getTimedCompactionScheduleTask(), ThreadName.COMPACTION_SCHEDULE);
- }
- }
- syncCloseAllProcessor();
- ThreadUtils.stopThreadPool(ttlCheckThread, ThreadName.TTL_CHECK_SERVICE);
- ThreadUtils.stopThreadPool(
- seqMemtableTimedFlushCheckThread, ThreadName.TIMED_FlUSH_SEQ_MEMTABLE);
- ThreadUtils.stopThreadPool(
- unseqMemtableTimedFlushCheckThread, ThreadName.TIMED_FlUSH_UNSEQ_MEMTABLE);
- if (cachedThreadPool != null) {
- cachedThreadPool.shutdownNow();
- }
- dataRegionMap.clear();
- }
-
- @Override
- public void shutdown(long milliseconds) throws ShutdownException {
- try {
- for (DataRegion dataRegion : dataRegionMap.values()) {
- ThreadUtils.stopThreadPool(
- dataRegion.getTimedCompactionScheduleTask(), ThreadName.COMPACTION_SCHEDULE);
- }
- forceCloseAllProcessor();
- } catch (TsFileProcessorException e) {
- throw new ShutdownException(e);
- }
- shutdownTimedService(ttlCheckThread, "TTlCheckThread");
- shutdownTimedService(seqMemtableTimedFlushCheckThread, "SeqMemtableTimedFlushCheckThread");
- shutdownTimedService(unseqMemtableTimedFlushCheckThread, "UnseqMemtableTimedFlushCheckThread");
- cachedThreadPool.shutdownNow();
- dataRegionMap.clear();
- }
-
- private void shutdownTimedService(ScheduledExecutorService pool, String poolName) {
- if (pool != null) {
- pool.shutdownNow();
- try {
- pool.awaitTermination(30, TimeUnit.SECONDS);
- } catch (InterruptedException e) {
- logger.warn("{} still doesn't exit after 30s", poolName);
- Thread.currentThread().interrupt();
- }
- }
- }
-
- @Override
- public ServiceType getID() {
- return ServiceType.STORAGE_ENGINE_SERVICE;
- }
-
- /**
- * build a new data region
- *
- * @param dataRegionId data region id e.g. 1
- * @param logicalStorageGroupName database name e.g. root.sg1
- */
- public DataRegion buildNewDataRegion(
- String logicalStorageGroupName, DataRegionId dataRegionId, long ttl)
- throws DataRegionException {
- DataRegion dataRegion;
- logger.info(
- "construct a data region instance, the database is {}, Thread is {}",
- logicalStorageGroupName,
- Thread.currentThread().getId());
- dataRegion =
- new DataRegion(
- systemDir + File.separator + logicalStorageGroupName,
- String.valueOf(dataRegionId.getId()),
- fileFlushPolicy,
- logicalStorageGroupName);
- dataRegion.setDataTTLWithTimePrecisionCheck(ttl);
- dataRegion.setCustomFlushListeners(customFlushListeners);
- dataRegion.setCustomCloseFileListeners(customCloseFileListeners);
- return dataRegion;
- }
-
- /** Write data into DataRegion. For standalone mode only. */
- public TSStatus write(DataRegionId groupId, PlanNode planNode) {
- return planNode.accept(new DataExecutionVisitor(), dataRegionMap.get(groupId));
- }
-
- /** This function is just for unit test. */
- @TestOnly
- public synchronized void reset() {
- dataRegionMap.clear();
- }
-
- /** flush command Sync asyncCloseOneProcessor all file node processors. */
- public void syncCloseAllProcessor() {
- logger.info("Start closing all database processor");
- List<Future<Void>> tasks = new ArrayList<>();
- for (DataRegion dataRegion : dataRegionMap.values()) {
- if (dataRegion != null) {
- tasks.add(
- cachedThreadPool.submit(
- () -> {
- dataRegion.syncCloseAllWorkingTsFileProcessors();
- return null;
- }));
- }
- }
- checkResults(tasks, "Failed to sync close processor.");
- }
-
- public void forceCloseAllProcessor() throws TsFileProcessorException {
- logger.info("Start force closing all database processor");
- List<Future<Void>> tasks = new ArrayList<>();
- for (DataRegion dataRegion : dataRegionMap.values()) {
- if (dataRegion != null) {
- tasks.add(
- cachedThreadPool.submit(
- () -> {
- dataRegion.forceCloseAllWorkingTsFileProcessors();
- return null;
- }));
- }
- }
- checkResults(tasks, "Failed to force close processor.");
- }
-
- public void closeStorageGroupProcessor(String storageGroupPath, boolean isSeq) {
- List<Future<Void>> tasks = new ArrayList<>();
- for (DataRegion dataRegion : dataRegionMap.values()) {
- if (dataRegion.getStorageGroupName().equals(storageGroupPath)) {
- if (isSeq) {
- for (TsFileProcessor tsFileProcessor : dataRegion.getWorkSequenceTsFileProcessors()) {
- tasks.add(
- cachedThreadPool.submit(
- () -> {
- dataRegion.syncCloseOneTsFileProcessor(isSeq, tsFileProcessor);
- return null;
- }));
- }
- } else {
- for (TsFileProcessor tsFileProcessor : dataRegion.getWorkUnsequenceTsFileProcessors()) {
- tasks.add(
- cachedThreadPool.submit(
- () -> {
- dataRegion.syncCloseOneTsFileProcessor(isSeq, tsFileProcessor);
- return null;
- }));
- }
- }
- }
- }
- checkResults(tasks, "Failed to close database processor.");
- }
-
- private <V> void checkResults(List<Future<V>> tasks, String errorMsg) {
- for (Future<V> task : tasks) {
- try {
- task.get();
- } catch (ExecutionException e) {
- throw new StorageEngineFailureException(errorMsg, e);
- } catch (InterruptedException e) {
- Thread.currentThread().interrupt();
- throw new StorageEngineFailureException(errorMsg, e);
- }
- }
- }
-
- /**
- * merge all databases.
- *
- * @throws StorageEngineException StorageEngineException
- */
- public void mergeAll() throws StorageEngineException {
- if (CommonDescriptor.getInstance().getConfig().isReadOnly()) {
- throw new StorageEngineException("Current system mode is read only, does not support merge");
- }
- dataRegionMap.values().forEach(DataRegion::compact);
- }
-
- public TSStatus operateFlush(TFlushReq req) {
- if (req.storageGroups == null) {
- StorageEngineV2.getInstance().syncCloseAllProcessor();
- WALManager.getInstance().deleteOutdatedWALFiles();
- } else {
- for (String storageGroup : req.storageGroups) {
- if (req.isSeq == null) {
- StorageEngineV2.getInstance().closeStorageGroupProcessor(storageGroup, true);
- StorageEngineV2.getInstance().closeStorageGroupProcessor(storageGroup, false);
- } else {
- StorageEngineV2.getInstance()
- .closeStorageGroupProcessor(storageGroup, Boolean.parseBoolean(req.isSeq));
- }
- }
- }
- return RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS);
- }
-
- public void setTTL(List<DataRegionId> dataRegionIdList, long dataTTL) {
- for (DataRegionId dataRegionId : dataRegionIdList) {
- DataRegion dataRegion = dataRegionMap.get(dataRegionId);
- if (dataRegion != null) {
- dataRegion.setDataTTLWithTimePrecisionCheck(dataTTL);
- }
- }
- }
-
- /**
- * Add a listener to listen flush start/end events. Notice that this addition only applies to
- * TsFileProcessors created afterwards.
- *
- * @param listener
- */
- public void registerFlushListener(FlushListener listener) {
- customFlushListeners.add(listener);
- }
-
- /**
- * Add a listener to listen file close events. Notice that this addition only applies to
- * TsFileProcessors created afterwards.
- *
- * @param listener
- */
- public void registerCloseFileListener(CloseFileListener listener) {
- customCloseFileListeners.add(listener);
- }
-
- private void makeSureNoOldRegion(DataRegionId regionId) {
- while (deletingDataRegionMap.containsKey(regionId)) {
- DataRegion oldRegion = deletingDataRegionMap.get(regionId);
- if (oldRegion != null) {
- oldRegion.waitForDeleted();
- }
- }
- }
-
- // When registering a new region, the coordinator needs to register the corresponding region with
- // the local engine before adding the corresponding consensusGroup to the consensus layer
- public DataRegion createDataRegion(DataRegionId regionId, String sg, long ttl)
- throws DataRegionException {
- makeSureNoOldRegion(regionId);
- AtomicReference<DataRegionException> exceptionAtomicReference = new AtomicReference<>(null);
- DataRegion dataRegion =
- dataRegionMap.computeIfAbsent(
- regionId,
- x -> {
- try {
- return buildNewDataRegion(sg, x, ttl);
- } catch (DataRegionException e) {
- exceptionAtomicReference.set(e);
- }
- return null;
- });
- if (exceptionAtomicReference.get() != null) {
- throw exceptionAtomicReference.get();
- }
- return dataRegion;
- }
-
- public void deleteDataRegion(DataRegionId regionId) {
- if (!dataRegionMap.containsKey(regionId) || deletingDataRegionMap.containsKey(regionId)) {
- return;
- }
- DataRegion region =
- deletingDataRegionMap.computeIfAbsent(regionId, k -> dataRegionMap.remove(regionId));
- if (region != null) {
- region.markDeleted();
- try {
- region.abortCompaction();
- region.syncDeleteDataFiles();
- region.deleteFolder(systemDir);
- if (config.isClusterMode()
- && config
- .getDataRegionConsensusProtocolClass()
- .equals(ConsensusFactory.IOT_CONSENSUS)) {
- WALManager.getInstance()
- .deleteWALNode(
- region.getStorageGroupName() + FILE_NAME_SEPARATOR + region.getDataRegionId());
- }
- SyncService.getInstance().unregisterDataRegion(region.getDataRegionId());
- } catch (Exception e) {
- logger.error(
- "Error occurs when deleting data region {}-{}",
- region.getStorageGroupName(),
- region.getDataRegionId(),
- e);
- } finally {
- deletingDataRegionMap.remove(regionId);
- }
- }
- }
-
- public DataRegion getDataRegion(DataRegionId regionId) {
- return dataRegionMap.get(regionId);
- }
-
- public List<DataRegion> getAllDataRegions() {
- return new ArrayList<>(dataRegionMap.values());
- }
-
- public List<DataRegionId> getAllDataRegionIds() {
- return new ArrayList<>(dataRegionMap.keySet());
- }
-
- /** This method is not thread-safe */
- public void setDataRegion(DataRegionId regionId, DataRegion newRegion) {
- if (dataRegionMap.containsKey(regionId)) {
- DataRegion oldRegion = dataRegionMap.get(regionId);
- oldRegion.syncCloseAllWorkingTsFileProcessors();
- oldRegion.abortCompaction();
- }
- dataRegionMap.put(regionId, newRegion);
- }
-
- public TSStatus setTTL(TSetTTLReq req) {
- Map<String, List<DataRegionId>> localDataRegionInfo =
- StorageEngineV2.getInstance().getLocalDataRegionInfo();
- List<DataRegionId> dataRegionIdList = new ArrayList<>();
- req.storageGroupPathPattern.forEach(
- storageGroup -> dataRegionIdList.addAll(localDataRegionInfo.get(storageGroup)));
- for (DataRegionId dataRegionId : dataRegionIdList) {
- DataRegion dataRegion = dataRegionMap.get(dataRegionId);
- if (dataRegion != null) {
- dataRegion.setDataTTLWithTimePrecisionCheck(req.TTL);
- }
- }
- return RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS);
- }
-
- public TsFileFlushPolicy getFileFlushPolicy() {
- return fileFlushPolicy;
- }
-
- public TSStatus writeLoadTsFileNode(
- DataRegionId dataRegionId, LoadTsFilePieceNode pieceNode, String uuid) {
- TSStatus status = new TSStatus();
-
- try {
- loadTsFileManager.writeToDataRegion(getDataRegion(dataRegionId), pieceNode, uuid);
- } catch (PageException e) {
- logger.error(
- String.format(
- "Parse Page error when writing piece node of TsFile %s to DataRegion %s.",
- pieceNode.getTsFile(), dataRegionId),
- e);
- status.setCode(TSStatusCode.LOAD_PIECE_OF_TSFILE_ERROR.getStatusCode());
- status.setMessage(e.getMessage());
- return status;
- } catch (IOException e) {
- logger.error(
- String.format(
- "IO error when writing piece node of TsFile %s to DataRegion %s.",
- pieceNode.getTsFile(), dataRegionId),
- e);
- status.setCode(TSStatusCode.LOAD_FILE_ERROR.getStatusCode());
- status.setMessage(e.getMessage());
- return status;
- }
-
- return RpcUtils.SUCCESS_STATUS;
- }
-
- public TSStatus executeLoadCommand(LoadTsFileScheduler.LoadCommand loadCommand, String uuid) {
- TSStatus status = new TSStatus();
-
- try {
- switch (loadCommand) {
- case EXECUTE:
- if (loadTsFileManager.loadAll(uuid)) {
- status = RpcUtils.SUCCESS_STATUS;
- } else {
- status.setCode(TSStatusCode.LOAD_FILE_ERROR.getStatusCode());
- status.setMessage(
- String.format(
- "No load TsFile uuid %s recorded for execute load command %s.",
- uuid, loadCommand));
- }
- break;
- case ROLLBACK:
- if (loadTsFileManager.deleteAll(uuid)) {
- status = RpcUtils.SUCCESS_STATUS;
- } else {
- status.setCode(TSStatusCode.LOAD_FILE_ERROR.getStatusCode());
- status.setMessage(
- String.format(
- "No load TsFile uuid %s recorded for execute load command %s.",
- uuid, loadCommand));
- }
- break;
- default:
- status.setCode(TSStatusCode.ILLEGAL_PARAMETER.getStatusCode());
- status.setMessage(String.format("Wrong load command %s.", loadCommand));
- }
- } catch (IOException | LoadFileException e) {
- logger.error(String.format("Execute load command %s error.", loadCommand), e);
- status.setCode(TSStatusCode.LOAD_FILE_ERROR.getStatusCode());
- status.setMessage(e.getMessage());
- }
-
- return status;
- }
-
- /** reboot timed flush sequence/unsequence memetable thread */
- public void rebootTimedService() throws ShutdownException {
- logger.info("Start rebooting all timed service.");
-
- // exclude ttl check thread
- stopTimedServiceAndThrow(seqMemtableTimedFlushCheckThread, "SeqMemtableTimedFlushCheckThread");
- stopTimedServiceAndThrow(
- unseqMemtableTimedFlushCheckThread, "UnseqMemtableTimedFlushCheckThread");
-
- logger.info("Stop all timed service successfully, and now restart them.");
-
- startTimedService();
-
- logger.info("Reboot all timed service successfully");
- }
-
- private void stopTimedServiceAndThrow(ScheduledExecutorService pool, String poolName)
- throws ShutdownException {
- if (pool != null) {
- pool.shutdownNow();
- try {
- pool.awaitTermination(30, TimeUnit.SECONDS);
- } catch (InterruptedException e) {
- logger.warn("{} still doesn't exit after 30s", poolName);
- throw new ShutdownException(e);
- }
- }
- }
-
- static class InstanceHolder {
-
- private static final StorageEngineV2 INSTANCE = new StorageEngineV2();
-
- private InstanceHolder() {
- // forbidding instantiation
- }
- }
-}
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/snapshot/SnapshotLoader.java b/server/src/main/java/org/apache/iotdb/db/engine/snapshot/SnapshotLoader.java
index 72d9dc1384..a5922082d3 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/snapshot/SnapshotLoader.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/snapshot/SnapshotLoader.java
@@ -23,7 +23,7 @@ import org.apache.iotdb.commons.conf.IoTDBConstant;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.conf.directories.FolderManager;
import org.apache.iotdb.db.conf.directories.strategy.DirectoryStrategyType;
-import org.apache.iotdb.db.engine.StorageEngineV2;
+import org.apache.iotdb.db.engine.StorageEngine;
import org.apache.iotdb.db.engine.storagegroup.DataRegion;
import org.apache.iotdb.db.exception.DiskSpaceInsufficientException;
@@ -66,7 +66,7 @@ public class SnapshotLoader {
+ File.separator
+ storageGroupName,
dataRegionId,
- StorageEngineV2.getInstance().getFileFlushPolicy(),
+ StorageEngine.getInstance().getFileFlushPolicy(),
storageGroupName);
} catch (Exception e) {
LOGGER.error("Exception occurs while load snapshot from {}", snapshotPath, e);
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/DataRegion.java b/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/DataRegion.java
index 8e7eb63e70..c32c4f8f59 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/DataRegion.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/DataRegion.java
@@ -36,7 +36,7 @@ import org.apache.iotdb.consensus.ConsensusFactory;
import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.conf.directories.DirectoryManager;
-import org.apache.iotdb.db.engine.StorageEngineV2;
+import org.apache.iotdb.db.engine.StorageEngine;
import org.apache.iotdb.db.engine.TsFileMetricManager;
import org.apache.iotdb.db.engine.compaction.CompactionRecoverManager;
import org.apache.iotdb.db.engine.compaction.CompactionScheduler;
@@ -306,7 +306,7 @@ public class DataRegion {
// recover tsfiles unless consensus protocol is ratis and storage engine is not ready
if (config.isClusterMode()
&& config.getDataRegionConsensusProtocolClass().equals(ConsensusFactory.RATIS_CONSENSUS)
- && !StorageEngineV2.getInstance().isAllSgReady()) {
+ && !StorageEngine.getInstance().isAllSgReady()) {
logger.debug(
"Skip recovering data region {}[{}] when consensus protocol is ratis and storage engine is not ready.",
storageGroupName,
@@ -552,7 +552,7 @@ public class DataRegion {
// recover and start timed compaction thread
initCompaction();
- if (StorageEngineV2.getInstance().isAllSgReady()) {
+ if (StorageEngine.getInstance().isAllSgReady()) {
logger.info("The data region {}[{}] is created successfully", storageGroupName, dataRegionId);
} else {
logger.info(
@@ -619,11 +619,11 @@ public class DataRegion {
for (TsFileResource resource : upgradeSeqFileList) {
for (String deviceId : resource.getDevices()) {
long endTime = resource.getEndTime(deviceId);
- long endTimePartitionId = StorageEngineV2.getTimePartition(endTime);
+ long endTimePartitionId = StorageEngine.getTimePartition(endTime);
lastFlushTimeMap.setOneDeviceGlobalFlushedTime(deviceId, endTime);
// set all the covered partition's LatestFlushedTime
- long partitionId = StorageEngineV2.getTimePartition(resource.getStartTime(deviceId));
+ long partitionId = StorageEngine.getTimePartition(resource.getStartTime(deviceId));
while (partitionId <= endTimePartitionId) {
lastFlushTimeMap.setOneDeviceFlushedTime(partitionId, deviceId, endTime);
if (!timePartitionIdVersionControllerMap.containsKey(partitionId)) {
@@ -890,7 +890,7 @@ public class DataRegion {
throw new OutOfTTLException(insertRowNode.getTime(), (DateTimeUtils.currentTime() - dataTTL));
}
if (enableMemControl) {
- StorageEngineV2.blockInsertionIfReject(null);
+ StorageEngine.blockInsertionIfReject(null);
}
writeLock("InsertRow");
try {
@@ -898,7 +898,7 @@ public class DataRegion {
return;
}
// init map
- long timePartitionId = StorageEngineV2.getTimePartition(insertRowNode.getTime());
+ long timePartitionId = StorageEngine.getTimePartition(insertRowNode.getTime());
if (!lastFlushTimeMap.checkAndCreateFlushedTimePartition(timePartitionId)) {
TimePartitionManager.getInstance()
@@ -939,7 +939,7 @@ public class DataRegion {
public void insertTablet(InsertTabletNode insertTabletNode)
throws BatchProcessException, WriteProcessException {
if (enableMemControl) {
- StorageEngineV2.blockInsertionIfReject(null);
+ StorageEngine.blockInsertionIfReject(null);
}
writeLock("insertTablet");
try {
@@ -982,7 +982,7 @@ public class DataRegion {
int before = loc;
// before time partition
long beforeTimePartition =
- StorageEngineV2.getTimePartition(insertTabletNode.getTimes()[before]);
+ StorageEngine.getTimePartition(insertTabletNode.getTimes()[before]);
// init map
if (!lastFlushTimeMap.checkAndCreateFlushedTimePartition(beforeTimePartition)) {
@@ -1933,8 +1933,8 @@ public class DataRegion {
long searchIndex,
PartialPath path,
TimePartitionFilter timePartitionFilter) {
- long timePartitionStartId = StorageEngineV2.getTimePartition(startTime);
- long timePartitionEndId = StorageEngineV2.getTimePartition(endTime);
+ long timePartitionStartId = StorageEngine.getTimePartition(startTime);
+ long timePartitionEndId = StorageEngine.getTimePartition(endTime);
List<WALFlushListener> walFlushListeners = new ArrayList<>();
if (config.getWalMode() == WALMode.DISABLE) {
return walFlushListeners;
@@ -2598,7 +2598,7 @@ public class DataRegion {
private void updateLastFlushTime(TsFileResource newTsFileResource) {
for (String device : newTsFileResource.getDevices()) {
long endTime = newTsFileResource.getEndTime(device);
- long timePartitionId = StorageEngineV2.getTimePartition(endTime);
+ long timePartitionId = StorageEngine.getTimePartition(endTime);
lastFlushTimeMap.updateFlushedTime(timePartitionId, device, endTime);
lastFlushTimeMap.updateGlobalFlushedTime(device, endTime);
}
@@ -3070,7 +3070,7 @@ public class DataRegion {
public void insert(InsertRowsOfOneDeviceNode insertRowsOfOneDeviceNode)
throws WriteProcessException, BatchProcessException {
if (enableMemControl) {
- StorageEngineV2.blockInsertionIfReject(null);
+ StorageEngine.blockInsertionIfReject(null);
}
writeLock("InsertRowsOfOneDevice");
try {
@@ -3097,7 +3097,7 @@ public class DataRegion {
continue;
}
// init map
- long timePartitionId = StorageEngineV2.getTimePartition(insertRowNode.getTime());
+ long timePartitionId = StorageEngine.getTimePartition(insertRowNode.getTime());
if (!lastFlushTimeMap.checkAndCreateFlushedTimePartition(timePartitionId)) {
TimePartitionManager.getInstance()
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/TimePartitionManager.java b/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/TimePartitionManager.java
index 1e26a46b8a..1f77b36b82 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/TimePartitionManager.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/TimePartitionManager.java
@@ -22,7 +22,7 @@ package org.apache.iotdb.db.engine.storagegroup;
import org.apache.iotdb.commons.consensus.DataRegionId;
import org.apache.iotdb.commons.utils.TestOnly;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.engine.StorageEngineV2;
+import org.apache.iotdb.db.engine.StorageEngine;
import java.util.HashMap;
import java.util.Map;
@@ -105,7 +105,7 @@ public class TimePartitionManager {
TimePartitionInfo timePartitionInfo = treeSet.first();
memCost -= timePartitionInfo.memSize;
DataRegion dataRegion =
- StorageEngineV2.getInstance().getDataRegion(timePartitionInfo.dataRegionId);
+ StorageEngine.getInstance().getDataRegion(timePartitionInfo.dataRegionId);
if (dataRegion != null) {
dataRegion.releaseFlushTimeMap(timePartitionInfo.partitionId);
}
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/TsFileProcessor.java b/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/TsFileProcessor.java
index 6b98732236..04ec438712 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/TsFileProcessor.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/TsFileProcessor.java
@@ -27,7 +27,7 @@ import org.apache.iotdb.commons.utils.TestOnly;
import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.conf.adapter.CompressionRatio;
-import org.apache.iotdb.db.engine.StorageEngineV2;
+import org.apache.iotdb.db.engine.StorageEngine;
import org.apache.iotdb.db.engine.TsFileMetricManager;
import org.apache.iotdb.db.engine.flush.CloseFileListener;
import org.apache.iotdb.db.engine.flush.FlushListener;
@@ -612,7 +612,7 @@ public class TsFileProcessor {
if (dataRegionInfo.needToReportToSystem()) {
try {
if (!SystemInfo.getInstance().reportStorageGroupStatus(dataRegionInfo, this)) {
- StorageEngineV2.blockInsertionIfReject(this);
+ StorageEngine.blockInsertionIfReject(this);
}
} catch (WriteProcessRejectException e) {
dataRegionInfo.releaseStorageGroupMemCost(memTableIncrement);
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/timeindex/DeviceTimeIndex.java b/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/timeindex/DeviceTimeIndex.java
index 2c5ea3439b..066bbc17e2 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/timeindex/DeviceTimeIndex.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/timeindex/DeviceTimeIndex.java
@@ -22,7 +22,7 @@ package org.apache.iotdb.db.engine.storagegroup.timeindex;
import org.apache.iotdb.commons.exception.IllegalPathException;
import org.apache.iotdb.commons.path.PartialPath;
import org.apache.iotdb.commons.utils.SerializeUtils;
-import org.apache.iotdb.db.engine.StorageEngineV2;
+import org.apache.iotdb.db.engine.StorageEngine;
import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
import org.apache.iotdb.db.exception.PartitionViolationException;
import org.apache.iotdb.tsfile.utils.FilePathUtils;
@@ -234,8 +234,7 @@ public class DeviceTimeIndex implements ITimeIndex {
public long getTimePartition(String tsFilePath) {
try {
if (deviceToIndex != null && !deviceToIndex.isEmpty()) {
- return StorageEngineV2.getTimePartition(
- startTimes[deviceToIndex.values().iterator().next()]);
+ return StorageEngine.getTimePartition(startTimes[deviceToIndex.values().iterator().next()]);
}
String[] filePathSplits = FilePathUtils.splitTsFilePath(tsFilePath);
return Long.parseLong(filePathSplits[filePathSplits.length - 2]);
@@ -248,7 +247,7 @@ public class DeviceTimeIndex implements ITimeIndex {
private long getTimePartitionWithCheck() {
long partitionId = SPANS_MULTI_TIME_PARTITIONS_FLAG_ID;
for (int index : deviceToIndex.values()) {
- long p = StorageEngineV2.getTimePartition(startTimes[index]);
+ long p = StorageEngine.getTimePartition(startTimes[index]);
if (partitionId == SPANS_MULTI_TIME_PARTITIONS_FLAG_ID) {
partitionId = p;
} else {
@@ -257,7 +256,7 @@ public class DeviceTimeIndex implements ITimeIndex {
}
}
- p = StorageEngineV2.getTimePartition(endTimes[index]);
+ p = StorageEngine.getTimePartition(endTimes[index]);
if (partitionId != p) {
return SPANS_MULTI_TIME_PARTITIONS_FLAG_ID;
}
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/timeindex/FileTimeIndex.java b/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/timeindex/FileTimeIndex.java
index dfa80b25a6..ed7b6bf18e 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/timeindex/FileTimeIndex.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/storagegroup/timeindex/FileTimeIndex.java
@@ -20,7 +20,7 @@
package org.apache.iotdb.db.engine.storagegroup.timeindex;
import org.apache.iotdb.commons.path.PartialPath;
-import org.apache.iotdb.db.engine.StorageEngineV2;
+import org.apache.iotdb.db.engine.StorageEngine;
import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
import org.apache.iotdb.db.exception.PartitionViolationException;
import org.apache.iotdb.tsfile.fileSystem.FSFactoryProducer;
@@ -139,8 +139,8 @@ public class FileTimeIndex implements ITimeIndex {
}
private long getTimePartitionWithCheck() {
- long startPartitionId = StorageEngineV2.getTimePartition(startTime);
- long endPartitionId = StorageEngineV2.getTimePartition(endTime);
+ long startPartitionId = StorageEngine.getTimePartition(startTime);
+ long endPartitionId = StorageEngine.getTimePartition(endTime);
if (startPartitionId == endPartitionId) {
return startPartitionId;
}
diff --git a/server/src/main/java/org/apache/iotdb/db/localconfignode/LocalConfigNode.java b/server/src/main/java/org/apache/iotdb/db/localconfignode/LocalConfigNode.java
index c52d2b125d..8c29db709f 100644
--- a/server/src/main/java/org/apache/iotdb/db/localconfignode/LocalConfigNode.java
+++ b/server/src/main/java/org/apache/iotdb/db/localconfignode/LocalConfigNode.java
@@ -59,7 +59,7 @@ import org.apache.iotdb.confignode.rpc.thrift.TShowPipeInfo;
import org.apache.iotdb.confignode.rpc.thrift.TShowPipeResp;
import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.engine.StorageEngineV2;
+import org.apache.iotdb.db.engine.StorageEngine;
import org.apache.iotdb.db.engine.cache.BloomFilterCache;
import org.apache.iotdb.db.engine.cache.ChunkCache;
import org.apache.iotdb.db.engine.cache.TimeSeriesMetadataCache;
@@ -126,7 +126,7 @@ public class LocalConfigNode {
private final LocalSchemaPartitionTable schemaPartitionTable =
LocalSchemaPartitionTable.getInstance();
- private final StorageEngineV2 storageEngine = StorageEngineV2.getInstance();
+ private final StorageEngine storageEngine = StorageEngine.getInstance();
private final LocalDataPartitionInfo dataPartitionInfo = LocalDataPartitionInfo.getInstance();
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/plan/analyze/StandalonePartitionFetcher.java b/server/src/main/java/org/apache/iotdb/db/mpp/plan/analyze/StandalonePartitionFetcher.java
index a4484debb2..625314c995 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/plan/analyze/StandalonePartitionFetcher.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/plan/analyze/StandalonePartitionFetcher.java
@@ -30,7 +30,7 @@ import org.apache.iotdb.commons.path.PathPatternTree;
import org.apache.iotdb.commons.utils.PathUtils;
import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.engine.StorageEngineV2;
+import org.apache.iotdb.db.engine.StorageEngine;
import org.apache.iotdb.db.exception.DataRegionException;
import org.apache.iotdb.db.exception.sql.StatementAnalyzeException;
import org.apache.iotdb.db.localconfignode.LocalConfigNode;
@@ -57,7 +57,7 @@ public class StandalonePartitionFetcher implements IPartitionFetcher {
private static final IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig();
private static final Logger logger = LoggerFactory.getLogger(StandalonePartitionFetcher.class);
private final LocalConfigNode localConfigNode = LocalConfigNode.getInstance();
- private final StorageEngineV2 storageEngine = StorageEngineV2.getInstance();
+ private final StorageEngine storageEngine = StorageEngine.getInstance();
private final SeriesPartitionExecutor executor =
SeriesPartitionExecutor.getSeriesPartitionExecutor(
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/plan/scheduler/StandaloneScheduler.java b/server/src/main/java/org/apache/iotdb/db/mpp/plan/scheduler/StandaloneScheduler.java
index ebceb0e10b..7e06bd5e38 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/plan/scheduler/StandaloneScheduler.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/plan/scheduler/StandaloneScheduler.java
@@ -26,7 +26,7 @@ import org.apache.iotdb.commons.conf.CommonDescriptor;
import org.apache.iotdb.commons.consensus.ConsensusGroupId;
import org.apache.iotdb.commons.consensus.DataRegionId;
import org.apache.iotdb.commons.consensus.SchemaRegionId;
-import org.apache.iotdb.db.engine.StorageEngineV2;
+import org.apache.iotdb.db.engine.StorageEngine;
import org.apache.iotdb.db.engine.storagegroup.DataRegion;
import org.apache.iotdb.db.exception.WriteProcessException;
import org.apache.iotdb.db.metadata.schemaregion.ISchemaRegion;
@@ -55,7 +55,7 @@ import java.util.concurrent.ScheduledExecutorService;
public class StandaloneScheduler implements IScheduler {
- private static final StorageEngineV2 STORAGE_ENGINE = StorageEngineV2.getInstance();
+ private static final StorageEngine STORAGE_ENGINE = StorageEngine.getInstance();
private static final SchemaEngine SCHEMA_ENGINE = SchemaEngine.getInstance();
@@ -100,8 +100,7 @@ public class StandaloneScheduler implements IScheduler {
ConsensusGroupId.Factory.createFromTConsensusGroupId(
fragmentInstance.getRegionReplicaSet().getRegionId());
if (groupId instanceof DataRegionId) {
- DataRegion region =
- StorageEngineV2.getInstance().getDataRegion((DataRegionId) groupId);
+ DataRegion region = StorageEngine.getInstance().getDataRegion((DataRegionId) groupId);
FragmentInstanceInfo info =
FragmentInstanceManager.getInstance()
.execDataQueryFragmentInstance(fragmentInstance, region);
diff --git a/server/src/main/java/org/apache/iotdb/db/mpp/plan/scheduler/load/LoadTsFileDispatcherImpl.java b/server/src/main/java/org/apache/iotdb/db/mpp/plan/scheduler/load/LoadTsFileDispatcherImpl.java
index dc36fce471..956f7c2ae9 100644
--- a/server/src/main/java/org/apache/iotdb/db/mpp/plan/scheduler/load/LoadTsFileDispatcherImpl.java
+++ b/server/src/main/java/org/apache/iotdb/db/mpp/plan/scheduler/load/LoadTsFileDispatcherImpl.java
@@ -29,7 +29,7 @@ import org.apache.iotdb.commons.concurrent.IoTDBThreadPoolFactory;
import org.apache.iotdb.commons.consensus.ConsensusGroupId;
import org.apache.iotdb.commons.consensus.DataRegionId;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.engine.StorageEngineV2;
+import org.apache.iotdb.db.engine.StorageEngine;
import org.apache.iotdb.db.exception.LoadFileException;
import org.apache.iotdb.db.exception.mpp.FragmentInstanceDispatchException;
import org.apache.iotdb.db.mpp.plan.planner.plan.FragmentInstance;
@@ -160,15 +160,14 @@ public class LoadTsFileDispatcherImpl implements IFragInstanceDispatcher {
new TSStatus(TSStatusCode.DESERIALIZE_PIECE_OF_TSFILE_ERROR.getStatusCode()));
}
TSStatus resultStatus =
- StorageEngineV2.getInstance()
- .writeLoadTsFileNode((DataRegionId) groupId, pieceNode, uuid);
+ StorageEngine.getInstance().writeLoadTsFileNode((DataRegionId) groupId, pieceNode, uuid);
if (!RpcUtils.SUCCESS_STATUS.equals(resultStatus)) {
throw new FragmentInstanceDispatchException(resultStatus);
}
} else if (planNode instanceof LoadSingleTsFileNode) { // do not need split
try {
- StorageEngineV2.getInstance()
+ StorageEngine.getInstance()
.getDataRegion((DataRegionId) groupId)
.loadNewTsFile(
((LoadSingleTsFileNode) planNode).getTsFileResource(),
@@ -237,7 +236,7 @@ public class LoadTsFileDispatcherImpl implements IFragInstanceDispatcher {
private void dispatchLocally(TLoadCommandReq loadCommandReq)
throws FragmentInstanceDispatchException {
TSStatus resultStatus =
- StorageEngineV2.getInstance()
+ StorageEngine.getInstance()
.executeLoadCommand(
LoadTsFileScheduler.LoadCommand.values()[loadCommandReq.commandType],
loadCommandReq.uuid);
diff --git a/server/src/main/java/org/apache/iotdb/db/query/control/QueryResourceManager.java b/server/src/main/java/org/apache/iotdb/db/query/control/QueryResourceManager.java
index 66b04bee6e..330d6a1ec1 100644
--- a/server/src/main/java/org/apache/iotdb/db/query/control/QueryResourceManager.java
+++ b/server/src/main/java/org/apache/iotdb/db/query/control/QueryResourceManager.java
@@ -19,7 +19,6 @@
package org.apache.iotdb.db.query.control;
import org.apache.iotdb.commons.path.PartialPath;
-import org.apache.iotdb.db.engine.StorageEngine;
import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
import org.apache.iotdb.db.engine.storagegroup.DataRegion;
import org.apache.iotdb.db.exception.StorageEngineException;
@@ -28,12 +27,10 @@ import org.apache.iotdb.db.metadata.idtable.IDTable;
import org.apache.iotdb.db.query.context.QueryContext;
import org.apache.iotdb.db.query.externalsort.serialize.IExternalSortFileDeserializer;
import org.apache.iotdb.db.service.TemporaryQueryDataFileService;
-import org.apache.iotdb.db.utils.QueryUtils;
import org.apache.iotdb.tsfile.read.filter.basic.Filter;
import java.io.IOException;
import java.util.ArrayList;
-import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -155,40 +152,7 @@ public class QueryResourceManager {
public QueryDataSource getQueryDataSource(
PartialPath selectedPath, QueryContext context, Filter timeFilter, boolean ascending)
throws StorageEngineException, QueryProcessException {
-
- long queryId = context.getQueryId();
- String storageGroupPath = StorageEngine.getInstance().getStorageGroupPath(selectedPath);
- String deviceId = selectedPath.getDevice();
-
- // get cached QueryDataSource
- QueryDataSource cachedQueryDataSource;
- if (cachedQueryDataSourcesMap.containsKey(queryId)
- && cachedQueryDataSourcesMap.get(queryId).containsKey(storageGroupPath)) {
- cachedQueryDataSource = cachedQueryDataSourcesMap.get(queryId).get(storageGroupPath);
- } else {
- // QueryDataSource is never cached in cluster mode
- DataRegion processor = StorageEngine.getInstance().getProcessor(selectedPath.getDevicePath());
- PartialPath translatedPath = IDTable.translateQueryPath(selectedPath);
- cachedQueryDataSource =
- processor.query(
- Collections.singletonList(translatedPath),
- translatedPath.getDevice(),
- context,
- filePathsManager,
- timeFilter);
- }
-
- // construct QueryDataSource for selectedPath
- QueryDataSource queryDataSource =
- new QueryDataSource(
- cachedQueryDataSource.getSeqResources(), cachedQueryDataSource.getUnseqResources());
-
- queryDataSource.setDataTTL(cachedQueryDataSource.getDataTTL());
-
- // calculate the read order of unseqResources
- QueryUtils.fillOrderIndexes(queryDataSource, deviceId, ascending);
-
- return queryDataSource;
+ return null;
}
/**
diff --git a/server/src/main/java/org/apache/iotdb/db/query/dataset/groupby/GroupByWithValueFilterDataSet.java b/server/src/main/java/org/apache/iotdb/db/query/dataset/groupby/GroupByWithValueFilterDataSet.java
index a84eedf231..dffac115c5 100644
--- a/server/src/main/java/org/apache/iotdb/db/query/dataset/groupby/GroupByWithValueFilterDataSet.java
+++ b/server/src/main/java/org/apache/iotdb/db/query/dataset/groupby/GroupByWithValueFilterDataSet.java
@@ -19,41 +19,28 @@
package org.apache.iotdb.db.query.dataset.groupby;
-import org.apache.iotdb.commons.path.AlignedPath;
-import org.apache.iotdb.commons.path.MeasurementPath;
import org.apache.iotdb.commons.path.PartialPath;
import org.apache.iotdb.commons.utils.TestOnly;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.engine.StorageEngine;
-import org.apache.iotdb.db.engine.storagegroup.DataRegion;
import org.apache.iotdb.db.exception.StorageEngineException;
import org.apache.iotdb.db.exception.query.QueryProcessException;
-import org.apache.iotdb.db.metadata.utils.MetaUtils;
import org.apache.iotdb.db.qp.physical.crud.GroupByTimePlan;
import org.apache.iotdb.db.qp.physical.crud.RawDataQueryPlan;
import org.apache.iotdb.db.query.aggregation.AggregateResult;
import org.apache.iotdb.db.query.context.QueryContext;
import org.apache.iotdb.db.query.control.QueryResourceManager;
import org.apache.iotdb.db.query.executor.groupby.SlidingWindowGroupByExecutor;
-import org.apache.iotdb.db.query.executor.groupby.SlidingWindowGroupByExecutorFactory;
-import org.apache.iotdb.db.query.factory.AggregateResultFactory;
import org.apache.iotdb.db.query.reader.series.IReaderByTimestamp;
import org.apache.iotdb.db.query.reader.series.SeriesReaderByTimestamp;
import org.apache.iotdb.db.query.timegenerator.ServerTimeGenerator;
import org.apache.iotdb.db.utils.QueryUtils;
import org.apache.iotdb.db.utils.ValueIterator;
-import org.apache.iotdb.tsfile.read.filter.TimeFilter;
-import org.apache.iotdb.tsfile.read.filter.basic.Filter;
-import org.apache.iotdb.tsfile.read.filter.factory.FilterFactory;
import org.apache.iotdb.tsfile.read.query.timegenerator.TimeGenerator;
-import org.apache.iotdb.tsfile.utils.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
@@ -95,80 +82,7 @@ public class GroupByWithValueFilterDataSet extends GroupByTimeEngineDataSet {
/** init reader and aggregate function. This method should be called once after initializing */
public void initGroupBy(QueryContext context, GroupByTimePlan groupByTimePlan)
- throws StorageEngineException, QueryProcessException {
- this.timestampGenerator = getTimeGenerator(context, groupByTimePlan);
- this.readerToAggrIndexesMap = new HashMap<>();
- this.groupByTimePlan = groupByTimePlan;
-
- Filter timeFilter =
- FilterFactory.and(
- TimeFilter.gtEq(groupByTimePlan.getStartTime()),
- TimeFilter.lt(groupByTimePlan.getEndTime()));
-
- List<PartialPath> selectedSeries = new ArrayList<>();
- groupByTimePlan
- .getDeduplicatedPaths()
- .forEach(k -> selectedSeries.add(((MeasurementPath) k).transformToExactPath()));
-
- Map<PartialPath, List<Integer>> pathToAggrIndexesMap =
- MetaUtils.groupAggregationsBySeries(selectedSeries);
- Map<AlignedPath, List<List<Integer>>> alignedPathToAggrIndexesMap =
- MetaUtils.groupAlignedSeriesWithAggregations(pathToAggrIndexesMap);
-
- List<PartialPath> groupedPathList =
- new ArrayList<>(pathToAggrIndexesMap.size() + alignedPathToAggrIndexesMap.size());
- groupedPathList.addAll(pathToAggrIndexesMap.keySet());
- groupedPathList.addAll(alignedPathToAggrIndexesMap.keySet());
-
- Pair<List<DataRegion>, Map<DataRegion, List<PartialPath>>> lockListAndProcessorToSeriesMapPair =
- StorageEngine.getInstance().mergeLock(groupedPathList);
- List<DataRegion> lockList = lockListAndProcessorToSeriesMapPair.left;
- Map<DataRegion, List<PartialPath>> processorToSeriesMap =
- lockListAndProcessorToSeriesMapPair.right;
-
- try {
- // init QueryDataSource Cache
- QueryResourceManager.getInstance()
- .initQueryDataSourceCache(processorToSeriesMap, context, timeFilter);
- } catch (Exception e) {
- logger.error("Meet error when init QueryDataSource ", e);
- throw new QueryProcessException("Meet error when init QueryDataSource.", e);
- } finally {
- StorageEngine.getInstance().mergeUnLock(lockList);
- }
-
- // init non-aligned series reader
- for (PartialPath path : pathToAggrIndexesMap.keySet()) {
- IReaderByTimestamp seriesReaderByTimestamp = getReaderByTime(path, groupByTimePlan, context);
- readerToAggrIndexesMap.put(
- seriesReaderByTimestamp, Collections.singletonList(pathToAggrIndexesMap.get(path)));
- }
- // assign null to be friendly for GC
- pathToAggrIndexesMap = null;
- // init aligned series reader
- for (PartialPath alignedPath : alignedPathToAggrIndexesMap.keySet()) {
- IReaderByTimestamp seriesReaderByTimestamp =
- getReaderByTime(alignedPath, groupByTimePlan, context);
- readerToAggrIndexesMap.put(
- seriesReaderByTimestamp, alignedPathToAggrIndexesMap.get(alignedPath));
- }
- // assign null to be friendly for GC
- alignedPathToAggrIndexesMap = null;
-
- preAggregateResults = new AggregateResult[paths.size()];
- for (int i = 0; i < paths.size(); i++) {
- preAggregateResults[i] =
- AggregateResultFactory.getAggrResultByName(
- groupByTimePlan.getDeduplicatedAggregations().get(i),
- groupByTimePlan.getDeduplicatedDataTypes().get(i),
- ascending);
- slidingWindowGroupByExecutors[i] =
- SlidingWindowGroupByExecutorFactory.getSlidingWindowGroupByExecutor(
- groupByTimePlan.getDeduplicatedAggregations().get(i),
- groupByTimePlan.getDeduplicatedDataTypes().get(i),
- ascending);
- }
- }
+ throws StorageEngineException, QueryProcessException {}
protected TimeGenerator getTimeGenerator(QueryContext context, RawDataQueryPlan queryPlan)
throws StorageEngineException {
diff --git a/server/src/main/java/org/apache/iotdb/db/query/dataset/groupby/GroupByWithoutValueFilterDataSet.java b/server/src/main/java/org/apache/iotdb/db/query/dataset/groupby/GroupByWithoutValueFilterDataSet.java
index 38adb532c1..6aa049d090 100644
--- a/server/src/main/java/org/apache/iotdb/db/query/dataset/groupby/GroupByWithoutValueFilterDataSet.java
+++ b/server/src/main/java/org/apache/iotdb/db/query/dataset/groupby/GroupByWithoutValueFilterDataSet.java
@@ -22,33 +22,23 @@ package org.apache.iotdb.db.query.dataset.groupby;
import org.apache.iotdb.commons.path.AlignedPath;
import org.apache.iotdb.commons.path.MeasurementPath;
import org.apache.iotdb.commons.path.PartialPath;
-import org.apache.iotdb.db.engine.StorageEngine;
-import org.apache.iotdb.db.engine.storagegroup.DataRegion;
import org.apache.iotdb.db.exception.StorageEngineException;
import org.apache.iotdb.db.exception.query.QueryProcessException;
-import org.apache.iotdb.db.metadata.utils.MetaUtils;
import org.apache.iotdb.db.qp.physical.crud.GroupByTimePlan;
import org.apache.iotdb.db.query.aggregation.AggregateResult;
import org.apache.iotdb.db.query.context.QueryContext;
-import org.apache.iotdb.db.query.control.QueryResourceManager;
import org.apache.iotdb.db.query.executor.groupby.AlignedGroupByExecutor;
import org.apache.iotdb.db.query.executor.groupby.GroupByExecutor;
import org.apache.iotdb.db.query.executor.groupby.SlidingWindowGroupByExecutor;
-import org.apache.iotdb.db.query.executor.groupby.SlidingWindowGroupByExecutorFactory;
import org.apache.iotdb.db.query.executor.groupby.impl.LocalAlignedGroupByExecutor;
import org.apache.iotdb.db.query.executor.groupby.impl.LocalGroupByExecutor;
-import org.apache.iotdb.db.query.factory.AggregateResultFactory;
import org.apache.iotdb.db.query.filter.TsFileFilter;
-import org.apache.iotdb.tsfile.read.expression.IExpression;
-import org.apache.iotdb.tsfile.read.expression.impl.GlobalTimeExpression;
import org.apache.iotdb.tsfile.read.filter.basic.Filter;
-import org.apache.iotdb.tsfile.utils.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
-import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -93,101 +83,7 @@ public class GroupByWithoutValueFilterDataSet extends GroupByTimeEngineDataSet {
/** init reader and aggregate function. This method should be called once after initializing */
public void initGroupBy(QueryContext context, GroupByTimePlan groupByTimePlan)
- throws StorageEngineException, QueryProcessException {
- IExpression expression = groupByTimePlan.getExpression();
-
- Filter timeFilter = null;
- if (expression != null) {
- timeFilter = ((GlobalTimeExpression) expression).getFilter();
- }
- if (timeFilter == null) {
- throw new QueryProcessException("TimeFilter cannot be null in GroupBy query.");
- }
-
- // init resultIndexes, group aligned series
- pathToAggrIndexesMap = MetaUtils.groupAggregationsBySeries(paths);
- alignedPathToAggrIndexesMap =
- MetaUtils.groupAlignedSeriesWithAggregations(pathToAggrIndexesMap);
-
- List<PartialPath> groupedPathList =
- new ArrayList<>(pathToAggrIndexesMap.size() + alignedPathToAggrIndexesMap.size());
- groupedPathList.addAll(pathToAggrIndexesMap.keySet());
- groupedPathList.addAll(alignedPathToAggrIndexesMap.keySet());
-
- Pair<List<DataRegion>, Map<DataRegion, List<PartialPath>>> lockListAndProcessorToSeriesMapPair =
- StorageEngine.getInstance().mergeLock(groupedPathList);
- List<DataRegion> lockList = lockListAndProcessorToSeriesMapPair.left;
- Map<DataRegion, List<PartialPath>> processorToSeriesMap =
- lockListAndProcessorToSeriesMapPair.right;
-
- try {
- // init QueryDataSource Cache
- QueryResourceManager.getInstance()
- .initQueryDataSourceCache(processorToSeriesMap, context, timeFilter);
- } catch (Exception e) {
- logger.error("Meet error when init QueryDataSource ", e);
- throw new QueryProcessException("Meet error when init QueryDataSource.", e);
- } finally {
- StorageEngine.getInstance().mergeUnLock(lockList);
- }
-
- // init GroupByExecutor for non-aligned series
- for (Map.Entry<PartialPath, List<Integer>> entry : pathToAggrIndexesMap.entrySet()) {
- MeasurementPath path = (MeasurementPath) entry.getKey();
- List<Integer> indexes = entry.getValue();
- if (!pathExecutors.containsKey(path)) {
- pathExecutors.put(
- path,
- getGroupByExecutor(
- path,
- groupByTimePlan.getAllMeasurementsInDevice(path.getDevice()),
- context,
- timeFilter.copy(),
- null,
- ascending));
- }
- for (int index : indexes) {
- AggregateResult aggrResult =
- AggregateResultFactory.getAggrResultByName(
- groupByTimePlan.getDeduplicatedAggregations().get(index),
- path.getSeriesType(),
- ascending);
- slidingWindowGroupByExecutors[index] =
- SlidingWindowGroupByExecutorFactory.getSlidingWindowGroupByExecutor(
- groupByTimePlan.getDeduplicatedAggregations().get(index),
- path.getSeriesType(),
- ascending);
- pathExecutors.get(path).addAggregateResult(aggrResult);
- }
- }
- // init GroupByExecutor for aligned series
- for (Map.Entry<AlignedPath, List<List<Integer>>> entry :
- alignedPathToAggrIndexesMap.entrySet()) {
- AlignedPath path = entry.getKey();
- List<List<Integer>> indexesList = entry.getValue();
- if (!alignedPathExecutors.containsKey(path)) {
- alignedPathExecutors.put(
- path, getAlignedGroupByExecutor(path, context, timeFilter.copy(), null, ascending));
- }
- for (int i = 0; i < path.getMeasurementList().size(); i++) {
- List<AggregateResult> aggrResultList = new ArrayList<>();
- for (int index : indexesList.get(i)) {
- AggregateResult aggrResult =
- AggregateResultFactory.getAggrResultByName(
- groupByTimePlan.getDeduplicatedAggregations().get(index),
- path.getSchemaList().get(i).getType(),
- ascending);
- slidingWindowGroupByExecutors[index] =
- SlidingWindowGroupByExecutorFactory.getSlidingWindowGroupByExecutor(
- groupByTimePlan.getDeduplicatedAggregations().get(index),
- path.getSchemaList().get(i).getType(),
- ascending);
- aggrResultList.add(aggrResult);
- }
- alignedPathExecutors.get(path).addAggregateResult(aggrResultList);
- }
- }
- }
+ throws StorageEngineException, QueryProcessException {}
@Override
protected AggregateResult[] getNextAggregateResult() throws IOException {
diff --git a/server/src/main/java/org/apache/iotdb/db/query/executor/AggregationExecutor.java b/server/src/main/java/org/apache/iotdb/db/query/executor/AggregationExecutor.java
index d5d0081263..ffe129776d 100644
--- a/server/src/main/java/org/apache/iotdb/db/query/executor/AggregationExecutor.java
+++ b/server/src/main/java/org/apache/iotdb/db/query/executor/AggregationExecutor.java
@@ -24,12 +24,9 @@ import org.apache.iotdb.commons.path.AlignedPath;
import org.apache.iotdb.commons.path.MeasurementPath;
import org.apache.iotdb.commons.path.PartialPath;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.engine.StorageEngine;
import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
-import org.apache.iotdb.db.engine.storagegroup.DataRegion;
import org.apache.iotdb.db.exception.StorageEngineException;
import org.apache.iotdb.db.exception.query.QueryProcessException;
-import org.apache.iotdb.db.metadata.utils.MetaUtils;
import org.apache.iotdb.db.qp.physical.crud.AggregationPlan;
import org.apache.iotdb.db.qp.physical.crud.QueryPlan;
import org.apache.iotdb.db.qp.physical.crud.RawDataQueryPlan;
@@ -53,20 +50,15 @@ import org.apache.iotdb.tsfile.read.common.BatchData;
import org.apache.iotdb.tsfile.read.common.IBatchDataIterator;
import org.apache.iotdb.tsfile.read.common.RowRecord;
import org.apache.iotdb.tsfile.read.expression.IExpression;
-import org.apache.iotdb.tsfile.read.expression.impl.GlobalTimeExpression;
import org.apache.iotdb.tsfile.read.filter.basic.Filter;
import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet;
import org.apache.iotdb.tsfile.read.query.timegenerator.TimeGenerator;
-import org.apache.iotdb.tsfile.utils.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
@@ -107,61 +99,7 @@ public class AggregationExecutor {
/** execute aggregate function with only time filter or no filter. */
public QueryDataSet executeWithoutValueFilter(AggregationPlan aggregationPlan)
throws StorageEngineException, IOException, QueryProcessException {
-
- Filter timeFilter = null;
- if (expression != null) {
- timeFilter = ((GlobalTimeExpression) expression).getFilter();
- }
-
- // TODO use multi-thread
- Map<PartialPath, List<Integer>> pathToAggrIndexesMap =
- MetaUtils.groupAggregationsBySeries(selectedSeries);
- // Attention: this method will REMOVE aligned path from pathToAggrIndexesMap
- Map<AlignedPath, List<List<Integer>>> alignedPathToAggrIndexesMap =
- MetaUtils.groupAlignedSeriesWithAggregations(pathToAggrIndexesMap);
-
- List<PartialPath> groupedPathList =
- new ArrayList<>(pathToAggrIndexesMap.size() + alignedPathToAggrIndexesMap.size());
- groupedPathList.addAll(pathToAggrIndexesMap.keySet());
- groupedPathList.addAll(alignedPathToAggrIndexesMap.keySet());
-
- // TODO-Cluster: group the paths by database to reduce communications
- Pair<List<DataRegion>, Map<DataRegion, List<PartialPath>>> lockListAndProcessorToSeriesMapPair =
- StorageEngine.getInstance().mergeLock(groupedPathList);
- List<DataRegion> lockList = lockListAndProcessorToSeriesMapPair.left;
- Map<DataRegion, List<PartialPath>> processorToSeriesMap =
- lockListAndProcessorToSeriesMapPair.right;
-
- try {
- // init QueryDataSource Cache
- QueryResourceManager.getInstance()
- .initQueryDataSourceCache(processorToSeriesMap, context, timeFilter);
- } catch (Exception e) {
- logger.error("Meet error when init QueryDataSource ", e);
- throw new QueryProcessException("Meet error when init QueryDataSource.", e);
- } finally {
- StorageEngine.getInstance().mergeUnLock(lockList);
- }
-
- for (Map.Entry<PartialPath, List<Integer>> entry : pathToAggrIndexesMap.entrySet()) {
- PartialPath seriesPath = entry.getKey();
- aggregateOneSeries(
- seriesPath,
- entry.getValue(),
- aggregationPlan.getAllMeasurementsInDevice(seriesPath.getDevice()),
- timeFilter);
- }
- for (Map.Entry<AlignedPath, List<List<Integer>>> entry :
- alignedPathToAggrIndexesMap.entrySet()) {
- AlignedPath alignedPath = entry.getKey();
- aggregateOneAlignedSeries(
- alignedPath,
- entry.getValue(),
- aggregationPlan.getAllMeasurementsInDevice(alignedPath.getDevice()),
- timeFilter);
- }
-
- return constructDataSet(Arrays.asList(aggregateResultList), aggregationPlan);
+ return null;
}
/**
@@ -621,65 +559,7 @@ public class AggregationExecutor {
/** execute aggregate function with value filter. */
public QueryDataSet executeWithValueFilter(AggregationPlan queryPlan)
throws StorageEngineException, IOException, QueryProcessException {
- optimizeLastElementFunc(queryPlan);
-
- TimeGenerator timestampGenerator = getTimeGenerator(context, queryPlan);
-
- Map<IReaderByTimestamp, List<List<Integer>>> readerToAggrIndexesMap = new HashMap<>();
-
- // group by path name
- Map<PartialPath, List<Integer>> pathToAggrIndexesMap =
- MetaUtils.groupAggregationsBySeries(selectedSeries);
- Map<AlignedPath, List<List<Integer>>> alignedPathToAggrIndexesMap =
- MetaUtils.groupAlignedSeriesWithAggregations(pathToAggrIndexesMap);
-
- List<PartialPath> groupedPathList =
- new ArrayList<>(pathToAggrIndexesMap.size() + alignedPathToAggrIndexesMap.size());
- groupedPathList.addAll(pathToAggrIndexesMap.keySet());
- groupedPathList.addAll(alignedPathToAggrIndexesMap.keySet());
-
- Pair<List<DataRegion>, Map<DataRegion, List<PartialPath>>> lockListAndProcessorToSeriesMapPair =
- StorageEngine.getInstance().mergeLock(groupedPathList);
- List<DataRegion> lockList = lockListAndProcessorToSeriesMapPair.left;
- Map<DataRegion, List<PartialPath>> processorToSeriesMap =
- lockListAndProcessorToSeriesMapPair.right;
-
- try {
- // init QueryDataSource Cache
- QueryResourceManager.getInstance()
- .initQueryDataSourceCache(
- processorToSeriesMap, context, timestampGenerator.getTimeFilter());
- } catch (Exception e) {
- logger.error("Meet error when init QueryDataSource ", e);
- throw new QueryProcessException("Meet error when init QueryDataSource.", e);
- } finally {
- StorageEngine.getInstance().mergeUnLock(lockList);
- }
-
- for (PartialPath path : pathToAggrIndexesMap.keySet()) {
- IReaderByTimestamp seriesReaderByTimestamp =
- getReaderByTime(path, queryPlan, path.getSeriesType(), context);
- readerToAggrIndexesMap.put(
- seriesReaderByTimestamp, Collections.singletonList(pathToAggrIndexesMap.get(path)));
- }
- // assign null to be friendly for GC
- pathToAggrIndexesMap = null;
- for (AlignedPath vectorPath : alignedPathToAggrIndexesMap.keySet()) {
- IReaderByTimestamp seriesReaderByTimestamp =
- getReaderByTime(vectorPath, queryPlan, vectorPath.getSeriesType(), context);
- readerToAggrIndexesMap.put(
- seriesReaderByTimestamp, alignedPathToAggrIndexesMap.get(vectorPath));
- }
- // assign null to be friendly for GC
- alignedPathToAggrIndexesMap = null;
-
- for (int i = 0; i < selectedSeries.size(); i++) {
- aggregateResultList[i] =
- AggregateResultFactory.getAggrResultByName(
- aggregations.get(i), dataTypes.get(i), ascending);
- }
- aggregateWithValueFilter(timestampGenerator, readerToAggrIndexesMap);
- return constructDataSet(Arrays.asList(aggregateResultList), queryPlan);
+ return null;
}
private void optimizeLastElementFunc(QueryPlan queryPlan) {
diff --git a/server/src/main/java/org/apache/iotdb/db/query/executor/FillQueryExecutor.java b/server/src/main/java/org/apache/iotdb/db/query/executor/FillQueryExecutor.java
index 0c0beca368..b926ab0297 100644
--- a/server/src/main/java/org/apache/iotdb/db/query/executor/FillQueryExecutor.java
+++ b/server/src/main/java/org/apache/iotdb/db/query/executor/FillQueryExecutor.java
@@ -21,15 +21,12 @@ package org.apache.iotdb.db.query.executor;
import org.apache.iotdb.commons.path.PartialPath;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.engine.StorageEngine;
import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
-import org.apache.iotdb.db.engine.storagegroup.DataRegion;
import org.apache.iotdb.db.exception.StorageEngineException;
import org.apache.iotdb.db.exception.query.QueryProcessException;
import org.apache.iotdb.db.qp.physical.crud.FillQueryPlan;
import org.apache.iotdb.db.query.context.QueryContext;
import org.apache.iotdb.db.query.control.QueryResourceManager;
-import org.apache.iotdb.db.query.dataset.SingleDataSet;
import org.apache.iotdb.db.query.executor.fill.IFill;
import org.apache.iotdb.db.query.executor.fill.LinearFill;
import org.apache.iotdb.db.query.executor.fill.PreviousFill;
@@ -39,12 +36,10 @@ import org.apache.iotdb.db.query.reader.series.SeriesRawDataBatchReader;
import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
import org.apache.iotdb.tsfile.read.TimeValuePair;
import org.apache.iotdb.tsfile.read.common.BatchData;
-import org.apache.iotdb.tsfile.read.common.RowRecord;
import org.apache.iotdb.tsfile.read.filter.TimeFilter;
import org.apache.iotdb.tsfile.read.filter.basic.Filter;
import org.apache.iotdb.tsfile.read.filter.factory.FilterFactory;
import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet;
-import org.apache.iotdb.tsfile.utils.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -87,69 +82,7 @@ public class FillQueryExecutor {
*/
public QueryDataSet execute(QueryContext context)
throws StorageEngineException, QueryProcessException, IOException {
- RowRecord record = new RowRecord(queryTime);
-
- Filter timeFilter = initFillExecutorsAndContructTimeFilter(context);
-
- Pair<List<DataRegion>, Map<DataRegion, List<PartialPath>>> lockListAndProcessorToSeriesMapPair =
- StorageEngine.getInstance().mergeLock(selectedSeries);
- List<DataRegion> lockList = lockListAndProcessorToSeriesMapPair.left;
- Map<DataRegion, List<PartialPath>> processorToSeriesMap =
- lockListAndProcessorToSeriesMapPair.right;
-
- try {
- // init QueryDataSource Cache
- QueryResourceManager.getInstance()
- .initQueryDataSourceCache(processorToSeriesMap, context, timeFilter);
- } catch (Exception e) {
- logger.error("Meet error when init QueryDataSource ", e);
- throw new QueryProcessException("Meet error when init QueryDataSource.", e);
- } finally {
- StorageEngine.getInstance().mergeUnLock(lockList);
- }
-
- List<TimeValuePair> timeValuePairs = getTimeValuePairs(context);
- for (int i = 0; i < selectedSeries.size(); i++) {
- TSDataType dataType = dataTypes.get(i);
-
- if (timeValuePairs.get(i) != null) {
- // No need to fill
- record.addField(timeValuePairs.get(i).getValue().getValue(), dataType);
- continue;
- }
-
- IFill fill = fillExecutors[i];
-
- if (fill instanceof LinearFill
- && (dataType == TSDataType.VECTOR
- || dataType == TSDataType.BOOLEAN
- || dataType == TSDataType.TEXT)) {
- record.addField(null);
- logger.info("Linear fill doesn't support the " + i + "-th column in SQL.");
- continue;
- }
-
- TimeValuePair timeValuePair;
- try {
- timeValuePair = fill.getFillResult();
- if (timeValuePair == null && fill instanceof ValueFill) {
- timeValuePair = ((ValueFill) fill).getSpecifiedFillResult(dataType);
- }
- } catch (QueryProcessException | NumberFormatException ignored) {
- record.addField(null);
- logger.info("Value fill doesn't support the " + i + "-th column in SQL.");
- continue;
- }
- if (timeValuePair == null || timeValuePair.getValue() == null) {
- record.addField(null);
- } else {
- record.addField(timeValuePair.getValue().getValue(), dataType);
- }
- }
-
- SingleDataSet dataSet = new SingleDataSet(selectedSeries, dataTypes);
- dataSet.setRecord(record);
- return dataSet;
+ return null;
}
private Filter initFillExecutorsAndContructTimeFilter(QueryContext context)
diff --git a/server/src/main/java/org/apache/iotdb/db/query/executor/LastQueryExecutor.java b/server/src/main/java/org/apache/iotdb/db/query/executor/LastQueryExecutor.java
index c602c30194..025986c820 100644
--- a/server/src/main/java/org/apache/iotdb/db/query/executor/LastQueryExecutor.java
+++ b/server/src/main/java/org/apache/iotdb/db/query/executor/LastQueryExecutor.java
@@ -22,18 +22,12 @@ package org.apache.iotdb.db.query.executor;
import org.apache.iotdb.commons.path.MeasurementPath;
import org.apache.iotdb.commons.path.PartialPath;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.engine.StorageEngine;
-import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
-import org.apache.iotdb.db.engine.storagegroup.DataRegion;
import org.apache.iotdb.db.exception.StorageEngineException;
import org.apache.iotdb.db.exception.query.QueryProcessException;
-import org.apache.iotdb.db.metadata.utils.ResourceByPathUtils;
import org.apache.iotdb.db.qp.physical.crud.LastQueryPlan;
import org.apache.iotdb.db.qp.physical.crud.RawDataQueryPlan;
import org.apache.iotdb.db.query.context.QueryContext;
-import org.apache.iotdb.db.query.control.QueryResourceManager;
import org.apache.iotdb.db.query.dataset.ListDataSet;
-import org.apache.iotdb.db.query.executor.fill.LastPointReader;
import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
import org.apache.iotdb.tsfile.read.TimeValuePair;
import org.apache.iotdb.tsfile.read.common.Field;
@@ -45,7 +39,6 @@ import org.apache.iotdb.tsfile.read.filter.operator.Gt;
import org.apache.iotdb.tsfile.read.filter.operator.GtEq;
import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet;
import org.apache.iotdb.tsfile.utils.Binary;
-import org.apache.iotdb.tsfile.utils.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -53,7 +46,6 @@ import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
-import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -256,47 +248,7 @@ public class LastQueryExecutor {
QueryContext context,
Map<String, Set<String>> deviceMeasurementsMap)
throws StorageEngineException, QueryProcessException, IOException {
- // Acquire query resources for the rest series paths
- Pair<List<DataRegion>, Map<DataRegion, List<PartialPath>>> lockListAndProcessorToSeriesMapPair =
- StorageEngine.getInstance().mergeLock(seriesPaths);
- List<DataRegion> lockList = lockListAndProcessorToSeriesMapPair.left;
- Map<DataRegion, List<PartialPath>> processorToSeriesMap =
- lockListAndProcessorToSeriesMapPair.right;
-
- try {
- // init QueryDataSource Cache
- QueryResourceManager.getInstance()
- .initQueryDataSourceCache(processorToSeriesMap, context, filter);
- } catch (Exception e) {
- logger.error("Meet error when init QueryDataSource ", e);
- throw new QueryProcessException("Meet error when init QueryDataSource.", e);
- } finally {
- StorageEngine.getInstance().mergeUnLock(lockList);
- }
-
- List<LastPointReader> readers = new ArrayList<>();
- for (int i = 0; i < seriesPaths.size(); i++) {
- QueryDataSource dataSource =
- QueryResourceManager.getInstance()
- .getQueryDataSource(seriesPaths.get(i), context, filter, ascending);
- LastPointReader lastReader =
- ResourceByPathUtils.getResourceInstance(seriesPaths.get(i))
- .createLastPointReader(
- dataTypes.get(i),
- deviceMeasurementsMap.getOrDefault(
- seriesPaths.get(i).getDevice(), new HashSet<>()),
- context,
- dataSource,
- Long.MAX_VALUE,
- filter);
- readers.add(lastReader);
- }
-
- List<TimeValuePair> lastPairs = new ArrayList<>(seriesPaths.size());
- for (LastPointReader reader : readers) {
- lastPairs.add(reader.readLastPoint());
- }
- return lastPairs;
+ return null;
}
private interface LastCacheAccessor {
diff --git a/server/src/main/java/org/apache/iotdb/db/query/executor/RawDataQueryExecutor.java b/server/src/main/java/org/apache/iotdb/db/query/executor/RawDataQueryExecutor.java
index 45ef501754..404d59d7b3 100644
--- a/server/src/main/java/org/apache/iotdb/db/query/executor/RawDataQueryExecutor.java
+++ b/server/src/main/java/org/apache/iotdb/db/query/executor/RawDataQueryExecutor.java
@@ -18,12 +18,8 @@
*/
package org.apache.iotdb.db.query.executor;
-import org.apache.iotdb.commons.path.AlignedPath;
import org.apache.iotdb.commons.path.MeasurementPath;
import org.apache.iotdb.commons.path.PartialPath;
-import org.apache.iotdb.db.engine.StorageEngine;
-import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
-import org.apache.iotdb.db.engine.storagegroup.DataRegion;
import org.apache.iotdb.db.exception.StorageEngineException;
import org.apache.iotdb.db.exception.query.QueryProcessException;
import org.apache.iotdb.db.qp.physical.crud.RawDataQueryPlan;
@@ -34,11 +30,9 @@ import org.apache.iotdb.db.query.dataset.RawQueryDataSetWithValueFilter;
import org.apache.iotdb.db.query.dataset.RawQueryDataSetWithoutValueFilter;
import org.apache.iotdb.db.query.reader.series.IReaderByTimestamp;
import org.apache.iotdb.db.query.reader.series.ManagedSeriesReader;
-import org.apache.iotdb.db.query.reader.series.SeriesRawDataBatchReader;
import org.apache.iotdb.db.query.reader.series.SeriesReaderByTimestamp;
import org.apache.iotdb.db.query.timegenerator.ServerTimeGenerator;
import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
-import org.apache.iotdb.tsfile.read.expression.impl.GlobalTimeExpression;
import org.apache.iotdb.tsfile.read.filter.basic.Filter;
import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet;
import org.apache.iotdb.tsfile.read.query.timegenerator.TimeGenerator;
@@ -49,10 +43,7 @@ import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
import java.util.List;
-import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
@@ -105,58 +96,7 @@ public class RawDataQueryExecutor {
protected List<ManagedSeriesReader> initManagedSeriesReader(QueryContext context)
throws StorageEngineException, QueryProcessException {
- Filter timeFilter = null;
- if (queryPlan.getExpression() != null) {
- timeFilter = ((GlobalTimeExpression) queryPlan.getExpression()).getFilter();
- }
-
- List<ManagedSeriesReader> readersOfSelectedSeries = new ArrayList<>();
- Pair<List<DataRegion>, Map<DataRegion, List<PartialPath>>> lockListAndProcessorToSeriesMapPair =
- StorageEngine.getInstance().mergeLock(queryPlan.getDeduplicatedPaths());
- List<DataRegion> lockList = lockListAndProcessorToSeriesMapPair.left;
- Map<DataRegion, List<PartialPath>> processorToSeriesMap =
- lockListAndProcessorToSeriesMapPair.right;
-
- try {
- // init QueryDataSource cache
- QueryResourceManager.getInstance()
- .initQueryDataSourceCache(processorToSeriesMap, context, timeFilter);
- } catch (Exception e) {
- logger.error("Meet error when init QueryDataSource ", e);
- throw new QueryProcessException("Meet error when init QueryDataSource.", e);
- } finally {
- StorageEngine.getInstance().mergeUnLock(lockList);
- }
-
- try {
- List<PartialPath> paths = queryPlan.getDeduplicatedPaths();
- for (PartialPath path : paths) {
- TSDataType dataType = path.getSeriesType();
-
- QueryDataSource queryDataSource =
- QueryResourceManager.getInstance()
- .getQueryDataSource(path, context, timeFilter, queryPlan.isAscending());
- timeFilter = queryDataSource.updateFilterUsingTTL(timeFilter);
-
- ManagedSeriesReader reader =
- new SeriesRawDataBatchReader(
- path,
- queryPlan.getAllMeasurementsInDevice(path.getDevice()),
- dataType,
- context,
- queryDataSource,
- timeFilter,
- null,
- null,
- queryPlan.isAscending());
- readersOfSelectedSeries.add(reader);
- }
- } catch (Exception e) {
- logger.error("Meet error when init series reader ", e);
- throw new QueryProcessException("Meet error when init series reader .", e);
- }
-
- return readersOfSelectedSeries;
+ return null;
}
/**
@@ -209,81 +149,7 @@ public class RawDataQueryExecutor {
protected Pair<List<IReaderByTimestamp>, List<List<Integer>>> initSeriesReaderByTimestamp(
QueryContext context, RawDataQueryPlan queryPlan, List<Boolean> cached, Filter timeFilter)
throws QueryProcessException, StorageEngineException {
- List<IReaderByTimestamp> readersOfSelectedSeries = new ArrayList<>();
-
- List<PartialPath> pathList = new ArrayList<>();
- List<PartialPath> notCachedPathList = new ArrayList<>();
-
- // reader index -> deduplicated path index
- List<List<Integer>> readerToIndexList = new ArrayList<>();
- // fullPath -> reader index
- Map<String, Integer> fullPathToReaderIndexMap = new HashMap<>();
- List<PartialPath> deduplicatedPaths = queryPlan.getDeduplicatedPaths();
- int index = 0;
- for (int i = 0; i < cached.size(); i++) {
- if (cached.get(i)) {
- pathList.add(deduplicatedPaths.get(i));
- readerToIndexList.add(Collections.singletonList(i));
- cached.set(index++, Boolean.TRUE);
- } else {
- notCachedPathList.add(deduplicatedPaths.get(i));
- // For aligned Path, it's deviceID; for nonAligned path, it's full path
- String fullPath = deduplicatedPaths.get(i).getFullPath();
- Integer readerIndex = fullPathToReaderIndexMap.get(fullPath);
-
- // it's another sub sensor in aligned device, we just add it to the previous AlignedPath
- if (readerIndex != null) {
- AlignedPath anotherSubSensor = (AlignedPath) deduplicatedPaths.get(i);
- ((AlignedPath) pathList.get(readerIndex)).mergeAlignedPath(anotherSubSensor);
- readerToIndexList.get(readerIndex).add(i);
- } else {
- pathList.add(deduplicatedPaths.get(i));
- fullPathToReaderIndexMap.put(fullPath, index);
- List<Integer> indexList = new ArrayList<>();
- indexList.add(i);
- readerToIndexList.add(indexList);
- cached.set(index++, Boolean.FALSE);
- }
- }
- }
-
- queryPlan.setDeduplicatedPaths(pathList);
- int previousSize = cached.size();
- if (previousSize > pathList.size()) {
- cached.subList(pathList.size(), previousSize).clear();
- }
-
- Pair<List<DataRegion>, Map<DataRegion, List<PartialPath>>> lockListAndProcessorToSeriesMapPair =
- StorageEngine.getInstance().mergeLock(notCachedPathList);
- List<DataRegion> lockList = lockListAndProcessorToSeriesMapPair.left;
- Map<DataRegion, List<PartialPath>> processorToSeriesMap =
- lockListAndProcessorToSeriesMapPair.right;
-
- try {
- // init QueryDataSource Cache
- QueryResourceManager.getInstance()
- .initQueryDataSourceCache(processorToSeriesMap, context, timeFilter);
- } catch (Exception e) {
- logger.error("Meet error when init QueryDataSource ", e);
- throw new QueryProcessException("Meet error when init QueryDataSource.", e);
- } finally {
- StorageEngine.getInstance().mergeUnLock(lockList);
- }
- for (int i = 0; i < queryPlan.getDeduplicatedPaths().size(); i++) {
- if (cached.get(i)) {
- readersOfSelectedSeries.add(null);
- continue;
- }
- PartialPath path = queryPlan.getDeduplicatedPaths().get(i);
- IReaderByTimestamp seriesReaderByTimestamp =
- getReaderByTimestamp(
- path,
- queryPlan.getAllMeasurementsInDevice(path.getDevice()),
- queryPlan.getDeduplicatedDataTypes().get(i),
- context);
- readersOfSelectedSeries.add(seriesReaderByTimestamp);
- }
- return new Pair<>(readersOfSelectedSeries, readerToIndexList);
+ return null;
}
protected IReaderByTimestamp getReaderByTimestamp(
diff --git a/server/src/main/java/org/apache/iotdb/db/query/timegenerator/ServerTimeGenerator.java b/server/src/main/java/org/apache/iotdb/db/query/timegenerator/ServerTimeGenerator.java
index 33a577cdcf..c4c2c016b2 100644
--- a/server/src/main/java/org/apache/iotdb/db/query/timegenerator/ServerTimeGenerator.java
+++ b/server/src/main/java/org/apache/iotdb/db/query/timegenerator/ServerTimeGenerator.java
@@ -20,9 +20,7 @@ package org.apache.iotdb.db.query.timegenerator;
import org.apache.iotdb.commons.path.MeasurementPath;
import org.apache.iotdb.commons.path.PartialPath;
-import org.apache.iotdb.db.engine.StorageEngine;
import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
-import org.apache.iotdb.db.engine.storagegroup.DataRegion;
import org.apache.iotdb.db.exception.StorageEngineException;
import org.apache.iotdb.db.exception.query.QueryProcessException;
import org.apache.iotdb.db.qp.physical.crud.RawDataQueryPlan;
@@ -41,15 +39,12 @@ import org.apache.iotdb.tsfile.read.filter.factory.FilterType;
import org.apache.iotdb.tsfile.read.filter.operator.AndFilter;
import org.apache.iotdb.tsfile.read.query.timegenerator.TimeGenerator;
import org.apache.iotdb.tsfile.read.reader.IBatchReader;
-import org.apache.iotdb.tsfile.utils.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
-import java.util.ArrayList;
import java.util.List;
-import java.util.Map;
/**
* A timestamp generator for query with filter. e.g. For query clause "select s1, s2 from root where
@@ -81,28 +76,7 @@ public class ServerTimeGenerator extends TimeGenerator {
}
public void serverConstructNode(IExpression expression)
- throws IOException, StorageEngineException, QueryProcessException {
- List<PartialPath> pathList = new ArrayList<>();
- timeFilter = getPathListAndConstructTimeFilterFromExpression(expression, pathList);
-
- Pair<List<DataRegion>, Map<DataRegion, List<PartialPath>>> lockListAndProcessorToSeriesMapPair =
- StorageEngine.getInstance().mergeLock(pathList);
- List<DataRegion> lockList = lockListAndProcessorToSeriesMapPair.left;
- Map<DataRegion, List<PartialPath>> processorToSeriesMap =
- lockListAndProcessorToSeriesMapPair.right;
-
- try {
- // init QueryDataSource Cache
- QueryResourceManager.getInstance()
- .initQueryDataSourceCache(processorToSeriesMap, context, timeFilter);
- } catch (Exception e) {
- logger.error("Meet error when init QueryDataSource ", e);
- throw new QueryProcessException("Meet error when init QueryDataSource.", e);
- } finally {
- StorageEngine.getInstance().mergeUnLock(lockList);
- }
- operatorNode = construct(expression);
- }
+ throws IOException, StorageEngineException, QueryProcessException {}
/**
* collect PartialPath from Expression and transform MeasurementPath whose isUnderAlignedEntity is
diff --git a/server/src/main/java/org/apache/iotdb/db/service/DataNode.java b/server/src/main/java/org/apache/iotdb/db/service/DataNode.java
index 5bb1a9b474..31e4f69efa 100644
--- a/server/src/main/java/org/apache/iotdb/db/service/DataNode.java
+++ b/server/src/main/java/org/apache/iotdb/db/service/DataNode.java
@@ -53,7 +53,7 @@ import org.apache.iotdb.db.conf.IoTDBStartCheck;
import org.apache.iotdb.db.conf.rest.IoTDBRestServiceDescriptor;
import org.apache.iotdb.db.consensus.DataRegionConsensusImpl;
import org.apache.iotdb.db.consensus.SchemaRegionConsensusImpl;
-import org.apache.iotdb.db.engine.StorageEngineV2;
+import org.apache.iotdb.db.engine.StorageEngine;
import org.apache.iotdb.db.engine.cache.CacheHitRatioMonitor;
import org.apache.iotdb.db.engine.compaction.CompactionTaskManager;
import org.apache.iotdb.db.engine.flush.FlushManager;
@@ -227,7 +227,7 @@ public class DataNode implements DataNodeMBean {
getTriggerInformationList(dataNodeRegisterResp.getAllTriggerInformation());
// store ttl information
- StorageEngineV2.getInstance().updateTTLInfo(dataNodeRegisterResp.getAllTTLInformation());
+ StorageEngine.getInstance().updateTTLInfo(dataNodeRegisterResp.getAllTTLInformation());
if (dataNodeRegisterResp.getStatus().getCode()
== TSStatusCode.SUCCESS_STATUS.getStatusCode()
@@ -355,7 +355,7 @@ public class DataNode implements DataNodeMBean {
registerManager.register(WALManager.getInstance());
// in mpp mode we need to start some other services
- registerManager.register(StorageEngineV2.getInstance());
+ registerManager.register(StorageEngine.getInstance());
registerManager.register(MPPDataExchangeService.getInstance());
registerManager.register(DriverScheduler.getInstance());
@@ -364,7 +364,7 @@ public class DataNode implements DataNodeMBean {
logger.info(
"IoTDB DataNode is setting up, some databases may not be ready now, please wait several seconds...");
- while (!StorageEngineV2.getInstance().isAllSgReady()) {
+ while (!StorageEngine.getInstance().isAllSgReady()) {
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
@@ -379,9 +379,6 @@ public class DataNode implements DataNodeMBean {
registerManager.register(SyncService.getInstance());
registerManager.register(UpgradeSevice.getINSTANCE());
- // in mpp mode we temporarily don't start settle service because it uses StorageEngine directly
- // in itself, but currently we need to use StorageEngineV2 instead of StorageEngine in mpp mode.
- // registerManager.register(SettleService.getINSTANCE());
// start region migrate service
registerManager.register(RegionMigrateService.getInstance());
diff --git a/server/src/main/java/org/apache/iotdb/db/service/IoTDB.java b/server/src/main/java/org/apache/iotdb/db/service/IoTDB.java
index cd895c0ff1..5124f8a316 100644
--- a/server/src/main/java/org/apache/iotdb/db/service/IoTDB.java
+++ b/server/src/main/java/org/apache/iotdb/db/service/IoTDB.java
@@ -31,7 +31,6 @@ import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.conf.IoTDBStartCheck;
import org.apache.iotdb.db.conf.rest.IoTDBRestServiceCheck;
-import org.apache.iotdb.db.engine.StorageEngine;
import org.apache.iotdb.db.engine.cache.CacheHitRatioMonitor;
import org.apache.iotdb.db.engine.compaction.CompactionTaskManager;
import org.apache.iotdb.db.engine.flush.FlushManager;
@@ -150,7 +149,7 @@ public class IoTDB implements IoTDBMBean {
registerManager.register(SyncService.getInstance());
registerManager.register(WALManager.getInstance());
- registerManager.register(StorageEngine.getInstance());
+ // registerManager.register(StorageEngine.getInstance());
registerManager.register(TemporaryQueryDataFileService.getInstance());
registerManager.register(
diff --git a/server/src/main/java/org/apache/iotdb/db/service/IoTDBShutdownHook.java b/server/src/main/java/org/apache/iotdb/db/service/IoTDBShutdownHook.java
index ef83211603..d73cdfe194 100644
--- a/server/src/main/java/org/apache/iotdb/db/service/IoTDBShutdownHook.java
+++ b/server/src/main/java/org/apache/iotdb/db/service/IoTDBShutdownHook.java
@@ -22,7 +22,7 @@ import org.apache.iotdb.commons.conf.CommonDescriptor;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.conf.directories.DirectoryChecker;
import org.apache.iotdb.db.consensus.DataRegionConsensusImpl;
-import org.apache.iotdb.db.engine.StorageEngineV2;
+import org.apache.iotdb.db.engine.StorageEngine;
import org.apache.iotdb.db.metadata.schemaregion.SchemaEngineMode;
import org.apache.iotdb.db.utils.MemUtils;
import org.apache.iotdb.db.wal.WALManager;
@@ -49,7 +49,7 @@ public class IoTDBShutdownHook extends Thread {
// flush data to Tsfile and remove WAL log files
if (!IoTDBDescriptor.getInstance().getConfig().isClusterMode()) {
- StorageEngineV2.getInstance().syncCloseAllProcessor();
+ StorageEngine.getInstance().syncCloseAllProcessor();
}
WALManager.getInstance().deleteOutdatedWALFiles();
diff --git a/server/src/main/java/org/apache/iotdb/db/service/NewIoTDB.java b/server/src/main/java/org/apache/iotdb/db/service/NewIoTDB.java
index 6abd0d6bd7..2e24a38916 100644
--- a/server/src/main/java/org/apache/iotdb/db/service/NewIoTDB.java
+++ b/server/src/main/java/org/apache/iotdb/db/service/NewIoTDB.java
@@ -33,7 +33,7 @@ import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.conf.IoTDBStartCheck;
import org.apache.iotdb.db.conf.rest.IoTDBRestServiceCheck;
import org.apache.iotdb.db.conf.rest.IoTDBRestServiceDescriptor;
-import org.apache.iotdb.db.engine.StorageEngineV2;
+import org.apache.iotdb.db.engine.StorageEngine;
import org.apache.iotdb.db.engine.cache.CacheHitRatioMonitor;
import org.apache.iotdb.db.engine.compaction.CompactionTaskManager;
import org.apache.iotdb.db.engine.flush.FlushManager;
@@ -138,7 +138,7 @@ public class NewIoTDB implements NewIoTDBMBean {
registerManager.register(SyncService.getInstance());
registerManager.register(WALManager.getInstance());
- registerManager.register(StorageEngineV2.getInstance());
+ registerManager.register(StorageEngine.getInstance());
if (!isTesting) {
registerManager.register(DriverScheduler.getInstance());
}
@@ -158,7 +158,7 @@ public class NewIoTDB implements NewIoTDBMBean {
logger.info(
"IoTDB is setting up, some databases may not be ready now, please wait several seconds...");
- while (!StorageEngineV2.getInstance().isAllSgReady()) {
+ while (!StorageEngine.getInstance().isAllSgReady()) {
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
diff --git a/server/src/main/java/org/apache/iotdb/db/service/RegionMigrateService.java b/server/src/main/java/org/apache/iotdb/db/service/RegionMigrateService.java
index 81d404bc1b..327efc3787 100644
--- a/server/src/main/java/org/apache/iotdb/db/service/RegionMigrateService.java
+++ b/server/src/main/java/org/apache/iotdb/db/service/RegionMigrateService.java
@@ -36,7 +36,7 @@ import org.apache.iotdb.consensus.common.response.ConsensusGenericResponse;
import org.apache.iotdb.db.client.ConfigNodeClient;
import org.apache.iotdb.db.consensus.DataRegionConsensusImpl;
import org.apache.iotdb.db.consensus.SchemaRegionConsensusImpl;
-import org.apache.iotdb.db.engine.StorageEngineV2;
+import org.apache.iotdb.db.engine.StorageEngine;
import org.apache.iotdb.db.metadata.schemaregion.SchemaEngine;
import org.apache.iotdb.db.rescon.AbstractPoolManager;
import org.apache.iotdb.mpp.rpc.thrift.TMaintainPeerReq;
@@ -436,7 +436,7 @@ public class RegionMigrateService implements IService {
ConsensusGroupId regionId = ConsensusGroupId.Factory.createFromTConsensusGroupId(tRegionId);
try {
if (regionId instanceof DataRegionId) {
- StorageEngineV2.getInstance().deleteDataRegion((DataRegionId) regionId);
+ StorageEngine.getInstance().deleteDataRegion((DataRegionId) regionId);
} else {
SchemaEngine.getInstance().deleteSchemaRegion((SchemaRegionId) regionId);
}
diff --git a/server/src/main/java/org/apache/iotdb/db/service/thrift/impl/DataNodeInternalRPCServiceImpl.java b/server/src/main/java/org/apache/iotdb/db/service/thrift/impl/DataNodeInternalRPCServiceImpl.java
index c60382ce76..d6301492f5 100644
--- a/server/src/main/java/org/apache/iotdb/db/service/thrift/impl/DataNodeInternalRPCServiceImpl.java
+++ b/server/src/main/java/org/apache/iotdb/db/service/thrift/impl/DataNodeInternalRPCServiceImpl.java
@@ -56,7 +56,7 @@ import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.conf.OperationType;
import org.apache.iotdb.db.consensus.DataRegionConsensusImpl;
import org.apache.iotdb.db.consensus.SchemaRegionConsensusImpl;
-import org.apache.iotdb.db.engine.StorageEngineV2;
+import org.apache.iotdb.db.engine.StorageEngine;
import org.apache.iotdb.db.engine.cache.BloomFilterCache;
import org.apache.iotdb.db.engine.cache.ChunkCache;
import org.apache.iotdb.db.engine.cache.TimeSeriesMetadataCache;
@@ -221,7 +221,7 @@ public class DataNodeInternalRPCServiceImpl implements IDataNodeRPCService.Iface
private final ISchemaFetcher SCHEMA_FETCHER;
private final SchemaEngine schemaEngine = SchemaEngine.getInstance();
- private final StorageEngineV2 storageEngine = StorageEngineV2.getInstance();
+ private final StorageEngine storageEngine = StorageEngine.getInstance();
private final DataNodeRegionManager regionManager = DataNodeRegionManager.getInstance();
@@ -352,7 +352,7 @@ public class DataNodeInternalRPCServiceImpl implements IDataNodeRPCService.Iface
}
TSStatus resultStatus =
- StorageEngineV2.getInstance()
+ StorageEngine.getInstance()
.writeLoadTsFileNode((DataRegionId) groupId, pieceNode, req.uuid);
return createTLoadResp(resultStatus);
@@ -362,7 +362,7 @@ public class DataNodeInternalRPCServiceImpl implements IDataNodeRPCService.Iface
public TLoadResp sendLoadCommand(TLoadCommandReq req) throws TException {
TSStatus resultStatus =
- StorageEngineV2.getInstance()
+ StorageEngine.getInstance()
.executeLoadCommand(
LoadTsFileScheduler.LoadCommand.values()[req.commandType], req.uuid);
return createTLoadResp(resultStatus);
diff --git a/server/src/main/java/org/apache/iotdb/db/service/thrift/impl/DataNodeRegionManager.java b/server/src/main/java/org/apache/iotdb/db/service/thrift/impl/DataNodeRegionManager.java
index 0f892bf519..f08bdac736 100644
--- a/server/src/main/java/org/apache/iotdb/db/service/thrift/impl/DataNodeRegionManager.java
+++ b/server/src/main/java/org/apache/iotdb/db/service/thrift/impl/DataNodeRegionManager.java
@@ -34,7 +34,7 @@ import org.apache.iotdb.consensus.common.Peer;
import org.apache.iotdb.consensus.common.response.ConsensusGenericResponse;
import org.apache.iotdb.db.consensus.DataRegionConsensusImpl;
import org.apache.iotdb.db.consensus.SchemaRegionConsensusImpl;
-import org.apache.iotdb.db.engine.StorageEngineV2;
+import org.apache.iotdb.db.engine.StorageEngine;
import org.apache.iotdb.db.exception.DataRegionException;
import org.apache.iotdb.db.metadata.schemaregion.SchemaEngine;
import org.apache.iotdb.rpc.RpcUtils;
@@ -58,7 +58,7 @@ public class DataNodeRegionManager {
private static final Logger LOGGER = LoggerFactory.getLogger(DataNodeRegionManager.class);
private final SchemaEngine schemaEngine = SchemaEngine.getInstance();
- private final StorageEngineV2 storageEngine = StorageEngineV2.getInstance();
+ private final StorageEngine storageEngine = StorageEngine.getInstance();
private final Map<SchemaRegionId, ReentrantReadWriteLock> schemaRegionLockMap =
new ConcurrentHashMap<>();
diff --git a/server/src/main/java/org/apache/iotdb/db/sync/sender/pipe/TsFilePipe.java b/server/src/main/java/org/apache/iotdb/db/sync/sender/pipe/TsFilePipe.java
index 26852c4429..4241610473 100644
--- a/server/src/main/java/org/apache/iotdb/db/sync/sender/pipe/TsFilePipe.java
+++ b/server/src/main/java/org/apache/iotdb/db/sync/sender/pipe/TsFilePipe.java
@@ -28,7 +28,7 @@ import org.apache.iotdb.commons.sync.pipe.PipeStatus;
import org.apache.iotdb.commons.sync.pipe.TsFilePipeInfo;
import org.apache.iotdb.commons.sync.pipesink.PipeSink;
import org.apache.iotdb.commons.sync.utils.SyncPathUtil;
-import org.apache.iotdb.db.engine.StorageEngineV2;
+import org.apache.iotdb.db.engine.StorageEngine;
import org.apache.iotdb.db.engine.modification.Deletion;
import org.apache.iotdb.db.engine.storagegroup.DataRegion;
import org.apache.iotdb.db.sync.pipedata.DeletionPipeData;
@@ -152,7 +152,7 @@ public class TsFilePipe implements Pipe {
senderManager.checkConnection();
// init sync manager
- List<DataRegion> dataRegions = StorageEngineV2.getInstance().getAllDataRegions();
+ List<DataRegion> dataRegions = StorageEngine.getInstance().getAllDataRegions();
for (DataRegion dataRegion : dataRegions) {
logger.info(
logFormat(
@@ -313,7 +313,7 @@ public class TsFilePipe implements Pipe {
id -> {
registerDataRegion(id);
return new LocalSyncManager(
- StorageEngineV2.getInstance().getDataRegion(new DataRegionId(Integer.parseInt(id))),
+ StorageEngine.getInstance().getDataRegion(new DataRegionId(Integer.parseInt(id))),
this);
});
}
diff --git a/server/src/main/java/org/apache/iotdb/db/sync/transport/client/SyncClientFactory.java b/server/src/main/java/org/apache/iotdb/db/sync/transport/client/SyncClientFactory.java
index f8bd4fa0ec..fbf85be9a6 100644
--- a/server/src/main/java/org/apache/iotdb/db/sync/transport/client/SyncClientFactory.java
+++ b/server/src/main/java/org/apache/iotdb/db/sync/transport/client/SyncClientFactory.java
@@ -21,7 +21,7 @@ package org.apache.iotdb.db.sync.transport.client;
import org.apache.iotdb.commons.consensus.DataRegionId;
import org.apache.iotdb.commons.sync.pipesink.IoTDBPipeSink;
import org.apache.iotdb.commons.sync.pipesink.PipeSink;
-import org.apache.iotdb.db.engine.StorageEngineV2;
+import org.apache.iotdb.db.engine.StorageEngine;
import org.apache.iotdb.db.engine.storagegroup.DataRegion;
import org.apache.iotdb.db.sync.sender.pipe.Pipe;
@@ -34,8 +34,7 @@ public class SyncClientFactory {
public static ISyncClient createSyncClient(Pipe pipe, PipeSink pipeSink, String dataRegionId) {
DataRegion dataRegion =
- StorageEngineV2.getInstance()
- .getDataRegion(new DataRegionId(Integer.parseInt(dataRegionId)));
+ StorageEngine.getInstance().getDataRegion(new DataRegionId(Integer.parseInt(dataRegionId)));
switch (pipeSink.getType()) {
case IoTDB:
IoTDBPipeSink ioTDBPipeSink = (IoTDBPipeSink) pipeSink;
diff --git a/server/src/main/java/org/apache/iotdb/db/tools/TsFileSplitByPartitionTool.java b/server/src/main/java/org/apache/iotdb/db/tools/TsFileSplitByPartitionTool.java
index a2f1762e1f..cee707e0ac 100644
--- a/server/src/main/java/org/apache/iotdb/db/tools/TsFileSplitByPartitionTool.java
+++ b/server/src/main/java/org/apache/iotdb/db/tools/TsFileSplitByPartitionTool.java
@@ -21,7 +21,7 @@ package org.apache.iotdb.db.tools;
import org.apache.iotdb.commons.exception.IllegalPathException;
import org.apache.iotdb.commons.path.PartialPath;
-import org.apache.iotdb.db.engine.StorageEngineV2;
+import org.apache.iotdb.db.engine.StorageEngine;
import org.apache.iotdb.db.engine.modification.Deletion;
import org.apache.iotdb.db.engine.modification.Modification;
import org.apache.iotdb.db.engine.modification.ModificationFile;
@@ -289,8 +289,8 @@ public class TsFileSplitByPartitionTool implements AutoCloseable {
}
}
}
- return StorageEngineV2.getTimePartition(pageHeader.getStartTime())
- != StorageEngineV2.getTimePartition(pageHeader.getEndTime());
+ return StorageEngine.getTimePartition(pageHeader.getStartTime())
+ != StorageEngine.getTimePartition(pageHeader.getEndTime());
}
/**
@@ -376,7 +376,7 @@ public class TsFileSplitByPartitionTool implements AutoCloseable {
ByteBuffer pageData,
Map<Long, ChunkWriterImpl> partitionChunkWriterMap)
throws PageException {
- long partitionId = StorageEngineV2.getTimePartition(pageHeader.getStartTime());
+ long partitionId = StorageEngine.getTimePartition(pageHeader.getStartTime());
getOrDefaultTsFileIOWriter(oldTsFile, partitionId);
ChunkWriterImpl chunkWriter =
partitionChunkWriterMap.computeIfAbsent(partitionId, v -> new ChunkWriterImpl(schema));
@@ -431,7 +431,7 @@ public class TsFileSplitByPartitionTool implements AutoCloseable {
while (batchData.hasCurrent()) {
long time = batchData.currentTime();
Object value = batchData.currentValue();
- long partitionId = StorageEngineV2.getTimePartition(time);
+ long partitionId = StorageEngine.getTimePartition(time);
ChunkWriterImpl chunkWriter =
partitionChunkWriterMap.computeIfAbsent(partitionId, v -> new ChunkWriterImpl(schema));
diff --git a/server/src/main/java/org/apache/iotdb/db/tools/upgrade/TsFileOnlineUpgradeTool.java b/server/src/main/java/org/apache/iotdb/db/tools/upgrade/TsFileOnlineUpgradeTool.java
index 99bf57b8af..976b5d178a 100644
--- a/server/src/main/java/org/apache/iotdb/db/tools/upgrade/TsFileOnlineUpgradeTool.java
+++ b/server/src/main/java/org/apache/iotdb/db/tools/upgrade/TsFileOnlineUpgradeTool.java
@@ -19,7 +19,7 @@
package org.apache.iotdb.db.tools.upgrade;
import org.apache.iotdb.commons.exception.IllegalPathException;
-import org.apache.iotdb.db.engine.StorageEngineV2;
+import org.apache.iotdb.db.engine.StorageEngine;
import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
import org.apache.iotdb.db.tools.TsFileSplitByPartitionTool;
import org.apache.iotdb.tsfile.common.conf.TSFileConfig;
@@ -250,8 +250,8 @@ public class TsFileOnlineUpgradeTool extends TsFileSplitByPartitionTool {
return dataType == TSDataType.BOOLEAN
|| dataType == TSDataType.TEXT
|| (dataType == TSDataType.INT32 && encoding == TSEncoding.PLAIN)
- || StorageEngineV2.getTimePartition(pageHeader.getStartTime())
- != StorageEngineV2.getTimePartition(pageHeader.getEndTime())
+ || StorageEngine.getTimePartition(pageHeader.getStartTime())
+ != StorageEngine.getTimePartition(pageHeader.getEndTime())
|| super.checkIfNeedToDecode(schema, deviceId, pageHeader, chunkHeaderOffset);
}
diff --git a/server/src/main/java/org/apache/iotdb/db/utils/ThreadUtils.java b/server/src/main/java/org/apache/iotdb/db/utils/ThreadUtils.java
index 1db7754d44..0f02044646 100644
--- a/server/src/main/java/org/apache/iotdb/db/utils/ThreadUtils.java
+++ b/server/src/main/java/org/apache/iotdb/db/utils/ThreadUtils.java
@@ -19,7 +19,6 @@
package org.apache.iotdb.db.utils;
import org.apache.iotdb.commons.concurrent.ThreadName;
-import org.apache.iotdb.db.engine.StorageEngine;
import org.apache.iotdb.db.exception.runtime.StorageEngineFailureException;
import org.slf4j.Logger;
@@ -31,7 +30,7 @@ import java.util.concurrent.TimeUnit;
/** the utils for managing thread or thread pool */
public class ThreadUtils {
- private static final Logger logger = LoggerFactory.getLogger(StorageEngine.class);
+ private static final Logger logger = LoggerFactory.getLogger(ThreadUtils.class);
public static void stopThreadPool(ExecutorService pool, ThreadName poolName) {
if (pool != null) {
diff --git a/server/src/main/java/org/apache/iotdb/db/wal/node/WALNode.java b/server/src/main/java/org/apache/iotdb/db/wal/node/WALNode.java
index 381ab2b30c..c91e59f4d2 100644
--- a/server/src/main/java/org/apache/iotdb/db/wal/node/WALNode.java
+++ b/server/src/main/java/org/apache/iotdb/db/wal/node/WALNode.java
@@ -27,7 +27,7 @@ import org.apache.iotdb.consensus.common.request.IndexedConsensusRequest;
import org.apache.iotdb.consensus.common.request.IoTConsensusRequest;
import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.engine.StorageEngineV2;
+import org.apache.iotdb.db.engine.StorageEngine;
import org.apache.iotdb.db.engine.flush.FlushStatus;
import org.apache.iotdb.db.engine.memtable.IMemTable;
import org.apache.iotdb.db.engine.storagegroup.DataRegion;
@@ -337,7 +337,7 @@ public class WALNode implements IWALNode {
DataRegion dataRegion;
try {
dataRegion =
- StorageEngineV2.getInstance()
+ StorageEngine.getInstance()
.getDataRegion(new DataRegionId(TsFileUtils.getDataRegionId(oldestTsFile)));
} catch (Exception e) {
logger.error("Fail to get data region processor for {}", oldestTsFile, e);
diff --git a/server/src/test/java/org/apache/iotdb/db/engine/StorageEngineV2Test.java b/server/src/test/java/org/apache/iotdb/db/engine/StorageEngineTest.java
similarity index 87%
rename from server/src/test/java/org/apache/iotdb/db/engine/StorageEngineV2Test.java
rename to server/src/test/java/org/apache/iotdb/db/engine/StorageEngineTest.java
index a53f186093..e47e910f93 100644
--- a/server/src/test/java/org/apache/iotdb/db/engine/StorageEngineV2Test.java
+++ b/server/src/test/java/org/apache/iotdb/db/engine/StorageEngineTest.java
@@ -37,18 +37,18 @@ import java.util.List;
@PowerMockIgnore({"com.sun.org.apache.xerces.*", "javax.xml.*", "org.xml.*", "javax.management.*"})
@RunWith(PowerMockRunner.class)
@PrepareForTest(DataRegion.class)
-public class StorageEngineV2Test {
+public class StorageEngineTest {
- private StorageEngineV2 storageEngineV2;
+ private StorageEngine storageEngine;
@Before
public void setUp() {
- storageEngineV2 = StorageEngineV2.getInstance();
+ storageEngine = StorageEngine.getInstance();
}
@After
public void after() {
- storageEngineV2 = null;
+ storageEngine = null;
}
@Test
@@ -57,11 +57,11 @@ public class StorageEngineV2Test {
DataRegion rg1 = PowerMockito.mock(DataRegion.class);
DataRegion rg2 = PowerMockito.mock(DataRegion.class);
DataRegionId id2 = new DataRegionId(2);
- storageEngineV2.setDataRegion(id1, rg1);
- storageEngineV2.setDataRegion(id2, rg2);
+ storageEngine.setDataRegion(id1, rg1);
+ storageEngine.setDataRegion(id2, rg2);
List<DataRegionId> actual = Lists.newArrayList(id1, id2);
- List<DataRegionId> expect = storageEngineV2.getAllDataRegionIds();
+ List<DataRegionId> expect = storageEngine.getAllDataRegionIds();
Assert.assertEquals(expect.size(), actual.size());
Assert.assertTrue(actual.containsAll(expect));
diff --git a/server/src/test/java/org/apache/iotdb/db/engine/storagegroup/DataRegionTest.java b/server/src/test/java/org/apache/iotdb/db/engine/storagegroup/DataRegionTest.java
index 11d421e8d8..7e11d6d7a2 100644
--- a/server/src/test/java/org/apache/iotdb/db/engine/storagegroup/DataRegionTest.java
+++ b/server/src/test/java/org/apache/iotdb/db/engine/storagegroup/DataRegionTest.java
@@ -30,7 +30,7 @@ import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.constant.TestConstant;
import org.apache.iotdb.db.engine.MetadataManagerHelper;
-import org.apache.iotdb.db.engine.StorageEngineV2;
+import org.apache.iotdb.db.engine.StorageEngine;
import org.apache.iotdb.db.engine.compaction.CompactionTaskManager;
import org.apache.iotdb.db.engine.compaction.inner.InnerSpaceCompactionTask;
import org.apache.iotdb.db.engine.compaction.log.CompactionLogger;
@@ -90,7 +90,7 @@ public class DataRegionTest {
MetadataManagerHelper.initMetadata();
EnvironmentUtils.envSetUp();
dataRegion = new DummyDataRegion(systemDir, storageGroup);
- StorageEngineV2.getInstance().setDataRegion(new DataRegionId(0), dataRegion);
+ StorageEngine.getInstance().setDataRegion(new DataRegionId(0), dataRegion);
CompactionTaskManager.getInstance().start();
}
@@ -98,7 +98,7 @@ public class DataRegionTest {
public void tearDown() throws Exception {
if (dataRegion != null) {
dataRegion.syncDeleteDataFiles();
- StorageEngineV2.getInstance().deleteDataRegion(new DataRegionId(0));
+ StorageEngine.getInstance().deleteDataRegion(new DataRegionId(0));
}
EnvironmentUtils.cleanEnv();
EnvironmentUtils.cleanDir(TestConstant.OUTPUT_DATA_DIR);
@@ -821,7 +821,7 @@ public class DataRegionTest {
0);
CompactionTaskManager.getInstance().addTaskToWaitingQueue(task);
Thread.sleep(20);
- List<DataRegion> dataRegions = StorageEngineV2.getInstance().getAllDataRegions();
+ List<DataRegion> dataRegions = StorageEngine.getInstance().getAllDataRegions();
List<DataRegion> regionsToBeDeleted = new ArrayList<>();
for (DataRegion region : dataRegions) {
if (region.getStorageGroupName().equals(storageGroup)) {
@@ -829,7 +829,7 @@ public class DataRegionTest {
}
}
for (DataRegion region : regionsToBeDeleted) {
- StorageEngineV2.getInstance()
+ StorageEngine.getInstance()
.deleteDataRegion(new DataRegionId(Integer.parseInt(region.getDataRegionId())));
}
Thread.sleep(500);
@@ -870,7 +870,7 @@ public class DataRegionTest {
long preFLushInterval = config.getSeqMemtableFlushInterval();
config.setEnableTimedFlushSeqMemtable(true);
config.setSeqMemtableFlushInterval(5);
- StorageEngineV2.getInstance().rebootTimedService();
+ StorageEngine.getInstance().rebootTimedService();
Thread.sleep(500);
@@ -925,7 +925,7 @@ public class DataRegionTest {
long preFLushInterval = config.getUnseqMemtableFlushInterval();
config.setEnableTimedFlushUnseqMemtable(true);
config.setUnseqMemtableFlushInterval(5);
- StorageEngineV2.getInstance().rebootTimedService();
+ StorageEngine.getInstance().rebootTimedService();
Thread.sleep(500);
@@ -1009,7 +1009,7 @@ public class DataRegionTest {
}
}
- List<DataRegion> dataRegions = StorageEngineV2.getInstance().getAllDataRegions();
+ List<DataRegion> dataRegions = StorageEngine.getInstance().getAllDataRegions();
List<DataRegion> regionsToBeDeleted = new ArrayList<>();
for (DataRegion region : dataRegions) {
if (region.getStorageGroupName().equals(storageGroup)) {
@@ -1017,7 +1017,7 @@ public class DataRegionTest {
}
}
for (DataRegion region : regionsToBeDeleted) {
- StorageEngineV2.getInstance()
+ StorageEngine.getInstance()
.deleteDataRegion(new DataRegionId(Integer.parseInt(region.getDataRegionId())));
}
Thread.sleep(500);
diff --git a/server/src/test/java/org/apache/iotdb/db/engine/storagegroup/TsFileProcessorV2Test.java b/server/src/test/java/org/apache/iotdb/db/engine/storagegroup/TsFileProcessorTest.java
similarity index 99%
rename from server/src/test/java/org/apache/iotdb/db/engine/storagegroup/TsFileProcessorV2Test.java
rename to server/src/test/java/org/apache/iotdb/db/engine/storagegroup/TsFileProcessorTest.java
index 49b359af67..702aca7c7e 100644
--- a/server/src/test/java/org/apache/iotdb/db/engine/storagegroup/TsFileProcessorV2Test.java
+++ b/server/src/test/java/org/apache/iotdb/db/engine/storagegroup/TsFileProcessorTest.java
@@ -66,7 +66,7 @@ import static org.apache.iotdb.db.engine.storagegroup.DataRegionTest.buildInsert
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
-public class TsFileProcessorV2Test {
+public class TsFileProcessorTest {
private TsFileProcessor processor;
private final String storageGroup = "root.vehicle";
@@ -79,9 +79,9 @@ public class TsFileProcessorV2Test {
private final Map<String, String> props = Collections.emptyMap();
private QueryContext context;
private final String systemDir = TestConstant.OUTPUT_DATA_DIR.concat("info");
- private static final Logger logger = LoggerFactory.getLogger(TsFileProcessorV2Test.class);
+ private static final Logger logger = LoggerFactory.getLogger(TsFileProcessorTest.class);
- public TsFileProcessorV2Test() {}
+ public TsFileProcessorTest() {}
@Before
public void setUp() throws DataRegionException {
diff --git a/server/src/test/java/org/apache/iotdb/db/mpp/plan/StandaloneCoordinatorTest.java b/server/src/test/java/org/apache/iotdb/db/mpp/plan/StandaloneCoordinatorTest.java
index abaff37ba7..060990cfcb 100644
--- a/server/src/test/java/org/apache/iotdb/db/mpp/plan/StandaloneCoordinatorTest.java
+++ b/server/src/test/java/org/apache/iotdb/db/mpp/plan/StandaloneCoordinatorTest.java
@@ -23,7 +23,7 @@ import org.apache.iotdb.commons.exception.IllegalPathException;
import org.apache.iotdb.commons.path.PartialPath;
import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.engine.StorageEngineV2;
+import org.apache.iotdb.db.engine.StorageEngine;
import org.apache.iotdb.db.engine.flush.FlushManager;
import org.apache.iotdb.db.localconfignode.LocalConfigNode;
import org.apache.iotdb.db.mpp.common.SessionInfo;
@@ -72,7 +72,7 @@ public class StandaloneCoordinatorTest {
configNode.init();
WALManager.getInstance().start();
FlushManager.getInstance().start();
- StorageEngineV2.getInstance().start();
+ StorageEngine.getInstance().start();
}
@After
@@ -81,7 +81,7 @@ public class StandaloneCoordinatorTest {
WALManager.getInstance().clear();
WALRecoverManager.getInstance().clear();
WALManager.getInstance().stop();
- StorageEngineV2.getInstance().stop();
+ StorageEngine.getInstance().stop();
FlushManager.getInstance().stop();
EnvironmentUtils.cleanAllDir();
conf.setDataNodeId(-1);
diff --git a/server/src/test/java/org/apache/iotdb/db/mpp/plan/scheduler/StandaloneSchedulerTest.java b/server/src/test/java/org/apache/iotdb/db/mpp/plan/scheduler/StandaloneSchedulerTest.java
index a3bdfc2da8..ad2d8a1e4c 100644
--- a/server/src/test/java/org/apache/iotdb/db/mpp/plan/scheduler/StandaloneSchedulerTest.java
+++ b/server/src/test/java/org/apache/iotdb/db/mpp/plan/scheduler/StandaloneSchedulerTest.java
@@ -29,7 +29,7 @@ import org.apache.iotdb.commons.exception.MetadataException;
import org.apache.iotdb.commons.path.PartialPath;
import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.engine.StorageEngineV2;
+import org.apache.iotdb.db.engine.StorageEngine;
import org.apache.iotdb.db.engine.flush.FlushManager;
import org.apache.iotdb.db.engine.storagegroup.DataRegionTest;
import org.apache.iotdb.db.exception.DataRegionException;
@@ -88,7 +88,7 @@ public class StandaloneSchedulerTest {
configNode.init();
WALManager.getInstance().start();
FlushManager.getInstance().start();
- StorageEngineV2.getInstance().start();
+ StorageEngine.getInstance().start();
LocalDataPartitionTable.DataRegionIdGenerator.getInstance().reset();
}
@@ -96,7 +96,7 @@ public class StandaloneSchedulerTest {
public void tearDown() throws Exception {
configNode.clear();
WALManager.getInstance().stop();
- StorageEngineV2.getInstance().stop();
+ StorageEngine.getInstance().stop();
FlushManager.getInstance().stop();
EnvironmentUtils.cleanAllDir();
conf.setDataNodeId(-1);
diff --git a/server/src/test/java/org/apache/iotdb/db/tools/TsFileAndModSettleToolTest.java b/server/src/test/java/org/apache/iotdb/db/tools/TsFileAndModSettleToolTest.java
index 44a9453a38..a647566ae4 100644
--- a/server/src/test/java/org/apache/iotdb/db/tools/TsFileAndModSettleToolTest.java
+++ b/server/src/test/java/org/apache/iotdb/db/tools/TsFileAndModSettleToolTest.java
@@ -18,176 +18,203 @@
*/
package org.apache.iotdb.db.tools;
-// public class TsFileAndModSettleToolTest {
-// private final long newPartitionInterval = 3600_000;
-// protected final long maxTimestamp = 50000L; // 100000000L;
-// protected final String folder = "target" + File.separator + "settle";
-// protected final String STORAGE_GROUP = "root.sg_0";
-// protected final String DEVICE1 = STORAGE_GROUP + ".device_1";
-// protected final String DEVICE2 = STORAGE_GROUP + ".device_2";
-// protected final String SENSOR1 = "sensor_1";
-// protected final String SENSOR2 = "sensor_2";
-// private final long VALUE_OFFSET = 1;
-// private final Planner processor = new Planner();
-// private String path = null;
-// private IoTDBConfig config;
-// private long originPartitionInterval;
-//
-// @Before
-// public void setUp() {
-// EnvironmentUtils.envSetUp();
-//
-// config = IoTDBDescriptor.getInstance().getConfig();
-// originPartitionInterval = config.getTimePartitionIntervalForStorage();
-//
-// config.setTimePartitionIntervalForStorage(newPartitionInterval);
-//
-// StorageEngineV2.setPartitionIntervalForStorage(newPartitionInterval);
-//
-// File f = new File(folder);
-// if (!f.exists()) {
-// boolean success = f.mkdir();
-// Assert.assertTrue(success);
-// }
-// path = folder + File.separator + System.currentTimeMillis() + "-" + 0 + "-0.tsfile";
-// }
-//
-// @After
-// public void tearDown() {
-// File[] fileLists = FSFactoryProducer.getFSFactory().listFilesBySuffix(folder, TSFILE_SUFFIX);
-// for (File f : fileLists) {
-// if (f.exists()) {
-// boolean deleteSuccess = f.delete();
-// Assert.assertTrue(deleteSuccess);
-// }
-// }
-// config.setTimePartitionIntervalForStorage(originPartitionInterval);
-//
-// StorageEngine.setTimePartitionInterval(originPartitionInterval);
-//
-// File directory = new File(folder);
-// try {
-// FileUtils.deleteDirectory(directory);
-// } catch (IOException e) {
-// Assert.fail(e.getMessage());
-// }
-// try {
-// EnvironmentUtils.cleanEnv();
-// } catch (Exception e) {
-// Assert.fail(e.getMessage());
-// }
-// }
-//
-// @Test
-// public void settleTsFilesAndModsTest() { // offline settleTool test
-// try {
-// List<TsFileResource> resourcesToBeSettled = createFiles();
-// List<TsFileResource> settledResources = new ArrayList<>();
-// for (TsFileResource oldResource : resourcesToBeSettled) {
-// TsFileAndModSettleTool tsFileAndModSettleTool = TsFileAndModSettleTool.getInstance();
-// tsFileAndModSettleTool.settleOneTsFileAndMod(oldResource, settledResources);
-// }
-// } catch (Exception e) {
-// Assert.fail(e.getMessage());
-// }
-// }
-//
-// public List<TsFileResource> createFiles() throws IOException, InterruptedException {
-// List<TsFileResource> resourcesToBeSettled = new ArrayList<>();
-// HashMap<String, List<String>> deviceSensorsMap = new HashMap<>();
-// List<String> sensors = new ArrayList<>();
-//
-// // first File
-// sensors.add(SENSOR1);
-// deviceSensorsMap.put(DEVICE1, sensors);
-// String timeseriesPath = STORAGE_GROUP + DEVICE1 + SENSOR1;
-// createFile(resourcesToBeSettled, deviceSensorsMap, timeseriesPath);
-//
-// // second file
-// path = folder + File.separator + System.currentTimeMillis() + "-" + 0 + "-0.tsfile";
-// sensors.add(SENSOR2);
-// deviceSensorsMap.put(DEVICE1, sensors);
-// timeseriesPath = STORAGE_GROUP + DEVICE1 + SENSOR2;
-// createFile(resourcesToBeSettled, deviceSensorsMap, timeseriesPath);
-//
-// Thread.sleep(100);
-// // third file
-// path = folder + File.separator + System.currentTimeMillis() + "-" + 0 + "-0.tsfile";
-// createOneTsFile(deviceSensorsMap);
-// TsFileResource tsFileResource = new TsFileResource(new File(path));
-// tsFileResource.serialize();
-// tsFileResource.close();
-// resourcesToBeSettled.add(tsFileResource);
-//
-// return resourcesToBeSettled;
-// }
-//
-// private void createFile(
-// List<TsFileResource> resourcesToBeSettled,
-// HashMap<String, List<String>> deviceSensorsMap,
-// String timeseriesPath)
-// throws IOException {
-// createOneTsFile(deviceSensorsMap);
-// createlModificationFile(timeseriesPath);
-// TsFileResource tsFileResource = new TsFileResource(new File(path));
-// tsFileResource.setModFile(
-// new ModificationFile(tsFileResource.getTsFilePath() + ModificationFile.FILE_SUFFIX));
-// tsFileResource.serialize();
-// tsFileResource.close();
-// resourcesToBeSettled.add(tsFileResource);
-// }
-//
-// public void createlModificationFile(String timeseriesPath) {
-// String modFilePath = path + ModificationFile.FILE_SUFFIX;
-// ModificationFile modificationFile = new ModificationFile(modFilePath);
-// List<Modification> mods = new ArrayList<>();
-// try {
-// PartialPath partialPath = new PartialPath(timeseriesPath);
-// mods.add(new Deletion(partialPath, 10000000, 1500, 10000));
-// mods.add(new Deletion(partialPath, 10000000, 20000, 30000));
-// mods.add(new Deletion(partialPath, 10000000, 45000, 50000));
-// for (Modification mod : mods) {
-// modificationFile.write(mod);
-// }
-// modificationFile.close();
-// } catch (IllegalPathException | IOException e) {
-// Assert.fail(e.getMessage());
-// }
-// }
-//
-// protected void createOneTsFile(HashMap<String, List<String>> deviceSensorsMap) {
-// try {
-// File f = FSFactoryProducer.getFSFactory().getFile(path);
-// TsFileWriter tsFileWriter = new TsFileWriter(f);
-// // add measurements into file schema
-// try {
-// for (Map.Entry<String, List<String>> entry : deviceSensorsMap.entrySet()) {
-// String device = entry.getKey();
-// for (String sensor : entry.getValue()) {
-// tsFileWriter.registerTimeseries(
-// new Path(device), new MeasurementSchema(sensor, TSDataType.INT64,
-// TSEncoding.RLE));
-// }
-// }
-// } catch (WriteProcessException e) {
-// Assert.fail(e.getMessage());
-// }
-//
-// for (long timestamp = 0; timestamp < maxTimestamp; timestamp += 1000) {
-// for (Map.Entry<String, List<String>> entry : deviceSensorsMap.entrySet()) {
-// String device = entry.getKey();
-// TSRecord tsRecord = new TSRecord(timestamp, device);
-// for (String sensor : entry.getValue()) {
-// DataPoint dataPoint = new LongDataPoint(sensor, timestamp + VALUE_OFFSET);
-// tsRecord.addTuple(dataPoint);
-// }
-// tsFileWriter.write(tsRecord);
-// }
-// }
-// tsFileWriter.flushAllChunkGroups();
-// tsFileWriter.close();
-// } catch (Throwable e) {
-// Assert.fail(e.getMessage());
-// }
-// }
-// }
+import org.apache.iotdb.commons.exception.IllegalPathException;
+import org.apache.iotdb.commons.path.PartialPath;
+import org.apache.iotdb.commons.utils.FileUtils;
+import org.apache.iotdb.db.conf.IoTDBConfig;
+import org.apache.iotdb.db.conf.IoTDBDescriptor;
+import org.apache.iotdb.db.engine.modification.Deletion;
+import org.apache.iotdb.db.engine.modification.Modification;
+import org.apache.iotdb.db.engine.modification.ModificationFile;
+import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
+import org.apache.iotdb.db.qp.Planner;
+import org.apache.iotdb.db.tools.settle.TsFileAndModSettleTool;
+import org.apache.iotdb.db.utils.EnvironmentUtils;
+import org.apache.iotdb.tsfile.exception.write.WriteProcessException;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.iotdb.tsfile.fileSystem.FSFactoryProducer;
+import org.apache.iotdb.tsfile.read.common.Path;
+import org.apache.iotdb.tsfile.write.TsFileWriter;
+import org.apache.iotdb.tsfile.write.record.TSRecord;
+import org.apache.iotdb.tsfile.write.record.datapoint.DataPoint;
+import org.apache.iotdb.tsfile.write.record.datapoint.LongDataPoint;
+import org.apache.iotdb.tsfile.write.schema.MeasurementSchema;
+
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.iotdb.tsfile.common.constant.TsFileConstant.TSFILE_SUFFIX;
+
+// Test for the offline TsFileAndModSettleTool: creates TsFiles (some with
+// accompanying .mods deletion files) and verifies that settling them does not throw.
+public class TsFileAndModSettleToolTest {
+  // Partition interval used for this test; restored to the original value in tearDown().
+  private final long newPartitionInterval = 3600_000;
+  protected final long maxTimestamp = 50000L; // 100000000L;
+  protected final String folder = "target" + File.separator + "settle";
+  protected final String STORAGE_GROUP = "root.sg_0";
+  protected final String DEVICE1 = STORAGE_GROUP + ".device_1";
+  protected final String DEVICE2 = STORAGE_GROUP + ".device_2";
+  protected final String SENSOR1 = "sensor_1";
+  protected final String SENSOR2 = "sensor_2";
+  private final long VALUE_OFFSET = 1;
+  private final Planner processor = new Planner();
+  // Path of the TsFile currently being written; reassigned per generated file.
+  private String path = null;
+  private IoTDBConfig config;
+  // Saved so tearDown() can restore the partition interval after the test.
+  private long originPartitionInterval;
+
+  @Before
+  public void setUp() {
+    config = IoTDBDescriptor.getInstance().getConfig();
+    originPartitionInterval = config.getTimePartitionInterval();
+    config.setTimePartitionInterval(newPartitionInterval);
+    EnvironmentUtils.envSetUp();
+
+    // Ensure the output folder exists before any TsFile is written into it.
+    File f = new File(folder);
+    if (!f.exists()) {
+      boolean success = f.mkdir();
+      Assert.assertTrue(success);
+    }
+    path = folder + File.separator + System.currentTimeMillis() + "-" + 0 + "-0.tsfile";
+  }
+
+  @After
+  public void tearDown() {
+    // Delete generated .tsfile files first, then remove the whole folder.
+    File[] fileLists = FSFactoryProducer.getFSFactory().listFilesBySuffix(folder, TSFILE_SUFFIX);
+    for (File f : fileLists) {
+      if (f.exists()) {
+        boolean deleteSuccess = f.delete();
+        Assert.assertTrue(deleteSuccess);
+      }
+    }
+
+    File directory = new File(folder);
+    FileUtils.deleteDirectory(directory);
+    try {
+      EnvironmentUtils.cleanEnv();
+    } catch (Exception e) {
+      Assert.fail(e.getMessage());
+    } finally {
+      // Always restore the global partition interval, even if cleanup failed.
+      config.setTimePartitionInterval(originPartitionInterval);
+    }
+  }
+
+  @Test
+  public void settleTsFilesAndModsTest() { // offline settleTool test
+    try {
+      // Settle each prepared resource; the test passes if no exception is thrown.
+      List<TsFileResource> resourcesToBeSettled = createFiles();
+      List<TsFileResource> settledResources = new ArrayList<>();
+      for (TsFileResource oldResource : resourcesToBeSettled) {
+        TsFileAndModSettleTool tsFileAndModSettleTool = TsFileAndModSettleTool.getInstance();
+        tsFileAndModSettleTool.settleOneTsFileAndMod(oldResource, settledResources);
+      }
+    } catch (Exception e) {
+      Assert.fail(e.getMessage());
+    }
+  }
+
+  // Builds three TsFiles: the first two with modification (.mods) files, the third
+  // without. Returns their resources for settling.
+  public List<TsFileResource> createFiles() throws IOException, InterruptedException {
+    List<TsFileResource> resourcesToBeSettled = new ArrayList<>();
+    HashMap<String, List<String>> deviceSensorsMap = new HashMap<>();
+    List<String> sensors = new ArrayList<>();
+
+    // first File
+    sensors.add(SENSOR1);
+    deviceSensorsMap.put(DEVICE1, sensors);
+    // NOTE(review): concatenation has no dot separators, and DEVICE1 already starts
+    // with STORAGE_GROUP, so this yields e.g. "root.sg_0root.sg_0.device_1sensor_1" —
+    // presumably "DEVICE1 + "." + SENSOR1" was intended; confirm against the tool's
+    // expectations before relying on the deletions actually matching written data.
+    String timeseriesPath = STORAGE_GROUP + DEVICE1 + SENSOR1;
+    createFile(resourcesToBeSettled, deviceSensorsMap, timeseriesPath);
+
+    // second file
+    path = folder + File.separator + System.currentTimeMillis() + "-" + 0 + "-0.tsfile";
+    // The same list instance is already mapped to DEVICE1, so adding SENSOR2 mutates
+    // the map entry in place; the put below is a no-op re-insertion of the same list.
+    sensors.add(SENSOR2);
+    deviceSensorsMap.put(DEVICE1, sensors);
+    timeseriesPath = STORAGE_GROUP + DEVICE1 + SENSOR2;
+    createFile(resourcesToBeSettled, deviceSensorsMap, timeseriesPath);
+
+    // Sleep so the millisecond-timestamped file name of the third file is distinct.
+    Thread.sleep(100);
+    // third file
+    path = folder + File.separator + System.currentTimeMillis() + "-" + 0 + "-0.tsfile";
+    createOneTsFile(deviceSensorsMap);
+    // Third file intentionally has no modification file attached.
+    TsFileResource tsFileResource = new TsFileResource(new File(path));
+    tsFileResource.serialize();
+    tsFileResource.close();
+    resourcesToBeSettled.add(tsFileResource);
+
+    return resourcesToBeSettled;
+  }
+
+  // Writes one TsFile at the current `path`, creates its .mods file for
+  // `timeseriesPath`, and appends the resulting resource to `resourcesToBeSettled`.
+  private void createFile(
+      List<TsFileResource> resourcesToBeSettled,
+      HashMap<String, List<String>> deviceSensorsMap,
+      String timeseriesPath)
+      throws IOException {
+    createOneTsFile(deviceSensorsMap);
+    createlModificationFile(timeseriesPath);
+    TsFileResource tsFileResource = new TsFileResource(new File(path));
+    tsFileResource.setModFile(
+        new ModificationFile(tsFileResource.getTsFilePath() + ModificationFile.FILE_SUFFIX));
+    tsFileResource.serialize();
+    tsFileResource.close();
+    resourcesToBeSettled.add(tsFileResource);
+  }
+
+  // Writes three Deletion entries for `timeseriesPath` into a .mods file next to
+  // the current TsFile. (Method name misspelled — "createl" — kept as-is because
+  // it is part of the committed patch and is called above.)
+  public void createlModificationFile(String timeseriesPath) {
+    String modFilePath = path + ModificationFile.FILE_SUFFIX;
+    ModificationFile modificationFile = new ModificationFile(modFilePath);
+    List<Modification> mods = new ArrayList<>();
+    try {
+      PartialPath partialPath = new PartialPath(timeseriesPath);
+      mods.add(new Deletion(partialPath, 10000000, 1500, 10000));
+      mods.add(new Deletion(partialPath, 10000000, 20000, 30000));
+      mods.add(new Deletion(partialPath, 10000000, 45000, 50000));
+      for (Modification mod : mods) {
+        modificationFile.write(mod);
+      }
+      modificationFile.close();
+    } catch (IllegalPathException | IOException e) {
+      Assert.fail(e.getMessage());
+    }
+  }
+
+  // Writes a TsFile at `path` containing INT64/RLE series for every device/sensor in
+  // the map, with one data point per second from 0 up to maxTimestamp (exclusive),
+  // value = timestamp + VALUE_OFFSET.
+  protected void createOneTsFile(HashMap<String, List<String>> deviceSensorsMap) {
+    try {
+      File f = FSFactoryProducer.getFSFactory().getFile(path);
+      TsFileWriter tsFileWriter = new TsFileWriter(f);
+      // add measurements into file schema
+      try {
+        for (Map.Entry<String, List<String>> entry : deviceSensorsMap.entrySet()) {
+          String device = entry.getKey();
+          for (String sensor : entry.getValue()) {
+            tsFileWriter.registerTimeseries(
+                new Path(device), new MeasurementSchema(sensor, TSDataType.INT64, TSEncoding.RLE));
+          }
+        }
+      } catch (WriteProcessException e) {
+        Assert.fail(e.getMessage());
+      }
+
+      for (long timestamp = 0; timestamp < maxTimestamp; timestamp += 1000) {
+        for (Map.Entry<String, List<String>> entry : deviceSensorsMap.entrySet()) {
+          String device = entry.getKey();
+          TSRecord tsRecord = new TSRecord(timestamp, device);
+          for (String sensor : entry.getValue()) {
+            DataPoint dataPoint = new LongDataPoint(sensor, timestamp + VALUE_OFFSET);
+            tsRecord.addTuple(dataPoint);
+          }
+          tsFileWriter.write(tsRecord);
+        }
+      }
+      tsFileWriter.flushAllChunkGroups();
+      tsFileWriter.close();
+    } catch (Throwable e) {
+      Assert.fail(e.getMessage());
+    }
+  }
+}
diff --git a/server/src/test/java/org/apache/iotdb/db/utils/EnvironmentUtils.java b/server/src/test/java/org/apache/iotdb/db/utils/EnvironmentUtils.java
index 00a979691e..a71d5e81b3 100644
--- a/server/src/test/java/org/apache/iotdb/db/utils/EnvironmentUtils.java
+++ b/server/src/test/java/org/apache/iotdb/db/utils/EnvironmentUtils.java
@@ -28,7 +28,7 @@ import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.conf.directories.DirectoryManager;
import org.apache.iotdb.db.constant.TestConstant;
-import org.apache.iotdb.db.engine.StorageEngineV2;
+import org.apache.iotdb.db.engine.StorageEngine;
import org.apache.iotdb.db.engine.cache.BloomFilterCache;
import org.apache.iotdb.db.engine.cache.ChunkCache;
import org.apache.iotdb.db.engine.cache.TimeSeriesMetadataCache;
@@ -141,7 +141,7 @@ public class EnvironmentUtils {
WALManager.getInstance().clear();
WALRecoverManager.getInstance().clear();
- StorageEngineV2.getInstance().stop();
+ StorageEngine.getInstance().stop();
CommonDescriptor.getInstance().getConfig().setNodeStatus(NodeStatus.Running);
// We must disable MQTT service as it will cost a lot of time to be shutdown, which may slow our
diff --git a/spark-iotdb-connector/src/test/scala/org/apache/iotdb/spark/db/EnvironmentUtils.java b/spark-iotdb-connector/src/test/scala/org/apache/iotdb/spark/db/EnvironmentUtils.java
index bdf607b028..7794821e81 100644
--- a/spark-iotdb-connector/src/test/scala/org/apache/iotdb/spark/db/EnvironmentUtils.java
+++ b/spark-iotdb-connector/src/test/scala/org/apache/iotdb/spark/db/EnvironmentUtils.java
@@ -29,7 +29,7 @@ import org.apache.iotdb.commons.exception.StartupException;
import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.conf.directories.DirectoryManager;
-import org.apache.iotdb.db.engine.StorageEngineV2;
+import org.apache.iotdb.db.engine.StorageEngine;
import org.apache.iotdb.db.engine.cache.BloomFilterCache;
import org.apache.iotdb.db.engine.cache.ChunkCache;
import org.apache.iotdb.db.engine.cache.TimeSeriesMetadataCache;
@@ -42,7 +42,6 @@ import org.apache.iotdb.db.service.IoTDB;
import org.apache.iotdb.commons.service.metric.MetricService;
import org.apache.iotdb.db.wal.WALManager;
import org.apache.iotdb.jdbc.Config;
-import org.junit.Assert;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -111,7 +110,7 @@ public class EnvironmentUtils {
FileReaderManager.getInstance().closeAndRemoveAllOpenedReaders();
// clean database manager
- StorageEngineV2.getInstance().reset();
+ StorageEngine.getInstance().reset();
CommonDescriptor.getInstance().getConfig().setNodeStatus(NodeStatus.Running);
// clean wal
@@ -173,7 +172,7 @@ public class EnvironmentUtils {
} catch (AuthException e) {
throw new StartupException(e);
}
- StorageEngineV2.getInstance().reset();
+ StorageEngine.getInstance().reset();
WALManager.getInstance().start();
FlushManager.getInstance().start();
TEST_QUERY_JOB_ID = QueryResourceManager.getInstance().assignQueryId();