Posted to commits@iotdb.apache.org by le...@apache.org on 2022/10/11 11:35:31 UTC
[iotdb] branch research/M4-visualization updated: 25%
This is an automated email from the ASF dual-hosted git repository.
leirui pushed a commit to branch research/M4-visualization
in repository https://gitbox.apache.org/repos/asf/iotdb.git
The following commit(s) were added to refs/heads/research/M4-visualization by this push:
new 0cbceb9317 25%
0cbceb9317 is described below
commit 0cbceb93175eb2e64874c2bec70b8dde9f46e0b0
Author: Lei Rui <10...@qq.com>
AuthorDate: Tue Oct 11 19:35:39 2022 +0800
25%
---
.../dataset/groupby/LocalGroupByExecutor4CPV.java | 63 +--
.../apache/iotdb/db/integration/m4/MyTest1.java | 2 +-
.../apache/iotdb/db/integration/m4/MyTest2.java | 70 ++--
.../apache/iotdb/db/integration/m4/MyTest3.java | 14 +-
.../apache/iotdb/db/integration/m4/MyTest4.java | 2 +-
.../session/MySmallRealDataWriteQueryTest.java | 2 +-
.../encoding/decoder/DeltaBinaryDecoder.java | 72 +++-
.../iotdb/tsfile/read/reader/page/PageReader.java | 449 ++++++++++++++-------
8 files changed, 424 insertions(+), 250 deletions(-)
diff --git a/server/src/main/java/org/apache/iotdb/db/query/dataset/groupby/LocalGroupByExecutor4CPV.java b/server/src/main/java/org/apache/iotdb/db/query/dataset/groupby/LocalGroupByExecutor4CPV.java
index 59feabe4e1..b29a0b63d8 100644
--- a/server/src/main/java/org/apache/iotdb/db/query/dataset/groupby/LocalGroupByExecutor4CPV.java
+++ b/server/src/main/java/org/apache/iotdb/db/query/dataset/groupby/LocalGroupByExecutor4CPV.java
@@ -19,6 +19,14 @@
package org.apache.iotdb.db.query.dataset.groupby;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.List;
+import java.util.ListIterator;
+import java.util.Map;
+import java.util.Set;
import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
import org.apache.iotdb.db.exception.StorageEngineException;
import org.apache.iotdb.db.exception.query.QueryProcessException;
@@ -51,15 +59,6 @@ import org.apache.iotdb.tsfile.read.reader.page.PageReader;
import org.apache.iotdb.tsfile.utils.Pair;
import org.apache.iotdb.tsfile.utils.TsPrimitiveType;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.List;
-import java.util.ListIterator;
-import java.util.Map;
-import java.util.Set;
-
/**
* Sql format: SELECT min_time(s0), max_time(s0), first_value(s0), last_value(s0), min_value(s0),
* max_value(s0) FROM root.xx group by ([tqs,tqe),IntervalLength). Requirements: (1) Don't change the
@@ -199,9 +198,9 @@ public class LocalGroupByExecutor4CPV implements GroupByExecutor {
/**
* @param curStartTime closed
- * @param curEndTime open
- * @param startTime closed
- * @param endTime open
+ * @param curEndTime open
+ * @param startTime closed
+ * @param endTime open
*/
@Override
public List<AggregateResult> calcResult(
@@ -226,7 +225,9 @@ public class LocalGroupByExecutor4CPV implements GroupByExecutor {
return results;
}
- /** Apply the delete operations to the BatchData, and assign the updated BatchData and statistics to chunkSuit4CPV */
+ /**
+ * Apply the delete operations to the BatchData, and assign the updated BatchData and statistics to chunkSuit4CPV.
+ */
private void updateBatchData(ChunkSuit4CPV chunkSuit4CPV, TSDataType dataType) {
if (chunkSuit4CPV.getBatchData() != null) {
BatchData batchData1 = BatchDataFactory.createBatchData(dataType, true, false);
@@ -323,8 +324,8 @@ public class LocalGroupByExecutor4CPV implements GroupByExecutor {
new Comparator<ChunkSuit4CPV>() { // TODO double check the sort order logic for version
public int compare(ChunkSuit4CPV o1, ChunkSuit4CPV o2) {
return new MergeReaderPriority(
- o2.getChunkMetadata().getVersion(),
- o2.getChunkMetadata().getOffsetOfChunkHeader())
+ o2.getChunkMetadata().getVersion(),
+ o2.getChunkMetadata().getOffsetOfChunkHeader())
.compareTo(
new MergeReaderPriority(
o1.getChunkMetadata().getVersion(),
@@ -411,7 +412,7 @@ public class LocalGroupByExecutor4CPV implements GroupByExecutor {
.get(4) // TODO check: minTimestamp, maxTimestamp, firstValue, lastValue,
// minValue[bottomTimestamp], maxValue[topTimestamp]
.updateResultUsingValues(
- new long[] {candidateTimestamp}, 1, new Object[] {candidateValue});
+ new long[]{candidateTimestamp}, 1, new Object[]{candidateValue});
// TODO check updateResult
return; // computation finished
} else { // the candidate is overlapped, so partial-scan all of the overlapping chunks
@@ -425,7 +426,7 @@ public class LocalGroupByExecutor4CPV implements GroupByExecutor {
List<ChunkSuit4CPV> tmpCurrentChunkList = new ArrayList<>();
for (IPageReader pageReader : pageReaderList) { // assume only one page in a chunk
isUpdate =
- ((PageReader) pageReader).partialScan(candidateTimestamp); // TODO check
+ ((PageReader) pageReader).partialScan4CPV(candidateTimestamp); // TODO check
}
} else {
// partial-scan the already loaded batchData until a point's timestamp is greater than or equal to candidateTimestamp
@@ -455,7 +456,7 @@ public class LocalGroupByExecutor4CPV implements GroupByExecutor {
.get(4) // TODO check: minTimestamp, maxTimestamp, firstValue, lastValue,
// minValue[bottomTimestamp], maxValue[topTimestamp]
.updateResultUsingValues(
- new long[] {candidateTimestamp}, 1, new Object[] {candidateValue});
+ new long[]{candidateTimestamp}, 1, new Object[]{candidateValue});
// TODO check updateResult
return; // computation finished
} else { // such a point is found, so mark the chunk containing the candidate point as lazy
@@ -521,8 +522,8 @@ public class LocalGroupByExecutor4CPV implements GroupByExecutor {
new Comparator<ChunkSuit4CPV>() { // TODO double check the sort order logic for version
public int compare(ChunkSuit4CPV o1, ChunkSuit4CPV o2) {
return new MergeReaderPriority(
- o2.getChunkMetadata().getVersion(),
- o2.getChunkMetadata().getOffsetOfChunkHeader())
+ o2.getChunkMetadata().getVersion(),
+ o2.getChunkMetadata().getOffsetOfChunkHeader())
.compareTo(
new MergeReaderPriority(
o1.getChunkMetadata().getVersion(),
@@ -609,7 +610,7 @@ public class LocalGroupByExecutor4CPV implements GroupByExecutor {
.get(5) // TODO check: minTimestamp, maxTimestamp, firstValue, lastValue,
// minValue[bottomTimestamp], maxValue[topTimestamp]
.updateResultUsingValues(
- new long[] {candidateTimestamp}, 1, new Object[] {candidateValue});
+ new long[]{candidateTimestamp}, 1, new Object[]{candidateValue});
// TODO check updateResult
return; // computation finished
} else { // the candidate is overlapped, so partial-scan all of the overlapping chunks
@@ -623,7 +624,7 @@ public class LocalGroupByExecutor4CPV implements GroupByExecutor {
List<ChunkSuit4CPV> tmpCurrentChunkList = new ArrayList<>();
for (IPageReader pageReader : pageReaderList) { // assume only one page in a chunk
isUpdate =
- ((PageReader) pageReader).partialScan(candidateTimestamp); // TODO check
+ ((PageReader) pageReader).partialScan4CPV(candidateTimestamp); // TODO check
}
} else {
// partial-scan the already loaded batchData until a point's timestamp is greater than or equal to candidateTimestamp
@@ -653,7 +654,7 @@ public class LocalGroupByExecutor4CPV implements GroupByExecutor {
.get(5) // TODO check: minTimestamp, maxTimestamp, firstValue, lastValue,
// minValue[bottomTimestamp], maxValue[topTimestamp]
.updateResultUsingValues(
- new long[] {candidateTimestamp}, 1, new Object[] {candidateValue});
+ new long[]{candidateTimestamp}, 1, new Object[]{candidateValue});
// TODO check updateResult
return; // computation finished
} else { // such a point is found, so mark the chunk containing the candidate point as lazy
@@ -701,8 +702,8 @@ public class LocalGroupByExecutor4CPV implements GroupByExecutor {
return res;
} else {
return new MergeReaderPriority(
- o2.getChunkMetadata().getVersion(),
- o2.getChunkMetadata().getOffsetOfChunkHeader())
+ o2.getChunkMetadata().getVersion(),
+ o2.getChunkMetadata().getOffsetOfChunkHeader())
.compareTo(
new MergeReaderPriority(
o1.getChunkMetadata().getVersion(),
@@ -767,11 +768,11 @@ public class LocalGroupByExecutor4CPV implements GroupByExecutor {
results
.get(0)
.updateResultUsingValues(
- new long[] {candidateTimestamp}, 1, new Object[] {candidateValue});
+ new long[]{candidateTimestamp}, 1, new Object[]{candidateValue});
results
.get(2)
.updateResultUsingValues(
- new long[] {candidateTimestamp}, 1, new Object[] {candidateValue});
+ new long[]{candidateTimestamp}, 1, new Object[]{candidateValue});
return;
}
}
@@ -798,8 +799,8 @@ public class LocalGroupByExecutor4CPV implements GroupByExecutor {
return res;
} else {
return new MergeReaderPriority(
- o2.getChunkMetadata().getVersion(),
- o2.getChunkMetadata().getOffsetOfChunkHeader())
+ o2.getChunkMetadata().getVersion(),
+ o2.getChunkMetadata().getOffsetOfChunkHeader())
.compareTo(
new MergeReaderPriority(
o1.getChunkMetadata().getVersion(),
@@ -864,11 +865,11 @@ public class LocalGroupByExecutor4CPV implements GroupByExecutor {
results
.get(1)
.updateResultUsingValues(
- new long[] {candidateTimestamp}, 1, new Object[] {candidateValue});
+ new long[]{candidateTimestamp}, 1, new Object[]{candidateValue});
results
.get(3)
.updateResultUsingValues(
- new long[] {candidateTimestamp}, 1, new Object[] {candidateValue});
+ new long[]{candidateTimestamp}, 1, new Object[]{candidateValue});
return;
}
}
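
[Editor's note] For orientation, here is a minimal sketch (not part of this commit) of the query shape that LocalGroupByExecutor4CPV serves, mirroring the JDBC usage in the integration tests below. The series root.vehicle.d0.s0, the URL, and the credentials come from MyTest1/MyTest2; the exact GROUP BY window literal is an assumption. The six aggregates are listed in the same order as the results.get(0)..get(5) indexing used in calcResult above.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class CpvQuerySketch {
  public static void main(String[] args) throws Exception {
    Class.forName("org.apache.iotdb.jdbc.IoTDBDriver"); // standard IoTDB JDBC driver
    try (Connection connection =
            DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
        Statement statement = connection.createStatement()) {
      // min_time, max_time, first_value, last_value, min_value, max_value:
      // the order matches results.get(0)..get(5) in calcResult.
      boolean hasResultSet =
          statement.execute(
              "select min_time(s0), max_time(s0), first_value(s0), last_value(s0),"
                  + " min_value(s0), max_value(s0) from root.vehicle.d0"
                  + " group by ([0, 100), 25ms)"); // hypothetical window literal
      if (hasResultSet) {
        try (ResultSet resultSet = statement.getResultSet()) {
          while (resultSet.next()) {
            System.out.println(resultSet.getString(1)); // Time column first
          }
        }
      }
    }
  }
}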
diff --git a/server/src/test/java/org/apache/iotdb/db/integration/m4/MyTest1.java b/server/src/test/java/org/apache/iotdb/db/integration/m4/MyTest1.java
index a6a11054a5..b93c03f312 100644
--- a/server/src/test/java/org/apache/iotdb/db/integration/m4/MyTest1.java
+++ b/server/src/test/java/org/apache/iotdb/db/integration/m4/MyTest1.java
@@ -45,7 +45,7 @@ public class MyTest1 {
private static String[] creationSqls =
new String[] {
"SET STORAGE GROUP TO root.vehicle.d0",
- "CREATE TIMESERIES root.vehicle.d0.s0 WITH DATATYPE=INT32, ENCODING=RLE",
+ "CREATE TIMESERIES root.vehicle.d0.s0 WITH DATATYPE=INT64",
};
private final String d0s0 = "root.vehicle.d0.s0";
diff --git a/server/src/test/java/org/apache/iotdb/db/integration/m4/MyTest2.java b/server/src/test/java/org/apache/iotdb/db/integration/m4/MyTest2.java
index cc9ef2ba6a..e935288743 100644
--- a/server/src/test/java/org/apache/iotdb/db/integration/m4/MyTest2.java
+++ b/server/src/test/java/org/apache/iotdb/db/integration/m4/MyTest2.java
@@ -42,9 +42,9 @@ public class MyTest2 {
private static final String TIMESTAMP_STR = "Time";
private static String[] creationSqls =
- new String[] {
- "SET STORAGE GROUP TO root.vehicle.d0",
- "CREATE TIMESERIES root.vehicle.d0.s0 WITH DATATYPE=INT32, ENCODING=RLE",
+ new String[]{
+ "SET STORAGE GROUP TO root.vehicle.d0",
+ "CREATE TIMESERIES root.vehicle.d0.s0 WITH DATATYPE=INT64",
};
private final String d0s0 = "root.vehicle.d0.s0";
@@ -97,14 +97,14 @@ public class MyTest2 {
prepareData1();
String[] res =
- new String[] {
- "0,1,20,5,20,5[1],30[10]",
- "25,27,45,20,30,9[33],40[30]",
- "50,52,54,8,18,8[52],18[54]",
- "75,null,null,null,null,null,null"
+ new String[]{
+ "0,1,20,5,20,5[1],30[10]",
+ "25,27,45,20,30,9[33],40[30]",
+ "50,52,54,8,18,8[52],18[54]",
+ "75,null,null,null,null,null,null"
};
try (Connection connection =
- DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
+ DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
Statement statement = connection.createStatement()) {
boolean hasResultSet =
statement.execute(
@@ -144,8 +144,8 @@ public class MyTest2 {
// data:
// https://user-images.githubusercontent.com/33376433/152085323-321ecd70-1253-494f-81ab-fe227d1f5351.png
try (Connection connection =
- DriverManager.getConnection(
- Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root", "root");
+ DriverManager.getConnection(
+ Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root", "root");
Statement statement = connection.createStatement()) {
for (String sql : creationSqls) {
@@ -184,14 +184,14 @@ public class MyTest2 {
prepareData2();
String[] res =
- new String[] {
- "0,1,20,5,5,5[1],5[1]",
- "25,30,40,5,5,5[30],5[30]",
- "50,55,72,5,5,5[65],5[65]",
- "75,80,90,5,5,5[80],5[80]"
+ new String[]{
+ "0,1,20,5,5,5[1],5[1]",
+ "25,30,40,5,5,5[30],5[30]",
+ "50,55,72,5,5,5[65],5[65]",
+ "75,80,90,5,5,5[80],5[80]"
};
try (Connection connection =
- DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
+ DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
Statement statement = connection.createStatement()) {
boolean hasResultSet =
statement.execute(
@@ -231,8 +231,8 @@ public class MyTest2 {
// data:
// https://user-images.githubusercontent.com/33376433/152085361-571f64dc-0c32-4f70-9481-bc30e4f6f78a.png
try (Connection connection =
- DriverManager.getConnection(
- Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root", "root");
+ DriverManager.getConnection(
+ Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root", "root");
Statement statement = connection.createStatement()) {
for (String sql : creationSqls) {
@@ -271,14 +271,14 @@ public class MyTest2 {
prepareData3();
String[] res =
- new String[] {
- "0,1,20,5,15,5[1],15[2]",
- "25,30,40,5,15,5[30],15[40]",
- "50,55,72,5,15,5[65],15[66]",
- "75,80,90,5,15,5[80],15[82]"
+ new String[]{
+ "0,1,20,5,15,5[1],15[2]",
+ "25,30,40,5,15,5[30],15[40]",
+ "50,55,72,5,15,5[65],15[66]",
+ "75,80,90,5,15,5[80],15[82]"
};
try (Connection connection =
- DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
+ DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
Statement statement = connection.createStatement()) {
boolean hasResultSet =
statement.execute(
@@ -318,8 +318,8 @@ public class MyTest2 {
// data:
// https://user-images.githubusercontent.com/33376433/152085386-ebe57e83-cb49-49e8-b8f8-b80719547c42.png
try (Connection connection =
- DriverManager.getConnection(
- Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root", "root");
+ DriverManager.getConnection(
+ Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root", "root");
Statement statement = connection.createStatement()) {
for (String sql : creationSqls) {
@@ -357,9 +357,9 @@ public class MyTest2 {
public void test4() {
prepareData4();
- String[] res = new String[] {"0,1,10,5,18,3[9],25[6]", "50,60,60,1,1,1[60],1[60]"};
+ String[] res = new String[]{"0,1,10,5,18,3[9],25[6]", "50,60,60,1,1,1[60],1[60]"};
try (Connection connection =
- DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
+ DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
Statement statement = connection.createStatement()) {
boolean hasResultSet =
statement.execute(
@@ -399,8 +399,8 @@ public class MyTest2 {
// data:
// https://user-images.githubusercontent.com/33376433/152088562-830e3272-749a-493a-83ca-1279e66ab145.png
try (Connection connection =
- DriverManager.getConnection(
- Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root", "root");
+ DriverManager.getConnection(
+ Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root", "root");
Statement statement = connection.createStatement()) {
for (String sql : creationSqls) {
@@ -433,9 +433,9 @@ public class MyTest2 {
public void test5() {
prepareData5();
- String[] res = new String[] {"0,1,10,5,18,3[9],25[6]", "50,60,60,1,1,1[60],1[60]"};
+ String[] res = new String[]{"0,1,10,5,18,3[9],25[6]", "50,60,60,1,1,1[60],1[60]"};
try (Connection connection =
- DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
+ DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
Statement statement = connection.createStatement()) {
boolean hasResultSet =
statement.execute(
@@ -475,8 +475,8 @@ public class MyTest2 {
// data:
// https://user-images.githubusercontent.com/33376433/152088820-49351c49-9da2-43dd-8da1-2940ae81ae9d.png
try (Connection connection =
- DriverManager.getConnection(
- Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root", "root");
+ DriverManager.getConnection(
+ Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root", "root");
Statement statement = connection.createStatement()) {
for (String sql : creationSqls) {
diff --git a/server/src/test/java/org/apache/iotdb/db/integration/m4/MyTest3.java b/server/src/test/java/org/apache/iotdb/db/integration/m4/MyTest3.java
index 5bcce47a66..e587ec748f 100644
--- a/server/src/test/java/org/apache/iotdb/db/integration/m4/MyTest3.java
+++ b/server/src/test/java/org/apache/iotdb/db/integration/m4/MyTest3.java
@@ -45,7 +45,7 @@ public class MyTest3 {
private static String[] creationSqls =
new String[] {
"SET STORAGE GROUP TO root.vehicle.d0",
- "CREATE TIMESERIES root.vehicle.d0.s0 WITH DATATYPE=DOUBLE",
+ "CREATE TIMESERIES root.vehicle.d0.s0 WITH DATATYPE=INT64",
};
private final String d0s0 = "root.vehicle.d0.s0";
@@ -84,9 +84,9 @@ public class MyTest3 {
String[] res =
new String[] {
- "0,1,20,5.0,20.0,5.0[1],30.0[10]",
- "25,25,45,8.0,30.0,8.0[25],40.0[30]",
- "50,52,54,8.0,18.0,8.0[52],18.0[54]",
+ "0,1,20,5,20,5[1],30[10]",
+ "25,25,45,8,30,8[25],40[30]",
+ "50,52,54,8,18,8[52],18[54]",
"75,null,null,null,null,null,null"
};
try (Connection connection =
@@ -174,9 +174,9 @@ public class MyTest3 {
String[] res =
new String[] {
- "0,FirstPoint=(1,5.0), LastPoint=(20,20.0), BottomPoint=(1,5.0), TopPoint=(10,30.0)",
- "25,FirstPoint=(25,8.0), LastPoint=(45,30.0), BottomPoint=(25,8.0), TopPoint=(30,40.0)",
- "50,FirstPoint=(52,8.0), LastPoint=(54,18.0), BottomPoint=(52,8.0), TopPoint=(54,18.0)",
+ "0,FirstPoint=(1,5), LastPoint=(20,20), BottomPoint=(1,5), TopPoint=(10,30)",
+ "25,FirstPoint=(25,8), LastPoint=(45,30), BottomPoint=(25,8), TopPoint=(30,40)",
+ "50,FirstPoint=(52,8), LastPoint=(54,18), BottomPoint=(52,8), TopPoint=(54,18)",
"75,empty"
};
try (Connection connection =
diff --git a/server/src/test/java/org/apache/iotdb/db/integration/m4/MyTest4.java b/server/src/test/java/org/apache/iotdb/db/integration/m4/MyTest4.java
index bafb22478b..6607eaf85b 100644
--- a/server/src/test/java/org/apache/iotdb/db/integration/m4/MyTest4.java
+++ b/server/src/test/java/org/apache/iotdb/db/integration/m4/MyTest4.java
@@ -45,7 +45,7 @@ public class MyTest4 {
private static String[] creationSqls =
new String[] {
"SET STORAGE GROUP TO root.vehicle.d0",
- "CREATE TIMESERIES root.vehicle.d0.s0 WITH DATATYPE=INT32, ENCODING=RLE",
+ "CREATE TIMESERIES root.vehicle.d0.s0 WITH DATATYPE=INT64",
};
private final String d0s0 = "root.vehicle.d0.s0";
diff --git a/session/src/test/java/org/apache/iotdb/session/MySmallRealDataWriteQueryTest.java b/session/src/test/java/org/apache/iotdb/session/MySmallRealDataWriteQueryTest.java
index 6c177f146e..61ce07c88f 100644
--- a/session/src/test/java/org/apache/iotdb/session/MySmallRealDataWriteQueryTest.java
+++ b/session/src/test/java/org/apache/iotdb/session/MySmallRealDataWriteQueryTest.java
@@ -63,7 +63,7 @@ public class MySmallRealDataWriteQueryTest {
private static int valueIdx = 1; // value index, starting from 0
private static int w = 3;
private static long range = total_time_length;
- private static boolean enableRegularityTimeDecode = true;
+ private static boolean enableRegularityTimeDecode = false;
private static long regularTimeInterval = 511996L;
// private static long regularTimeInterval = 511997L;
private static String approach = "cpv"; // choose the query execution algorithm: 1: MAC, 2: MOC, 3: CPV
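
[Editor's note] The enableRegularityTimeDecode and regularTimeInterval settings above drive the regularity-aware time decoding exercised by this branch; the DeltaBinaryDecoder hunk below computes newRegularDelta = regularTimeInterval - minDeltaBase. A small illustrative sketch (class and variable names are mine, not the repo's) of why a perfectly regular series packs down to packWidth == 0:

// A hedged sketch of the regularity assumption behind enableRegularityTimeDecode:
// for a perfectly regular series, every consecutive timestamp delta equals
// regularTimeInterval, so after subtracting minDeltaBase (the minimum delta, here
// the interval itself) every stored delta is 0 and packWidth becomes 0.
public class RegularityExample {
  public static void main(String[] args) {
    long regularTimeInterval = 511996L; // value from the test config above
    long[] timestamps = {0L, 511996L, 1023992L, 1535988L};
    long minDeltaBase = Long.MAX_VALUE;
    for (int i = 1; i < timestamps.length; i++) {
      minDeltaBase = Math.min(minDeltaBase, timestamps[i] - timestamps[i - 1]);
    }
    for (int i = 1; i < timestamps.length; i++) {
      long stored = (timestamps[i] - timestamps[i - 1]) - minDeltaBase;
      System.out.println("stored delta = " + stored); // all 0 -> packWidth == 0
    }
    System.out.println("newRegularDelta = " + (regularTimeInterval - minDeltaBase)); // 0
  }
}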
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/encoding/decoder/DeltaBinaryDecoder.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/encoding/decoder/DeltaBinaryDecoder.java
index ff8a15ec8c..cf80fdb01f 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/encoding/decoder/DeltaBinaryDecoder.java
+++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/encoding/decoder/DeltaBinaryDecoder.java
@@ -19,23 +19,18 @@
package org.apache.iotdb.tsfile.encoding.decoder;
+import java.io.IOException;
+import java.nio.ByteBuffer;
import org.apache.iotdb.tsfile.common.conf.TSFileDescriptor;
import org.apache.iotdb.tsfile.common.constant.TsFileConstant;
import org.apache.iotdb.tsfile.encoding.encoder.DeltaBinaryEncoder;
import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding;
import org.apache.iotdb.tsfile.utils.BytesUtils;
-import org.apache.iotdb.tsfile.utils.Pair;
import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.HashMap;
-import java.util.Map;
-
/**
* This class is a decoder for decoding a byte array that was encoded by {@code
- * DeltaBinaryEncoder}.DeltaBinaryDecoder just supports integer and long values.<br>
- * .
+ * DeltaBinaryEncoder}. DeltaBinaryDecoder only supports integer and long values.
*
* @see DeltaBinaryEncoder
*/
@@ -44,16 +39,24 @@ public abstract class DeltaBinaryDecoder extends Decoder {
protected long count = 0;
protected byte[] deltaBuf;
- /** the first value in one pack. */
+ /**
+ * the first value in one pack.
+ */
protected int readIntTotalCount = 0;
protected int nextReadIndex = 0;
- /** max bit length of all value in a pack. */
+ /**
+ * max bit length of all value in a pack.
+ */
protected int packWidth;
- /** data number in this pack. */
+ /**
+ * data number in this pack.
+ */
protected int packNum;
- /** how many bytes data takes after encoding. */
+ /**
+ * how many bytes data takes after encoding.
+ */
protected int encodingLength;
public DeltaBinaryDecoder() {
@@ -86,7 +89,9 @@ public abstract class DeltaBinaryDecoder extends Decoder {
private int firstValue;
private int[] data;
private int previous;
- /** minimum value for all difference. */
+ /**
+ * minimum value for all difference.
+ */
private int minDeltaBase;
public IntDeltaDecoder() {
@@ -170,17 +175,21 @@ public abstract class DeltaBinaryDecoder extends Decoder {
private long firstValue;
private long[] data;
private long previous;
- /** minimum value for all difference. */
+ /**
+ * minimum value for all difference.
+ */
private long minDeltaBase;
private boolean enableRegularityTimeDecode;
private long regularTimeInterval;
- private Map<Pair<Long, Integer>, byte[][]> allRegularBytes =
- new HashMap<>(); // <newRegularDelta,packWidth> -> (relativePos->bytes)
+// private Map<Pair<Long, Integer>, byte[][]> allRegularBytes =
+// new HashMap<>(); // <newRegularDelta,packWidth> -> (relativePos->bytes)
private int[][] allFallWithinMasks = new int[7][]; // packWidth(1~7) -> fallWithinMasks[]
+ private boolean isDataReady = false; // assuming only one pack in the buffer to be decoded
+
public LongDeltaDecoder() {
super();
this.enableRegularityTimeDecode =
@@ -189,6 +198,33 @@ public abstract class DeltaBinaryDecoder extends Decoder {
TSFileDescriptor.getInstance().getConfig().getRegularTimeInterval();
}
+ public long[] getDataArray4CPV(ByteBuffer buffer) {
+ // assuming only one pack in the buffer to be decoded
+ if (isDataReady) {
+ return data;
+ }
+ loadIntBatch(buffer);
+ isDataReady = true;
+ return data;
+ }
+
+// /**
+// * @return true if the point whose time equals candidateTimestamp exists, false if not
+// */
+// public boolean partialScan4CPV(long candidateTimestamp, ByteBuffer buffer) throws IOException {
+// long[] timeData = getDataArray4CPV(buffer);
+// for (long t : timeData) {
+// if (t > candidateTimestamp) {
+// return false; // not exist, return early
+// }
+// if (t == candidateTimestamp) {
+// return true; // exist
+// }
+// }
+// return false; // not exist
+// }
+
+
/**
* if there's no decoded data left, decode next pack into {@code data}.
*
@@ -209,7 +245,7 @@ public abstract class DeltaBinaryDecoder extends Decoder {
* @return long value
*/
protected long loadIntBatch(ByteBuffer buffer) {
- TsFileConstant.countLoadIntBatch++;
+// TsFileConstant.countLoadIntBatch++;
long start = System.nanoTime();
packNum = ReadWriteIOUtils.readInt(buffer);
@@ -225,7 +261,7 @@ public abstract class DeltaBinaryDecoder extends Decoder {
long newRegularDelta = regularTimeInterval - minDeltaBase;
if (packWidth == 0) {
// [CASE 1]
- encodingLength = ceil(packNum * packWidth);
+ encodingLength = ceil(0);
deltaBuf = new byte[encodingLength];
buffer.get(deltaBuf);
allocateDataArray();
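
[Editor's note] For reference, a self-contained sketch of the delta-with-minimum-base reconstruction that loadIntBatch feeds, inferred from the fields visible in this diff (packNum, packWidth, minDeltaBase, firstValue); the class and helper names are assumptions, not this repo's exact code. packWidth == 0 (CASE 1 above) means every stored delta is 0, i.e. each step is exactly minDeltaBase:

// A hedged sketch of delta-with-minimum-base decoding; not the repo's code.
public class DeltaDecodeSketch {
  // packedDeltas are the packWidth-bit values after unpacking deltaBuf;
  // packWidth == 0 (CASE 1 in loadIntBatch) means all of them are 0.
  static long[] decode(long firstValue, long minDeltaBase, long[] packedDeltas) {
    long[] out = new long[packedDeltas.length + 1];
    out[0] = firstValue;
    for (int i = 0; i < packedDeltas.length; i++) {
      out[i + 1] = out[i] + minDeltaBase + packedDeltas[i];
    }
    return out;
  }

  public static void main(String[] args) {
    // Regular timestamps: minDeltaBase == 511996 and every packed delta is 0.
    long[] ts = decode(0L, 511996L, new long[] {0L, 0L, 0L});
    for (long t : ts) {
      System.out.println(t); // 0, 511996, 1023992, 1535988
    }
  }
}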
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/reader/page/PageReader.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/read/reader/page/PageReader.java
index 98a8763e18..d6629b40cb 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/reader/page/PageReader.java
+++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/read/reader/page/PageReader.java
@@ -18,14 +18,17 @@
*/
package org.apache.iotdb.tsfile.read.reader.page;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
import org.apache.iotdb.tsfile.encoding.decoder.Decoder;
-import org.apache.iotdb.tsfile.exception.write.UnSupportedDataTypeException;
+import org.apache.iotdb.tsfile.encoding.decoder.DeltaBinaryDecoder.LongDeltaDecoder;
import org.apache.iotdb.tsfile.file.header.PageHeader;
import org.apache.iotdb.tsfile.file.metadata.ChunkMetadata;
import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
-import org.apache.iotdb.tsfile.file.metadata.statistics.DoubleStatistics;
-import org.apache.iotdb.tsfile.file.metadata.statistics.FloatStatistics;
-import org.apache.iotdb.tsfile.file.metadata.statistics.IntegerStatistics;
import org.apache.iotdb.tsfile.file.metadata.statistics.LongStatistics;
import org.apache.iotdb.tsfile.file.metadata.statistics.Statistics;
import org.apache.iotdb.tsfile.read.common.BatchData;
@@ -35,37 +38,39 @@ import org.apache.iotdb.tsfile.read.common.TimeRange;
import org.apache.iotdb.tsfile.read.filter.basic.Filter;
import org.apache.iotdb.tsfile.read.filter.operator.AndFilter;
import org.apache.iotdb.tsfile.read.reader.IPageReader;
-import org.apache.iotdb.tsfile.utils.Binary;
import org.apache.iotdb.tsfile.utils.ReadWriteForEncodingUtils;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
public class PageReader implements IPageReader {
private PageHeader pageHeader;
protected TSDataType dataType;
- /** decoder for value column */
- protected Decoder valueDecoder;
+ /**
+ * decoder for value column
+ */
+ public Decoder valueDecoder;
- /** decoder for time column */
- protected Decoder timeDecoder;
+ /**
+ * decoder for time column
+ */
+ public Decoder timeDecoder;
- /** time column in memory */
- protected ByteBuffer timeBuffer;
+ /**
+ * time column in memory
+ */
+ public ByteBuffer timeBuffer;
- /** value column in memory */
- protected ByteBuffer valueBuffer;
+ /**
+ * value column in memory
+ */
+ public ByteBuffer valueBuffer;
protected Filter filter;
- /** A list of deleted intervals. */
+ /**
+ * A list of deleted intervals.
+ */
private List<TimeRange> deleteIntervalList;
private int deleteCursor = 0;
@@ -109,6 +114,148 @@ public class PageReader implements IPageReader {
valueBuffer.position(timeBufferLength);
}
+// public void split4CPV(
+// long startTime,
+// long endTime,
+// long interval,
+// long curStartTime,
+// List<ChunkSuit4CPV> currentChunkList,
+// Map<Integer, List<ChunkSuit4CPV>> splitChunkList,
+// ChunkMetadata chunkMetadata)
+// throws IOException { // note: [startTime,endTime), [curStartTime,curEndTime)
+// Map<Integer, BatchData> splitBatchDataMap = new HashMap<>();
+// Map<Integer, ChunkMetadata> splitChunkMetadataMap = new HashMap<>();
+// while (timeDecoder.hasNext(timeBuffer)) {
+// long timestamp = timeDecoder.readLong(timeBuffer);
+// // prepare corresponding batchData
+// if (timestamp < curStartTime) {
+// switch (dataType) {
+// case INT32:
+// valueDecoder.readInt(valueBuffer);
+// break;
+// case INT64:
+// valueDecoder.readLong(valueBuffer);
+// break;
+// case FLOAT:
+// valueDecoder.readFloat(valueBuffer);
+// break;
+// case DOUBLE:
+// valueDecoder.readDouble(valueBuffer);
+// break;
+// default:
+// throw new UnSupportedDataTypeException(String.valueOf(dataType));
+// }
+// continue;
+// }
+// if (timestamp >= endTime) {
+// break;
+// }
+// int idx = (int) Math.floor((timestamp - startTime) * 1.0 / interval);
+// if (!splitBatchDataMap.containsKey(idx)) {
+// // create batchData
+// BatchData batch1 = BatchDataFactory.createBatchData(dataType, true, false);
+// splitBatchDataMap.put(idx, batch1);
+// Statistics statistics = null;
+// switch (dataType) {
+// case INT32:
+// statistics = new IntegerStatistics();
+// break;
+// case INT64:
+// statistics = new LongStatistics();
+// break;
+// case FLOAT:
+// statistics = new FloatStatistics();
+// break;
+// case DOUBLE:
+// statistics = new DoubleStatistics();
+// break;
+// default:
+// break;
+// }
+// // create chunkMetaData
+// ChunkMetadata chunkMetadata1 =
+// new ChunkMetadata(
+// chunkMetadata.getMeasurementUid(),
+// chunkMetadata.getDataType(),
+// chunkMetadata.getOffsetOfChunkHeader(),
+// statistics);
+// chunkMetadata1.setVersion(chunkMetadata.getVersion()); // don't miss this
+//
+// // // important, used later for candidate point verification
+// // // (1) candidate point itself whether is in the deleted interval
+// // // (2) candidate point whether is overlapped by a chunk with a larger version
+// // number and
+// // // the chunk does not have a deleted interval overlapping this candidate point
+// // chunkMetadata1.setDeleteIntervalList(chunkMetadata.getDeleteIntervalList());
+// // // not use current Ii to modify deletedIntervalList any more
+//
+// splitChunkMetadataMap.put(idx, chunkMetadata1);
+// }
+// BatchData batchData1 = splitBatchDataMap.get(idx);
+// ChunkMetadata chunkMetadata1 = splitChunkMetadataMap.get(idx);
+// switch (dataType) {
+// case INT32:
+// int anInt = valueDecoder.readInt(valueBuffer);
+// if (!isDeleted(timestamp) && (filter == null || filter.satisfy(timestamp, anInt))) {
+// // update batchData1
+// batchData1.putInt(timestamp, anInt);
+// // update statistics of chunkMetadata1
+// chunkMetadata1.getStatistics().update(timestamp, anInt);
+// }
+// break;
+// case INT64:
+// long aLong = valueDecoder.readLong(valueBuffer);
+// if (!isDeleted(timestamp) && (filter == null || filter.satisfy(timestamp, aLong))) {
+// // update batchData1
+// batchData1.putLong(timestamp, aLong);
+// // update statistics of chunkMetadata1
+// chunkMetadata1.getStatistics().update(timestamp, aLong);
+// }
+// break;
+// case FLOAT:
+// float aFloat = valueDecoder.readFloat(valueBuffer);
+// if (!isDeleted(timestamp) && (filter == null || filter.satisfy(timestamp, aFloat))) {
+// // update batchData1
+// batchData1.putFloat(timestamp, aFloat);
+// // update statistics of chunkMetadata1
+// chunkMetadata1.getStatistics().update(timestamp, aFloat);
+// }
+// break;
+// case DOUBLE:
+// double aDouble = valueDecoder.readDouble(valueBuffer);
+// if (!isDeleted(timestamp) && (filter == null || filter.satisfy(timestamp, aDouble))) {
+// // update batchData1
+// batchData1.putDouble(timestamp, aDouble);
+// // update statistics of chunkMetadata1
+// chunkMetadata1.getStatistics().update(timestamp, aDouble);
+// }
+// break;
+// default:
+// throw new UnSupportedDataTypeException(String.valueOf(dataType));
+// }
+// }
+//
+// int curIdx = (int) Math.floor((curStartTime - startTime) * 1.0 / interval);
+// for (Integer i : splitBatchDataMap.keySet()) {
+// if (!splitBatchDataMap.get(i).isEmpty()) {
+// if (i == curIdx) {
+// currentChunkList.add(
+// new ChunkSuit4CPV(splitChunkMetadataMap.get(i), splitBatchDataMap.get(i).flip()));
+// } else {
+// splitChunkList.computeIfAbsent(i, k -> new ArrayList<>());
+// splitChunkList
+// .get(i)
+// .add(
+// new ChunkSuit4CPV(splitChunkMetadataMap.get(i), splitBatchDataMap.get(i).flip()));
+// }
+// }
+// }
+// }
+
+ /**
+ * Handles the update needed when the candidate point is invalidated because of an M4 time span, a delete, or an
+ * update. It traverses the points in this page, applies deletes and filter conditions to each extracted point,
+ * and splits the points by M4 time spans into the batchData and chunkMetadata of the span each point falls into.
+ */
public void split4CPV(
long startTime,
long endTime,
@@ -124,22 +271,7 @@ public class PageReader implements IPageReader {
long timestamp = timeDecoder.readLong(timeBuffer);
// prepare corresponding batchData
if (timestamp < curStartTime) {
- switch (dataType) {
- case INT32:
- valueDecoder.readInt(valueBuffer);
- break;
- case INT64:
- valueDecoder.readLong(valueBuffer);
- break;
- case FLOAT:
- valueDecoder.readFloat(valueBuffer);
- break;
- case DOUBLE:
- valueDecoder.readDouble(valueBuffer);
- break;
- default:
- throw new UnSupportedDataTypeException(String.valueOf(dataType));
- }
+ valueDecoder.readLong(valueBuffer); // hard-coded, assuming value is long data type
continue;
}
if (timestamp >= endTime) {
@@ -150,23 +282,7 @@ public class PageReader implements IPageReader {
// create batchData
BatchData batch1 = BatchDataFactory.createBatchData(dataType, true, false);
splitBatchDataMap.put(idx, batch1);
- Statistics statistics = null;
- switch (dataType) {
- case INT32:
- statistics = new IntegerStatistics();
- break;
- case INT64:
- statistics = new LongStatistics();
- break;
- case FLOAT:
- statistics = new FloatStatistics();
- break;
- case DOUBLE:
- statistics = new DoubleStatistics();
- break;
- default:
- break;
- }
+ Statistics statistics = new LongStatistics(); // hard-coded, assuming value is long data type
// create chunkMetaData
ChunkMetadata chunkMetadata1 =
new ChunkMetadata(
@@ -175,58 +291,17 @@ public class PageReader implements IPageReader {
chunkMetadata.getOffsetOfChunkHeader(),
statistics);
chunkMetadata1.setVersion(chunkMetadata.getVersion()); // don't miss this
-
- // // important, used later for candidate point verification
- // // (1) candidate point itself whether is in the deleted interval
- // // (2) candidate point whether is overlapped by a chunk with a larger version
- // number and
- // // the chunk does not have a deleted interval overlapping this candidate point
- // chunkMetadata1.setDeleteIntervalList(chunkMetadata.getDeleteIntervalList());
- // // not use current Ii to modify deletedIntervalList any more
-
splitChunkMetadataMap.put(idx, chunkMetadata1);
}
BatchData batchData1 = splitBatchDataMap.get(idx);
ChunkMetadata chunkMetadata1 = splitChunkMetadataMap.get(idx);
- switch (dataType) {
- case INT32:
- int anInt = valueDecoder.readInt(valueBuffer);
- if (!isDeleted(timestamp) && (filter == null || filter.satisfy(timestamp, anInt))) {
- // update batchData1
- batchData1.putInt(timestamp, anInt);
- // update statistics of chunkMetadata1
- chunkMetadata1.getStatistics().update(timestamp, anInt);
- }
- break;
- case INT64:
- long aLong = valueDecoder.readLong(valueBuffer);
- if (!isDeleted(timestamp) && (filter == null || filter.satisfy(timestamp, aLong))) {
- // update batchData1
- batchData1.putLong(timestamp, aLong);
- // update statistics of chunkMetadata1
- chunkMetadata1.getStatistics().update(timestamp, aLong);
- }
- break;
- case FLOAT:
- float aFloat = valueDecoder.readFloat(valueBuffer);
- if (!isDeleted(timestamp) && (filter == null || filter.satisfy(timestamp, aFloat))) {
- // update batchData1
- batchData1.putFloat(timestamp, aFloat);
- // update statistics of chunkMetadata1
- chunkMetadata1.getStatistics().update(timestamp, aFloat);
- }
- break;
- case DOUBLE:
- double aDouble = valueDecoder.readDouble(valueBuffer);
- if (!isDeleted(timestamp) && (filter == null || filter.satisfy(timestamp, aDouble))) {
- // update batchData1
- batchData1.putDouble(timestamp, aDouble);
- // update statistics of chunkMetadata1
- chunkMetadata1.getStatistics().update(timestamp, aDouble);
- }
- break;
- default:
- throw new UnSupportedDataTypeException(String.valueOf(dataType));
+ long aLong = valueDecoder.readLong(
+ valueBuffer); // hard-coded, assuming value is long data type
+ if (!isDeleted(timestamp) && (filter == null || filter.satisfy(timestamp, aLong))) {
+ // update batchData1
+ batchData1.putLong(timestamp, aLong);
+ // update statistics of chunkMetadata1
+ chunkMetadata1.getStatistics().update(timestamp, aLong);
}
}
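
[Editor's note] A worked example (illustrative only) of the span index that both copies of split4CPV use, idx = floor((timestamp - startTime) / interval), with the [0,100)-in-spans-of-25 windows from MyTest2:

public class SpanIndexExample {
  public static void main(String[] args) {
    long startTime = 0L;
    long interval = 25L;
    long[] timestamps = {1L, 33L, 52L, 80L};
    for (long t : timestamps) {
      int idx = (int) Math.floor((t - startTime) * 1.0 / interval);
      System.out.println(t + " -> span " + idx); // spans 0, 1, 2, 3
    }
  }
}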
@@ -247,25 +322,41 @@ public class PageReader implements IPageReader {
}
}
- /**
- * Timestamps in a chunk increase monotonically, so the traversal can stop once a point's timestamp is greater than or equal to candidateTimestamp
- *
- * @return true if the point whose time equals candidateTimestamp exists, false if not
- */
- public boolean partialScan(long candidateTimestamp) throws IOException {
- while (timeDecoder.hasNext(timeBuffer)) {
- long timestamp = timeDecoder.readLong(timeBuffer);
- if (timestamp > candidateTimestamp) {
- return false;
+// /**
+// * Timestamps in a chunk increase monotonically, so the traversal can stop once a point's timestamp is greater than or equal to candidateTimestamp
+// *
+// * @return true if the point whose time equals candidateTimestamp exists, false if not
+// */
+// public boolean partialScan(long candidateTimestamp) throws IOException {
+// while (timeDecoder.hasNext(timeBuffer)) {
+// long timestamp = timeDecoder.readLong(timeBuffer);
+// if (timestamp > candidateTimestamp) {
+// return false;
+// }
+// if (timestamp == candidateTimestamp) {
+// return true;
+// }
+// }
+// return false;
+// }
+
+ public boolean partialScan4CPV(long candidateTimestamp) {
+ long[] timeData = ((LongDeltaDecoder) timeDecoder).getDataArray4CPV(timeBuffer);
+ for (long t : timeData) {
+ if (t > candidateTimestamp) {
+ return false; // not exist, return early
}
- if (timestamp == candidateTimestamp) {
- return true;
+ if (t == candidateTimestamp) {
+ return true; // exist
}
}
- return false;
+ return false; // not exist
}
- /** @return the returned BatchData may be empty, but never be null */
+ /**
+ * @return the returned BatchData may be empty, but never be null
+ */
@SuppressWarnings("squid:S3776") // Suppress high Cognitive Complexity warning
@Override
public BatchData getAllSatisfiedPageData(boolean ascending) throws IOException {
@@ -274,51 +365,97 @@ public class PageReader implements IPageReader {
while (timeDecoder.hasNext(timeBuffer)) { // TODO: timeDecoder.data
long timestamp = timeDecoder.readLong(timeBuffer);
- switch (dataType) {
- case BOOLEAN:
- boolean aBoolean = valueDecoder.readBoolean(valueBuffer);
- if (!isDeleted(timestamp)
- && (filter == null || filter.satisfy(timestamp, aBoolean))) { // TODO:remove
- pageData.putBoolean(timestamp, aBoolean);
- }
- break;
- case INT32:
- int anInt = valueDecoder.readInt(valueBuffer);
- if (!isDeleted(timestamp) && (filter == null || filter.satisfy(timestamp, anInt))) {
- pageData.putInt(timestamp, anInt);
- }
- break;
- case INT64:
- long aLong = valueDecoder.readLong(valueBuffer);
- if (!isDeleted(timestamp) && (filter == null || filter.satisfy(timestamp, aLong))) {
- pageData.putLong(timestamp, aLong);
- }
- break;
- case FLOAT:
- float aFloat = valueDecoder.readFloat(valueBuffer);
- if (!isDeleted(timestamp) && (filter == null || filter.satisfy(timestamp, aFloat))) {
- pageData.putFloat(timestamp, aFloat);
- }
- break;
- case DOUBLE:
- double aDouble = valueDecoder.readDouble(valueBuffer);
- if (!isDeleted(timestamp) && (filter == null || filter.satisfy(timestamp, aDouble))) {
- pageData.putDouble(timestamp, aDouble);
- }
- break;
- case TEXT:
- Binary aBinary = valueDecoder.readBinary(valueBuffer);
- if (!isDeleted(timestamp) && (filter == null || filter.satisfy(timestamp, aBinary))) {
- pageData.putBinary(timestamp, aBinary);
- }
- break;
- default:
- throw new UnSupportedDataTypeException(String.valueOf(dataType));
+ long aLong = valueDecoder
+ .readLong(valueBuffer); // hard-coded, assuming value is long data type
+ if (!isDeleted(timestamp) && (filter == null || filter.satisfy(timestamp, aLong))) {
+ pageData.putLong(timestamp, aLong);
}
+
+// switch (dataType) {
+// case BOOLEAN:
+// boolean aBoolean = valueDecoder.readBoolean(valueBuffer);
+// if (!isDeleted(timestamp)
+// && (filter == null || filter.satisfy(timestamp, aBoolean))) { // TODO:remove
+// pageData.putBoolean(timestamp, aBoolean);
+// }
+// break;
+// case INT32:
+// int anInt = valueDecoder.readInt(valueBuffer);
+// if (!isDeleted(timestamp) && (filter == null || filter.satisfy(timestamp, anInt))) {
+// pageData.putInt(timestamp, anInt);
+// }
+// break;
+// case INT64:
+// long aLong = valueDecoder.readLong(valueBuffer);
+// if (!isDeleted(timestamp) && (filter == null || filter.satisfy(timestamp, aLong))) {
+// pageData.putLong(timestamp, aLong);
+// }
+// break;
+// case FLOAT:
+// float aFloat = valueDecoder.readFloat(valueBuffer);
+// if (!isDeleted(timestamp) && (filter == null || filter.satisfy(timestamp, aFloat))) {
+// pageData.putFloat(timestamp, aFloat);
+// }
+// break;
+// case DOUBLE:
+// double aDouble = valueDecoder.readDouble(valueBuffer);
+// if (!isDeleted(timestamp) && (filter == null || filter.satisfy(timestamp, aDouble))) {
+// pageData.putDouble(timestamp, aDouble);
+// }
+// break;
+// case TEXT:
+// Binary aBinary = valueDecoder.readBinary(valueBuffer);
+// if (!isDeleted(timestamp) && (filter == null || filter.satisfy(timestamp, aBinary))) {
+// pageData.putBinary(timestamp, aBinary);
+// }
+// break;
+// default:
+// throw new UnSupportedDataTypeException(String.valueOf(dataType));
+// }
+
}
return pageData.flip();
}
+// public BatchData getAllSatisfiedPageData_new(boolean ascending) throws IOException {
+// // TODO: return null(value no need) or FP&LP
+//
+// BatchData pageData = BatchDataFactory.createBatchData(dataType, ascending, false);
+//
+// // Originally the hierarchy is chunk -> page -> pack, but here we hard-code the assumption chunk = page = pack,
+// // so we can fetch the data array directly in one batch instead of point by point.
+// // Originally the flow is decoder -> data array -> pageReader -> batchData, iterating over the points twice;
+// // here we try to merge them into a single pass.
+// long[] timestamps = ((LongDeltaDecoder)timeDecoder).getDataArray4CPV(timeBuffer);
+//
+// // here we write hard-coded assuming always long data type
+//// long aLong = valueDecoder.readLong(valueBuffer);
+// // TODO
+//
+//
+// // here we write hard-coded assuming no filters
+// if (!isDeleted(timestamp)) {
+// pageData.putLong(timestamp, aLong);
+// }
+//
+//// }
+//
+//// while (timeDecoder.hasNext(timeBuffer)) { // TODO: timeDecoder.data
+////
+//// long timestamp = timeDecoder.readLong(timeBuffer);
+////
+//// // here we write hard-coded assuming always long data type
+//// long aLong = valueDecoder.readLong(valueBuffer);
+////
+//// // here we write hard-coded assuming no filters
+//// if (!isDeleted(timestamp)) {
+//// pageData.putLong(timestamp, aLong);
+//// }
+////
+//// }
+// return pageData.flip();
+// }
+
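
[Editor's note] A hedged completion (hypothetical, not committed code) of the single-pass idea sketched above: decode the whole time column once via getDataArray4CPV and read the values in step, instead of iterating the decoders point by point. It inherits this branch's hard-coded assumptions: long values, chunk == page == pack, and a LongDeltaDecoder time column. The method name is mine.

// Hypothetical method inside PageReader; mirrors the commented sketch above.
public BatchData getAllSatisfiedPageDataSinglePass(boolean ascending) throws IOException {
  BatchData pageData = BatchDataFactory.createBatchData(dataType, ascending, false);
  // one batched decode of the time column instead of point-by-point readLong calls
  long[] timestamps = ((LongDeltaDecoder) timeDecoder).getDataArray4CPV(timeBuffer);
  for (long timestamp : timestamps) {
    long aLong = valueDecoder.readLong(valueBuffer); // hard-coded long value type
    if (!isDeleted(timestamp) && (filter == null || filter.satisfy(timestamp, aLong))) {
      pageData.putLong(timestamp, aLong);
    }
  }
  return pageData.flip();
}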
@Override
public Statistics getStatistics() {
return pageHeader.getStatistics();