You are viewing a plain text version of this content. The canonical link for it is here. [Note: the hyperlink behind "here" is not preserved in this plain-text rendering; see the mailing-list archive for the original message.]
Posted to commits@iotdb.apache.org by qi...@apache.org on 2019/07/23 03:03:33 UTC
[incubator-iotdb] 03/05: fix bug in Example;
rename a test file in spark
This is an automated email from the ASF dual-hosted git repository.
qiaojialin pushed a commit to branch cherry_pick_rel
in repository https://gitbox.apache.org/repos/asf/incubator-iotdb.git
commit 2ac70dcc4f61f30ef7bbaae083adb320ba441ac5
Author: RuiLei <ru...@gmail.com>
AuthorDate: Sun Jul 21 20:12:46 2019 +0800
fix bug in Example; rename a test file in spark
---
.../iotdb/tool/{TsFileWrite.java => TsFileWriteTool.java} | 2 +-
.../test/scala/org/apache/iotdb/tsfile/ConverterTest.scala | 8 ++++----
.../test/scala/org/apache/iotdb/tsfile/HDFSInputTest.java | 4 ++--
.../src/test/scala/org/apache/iotdb/tsfile/TSFileSuit.scala | 12 ++++++------
.../java/org/apache/iotdb/tsfile/TsFileSequenceRead.java | 2 +-
5 files changed, 14 insertions(+), 14 deletions(-)
diff --git a/spark/src/test/scala/org/apache/iotdb/tool/TsFileWrite.java b/spark/src/test/scala/org/apache/iotdb/tool/TsFileWriteTool.java
similarity index 99%
rename from spark/src/test/scala/org/apache/iotdb/tool/TsFileWrite.java
rename to spark/src/test/scala/org/apache/iotdb/tool/TsFileWriteTool.java
index fb73f1b..b33f3d7 100644
--- a/spark/src/test/scala/org/apache/iotdb/tool/TsFileWrite.java
+++ b/spark/src/test/scala/org/apache/iotdb/tool/TsFileWriteTool.java
@@ -34,7 +34,7 @@ import org.apache.iotdb.tsfile.write.schema.MeasurementSchema;
/**
* An example of writing data to TsFile
*/
-public class TsFileWrite {
+public class TsFileWriteTool {
public static int largeNum = 1024 * 1024;
diff --git a/spark/src/test/scala/org/apache/iotdb/tsfile/ConverterTest.scala b/spark/src/test/scala/org/apache/iotdb/tsfile/ConverterTest.scala
index 86bbd82..5edc490 100644
--- a/spark/src/test/scala/org/apache/iotdb/tsfile/ConverterTest.scala
+++ b/spark/src/test/scala/org/apache/iotdb/tsfile/ConverterTest.scala
@@ -24,7 +24,7 @@ import java.util
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileStatus, FileSystem, Path}
-import org.apache.iotdb.tool.TsFileWrite
+import org.apache.iotdb.tool.TsFileWriteTool
import org.apache.iotdb.tsfile.common.constant.QueryConstant
import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType
import org.apache.iotdb.tsfile.io.HDFSInput
@@ -33,7 +33,7 @@ import org.apache.iotdb.tsfile.read.common.Field
import org.apache.iotdb.tsfile.utils.Binary
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.InternalRow
-import org.apache.spark.sql.catalyst.expressions.{GenericInternalRow, GenericRowWithSchema}
+import org.apache.spark.sql.catalyst.expressions.GenericInternalRow
import org.apache.spark.sql.sources._
import org.apache.spark.sql.types._
import org.junit.Assert
@@ -53,8 +53,8 @@ class ConverterTest extends FunSuite with BeforeAndAfterAll {
deleteDir(tsfile_folder)
}
tsfile_folder.mkdirs()
- new TsFileWrite().create1(tsfilePath1)
- new TsFileWrite().create2(tsfilePath2)
+ new TsFileWriteTool().create1(tsfilePath1)
+ new TsFileWriteTool().create2(tsfilePath2)
spark = SparkSession
.builder()
.config("spark.master", "local")
diff --git a/spark/src/test/scala/org/apache/iotdb/tsfile/HDFSInputTest.java b/spark/src/test/scala/org/apache/iotdb/tsfile/HDFSInputTest.java
index 94f8b0a..43a88a4 100644
--- a/spark/src/test/scala/org/apache/iotdb/tsfile/HDFSInputTest.java
+++ b/spark/src/test/scala/org/apache/iotdb/tsfile/HDFSInputTest.java
@@ -21,8 +21,8 @@ package org.apache.iotdb.tsfile;
import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
+import org.apache.iotdb.tool.TsFileWriteTool;
import org.apache.iotdb.tsfile.io.HDFSInput;
-import org.apache.iotdb.tool.TsFileWrite;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
@@ -41,7 +41,7 @@ public class HDFSInputTest {
deleteDir(tsfile_folder);
}
tsfile_folder.mkdirs();
- TsFileWrite tsFileWrite = new TsFileWrite();
+ TsFileWriteTool tsFileWrite = new TsFileWriteTool();
tsFileWrite.create1(path);
in = new HDFSInput(path);
}
diff --git a/spark/src/test/scala/org/apache/iotdb/tsfile/TSFileSuit.scala b/spark/src/test/scala/org/apache/iotdb/tsfile/TSFileSuit.scala
index 48d428c..232bf77 100644
--- a/spark/src/test/scala/org/apache/iotdb/tsfile/TSFileSuit.scala
+++ b/spark/src/test/scala/org/apache/iotdb/tsfile/TSFileSuit.scala
@@ -20,8 +20,8 @@ package org.apache.iotdb.tsfile
import java.io.File
+import org.apache.iotdb.tool.TsFileWriteTool
import org.apache.iotdb.tsfile.common.constant.QueryConstant
-import org.apache.iotdb.tool.TsFileWrite
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.types._
import org.junit.Assert
@@ -63,9 +63,9 @@ class TSFileSuit extends FunSuite with BeforeAndAfterAll {
}
tsfile_folder2.mkdirs()
- new TsFileWrite().create1(tsfile1)
- new TsFileWrite().create2(tsfile2)
- new TsFileWrite().create3(tsfile3)
+ new TsFileWriteTool().create1(tsfile1)
+ new TsFileWriteTool().create2(tsfile2)
+ new TsFileWriteTool().create3(tsfile3)
val output = new File(outputPath)
if (output.exists())
@@ -133,7 +133,7 @@ class TSFileSuit extends FunSuite with BeforeAndAfterAll {
df.createOrReplaceTempView("tsfile_table")
val newDf = spark.sql("select * from tsfile_table")
val count = newDf.count()
- Assert.assertEquals(TsFileWrite.largeNum, count)
+ Assert.assertEquals(TsFileWriteTool.largeNum, count)
}
test("testCount") {
@@ -188,7 +188,7 @@ class TSFileSuit extends FunSuite with BeforeAndAfterAll {
test("testMultiFiles") {
val df = spark.read.tsfile(tsfileFolder1)
df.createOrReplaceTempView("tsfile_table")
- Assert.assertEquals(TsFileWrite.largeNum + 7, df.count())
+ Assert.assertEquals(TsFileWriteTool.largeNum + 7, df.count())
}
test("testMultiFilesWithFilter1") {
diff --git a/tsfile/example/src/main/java/org/apache/iotdb/tsfile/TsFileSequenceRead.java b/tsfile/example/src/main/java/org/apache/iotdb/tsfile/TsFileSequenceRead.java
index 6c25c21..2635d25 100644
--- a/tsfile/example/src/main/java/org/apache/iotdb/tsfile/TsFileSequenceRead.java
+++ b/tsfile/example/src/main/java/org/apache/iotdb/tsfile/TsFileSequenceRead.java
@@ -48,7 +48,7 @@ public class TsFileSequenceRead {
System.out.println("file magic head: " + reader.readHeadMagic());
System.out.println("file magic tail: " + reader.readTailMagic());
System.out.println("Level 1 metadata position: " + reader.getFileMetadataPos());
- System.out.println("Level 1 metadata size: " + reader.getFileMetadataPos());
+ System.out.println("Level 1 metadata size: " + reader.getFileMetadataSize());
TsFileMetaData metaData = reader.readFileMetadata();
// Sequential reading of one ChunkGroup now follows this order:
// first SeriesChunks (headers and data) in one ChunkGroup, then the CHUNK_GROUP_FOOTER