You are viewing a plain text version of this content. The canonical link (a hyperlink in the original HTML archive) is not available in this plain-text rendering.
Posted to commits@iceberg.apache.org by sh...@apache.org on 2020/10/09 15:46:44 UTC
[iceberg] branch master updated: Parquet: Remove hard-coded file
paths from tests (#1562)
This is an automated email from the ASF dual-hosted git repository.
shardul pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/iceberg.git
The following commit(s) were added to refs/heads/master by this push:
new 29c2454 Parquet: Remove hard-coded file paths from tests (#1562)
29c2454 is described below
commit 29c245471b9b9244e944953a2d6e556a347573ad
Author: Ryan Blue <bl...@apache.org>
AuthorDate: Fri Oct 9 08:46:12 2020 -0700
Parquet: Remove hard-coded file paths from tests (#1562)
* Remove hard-coded file paths from tests.
* Fix checkstyle in tests.
---
.../iceberg/data/TestMetricsRowGroupFilter.java | 24 ++++++++---------
.../parquet/TestDictionaryRowGroupFilter.java | 31 +++++++++++-----------
2 files changed, 28 insertions(+), 27 deletions(-)
diff --git a/data/src/test/java/org/apache/iceberg/data/TestMetricsRowGroupFilter.java b/data/src/test/java/org/apache/iceberg/data/TestMetricsRowGroupFilter.java
index 13ab017..d413f47 100644
--- a/data/src/test/java/org/apache/iceberg/data/TestMetricsRowGroupFilter.java
+++ b/data/src/test/java/org/apache/iceberg/data/TestMetricsRowGroupFilter.java
@@ -62,6 +62,7 @@ import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
+import org.junit.rules.TemporaryFolder;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
@@ -138,15 +139,16 @@ public class TestMetricsRowGroupFilter {
TOO_LONG_FOR_STATS_PARQUET = sb.toString();
}
- private static final File orcFile = new File("/tmp/stats-row-group-filter-test.orc");
-
- private static final File parquetFile = new File("/tmp/stats-row-group-filter-test.parquet");
- private static MessageType parquetSchema = null;
- private static BlockMetaData rowGroupMetadata = null;
-
private static final int INT_MIN_VALUE = 30;
private static final int INT_MAX_VALUE = 79;
+ private File orcFile = null;
+ private MessageType parquetSchema = null;
+ private BlockMetaData rowGroupMetadata = null;
+
+ @Rule
+ public TemporaryFolder temp = new TemporaryFolder();
+
@Before
public void createInputFile() throws IOException {
switch (format) {
@@ -162,9 +164,8 @@ public class TestMetricsRowGroupFilter {
}
public void createOrcInputFile() throws IOException {
- if (orcFile.exists()) {
- Assert.assertTrue(orcFile.delete());
- }
+ this.orcFile = temp.newFile();
+ Assert.assertTrue(orcFile.delete());
OutputFile outFile = Files.localOutput(orcFile);
try (FileAppender<GenericRecord> appender = ORC.write(outFile)
@@ -201,9 +202,8 @@ public class TestMetricsRowGroupFilter {
}
private void createParquetInputFile() throws IOException {
- if (parquetFile.exists()) {
- Assert.assertTrue(parquetFile.delete());
- }
+ File parquetFile = temp.newFile();
+ Assert.assertTrue(parquetFile.delete());
// build struct field schema
org.apache.avro.Schema structSchema = AvroSchemaUtil.convert(_structFieldType);
diff --git a/parquet/src/test/java/org/apache/iceberg/parquet/TestDictionaryRowGroupFilter.java b/parquet/src/test/java/org/apache/iceberg/parquet/TestDictionaryRowGroupFilter.java
index f6ab643..894a8e4 100644
--- a/parquet/src/test/java/org/apache/iceberg/parquet/TestDictionaryRowGroupFilter.java
+++ b/parquet/src/test/java/org/apache/iceberg/parquet/TestDictionaryRowGroupFilter.java
@@ -45,8 +45,10 @@ import org.apache.parquet.hadoop.ParquetFileReader;
import org.apache.parquet.hadoop.metadata.BlockMetaData;
import org.apache.parquet.schema.MessageType;
import org.junit.Assert;
-import org.junit.BeforeClass;
+import org.junit.Before;
+import org.junit.Rule;
import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
import static org.apache.iceberg.avro.AvroSchemaUtil.convert;
import static org.apache.iceberg.expressions.Expressions.and;
@@ -107,24 +109,25 @@ public class TestDictionaryRowGroupFilter {
TOO_LONG_FOR_STATS = sb.toString();
}
- private static final File PARQUET_FILE = new File("/tmp/stats-row-group-filter-test.parquet");
- private static MessageType parquetSchema = null;
- private static BlockMetaData rowGroupMetadata = null;
- private static DictionaryPageReadStore dictionaryStore = null;
-
private static final int INT_MIN_VALUE = 30;
private static final int INT_MAX_VALUE = 79;
- @BeforeClass
- public static void createInputFile() throws IOException {
- if (PARQUET_FILE.exists()) {
- Assert.assertTrue(PARQUET_FILE.delete());
- }
+ private MessageType parquetSchema = null;
+ private BlockMetaData rowGroupMetadata = null;
+ private DictionaryPageReadStore dictionaryStore = null;
+
+ @Rule
+ public TemporaryFolder temp = new TemporaryFolder();
+
+ @Before
+ public void createInputFile() throws IOException {
+ File parquetFile = temp.newFile();
+ Assert.assertTrue(parquetFile.delete());
// build struct field schema
org.apache.avro.Schema structSchema = AvroSchemaUtil.convert(_structFieldType);
- OutputFile outFile = Files.localOutput(PARQUET_FILE);
+ OutputFile outFile = Files.localOutput(parquetFile);
try (FileAppender<Record> appender = Parquet.write(outFile)
.schema(FILE_SCHEMA)
.build()) {
@@ -150,7 +153,7 @@ public class TestDictionaryRowGroupFilter {
}
}
- InputFile inFile = Files.localInput(PARQUET_FILE);
+ InputFile inFile = Files.localInput(parquetFile);
ParquetFileReader reader = ParquetFileReader.open(ParquetIO.file(inFile));
@@ -158,8 +161,6 @@ public class TestDictionaryRowGroupFilter {
rowGroupMetadata = reader.getRowGroups().get(0);
parquetSchema = reader.getFileMetaData().getSchema();
dictionaryStore = reader.getNextDictionaryReader();
-
- PARQUET_FILE.deleteOnExit();
}
@Test