Posted to commits@hudi.apache.org by vi...@apache.org on 2020/04/22 06:10:33 UTC
[incubator-hudi] branch master updated: [HUDI-809] Migrate CommonTestHarness to JUnit 5 (#1530)
This is an automated email from the ASF dual-hosted git repository.
vinoyang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-hudi.git
The following commit(s) were added to refs/heads/master by this push:
new 6e15eeb [HUDI-809] Migrate CommonTestHarness to JUnit 5 (#1530)
6e15eeb is described below
commit 6e15eebd81da41b1076179a8ddcedcf07d1c9043
Author: Raymond Xu <27...@users.noreply.github.com>
AuthorDate: Tue Apr 21 23:10:25 2020 -0700
[HUDI-809] Migrate CommonTestHarness to JUnit 5 (#1530)
---
.../common/table/TestHoodieTableMetaClient.java | 54 ++--
.../hudi/common/table/TestTimelineLayout.java | 24 +-
.../table/view/TestHoodieTableFileSystemView.java | 335 ++++++++++-----------
.../table/view/TestRocksDbBasedFileSystemView.java | 4 +-
.../testutils/HoodieCommonTestHarnessJunit5.java | 52 ++++
.../apache/hudi/common/util/TestFileIOUtils.java | 20 +-
hudi-hadoop-mr/pom.xml | 6 -
.../apache/hudi/hadoop/InputFormatTestUtil.java | 67 ++---
.../hudi/hadoop/TestHoodieParquetInputFormat.java | 81 +++--
.../hudi/hadoop/TestHoodieROTablePathFilter.java | 26 +-
.../realtime/TestHoodieCombineHiveInputFormat.java | 52 ++--
.../realtime/TestHoodieRealtimeRecordReader.java | 127 ++++----
.../hudi/utilities/TestHoodieSnapshotCopier.java | 22 +-
.../TestKafkaConnectHdfsProvider.java | 20 +-
14 files changed, 460 insertions(+), 430 deletions(-)
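Before the per-file hunks, a note on the mechanical pattern that accounts for most of the lines below: JUnit 4's org.junit.Assert takes the optional failure message as the FIRST argument, while JUnit 5's org.junit.jupiter.api.Assertions takes it as the LAST, so every messaged assertion flips its argument order during migration. A minimal before/after sketch (expectedName and actualName are illustrative, not from this commit):

    // JUnit 4: the optional message is the first argument
    import static org.junit.Assert.assertEquals;
    assertEquals("Table name should be raw_trips", expectedName, actualName);

    // JUnit 5: the message is the last argument, and may also be
    // a Supplier<String> so it is only constructed on failure
    import static org.junit.jupiter.api.Assertions.assertEquals;
    assertEquals(expectedName, actualName, "Table name should be raw_trips");
    assertEquals(expectedName, actualName, () -> "Table name should be " + expectedName);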
diff --git a/hudi-common/src/test/java/org/apache/hudi/common/table/TestHoodieTableMetaClient.java b/hudi-common/src/test/java/org/apache/hudi/common/table/TestHoodieTableMetaClient.java
index e1279d1..5e307bd 100644
--- a/hudi-common/src/test/java/org/apache/hudi/common/table/TestHoodieTableMetaClient.java
+++ b/hudi-common/src/test/java/org/apache/hudi/common/table/TestHoodieTableMetaClient.java
@@ -18,41 +18,41 @@
package org.apache.hudi.common.table;
-import org.apache.hudi.common.HoodieCommonTestHarness;
import org.apache.hudi.common.model.HoodieTestUtils;
import org.apache.hudi.common.table.timeline.HoodieActiveTimeline;
import org.apache.hudi.common.table.timeline.HoodieInstant;
import org.apache.hudi.common.table.timeline.HoodieTimeline;
+import org.apache.hudi.common.testutils.HoodieCommonTestHarnessJunit5;
import org.apache.hudi.common.util.Option;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
import java.io.IOException;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
/**
* Tests hoodie table meta client {@link HoodieTableMetaClient}.
*/
-public class TestHoodieTableMetaClient extends HoodieCommonTestHarness {
+public class TestHoodieTableMetaClient extends HoodieCommonTestHarnessJunit5 {
- @Before
+ @BeforeEach
public void init() throws IOException {
initMetaClient();
}
@Test
public void checkMetadata() {
- assertEquals("Table name should be raw_trips", HoodieTestUtils.RAW_TRIPS_TEST_NAME,
- metaClient.getTableConfig().getTableName());
- assertEquals("Basepath should be the one assigned", basePath, metaClient.getBasePath());
- assertEquals("Metapath should be ${basepath}/.hoodie", basePath + "/.hoodie", metaClient.getMetaPath());
+ assertEquals(HoodieTestUtils.RAW_TRIPS_TEST_NAME, metaClient.getTableConfig().getTableName(), "Table name should be raw_trips");
+ assertEquals(basePath, metaClient.getBasePath(), "Basepath should be the one assigned");
+ assertEquals(basePath + "/.hoodie", metaClient.getMetaPath(), "Metapath should be ${basepath}/.hoodie");
}
@Test
@@ -67,16 +67,15 @@ public class TestHoodieTableMetaClient extends HoodieCommonTestHarness {
commitTimeline.saveAsComplete(instant, Option.of("test-detail".getBytes()));
commitTimeline = commitTimeline.reload();
HoodieInstant completedInstant = HoodieTimeline.getCompletedInstant(instant);
- assertEquals("Commit should be 1 and completed", completedInstant, commitTimeline.getInstants().findFirst().get());
- assertArrayEquals("Commit value should be \"test-detail\"", "test-detail".getBytes(),
- commitTimeline.getInstantDetails(completedInstant).get());
+ assertEquals(completedInstant, commitTimeline.getInstants().findFirst().get(), "Commit should be 1 and completed");
+ assertArrayEquals("test-detail".getBytes(), commitTimeline.getInstantDetails(completedInstant).get(), "Commit value should be \"test-detail\"");
}
@Test
public void checkCommitTimeline() {
HoodieActiveTimeline activeTimeline = metaClient.getActiveTimeline();
HoodieTimeline activeCommitTimeline = activeTimeline.getCommitTimeline();
- assertTrue("Should be empty commit timeline", activeCommitTimeline.empty());
+ assertTrue(activeCommitTimeline.empty(), "Should be empty commit timeline");
HoodieInstant instant = new HoodieInstant(true, HoodieTimeline.COMMIT_ACTION, "1");
activeTimeline.createNewInstant(instant);
@@ -85,21 +84,20 @@ public class TestHoodieTableMetaClient extends HoodieCommonTestHarness {
// Commit timeline should not auto-reload every time getActiveCommitTimeline(), it should be cached
activeTimeline = metaClient.getActiveTimeline();
activeCommitTimeline = activeTimeline.getCommitTimeline();
- assertTrue("Should be empty commit timeline", activeCommitTimeline.empty());
+ assertTrue(activeCommitTimeline.empty(), "Should be empty commit timeline");
HoodieInstant completedInstant = HoodieTimeline.getCompletedInstant(instant);
activeTimeline = activeTimeline.reload();
activeCommitTimeline = activeTimeline.getCommitTimeline();
- assertFalse("Should be the 1 commit we made", activeCommitTimeline.empty());
- assertEquals("Commit should be 1", completedInstant, activeCommitTimeline.getInstants().findFirst().get());
- assertArrayEquals("Commit value should be \"test-detail\"", "test-detail".getBytes(),
- activeCommitTimeline.getInstantDetails(completedInstant).get());
+ assertFalse(activeCommitTimeline.empty(), "Should be the 1 commit we made");
+ assertEquals(completedInstant, activeCommitTimeline.getInstants().findFirst().get(), "Commit should be 1");
+ assertArrayEquals("test-detail".getBytes(), activeCommitTimeline.getInstantDetails(completedInstant).get(), "Commit value should be \"test-detail\"");
}
@Test
public void testEquals() throws IOException {
- HoodieTableMetaClient metaClient1 = HoodieTestUtils.init(folder.getRoot().getAbsolutePath(), getTableType());
- HoodieTableMetaClient metaClient2 = HoodieTestUtils.init(folder.getRoot().getAbsolutePath(), getTableType());
+ HoodieTableMetaClient metaClient1 = HoodieTestUtils.init(tempDir.toAbsolutePath().toString(), getTableType());
+ HoodieTableMetaClient metaClient2 = HoodieTestUtils.init(tempDir.toAbsolutePath().toString(), getTableType());
assertEquals(metaClient1, metaClient1);
assertEquals(metaClient1, metaClient2);
assertNotEquals(metaClient1, null);
@@ -108,8 +106,8 @@ public class TestHoodieTableMetaClient extends HoodieCommonTestHarness {
@Test
public void testToString() throws IOException {
- HoodieTableMetaClient metaClient1 = HoodieTestUtils.init(folder.getRoot().getAbsolutePath(), getTableType());
- HoodieTableMetaClient metaClient2 = HoodieTestUtils.init(folder.getRoot().getAbsolutePath(), getTableType());
+ HoodieTableMetaClient metaClient1 = HoodieTestUtils.init(tempDir.toAbsolutePath().toString(), getTableType());
+ HoodieTableMetaClient metaClient2 = HoodieTestUtils.init(tempDir.toAbsolutePath().toString(), getTableType());
assertEquals(metaClient1.toString(), metaClient2.toString());
assertNotEquals(metaClient1.toString(), new Object().toString());
}
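The lifecycle annotations in this file migrate one-for-one. A sketch of the JUnit 4 to JUnit 5 mapping applied throughout this commit (the class name is illustrative; only @Before and @Test actually appear in the hunks above):

    // @Before      -> @BeforeEach        // @BeforeClass -> @BeforeAll (static)
    // @After       -> @AfterEach         // @AfterClass  -> @AfterAll  (static)
    import org.junit.jupiter.api.BeforeEach;
    import org.junit.jupiter.api.Test;

    import java.io.IOException;

    class SomeHudiTest extends HoodieCommonTestHarnessJunit5 {

      @BeforeEach
      void init() throws IOException {
        initMetaClient(); // provided by the harness, as in the diff above
      }

      @Test
      void checkSomething() {
        // JUnit 5 test classes and methods no longer need to be public
      }
    }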
diff --git a/hudi-common/src/test/java/org/apache/hudi/common/table/TestTimelineLayout.java b/hudi-common/src/test/java/org/apache/hudi/common/table/TestTimelineLayout.java
index a2553e8..0386922 100644
--- a/hudi-common/src/test/java/org/apache/hudi/common/table/TestTimelineLayout.java
+++ b/hudi-common/src/test/java/org/apache/hudi/common/table/TestTimelineLayout.java
@@ -24,13 +24,15 @@ import org.apache.hudi.common.table.timeline.HoodieTimeline;
import org.apache.hudi.common.table.timeline.TimelineLayout;
import org.apache.hudi.common.table.timeline.versioning.TimelineLayoutVersion;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
public class TestTimelineLayout {
@Test
@@ -57,23 +59,23 @@ public class TestTimelineLayout {
List<HoodieInstant> layout0Instants = TimelineLayout.getLayout(new TimelineLayoutVersion(0))
.filterHoodieInstants(rawInstants.stream()).collect(Collectors.toList());
- Assert.assertEquals(rawInstants, layout0Instants);
+ assertEquals(rawInstants, layout0Instants);
List<HoodieInstant> layout1Instants = TimelineLayout.getLayout(TimelineLayoutVersion.CURR_LAYOUT_VERSION)
.filterHoodieInstants(rawInstants.stream()).collect(Collectors.toList());
- Assert.assertEquals(7, layout1Instants.size());
- Assert.assertTrue(layout1Instants.contains(
+ assertEquals(7, layout1Instants.size());
+ assertTrue(layout1Instants.contains(
new HoodieInstant(State.INFLIGHT, HoodieTimeline.DELTA_COMMIT_ACTION, "007")));
- Assert.assertTrue(layout1Instants.contains(
+ assertTrue(layout1Instants.contains(
new HoodieInstant(State.INFLIGHT, HoodieTimeline.COMPACTION_ACTION, "006")));
- Assert.assertTrue(layout1Instants.contains(
+ assertTrue(layout1Instants.contains(
new HoodieInstant(State.COMPLETED, HoodieTimeline.DELTA_COMMIT_ACTION, "005")));
- Assert.assertTrue(layout1Instants.contains(
+ assertTrue(layout1Instants.contains(
new HoodieInstant(State.INFLIGHT, HoodieTimeline.CLEAN_ACTION, "004")));
- Assert.assertTrue(layout1Instants.contains(
+ assertTrue(layout1Instants.contains(
new HoodieInstant(State.COMPLETED, HoodieTimeline.COMMIT_ACTION, "003")));
- Assert.assertTrue(layout1Instants.contains(
+ assertTrue(layout1Instants.contains(
new HoodieInstant(State.COMPLETED, HoodieTimeline.DELTA_COMMIT_ACTION, "002")));
- Assert.assertTrue(layout1Instants.contains(
+ assertTrue(layout1Instants.contains(
new HoodieInstant(State.COMPLETED, HoodieTimeline.CLEAN_ACTION, "001")));
}
}
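TestTimelineLayout previously used qualified Assert.assertX(...) calls rather than static imports, so here the migration touches both the import block and every call site. A small sketch of the two styles:

    // Before: JUnit 4, qualified calls through the Assert class
    import org.junit.Assert;
    Assert.assertEquals(7, layout1Instants.size());

    // After: JUnit 5, static imports keep the call sites terse
    import static org.junit.jupiter.api.Assertions.assertEquals;
    assertEquals(7, layout1Instants.size());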
diff --git a/hudi-common/src/test/java/org/apache/hudi/common/table/view/TestHoodieTableFileSystemView.java b/hudi-common/src/test/java/org/apache/hudi/common/table/view/TestHoodieTableFileSystemView.java
index 0a24eff..15ce7ff 100644
--- a/hudi-common/src/test/java/org/apache/hudi/common/table/view/TestHoodieTableFileSystemView.java
+++ b/hudi-common/src/test/java/org/apache/hudi/common/table/view/TestHoodieTableFileSystemView.java
@@ -19,7 +19,6 @@
package org.apache.hudi.common.table.view;
import org.apache.hudi.avro.model.HoodieCompactionPlan;
-import org.apache.hudi.common.HoodieCommonTestHarness;
import org.apache.hudi.common.fs.FSUtils;
import org.apache.hudi.common.model.CompactionOperation;
import org.apache.hudi.common.model.FileSlice;
@@ -35,6 +34,7 @@ import org.apache.hudi.common.table.timeline.HoodieTimeline;
import org.apache.hudi.common.table.timeline.TimelineMetadataUtils;
import org.apache.hudi.common.table.view.TableFileSystemView.BaseFileOnlyView;
import org.apache.hudi.common.table.view.TableFileSystemView.SliceView;
+import org.apache.hudi.common.testutils.HoodieCommonTestHarnessJunit5;
import org.apache.hudi.common.util.CompactionUtils;
import org.apache.hudi.common.util.Option;
import org.apache.hudi.common.util.collection.Pair;
@@ -43,9 +43,8 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
import java.io.File;
import java.io.IOException;
@@ -59,15 +58,15 @@ import java.util.UUID;
import java.util.stream.Collectors;
import java.util.stream.Stream;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Tests hoodie table file system view {@link HoodieTableFileSystemView}.
*/
@SuppressWarnings("ResultOfMethodCallIgnored")
-public class TestHoodieTableFileSystemView extends HoodieCommonTestHarness {
+public class TestHoodieTableFileSystemView extends HoodieCommonTestHarnessJunit5 {
private static final Logger LOG = LogManager.getLogger(TestHoodieTableFileSystemView.class);
@@ -77,7 +76,7 @@ public class TestHoodieTableFileSystemView extends HoodieCommonTestHarness {
protected BaseFileOnlyView roView;
protected SliceView rtView;
- @Before
+ @BeforeEach
public void init() throws IOException {
initMetaClient();
refreshFsView();
@@ -134,49 +133,49 @@ public class TestHoodieTableFileSystemView extends HoodieCommonTestHarness {
refreshFsView();
List<HoodieBaseFile> dataFiles = roView.getLatestBaseFiles().collect(Collectors.toList());
- assertTrue("No data file expected", dataFiles.isEmpty());
+ assertTrue(dataFiles.isEmpty(), "No data file expected");
List<FileSlice> fileSliceList = rtView.getLatestFileSlices(partitionPath).collect(Collectors.toList());
assertEquals(1, fileSliceList.size());
FileSlice fileSlice = fileSliceList.get(0);
- assertEquals("File-Id must be set correctly", fileId, fileSlice.getFileId());
- assertFalse("Data file for base instant must be present", fileSlice.getBaseFile().isPresent());
- assertEquals("Base Instant for file-group set correctly", instantTime1, fileSlice.getBaseInstantTime());
+ assertEquals(fileId, fileSlice.getFileId(), "File-Id must be set correctly");
+ assertFalse(fileSlice.getBaseFile().isPresent(), "Data file for base instant must not be present");
+ assertEquals(instantTime1, fileSlice.getBaseInstantTime(), "Base Instant for file-group set correctly");
List<HoodieLogFile> logFiles = fileSlice.getLogFiles().collect(Collectors.toList());
- assertEquals("Correct number of log-files shows up in file-slice", 2, logFiles.size());
- assertEquals("Log File Order check", fileName2, logFiles.get(0).getFileName());
- assertEquals("Log File Order check", fileName1, logFiles.get(1).getFileName());
+ assertEquals(2, logFiles.size(), "Correct number of log-files shows up in file-slice");
+ assertEquals(fileName2, logFiles.get(0).getFileName(), "Log File Order check");
+ assertEquals(fileName1, logFiles.get(1).getFileName(), "Log File Order check");
// Check Merged File Slices API
fileSliceList =
rtView.getLatestMergedFileSlicesBeforeOrOn(partitionPath, deltaInstantTime2).collect(Collectors.toList());
assertEquals(1, fileSliceList.size());
fileSlice = fileSliceList.get(0);
- assertEquals("File-Id must be set correctly", fileId, fileSlice.getFileId());
- assertFalse("Data file for base instant must be present", fileSlice.getBaseFile().isPresent());
- assertEquals("Base Instant for file-group set correctly", instantTime1, fileSlice.getBaseInstantTime());
+ assertEquals(fileId, fileSlice.getFileId(), "File-Id must be set correctly");
+ assertFalse(fileSlice.getBaseFile().isPresent(), "Data file for base instant must not be present");
+ assertEquals(instantTime1, fileSlice.getBaseInstantTime(), "Base Instant for file-group set correctly");
logFiles = fileSlice.getLogFiles().collect(Collectors.toList());
- assertEquals("Correct number of log-files shows up in file-slice", 2, logFiles.size());
- assertEquals("Log File Order check", fileName2, logFiles.get(0).getFileName());
- assertEquals("Log File Order check", fileName1, logFiles.get(1).getFileName());
+ assertEquals(2, logFiles.size(), "Correct number of log-files shows up in file-slice");
+ assertEquals(fileName2, logFiles.get(0).getFileName(), "Log File Order check");
+ assertEquals(fileName1, logFiles.get(1).getFileName(), "Log File Order check");
// Check UnCompacted File Slices API
fileSliceList = rtView.getLatestUnCompactedFileSlices(partitionPath).collect(Collectors.toList());
assertEquals(1, fileSliceList.size());
fileSlice = fileSliceList.get(0);
- assertEquals("File-Id must be set correctly", fileId, fileSlice.getFileId());
- assertFalse("Data file for base instant must be present", fileSlice.getBaseFile().isPresent());
- assertEquals("Base Instant for file-group set correctly", instantTime1, fileSlice.getBaseInstantTime());
+ assertEquals(fileId, fileSlice.getFileId(), "File-Id must be set correctly");
+ assertFalse(fileSlice.getBaseFile().isPresent(), "Data file for base instant must not be present");
+ assertEquals(instantTime1, fileSlice.getBaseInstantTime(), "Base Instant for file-group set correctly");
logFiles = fileSlice.getLogFiles().collect(Collectors.toList());
- assertEquals("Correct number of log-files shows up in file-slice", 2, logFiles.size());
- assertEquals("Log File Order check", fileName2, logFiles.get(0).getFileName());
- assertEquals("Log File Order check", fileName1, logFiles.get(1).getFileName());
-
- assertEquals("Total number of file-slices in view matches expected", expNumTotalFileSlices,
- rtView.getAllFileSlices(partitionPath).count());
- assertEquals("Total number of data-files in view matches expected", expNumTotalDataFiles,
- roView.getAllBaseFiles(partitionPath).count());
- assertEquals("Total number of file-groups in view matches expected", 1,
- fsView.getAllFileGroups(partitionPath).count());
+ assertEquals(2, logFiles.size(), "Correct number of log-files shows up in file-slice");
+ assertEquals(fileName2, logFiles.get(0).getFileName(), "Log File Order check");
+ assertEquals(fileName1, logFiles.get(1).getFileName(), "Log File Order check");
+
+ assertEquals(expNumTotalFileSlices, rtView.getAllFileSlices(partitionPath).count(),
+ "Total number of file-slices in view matches expected");
+ assertEquals(expNumTotalDataFiles, roView.getAllBaseFiles(partitionPath).count(),
+ "Total number of data-files in view matches expected");
+ assertEquals(1, fsView.getAllFileGroups(partitionPath).count(),
+ "Total number of file-groups in view matches expected");
}
@Test
@@ -288,11 +287,10 @@ public class TestHoodieTableFileSystemView extends HoodieCommonTestHarness {
// View immediately after scheduling compaction
refreshFsView();
List<FileSlice> slices = rtView.getLatestFileSlices(partitionPath).collect(Collectors.toList());
- assertEquals("Expected latest file-slices", 1, slices.size());
- assertEquals("Base-Instant must be compaction Instant", compactionRequestedTime,
- slices.get(0).getBaseInstantTime());
- assertFalse("Latest File Slice must not have data-file", slices.get(0).getBaseFile().isPresent());
- assertEquals("Latest File Slice must not have any log-files", 0, slices.get(0).getLogFiles().count());
+ assertEquals(1, slices.size(), "Expected latest file-slices");
+ assertEquals(compactionRequestedTime, slices.get(0).getBaseInstantTime(), "Base-Instant must be compaction Instant");
+ assertFalse(slices.get(0).getBaseFile().isPresent(), "Latest File Slice must not have data-file");
+ assertEquals(0, slices.get(0).getLogFiles().count(), "Latest File Slice must not have any log-files");
// Fake delta-ingestion after compaction-requested
String deltaInstantTime4 = "5";
@@ -313,73 +311,71 @@ public class TestHoodieTableFileSystemView extends HoodieCommonTestHarness {
List<HoodieBaseFile> dataFiles = roView.getAllBaseFiles(partitionPath).collect(Collectors.toList());
if (skipCreatingDataFile) {
- assertTrue("No data file expected", dataFiles.isEmpty());
+ assertTrue(dataFiles.isEmpty(), "No data file expected");
} else {
- assertEquals("One data-file is expected as there is only one file-group", 1, dataFiles.size());
- assertEquals("Expect only valid data-file", dataFileName, dataFiles.get(0).getFileName());
+ assertEquals(1, dataFiles.size(), "One data-file is expected as there is only one file-group");
+ assertEquals(dataFileName, dataFiles.get(0).getFileName(), "Expect only valid data-file");
}
// Merge API Tests
List<FileSlice> fileSliceList =
rtView.getLatestMergedFileSlicesBeforeOrOn(partitionPath, deltaInstantTime5).collect(Collectors.toList());
- assertEquals("Expect file-slice to be merged", 1, fileSliceList.size());
+ assertEquals(1, fileSliceList.size(), "Expect file-slice to be merged");
FileSlice fileSlice = fileSliceList.get(0);
assertEquals(fileId, fileSlice.getFileId());
if (!skipCreatingDataFile) {
- assertEquals("Data file must be present", dataFileName, fileSlice.getBaseFile().get().getFileName());
+ assertEquals(dataFileName, fileSlice.getBaseFile().get().getFileName(), "Data file must be present");
} else {
- assertFalse("No data-file expected as it was not created", fileSlice.getBaseFile().isPresent());
+ assertFalse(fileSlice.getBaseFile().isPresent(), "No data-file expected as it was not created");
}
- assertEquals("Base Instant of penultimate file-slice must be base instant", instantTime1,
- fileSlice.getBaseInstantTime());
+ assertEquals(instantTime1, fileSlice.getBaseInstantTime(), "Base Instant of penultimate file-slice must be base instant");
List<HoodieLogFile> logFiles = fileSlice.getLogFiles().collect(Collectors.toList());
- assertEquals("Log files must include those after compaction request", 4, logFiles.size());
- assertEquals("Log File Order check", fileName4, logFiles.get(0).getFileName());
- assertEquals("Log File Order check", fileName3, logFiles.get(1).getFileName());
- assertEquals("Log File Order check", fileName2, logFiles.get(2).getFileName());
- assertEquals("Log File Order check", fileName1, logFiles.get(3).getFileName());
+ assertEquals(4, logFiles.size(), "Log files must include those after compaction request");
+ assertEquals(fileName4, logFiles.get(0).getFileName(), "Log File Order check");
+ assertEquals(fileName3, logFiles.get(1).getFileName(), "Log File Order check");
+ assertEquals(fileName2, logFiles.get(2).getFileName(), "Log File Order check");
+ assertEquals(fileName1, logFiles.get(3).getFileName(), "Log File Order check");
fileSliceList =
rtView.getLatestFileSlicesBeforeOrOn(partitionPath, deltaInstantTime5, true).collect(Collectors.toList());
- assertEquals("Expect only one file-id", 1, fileSliceList.size());
+ assertEquals(1, fileSliceList.size(), "Expect only one file-id");
fileSlice = fileSliceList.get(0);
assertEquals(fileId, fileSlice.getFileId());
- assertFalse("No data-file expected in latest file-slice", fileSlice.getBaseFile().isPresent());
- assertEquals("Compaction requested instant must be base instant", compactionRequestedTime,
- fileSlice.getBaseInstantTime());
+ assertFalse(fileSlice.getBaseFile().isPresent(), "No data-file expected in latest file-slice");
+ assertEquals(compactionRequestedTime, fileSlice.getBaseInstantTime(), "Compaction requested instant must be base instant");
logFiles = fileSlice.getLogFiles().collect(Collectors.toList());
- assertEquals("Log files must include only those after compaction request", 2, logFiles.size());
- assertEquals("Log File Order check", fileName4, logFiles.get(0).getFileName());
- assertEquals("Log File Order check", fileName3, logFiles.get(1).getFileName());
+ assertEquals(2, logFiles.size(), "Log files must include only those after compaction request");
+ assertEquals(fileName4, logFiles.get(0).getFileName(), "Log File Order check");
+ assertEquals(fileName3, logFiles.get(1).getFileName(), "Log File Order check");
// Data Files API tests
dataFiles = roView.getLatestBaseFiles().collect(Collectors.toList());
if (skipCreatingDataFile) {
- assertEquals("Expect no data file to be returned", 0, dataFiles.size());
+ assertEquals(0, dataFiles.size(), "Expect no data file to be returned");
} else {
- assertEquals("Expect only one data-file to be sent", 1, dataFiles.size());
- dataFiles.forEach(df -> assertEquals("Expect data-file for instant 1 be returned", df.getCommitTime(), instantTime1));
+ assertEquals(1, dataFiles.size(), "Expect only one data-file to be sent");
+ dataFiles.forEach(df -> assertEquals(df.getCommitTime(), instantTime1, "Expect data-file for instant 1 to be returned"));
}
dataFiles = roView.getLatestBaseFiles(partitionPath).collect(Collectors.toList());
if (skipCreatingDataFile) {
- assertEquals("Expect no data file to be returned", 0, dataFiles.size());
+ assertEquals(0, dataFiles.size(), "Expect no data file to be returned");
} else {
- assertEquals("Expect only one data-file to be sent", 1, dataFiles.size());
- dataFiles.forEach(df -> assertEquals("Expect data-file for instant 1 be returned", df.getCommitTime(), instantTime1));
+ assertEquals(1, dataFiles.size(), "Expect only one data-file to be sent");
+ dataFiles.forEach(df -> assertEquals(df.getCommitTime(), instantTime1, "Expect data-file for instant 1 to be returned"));
}
dataFiles = roView.getLatestBaseFilesBeforeOrOn(partitionPath, deltaInstantTime5).collect(Collectors.toList());
if (skipCreatingDataFile) {
- assertEquals("Expect no data file to be returned", 0, dataFiles.size());
+ assertEquals(0, dataFiles.size(), "Expect no data file to be returned");
} else {
- assertEquals("Expect only one data-file to be sent", 1, dataFiles.size());
- dataFiles.forEach(df -> assertEquals("Expect data-file for instant 1 be returned", df.getCommitTime(), instantTime1));
+ assertEquals(1, dataFiles.size(), "Expect only one data-file to be sent");
+ dataFiles.forEach(df -> assertEquals(df.getCommitTime(), instantTime1, "Expect data-file for instant 1 to be returned"));
}
dataFiles = roView.getLatestBaseFilesInRange(allInstantTimes).collect(Collectors.toList());
if (skipCreatingDataFile) {
- assertEquals("Expect no data file to be returned", 0, dataFiles.size());
+ assertEquals(0, dataFiles.size(), "Expect no data file to be returned");
} else {
- assertEquals("Expect only one data-file to be sent", 1, dataFiles.size());
- dataFiles.forEach(df -> assertEquals("Expect data-file for instant 1 be returned", df.getCommitTime(), instantTime1));
+ assertEquals(1, dataFiles.size(), "Expect only one data-file to be sent");
+ dataFiles.forEach(df -> assertEquals(df.getCommitTime(), instantTime1, "Expect data-file for instant 1 to be returned"));
}
// Inflight/Orphan File-groups needs to be in the view
@@ -420,22 +416,22 @@ public class TestHoodieTableFileSystemView extends HoodieCommonTestHarness {
}).collect(Collectors.toList());
if (includeInvalidAndInflight) {
- assertEquals("Inflight/Orphan data-file is also expected",
- 2 + (isCompactionInFlight ? 1 : 0) + (skipCreatingDataFile ? 0 : 1), dataFiles.size());
+ assertEquals(2 + (isCompactionInFlight ? 1 : 0) + (skipCreatingDataFile ? 0 : 1), dataFiles.size(),
+ "Inflight/Orphan data-file is also expected");
Set<String> fileNames = dataFiles.stream().map(HoodieBaseFile::getFileName).collect(Collectors.toSet());
- assertTrue("Expect orphan data-file to be present", fileNames.contains(orphanDataFileName));
- assertTrue("Expect inflight data-file to be present", fileNames.contains(inflightDataFileName));
+ assertTrue(fileNames.contains(orphanDataFileName), "Expect orphan data-file to be present");
+ assertTrue(fileNames.contains(inflightDataFileName), "Expect inflight data-file to be present");
if (!skipCreatingDataFile) {
- assertTrue("Expect old committed data-file", fileNames.contains(dataFileName));
+ assertTrue(fileNames.contains(dataFileName), "Expect old committed data-file");
}
if (isCompactionInFlight) {
- assertTrue("Expect inflight compacted data file to be present", fileNames.contains(compactDataFileName));
+ assertTrue(fileNames.contains(compactDataFileName), "Expect inflight compacted data file to be present");
}
fileSliceList = getLatestRawFileSlices(partitionPath).collect(Collectors.toList());
- assertEquals("Expect both inflight and orphan file-slice to be included", includeInvalidAndInflight ? 5 : 1,
- fileSliceList.size());
+ assertEquals(includeInvalidAndInflight ? 5 : 1, fileSliceList.size(),
+ "Expect both inflight and orphan file-slice to be included");
Map<String, FileSlice> fileSliceMap =
fileSliceList.stream().collect(Collectors.toMap(FileSlice::getFileId, r -> r));
FileSlice orphanFileSliceWithDataFile = fileSliceMap.get(orphanFileId1);
@@ -443,33 +439,33 @@ public class TestHoodieTableFileSystemView extends HoodieCommonTestHarness {
FileSlice inflightFileSliceWithDataFile = fileSliceMap.get(inflightFileId1);
FileSlice inflightFileSliceWithLogFile = fileSliceMap.get(inflightFileId2);
- assertEquals("Orphan File Slice with data-file check base-commit", invalidInstantId,
- orphanFileSliceWithDataFile.getBaseInstantTime());
- assertEquals("Orphan File Slice with data-file check data-file", orphanDataFileName,
- orphanFileSliceWithDataFile.getBaseFile().get().getFileName());
- assertEquals("Orphan File Slice with data-file check data-file", 0,
- orphanFileSliceWithDataFile.getLogFiles().count());
- assertEquals("Inflight File Slice with data-file check base-commit", inflightDeltaInstantTime,
- inflightFileSliceWithDataFile.getBaseInstantTime());
- assertEquals("Inflight File Slice with data-file check data-file", inflightDataFileName,
- inflightFileSliceWithDataFile.getBaseFile().get().getFileName());
- assertEquals("Inflight File Slice with data-file check data-file", 0,
- inflightFileSliceWithDataFile.getLogFiles().count());
- assertEquals("Orphan File Slice with log-file check base-commit", invalidInstantId,
- orphanFileSliceWithLogFile.getBaseInstantTime());
- assertFalse("Orphan File Slice with log-file check data-file",
- orphanFileSliceWithLogFile.getBaseFile().isPresent());
+ assertEquals(invalidInstantId, orphanFileSliceWithDataFile.getBaseInstantTime(),
+ "Orphan File Slice with data-file check base-commit");
+ assertEquals(orphanDataFileName, orphanFileSliceWithDataFile.getBaseFile().get().getFileName(),
+ "Orphan File Slice with data-file check data-file");
+ assertEquals(0, orphanFileSliceWithDataFile.getLogFiles().count(),
+ "Orphan File Slice with data-file check data-file");
+ assertEquals(inflightDeltaInstantTime, inflightFileSliceWithDataFile.getBaseInstantTime(),
+ "Inflight File Slice with data-file check base-commit");
+ assertEquals(inflightDataFileName, inflightFileSliceWithDataFile.getBaseFile().get().getFileName(),
+ "Inflight File Slice with data-file check data-file");
+ assertEquals(0, inflightFileSliceWithDataFile.getLogFiles().count(),
+ "Inflight File Slice with data-file check data-file");
+ assertEquals(invalidInstantId, orphanFileSliceWithLogFile.getBaseInstantTime(),
+ "Orphan File Slice with log-file check base-commit");
+ assertFalse(orphanFileSliceWithLogFile.getBaseFile().isPresent(),
+ "Orphan File Slice with log-file check data-file");
logFiles = orphanFileSliceWithLogFile.getLogFiles().collect(Collectors.toList());
- assertEquals("Orphan File Slice with log-file check data-file", 1, logFiles.size());
- assertEquals("Orphan File Slice with log-file check data-file", orphanLogFileName, logFiles.get(0).getFileName());
- assertEquals("Inflight File Slice with log-file check base-commit", inflightDeltaInstantTime,
- inflightFileSliceWithLogFile.getBaseInstantTime());
- assertFalse("Inflight File Slice with log-file check data-file",
- inflightFileSliceWithLogFile.getBaseFile().isPresent());
+ assertEquals(1, logFiles.size(), "Orphan File Slice with log-file check data-file");
+ assertEquals(orphanLogFileName, logFiles.get(0).getFileName(), "Orphan File Slice with log-file check data-file");
+ assertEquals(inflightDeltaInstantTime, inflightFileSliceWithLogFile.getBaseInstantTime(),
+ "Inflight File Slice with log-file check base-commit");
+ assertFalse(inflightFileSliceWithLogFile.getBaseFile().isPresent(),
+ "Inflight File Slice with log-file check data-file");
logFiles = inflightFileSliceWithLogFile.getLogFiles().collect(Collectors.toList());
- assertEquals("Inflight File Slice with log-file check data-file", 1, logFiles.size());
- assertEquals("Inflight File Slice with log-file check data-file", inflightLogFileName,
- logFiles.get(0).getFileName());
+ assertEquals(1, logFiles.size(), "Inflight File Slice with log-file check data-file");
+ assertEquals(inflightLogFileName, logFiles.get(0).getFileName(),
+ "Inflight File Slice with log-file check data-file");
}
compactionInstant = new HoodieInstant(State.INFLIGHT, HoodieTimeline.COMPACTION_ACTION, compactionRequestedTime);
@@ -490,41 +486,41 @@ public class TestHoodieTableFileSystemView extends HoodieCommonTestHarness {
LOG.info("FILESLICE LIST=" + fileSliceList);
dataFiles = fileSliceList.stream().map(FileSlice::getBaseFile).filter(Option::isPresent).map(Option::get)
.collect(Collectors.toList());
- assertEquals("Expect only one data-files in latest view as there is only one file-group", 1, dataFiles.size());
- assertEquals("Data Filename must match", compactDataFileName, dataFiles.get(0).getFileName());
- assertEquals("Only one latest file-slice in the partition", 1, fileSliceList.size());
+ assertEquals(1, dataFiles.size(), "Expect only one data-files in latest view as there is only one file-group");
+ assertEquals(compactDataFileName, dataFiles.get(0).getFileName(), "Data Filename must match");
+ assertEquals(1, fileSliceList.size(), "Only one latest file-slice in the partition");
fileSlice = fileSliceList.get(0);
- assertEquals("Check file-Id is set correctly", fileId, fileSlice.getFileId());
- assertEquals("Check data-filename is set correctly", compactDataFileName,
- fileSlice.getBaseFile().get().getFileName());
- assertEquals("Ensure base-instant is now compaction request instant", compactionRequestedTime,
- fileSlice.getBaseInstantTime());
+ assertEquals(fileId, fileSlice.getFileId(), "Check file-Id is set correctly");
+ assertEquals(compactDataFileName, fileSlice.getBaseFile().get().getFileName(),
+ "Check data-filename is set correctly");
+ assertEquals(compactionRequestedTime, fileSlice.getBaseInstantTime(),
+ "Ensure base-instant is now compaction request instant");
logFiles = fileSlice.getLogFiles().collect(Collectors.toList());
- assertEquals("Only log-files after compaction request shows up", 2, logFiles.size());
- assertEquals("Log File Order check", fileName4, logFiles.get(0).getFileName());
- assertEquals("Log File Order check", fileName3, logFiles.get(1).getFileName());
+ assertEquals(2, logFiles.size(), "Only log-files after compaction request shows up");
+ assertEquals(fileName4, logFiles.get(0).getFileName(), "Log File Order check");
+ assertEquals(fileName3, logFiles.get(1).getFileName(), "Log File Order check");
// Data Files API tests
dataFiles = roView.getLatestBaseFiles().collect(Collectors.toList());
- assertEquals("Expect only one data-file to be sent", 1, dataFiles.size());
- dataFiles.forEach(df -> assertEquals("Expect data-file created by compaction be returned", df.getCommitTime(), compactionRequestedTime));
+ assertEquals(1, dataFiles.size(), "Expect only one data-file to be sent");
+ dataFiles.forEach(df -> assertEquals(df.getCommitTime(), compactionRequestedTime, "Expect data-file created by compaction to be returned"));
dataFiles = roView.getLatestBaseFiles(partitionPath).collect(Collectors.toList());
- assertEquals("Expect only one data-file to be sent", 1, dataFiles.size());
- dataFiles.forEach(df -> assertEquals("Expect data-file created by compaction be returned", df.getCommitTime(), compactionRequestedTime));
+ assertEquals(1, dataFiles.size(), "Expect only one data-file to be sent");
+ dataFiles.forEach(df -> assertEquals(df.getCommitTime(), compactionRequestedTime, "Expect data-file created by compaction to be returned"));
dataFiles = roView.getLatestBaseFilesBeforeOrOn(partitionPath, deltaInstantTime5).collect(Collectors.toList());
- assertEquals("Expect only one data-file to be sent", 1, dataFiles.size());
- dataFiles.forEach(df -> assertEquals("Expect data-file created by compaction be returned", df.getCommitTime(), compactionRequestedTime));
+ assertEquals(1, dataFiles.size(), "Expect only one data-file to be sent");
+ dataFiles.forEach(df -> assertEquals(df.getCommitTime(), compactionRequestedTime, "Expect data-file created by compaction to be returned"));
dataFiles = roView.getLatestBaseFilesInRange(allInstantTimes).collect(Collectors.toList());
- assertEquals("Expect only one data-file to be sent", 1, dataFiles.size());
- dataFiles.forEach(df -> assertEquals("Expect data-file created by compaction be returned", df.getCommitTime(), compactionRequestedTime));
+ assertEquals(1, dataFiles.size(), "Expect only one data-file to be sent");
+ dataFiles.forEach(df -> assertEquals(df.getCommitTime(), compactionRequestedTime, "Expect data-file created by compaction to be returned"));
- assertEquals("Total number of file-slices in partitions matches expected", expTotalFileSlices,
- rtView.getAllFileSlices(partitionPath).count());
- assertEquals("Total number of data-files in partitions matches expected", expTotalDataFiles,
- roView.getAllBaseFiles(partitionPath).count());
+ assertEquals(expTotalFileSlices, rtView.getAllFileSlices(partitionPath).count(),
+ "Total number of file-slices in partitions matches expected");
+ assertEquals(expTotalDataFiles, roView.getAllBaseFiles(partitionPath).count(),
+ "Total number of data-files in partitions matches expected");
// file-groups includes inflight/invalid file-ids
- assertEquals("Total number of file-groups in partitions matches expected", 5,
- fsView.getAllFileGroups(partitionPath).count());
+ assertEquals(5, fsView.getAllFileGroups(partitionPath).count(),
+ "Total number of file-groups in partitions matches expected");
}
@Test
@@ -533,23 +529,23 @@ public class TestHoodieTableFileSystemView extends HoodieCommonTestHarness {
new File(basePath + "/" + partitionPath).mkdirs();
String fileId = UUID.randomUUID().toString();
- assertFalse("No commit, should not find any data file", roView.getLatestBaseFiles(partitionPath)
- .anyMatch(dfile -> dfile.getFileId().equals(fileId)));
+ assertFalse(roView.getLatestBaseFiles(partitionPath).anyMatch(dfile -> dfile.getFileId().equals(fileId)),
+ "No commit, should not find any data file");
// Only one commit, but is not safe
String commitTime1 = "1";
String fileName1 = FSUtils.makeDataFileName(commitTime1, TEST_WRITE_TOKEN, fileId);
new File(basePath + "/" + partitionPath + "/" + fileName1).createNewFile();
refreshFsView();
- assertFalse("No commit, should not find any data file", roView.getLatestBaseFiles(partitionPath)
- .anyMatch(dfile -> dfile.getFileId().equals(fileId)));
+ assertFalse(roView.getLatestBaseFiles(partitionPath).anyMatch(dfile -> dfile.getFileId().equals(fileId)),
+ "No commit, should not find any data file");
// Make this commit safe
HoodieActiveTimeline commitTimeline = metaClient.getActiveTimeline();
HoodieInstant instant1 = new HoodieInstant(true, HoodieTimeline.COMMIT_ACTION, commitTime1);
saveAsComplete(commitTimeline, instant1, Option.empty());
refreshFsView();
- assertEquals("", fileName1, roView.getLatestBaseFiles(partitionPath)
+ assertEquals(fileName1, roView.getLatestBaseFiles(partitionPath)
.filter(dfile -> dfile.getFileId().equals(fileId)).findFirst().get().getFileName());
// Do another commit, but not safe
@@ -557,14 +553,14 @@ public class TestHoodieTableFileSystemView extends HoodieCommonTestHarness {
String fileName2 = FSUtils.makeDataFileName(commitTime2, TEST_WRITE_TOKEN, fileId);
new File(basePath + "/" + partitionPath + "/" + fileName2).createNewFile();
refreshFsView();
- assertEquals("", fileName1, roView.getLatestBaseFiles(partitionPath)
+ assertEquals(fileName1, roView.getLatestBaseFiles(partitionPath)
.filter(dfile -> dfile.getFileId().equals(fileId)).findFirst().get().getFileName());
// Make it safe
HoodieInstant instant2 = new HoodieInstant(true, HoodieTimeline.COMMIT_ACTION, commitTime2);
saveAsComplete(commitTimeline, instant2, Option.empty());
refreshFsView();
- assertEquals("", fileName2, roView.getLatestBaseFiles(partitionPath)
+ assertEquals(fileName2, roView.getLatestBaseFiles(partitionPath)
.filter(dfile -> dfile.getFileId().equals(fileId)).findFirst().get().getFileName());
}
@@ -738,7 +734,7 @@ public class TestHoodieTableFileSystemView extends HoodieCommonTestHarness {
String fileId = fileGroup.getFileGroupId().getFileId();
Set<String> filenames = new HashSet<>();
fileGroup.getAllBaseFiles().forEach(dataFile -> {
- assertEquals("All same fileId should be grouped", fileId, dataFile.getFileId());
+ assertEquals(fileId, dataFile.getFileId(), "All same fileId should be grouped");
filenames.add(dataFile.getFileName());
});
Set<String> expFileNames = new HashSet<>();
@@ -1036,13 +1032,13 @@ public class TestHoodieTableFileSystemView extends HoodieCommonTestHarness {
List<HoodieFileGroup> groups = Stream.of(partitionPath1, partitionPath2, partitionPath3)
.flatMap(p -> fsView.getAllFileGroups(p)).collect(Collectors.toList());
- Assert.assertEquals("Expected number of file-groups", 3, groups.size());
- Assert.assertEquals("Partitions must be different for file-groups", 3,
- groups.stream().map(HoodieFileGroup::getPartitionPath).collect(Collectors.toSet()).size());
+ assertEquals(3, groups.size(), "Expected number of file-groups");
+ assertEquals(3, groups.stream().map(HoodieFileGroup::getPartitionPath).collect(Collectors.toSet()).size(),
+ "Partitions must be different for file-groups");
Set<String> fileIds = groups.stream().map(HoodieFileGroup::getFileGroupId).map(HoodieFileGroupId::getFileId)
.collect(Collectors.toSet());
- Assert.assertEquals("File Id must be same", 1, fileIds.size());
- Assert.assertTrue("Expected FileId", fileIds.contains(fileId));
+ assertEquals(1, fileIds.size(), "File Id must be same");
+ assertTrue(fileIds.contains(fileId), "Expected FileId");
// Setup Pending compaction for all of these fileIds.
List<Pair<String, FileSlice>> partitionFileSlicesPairs = new ArrayList<>();
@@ -1091,59 +1087,58 @@ public class TestHoodieTableFileSystemView extends HoodieCommonTestHarness {
// Test Data Files
List<HoodieBaseFile> dataFiles = roView.getAllBaseFiles(partitionPath1).collect(Collectors.toList());
- assertEquals("One data-file is expected as there is only one file-group", 1, dataFiles.size());
- assertEquals("Expect only valid commit", "1", dataFiles.get(0).getCommitTime());
+ assertEquals(1, dataFiles.size(), "One data-file is expected as there is only one file-group");
+ assertEquals("1", dataFiles.get(0).getCommitTime(), "Expect only valid commit");
dataFiles = roView.getAllBaseFiles(partitionPath2).collect(Collectors.toList());
- assertEquals("One data-file is expected as there is only one file-group", 1, dataFiles.size());
- assertEquals("Expect only valid commit", "1", dataFiles.get(0).getCommitTime());
+ assertEquals(1, dataFiles.size(), "One data-file is expected as there is only one file-group");
+ assertEquals("1", dataFiles.get(0).getCommitTime(), "Expect only valid commit");
// Merge API Tests
Arrays.asList(partitionPath1, partitionPath2, partitionPath3).forEach(partitionPath -> {
List<FileSlice> fileSliceList =
rtView.getLatestMergedFileSlicesBeforeOrOn(partitionPath, deltaInstantTime5).collect(Collectors.toList());
- assertEquals("Expect file-slice to be merged", 1, fileSliceList.size());
+ assertEquals(1, fileSliceList.size(), "Expect file-slice to be merged");
FileSlice fileSlice = fileSliceList.get(0);
assertEquals(fileId, fileSlice.getFileId());
- assertEquals("Data file must be present", dataFileName, fileSlice.getBaseFile().get().getFileName());
- assertEquals("Base Instant of penultimate file-slice must be base instant", instantTime1,
- fileSlice.getBaseInstantTime());
+ assertEquals(dataFileName, fileSlice.getBaseFile().get().getFileName(), "Data file must be present");
+ assertEquals(instantTime1, fileSlice.getBaseInstantTime(),
+ "Base Instant of penultimate file-slice must be base instant");
List<HoodieLogFile> logFiles = fileSlice.getLogFiles().collect(Collectors.toList());
- assertEquals("Log files must include those after compaction request", 3, logFiles.size());
- assertEquals("Log File Order check", fileName4, logFiles.get(0).getFileName());
- assertEquals("Log File Order check", fileName3, logFiles.get(1).getFileName());
- assertEquals("Log File Order check", fileName1, logFiles.get(2).getFileName());
+ assertEquals(3, logFiles.size(), "Log files must include those after compaction request");
+ assertEquals(fileName4, logFiles.get(0).getFileName(), "Log File Order check");
+ assertEquals(fileName3, logFiles.get(1).getFileName(), "Log File Order check");
+ assertEquals(fileName1, logFiles.get(2).getFileName(), "Log File Order check");
fileSliceList =
rtView.getLatestFileSlicesBeforeOrOn(partitionPath, deltaInstantTime5, true).collect(Collectors.toList());
- assertEquals("Expect only one file-id", 1, fileSliceList.size());
+ assertEquals(1, fileSliceList.size(), "Expect only one file-id");
fileSlice = fileSliceList.get(0);
assertEquals(fileId, fileSlice.getFileId());
- assertFalse("No data-file expected in latest file-slice", fileSlice.getBaseFile().isPresent());
- assertEquals("Compaction requested instant must be base instant", compactionRequestedTime,
- fileSlice.getBaseInstantTime());
+ assertFalse(fileSlice.getBaseFile().isPresent(), "No data-file expected in latest file-slice");
+ assertEquals(compactionRequestedTime, fileSlice.getBaseInstantTime(), "Compaction requested instant must be base instant");
logFiles = fileSlice.getLogFiles().collect(Collectors.toList());
- assertEquals("Log files must include only those after compaction request", 2, logFiles.size());
- assertEquals("Log File Order check", fileName4, logFiles.get(0).getFileName());
- assertEquals("Log File Order check", fileName3, logFiles.get(1).getFileName());
+ assertEquals(2, logFiles.size(), "Log files must include only those after compaction request");
+ assertEquals(fileName4, logFiles.get(0).getFileName(), "Log File Order check");
+ assertEquals(fileName3, logFiles.get(1).getFileName(), "Log File Order check");
// Check getLatestFileSlicesBeforeOrOn excluding fileIds in pending compaction
fileSliceList =
rtView.getLatestFileSlicesBeforeOrOn(partitionPath, deltaInstantTime5, false).collect(Collectors.toList());
- assertEquals("Expect empty list as file-id is in pending compaction", 0, fileSliceList.size());
+ assertEquals(0, fileSliceList.size(), "Expect empty list as file-id is in pending compaction");
});
- Assert.assertEquals(3, fsView.getPendingCompactionOperations().count());
+ assertEquals(3, fsView.getPendingCompactionOperations().count());
Set<String> partitionsInCompaction = fsView.getPendingCompactionOperations().map(Pair::getValue)
.map(CompactionOperation::getPartitionPath).collect(Collectors.toSet());
- Assert.assertEquals(3, partitionsInCompaction.size());
- Assert.assertTrue(partitionsInCompaction.contains(partitionPath1));
- Assert.assertTrue(partitionsInCompaction.contains(partitionPath2));
- Assert.assertTrue(partitionsInCompaction.contains(partitionPath3));
+ assertEquals(3, partitionsInCompaction.size());
+ assertTrue(partitionsInCompaction.contains(partitionPath1));
+ assertTrue(partitionsInCompaction.contains(partitionPath2));
+ assertTrue(partitionsInCompaction.contains(partitionPath3));
Set<String> fileIdsInCompaction = fsView.getPendingCompactionOperations().map(Pair::getValue)
.map(CompactionOperation::getFileId).collect(Collectors.toSet());
- Assert.assertEquals(1, fileIdsInCompaction.size());
- Assert.assertTrue(fileIdsInCompaction.contains(fileId));
+ assertEquals(1, fileIdsInCompaction.size());
+ assertTrue(fileIdsInCompaction.contains(fileId));
}
private static void saveAsComplete(HoodieActiveTimeline timeline, HoodieInstant inflight, Option<byte[]> data) {
diff --git a/hudi-common/src/test/java/org/apache/hudi/common/table/view/TestRocksDbBasedFileSystemView.java b/hudi-common/src/test/java/org/apache/hudi/common/table/view/TestRocksDbBasedFileSystemView.java
index c23944b..b09b007 100644
--- a/hudi-common/src/test/java/org/apache/hudi/common/table/view/TestRocksDbBasedFileSystemView.java
+++ b/hudi-common/src/test/java/org/apache/hudi/common/table/view/TestRocksDbBasedFileSystemView.java
@@ -21,6 +21,7 @@ package org.apache.hudi.common.table.view;
import org.apache.hudi.common.table.timeline.HoodieTimeline;
import java.io.IOException;
+import java.nio.file.Files;
/**
* Tests RocksDB based file system view {@link SyncableFileSystemView}.
@@ -28,7 +29,8 @@ import java.io.IOException;
public class TestRocksDbBasedFileSystemView extends TestHoodieTableFileSystemView {
protected SyncableFileSystemView getFileSystemView(HoodieTimeline timeline) throws IOException {
+ String subdirPath = Files.createTempDirectory(tempDir, null).toAbsolutePath().toString();
return new RocksDbBasedFileSystemView(metaClient, timeline,
- FileSystemViewStorageConfig.newBuilder().withRocksDBPath(folder.newFolder().getAbsolutePath()).build());
+ FileSystemViewStorageConfig.newBuilder().withRocksDBPath(subdirPath).build());
}
}
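Where the JUnit 4 rule offered folder.newFolder() for scratch directories (here, a RocksDB storage path), the JUnit 5 version carves a fresh subdirectory out of the injected temp dir with java.nio.file. A minimal sketch of the equivalence, assuming a tempDir field like the one the harness below provides:

    // JUnit 4 rule style (removed):
    //   @Rule public TemporaryFolder folder = new TemporaryFolder();
    //   String rocksDbPath = folder.newFolder().getAbsolutePath();

    // JUnit 5 style: a unique subdirectory under the injected temp dir
    import java.nio.file.Files;
    import java.nio.file.Path;

    Path subdir = Files.createTempDirectory(tempDir, null); // null prefix is allowed
    String rocksDbPath = subdir.toAbsolutePath().toString();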
diff --git a/hudi-common/src/test/java/org/apache/hudi/common/testutils/HoodieCommonTestHarnessJunit5.java b/hudi-common/src/test/java/org/apache/hudi/common/testutils/HoodieCommonTestHarnessJunit5.java
new file mode 100644
index 0000000..ba25036
--- /dev/null
+++ b/hudi-common/src/test/java/org/apache/hudi/common/testutils/HoodieCommonTestHarnessJunit5.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hudi.common.testutils;
+
+import org.apache.hudi.common.model.HoodieTestUtils;
+import org.apache.hudi.common.table.HoodieTableMetaClient;
+
+import org.junit.jupiter.api.io.TempDir;
+
+import java.io.IOException;
+
+/**
+ * The JUnit 5 version of {@link org.apache.hudi.common.HoodieCommonTestHarness}.
+ * <p>
+ * To incrementally migrate test classes.
+ */
+public class HoodieCommonTestHarnessJunit5 extends org.apache.hudi.common.HoodieCommonTestHarness {
+
+ @TempDir
+ public java.nio.file.Path tempDir;
+
+ /**
+ * Initializes basePath.
+ */
+ protected void initPath() {
+ this.basePath = tempDir.toAbsolutePath().toString();
+ }
+
+ /**
+ * Initializes an instance of {@link HoodieTableMetaClient} with a special table type specified by {@code getTableType()}.
+ */
+ protected void initMetaClient() throws IOException {
+ metaClient = HoodieTestUtils.init(tempDir.toAbsolutePath().toString(), getTableType());
+ basePath = metaClient.getBasePath();
+ }
+}
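The harness above relies on Jupiter's @TempDir injection: the framework creates a fresh directory before each test and deletes it recursively afterwards, replacing the TemporaryFolder rule from JUnit 4. A usage sketch (the test class and file name are illustrative):

    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.io.TempDir;

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;

    import static org.junit.jupiter.api.Assertions.assertTrue;

    class TempDirUsageExampleTest {

      @TempDir
      Path tempDir; // injected fresh per test method, cleaned up automatically

      @Test
      void writesIntoIsolatedScratchDir() throws IOException {
        Path file = Files.createFile(tempDir.resolve("t.txt"));
        assertTrue(Files.exists(file));
      }
    }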
diff --git a/hudi-common/src/test/java/org/apache/hudi/common/util/TestFileIOUtils.java b/hudi-common/src/test/java/org/apache/hudi/common/util/TestFileIOUtils.java
index 7d0c287..5292f07 100644
--- a/hudi-common/src/test/java/org/apache/hudi/common/util/TestFileIOUtils.java
+++ b/hudi-common/src/test/java/org/apache/hudi/common/util/TestFileIOUtils.java
@@ -18,39 +18,39 @@
package org.apache.hudi.common.util;
-import org.apache.hudi.common.HoodieCommonTestHarness;
+import org.apache.hudi.common.testutils.HoodieCommonTestHarnessJunit5;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
/**
* Tests file I/O utils.
*/
-public class TestFileIOUtils extends HoodieCommonTestHarness {
+public class TestFileIOUtils extends HoodieCommonTestHarnessJunit5 {
@Test
public void testMkdirAndDelete() throws IOException {
try {
- FileIOUtils.mkdir(folder.getRoot());
+ FileIOUtils.mkdir(tempDir.toFile());
} catch (IOException e) {
fail("Should not error out if dir exists already");
}
- File dir = new File(folder.getRoot().getAbsolutePath() + "/dir");
+ File dir = tempDir.resolve("dir").toFile();
FileIOUtils.mkdir(dir);
assertTrue(dir.exists());
new File(dir, "t.txt").createNewFile();
new File(dir, "subdir").mkdirs();
- new File(dir, "subdir/z.txt").createNewFile();
+ new File(dir, "subdir" + File.pathSeparator + "z.txt").createNewFile();
FileIOUtils.deleteDirectory(dir);
assertFalse(dir.exists());
}
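An aside on joining path segments, since the hunk above does it by hand: File.pathSeparator is the ':' or ';' used between entries of PATH-style lists, not the directory separator (that is File.separator), and java.nio's Path.resolve avoids the string concatenation entirely. A small illustrative sketch:

    import java.io.File;
    import java.nio.file.Path;

    File dir = new File("/tmp/example"); // illustrative path

    // java.io: let File join parent and child instead of concatenating
    File zTxt = new File(new File(dir, "subdir"), "z.txt");

    // java.nio: resolve() applies the platform separator for you
    Path zTxtPath = dir.toPath().resolve("subdir").resolve("z.txt");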
diff --git a/hudi-hadoop-mr/pom.xml b/hudi-hadoop-mr/pom.xml
index 8d21bda..fe95199 100644
--- a/hudi-hadoop-mr/pom.xml
+++ b/hudi-hadoop-mr/pom.xml
@@ -97,12 +97,6 @@
</dependency>
<dependency>
- <groupId>junit</groupId>
- <artifactId>junit</artifactId>
- <scope>test</scope>
- </dependency>
-
- <dependency>
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter-api</artifactId>
<scope>test</scope>
diff --git a/hudi-hadoop-mr/src/test/java/org/apache/hudi/hadoop/InputFormatTestUtil.java b/hudi-hadoop-mr/src/test/java/org/apache/hudi/hadoop/InputFormatTestUtil.java
index a575403..801380d 100644
--- a/hudi-hadoop-mr/src/test/java/org/apache/hudi/hadoop/InputFormatTestUtil.java
+++ b/hudi-hadoop-mr/src/test/java/org/apache/hudi/hadoop/InputFormatTestUtil.java
@@ -39,10 +39,11 @@ import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
import org.apache.hadoop.hive.serde2.ColumnProjectionUtils;
import org.apache.hadoop.mapred.JobConf;
import org.apache.parquet.avro.AvroParquetWriter;
-import org.junit.rules.TemporaryFolder;
import java.io.File;
import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
@@ -57,19 +58,19 @@ public class InputFormatTestUtil {
private static String TEST_WRITE_TOKEN = "1-0-1";
- public static File prepareTable(TemporaryFolder basePath, int numberOfFiles, String commitNumber)
+ public static File prepareTable(java.nio.file.Path basePath, int numberOfFiles, String commitNumber)
throws IOException {
- basePath.create();
- HoodieTestUtils.init(HoodieTestUtils.getDefaultHadoopConf(), basePath.getRoot().toString());
- File partitionPath = basePath.newFolder("2016", "05", "01");
- return simulateInserts(partitionPath, "fileId1", numberOfFiles, commitNumber);
+ HoodieTestUtils.init(HoodieTestUtils.getDefaultHadoopConf(), basePath.toString());
+ java.nio.file.Path partitionPath = basePath.resolve(Paths.get("2016", "05", "01"));
+ Files.createDirectories(partitionPath);
+ return simulateInserts(partitionPath.toFile(), "fileId1", numberOfFiles, commitNumber);
}
public static File simulateInserts(File partitionPath, String fileId, int numberOfFiles, String commitNumber)
- throws IOException {
+ throws IOException {
for (int i = 0; i < numberOfFiles; i++) {
- File dataFile = new File(partitionPath, FSUtils.makeDataFileName(commitNumber, TEST_WRITE_TOKEN, fileId + i));
- dataFile.createNewFile();
+ Files.createFile(partitionPath.toPath()
+ .resolve(FSUtils.makeDataFileName(commitNumber, TEST_WRITE_TOKEN, fileId + i)));
}
return partitionPath;
}
@@ -86,19 +87,18 @@ public class InputFormatTestUtil {
List<File> toUpdateList = dataFiles.subList(0, Math.min(numberOfFilesUpdated, dataFiles.size()));
for (File file : toUpdateList) {
String fileId = FSUtils.getFileId(file.getName());
- File dataFile = new File(directory, FSUtils.makeDataFileName(newCommit, TEST_WRITE_TOKEN, fileId));
- dataFile.createNewFile();
+ Files.createFile(directory.toPath().resolve(FSUtils.makeDataFileName(newCommit, TEST_WRITE_TOKEN, fileId)));
}
}
- public static void commit(TemporaryFolder basePath, String commitNumber) throws IOException {
+ public static void commit(java.nio.file.Path basePath, String commitNumber) throws IOException {
// create the commit
- new File(basePath.getRoot().toString() + "/.hoodie/", commitNumber + ".commit").createNewFile();
+ Files.createFile(basePath.resolve(Paths.get(".hoodie", commitNumber + ".commit")));
}
- public static void deltaCommit(TemporaryFolder basePath, String commitNumber) throws IOException {
+ public static void deltaCommit(java.nio.file.Path basePath, String commitNumber) throws IOException {
// create the commit
- new File(basePath.getRoot().toString() + "/.hoodie/", commitNumber + ".deltacommit").createNewFile();
+ Files.createFile(basePath.resolve(Paths.get(".hoodie", commitNumber + ".deltacommit")));
}
public static void setupIncremental(JobConf jobConf, String startCommit, int numberOfCommitsToPull) {
@@ -119,40 +119,35 @@ public class InputFormatTestUtil {
return new Schema.Parser().parse(InputFormatTestUtil.class.getResourceAsStream(location));
}
- public static File prepareParquetTable(TemporaryFolder basePath, Schema schema, int numberOfFiles,
+ public static File prepareParquetTable(java.nio.file.Path basePath, Schema schema, int numberOfFiles,
int numberOfRecords, String commitNumber) throws IOException {
- basePath.create();
- HoodieTestUtils.init(HoodieTestUtils.getDefaultHadoopConf(), basePath.getRoot().toString());
- File partitionPath = basePath.newFolder("2016", "05", "01");
+ HoodieTestUtils.init(HoodieTestUtils.getDefaultHadoopConf(), basePath.toString());
+ java.nio.file.Path partitionPath = basePath.resolve(Paths.get("2016", "05", "01"));
createData(schema, partitionPath, numberOfFiles, numberOfRecords, commitNumber);
- return partitionPath;
+ return partitionPath.toFile();
}
- public static File prepareSimpleParquetTable(TemporaryFolder basePath, Schema schema, int numberOfFiles,
+ public static File prepareSimpleParquetTable(java.nio.file.Path basePath, Schema schema, int numberOfFiles,
int numberOfRecords, String commitNumber) throws Exception {
- basePath.create();
- HoodieTestUtils.init(HoodieTestUtils.getDefaultHadoopConf(), basePath.getRoot().toString());
- File partitionPath = basePath.newFolder("2016", "05", "01");
+ HoodieTestUtils.init(HoodieTestUtils.getDefaultHadoopConf(), basePath.toString());
+ java.nio.file.Path partitionPath = basePath.resolve(Paths.get("2016", "05", "01"));
createSimpleData(schema, partitionPath, numberOfFiles, numberOfRecords, commitNumber);
- return partitionPath;
+ return partitionPath.toFile();
}
- public static File prepareNonPartitionedParquetTable(TemporaryFolder baseDir, Schema schema, int numberOfFiles,
+ public static File prepareNonPartitionedParquetTable(java.nio.file.Path basePath, Schema schema, int numberOfFiles,
int numberOfRecords, String commitNumber) throws IOException {
- baseDir.create();
- HoodieTestUtils.init(HoodieTestUtils.getDefaultHadoopConf(), baseDir.getRoot().toString());
- File basePath = baseDir.getRoot();
+ HoodieTestUtils.init(HoodieTestUtils.getDefaultHadoopConf(), basePath.toString());
createData(schema, basePath, numberOfFiles, numberOfRecords, commitNumber);
- return basePath;
+ return basePath.toFile();
}
- private static void createData(Schema schema, File partitionPath, int numberOfFiles, int numberOfRecords,
+ private static void createData(Schema schema, java.nio.file.Path partitionPath, int numberOfFiles, int numberOfRecords,
String commitNumber) throws IOException {
AvroParquetWriter parquetWriter;
for (int i = 0; i < numberOfFiles; i++) {
String fileId = FSUtils.makeDataFileName(commitNumber, TEST_WRITE_TOKEN, "fileid" + i);
- File dataFile = new File(partitionPath, fileId);
- parquetWriter = new AvroParquetWriter(new Path(dataFile.getAbsolutePath()), schema);
+ parquetWriter = new AvroParquetWriter(new Path(partitionPath.resolve(fileId).toString()), schema);
try {
for (GenericRecord record : generateAvroRecords(schema, numberOfRecords, commitNumber, fileId)) {
parquetWriter.write(record);
@@ -163,13 +158,12 @@ public class InputFormatTestUtil {
}
}
- private static void createSimpleData(Schema schema, File partitionPath, int numberOfFiles, int numberOfRecords,
+ private static void createSimpleData(Schema schema, java.nio.file.Path partitionPath, int numberOfFiles, int numberOfRecords,
String commitNumber) throws Exception {
AvroParquetWriter parquetWriter;
for (int i = 0; i < numberOfFiles; i++) {
String fileId = FSUtils.makeDataFileName(commitNumber, "1", "fileid" + i);
- File dataFile = new File(partitionPath, fileId);
- parquetWriter = new AvroParquetWriter(new Path(dataFile.getAbsolutePath()), schema);
+ parquetWriter = new AvroParquetWriter(new Path(partitionPath.resolve(fileId).toString()), schema);
try {
List<IndexedRecord> records = SchemaTestUtil.generateTestRecords(0, numberOfRecords);
Schema hoodieFieldsSchema = HoodieAvroUtils.addMetadataFields(schema);
@@ -212,7 +206,6 @@ public class InputFormatTestUtil {
parquetWriter.write(record);
}
}
-
}
public static HoodieLogFormat.Writer writeRollback(File partitionDir, FileSystem fs, String fileId, String baseCommit,
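Beyond the harness change, the utility above replaces File.createNewFile() with java.nio.file.Files.createFile, which throws on failure instead of returning false. A hedged sketch of that idiom; the marker-file layout mirrors the diff, while the class and method names are illustrative:

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;

    final class NioMarkerFileSketch {

      // Creates <basePath>/.hoodie/<commitNumber>.commit, assuming the
      // .hoodie directory already exists (HoodieTestUtils.init sets it up).
      // Files.createFile fails fast, e.g. with FileAlreadyExistsException,
      // where File.createNewFile() would silently return false.
      static Path createCommitMarker(Path basePath, String commitNumber) throws IOException {
        return Files.createFile(basePath.resolve(Paths.get(".hoodie", commitNumber + ".commit")));
      }
    }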
diff --git a/hudi-hadoop-mr/src/test/java/org/apache/hudi/hadoop/TestHoodieParquetInputFormat.java b/hudi-hadoop-mr/src/test/java/org/apache/hudi/hadoop/TestHoodieParquetInputFormat.java
index 536e8f6..8a6aee4 100644
--- a/hudi-hadoop-mr/src/test/java/org/apache/hudi/hadoop/TestHoodieParquetInputFormat.java
+++ b/hudi-hadoop-mr/src/test/java/org/apache/hudi/hadoop/TestHoodieParquetInputFormat.java
@@ -23,6 +23,11 @@ import org.apache.hudi.common.fs.FSUtils;
import org.apache.hudi.common.model.HoodieCommitMetadata;
import org.apache.hudi.common.model.HoodieTestUtils;
import org.apache.hudi.common.model.HoodieWriteStat;
+import org.apache.hudi.common.table.HoodieTableMetaClient;
+import org.apache.hudi.common.table.timeline.HoodieActiveTimeline;
+import org.apache.hudi.common.table.timeline.HoodieInstant;
+import org.apache.hudi.common.table.timeline.HoodieTimeline;
+import org.apache.hudi.common.table.timeline.TimelineMetadataUtils;
import org.apache.avro.Schema;
import org.apache.hadoop.fs.FileStatus;
@@ -33,15 +38,9 @@ import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapreduce.Job;
-import org.apache.hudi.common.table.HoodieTableMetaClient;
-import org.apache.hudi.common.table.timeline.HoodieActiveTimeline;
-import org.apache.hudi.common.table.timeline.HoodieInstant;
-import org.apache.hudi.common.table.timeline.HoodieTimeline;
-import org.apache.hudi.common.table.timeline.TimelineMetadataUtils;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
import java.io.File;
import java.io.FileOutputStream;
@@ -50,15 +49,12 @@ import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
public class TestHoodieParquetInputFormat {
- @Rule
- public TemporaryFolder basePath = new TemporaryFolder();
-
private HoodieParquetInputFormat inputFormat;
private JobConf jobConf;
@@ -70,16 +66,19 @@ public class TestHoodieParquetInputFormat {
count++;
}
}
- assertEquals(msg, expected, count);
+ assertEquals(expected, count, msg);
}
- @Before
+ @BeforeEach
public void setUp() {
inputFormat = new HoodieParquetInputFormat();
jobConf = new JobConf();
inputFormat.setConf(jobConf);
}
+ @TempDir
+ public java.nio.file.Path basePath;
+
// Verify that HoodieParquetInputFormat does not return instants after pending compaction
@Test
public void testPendingCompactionWithActiveCommits() throws IOException {
@@ -98,7 +97,7 @@ public class TestHoodieParquetInputFormat {
instants.add(t4);
instants.add(t5);
instants.add(t6);
- HoodieTableMetaClient metaClient = HoodieTestUtils.init(basePath.getRoot().getAbsolutePath().toString());
+ HoodieTableMetaClient metaClient = HoodieTestUtils.init(basePath.toString());
HoodieActiveTimeline timeline = new HoodieActiveTimeline(metaClient);
timeline.setInstants(instants);
@@ -196,16 +195,16 @@ public class TestHoodieParquetInputFormat {
InputFormatTestUtil.setupIncremental(jobConf, "100", 1);
FileStatus[] files = inputFormat.listStatus(jobConf);
- assertEquals("We should exclude commit 100 when returning incremental pull with start commit time as 100", 0,
- files.length);
+ assertEquals(0, files.length,
+ "We should exclude commit 100 when returning incremental pull with start commit time as 100");
}
- private void createCommitFile(TemporaryFolder basePath, String commitNumber, String partitionPath)
+ private void createCommitFile(java.nio.file.Path basePath, String commitNumber, String partitionPath)
throws IOException {
List<HoodieWriteStat> writeStats = HoodieTestUtils.generateFakeHoodieWriteStat(1);
HoodieCommitMetadata commitMetadata = new HoodieCommitMetadata();
writeStats.forEach(stat -> commitMetadata.addWriteStat(partitionPath, stat));
- File file = new File(basePath.getRoot().toString() + "/.hoodie/", commitNumber + ".commit");
+ File file = basePath.resolve(".hoodie").resolve(commitNumber + ".commit").toFile();
file.createNewFile();
FileOutputStream fileOutputStream = new FileOutputStream(file);
fileOutputStream.write(commitMetadata.toJsonString().getBytes(StandardCharsets.UTF_8));
@@ -213,10 +212,10 @@ public class TestHoodieParquetInputFormat {
fileOutputStream.close();
}
- private File createCompactionFile(TemporaryFolder basePath, String commitTime)
- throws IOException {
- File file = new File(basePath.getRoot().toString() + "/.hoodie/",
- HoodieTimeline.makeRequestedCompactionFileName(commitTime));
+ private File createCompactionFile(java.nio.file.Path basePath, String commitTime)
+ throws IOException {
+ File file = basePath.resolve(".hoodie")
+ .resolve(HoodieTimeline.makeRequestedCompactionFileName(commitTime)).toFile();
assertTrue(file.createNewFile());
FileOutputStream os = new FileOutputStream(file);
try {
@@ -255,14 +254,14 @@ public class TestHoodieParquetInputFormat {
InputFormatTestUtil.setupIncremental(jobConf, "100", 1);
FileStatus[] files = inputFormat.listStatus(jobConf);
- assertEquals("Pulling 1 commit from 100, should get us the 5 files committed at 200", 5, files.length);
+ assertEquals(5, files.length, "Pulling 1 commit from 100, should get us the 5 files committed at 200");
ensureFilesInCommit("Pulling 1 commit from 100, should get us the 5 files committed at 200", files, "200", 5);
InputFormatTestUtil.setupIncremental(jobConf, "100", 3);
files = inputFormat.listStatus(jobConf);
- assertEquals("Pulling 3 commits from 100, should get us the 3 files from 400 commit, 1 file from 300 "
- + "commit and 1 file from 200 commit", 5, files.length);
+ assertEquals(5, files.length, "Pulling 3 commits from 100, should get us the 3 files from 400 commit, 1 file from 300 "
+ + "commit and 1 file from 200 commit");
ensureFilesInCommit("Pulling 3 commits from 100, should get us the 3 files from 400 commit", files, "400", 3);
ensureFilesInCommit("Pulling 3 commits from 100, should get us the 1 files from 300 commit", files, "300", 1);
ensureFilesInCommit("Pulling 3 commits from 100, should get us the 1 files from 200 commit", files, "200", 1);
@@ -270,8 +269,8 @@ public class TestHoodieParquetInputFormat {
InputFormatTestUtil.setupIncremental(jobConf, "100", HoodieHiveUtil.MAX_COMMIT_ALL);
files = inputFormat.listStatus(jobConf);
- assertEquals("Pulling all commits from 100, should get us the 1 file from each of 200,300,400,500,400 commits",
- 5, files.length);
+ assertEquals(5, files.length,
+ "Pulling all commits from 100, should get us the 1 file from each of 200,300,400,500,400 commits");
ensureFilesInCommit("Pulling all commits from 100, should get us the 1 files from 600 commit", files, "600", 1);
ensureFilesInCommit("Pulling all commits from 100, should get us the 1 files from 500 commit", files, "500", 1);
ensureFilesInCommit("Pulling all commits from 100, should get us the 1 files from 400 commit", files, "400", 1);
@@ -335,29 +334,29 @@ public class TestHoodieParquetInputFormat {
File compactionFile = createCompactionFile(basePath, "300");
// write inserts into new bucket
- InputFormatTestUtil.simulateInserts(partitionDir, "fileId2", 10, "400");
+ InputFormatTestUtil.simulateInserts(partitionDir, "fileId2", 10, "400");
createCommitFile(basePath, "400", "2016/05/01");
// Add the paths
FileInputFormat.setInputPaths(jobConf, partitionDir.getPath());
InputFormatTestUtil.setupIncremental(jobConf, "0", -1);
FileStatus[] files = inputFormat.listStatus(jobConf);
- assertEquals("Pulling all commit from beginning, should not return instants after begin compaction",
- 10, files.length);
+ assertEquals(10, files.length,
+ "Pulling all commit from beginning, should not return instants after begin compaction");
ensureFilesInCommit("Pulling all commit from beginning, should not return instants after begin compaction",
- files, "100", 10);
+ files, "100", 10);
// delete compaction and verify inserts show up
compactionFile.delete();
InputFormatTestUtil.setupIncremental(jobConf, "0", -1);
files = inputFormat.listStatus(jobConf);
- assertEquals("after deleting compaction, should get all inserted files",
- 20, files.length);
+ assertEquals(20, files.length,
+ "after deleting compaction, should get all inserted files");
ensureFilesInCommit("Pulling all commit from beginning, should return instants before requested compaction",
- files, "100", 10);
+ files, "100", 10);
ensureFilesInCommit("Pulling all commit from beginning, should return instants after requested compaction",
- files, "400", 10);
+ files, "400", 10);
}
@@ -381,7 +380,7 @@ public class TestHoodieParquetInputFormat {
totalCount++;
}
}
- assertEquals(msg, expectedNumberOfRecordsInCommit, actualCount);
- assertEquals(msg, totalExpected, totalCount);
+ assertEquals(expectedNumberOfRecordsInCommit, actualCount, msg);
+ assertEquals(totalExpected, totalCount, msg);
}
}
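Most of the churn in this file is the assertion-message move: JUnit 4 takes the message as the first argument, Jupiter takes it as the last. A minimal sketch of the before/after, with illustrative names:

    import static org.junit.jupiter.api.Assertions.assertEquals;

    class AssertionOrderSketch {

      // JUnit 4: assertEquals("message", expected, actual)
      // JUnit 5: assertEquals(expected, actual, "message")
      static void checkFileCount(int expected, Object[] files) {
        assertEquals(expected, files.length, "unexpected number of listed files");
      }
    }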
diff --git a/hudi-hadoop-mr/src/test/java/org/apache/hudi/hadoop/TestHoodieROTablePathFilter.java b/hudi-hadoop-mr/src/test/java/org/apache/hudi/hadoop/TestHoodieROTablePathFilter.java
index 8d46b10..f41f09c 100644
--- a/hudi-hadoop-mr/src/test/java/org/apache/hudi/hadoop/TestHoodieROTablePathFilter.java
+++ b/hudi-hadoop-mr/src/test/java/org/apache/hudi/hadoop/TestHoodieROTablePathFilter.java
@@ -18,28 +18,28 @@
package org.apache.hudi.hadoop;
-import org.apache.hudi.common.HoodieCommonTestHarness;
import org.apache.hudi.common.model.HoodieTestUtils;
import org.apache.hudi.common.table.HoodieTableMetaClient;
+import org.apache.hudi.common.testutils.HoodieCommonTestHarnessJunit5;
import org.apache.hadoop.fs.Path;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
/**
*
*/
-public class TestHoodieROTablePathFilter extends HoodieCommonTestHarness {
+public class TestHoodieROTablePathFilter extends HoodieCommonTestHarnessJunit5 {
- @Before
+ @BeforeEach
public void setUp() throws Exception {
initMetaClient();
}
@@ -61,7 +61,7 @@ public class TestHoodieROTablePathFilter extends HoodieCommonTestHarness {
HoodieROTablePathFilter pathFilter = new HoodieROTablePathFilter();
Path partitionPath = new Path("file://" + basePath + File.separator + "2017/01/01");
- assertTrue("Directories should be accepted", pathFilter.accept(partitionPath));
+ assertTrue(pathFilter.accept(partitionPath), "Directories should be accepted");
assertTrue(
pathFilter.accept(new Path("file:///" + HoodieTestUtils.getDataFilePath(basePath, "2017/01/01", "001", "f1"))));
@@ -87,10 +87,8 @@ public class TestHoodieROTablePathFilter extends HoodieCommonTestHarness {
}
@Test
- public void testNonHoodiePaths() throws IOException {
- TemporaryFolder folder = new TemporaryFolder();
- folder.create();
- String basePath = folder.getRoot().getAbsolutePath();
+ public void testNonHoodiePaths(@TempDir java.nio.file.Path tempDir) throws IOException {
+ String basePath = tempDir.toAbsolutePath().toString();
HoodieROTablePathFilter pathFilter = new HoodieROTablePathFilter();
String path = basePath + File.separator + "nonhoodiefolder";
@@ -100,7 +98,5 @@ public class TestHoodieROTablePathFilter extends HoodieCommonTestHarness {
path = basePath + File.separator + "nonhoodiefolder/somefile";
new File(path).createNewFile();
assertTrue(pathFilter.accept(new Path("file:///" + path)));
-
- folder.delete();
}
}
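testNonHoodiePaths above uses the other @TempDir form, parameter injection, which makes the manual folder.create()/folder.delete() pair unnecessary. A sketch under the same assumption of a per-test throwaway directory; the names are illustrative:

    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.io.TempDir;

    import java.io.File;
    import java.nio.file.Path;

    import static org.junit.jupiter.api.Assertions.assertTrue;

    class TempDirParameterSketch {

      @Test
      void worksAgainstAFreshDirectory(@TempDir Path tempDir) throws Exception {
        // JUnit 5 creates this directory before the test and deletes it
        // afterwards, so no explicit cleanup is needed.
        String basePath = tempDir.toAbsolutePath().toString();
        assertTrue(new File(basePath, "somefile").createNewFile());
      }
    }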
diff --git a/hudi-hadoop-mr/src/test/java/org/apache/hudi/hadoop/realtime/TestHoodieCombineHiveInputFormat.java b/hudi-hadoop-mr/src/test/java/org/apache/hudi/hadoop/realtime/TestHoodieCombineHiveInputFormat.java
index b1839ff..8ef295e 100644
--- a/hudi-hadoop-mr/src/test/java/org/apache/hudi/hadoop/realtime/TestHoodieCombineHiveInputFormat.java
+++ b/hudi-hadoop-mr/src/test/java/org/apache/hudi/hadoop/realtime/TestHoodieCombineHiveInputFormat.java
@@ -18,19 +18,15 @@
package org.apache.hudi.hadoop.realtime;
-import static org.apache.hadoop.hive.ql.exec.Utilities.HAS_MAP_WORK;
-import static org.apache.hadoop.hive.ql.exec.Utilities.MAPRED_MAPPER_CLASS;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
import org.apache.hudi.avro.HoodieAvroUtils;
-import org.apache.hudi.common.HoodieCommonTestHarness;
import org.apache.hudi.common.minicluster.MiniClusterUtil;
import org.apache.hudi.common.model.HoodieTableType;
import org.apache.hudi.common.model.HoodieTestUtils;
import org.apache.hudi.common.table.log.HoodieLogFormat;
+import org.apache.hudi.common.testutils.HoodieCommonTestHarnessJunit5;
import org.apache.hudi.common.util.SchemaTestUtil;
import org.apache.hudi.hadoop.InputFormatTestUtil;
+import org.apache.hudi.hadoop.hive.HoodieCombineHiveInputFormat;
import org.apache.avro.Schema;
import org.apache.hadoop.conf.Configuration;
@@ -47,60 +43,60 @@ import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
-import org.apache.hudi.hadoop.hive.HoodieCombineHiveInputFormat;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Disabled;
+import org.junit.jupiter.api.Test;
import java.io.File;
import java.io.IOException;
import java.util.LinkedHashMap;
-public class TestHoodieCombineHiveInputFormat extends HoodieCommonTestHarness {
+import static org.apache.hadoop.hive.ql.exec.Utilities.HAS_MAP_WORK;
+import static org.apache.hadoop.hive.ql.exec.Utilities.MAPRED_MAPPER_CLASS;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+public class TestHoodieCombineHiveInputFormat extends HoodieCommonTestHarnessJunit5 {
- @Rule
- public TemporaryFolder basePath = new TemporaryFolder();
private JobConf jobConf;
private FileSystem fs;
private Configuration hadoopConf;
- @BeforeClass
+ @BeforeAll
public static void setUpClass() throws IOException, InterruptedException {
// Append is not supported in LocalFileSystem. HDFS needs to be setup.
MiniClusterUtil.setUp();
}
- @AfterClass
+ @AfterAll
public static void tearDownClass() {
MiniClusterUtil.shutdown();
}
- @Before
+ @BeforeEach
public void setUp() throws IOException, InterruptedException {
this.fs = MiniClusterUtil.fileSystem;
jobConf = new JobConf();
hadoopConf = HoodieTestUtils.getDefaultHadoopConf();
- assertTrue(fs.mkdirs(new Path(folder.getRoot().getPath())));
+ assertTrue(fs.mkdirs(new Path(tempDir.toAbsolutePath().toString())));
- HoodieTestUtils.init(MiniClusterUtil.configuration, basePath.getRoot().getPath(), HoodieTableType.MERGE_ON_READ);
+ HoodieTestUtils.init(MiniClusterUtil.configuration, tempDir.toAbsolutePath().toString(), HoodieTableType.MERGE_ON_READ);
}
@Test
- @Ignore
+ @Disabled
public void testHoodieRealtimeCombineHoodieInputFormat() throws Exception {
Configuration conf = new Configuration();
// initial commit
Schema schema = HoodieAvroUtils.addMetadataFields(SchemaTestUtil.getEvolvedSchema());
- HoodieTestUtils.init(hadoopConf, basePath.getRoot().getAbsolutePath(), HoodieTableType.MERGE_ON_READ);
+ HoodieTestUtils.init(hadoopConf, tempDir.toAbsolutePath().toString(), HoodieTableType.MERGE_ON_READ);
String commitTime = "100";
final int numRecords = 1000;
// Create 3 parquet files with 1000 records each
- File partitionDir = InputFormatTestUtil.prepareParquetTable(basePath, schema, 3, numRecords, commitTime);
- InputFormatTestUtil.commit(basePath, commitTime);
+ File partitionDir = InputFormatTestUtil.prepareParquetTable(tempDir, schema, 3, numRecords, commitTime);
+ InputFormatTestUtil.commit(tempDir, commitTime);
// insert 1000 update records to log file 0
String newCommitTime = "101";
@@ -124,10 +120,10 @@ public class TestHoodieCombineHiveInputFormat extends HoodieCommonTestHarness {
tblDesc.setInputFileFormatClass(HoodieCombineHiveInputFormat.class);
PartitionDesc partDesc = new PartitionDesc(tblDesc, null);
LinkedHashMap<Path, PartitionDesc> pt = new LinkedHashMap<>();
- pt.put(new Path(basePath.getRoot().getAbsolutePath()), partDesc);
+ pt.put(new Path(tempDir.toAbsolutePath().toString()), partDesc);
MapredWork mrwork = new MapredWork();
mrwork.getMapWork().setPathToPartitionInfo(pt);
- Path mapWorkPath = new Path(basePath.getRoot().getAbsolutePath());
+ Path mapWorkPath = new Path(tempDir.toAbsolutePath().toString());
Utilities.setMapRedWork(conf, mrwork, mapWorkPath);
jobConf = new JobConf(conf);
// Add the paths
@@ -143,7 +139,7 @@ public class TestHoodieCombineHiveInputFormat extends HoodieCommonTestHarness {
InputFormatTestUtil.setPropsForInputFormat(jobConf, schema, tripsHiveColumnTypes);
InputSplit[] splits = combineHiveInputFormat.getSplits(jobConf, 1);
// Since the SPLIT_SIZE is 3, we should create only 1 split with all 3 file groups
- assertEquals(splits.length, 1);
+ assertEquals(1, splits.length);
RecordReader<NullWritable, ArrayWritable> recordReader =
combineHiveInputFormat.getRecordReader(splits[0], jobConf, null);
NullWritable nullWritable = recordReader.createKey();
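The lifecycle annotations map one-to-one: @BeforeClass/@AfterClass become @BeforeAll/@AfterAll, @Before becomes @BeforeEach, and @Ignore becomes @Disabled. One wrinkle: @BeforeAll/@AfterAll methods must be static under Jupiter's default per-method test-instance lifecycle. A sketch with illustrative names:

    import org.junit.jupiter.api.AfterAll;
    import org.junit.jupiter.api.BeforeAll;
    import org.junit.jupiter.api.Disabled;
    import org.junit.jupiter.api.Test;

    class LifecycleSketch {

      @BeforeAll
      static void setUpClass() {
        // must be static unless the class opts into
        // @TestInstance(TestInstance.Lifecycle.PER_CLASS)
      }

      @AfterAll
      static void tearDownClass() {
      }

      @Test
      @Disabled // Jupiter's spelling of JUnit 4's @Ignore
      void skippedForNow() {
      }
    }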
diff --git a/hudi-hadoop-mr/src/test/java/org/apache/hudi/hadoop/realtime/TestHoodieRealtimeRecordReader.java b/hudi-hadoop-mr/src/test/java/org/apache/hudi/hadoop/realtime/TestHoodieRealtimeRecordReader.java
index be444b4..4d2bebd 100644
--- a/hudi-hadoop-mr/src/test/java/org/apache/hudi/hadoop/realtime/TestHoodieRealtimeRecordReader.java
+++ b/hudi-hadoop-mr/src/test/java/org/apache/hudi/hadoop/realtime/TestHoodieRealtimeRecordReader.java
@@ -52,11 +52,9 @@ import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
import java.io.File;
import java.io.IOException;
@@ -68,29 +66,29 @@ import java.util.Set;
import java.util.stream.Collectors;
import static org.apache.hudi.hadoop.realtime.HoodieRealtimeRecordReader.REALTIME_SKIP_MERGE_PROP;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
public class TestHoodieRealtimeRecordReader {
private static final String PARTITION_COLUMN = "datestr";
- @Rule
- public TemporaryFolder basePath = new TemporaryFolder();
private JobConf jobConf;
private FileSystem fs;
private Configuration hadoopConf;
- @Before
+ @BeforeEach
public void setUp() {
jobConf = new JobConf();
jobConf.set(AbstractRealtimeRecordReader.MAX_DFS_STREAM_BUFFER_SIZE_PROP, String.valueOf(1024 * 1024));
hadoopConf = HoodieTestUtils.getDefaultHadoopConf();
- fs = FSUtils.getFs(basePath.getRoot().getAbsolutePath(), hadoopConf);
+ fs = FSUtils.getFs(basePath.toString(), hadoopConf);
}
- private Writer writeLogFile(File partitionDir, Schema schema, String fileId, String baseCommit, String
- newCommit,
+ @TempDir
+ public java.nio.file.Path basePath;
+
+ private Writer writeLogFile(File partitionDir, Schema schema, String fileId, String baseCommit, String newCommit,
int numberOfRecords) throws InterruptedException, IOException {
return InputFormatTestUtil.writeDataBlockToLogFile(partitionDir, fs, schema, fileId, baseCommit, newCommit,
numberOfRecords, 0,
@@ -125,7 +123,7 @@ public class TestHoodieRealtimeRecordReader {
private void testReader(boolean partitioned) throws Exception {
// initial commit
Schema schema = HoodieAvroUtils.addMetadataFields(SchemaTestUtil.getEvolvedSchema());
- HoodieTestUtils.init(hadoopConf, basePath.getRoot().getAbsolutePath(), HoodieTableType.MERGE_ON_READ);
+ HoodieTestUtils.init(hadoopConf, basePath.toString(), HoodieTableType.MERGE_ON_READ);
String baseInstant = "100";
File partitionDir = partitioned ? InputFormatTestUtil.prepareParquetTable(basePath, schema, 1, 100, baseInstant)
: InputFormatTestUtil.prepareNonPartitionedParquetTable(basePath, schema, 1, 100, baseInstant);
@@ -139,7 +137,7 @@ public class TestHoodieRealtimeRecordReader {
// TODO: HUDI-154 Once Hive 2.x PR (PR-674) is merged, enable this change
// logVersionsWithAction.add(Pair.of(HoodieTimeline.ROLLBACK_ACTION, 3));
FileSlice fileSlice =
- new FileSlice(partitioned ? FSUtils.getRelativePartitionPath(new Path(basePath.getRoot().getAbsolutePath()),
+ new FileSlice(partitioned ? FSUtils.getRelativePartitionPath(new Path(basePath.toString()),
new Path(partitionDir.getAbsolutePath())) : "default", baseInstant, "fileid0");
logVersionsWithAction.forEach(logVersionWithAction -> {
try {
@@ -163,13 +161,13 @@ public class TestHoodieRealtimeRecordReader {
}
long size = writer.getCurrentSize();
writer.close();
- assertTrue("block - size should be > 0", size > 0);
+ assertTrue(size > 0, "block - size should be > 0");
// create a split with baseFile (parquet file written earlier) and new log file(s)
fileSlice.addLogFile(writer.getLogFile());
HoodieRealtimeFileSplit split = new HoodieRealtimeFileSplit(
new FileSplit(new Path(partitionDir + "/fileid0_1-0-1_" + baseInstant + ".parquet"), 0, 1, jobConf),
- basePath.getRoot().getPath(), fileSlice.getLogFiles().sorted(HoodieLogFile.getLogFileComparator())
+ basePath.toString(), fileSlice.getLogFiles().sorted(HoodieLogFile.getLogFileComparator())
.map(h -> h.getPath().toString()).collect(Collectors.toList()),
instantTime);
@@ -210,7 +208,7 @@ public class TestHoodieRealtimeRecordReader {
public void testUnMergedReader() throws Exception {
// initial commit
Schema schema = HoodieAvroUtils.addMetadataFields(SchemaTestUtil.getEvolvedSchema());
- HoodieTestUtils.init(hadoopConf, basePath.getRoot().getAbsolutePath(), HoodieTableType.MERGE_ON_READ);
+ HoodieTestUtils.init(hadoopConf, basePath.toString(), HoodieTableType.MERGE_ON_READ);
String instantTime = "100";
final int numRecords = 1000;
final int firstBatchLastRecordKey = numRecords - 1;
@@ -227,13 +225,13 @@ public class TestHoodieRealtimeRecordReader {
numRecords, numRecords, 0);
long size = writer.getCurrentSize();
writer.close();
- assertTrue("block - size should be > 0", size > 0);
+ assertTrue(size > 0, "block - size should be > 0");
// create a split with baseFile (parquet file written earlier) and new log file(s)
String logFilePath = writer.getLogFile().getPath().toString();
HoodieRealtimeFileSplit split = new HoodieRealtimeFileSplit(
new FileSplit(new Path(partitionDir + "/fileid0_1-0-1_" + instantTime + ".parquet"), 0, 1, jobConf),
- basePath.getRoot().getPath(), Collections.singletonList(logFilePath), newCommitTime);
+ basePath.toString(), Collections.singletonList(logFilePath), newCommitTime);
// create a RecordReader to be used by HoodieRealtimeRecordReader
RecordReader<NullWritable, ArrayWritable> reader = new MapredParquetInputFormat().getRecordReader(
@@ -262,17 +260,17 @@ public class TestHoodieRealtimeRecordReader {
int gotKey = Integer.parseInt(keyStr.substring("key".length()));
if (gotCommit.equals(newCommitTime)) {
numRecordsAtCommit2++;
- Assert.assertTrue(gotKey > firstBatchLastRecordKey);
- Assert.assertTrue(gotKey <= secondBatchLastRecordKey);
+ assertTrue(gotKey > firstBatchLastRecordKey);
+ assertTrue(gotKey <= secondBatchLastRecordKey);
assertEquals(gotKey, lastSeenKeyFromLog + 1);
lastSeenKeyFromLog++;
} else {
numRecordsAtCommit1++;
- Assert.assertTrue(gotKey >= 0);
- Assert.assertTrue(gotKey <= firstBatchLastRecordKey);
+ assertTrue(gotKey >= 0);
+ assertTrue(gotKey <= firstBatchLastRecordKey);
}
// Ensure unique key
- Assert.assertFalse(seenKeys.contains(gotKey));
+ assertFalse(seenKeys.contains(gotKey));
seenKeys.add(gotKey);
key = recordReader.createKey();
value = recordReader.createValue();
@@ -288,7 +286,7 @@ public class TestHoodieRealtimeRecordReader {
public void testReaderWithNestedAndComplexSchema() throws Exception {
// initial commit
Schema schema = HoodieAvroUtils.addMetadataFields(SchemaTestUtil.getComplexEvolvedSchema());
- HoodieTestUtils.init(hadoopConf, basePath.getRoot().getAbsolutePath(), HoodieTableType.MERGE_ON_READ);
+ HoodieTestUtils.init(hadoopConf, basePath.toString(), HoodieTableType.MERGE_ON_READ);
String instantTime = "100";
int numberOfRecords = 100;
int numberOfLogRecords = numberOfRecords / 2;
@@ -303,14 +301,14 @@ public class TestHoodieRealtimeRecordReader {
writeLogFile(partitionDir, schema, "fileid0", instantTime, newCommitTime, numberOfLogRecords);
long size = writer.getCurrentSize();
writer.close();
- assertTrue("block - size should be > 0", size > 0);
+ assertTrue(size > 0, "block - size should be > 0");
InputFormatTestUtil.deltaCommit(basePath, newCommitTime);
// create a split with baseFile (parquet file written earlier) and new log file(s)
String logFilePath = writer.getLogFile().getPath().toString();
HoodieRealtimeFileSplit split = new HoodieRealtimeFileSplit(
new FileSplit(new Path(partitionDir + "/fileid0_1-0-1_" + instantTime + ".parquet"), 0, 1, jobConf),
- basePath.getRoot().getPath(), Collections.singletonList(logFilePath), newCommitTime);
+ basePath.toString(), Collections.singletonList(logFilePath), newCommitTime);
// create a RecordReader to be used by HoodieRealtimeRecordReader
RecordReader<NullWritable, ArrayWritable> reader = new MapredParquetInputFormat().getRecordReader(
@@ -345,66 +343,69 @@ public class TestHoodieRealtimeRecordReader {
value = recordReader.createValue();
// Assert type STRING
- assertEquals("test value for field: field1", values[5].toString(), "field" + currentRecordNo);
- assertEquals("test value for field: field2", values[6].toString(),
- "field" + currentRecordNo + recordCommitTimeSuffix);
- assertEquals("test value for field: name", values[7].toString(), "name" + currentRecordNo);
+ assertEquals(values[5].toString(), "field" + currentRecordNo, "test value for field: field1");
+ assertEquals(values[6].toString(), "field" + currentRecordNo + recordCommitTimeSuffix,
+ "test value for field: field2");
+ assertEquals(values[7].toString(), "name" + currentRecordNo,
+ "test value for field: name");
// Assert type INT
IntWritable intWritable = (IntWritable) values[8];
- assertEquals("test value for field: favoriteIntNumber", intWritable.get(),
- currentRecordNo + recordCommitTime.hashCode());
+ assertEquals(intWritable.get(), currentRecordNo + recordCommitTime.hashCode(),
+ "test value for field: favoriteIntNumber");
// Assert type LONG
LongWritable longWritable = (LongWritable) values[9];
- assertEquals("test value for field: favoriteNumber", longWritable.get(),
- currentRecordNo + recordCommitTime.hashCode());
+ assertEquals(longWritable.get(), currentRecordNo + recordCommitTime.hashCode(),
+ "test value for field: favoriteNumber");
// Assert type FLOAT
FloatWritable floatWritable = (FloatWritable) values[10];
- assertEquals("test value for field: favoriteFloatNumber", floatWritable.get(),
- (float) ((currentRecordNo + recordCommitTime.hashCode()) / 1024.0), 0);
+ assertEquals(floatWritable.get(), (float) ((currentRecordNo + recordCommitTime.hashCode()) / 1024.0), 0,
+ "test value for field: favoriteFloatNumber");
// Assert type DOUBLE
DoubleWritable doubleWritable = (DoubleWritable) values[11];
- assertEquals("test value for field: favoriteDoubleNumber", doubleWritable.get(),
- (currentRecordNo + recordCommitTime.hashCode()) / 1024.0, 0);
+ assertEquals(doubleWritable.get(), (currentRecordNo + recordCommitTime.hashCode()) / 1024.0, 0,
+ "test value for field: favoriteDoubleNumber");
// Assert type MAP
ArrayWritable mapItem = (ArrayWritable) values[12];
Writable mapItemValue1 = mapItem.get()[0];
Writable mapItemValue2 = mapItem.get()[1];
- assertEquals("test value for field: tags", ((ArrayWritable) mapItemValue1).get()[0].toString(),
- "mapItem1");
- assertEquals("test value for field: tags", ((ArrayWritable) mapItemValue2).get()[0].toString(),
- "mapItem2");
- assertEquals("test value for field: tags", ((ArrayWritable) mapItemValue1).get().length, 2);
- assertEquals("test value for field: tags", ((ArrayWritable) mapItemValue2).get().length, 2);
+ assertEquals(((ArrayWritable) mapItemValue1).get()[0].toString(), "mapItem1",
+ "test value for field: tags");
+ assertEquals(((ArrayWritable) mapItemValue2).get()[0].toString(), "mapItem2",
+ "test value for field: tags");
+ assertEquals(((ArrayWritable) mapItemValue1).get().length, 2,
+ "test value for field: tags");
+ assertEquals(((ArrayWritable) mapItemValue2).get().length, 2,
+ "test value for field: tags");
Writable mapItemValue1value = ((ArrayWritable) mapItemValue1).get()[1];
Writable mapItemValue2value = ((ArrayWritable) mapItemValue2).get()[1];
- assertEquals("test value for field: tags[\"mapItem1\"].item1",
- ((ArrayWritable) mapItemValue1value).get()[0].toString(), "item" + currentRecordNo);
- assertEquals("test value for field: tags[\"mapItem2\"].item1",
- ((ArrayWritable) mapItemValue2value).get()[0].toString(), "item2" + currentRecordNo);
- assertEquals("test value for field: tags[\"mapItem1\"].item2",
- ((ArrayWritable) mapItemValue1value).get()[1].toString(), "item" + currentRecordNo + recordCommitTimeSuffix);
- assertEquals("test value for field: tags[\"mapItem2\"].item2",
- ((ArrayWritable) mapItemValue2value).get()[1].toString(), "item2" + currentRecordNo + recordCommitTimeSuffix);
+ assertEquals(((ArrayWritable) mapItemValue1value).get()[0].toString(), "item" + currentRecordNo,
+ "test value for field: tags[\"mapItem1\"].item1");
+ assertEquals(((ArrayWritable) mapItemValue2value).get()[0].toString(), "item2" + currentRecordNo,
+ "test value for field: tags[\"mapItem2\"].item1");
+ assertEquals(((ArrayWritable) mapItemValue1value).get()[1].toString(), "item" + currentRecordNo + recordCommitTimeSuffix,
+ "test value for field: tags[\"mapItem1\"].item2");
+ assertEquals(((ArrayWritable) mapItemValue2value).get()[1].toString(), "item2" + currentRecordNo + recordCommitTimeSuffix,
+ "test value for field: tags[\"mapItem2\"].item2");
// Assert type RECORD
ArrayWritable recordItem = (ArrayWritable) values[13];
Writable[] nestedRecord = recordItem.get();
- assertFalse("test value for field: testNestedRecord.isAdmin", ((BooleanWritable) nestedRecord[0]).get());
- assertEquals("test value for field: testNestedRecord.userId", nestedRecord[1].toString(),
- "UserId" + currentRecordNo + recordCommitTimeSuffix);
+ assertFalse(((BooleanWritable) nestedRecord[0]).get(), "test value for field: testNestedRecord.isAdmin");
+ assertEquals(nestedRecord[1].toString(), "UserId" + currentRecordNo + recordCommitTimeSuffix,
+ "test value for field: testNestedRecord.userId");
// Assert type ARRAY
ArrayWritable arrayValue = (ArrayWritable) values[14];
Writable[] arrayValues = arrayValue.get();
for (int i = 0; i < arrayValues.length; i++) {
- assertEquals("test value for field: stringArray", "stringArray" + i + recordCommitTimeSuffix,
- arrayValues[i].toString());
+ assertEquals("stringArray" + i + recordCommitTimeSuffix, arrayValues[i].toString(),
+ "test value for field: stringArray");
}
}
}
@@ -414,7 +415,7 @@ public class TestHoodieRealtimeRecordReader {
// initial commit
List<String> logFilePaths = new ArrayList<>();
Schema schema = HoodieAvroUtils.addMetadataFields(SchemaTestUtil.getSimpleSchema());
- HoodieTestUtils.init(hadoopConf, basePath.getRoot().getAbsolutePath(), HoodieTableType.MERGE_ON_READ);
+ HoodieTestUtils.init(hadoopConf, basePath.toString(), HoodieTableType.MERGE_ON_READ);
String instantTime = "100";
int numberOfRecords = 100;
int numberOfLogRecords = numberOfRecords / 2;
@@ -434,7 +435,7 @@ public class TestHoodieRealtimeRecordReader {
long size = writer.getCurrentSize();
logFilePaths.add(writer.getLogFile().getPath().toString());
writer.close();
- assertTrue("block - size should be > 0", size > 0);
+ assertTrue(size > 0, "block - size should be > 0");
// write rollback for the previous block in new log file version
newCommitTime = "102";
@@ -447,7 +448,7 @@ public class TestHoodieRealtimeRecordReader {
// create a split with baseFile (parquet file written earlier) and new log file(s)
HoodieRealtimeFileSplit split = new HoodieRealtimeFileSplit(
new FileSplit(new Path(partitionDir + "/fileid0_1_" + instantTime + ".parquet"), 0, 1, jobConf),
- basePath.getRoot().getPath(), logFilePaths, newCommitTime);
+ basePath.toString(), logFilePaths, newCommitTime);
// create a RecordReader to be used by HoodieRealtimeRecordReader
RecordReader<NullWritable, ArrayWritable> reader = new MapredParquetInputFormat().getRecordReader(
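One subtlety in this file: the floating-point assertions keep their delta argument, and Jupiter still offers assertEquals(expected, actual, delta, message) overloads for float and double; only the message moves to the end. A sketch with illustrative values:

    import static org.junit.jupiter.api.Assertions.assertEquals;

    class FloatAssertSketch {

      static void checkWithinTolerance(double expected, double actual) {
        // a delta of 0 demands exact equality; a small epsilon is the
        // usual choice for computed values
        assertEquals(expected, actual, 1e-9, "values should agree within tolerance");
      }
    }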
diff --git a/hudi-utilities/src/test/java/org/apache/hudi/utilities/TestHoodieSnapshotCopier.java b/hudi-utilities/src/test/java/org/apache/hudi/utilities/TestHoodieSnapshotCopier.java
index fa1ca79..97ab12c 100644
--- a/hudi-utilities/src/test/java/org/apache/hudi/utilities/TestHoodieSnapshotCopier.java
+++ b/hudi-utilities/src/test/java/org/apache/hudi/utilities/TestHoodieSnapshotCopier.java
@@ -18,28 +18,28 @@
package org.apache.hudi.utilities;
-import org.apache.hudi.common.HoodieCommonTestHarness;
import org.apache.hudi.common.HoodieTestDataGenerator;
import org.apache.hudi.common.fs.FSUtils;
import org.apache.hudi.common.model.HoodieTestUtils;
+import org.apache.hudi.common.testutils.HoodieCommonTestHarnessJunit5;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
import java.io.File;
import java.io.IOException;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
-public class TestHoodieSnapshotCopier extends HoodieCommonTestHarness {
+public class TestHoodieSnapshotCopier extends HoodieCommonTestHarnessJunit5 {
private static final String TEST_WRITE_TOKEN = "1-0-1";
@@ -49,10 +49,10 @@ public class TestHoodieSnapshotCopier extends HoodieCommonTestHarness {
private FileSystem fs = null;
private JavaSparkContext jsc = null;
- @Before
+ @BeforeEach
public void init() throws IOException {
// Prepare directories
- rootPath = "file://" + folder.getRoot().getAbsolutePath();
+ rootPath = "file://" + tempDir.toString();
basePath = rootPath + "/" + HoodieTestUtils.RAW_TRIPS_TEST_NAME;
outputPath = rootPath + "/output";
@@ -147,7 +147,7 @@ public class TestHoodieSnapshotCopier extends HoodieCommonTestHarness {
assertTrue(fs.exists(new Path(outputPath + "/_SUCCESS")));
}
- @After
+ @AfterEach
public void cleanup() {
if (rootPath != null) {
new File(rootPath).delete();
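@Before/@After carry over to @BeforeEach/@AfterEach with identical per-test semantics; only the names change. A minimal sketch:

    import org.junit.jupiter.api.AfterEach;
    import org.junit.jupiter.api.BeforeEach;

    class PerTestLifecycleSketch {

      @BeforeEach
      void init() {
        // runs before every @Test method, like JUnit 4's @Before
      }

      @AfterEach
      void cleanup() {
        // runs after every @Test method, like JUnit 4's @After
      }
    }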
diff --git a/hudi-utilities/src/test/java/org/apache/hudi/utilities/checkpointing/TestKafkaConnectHdfsProvider.java b/hudi-utilities/src/test/java/org/apache/hudi/utilities/checkpointing/TestKafkaConnectHdfsProvider.java
index e79a574..8a59bf9 100644
--- a/hudi-utilities/src/test/java/org/apache/hudi/utilities/checkpointing/TestKafkaConnectHdfsProvider.java
+++ b/hudi-utilities/src/test/java/org/apache/hudi/utilities/checkpointing/TestKafkaConnectHdfsProvider.java
@@ -18,24 +18,26 @@
package org.apache.hudi.utilities.checkpointing;
-import org.apache.hudi.common.HoodieCommonTestHarness;
import org.apache.hudi.common.config.TypedProperties;
import org.apache.hudi.common.model.HoodieTestUtils;
+import org.apache.hudi.common.testutils.HoodieCommonTestHarnessJunit5;
import org.apache.hudi.exception.HoodieException;
import org.apache.hadoop.conf.Configuration;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
import java.io.File;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+
-public class TestKafkaConnectHdfsProvider extends HoodieCommonTestHarness {
+public class TestKafkaConnectHdfsProvider extends HoodieCommonTestHarnessJunit5 {
private String topicPath = null;
private Configuration hadoopConf = null;
- @Before
+ @BeforeEach
public void init() {
// Prepare directories
initPath();
@@ -71,10 +73,10 @@ public class TestKafkaConnectHdfsProvider extends HoodieCommonTestHarness {
props.put("hoodie.deltastreamer.checkpoint.provider.path", topicPath);
final InitialCheckPointProvider provider = new KafkaConnectHdfsProvider(props);
provider.init(hadoopConf);
- assertEquals(provider.getCheckpoint(), "topic1,0:300,1:200");
+ assertEquals("topic1,0:300,1:200", provider.getCheckpoint());
}
- @Test(expected = HoodieException.class)
+ @Test
public void testMissingPartition() throws Exception {
topicPath = basePath + "/topic2";
new File(topicPath).mkdirs();
@@ -92,6 +94,6 @@ public class TestKafkaConnectHdfsProvider extends HoodieCommonTestHarness {
props.put("hoodie.deltastreamer.checkpoint.provider.path", topicPath);
final InitialCheckPointProvider provider = new KafkaConnectHdfsProvider(props);
provider.init(hadoopConf);
- provider.getCheckpoint();
+ assertThrows(HoodieException.class, provider::getCheckpoint);
}
}
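Finally, @Test(expected = ...) has no Jupiter equivalent; the diff replaces it with assertThrows, which narrows the expectation to a single statement and hands back the exception for inspection. A sketch; the exception type and lambda body are illustrative:

    import org.junit.jupiter.api.Test;

    import static org.junit.jupiter.api.Assertions.assertEquals;
    import static org.junit.jupiter.api.Assertions.assertThrows;

    class AssertThrowsSketch {

      @Test
      void rejectsMissingPartition() {
        // only the lambda may throw; the rest of the test body cannot
        // accidentally satisfy the expectation
        IllegalStateException e = assertThrows(IllegalStateException.class,
            () -> { throw new IllegalStateException("missing partition"); });
        assertEquals("missing partition", e.getMessage());
      }
    }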