Posted to commits@iceberg.apache.org by fo...@apache.org on 2022/10/21 18:00:23 UTC

[iceberg] branch master updated: Spark/Flink: Replace & Ban Hamcrest usage (#6030)

This is an automated email from the ASF dual-hosted git repository.

fokko pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/iceberg.git


The following commit(s) were added to refs/heads/master by this push:
     new c103b93a94 Spark/Flink: Replace & Ban Hamcrest usage (#6030)
c103b93a94 is described below

commit c103b93a94891219f5d5db38f97306744fd03078
Author: Eduard Tudenhöfner <et...@gmail.com>
AuthorDate: Fri Oct 21 20:00:16 2022 +0200

    Spark/Flink: Replace & Ban Hamcrest usage (#6030)
    
    Using Hamcrest Matchers is usually clunkier than using AssertJ
    assertions, so we should prefer AssertJ, which is more fluent and
    more flexible in usage and readability (see the sketch after the
    file list below).
---
 .baseline/checkstyle/checkstyle.xml                         |  5 +++++
 .../source/enumerator/TestContinuousIcebergEnumerator.java  | 13 +++++--------
 .../source/enumerator/TestContinuousIcebergEnumerator.java  | 13 +++++--------
 .../apache/iceberg/spark/data/TestSparkParquetReader.java   |  4 ++--
 .../apache/iceberg/spark/data/TestSparkParquetReader.java   |  4 ++--
 .../apache/iceberg/spark/data/TestSparkParquetReader.java   |  4 ++--
 .../apache/iceberg/spark/data/TestSparkParquetReader.java   |  4 ++--
 .../apache/iceberg/spark/data/TestSparkParquetReader.java   |  4 ++--
 8 files changed, 25 insertions(+), 26 deletions(-)
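
For illustration, here is the same membership check written in both
styles. This is a minimal sketch, not code from the commit: the class,
list, and values are hypothetical, and it only assumes hamcrest-core
and assertj-core on the classpath.

    import java.util.Arrays;
    import java.util.List;
    import org.assertj.core.api.Assertions;
    import org.hamcrest.CoreMatchers;
    import org.hamcrest.MatcherAssert;

    public class AssertStyleExample {
      public static void main(String[] args) {
        List<String> branches = Arrays.asList("master", "release");

        // Hamcrest: the assertion method and the matcher come from two
        // separate classes, and the expected value is wrapped in a matcher.
        MatcherAssert.assertThat(branches, CoreMatchers.hasItem("master"));

        // AssertJ: one fluent chain that starts from the actual value.
        Assertions.assertThat(branches).contains("master");
      }
    }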

diff --git a/.baseline/checkstyle/checkstyle.xml b/.baseline/checkstyle/checkstyle.xml
index 3425288817..b2f9ef1244 100644
--- a/.baseline/checkstyle/checkstyle.xml
+++ b/.baseline/checkstyle/checkstyle.xml
@@ -394,6 +394,11 @@
             <property name="illegalClasses" value="org.junit.rules.ExpectedException"/>
             <message key="import.illegal" value="Prefer using Assertions.assertThatThrownBy(...).isInstanceOf(...) instead."/>
         </module>
+        <module name="IllegalImport">
+            <property name="id" value="BanHamcrestUsage"/>
+            <property name="illegalPkgs" value="org.hamcrest"/>
+            <message key="import.illegal" value="Prefer using org.assertj.core.api.Assertions instead."/>
+        </module>
         <module name="RegexpSinglelineJava">
             <property name="ignoreComments" value="true"/>
             <property name="format" value="@Json(S|Des)erialize"/>
diff --git a/flink/v1.14/flink/src/test/java/org/apache/iceberg/flink/source/enumerator/TestContinuousIcebergEnumerator.java b/flink/v1.14/flink/src/test/java/org/apache/iceberg/flink/source/enumerator/TestContinuousIcebergEnumerator.java
index aad2769af0..50b730ae52 100644
--- a/flink/v1.14/flink/src/test/java/org/apache/iceberg/flink/source/enumerator/TestContinuousIcebergEnumerator.java
+++ b/flink/v1.14/flink/src/test/java/org/apache/iceberg/flink/source/enumerator/TestContinuousIcebergEnumerator.java
@@ -32,8 +32,7 @@ import org.apache.iceberg.flink.source.split.IcebergSourceSplit;
 import org.apache.iceberg.flink.source.split.IcebergSourceSplitState;
 import org.apache.iceberg.flink.source.split.IcebergSourceSplitStatus;
 import org.apache.iceberg.flink.source.split.SplitRequestEvent;
-import org.hamcrest.CoreMatchers;
-import org.hamcrest.MatcherAssert;
+import org.assertj.core.api.Assertions;
 import org.junit.Assert;
 import org.junit.ClassRule;
 import org.junit.Test;
@@ -97,9 +96,8 @@ public class TestContinuousIcebergEnumerator {
     enumeratorContext.triggerAllActions();
 
     Assert.assertTrue(enumerator.snapshotState(1).pendingSplits().isEmpty());
-    MatcherAssert.assertThat(
-        enumeratorContext.getSplitAssignments().get(2).getAssignedSplits(),
-        CoreMatchers.hasItem(splits.get(0)));
+    Assertions.assertThat(enumeratorContext.getSplitAssignments().get(2).getAssignedSplits())
+        .contains(splits.get(0));
   }
 
   @Test
@@ -145,9 +143,8 @@ public class TestContinuousIcebergEnumerator {
     enumerator.handleSourceEvent(2, new SplitRequestEvent());
 
     Assert.assertTrue(enumerator.snapshotState(2).pendingSplits().isEmpty());
-    MatcherAssert.assertThat(
-        enumeratorContext.getSplitAssignments().get(2).getAssignedSplits(),
-        CoreMatchers.hasItem(splits.get(0)));
+    Assertions.assertThat(enumeratorContext.getSplitAssignments().get(2).getAssignedSplits())
+        .contains(splits.get(0));
   }
 
   private static ContinuousIcebergEnumerator createEnumerator(
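
The hasItem-to-contains mapping above preserves semantics: AssertJ's
contains(...) passes when the actual iterable holds each given element,
in any order, which matches CoreMatchers.hasItem for a single element.
A minimal, hypothetical sketch (the string list stands in for the
assigned splits):

    import java.util.Arrays;
    import java.util.List;
    import org.assertj.core.api.Assertions;

    public class ContainsExample {
      public static void main(String[] args) {
        // Stands in for getSplitAssignments().get(2).getAssignedSplits().
        List<String> assignedSplits = Arrays.asList("split-0", "split-1");

        // Old: MatcherAssert.assertThat(assignedSplits, CoreMatchers.hasItem("split-0"));
        // New: passes because "split-0" is present; order does not matter.
        Assertions.assertThat(assignedSplits).contains("split-0");
      }
    }
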
diff --git a/flink/v1.15/flink/src/test/java/org/apache/iceberg/flink/source/enumerator/TestContinuousIcebergEnumerator.java b/flink/v1.15/flink/src/test/java/org/apache/iceberg/flink/source/enumerator/TestContinuousIcebergEnumerator.java
index aad2769af0..50b730ae52 100644
--- a/flink/v1.15/flink/src/test/java/org/apache/iceberg/flink/source/enumerator/TestContinuousIcebergEnumerator.java
+++ b/flink/v1.15/flink/src/test/java/org/apache/iceberg/flink/source/enumerator/TestContinuousIcebergEnumerator.java
@@ -32,8 +32,7 @@ import org.apache.iceberg.flink.source.split.IcebergSourceSplit;
 import org.apache.iceberg.flink.source.split.IcebergSourceSplitState;
 import org.apache.iceberg.flink.source.split.IcebergSourceSplitStatus;
 import org.apache.iceberg.flink.source.split.SplitRequestEvent;
-import org.hamcrest.CoreMatchers;
-import org.hamcrest.MatcherAssert;
+import org.assertj.core.api.Assertions;
 import org.junit.Assert;
 import org.junit.ClassRule;
 import org.junit.Test;
@@ -97,9 +96,8 @@ public class TestContinuousIcebergEnumerator {
     enumeratorContext.triggerAllActions();
 
     Assert.assertTrue(enumerator.snapshotState(1).pendingSplits().isEmpty());
-    MatcherAssert.assertThat(
-        enumeratorContext.getSplitAssignments().get(2).getAssignedSplits(),
-        CoreMatchers.hasItem(splits.get(0)));
+    Assertions.assertThat(enumeratorContext.getSplitAssignments().get(2).getAssignedSplits())
+        .contains(splits.get(0));
   }
 
   @Test
@@ -145,9 +143,8 @@ public class TestContinuousIcebergEnumerator {
     enumerator.handleSourceEvent(2, new SplitRequestEvent());
 
     Assert.assertTrue(enumerator.snapshotState(2).pendingSplits().isEmpty());
-    MatcherAssert.assertThat(
-        enumeratorContext.getSplitAssignments().get(2).getAssignedSplits(),
-        CoreMatchers.hasItem(splits.get(0)));
+    Assertions.assertThat(enumeratorContext.getSplitAssignments().get(2).getAssignedSplits())
+        .contains(splits.get(0));
   }
 
   private static ContinuousIcebergEnumerator createEnumerator(
diff --git a/spark/v2.4/spark/src/test/java/org/apache/iceberg/spark/data/TestSparkParquetReader.java b/spark/v2.4/spark/src/test/java/org/apache/iceberg/spark/data/TestSparkParquetReader.java
index d4b7443e2e..ac284d4733 100644
--- a/spark/v2.4/spark/src/test/java/org/apache/iceberg/spark/data/TestSparkParquetReader.java
+++ b/spark/v2.4/spark/src/test/java/org/apache/iceberg/spark/data/TestSparkParquetReader.java
@@ -57,7 +57,7 @@ import org.apache.spark.sql.types.DataTypes;
 import org.apache.spark.sql.types.Metadata;
 import org.apache.spark.sql.types.StructField;
 import org.apache.spark.sql.types.StructType;
-import org.hamcrest.CoreMatchers;
+import org.assertj.core.api.Assertions;
 import org.junit.Assert;
 import org.junit.Assume;
 import org.junit.Test;
@@ -158,7 +158,7 @@ public class TestSparkParquetReader extends AvroDataTest {
     InputFile parquetInputFile = Files.localInput(outputFilePath);
     List<InternalRow> readRows = rowsFromFile(parquetInputFile, schema);
     Assert.assertEquals(rows.size(), readRows.size());
-    Assert.assertThat(readRows, CoreMatchers.is(rows));
+    Assertions.assertThat(readRows).isEqualTo(rows);
 
     // Now we try to import that file as an Iceberg table to make sure Iceberg can read
     // Int96 end to end.
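
The is-to-isEqualTo mapping is likewise semantics-preserving:
CoreMatchers.is(value) is a shortcut for equalTo(value), which compares
with equals(), and AssertJ's isEqualTo(...) also compares with equals(),
so for lists both forms check size and element order. A minimal,
hypothetical sketch (the lists stand in for rows and readRows):

    import java.util.Arrays;
    import java.util.List;
    import org.assertj.core.api.Assertions;

    public class IsEqualToExample {
      public static void main(String[] args) {
        List<String> rows = Arrays.asList("r1", "r2");      // stands in for the written rows
        List<String> readRows = Arrays.asList("r1", "r2");  // stands in for rowsFromFile(...)

        // Old: Assert.assertThat(readRows, CoreMatchers.is(rows));
        // Both compare with equals(), so the lists must match in size
        // and element order.
        Assertions.assertThat(readRows).isEqualTo(rows);
      }
    }
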
diff --git a/spark/v3.0/spark/src/test/java/org/apache/iceberg/spark/data/TestSparkParquetReader.java b/spark/v3.0/spark/src/test/java/org/apache/iceberg/spark/data/TestSparkParquetReader.java
index d4b7443e2e..ac284d4733 100644
--- a/spark/v3.0/spark/src/test/java/org/apache/iceberg/spark/data/TestSparkParquetReader.java
+++ b/spark/v3.0/spark/src/test/java/org/apache/iceberg/spark/data/TestSparkParquetReader.java
@@ -57,7 +57,7 @@ import org.apache.spark.sql.types.DataTypes;
 import org.apache.spark.sql.types.Metadata;
 import org.apache.spark.sql.types.StructField;
 import org.apache.spark.sql.types.StructType;
-import org.hamcrest.CoreMatchers;
+import org.assertj.core.api.Assertions;
 import org.junit.Assert;
 import org.junit.Assume;
 import org.junit.Test;
@@ -158,7 +158,7 @@ public class TestSparkParquetReader extends AvroDataTest {
     InputFile parquetInputFile = Files.localInput(outputFilePath);
     List<InternalRow> readRows = rowsFromFile(parquetInputFile, schema);
     Assert.assertEquals(rows.size(), readRows.size());
-    Assert.assertThat(readRows, CoreMatchers.is(rows));
+    Assertions.assertThat(readRows).isEqualTo(rows);
 
     // Now we try to import that file as an Iceberg table to make sure Iceberg can read
     // Int96 end to end.
diff --git a/spark/v3.1/spark/src/test/java/org/apache/iceberg/spark/data/TestSparkParquetReader.java b/spark/v3.1/spark/src/test/java/org/apache/iceberg/spark/data/TestSparkParquetReader.java
index d4b7443e2e..ac284d4733 100644
--- a/spark/v3.1/spark/src/test/java/org/apache/iceberg/spark/data/TestSparkParquetReader.java
+++ b/spark/v3.1/spark/src/test/java/org/apache/iceberg/spark/data/TestSparkParquetReader.java
@@ -57,7 +57,7 @@ import org.apache.spark.sql.types.DataTypes;
 import org.apache.spark.sql.types.Metadata;
 import org.apache.spark.sql.types.StructField;
 import org.apache.spark.sql.types.StructType;
-import org.hamcrest.CoreMatchers;
+import org.assertj.core.api.Assertions;
 import org.junit.Assert;
 import org.junit.Assume;
 import org.junit.Test;
@@ -158,7 +158,7 @@ public class TestSparkParquetReader extends AvroDataTest {
     InputFile parquetInputFile = Files.localInput(outputFilePath);
     List<InternalRow> readRows = rowsFromFile(parquetInputFile, schema);
     Assert.assertEquals(rows.size(), readRows.size());
-    Assert.assertThat(readRows, CoreMatchers.is(rows));
+    Assertions.assertThat(readRows).isEqualTo(rows);
 
     // Now we try to import that file as an Iceberg table to make sure Iceberg can read
     // Int96 end to end.
diff --git a/spark/v3.2/spark/src/test/java/org/apache/iceberg/spark/data/TestSparkParquetReader.java b/spark/v3.2/spark/src/test/java/org/apache/iceberg/spark/data/TestSparkParquetReader.java
index d4b7443e2e..ac284d4733 100644
--- a/spark/v3.2/spark/src/test/java/org/apache/iceberg/spark/data/TestSparkParquetReader.java
+++ b/spark/v3.2/spark/src/test/java/org/apache/iceberg/spark/data/TestSparkParquetReader.java
@@ -57,7 +57,7 @@ import org.apache.spark.sql.types.DataTypes;
 import org.apache.spark.sql.types.Metadata;
 import org.apache.spark.sql.types.StructField;
 import org.apache.spark.sql.types.StructType;
-import org.hamcrest.CoreMatchers;
+import org.assertj.core.api.Assertions;
 import org.junit.Assert;
 import org.junit.Assume;
 import org.junit.Test;
@@ -158,7 +158,7 @@ public class TestSparkParquetReader extends AvroDataTest {
     InputFile parquetInputFile = Files.localInput(outputFilePath);
     List<InternalRow> readRows = rowsFromFile(parquetInputFile, schema);
     Assert.assertEquals(rows.size(), readRows.size());
-    Assert.assertThat(readRows, CoreMatchers.is(rows));
+    Assertions.assertThat(readRows).isEqualTo(rows);
 
     // Now we try to import that file as an Iceberg table to make sure Iceberg can read
     // Int96 end to end.
diff --git a/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/data/TestSparkParquetReader.java b/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/data/TestSparkParquetReader.java
index ba24d848ad..ab434b051f 100644
--- a/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/data/TestSparkParquetReader.java
+++ b/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/data/TestSparkParquetReader.java
@@ -57,7 +57,7 @@ import org.apache.spark.sql.types.DataTypes;
 import org.apache.spark.sql.types.Metadata;
 import org.apache.spark.sql.types.StructField;
 import org.apache.spark.sql.types.StructType;
-import org.hamcrest.CoreMatchers;
+import org.assertj.core.api.Assertions;
 import org.junit.Assert;
 import org.junit.Assume;
 import org.junit.Test;
@@ -159,7 +159,7 @@ public class TestSparkParquetReader extends AvroDataTest {
     InputFile parquetInputFile = Files.localInput(outputFilePath);
     List<InternalRow> readRows = rowsFromFile(parquetInputFile, schema);
     Assert.assertEquals(rows.size(), readRows.size());
-    Assert.assertThat(readRows, CoreMatchers.is(rows));
+    Assertions.assertThat(readRows).isEqualTo(rows);
 
     // Now we try to import that file as an Iceberg table to make sure Iceberg can read
     // Int96 end to end.