Posted to commits@paimon.apache.org by cz...@apache.org on 2023/03/27 02:35:43 UTC

[incubator-paimon] branch master updated: [spark] Use new ReadBuilder and WriteBuilder API in tests of paimon-spark module (#717)

This is an automated email from the ASF dual-hosted git repository.

czweng pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-paimon.git


The following commit(s) were added to refs/heads/master by this push:
     new bcde73a16 [spark] Use new ReadBuilder and WriteBuilder API in tests of paimon-spark module (#717)
bcde73a16 is described below

commit bcde73a16ba7536340ef17ee3a2edb328c031a80
Author: Tyrantlucifer <ty...@apache.org>
AuthorDate: Mon Mar 27 10:35:38 2023 +0800

    [spark] Use new ReadBuilder and WriteBuilder API in tests of paimon-spark module (#717)
---
 .../test/java/org/apache/paimon/spark/SimpleTableTestHelper.java   | 7 ++++---
 .../src/test/java/org/apache/paimon/spark/SparkReadITCase.java     | 4 +++-
 .../src/test/java/org/apache/paimon/spark/SparkReadTestBase.java   | 6 ++++--
 3 files changed, 11 insertions(+), 6 deletions(-)
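For context, the patch swaps the per-call commit-user pattern for a single StreamWriteBuilder that serves both the write and the commit path. A minimal before/after sketch, grounded in the hunks below; the builder is assumed to manage the commit user internally, since the hardcoded "user" string disappears from the tests:

    import org.apache.paimon.table.FileStoreTable;
    import org.apache.paimon.table.sink.StreamTableCommit;
    import org.apache.paimon.table.sink.StreamTableWrite;
    import org.apache.paimon.table.sink.StreamWriteBuilder;

    class WriteBuilderMigration {
        // Old API: the caller threads a commit user through both factory calls.
        static void oldApi(FileStoreTable table) {
            String commitUser = "user";
            StreamTableWrite writer = table.newWrite(commitUser);
            StreamTableCommit commit = table.newCommit(commitUser);
        }

        // New API: one builder hands out both objects, so the write and the
        // commit are guaranteed to share a commit user without the caller
        // tracking it.
        static void newApi(FileStoreTable table) {
            StreamWriteBuilder streamWriteBuilder = table.newStreamWriteBuilder();
            StreamTableWrite writer = streamWriteBuilder.newWrite();
            StreamTableCommit commit = streamWriteBuilder.newCommit();
        }
    }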

diff --git a/paimon-spark/paimon-spark-2/src/test/java/org/apache/paimon/spark/SimpleTableTestHelper.java b/paimon-spark/paimon-spark-2/src/test/java/org/apache/paimon/spark/SimpleTableTestHelper.java
index 7eddf3de9..a7df4cee0 100644
--- a/paimon-spark/paimon-spark-2/src/test/java/org/apache/paimon/spark/SimpleTableTestHelper.java
+++ b/paimon-spark/paimon-spark-2/src/test/java/org/apache/paimon/spark/SimpleTableTestHelper.java
@@ -29,6 +29,7 @@ import org.apache.paimon.table.FileStoreTable;
 import org.apache.paimon.table.FileStoreTableFactory;
 import org.apache.paimon.table.sink.StreamTableCommit;
 import org.apache.paimon.table.sink.StreamTableWrite;
+import org.apache.paimon.table.sink.StreamWriteBuilder;
 import org.apache.paimon.types.RowType;
 
 import java.util.Collections;
@@ -59,9 +60,9 @@ public class SimpleTableTestHelper {
         conf.setString("path", path.toString());
         FileStoreTable table = FileStoreTableFactory.create(LocalFileIO.create(), conf);
 
-        String commitUser = "user";
-        this.writer = table.newWrite(commitUser);
-        this.commit = table.newCommit(commitUser);
+        StreamWriteBuilder streamWriteBuilder = table.newStreamWriteBuilder();
+        writer = streamWriteBuilder.newWrite();
+        commit = streamWriteBuilder.newCommit();
 
         this.commitIdentifier = 0;
     }
diff --git a/paimon-spark/paimon-spark-common/src/test/java/org/apache/paimon/spark/SparkReadITCase.java b/paimon-spark/paimon-spark-common/src/test/java/org/apache/paimon/spark/SparkReadITCase.java
index 16b7fd5cf..08653c8af 100644
--- a/paimon-spark/paimon-spark-common/src/test/java/org/apache/paimon/spark/SparkReadITCase.java
+++ b/paimon-spark/paimon-spark-common/src/test/java/org/apache/paimon/spark/SparkReadITCase.java
@@ -78,7 +78,9 @@ public class SparkReadITCase extends SparkReadTestBase {
                 spark.table("`t1$snapshots`")
                         .select("snapshot_id", "schema_id", "commit_user", "commit_kind")
                         .collectAsList();
-        assertThat(rows.toString()).isEqualTo("[[1,0,user,APPEND]]");
+        String commitUser = rows.get(0).getString(2);
+        String rowString = String.format("[[1,0,%s,APPEND]]", commitUser);
+        assertThat(rows.toString()).isEqualTo(rowString);
 
         spark.sql(
                 "CREATE TABLE schemasTable (\n"
diff --git a/paimon-spark/paimon-spark-common/src/test/java/org/apache/paimon/spark/SparkReadTestBase.java b/paimon-spark/paimon-spark-common/src/test/java/org/apache/paimon/spark/SparkReadTestBase.java
index 3a3cb0b52..a5f8b2978 100644
--- a/paimon-spark/paimon-spark-common/src/test/java/org/apache/paimon/spark/SparkReadTestBase.java
+++ b/paimon-spark/paimon-spark-common/src/test/java/org/apache/paimon/spark/SparkReadTestBase.java
@@ -27,6 +27,7 @@ import org.apache.paimon.table.FileStoreTable;
 import org.apache.paimon.table.FileStoreTableFactory;
 import org.apache.paimon.table.sink.StreamTableCommit;
 import org.apache.paimon.table.sink.StreamTableWrite;
+import org.apache.paimon.table.sink.StreamWriteBuilder;
 import org.apache.paimon.types.DataField;
 import org.apache.paimon.types.RowKind;
 
@@ -173,8 +174,9 @@ public abstract class SparkReadTestBase {
                 FileStoreTableFactory.create(
                         LocalFileIO.create(),
                         new Path(warehousePath, String.format("default.db/%s", tableName)));
-        StreamTableWrite writer = fileStoreTable.newWrite(COMMIT_USER);
-        StreamTableCommit commit = fileStoreTable.newCommit(COMMIT_USER);
+        StreamWriteBuilder streamWriteBuilder = fileStoreTable.newStreamWriteBuilder();
+        StreamTableWrite writer = streamWriteBuilder.newWrite();
+        StreamTableCommit commit = streamWriteBuilder.newCommit();
         for (GenericRow row : rows) {
             writer.write(row);
         }
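For completeness, the write loop above is typically followed by a prepare/commit step. A minimal sketch of the full cycle under the new builder API, assuming the usual StreamTableWrite.prepareCommit(boolean, long) and StreamTableCommit.commit(long, List<CommitMessage>) signatures, neither of which appears in this diff:

    import java.util.List;

    import org.apache.paimon.data.GenericRow;
    import org.apache.paimon.table.FileStoreTable;
    import org.apache.paimon.table.sink.CommitMessage;
    import org.apache.paimon.table.sink.StreamTableCommit;
    import org.apache.paimon.table.sink.StreamTableWrite;
    import org.apache.paimon.table.sink.StreamWriteBuilder;

    class FullWriteCycle {
        static void writeAndCommit(FileStoreTable table, List<GenericRow> rows)
                throws Exception {
            StreamWriteBuilder streamWriteBuilder = table.newStreamWriteBuilder();
            StreamTableWrite writer = streamWriteBuilder.newWrite();
            StreamTableCommit commit = streamWriteBuilder.newCommit();
            for (GenericRow row : rows) {
                writer.write(row);
            }
            // Flush buffered records into commit messages, then commit them
            // under a monotonically increasing identifier.
            long commitIdentifier = 0;
            List<CommitMessage> messages = writer.prepareCommit(false, commitIdentifier);
            commit.commit(commitIdentifier, messages);
            writer.close();
            commit.close();
        }
    }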