Posted to commits@hudi.apache.org by vi...@apache.org on 2021/05/10 21:19:27 UTC

[hudi] 01/01: Trying to reproduce the schema evolution test break

This is an automated email from the ASF dual-hosted git repository.

vinoth pushed a commit to branch flaky-ci
in repository https://gitbox.apache.org/repos/asf/hudi.git

commit 12cf04d53eb7f10be51c11888c946f5e88d11486
Author: Vinoth Chandar <vi...@apache.org>
AuthorDate: Mon May 10 14:18:42 2021 -0700

    Trying to reproduce the schema evolution test break
---
 .../apache/hudi/functional/HoodieSparkSqlWriterSuite.scala | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/hudi/functional/HoodieSparkSqlWriterSuite.scala b/hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/hudi/functional/HoodieSparkSqlWriterSuite.scala
index 606435a..a4fb4ca 100644
--- a/hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/hudi/functional/HoodieSparkSqlWriterSuite.scala
+++ b/hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/hudi/functional/HoodieSparkSqlWriterSuite.scala
@@ -17,10 +17,6 @@
 
 package org.apache.hudi.functional
 
-import java.time.Instant
-import java.util
-import java.util.{Collections, Date, UUID}
-
 import org.apache.commons.io.FileUtils
 import org.apache.hadoop.fs.Path
 import org.apache.hudi.DataSourceWriteOptions._
@@ -29,8 +25,8 @@ import org.apache.hudi.common.model.{HoodieRecord, HoodieRecordPayload}
 import org.apache.hudi.common.testutils.HoodieTestDataGenerator
 import org.apache.hudi.config.{HoodieBootstrapConfig, HoodieWriteConfig}
 import org.apache.hudi.exception.HoodieException
-import org.apache.hudi.keygen.{NonpartitionedKeyGenerator, SimpleKeyGenerator}
 import org.apache.hudi.hive.HiveSyncConfig
+import org.apache.hudi.keygen.{NonpartitionedKeyGenerator, SimpleKeyGenerator}
 import org.apache.hudi.testutils.DataSourceTestUtils
 import org.apache.hudi.{AvroConversionUtils, DataSourceUtils, DataSourceWriteOptions, HoodieSparkSqlWriter, HoodieWriterUtils}
 import org.apache.spark.SparkContext
@@ -38,12 +34,15 @@ import org.apache.spark.api.java.JavaSparkContext
 import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.types.StructType
 import org.apache.spark.sql.{Row, SQLContext, SaveMode, SparkSession}
+import org.junit.jupiter.api.Assertions.assertEquals
 import org.mockito.ArgumentMatchers.any
 import org.mockito.Mockito.{spy, times, verify}
 import org.scalatest.{FunSuite, Matchers}
 
+import java.time.Instant
+import java.util
+import java.util.{Collections, Date, UUID}
 import scala.collection.JavaConversions._
-import org.junit.jupiter.api.Assertions.assertEquals
 
 class HoodieSparkSqlWriterSuite extends FunSuite with Matchers {
 
@@ -459,6 +458,9 @@ class HoodieSparkSqlWriterSuite extends FunSuite with Matchers {
             .drop(HoodieRecord.HOODIE_META_COLUMNS.get(2)).drop(HoodieRecord.HOODIE_META_COLUMNS.get(3))
             .drop(HoodieRecord.HOODIE_META_COLUMNS.get(4))
 
+          updatesDf.printSchema()
+          trimmedDf2.printSchema()
+
           // ensure 2nd batch of updates matches.
           assert(updatesDf.intersect(trimmedDf2).except(updatesDf).count() == 0)
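
Not part of the commit above, but as context for why the two added printSchema() calls help: Spark's Dataset set operations (intersect, except) resolve columns by position, so two frames holding the same rows but with drifted column order or types can fail an equality-style assertion like the one in this test. Below is a minimal, self-contained sketch of that failure mode; the object name SchemaDriftSketch, the local SparkSession, and the toy data are all illustrative assumptions, not code from the Hudi repo. It mirrors the assertion shape used in the test.

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.col

object SchemaDriftSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("schema-drift-sketch")
      .master("local[1]")
      .getOrCreate()
    import spark.implicits._

    // Two frames with identical rows; the second has a drifted column order.
    val expected = Seq((1, "a"), (2, "b")).toDF("id", "value")
    val drifted  = expected.select("value", "id")

    // Printing both schemas side by side is the quickest way to spot the
    // drift, which is what the printSchema() calls added in the test do.
    expected.printSchema()
    drifted.printSchema()

    // Set operations match columns positionally, not by name, so align the
    // drifted frame to the expected column order before comparing.
    val aligned = drifted.select(expected.columns.map(col): _*)

    // Same assertion shape as the test: the intersection, minus the
    // expected rows, must be empty.
    assert(expected.intersect(aligned).except(expected).count() == 0)

    spark.stop()
  }
}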