You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hudi.apache.org by GitBox <gi...@apache.org> on 2022/02/10 05:56:55 UTC

[GitHub] [hudi] jintaoguan commented on a change in pull request #3985: [HUDI-2754] Performance improvement for IncrementalRelation

jintaoguan commented on a change in pull request #3985:
URL: https://github.com/apache/hudi/pull/3985#discussion_r803324790



##########
File path: hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/hudi/IncrementalRelation.scala
##########
@@ -157,27 +161,40 @@ class IncrementalRelation(val sqlContext: SQLContext,
     } else {
       log.info("Additional Filters to be applied to incremental source are :" + filters)
 
-      var df: DataFrame = sqlContext.createDataFrame(sqlContext.sparkContext.emptyRDD[Row], usedSchema)
+      var prunedSchema = StructType(Seq())
+      if (!requiredColumns.contains(HoodieRecord.COMMIT_TIME_METADATA_FIELD)) {
+        prunedSchema = prunedSchema.add(usedSchema(HoodieRecord.COMMIT_TIME_METADATA_FIELD))
+      }
+      requiredColumns.foreach(col => {
+        val field = usedSchema.find(_.name == col)
+        if (field.isDefined) {
+          prunedSchema = prunedSchema.add(field.get)
+        }
+      })
+      var df: DataFrame = sqlContext.createDataFrame(sqlContext.sparkContext.emptyRDD[Row], prunedSchema)
 
       if (metaBootstrapFileIdToFullPath.nonEmpty) {
         df = sqlContext.sparkSession.read
                .format("hudi")
-               .schema(usedSchema)
+               .schema(prunedSchema)
                .option(DataSourceReadOptions.READ_PATHS.key, filteredMetaBootstrapFullPaths.mkString(","))
                .load()
       }
 
       if (regularFileIdToFullPath.nonEmpty)
       {
         df = df.union(sqlContext.read.options(sOpts)
-                        .schema(usedSchema)
+                        .schema(prunedSchema)
                         .parquet(filteredRegularFullPaths.toList: _*)
                         .filter(String.format("%s >= '%s'", HoodieRecord.COMMIT_TIME_METADATA_FIELD,
                           commitsToReturn.head.getTimestamp))
                         .filter(String.format("%s <= '%s'", HoodieRecord.COMMIT_TIME_METADATA_FIELD,
                           commitsToReturn.last.getTimestamp)))
       }
 
+      if (!requiredColumns.contains(HoodieRecord.COMMIT_TIME_METADATA_FIELD)) {

Review comment:
       Because IncrementalRelation extends PrunedScan here, the SparkSQL engine passes the required columns to IncrementalRelation and expects the returned RDD to contain only those required columns.
   If we don't remove the non-required columns here, it will cause schema mismatch errors.




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@hudi.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org