You are viewing a plain text version of this content. The canonical link for it is here.
Posted to reviews@spark.apache.org by GitBox <gi...@apache.org> on 2020/03/16 05:18:00 UTC

[GitHub] [spark] viirya commented on a change in pull request #27888: [SPARK-31116][SQL] Fix nested schema case-sensitivity in ParquetRowConverter

viirya commented on a change in pull request #27888: [SPARK-31116][SQL] Fix nested schema case-sensitivity in ParquetRowConverter
URL: https://github.com/apache/spark/pull/27888#discussion_r392786863
 
 

 ##########
 File path: sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetRowConverter.scala
 ##########
 @@ -176,15 +178,38 @@ private[parquet] class ParquetRowConverter(
    */
   def currentRecord: InternalRow = currentRow
 
+
   // Converters for each field.
   private[this] val fieldConverters: Array[Converter with HasParentContainerUpdater] = {
-    parquetType.getFields.asScala.map { parquetField =>
-      val fieldIndex = catalystType.fieldIndex(parquetField.getName)
-      val catalystField = catalystType(fieldIndex)
-      // Converted field value should be set to the `fieldIndex`-th cell of `currentRow`
-      newConverter(parquetField, catalystField.dataType, new RowUpdater(currentRow, fieldIndex))
-    }.toArray
-  }
+
+    // (SPARK-31116) There is an issue when schema pruning is enabled, so we keep original codes
+    if (schemaPruning) {
+      // (SPARK-31116) For letter case issue, create name to field index based on case sensitivity
+      val catalystFieldNameToIndex = if (caseSensitive) {
+        catalystType.fieldNames.zipWithIndex.toMap
+      } else {
+        CaseInsensitiveMap(catalystType.fieldNames.zipWithIndex.toMap)
+      }
+      parquetType.getFields.asScala.map { parquetField =>
+        val fieldIndex = catalystFieldNameToIndex.getOrElse(parquetField.getName,
+          throw new IllegalArgumentException(
+            s"${parquetField.getName} does not exist. " +
+              s"Available: ${catalystType.fieldNames.mkString(", ")}")
+        )
+        val catalystField = catalystType(fieldIndex)
+        // Converted field value should be set to the `fieldIndex`-th cell of `currentRow`
+        newConverter(parquetField, catalystField.dataType, new RowUpdater(currentRow, fieldIndex))
+      }.toArray
+    } else {
+      parquetType.getFields.asScala.zip(catalystType).zipWithIndex.map {
+        case ((parquetFieldType, catalystField), ordinal) =>
+          // Converted field value should be set to the `ordinal`-th cell of `currentRow`
+          newConverter(
+            parquetFieldType, catalystField.dataType, new RowUpdater(currentRow, ordinal))
+      }.toArray
+    }
 
 Review comment:
   Why was this part of the code added? It seems to me that the code above, inside `if (schemaPruning) { ... }`, looks reasonable.

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services

---------------------------------------------------------------------
To unsubscribe, e-mail: reviews-unsubscribe@spark.apache.org
For additional commands, e-mail: reviews-help@spark.apache.org