Posted to reviews@spark.apache.org by GitBox <gi...@apache.org> on 2020/11/20 01:10:57 UTC

[GitHub] [spark] maropu commented on a change in pull request #30411: [SPARK-31962][SQL] Provide modifiedAfter and modifiedBefore options when filtering from a batch-based file data source

maropu commented on a change in pull request #30411:
URL: https://github.com/apache/spark/pull/30411#discussion_r527325885



##########
File path: examples/src/main/scala/org/apache/spark/examples/sql/SQLDataSourceExample.scala
##########
@@ -81,6 +81,27 @@ object SQLDataSourceExample {
     // |file1.parquet|
     // +-------------+
     // $example off:load_with_path_glob_filter$
+    // $example on:load_with_modified_time_filter$
+    val beforeFilterDF = spark.read.format("parquet")
+        // Files modified before 07/01/2020 at 05:30 are allowed
+        .option("modifiedBefore", "2020-07-01T05:30:00")

Review comment:
       nit: use two indents to follow the other examples.
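
       A sketch of the snippet after this nit, assuming the surrounding examples indent
       chained calls two spaces past the `val` line (an assumption here; only the
       indentation and the redundant Scala semicolons change):

           // assumes an active SparkSession named `spark`, as elsewhere in this file
           val beforeFilterDF = spark.read.format("parquet")
             // Files modified before 07/01/2020 at 05:30 are allowed
             .option("modifiedBefore", "2020-07-01T05:30:00")
             .load("examples/src/main/resources/dir1")
           beforeFilterDF.show()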

##########
File path: python/pyspark/sql/readwriter.py
##########
@@ -765,7 +832,8 @@ def jdbc(self, url, table, column=None, lowerBound=None, upperBound=None, numPar
         """
         if properties is None:
             properties = dict()
-        jprop = JavaClass("java.util.Properties", self._spark._sc._gateway._gateway_client)()
+        jprop = JavaClass("java.util.Properties",
+                          self._spark._sc._gateway._gateway_client)()

Review comment:
       an unnecessary change?

##########
File path: docs/sql-data-sources-generic-options.md
##########
@@ -119,3 +119,40 @@ To load all files recursively, you can use:
 {% include_example recursive_file_lookup r/RSparkSQLExample.R %}
 </div>
 </div>
+
+### Modification Time Path Filters
+
+`modifiedBefore` and `modifiedAfter` are options that can be 
+applied together or separately in order to achieve greater
+granularity over which files may load during a Spark batch query.
+(Structured Streaming file source doesn't support these options.)

Review comment:
       nit: `(Structured Streaming file source doesn't support these options.)` -> `Note that Structured Streaming file sources don't support these options.`?
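
       For illustration (not part of the patch), the two options can also be combined
       in a single batch read to bound files to a modification-time window; the path
       and timestamps below are the placeholder values from the Scala example above:

           // Sketch: keep only files modified after 2020-06-01T05:30:00
           // and before 2020-07-01T05:30:00 (assumes an active SparkSession `spark`)
           val windowedDF = spark.read.format("parquet")
             .option("modifiedAfter", "2020-06-01T05:30:00")
             .option("modifiedBefore", "2020-07-01T05:30:00")
             .load("examples/src/main/resources/dir1")
           windowedDF.show()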

##########
File path: sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamOptions.scala
##########
@@ -32,6 +33,16 @@ class FileStreamOptions(parameters: CaseInsensitiveMap[String]) extends Logging
 
   def this(parameters: Map[String, String]) = this(CaseInsensitiveMap(parameters))
 
+  checkDisallowedOptions(parameters)
+
+  private def checkDisallowedOptions(options: Map[String, String]): Unit = {
+    Seq(ModifiedBeforeFilter.PARAM_NAME, ModifiedAfterFilter.PARAM_NAME).foreach { param =>
+      if (parameters.contains(param)) {
+        throw new IllegalArgumentException(s"option '$param' is not allowed in file stream source")

Review comment:
       nit: `file stream source` -> `file stream sources`?
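
       To make the effect of this check concrete, a rough sketch of what a caller
       would hit (hedged: the exception surfaces once the file stream source parses
       its options, and the directory and schema below are placeholders):

           // Sketch: passing a batch-only modification-time option to a streaming read.
           // When FileStreamOptions is constructed for the source, the check above raises:
           //   IllegalArgumentException: option 'modifiedBefore' is not allowed in file stream source
           import org.apache.spark.sql.types.{StringType, StructType}

           val schema = new StructType().add("file", StringType)  // placeholder schema
           val streamDF = spark.readStream
             .format("parquet")
             .schema(schema)
             .option("modifiedBefore", "2020-07-01T05:30:00")
             .load("examples/src/main/resources/dir1")  // placeholder directory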

##########
File path: sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningAwareFileIndex.scala
##########
@@ -57,13 +57,10 @@ abstract class PartitioningAwareFileIndex(
   protected def leafDirToChildrenFiles: Map[Path, Array[FileStatus]]
 
   private val caseInsensitiveMap = CaseInsensitiveMap(parameters)
+  protected val pathFilters = PathFilterFactory.create(caseInsensitiveMap)

Review comment:
       `protected` -> `private`?

##########
File path: examples/src/main/scala/org/apache/spark/examples/sql/SQLDataSourceExample.scala
##########
@@ -81,6 +81,27 @@ object SQLDataSourceExample {
     // |file1.parquet|
     // +-------------+
     // $example off:load_with_path_glob_filter$
+    // $example on:load_with_modified_time_filter$
+    val beforeFilterDF = spark.read.format("parquet")
+        // Files modified before 07/01/2020 at 05:30 are allowed
+        .option("modifiedBefore", "2020-07-01T05:30:00")
+        .load("examples/src/main/resources/dir1");
+    beforeFilterDF.show();
+    // +-------------+
+    // |         file|
+    // +-------------+
+    // |file1.parquet|
+    // +-------------+
+    val afterFilterDF = spark.read.format("parquet")
+         // Files modified after 06/01/2020 at 05:30 are allowed
+        .option("modifiedAfter", "2020-06-01T05:30:00")

Review comment:
       ditto

##########
File path: python/pyspark/sql/readwriter.py
##########
@@ -777,7 +845,8 @@ def jdbc(self, url, table, column=None, lowerBound=None, upperBound=None, numPar
                                                int(numPartitions), jprop))
         if predicates is not None:
             gateway = self._spark._sc._gateway
-            jpredicates = utils.toJArray(gateway, gateway.jvm.java.lang.String, predicates)
+            jpredicates = utils.toJArray(
+                gateway, gateway.jvm.java.lang.String, predicates)

Review comment:
       ditto (I have the same comments on the changes below, too).



