Posted to reviews@spark.apache.org by GitBox <gi...@apache.org> on 2019/05/08 12:37:37 UTC

[GitHub] [spark] cloud-fan commented on a change in pull request #24068: [SPARK-27105][SQL] Optimize away exponential complexity in ORC predicate conversion

cloud-fan commented on a change in pull request #24068: [SPARK-27105][SQL] Optimize away exponential complexity in ORC predicate conversion
URL: https://github.com/apache/spark/pull/24068#discussion_r282043784
 
 

 ##########
 File path: sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/FilterPushdownBenchmark.scala
 ##########
 @@ -371,12 +403,64 @@ object FilterPushdownBenchmark extends BenchmarkBase with SQLHelper {
         withTempTable("orcTable", "parquetTable") {
           saveAsTable(df, dir)
           Seq(1, 250, 500).foreach { numFilter =>
-            val whereExpr = (1 to numFilter).map(i => s"c$i = 0").mkString(" and ")
+            val whereExpr = (1 to numFilter).map(i => s"c$i = 0").mkString(" or ")
             // Note: InferFiltersFromConstraints will add more filters to the given filters
             filterPushDownBenchmark(numRows, s"Select 1 row with $numFilter filters", whereExpr)
           }
         }
       }
     }
+
+    runBenchmark(s"Predicate conversion benchmark with unbalanced Expression") {
+      val numRows = 1
+      val width = 2000
+
+      val columns = (1 to width).map(i => s"id c$i")
+      val df = spark.range(1).selectExpr(columns: _*)
+      Seq(25, 5000, 15000).foreach { numFilter =>
+        val whereExpression = (1 to numFilter)
+          .map {
+            i =>
+              EqualTo(
+                Literal(0),
+                AttributeReference(
+                  s"c1",
+                  IntegerType,
+                  nullable = true)()
+              ).asInstanceOf[Expression]
+          }
+          .foldLeft[Expression](Literal.FalseLiteral)((x, y) => Or(x, y))
+        val benchmark = new Benchmark(s"Select 1 row with $numFilter filters",
+          numRows, minNumIters = 5, output = output)
+        val name = s"Native ORC Vectorized (Pushdown)"
+        benchmark.addCase(name) { _ =>
+          OrcFilters.createFilter(df.schema,
+            DataSourceStrategy.translateFilter(whereExpression).toSeq)
+        }
+        benchmark.run()
+      }
+    }
+
+    runBenchmark(s"Pushdown benchmark with unbalanced Column") {
 
 Review comment:
   what's the difference between `Column` and `Expression` in this benchmark?
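   
   For context on the shape being benchmarked: the `foldLeft` over `Or` above produces a left-deep, maximally unbalanced tree. A minimal sketch in plain Scala (toy `Expr`/`Or`/`Eq` types, not Catalyst's classes) of how that shape falls out of `foldLeft`:
   
   ```scala
   // Toy stand-ins for Catalyst expressions, used only to show the tree shape.
   sealed trait Expr
   case object FalseLit extends Expr
   case class Eq(col: String, value: Int) extends Expr
   case class Or(left: Expr, right: Expr) extends Expr
   
   object UnbalancedOrSketch {
     def main(args: Array[String]): Unit = {
       val predicates = (1 to 4).map(_ => Eq("c1", 0))
       // Each step wraps the whole accumulated tree as the left child,
       // so the result is a left-deep chain rather than a balanced tree.
       val unbalanced = predicates.foldLeft[Expr](FalseLit)((acc, p) => Or(acc, p))
       println(unbalanced)
       // Or(Or(Or(Or(FalseLit,Eq(c1,0)),Eq(c1,0)),Eq(c1,0)),Eq(c1,0))
     }
   }
   ```
   
   With thousands of predicates this chain gets very deep, which is the pathological conversion case SPARK-27105 is about.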

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services

---------------------------------------------------------------------
To unsubscribe, e-mail: reviews-unsubscribe@spark.apache.org
For additional commands, e-mail: reviews-help@spark.apache.org