Posted to reviews@spark.apache.org by GitBox <gi...@apache.org> on 2021/03/31 21:24:15 UTC

[GitHub] [spark] MaxGekk commented on a change in pull request #32015: [SPARK-34821][INFRA] Set up a workflow for developers to run benchmark in their fork

MaxGekk commented on a change in pull request #32015:
URL: https://github.com/apache/spark/pull/32015#discussion_r605224574



##########
File path: core/src/test/scala/org/apache/spark/benchmark/Benchmarks.scala
##########
@@ -30,44 +31,64 @@ import com.google.common.reflect.ClassPath
  *
  * {{{
  *   1. with spark-submit
- *      bin/spark-submit --class <this class> --jars <all spark test jars> <spark core test jar>
+ *      bin/spark-submit --class <this class> --jars <all spark test jars>
+ *        <spark external package jar> <spark core test jar> <glob pattern for class>
  *   2. generate result:
  *      SPARK_GENERATE_BENCHMARK_FILES=1 bin/spark-submit --class <this class> --jars
- *        <all spark test jars> <spark core test jar>
+ *        <all spark test jars> <spark external package jar>
+ *        <spark core test jar> <glob pattern for class>
  *      Results will be written to all corresponding files under "benchmarks/".
  *      Notice that it detects the sub-projects' directories from the jars' paths, so the provided jars
  *      should be properly placed under target (Maven build) or target/scala-* (SBT) when you
  *      generate the files.
  * }}}
  *
  * On Mac, you can use the command below to find all the test jars.
+ * Make sure not to select duplicate jars created by different build versions or tools.
  * {{{
- *   find . -name "*3.2.0-SNAPSHOT-tests.jar" | paste -sd ',' -
+ *   find . -name '*-SNAPSHOT-tests.jar' | paste -sd ',' -
  * }}}
  *
- * Full command example:
+ * The example below runs all benchmarks and generates the results:
  * {{{
  *   SPARK_GENERATE_BENCHMARK_FILES=1 bin/spark-submit --class \
  *     org.apache.spark.benchmark.Benchmarks --jars \
- *     "`find . -name "*3.2.0-SNAPSHOT-tests.jar" | paste -sd ',' -`" \
- *     ./core/target/scala-2.12/spark-core_2.12-3.2.0-SNAPSHOT-tests.jar
+ *     "`find . -name '*-SNAPSHOT-tests.jar' -o -name '*avro*-SNAPSHOT.jar' | paste -sd ',' -`" \
+ *     "`find . -name 'spark-core*-SNAPSHOT-tests.jar'`" \
+ *     "*"
+ * }}}
+ *
+ * The example below runs all benchmarks under "org.apache.spark.sql.execution.datasources":
+ * {{{
+ *   bin/spark-submit --class \
+ *     org.apache.spark.benchmark.Benchmarks --jars \
+ *     "`find . -name '*-SNAPSHOT-tests.jar' -o -name '*avro*-SNAPSHOT.jar' | paste -sd ',' -`" \
+ *     "`find . -name 'spark-core*-SNAPSHOT-tests.jar'`" \
+ *     "org.apache.spark.sql.execution.datasources.*"
  * }}}
  */
 
 object Benchmarks {
   def main(args: Array[String]): Unit = {
+    var isBenchmarkFound = false
     ClassPath.from(
       Thread.currentThread.getContextClassLoader
     ).getTopLevelClassesRecursive("org.apache.spark").asScala.foreach { info =>
       lazy val clazz = info.load
       lazy val runBenchmark = clazz.getMethod("main", classOf[Array[String]])
       // isAssignableFrom does not seem to work with the reflected class from Guava's
       // getTopLevelClassesRecursive.
+      val matcher = args.headOption.map(pattern =>
+        FileSystems.getDefault.getPathMatcher(s"glob:$pattern"))
       if (
           info.getName.endsWith("Benchmark") &&
+          // TPCDSQueryBenchmark requires setup and arguments. Exclude it for now.
+          !info.getName.endsWith("TPCDSQueryBenchmark") &&
+          matcher.forall(_.matches(Paths.get(info.getName))) &&

Review comment:
       hmm, interpreting the class name as a file system path looks unusual. Why not just a regular regexp, or even `.contains`?
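       A minimal sketch of the regexp alternative (the variable names mirror the PR, but this snippet is illustrative, not from it):

           // Hypothetical: compile the first CLI argument as a regular expression
           // and match it against the fully qualified class name.
           val matcher = args.headOption.map(p => java.util.regex.Pattern.compile(p))
           if (info.getName.endsWith("Benchmark") &&
               matcher.forall(_.matcher(info.getName).matches())) {
             // run the benchmark as before
           }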

##########
File path: core/src/test/scala/org/apache/spark/benchmark/Benchmarks.scala
##########
@@ -30,44 +31,64 @@ import com.google.common.reflect.ClassPath
 
 object Benchmarks {
   def main(args: Array[String]): Unit = {
+    var isBenchmarkFound = false

Review comment:
       Can't you just use `.map().getOrElse` instead of the variable? Or at least store the list of classes in a val and call `isDefined` at the end?
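       A rough sketch of the second option (names and the filter are illustrative, not from the PR; it assumes the file's existing imports such as scala.collection.JavaConverters._):

           // Hypothetical: materialize the matching classes first, run them,
           // and check afterwards whether anything matched at all.
           val benchmarks = ClassPath.from(Thread.currentThread.getContextClassLoader)
             .getTopLevelClassesRecursive("org.apache.spark").asScala
             .filter(_.getName.endsWith("Benchmark")) // plus the other filters from the PR
           benchmarks.foreach { info => /* load the class and invoke its main as before */ }
           if (benchmarks.isEmpty) {
             System.err.println("No benchmark classes matched.")
           }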

##########
File path: core/src/test/scala/org/apache/spark/benchmark/Benchmarks.scala
##########
@@ -78,8 +99,12 @@ object Benchmarks {
           // Maven build
           targetDirOrProjDir.getCanonicalPath
         }
+        // Force GC to minimize the side effect.
+        System.gc()
         runBenchmark.invoke(null, Array(projDir))

Review comment:
       How about continuing if a benchmark crashes and completing the others? I can imagine a use case where I run a few benchmarks overnight and re-run only the crashed ones in the morning.
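       A minimal sketch of that behavior, assuming the loop body quoted above (the reporting is illustrative):

           // Hypothetical: wrap each invocation so one crashing benchmark does
           // not abort the remaining ones; report the failure and move on.
           import scala.util.{Failure, Success, Try}

           Try(runBenchmark.invoke(null, Array(projDir))) match {
             case Success(_) => ()
             case Failure(e) =>
               System.err.println(s"Benchmark ${info.getName} failed: $e")
           }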




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



---------------------------------------------------------------------
To unsubscribe, e-mail: reviews-unsubscribe@spark.apache.org
For additional commands, e-mail: reviews-help@spark.apache.org