You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@beam.apache.org by ie...@apache.org on 2019/05/10 09:47:14 UTC
[beam] 03/03: fixup hadoop-format is not mandatory to run
ValidatesRunner tests
This is an automated email from the ASF dual-hosted git repository.
iemejia pushed a commit to branch spark-runner_structured-streaming
in repository https://gitbox.apache.org/repos/asf/beam.git
commit 1d9155d6b66a506ef79a057df94188c095911836
Author: Ismaël Mejía <ie...@gmail.com>
AuthorDate: Fri May 10 11:36:23 2019 +0200
fixup hadoop-format is not mandatory to run ValidatesRunner tests
---
runners/spark/build.gradle | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/runners/spark/build.gradle b/runners/spark/build.gradle
index 46e5c25..9619c5e 100644
--- a/runners/spark/build.gradle
+++ b/runners/spark/build.gradle
@@ -178,7 +178,9 @@ task validatesStructuredStreamingRunnerBatch(type: Test) {
systemProperty "spark.ui.showConsoleProgress", "false"
classpath = configurations.validatesRunner
- testClassesDirs = files(project(":beam-sdks-java-core").sourceSets.test.output.classesDirs) + files(project(":beam-sdks-java-io-hadoop-format").sourceSets.test.output.classesDirs) + files(project.sourceSets.test.output.classesDirs)
+ testClassesDirs += files(project(":beam-sdks-java-core").sourceSets.test.output.classesDirs)
+ testClassesDirs += files(project.sourceSets.test.output.classesDirs)
+
// Only one SparkContext may be running in a JVM (SPARK-2243)
forkEvery 1
maxParallelForks 4