You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@beam.apache.org by ec...@apache.org on 2019/03/01 10:26:43 UTC

[beam] branch spark-runner_structured-streaming updated (20fad2c -> 46914af)

This is an automated email from the ASF dual-hosted git repository.

echauchot pushed a change to branch spark-runner_structured-streaming
in repository https://gitbox.apache.org/repos/asf/beam.git.


 discard 20fad2c  [TO REVERT] Change the wordcount build to test on new spark runner
     new 46914af  [TO UPGRADE WITH THE 2 SPARK RUNNERS BEFORE MERGE] Change the wordcount build to test on new spark runner

This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
branch are not in the new version.  This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:

 * -- * -- B -- O -- O -- O   (20fad2c)
            \
             N -- N -- N   refs/heads/spark-runner_structured-streaming (46914af)

You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.

The 1 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:


[beam] 01/01: [TO UPGRADE WITH THE 2 SPARK RUNNERS BEFORE MERGE] Change the wordcount build to test on new spark runner

Posted by ec...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

echauchot pushed a commit to branch spark-runner_structured-streaming
in repository https://gitbox.apache.org/repos/asf/beam.git

commit 46914af5de53254bca46cc9b2da5db28649ac397
Author: Etienne Chauchot <ec...@apache.org>
AuthorDate: Thu Feb 28 14:35:05 2019 +0100

    [TO UPGRADE WITH THE 2 SPARK RUNNERS BEFORE MERGE] Change the wordcount build to test on new spark runner
---
 examples/java/build.gradle | 35 ++++++++++++++++++++---------------
 1 file changed, 20 insertions(+), 15 deletions(-)

diff --git a/examples/java/build.gradle b/examples/java/build.gradle
index 7b7fe3e..69b4a9d 100644
--- a/examples/java/build.gradle
+++ b/examples/java/build.gradle
@@ -37,18 +37,23 @@ def preCommitRunners = ["directRunner", "flinkRunner", "sparkRunner"]
 for (String runner : preCommitRunners) {
   configurations.create(runner + "PreCommit")
 }
-configurations.sparkRunnerPreCommit {
-  // Ban certain dependencies to prevent a StackOverflow within Spark
-  // because JUL -> SLF4J -> JUL, and similarly JDK14 -> SLF4J -> JDK14
-  exclude group: "org.slf4j", module: "jul-to-slf4j"
-  exclude group: "org.slf4j", module: "slf4j-jdk14"
-}
-
 dependencies {
   compile library.java.guava
   shadow project(path: ":beam-sdks-java-core", configuration: "shadow")
   shadow project(path: ":beam-sdks-java-extensions-google-cloud-platform-core", configuration: "shadow")
   shadow project(path: ":beam-sdks-java-io-google-cloud-platform", configuration: "shadow")
+  shadow project(path: ":beam-runners-spark-structured-streaming", configuration: "shadow")
+  shadow library.java.guava
+  shadow library.java.slf4j_api
+  shadow library.java.joda_time
+  provided library.java.spark_sql
+  provided library.java.commons_compress
+  provided library.java.commons_lang3
+  provided library.java.commons_io_2x
+  provided library.java.hamcrest_core
+  provided library.java.hamcrest_library
+  shadow "com.fasterxml.jackson.module:jackson-module-scala_2.11:2.9.8"
+
   shadow library.java.google_api_client
   shadow library.java.google_api_services_bigquery
   shadow library.java.google_http_client
@@ -79,13 +84,7 @@ dependencies {
   // apexRunnerPreCommit project(path: ":beam-runners-apex", configuration: "shadow")
   directRunnerPreCommit project(path: ":beam-runners-direct-java", configuration: "shadow")
   flinkRunnerPreCommit project(path: ":beam-runners-flink_2.11", configuration: "shadow")
-  // TODO: Make the netty version used configurable, we add netty-all 4.1.17.Final so it appears on the classpath
-  // before 4.1.8.Final defined by Apache Beam
-  sparkRunnerPreCommit "io.netty:netty-all:4.1.17.Final"
-  sparkRunnerPreCommit project(path: ":beam-runners-spark", configuration: "shadow")
-  sparkRunnerPreCommit project(path: ":beam-sdks-java-io-hadoop-file-system", configuration: "shadow")
-  sparkRunnerPreCommit library.java.spark_streaming
-  sparkRunnerPreCommit library.java.spark_core
+  sparkRunnerPreCommit project(path: ":beam-runners-spark-structured-streaming", configuration: "shadow")
 }
 
 /*
@@ -96,7 +95,7 @@ def preCommitRunnerClass = [
   apexRunner: "org.apache.beam.runners.apex.TestApexRunner",
   directRunner: "org.apache.beam.runners.direct.DirectRunner",
   flinkRunner: "org.apache.beam.runners.flink.TestFlinkRunner",
-  sparkRunner: "org.apache.beam.runners.spark.TestSparkRunner",
+  sparkRunner: "org.apache.beam.runners.spark.structuredstreaming.SparkRunner",
 ]
 def gcpProject = project.findProperty('gcpProject') ?: 'apache-beam-testing'
 def gcsTempRoot = project.findProperty('gcsTempRoot') ?: 'gs://temp-storage-for-end-to-end-tests/'
@@ -124,5 +123,11 @@ task preCommit() {
   for (String runner : preCommitRunners) {
     dependsOn runner + "PreCommit"
   }
+
+  configurations.all {
+    // Ban certain dependencies to prevent a StackOverflow within Spark
+    // because JUL -> SLF4J -> JUL, and similarly JDK14 -> SLF4J -> JDK14
+    exclude group: "org.slf4j", module: "slf4j-jdk14"
+  }
 }