Posted to commits@spark.apache.org by sr...@apache.org on 2017/02/16 12:32:55 UTC
[4/8] spark git commit: [SPARK-19550][BUILD][CORE][WIP] Remove Java 7 support
http://git-wip-us.apache.org/repos/asf/spark/blob/0e240549/launcher/src/main/java/org/apache/spark/launcher/OutputRedirector.java
----------------------------------------------------------------------
diff --git a/launcher/src/main/java/org/apache/spark/launcher/OutputRedirector.java b/launcher/src/main/java/org/apache/spark/launcher/OutputRedirector.java
index c7959ae..ff80453 100644
--- a/launcher/src/main/java/org/apache/spark/launcher/OutputRedirector.java
+++ b/launcher/src/main/java/org/apache/spark/launcher/OutputRedirector.java
@@ -44,12 +44,7 @@ class OutputRedirector {
OutputRedirector(InputStream in, String loggerName, ThreadFactory tf) {
this.active = true;
this.reader = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8));
- this.thread = tf.newThread(new Runnable() {
- @Override
- public void run() {
- redirect();
- }
- });
+ this.thread = tf.newThread(this::redirect);
this.sink = Logger.getLogger(loggerName);
thread.start();
}
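
With Java 7 gone, the one-method anonymous Runnable above collapses to a bound method reference; the compiler targets the same functional interface, so behavior is unchanged. A minimal standalone sketch of the equivalence (class and method names here are illustrative, not the launcher's actual code):

import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;

class RedirectorSketch {
  private void redirect() { /* drain the stream into a logger */ }

  void start() {
    ThreadFactory tf = Executors.defaultThreadFactory();
    // Pre-Java 8: anonymous class wrapping a single method call
    Thread legacy = tf.newThread(new Runnable() {
      @Override
      public void run() {
        redirect();
      }
    });
    // Java 8: bound method reference to the same method
    Thread modern = tf.newThread(this::redirect);
    modern.start();
  }
}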
http://git-wip-us.apache.org/repos/asf/spark/blob/0e240549/launcher/src/main/java/org/apache/spark/launcher/SparkAppHandle.java
----------------------------------------------------------------------
diff --git a/launcher/src/main/java/org/apache/spark/launcher/SparkAppHandle.java b/launcher/src/main/java/org/apache/spark/launcher/SparkAppHandle.java
index 0aa7bd1..cefb4d1 100644
--- a/launcher/src/main/java/org/apache/spark/launcher/SparkAppHandle.java
+++ b/launcher/src/main/java/org/apache/spark/launcher/SparkAppHandle.java
@@ -91,9 +91,6 @@ public interface SparkAppHandle {
* Tries to kill the underlying application. Implies {@link #disconnect()}. This will not send
* a {@link #stop()} message to the application, so it's recommended that users first try to
* stop the application cleanly and only resort to this method if that fails.
- * <p>
- * Note that if the application is running as a child process, this method fail to kill the
- * process when using Java 7. This may happen if, for example, the application is deadlocked.
*/
void kill();
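
The deleted caveat dates from when Spark had to run on Java 7, which offered no direct way to check whether a child process was alive or to forcibly terminate it. Java 8 added Process#isAlive() and Process#destroyForcibly(), which is why the note can go. A small sketch of the Java 8 APIs (the "sleep" command is just a placeholder child process):

import java.io.IOException;

class KillSketch {
  static void killChild() throws IOException, InterruptedException {
    Process proc = new ProcessBuilder("sleep", "60").start();
    if (proc.isAlive()) {       // Java 8: query liveness directly
      proc.destroyForcibly();   // Java 8: best-effort forcible termination
      proc.waitFor();           // reap the exit status
    }
  }
}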
http://git-wip-us.apache.org/repos/asf/spark/blob/0e240549/launcher/src/main/java/org/apache/spark/launcher/SparkClassCommandBuilder.java
----------------------------------------------------------------------
diff --git a/launcher/src/main/java/org/apache/spark/launcher/SparkClassCommandBuilder.java b/launcher/src/main/java/org/apache/spark/launcher/SparkClassCommandBuilder.java
index 82b593a..8178684 100644
--- a/launcher/src/main/java/org/apache/spark/launcher/SparkClassCommandBuilder.java
+++ b/launcher/src/main/java/org/apache/spark/launcher/SparkClassCommandBuilder.java
@@ -49,35 +49,44 @@ class SparkClassCommandBuilder extends AbstractCommandBuilder {
// Master, Worker, HistoryServer, ExternalShuffleService, MesosClusterDispatcher use
// SPARK_DAEMON_JAVA_OPTS (and specific opts) + SPARK_DAEMON_MEMORY.
- if (className.equals("org.apache.spark.deploy.master.Master")) {
- javaOptsKeys.add("SPARK_DAEMON_JAVA_OPTS");
- javaOptsKeys.add("SPARK_MASTER_OPTS");
- memKey = "SPARK_DAEMON_MEMORY";
- } else if (className.equals("org.apache.spark.deploy.worker.Worker")) {
- javaOptsKeys.add("SPARK_DAEMON_JAVA_OPTS");
- javaOptsKeys.add("SPARK_WORKER_OPTS");
- memKey = "SPARK_DAEMON_MEMORY";
- } else if (className.equals("org.apache.spark.deploy.history.HistoryServer")) {
- javaOptsKeys.add("SPARK_DAEMON_JAVA_OPTS");
- javaOptsKeys.add("SPARK_HISTORY_OPTS");
- memKey = "SPARK_DAEMON_MEMORY";
- } else if (className.equals("org.apache.spark.executor.CoarseGrainedExecutorBackend")) {
- javaOptsKeys.add("SPARK_JAVA_OPTS");
- javaOptsKeys.add("SPARK_EXECUTOR_OPTS");
- memKey = "SPARK_EXECUTOR_MEMORY";
- } else if (className.equals("org.apache.spark.executor.MesosExecutorBackend")) {
- javaOptsKeys.add("SPARK_EXECUTOR_OPTS");
- memKey = "SPARK_EXECUTOR_MEMORY";
- } else if (className.equals("org.apache.spark.deploy.mesos.MesosClusterDispatcher")) {
- javaOptsKeys.add("SPARK_DAEMON_JAVA_OPTS");
- } else if (className.equals("org.apache.spark.deploy.ExternalShuffleService") ||
- className.equals("org.apache.spark.deploy.mesos.MesosExternalShuffleService")) {
- javaOptsKeys.add("SPARK_DAEMON_JAVA_OPTS");
- javaOptsKeys.add("SPARK_SHUFFLE_OPTS");
- memKey = "SPARK_DAEMON_MEMORY";
- } else {
- javaOptsKeys.add("SPARK_JAVA_OPTS");
- memKey = "SPARK_DRIVER_MEMORY";
+ switch (className) {
+ case "org.apache.spark.deploy.master.Master":
+ javaOptsKeys.add("SPARK_DAEMON_JAVA_OPTS");
+ javaOptsKeys.add("SPARK_MASTER_OPTS");
+ memKey = "SPARK_DAEMON_MEMORY";
+ break;
+ case "org.apache.spark.deploy.worker.Worker":
+ javaOptsKeys.add("SPARK_DAEMON_JAVA_OPTS");
+ javaOptsKeys.add("SPARK_WORKER_OPTS");
+ memKey = "SPARK_DAEMON_MEMORY";
+ break;
+ case "org.apache.spark.deploy.history.HistoryServer":
+ javaOptsKeys.add("SPARK_DAEMON_JAVA_OPTS");
+ javaOptsKeys.add("SPARK_HISTORY_OPTS");
+ memKey = "SPARK_DAEMON_MEMORY";
+ break;
+ case "org.apache.spark.executor.CoarseGrainedExecutorBackend":
+ javaOptsKeys.add("SPARK_JAVA_OPTS");
+ javaOptsKeys.add("SPARK_EXECUTOR_OPTS");
+ memKey = "SPARK_EXECUTOR_MEMORY";
+ break;
+ case "org.apache.spark.executor.MesosExecutorBackend":
+ javaOptsKeys.add("SPARK_EXECUTOR_OPTS");
+ memKey = "SPARK_EXECUTOR_MEMORY";
+ break;
+ case "org.apache.spark.deploy.mesos.MesosClusterDispatcher":
+ javaOptsKeys.add("SPARK_DAEMON_JAVA_OPTS");
+ break;
+ case "org.apache.spark.deploy.ExternalShuffleService":
+ case "org.apache.spark.deploy.mesos.MesosExternalShuffleService":
+ javaOptsKeys.add("SPARK_DAEMON_JAVA_OPTS");
+ javaOptsKeys.add("SPARK_SHUFFLE_OPTS");
+ memKey = "SPARK_DAEMON_MEMORY";
+ break;
+ default:
+ javaOptsKeys.add("SPARK_JAVA_OPTS");
+ memKey = "SPARK_DRIVER_MEMORY";
+ break;
}
List<String> cmd = buildJavaCommand(extraClassPath);
@@ -94,7 +103,6 @@ class SparkClassCommandBuilder extends AbstractCommandBuilder {
String mem = firstNonEmpty(memKey != null ? System.getenv(memKey) : null, DEFAULT_MEM);
cmd.add("-Xmx" + mem);
- addPermGenSizeOpt(cmd);
cmd.add(className);
cmd.addAll(classArgs);
return cmd;
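
Worth noting: switch on String literals has been legal since Java 7, so this rewrite is idiomatic cleanup done while touching the file rather than something Java 8 newly enables. Stacked case labels fall through, which is what replaces the old equals(...) || equals(...) test. An abbreviated sketch of the pattern:

import java.util.List;

class DaemonOptsSketch {
  static String memKeyFor(String className, List<String> javaOptsKeys) {
    switch (className) {
      // adjacent labels fall through, matching either class name
      case "org.apache.spark.deploy.ExternalShuffleService":
      case "org.apache.spark.deploy.mesos.MesosExternalShuffleService":
        javaOptsKeys.add("SPARK_DAEMON_JAVA_OPTS");
        javaOptsKeys.add("SPARK_SHUFFLE_OPTS");
        return "SPARK_DAEMON_MEMORY";
      default:
        javaOptsKeys.add("SPARK_JAVA_OPTS");
        return "SPARK_DRIVER_MEMORY";
    }
  }
}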
http://git-wip-us.apache.org/repos/asf/spark/blob/0e240549/launcher/src/main/java/org/apache/spark/launcher/SparkSubmitCommandBuilder.java
----------------------------------------------------------------------
diff --git a/launcher/src/main/java/org/apache/spark/launcher/SparkSubmitCommandBuilder.java b/launcher/src/main/java/org/apache/spark/launcher/SparkSubmitCommandBuilder.java
index 29c6d82..5e64fa7 100644
--- a/launcher/src/main/java/org/apache/spark/launcher/SparkSubmitCommandBuilder.java
+++ b/launcher/src/main/java/org/apache/spark/launcher/SparkSubmitCommandBuilder.java
@@ -271,7 +271,6 @@ class SparkSubmitCommandBuilder extends AbstractCommandBuilder {
config.get(SparkLauncher.DRIVER_EXTRA_LIBRARY_PATH));
}
- addPermGenSizeOpt(cmd);
cmd.add("org.apache.spark.deploy.SparkSubmit");
cmd.addAll(buildSparkSubmitArgs());
return cmd;
@@ -405,49 +404,65 @@ class SparkSubmitCommandBuilder extends AbstractCommandBuilder {
@Override
protected boolean handle(String opt, String value) {
- if (opt.equals(MASTER)) {
- master = value;
- } else if (opt.equals(DEPLOY_MODE)) {
- deployMode = value;
- } else if (opt.equals(PROPERTIES_FILE)) {
- propertiesFile = value;
- } else if (opt.equals(DRIVER_MEMORY)) {
- conf.put(SparkLauncher.DRIVER_MEMORY, value);
- } else if (opt.equals(DRIVER_JAVA_OPTIONS)) {
- conf.put(SparkLauncher.DRIVER_EXTRA_JAVA_OPTIONS, value);
- } else if (opt.equals(DRIVER_LIBRARY_PATH)) {
- conf.put(SparkLauncher.DRIVER_EXTRA_LIBRARY_PATH, value);
- } else if (opt.equals(DRIVER_CLASS_PATH)) {
- conf.put(SparkLauncher.DRIVER_EXTRA_CLASSPATH, value);
- } else if (opt.equals(CONF)) {
- String[] setConf = value.split("=", 2);
- checkArgument(setConf.length == 2, "Invalid argument to %s: %s", CONF, value);
- conf.put(setConf[0], setConf[1]);
- } else if (opt.equals(CLASS)) {
- // The special classes require some special command line handling, since they allow
- // mixing spark-submit arguments with arguments that should be propagated to the shell
- // itself. Note that for this to work, the "--class" argument must come before any
- // non-spark-submit arguments.
- mainClass = value;
- if (specialClasses.containsKey(value)) {
- allowsMixedArguments = true;
- appResource = specialClasses.get(value);
- }
- } else if (opt.equals(KILL_SUBMISSION) || opt.equals(STATUS)) {
- isAppResourceReq = false;
- sparkArgs.add(opt);
- sparkArgs.add(value);
- } else if (opt.equals(HELP) || opt.equals(USAGE_ERROR)) {
- isAppResourceReq = false;
- sparkArgs.add(opt);
- } else if (opt.equals(VERSION)) {
- isAppResourceReq = false;
- sparkArgs.add(opt);
- } else {
- sparkArgs.add(opt);
- if (value != null) {
+ switch (opt) {
+ case MASTER:
+ master = value;
+ break;
+ case DEPLOY_MODE:
+ deployMode = value;
+ break;
+ case PROPERTIES_FILE:
+ propertiesFile = value;
+ break;
+ case DRIVER_MEMORY:
+ conf.put(SparkLauncher.DRIVER_MEMORY, value);
+ break;
+ case DRIVER_JAVA_OPTIONS:
+ conf.put(SparkLauncher.DRIVER_EXTRA_JAVA_OPTIONS, value);
+ break;
+ case DRIVER_LIBRARY_PATH:
+ conf.put(SparkLauncher.DRIVER_EXTRA_LIBRARY_PATH, value);
+ break;
+ case DRIVER_CLASS_PATH:
+ conf.put(SparkLauncher.DRIVER_EXTRA_CLASSPATH, value);
+ break;
+ case CONF:
+ String[] setConf = value.split("=", 2);
+ checkArgument(setConf.length == 2, "Invalid argument to %s: %s", CONF, value);
+ conf.put(setConf[0], setConf[1]);
+ break;
+ case CLASS:
+ // The special classes require some special command line handling, since they allow
+ // mixing spark-submit arguments with arguments that should be propagated to the shell
+ // itself. Note that for this to work, the "--class" argument must come before any
+ // non-spark-submit arguments.
+ mainClass = value;
+ if (specialClasses.containsKey(value)) {
+ allowsMixedArguments = true;
+ appResource = specialClasses.get(value);
+ }
+ break;
+ case KILL_SUBMISSION:
+ case STATUS:
+ isAppResourceReq = false;
+ sparkArgs.add(opt);
sparkArgs.add(value);
- }
+ break;
+ case HELP:
+ case USAGE_ERROR:
+ isAppResourceReq = false;
+ sparkArgs.add(opt);
+ break;
+ case VERSION:
+ isAppResourceReq = false;
+ sparkArgs.add(opt);
+ break;
+ default:
+ sparkArgs.add(opt);
+ if (value != null) {
+ sparkArgs.add(value);
+ }
+ break;
}
return true;
}
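
One detail in the CONF branch above: value.split("=", 2) uses a limit of 2, so only the first '=' separates key from value and any later '=' characters survive inside the value. A tiny demonstration with a hypothetical setting:

class ConfSplitSketch {
  public static void main(String[] args) {
    String value = "spark.executor.extraJavaOptions=-Dkey=val";
    String[] setConf = value.split("=", 2); // limit 2: split at the first '=' only
    System.out.println(setConf[0]);         // spark.executor.extraJavaOptions
    System.out.println(setConf[1]);         // -Dkey=val
  }
}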
http://git-wip-us.apache.org/repos/asf/spark/blob/0e240549/launcher/src/test/java/org/apache/spark/launcher/CommandBuilderUtilsSuite.java
----------------------------------------------------------------------
diff --git a/launcher/src/test/java/org/apache/spark/launcher/CommandBuilderUtilsSuite.java b/launcher/src/test/java/org/apache/spark/launcher/CommandBuilderUtilsSuite.java
index caeeea5..9795041 100644
--- a/launcher/src/test/java/org/apache/spark/launcher/CommandBuilderUtilsSuite.java
+++ b/launcher/src/test/java/org/apache/spark/launcher/CommandBuilderUtilsSuite.java
@@ -99,42 +99,6 @@ public class CommandBuilderUtilsSuite {
assertEquals(10, javaMajorVersion("10"));
}
- @Test
- public void testAddPermGenSizeOpt() {
- List<String> cmd = new ArrayList<>();
-
- if (javaMajorVersion(System.getProperty("java.version")) > 7) {
- // Does nothing in Java 8
- addPermGenSizeOpt(cmd);
- assertEquals(0, cmd.size());
- cmd.clear();
-
- } else {
- addPermGenSizeOpt(cmd);
- assertEquals(1, cmd.size());
- assertTrue(cmd.get(0).startsWith("-XX:MaxPermSize="));
- cmd.clear();
-
- cmd.add("foo");
- addPermGenSizeOpt(cmd);
- assertEquals(2, cmd.size());
- assertTrue(cmd.get(1).startsWith("-XX:MaxPermSize="));
- cmd.clear();
-
- cmd.add("-XX:MaxPermSize=512m");
- addPermGenSizeOpt(cmd);
- assertEquals(1, cmd.size());
- assertEquals("-XX:MaxPermSize=512m", cmd.get(0));
- cmd.clear();
-
- cmd.add("'-XX:MaxPermSize=512m'");
- addPermGenSizeOpt(cmd);
- assertEquals(1, cmd.size());
- assertEquals("'-XX:MaxPermSize=512m'", cmd.get(0));
- cmd.clear();
- }
- }
-
private static void testOpt(String opts, List<String> expected) {
assertEquals(String.format("test string failed to parse: [[ %s ]]", opts),
expected, parseOptionString(opts));
http://git-wip-us.apache.org/repos/asf/spark/blob/0e240549/launcher/src/test/java/org/apache/spark/launcher/SparkSubmitCommandBuilderSuite.java
----------------------------------------------------------------------
diff --git a/launcher/src/test/java/org/apache/spark/launcher/SparkSubmitCommandBuilderSuite.java b/launcher/src/test/java/org/apache/spark/launcher/SparkSubmitCommandBuilderSuite.java
index ad2e7a7..d569b66 100644
--- a/launcher/src/test/java/org/apache/spark/launcher/SparkSubmitCommandBuilderSuite.java
+++ b/launcher/src/test/java/org/apache/spark/launcher/SparkSubmitCommandBuilderSuite.java
@@ -233,7 +233,7 @@ public class SparkSubmitCommandBuilderSuite extends BaseSuite {
launcher.setPropertiesFile(dummyPropsFile.getAbsolutePath());
launcher.conf.put(SparkLauncher.DRIVER_MEMORY, "1g");
launcher.conf.put(SparkLauncher.DRIVER_EXTRA_CLASSPATH, "/driver");
- launcher.conf.put(SparkLauncher.DRIVER_EXTRA_JAVA_OPTIONS, "-Ddriver -XX:MaxPermSize=256m");
+ launcher.conf.put(SparkLauncher.DRIVER_EXTRA_JAVA_OPTIONS, "-Ddriver");
launcher.conf.put(SparkLauncher.DRIVER_EXTRA_LIBRARY_PATH, "/native");
} else {
launcher.childEnv.put("SPARK_CONF_DIR", System.getProperty("spark.test.home")
@@ -258,12 +258,6 @@ public class SparkSubmitCommandBuilderSuite extends BaseSuite {
assertFalse("Memory arguments should not be set.", found);
}
- for (String arg : cmd) {
- if (arg.startsWith("-XX:MaxPermSize=")) {
- assertEquals("-XX:MaxPermSize=256m", arg);
- }
- }
-
String[] cp = findArgValue(cmd, "-cp").split(Pattern.quote(File.pathSeparator));
if (isDriver) {
assertTrue("Driver classpath should contain provided entry.", contains("/driver", cp));
http://git-wip-us.apache.org/repos/asf/spark/blob/0e240549/launcher/src/test/resources/spark-defaults.conf
----------------------------------------------------------------------
diff --git a/launcher/src/test/resources/spark-defaults.conf b/launcher/src/test/resources/spark-defaults.conf
index 239fc57..3a51208 100644
--- a/launcher/src/test/resources/spark-defaults.conf
+++ b/launcher/src/test/resources/spark-defaults.conf
@@ -17,5 +17,5 @@
spark.driver.memory=1g
spark.driver.extraClassPath=/driver
-spark.driver.extraJavaOptions=-Ddriver -XX:MaxPermSize=256m
+spark.driver.extraJavaOptions=-Ddriver
spark.driver.extraLibraryPath=/native
\ No newline at end of file
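
All the -XX:MaxPermSize and -XX:PermSize settings disappear in this commit because Java 8 removed the permanent generation entirely (JEP 122); class metadata now lives in native Metaspace, and a Java 8 JVM merely warns that the flag is ignored. A quick way to see the difference, as a sketch:

import java.lang.management.ManagementFactory;
import java.lang.management.MemoryPoolMXBean;

class PoolsSketch {
  public static void main(String[] args) {
    // On Java 8+ this lists a "Metaspace" pool where Java 7 reported a Perm Gen pool
    for (MemoryPoolMXBean pool : ManagementFactory.getMemoryPoolMXBeans()) {
      System.out.println(pool.getName());
    }
  }
}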
http://git-wip-us.apache.org/repos/asf/spark/blob/0e240549/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index ac61a57..60e4c72 100644
--- a/pom.xml
+++ b/pom.xml
@@ -117,7 +117,7 @@
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
- <java.version>1.7</java.version>
+ <java.version>1.8</java.version>
<maven.version>3.3.9</maven.version>
<sbt.project.name>spark</sbt.project.name>
<slf4j.version>1.7.16</slf4j.version>
@@ -186,9 +186,6 @@
<test.java.home>${java.home}</test.java.home>
<test.exclude.tags></test.exclude.tags>
- <!-- When using different JDKs for the build, we can't use Zinc for the jdk8 part. -->
- <useZincForJdk8>true</useZincForJdk8>
-
<!-- Package to use when relocating shaded classes. -->
<spark.shade.packageName>org.spark_project</spark.shade.packageName>
@@ -219,8 +216,6 @@
-->
<spark.test.home>${session.executionRootDirectory}</spark.test.home>
- <PermGen>64m</PermGen>
- <MaxPermGen>512m</MaxPermGen>
<CodeCacheSize>512m</CodeCacheSize>
</properties>
<repositories>
@@ -1920,7 +1915,7 @@
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>build-helper-maven-plugin</artifactId>
- <version>1.12</version>
+ <version>3.0.0</version>
</plugin>
<plugin>
<groupId>net.alchim31.maven</groupId>
@@ -1967,8 +1962,6 @@
<jvmArgs>
<jvmArg>-Xms1024m</jvmArg>
<jvmArg>-Xmx1024m</jvmArg>
- <jvmArg>-XX:PermSize=${PermGen}</jvmArg>
- <jvmArg>-XX:MaxPermSize=${MaxPermGen}</jvmArg>
<jvmArg>-XX:ReservedCodeCacheSize=${CodeCacheSize}</jvmArg>
</jvmArgs>
<javacArgs>
@@ -1983,7 +1976,7 @@
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
- <version>3.6.0</version>
+ <version>3.6.1</version>
<configuration>
<source>${java.version}</source>
<target>${java.version}</target>
@@ -2014,7 +2007,7 @@
<include>**/*Suite.java</include>
</includes>
<reportsDirectory>${project.build.directory}/surefire-reports</reportsDirectory>
- <argLine>-Xmx3g -Xss4096k -XX:MaxPermSize=${MaxPermGen} -XX:ReservedCodeCacheSize=512m</argLine>
+ <argLine>-Xmx3g -Xss4096k -XX:ReservedCodeCacheSize=${CodeCacheSize}</argLine>
<environmentVariables>
<!--
Setting SPARK_DIST_CLASSPATH is a simple way to make sure any child processes
@@ -2063,7 +2056,7 @@
<reportsDirectory>${project.build.directory}/surefire-reports</reportsDirectory>
<junitxml>.</junitxml>
<filereports>SparkTestSuite.txt</filereports>
- <argLine>-ea -Xmx3g -XX:MaxPermSize=${MaxPermGen} -XX:ReservedCodeCacheSize=${CodeCacheSize}</argLine>
+ <argLine>-ea -Xmx3g -XX:ReservedCodeCacheSize=${CodeCacheSize}</argLine>
<stderr/>
<environmentVariables>
<!--
@@ -2149,6 +2142,41 @@
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<version>2.10.4</version>
+ <configuration>
+ <additionalparam>-Xdoclint:all -Xdoclint:-missing</additionalparam>
+ <tags>
+ <tag>
+ <name>example</name>
+ <placement>a</placement>
+ <head>Example:</head>
+ </tag>
+ <tag>
+ <name>note</name>
+ <placement>a</placement>
+ <head>Note:</head>
+ </tag>
+ <tag>
+ <name>group</name>
+ <placement>X</placement>
+ </tag>
+ <tag>
+ <name>tparam</name>
+ <placement>X</placement>
+ </tag>
+ <tag>
+ <name>constructor</name>
+ <placement>X</placement>
+ </tag>
+ <tag>
+ <name>todo</name>
+ <placement>X</placement>
+ </tag>
+ <tag>
+ <name>groupname</name>
+ <placement>X</placement>
+ </tag>
+ </tags>
+ </configuration>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
@@ -2163,7 +2191,7 @@
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-shade-plugin</artifactId>
- <version>2.4.3</version>
+ <version>3.0.0</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
@@ -2178,6 +2206,7 @@
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-dependency-plugin</artifactId>
+ <version>3.0.0</version>
<executions>
<execution>
<id>default-cli</id>
@@ -2252,7 +2281,6 @@
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-dependency-plugin</artifactId>
- <version>2.10</version>
<executions>
<execution>
<id>generate-test-classpath</id>
@@ -2474,67 +2502,6 @@
</profile>
<profile>
- <id>java8-tests</id>
- <activation>
- <jdk>[1.8,)</jdk>
- </activation>
- <modules>
- <module>external/java8-tests</module>
- </modules>
- </profile>
-
- <profile>
- <id>doclint-java8-disable</id>
- <activation>
- <jdk>[1.8,)</jdk>
- </activation>
-
- <build>
- <plugins>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-javadoc-plugin</artifactId>
- <configuration>
- <additionalparam>-Xdoclint:all -Xdoclint:-missing</additionalparam>
- <tags>
- <tag>
- <name>example</name>
- <placement>a</placement>
- <head>Example:</head>
- </tag>
- <tag>
- <name>note</name>
- <placement>a</placement>
- <head>Note:</head>
- </tag>
- <tag>
- <name>group</name>
- <placement>X</placement>
- </tag>
- <tag>
- <name>tparam</name>
- <placement>X</placement>
- </tag>
- <tag>
- <name>constructor</name>
- <placement>X</placement>
- </tag>
- <tag>
- <name>todo</name>
- <placement>X</placement>
- </tag>
- <tag>
- <name>groupname</name>
- <placement>X</placement>
- </tag>
- </tags>
- </configuration>
- </plugin>
- </plugins>
- </build>
- </profile>
-
- <profile>
<id>docker-integration-tests</id>
<modules>
<module>external/docker-integration-tests</module>
@@ -2630,60 +2597,6 @@
</profile>
<profile>
- <id>java7</id>
- <activation>
- <property><name>env.JAVA_7_HOME</name></property>
- </activation>
- <properties>
- <useZincForJdk8>false</useZincForJdk8>
- </properties>
- <build>
- <pluginManagement>
- <plugins>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-compiler-plugin</artifactId>
- <configuration>
- <compilerArgs combine.children="append">
- <arg>-bootclasspath</arg>
- <arg>${env.JAVA_7_HOME}/jre/lib/rt.jar${path.separator}${env.JAVA_7_HOME}/jre/lib/jce.jar</arg>
- </compilerArgs>
- <verbose>true</verbose>
- </configuration>
- </plugin>
- <plugin>
- <groupId>net.alchim31.maven</groupId>
- <artifactId>scala-maven-plugin</artifactId>
- <!-- Note: -javabootclasspath is set on a per-execution basis rather than as a
- plugin-wide configuration because doc-jar generation will break if it's
- set; see SPARK-15839 for more details -->
- <executions>
- <execution>
- <id>scala-compile-first</id>
- <configuration>
- <args combine.children="append">
- <arg>-javabootclasspath</arg>
- <arg>${env.JAVA_7_HOME}/jre/lib/rt.jar${path.separator}${env.JAVA_7_HOME}/jre/lib/jce.jar</arg>
- </args>
- </configuration>
- </execution>
- <execution>
- <id>scala-test-compile-first</id>
- <configuration>
- <args combine.children="append">
- <arg>-javabootclasspath</arg>
- <arg>${env.JAVA_7_HOME}/jre/lib/rt.jar${path.separator}${env.JAVA_7_HOME}/jre/lib/jce.jar</arg>
- </args>
- </configuration>
- </execution>
- </executions>
- </plugin>
- </plugins>
- </pluginManagement>
- </build>
- </profile>
-
- <profile>
<id>scala-2.11</id>
<activation>
<property><name>!scala-2.10</name></property>
http://git-wip-us.apache.org/repos/asf/spark/blob/0e240549/project/SparkBuild.scala
----------------------------------------------------------------------
diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala
index bcc00fa..b48879f 100644
--- a/project/SparkBuild.scala
+++ b/project/SparkBuild.scala
@@ -56,9 +56,9 @@ object BuildCommons {
"tags", "sketch"
).map(ProjectRef(buildLocation, _)) ++ sqlProjects ++ streamingProjects
- val optionallyEnabledProjects@Seq(mesos, yarn, java8Tests, sparkGangliaLgpl,
+ val optionallyEnabledProjects@Seq(mesos, yarn, sparkGangliaLgpl,
streamingKinesisAsl, dockerIntegrationTests) =
- Seq("mesos", "yarn", "java8-tests", "ganglia-lgpl", "streaming-kinesis-asl",
+ Seq("mesos", "yarn", "ganglia-lgpl", "streaming-kinesis-asl",
"docker-integration-tests").map(ProjectRef(buildLocation, _))
val assemblyProjects@Seq(networkYarn, streamingFlumeAssembly, streamingKafkaAssembly, streamingKafka010Assembly, streamingKinesisAslAssembly) =
@@ -233,8 +233,8 @@ object SparkBuild extends PomBuild {
if (major >= 8) Seq("-Xdoclint:all", "-Xdoclint:-missing") else Seq.empty
},
- javacJVMVersion := "1.7",
- scalacJVMVersion := "1.7",
+ javacJVMVersion := "1.8",
+ scalacJVMVersion := "1.8",
javacOptions in Compile ++= Seq(
"-encoding", "UTF-8",
@@ -245,24 +245,12 @@ object SparkBuild extends PomBuild {
// additional discussion and explanation.
javacOptions in (Compile, compile) ++= Seq(
"-target", javacJVMVersion.value
- ) ++ sys.env.get("JAVA_7_HOME").toSeq.flatMap { jdk7 =>
- if (javacJVMVersion.value == "1.7") {
- Seq("-bootclasspath", s"$jdk7/jre/lib/rt.jar${File.pathSeparator}$jdk7/jre/lib/jce.jar")
- } else {
- Nil
- }
- },
+ ),
scalacOptions in Compile ++= Seq(
s"-target:jvm-${scalacJVMVersion.value}",
"-sourcepath", (baseDirectory in ThisBuild).value.getAbsolutePath // Required for relative source links in scaladoc
- ) ++ sys.env.get("JAVA_7_HOME").toSeq.flatMap { jdk7 =>
- if (javacJVMVersion.value == "1.7") {
- Seq("-javabootclasspath", s"$jdk7/jre/lib/rt.jar${File.pathSeparator}$jdk7/jre/lib/jce.jar")
- } else {
- Nil
- }
- },
+ ),
// Implements -Xfatal-warnings, ignoring deprecation warnings.
// Code snippet taken from https://issues.scala-lang.org/browse/SI-8410.
@@ -363,8 +351,6 @@ object SparkBuild extends PomBuild {
enable(Flume.settings)(streamingFlumeSink)
- enable(Java8TestSettings.settings)(java8Tests)
-
// SPARK-14738 - Remove docker tests from main Spark build
// enable(DockerIntegrationTests.settings)(dockerIntegrationTests)
@@ -387,7 +373,7 @@ object SparkBuild extends PomBuild {
fork := true,
outputStrategy in run := Some (StdoutOutput),
- javaOptions ++= Seq("-Xmx2G", "-XX:MaxPermSize=256m"),
+ javaOptions += "-Xmx2g",
sparkShell := {
(runMain in Compile).toTask(" org.apache.spark.repl.Main -usejavacp").value
@@ -531,7 +517,6 @@ object SQL {
object Hive {
lazy val settings = Seq(
- javaOptions += "-XX:MaxPermSize=256m",
// Specially disable assertions since some Hive tests fail them
javaOptions in Test := (javaOptions in Test).value.filterNot(_ == "-ea"),
// Supporting all SerDes requires us to depend on deprecated APIs, so we turn off the warnings
@@ -765,16 +750,6 @@ object CopyDependencies {
}
-object Java8TestSettings {
- import BuildCommons._
-
- lazy val settings = Seq(
- javacJVMVersion := "1.8",
- // Targeting Java 8 bytecode is only supported in Scala 2.11.4 and higher:
- scalacJVMVersion := (if (System.getProperty("scala-2.10") == "true") "1.7" else "1.8")
- )
-}
-
object TestSettings {
import BuildCommons._
@@ -812,7 +787,7 @@ object TestSettings {
javaOptions in Test ++= System.getProperties.asScala.filter(_._1.startsWith("spark"))
.map { case (k,v) => s"-D$k=$v" }.toSeq,
javaOptions in Test += "-ea",
- javaOptions in Test ++= "-Xmx3g -Xss4096k -XX:PermSize=128M -XX:MaxNewSize=256m -XX:MaxPermSize=1g"
+ javaOptions in Test ++= "-Xmx3g -Xss4096k"
.split(" ").toSeq,
javaOptions += "-Xmx3g",
// Exclude tags defined in a system property
http://git-wip-us.apache.org/repos/asf/spark/blob/0e240549/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala
----------------------------------------------------------------------
diff --git a/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala b/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala
index f4f4518..a00234c 100644
--- a/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala
+++ b/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala
@@ -911,7 +911,6 @@ private[spark] class Client(
// For log4j configuration to reference
javaOpts += ("-Dspark.yarn.app.container.log.dir=" + ApplicationConstants.LOG_DIR_EXPANSION_VAR)
- YarnCommandBuilderUtils.addPermGenSizeOpt(javaOpts)
val userClass =
if (isClusterMode) {
http://git-wip-us.apache.org/repos/asf/spark/blob/0e240549/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/ExecutorRunnable.scala
----------------------------------------------------------------------
diff --git a/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/ExecutorRunnable.scala b/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/ExecutorRunnable.scala
index b55b4b1..ee85c04 100644
--- a/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/ExecutorRunnable.scala
+++ b/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/ExecutorRunnable.scala
@@ -38,7 +38,6 @@ import org.apache.hadoop.yarn.util.{ConverterUtils, Records}
import org.apache.spark.{SecurityManager, SparkConf, SparkException}
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
-import org.apache.spark.launcher.YarnCommandBuilderUtils
import org.apache.spark.network.util.JavaUtils
import org.apache.spark.util.Utils
@@ -190,7 +189,6 @@ private[yarn] class ExecutorRunnable(
// For log4j configuration to reference
javaOpts += ("-Dspark.yarn.app.container.log.dir=" + ApplicationConstants.LOG_DIR_EXPANSION_VAR)
- YarnCommandBuilderUtils.addPermGenSizeOpt(javaOpts)
val userClassPath = Client.getUserClasspath(sparkConf).flatMap { uri =>
val absPath =
http://git-wip-us.apache.org/repos/asf/spark/blob/0e240549/resource-managers/yarn/src/main/scala/org/apache/spark/launcher/YarnCommandBuilderUtils.scala
----------------------------------------------------------------------
diff --git a/resource-managers/yarn/src/main/scala/org/apache/spark/launcher/YarnCommandBuilderUtils.scala b/resource-managers/yarn/src/main/scala/org/apache/spark/launcher/YarnCommandBuilderUtils.scala
index 6c3556a..0c3d080 100644
--- a/resource-managers/yarn/src/main/scala/org/apache/spark/launcher/YarnCommandBuilderUtils.scala
+++ b/resource-managers/yarn/src/main/scala/org/apache/spark/launcher/YarnCommandBuilderUtils.scala
@@ -38,16 +38,4 @@ private[spark] object YarnCommandBuilderUtils {
CommandBuilderUtils.findJarsDir(sparkHome, scalaVer, true)
}
- /**
- * Adds the perm gen configuration to the list of java options if needed and not yet added.
- *
- * Note that this method adds the option based on the local JVM version; if the node where
- * the container is running has a different Java version, there's a risk that the option will
- * not be added (e.g. if the AM is running Java 8 but the container's node is set up to use
- * Java 7).
- */
- def addPermGenSizeOpt(args: ListBuffer[String]): Unit = {
- CommandBuilderUtils.addPermGenSizeOpt(args.asJava)
- }
-
}
http://git-wip-us.apache.org/repos/asf/spark/blob/0e240549/sql/catalyst/src/main/scala/org/apache/spark/sql/types/Decimal.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/types/Decimal.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/types/Decimal.scala
index 465fb83..089c84d 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/types/Decimal.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/types/Decimal.scala
@@ -134,12 +134,8 @@ final class Decimal extends Ordered[Decimal] with Serializable {
* Set this Decimal to the given BigInteger value. Will have precision 38 and scale 0.
*/
def set(bigintval: BigInteger): Decimal = {
- // TODO: Remove this once we migrate to java8 and use longValueExact() instead.
- require(
- bigintval.compareTo(LONG_MAX_BIG_INT) <= 0 && bigintval.compareTo(LONG_MIN_BIG_INT) >= 0,
- s"BigInteger $bigintval too large for decimal")
this.decimalVal = null
- this.longVal = bigintval.longValue()
+ this.longVal = bigintval.longValueExact()
this._precision = DecimalType.MAX_PRECISION
this._scale = 0
this
@@ -178,7 +174,7 @@ final class Decimal extends Ordered[Decimal] with Serializable {
def toUnscaledLong: Long = {
if (decimalVal.ne(null)) {
- decimalVal.underlying().unscaledValue().longValue()
+ decimalVal.underlying().unscaledValue().longValueExact()
} else {
longVal
}
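
BigInteger#longValueExact() arrived in Java 8 and performs exactly the range check the removed require(...) did by hand: it throws ArithmeticException when the value does not fit in a long, whereas plain longValue() silently keeps only the low 64 bits. A sketch of the difference:

import java.math.BigInteger;

class ExactSketch {
  public static void main(String[] args) {
    BigInteger tooBig = BigInteger.valueOf(Long.MAX_VALUE).add(BigInteger.ONE);
    System.out.println(tooBig.longValue()); // silently wraps to Long.MIN_VALUE
    try {
      tooBig.longValueExact();              // Java 8: checked conversion
    } catch (ArithmeticException e) {
      System.out.println("out of long range"); // thrown instead of truncating
    }
  }
}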
http://git-wip-us.apache.org/repos/asf/spark/blob/0e240549/sql/core/src/main/java/org/apache/spark/api/java/function/FlatMapGroupsWithStateFunction.java
----------------------------------------------------------------------
diff --git a/sql/core/src/main/java/org/apache/spark/api/java/function/FlatMapGroupsWithStateFunction.java b/sql/core/src/main/java/org/apache/spark/api/java/function/FlatMapGroupsWithStateFunction.java
index 2570c8d..d44af7e 100644
--- a/sql/core/src/main/java/org/apache/spark/api/java/function/FlatMapGroupsWithStateFunction.java
+++ b/sql/core/src/main/java/org/apache/spark/api/java/function/FlatMapGroupsWithStateFunction.java
@@ -22,13 +22,13 @@ import java.util.Iterator;
import org.apache.spark.annotation.Experimental;
import org.apache.spark.annotation.InterfaceStability;
-import org.apache.spark.sql.Encoder;
import org.apache.spark.sql.KeyedState;
/**
* ::Experimental::
* Base interface for a map function used in
- * {@link org.apache.spark.sql.KeyValueGroupedDataset#flatMapGroupsWithState(FlatMapGroupsWithStateFunction, Encoder, Encoder)}.
+ * {@link org.apache.spark.sql.KeyValueGroupedDataset#flatMapGroupsWithState(
+ * FlatMapGroupsWithStateFunction, org.apache.spark.sql.Encoder, org.apache.spark.sql.Encoder)}.
* @since 2.1.1
*/
@Experimental
http://git-wip-us.apache.org/repos/asf/spark/blob/0e240549/sql/core/src/main/java/org/apache/spark/api/java/function/MapGroupsWithStateFunction.java
----------------------------------------------------------------------
diff --git a/sql/core/src/main/java/org/apache/spark/api/java/function/MapGroupsWithStateFunction.java b/sql/core/src/main/java/org/apache/spark/api/java/function/MapGroupsWithStateFunction.java
index 614d392..75986d1 100644
--- a/sql/core/src/main/java/org/apache/spark/api/java/function/MapGroupsWithStateFunction.java
+++ b/sql/core/src/main/java/org/apache/spark/api/java/function/MapGroupsWithStateFunction.java
@@ -22,13 +22,13 @@ import java.util.Iterator;
import org.apache.spark.annotation.Experimental;
import org.apache.spark.annotation.InterfaceStability;
-import org.apache.spark.sql.Encoder;
import org.apache.spark.sql.KeyedState;
/**
* ::Experimental::
* Base interface for a map function used in
- * {@link org.apache.spark.sql.KeyValueGroupedDataset#mapGroupsWithState(MapGroupsWithStateFunction, Encoder, Encoder)}
+ * {@link org.apache.spark.sql.KeyValueGroupedDataset#mapGroupsWithState(
+ * MapGroupsWithStateFunction, org.apache.spark.sql.Encoder, org.apache.spark.sql.Encoder)}
* @since 2.1.1
*/
@Experimental
http://git-wip-us.apache.org/repos/asf/spark/blob/0e240549/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala b/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
index e62cd9f..38a24cc 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
@@ -103,7 +103,7 @@ private[sql] object Dataset {
* the following creates a new Dataset by applying a filter on the existing one:
* {{{
* val names = people.map(_.name) // in Scala; names is a Dataset[String]
- * Dataset<String> names = people.map((Person p) -> p.name, Encoders.STRING)); // in Java 8
+ * Dataset<String> names = people.map((Person p) -> p.name, Encoders.STRING));
* }}}
*
* Dataset operations can also be untyped, through various domain-specific-language (DSL)
http://git-wip-us.apache.org/repos/asf/spark/blob/0e240549/sql/core/src/main/scala/org/apache/spark/sql/KeyValueGroupedDataset.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/KeyValueGroupedDataset.scala b/sql/core/src/main/scala/org/apache/spark/sql/KeyValueGroupedDataset.scala
index 94e689a..3a548c2 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/KeyValueGroupedDataset.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/KeyValueGroupedDataset.scala
@@ -98,7 +98,7 @@ class KeyValueGroupedDataset[K, V] private[sql](
* // Create Integer values grouped by String key from a Dataset<Tuple2<String, Integer>>
* Dataset<Tuple2<String, Integer>> ds = ...;
* KeyValueGroupedDataset<String, Integer> grouped =
- * ds.groupByKey(t -> t._1, Encoders.STRING()).mapValues(t -> t._2, Encoders.INT()); // Java 8
+ * ds.groupByKey(t -> t._1, Encoders.STRING()).mapValues(t -> t._2, Encoders.INT());
* }}}
*
* @since 2.1.0
http://git-wip-us.apache.org/repos/asf/spark/blob/0e240549/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
index ea465e2..dbe5509 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
@@ -190,17 +190,6 @@ class SQLContext private[sql](val sparkSession: SparkSession)
* The following example registers a UDF in Java:
* {{{
* sqlContext.udf().register("myUDF",
- * new UDF2<Integer, String, String>() {
- * @Override
- * public String call(Integer arg1, String arg2) {
- * return arg2 + arg1;
- * }
- * }, DataTypes.StringType);
- * }}}
- *
- * Or, to use Java 8 lambda syntax:
- * {{{
- * sqlContext.udf().register("myUDF",
* (Integer arg1, String arg2) -> arg2 + arg1,
* DataTypes.StringType);
* }}}
http://git-wip-us.apache.org/repos/asf/spark/blob/0e240549/sql/core/src/main/scala/org/apache/spark/sql/SparkSession.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SparkSession.scala b/sql/core/src/main/scala/org/apache/spark/sql/SparkSession.scala
index e1fdb2f..1975a56 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SparkSession.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SparkSession.scala
@@ -164,17 +164,6 @@ class SparkSession private(
* The following example registers a UDF in Java:
* {{{
* sparkSession.udf().register("myUDF",
- * new UDF2<Integer, String, String>() {
- * @Override
- * public String call(Integer arg1, String arg2) {
- * return arg2 + arg1;
- * }
- * }, DataTypes.StringType);
- * }}}
- *
- * Or, to use Java 8 lambda syntax:
- * {{{
- * sparkSession.udf().register("myUDF",
* (Integer arg1, String arg2) -> arg2 + arg1,
* DataTypes.StringType);
* }}}
http://git-wip-us.apache.org/repos/asf/spark/blob/0e240549/sql/core/src/test/java/test/org/apache/spark/sql/Java8DatasetAggregatorSuite.java
----------------------------------------------------------------------
diff --git a/sql/core/src/test/java/test/org/apache/spark/sql/Java8DatasetAggregatorSuite.java b/sql/core/src/test/java/test/org/apache/spark/sql/Java8DatasetAggregatorSuite.java
new file mode 100644
index 0000000..8b8a403
--- /dev/null
+++ b/sql/core/src/test/java/test/org/apache/spark/sql/Java8DatasetAggregatorSuite.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package test.org.apache.spark.sql;
+
+import java.util.Arrays;
+
+import org.junit.Assert;
+import org.junit.Test;
+import scala.Tuple2;
+
+import org.apache.spark.sql.Dataset;
+import org.apache.spark.sql.KeyValueGroupedDataset;
+import org.apache.spark.sql.expressions.javalang.typed;
+
+/**
+ * Suite that replicates tests in JavaDatasetAggregatorSuite using lambda syntax.
+ */
+public class Java8DatasetAggregatorSuite extends JavaDatasetAggregatorSuiteBase {
+ @Test
+ public void testTypedAggregationAverage() {
+ KeyValueGroupedDataset<String, Tuple2<String, Integer>> grouped = generateGroupedDataset();
+ Dataset<Tuple2<String, Double>> agged = grouped.agg(typed.avg(v -> (double)(v._2() * 2)));
+ Assert.assertEquals(Arrays.asList(tuple2("a", 3.0), tuple2("b", 6.0)), agged.collectAsList());
+ }
+
+ @Test
+ public void testTypedAggregationCount() {
+ KeyValueGroupedDataset<String, Tuple2<String, Integer>> grouped = generateGroupedDataset();
+ Dataset<Tuple2<String, Long>> agged = grouped.agg(typed.count(v -> v));
+ Assert.assertEquals(Arrays.asList(tuple2("a", 2L), tuple2("b", 1L)), agged.collectAsList());
+ }
+
+ @Test
+ public void testTypedAggregationSumDouble() {
+ KeyValueGroupedDataset<String, Tuple2<String, Integer>> grouped = generateGroupedDataset();
+ Dataset<Tuple2<String, Double>> agged = grouped.agg(typed.sum(v -> (double)v._2()));
+ Assert.assertEquals(Arrays.asList(tuple2("a", 3.0), tuple2("b", 3.0)), agged.collectAsList());
+ }
+
+ @Test
+ public void testTypedAggregationSumLong() {
+ KeyValueGroupedDataset<String, Tuple2<String, Integer>> grouped = generateGroupedDataset();
+ Dataset<Tuple2<String, Long>> agged = grouped.agg(typed.sumLong(v -> (long)v._2()));
+ Assert.assertEquals(Arrays.asList(tuple2("a", 3L), tuple2("b", 3L)), agged.collectAsList());
+ }
+}
http://git-wip-us.apache.org/repos/asf/spark/blob/0e240549/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java
----------------------------------------------------------------------
diff --git a/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java b/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java
index 5ef4e88..a94a37c 100644
--- a/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java
+++ b/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java
@@ -228,7 +228,7 @@ public class JavaDatasetSuite implements Serializable {
Dataset<String> mapped2 = grouped.mapGroupsWithState(
new MapGroupsWithStateFunction<Integer, String, Long, String>() {
@Override
- public String call(Integer key, Iterator<String> values, KeyedState<Long> s) throws Exception {
+ public String call(Integer key, Iterator<String> values, KeyedState<Long> s) {
StringBuilder sb = new StringBuilder(key.toString());
while (values.hasNext()) {
sb.append(values.next());
http://git-wip-us.apache.org/repos/asf/spark/blob/0e240549/sql/hive/pom.xml
----------------------------------------------------------------------
diff --git a/sql/hive/pom.xml b/sql/hive/pom.xml
index 9aedaf2..0f249d7 100644
--- a/sql/hive/pom.xml
+++ b/sql/hive/pom.xml
@@ -190,6 +190,7 @@
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>build-helper-maven-plugin</artifactId>
+ <version>3.0.0</version>
<executions>
<execution>
<id>add-scala-test-sources</id>
@@ -219,7 +220,7 @@
<artifactId>scalatest-maven-plugin</artifactId>
<configuration>
<!-- Specially disable assertions since some Hive tests fail them -->
- <argLine>-da -Xmx3g -XX:MaxPermSize=${MaxPermGen} -XX:ReservedCodeCacheSize=512m</argLine>
+ <argLine>-da -Xmx3g -XX:ReservedCodeCacheSize=${CodeCacheSize}</argLine>
</configuration>
</plugin>
<plugin>
http://git-wip-us.apache.org/repos/asf/spark/blob/0e240549/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/ScriptTransformationExec.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/ScriptTransformationExec.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/ScriptTransformationExec.scala
index e7c165c..d786a61 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/ScriptTransformationExec.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/ScriptTransformationExec.scala
@@ -137,21 +137,13 @@ case class ScriptTransformationExec(
throw writerThread.exception.get
}
- // Checks if the proc is still alive (incase the command ran was bad)
- // The ideal way to do this is to use Java 8's Process#isAlive()
- // but it cannot be used because Spark still supports Java 7.
- // Following is a workaround used to check if a process is alive in Java 7
- // TODO: Once builds are switched to Java 8, this can be changed
- try {
+ if (!proc.isAlive) {
val exitCode = proc.exitValue()
if (exitCode != 0) {
logError(stderrBuffer.toString) // log the stderr circular buffer
throw new SparkException(s"Subprocess exited with status $exitCode. " +
s"Error: ${stderrBuffer.toString}", cause)
}
- } catch {
- case _: IllegalThreadStateException =>
- // This means that the process is still alive. Move ahead
}
}
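
Process#isAlive() is the Java 8 replacement for the Java 7 idiom the deleted comment describes: call exitValue() and treat IllegalThreadStateException as "still running". Both forms side by side in a sketch (Java here, though the patched file is Scala; "sleep" is a placeholder command):

import java.io.IOException;

class AliveSketch {
  static boolean isAliveJava7Style(Process proc) {
    try {
      proc.exitValue();  // throws while the process is still running
      return false;
    } catch (IllegalThreadStateException e) {
      return true;
    }
  }

  public static void main(String[] args) throws IOException {
    Process proc = new ProcessBuilder("sleep", "5").start();
    System.out.println(proc.isAlive() == isAliveJava7Style(proc)); // true
  }
}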