You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@tinkerpop.apache.org by ok...@apache.org on 2015/04/01 01:25:02 UTC
incubator-tinkerpop git commit: closing the SparkContext is not
needed as it's in a try{} resource block.
Repository: incubator-tinkerpop
Updated Branches:
refs/heads/master 02b6083b8 -> 1244a8fa8
closing the SparkContext is not needed as it's in a try{} resource block.
Project: http://git-wip-us.apache.org/repos/asf/incubator-tinkerpop/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-tinkerpop/commit/1244a8fa
Tree: http://git-wip-us.apache.org/repos/asf/incubator-tinkerpop/tree/1244a8fa
Diff: http://git-wip-us.apache.org/repos/asf/incubator-tinkerpop/diff/1244a8fa
Branch: refs/heads/master
Commit: 1244a8fa8c6bd9401d0f8ca63e129ded9414e5e5
Parents: 02b6083
Author: Marko A. Rodriguez <ok...@gmail.com>
Authored: Tue Mar 31 17:24:59 2015 -0600
Committer: Marko A. Rodriguez <ok...@gmail.com>
Committed: Tue Mar 31 17:24:59 2015 -0600
----------------------------------------------------------------------
.../hadoop/process/computer/spark/SparkGraphComputer.java | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-tinkerpop/blob/1244a8fa/hadoop-gremlin/src/main/java/org/apache/tinkerpop/gremlin/hadoop/process/computer/spark/SparkGraphComputer.java
----------------------------------------------------------------------
diff --git a/hadoop-gremlin/src/main/java/org/apache/tinkerpop/gremlin/hadoop/process/computer/spark/SparkGraphComputer.java b/hadoop-gremlin/src/main/java/org/apache/tinkerpop/gremlin/hadoop/process/computer/spark/SparkGraphComputer.java
index 94eb6a6..f09b649 100644
--- a/hadoop-gremlin/src/main/java/org/apache/tinkerpop/gremlin/hadoop/process/computer/spark/SparkGraphComputer.java
+++ b/hadoop-gremlin/src/main/java/org/apache/tinkerpop/gremlin/hadoop/process/computer/spark/SparkGraphComputer.java
@@ -209,7 +209,7 @@ public final class SparkGraphComputer implements GraphComputer {
//////////////////////////////
if (!this.mapReducers.isEmpty()) {
// drop all edges and messages in the graphRDD as they are no longer needed for the map reduce jobs
- final JavaPairRDD<Object, VertexWritable> mapReduceGraphRDD = null == viewAndMessageRDD ?
+ final JavaPairRDD<Object, VertexWritable> mapReduceGraphRDD = null == viewAndMessageRDD ? // TODO: move to SparkExecutor
graphRDD.mapValues(vertexWritable -> {
vertexWritable.get().edges(Direction.BOTH).forEachRemaining(Edge::remove);
return vertexWritable;
@@ -238,8 +238,6 @@ public final class SparkGraphComputer implements GraphComputer {
SparkExecutor.saveMapReduceRDD(null == reduceRDD ? mapRDD : reduceRDD, mapReduce, finalMemory, hadoopConfiguration);
}
}
- // close the context or else bad things happen // TODO: does this happen automatically cause of the try(resource) {} block?
- sparkContext.close();
// update runtime and return the newly computed graph
finalMemory.setRuntime(System.currentTimeMillis() - startTime);
return new DefaultComputerResult(HadoopHelper.getOutputGraph(this.hadoopGraph, this.resultGraph.get(), this.persist.get()), finalMemory.asImmutable());