Posted to commits@spark.apache.org by rx...@apache.org on 2014/01/08 20:50:18 UTC

[1/3] git commit: Set boolean param name in two files' calls to SparkHadoopMapReduceUtil.newTaskAttemptID to make it clear which param is being set.

Updated Branches:
  refs/heads/master bdeaeafbd -> 56ebfeaa5


Set boolean param name in two files' calls to SparkHadoopMapReduceUtil.newTaskAttemptID to make
it clear which param is being set.
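
For readers unfamiliar with the pattern: Scala allows naming any argument at the call site, which is most valuable for bare boolean literals whose meaning is otherwise invisible. A minimal sketch with a hypothetical method (not the Spark source):

    // Hypothetical method with a boolean parameter, for illustration only.
    def newAttempt(jtId: String, jobId: Int, isMap: Boolean, taskId: Int): String =
      s"attempt_${jtId}_${jobId}_${if (isMap) "m" else "r"}_$taskId"

    newAttempt("201401071700", 1, true, 0)          // unclear: what is `true`?
    newAttempt("201401071700", 1, isMap = true, 0)  // clear: this is a map task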


Project: http://git-wip-us.apache.org/repos/asf/incubator-spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-spark/commit/f6b6f883
Tree: http://git-wip-us.apache.org/repos/asf/incubator-spark/tree/f6b6f883
Diff: http://git-wip-us.apache.org/repos/asf/incubator-spark/diff/f6b6f883

Branch: refs/heads/master
Commit: f6b6f88367351f99d02a7de0dbd5c1980cc97bbf
Parents: c0f0155
Author: Henry Saputra <hs...@apache.org>
Authored: Tue Jan 7 23:23:17 2014 -0800
Committer: Henry Saputra <hs...@apache.org>
Committed: Tue Jan 7 23:23:17 2014 -0800

----------------------------------------------------------------------
 core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala     | 2 +-
 core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-spark/blob/f6b6f883/core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala b/core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala
index 2662d48..73d15b9 100644
--- a/core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala
@@ -76,7 +76,7 @@ class NewHadoopRDD[K, V](
       val split = theSplit.asInstanceOf[NewHadoopPartition]
       logInfo("Input split: " + split.serializableHadoopSplit)
       val conf = confBroadcast.value.value
-      val attemptId = newTaskAttemptID(jobtrackerId, id, true, split.index, 0)
+      val attemptId = newTaskAttemptID(jobtrackerId, id, isMap = true, split.index, 0)
       val hadoopAttemptContext = newTaskAttemptContext(conf, attemptId)
       val format = inputFormatClass.newInstance
       if (format.isInstanceOf[Configurable]) {
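
For context, newTaskAttemptID comes from Spark's SparkHadoopMapReduceUtil and builds a Hadoop TaskAttemptID, whose constructor takes the same bare boolean, which is why the flag exists at all. A rough sketch of the helper's shape, assuming the Hadoop 1 style constructor (illustrative, not the verbatim Spark source):

    import org.apache.hadoop.mapreduce.TaskAttemptID

    // Sketch only; the real helper may differ across Hadoop API versions.
    def newTaskAttemptID(jtIdentifier: String, jobId: Int, isMap: Boolean,
        taskId: Int, attemptId: Int): TaskAttemptID =
      new TaskAttemptID(jtIdentifier, jobId, isMap, taskId, attemptId)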

http://git-wip-us.apache.org/repos/asf/incubator-spark/blob/f6b6f883/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala b/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala
index 04a8d05..c8446fd 100644
--- a/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala
@@ -613,7 +613,7 @@ class PairRDDFunctions[K: ClassTag, V: ClassTag](self: RDD[(K, V)])
       // around by taking a mod. We expect that no task will be attempted 2 billion times.
       val attemptNumber = (context.attemptId % Int.MaxValue).toInt
       /* "reduce task" <split #> <attempt # = spark task #> */
-      val attemptId = newTaskAttemptID(jobtrackerID, stageId, false, context.partitionId, attemptNumber)
+      val attemptId = newTaskAttemptID(jobtrackerID, stageId, isMap = false, context.partitionId, attemptNumber)
       val hadoopContext = newTaskAttemptContext(wrappedConf.value, attemptId)
       val format = outputFormatClass.newInstance
       val committer = format.getOutputCommitter(hadoopContext)
@@ -632,7 +632,7 @@ class PairRDDFunctions[K: ClassTag, V: ClassTag](self: RDD[(K, V)])
      * however we're only going to use this local OutputCommitter for
      * setupJob/commitJob, so we just use a dummy "map" task.
      */
-    val jobAttemptId = newTaskAttemptID(jobtrackerID, stageId, true, 0, 0)
+    val jobAttemptId = newTaskAttemptID(jobtrackerID, stageId, isMap = true, 0, 0)
     val jobTaskContext = newTaskAttemptContext(wrappedConf.value, jobAttemptId)
     val jobCommitter = jobFormat.getOutputCommitter(jobTaskContext)
     jobCommitter.setupJob(jobTaskContext)
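
The comment in the hunk above about wrapping with a mod is worth a quick worked example: Spark's context.attemptId is a Long, while Hadoop expects an Int attempt number, so the value is folded into Int range. A sketch with a made-up id:

    // Hypothetical large Spark attempt id; Hadoop wants a non-negative Int.
    val sparkAttemptId: Long = 5000000000L
    val attemptNumber = (sparkAttemptId % Int.MaxValue).toInt
    // 5000000000 % 2147483647 == 705032706, safely inside Int range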


[2/3] git commit: Resolve PR review: wrap call that runs over 100 chars

Posted by rx...@apache.org.
Resolve PR review: wrap call that runs over 100 chars


Project: http://git-wip-us.apache.org/repos/asf/incubator-spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-spark/commit/aa56585d
Tree: http://git-wip-us.apache.org/repos/asf/incubator-spark/tree/aa56585d
Diff: http://git-wip-us.apache.org/repos/asf/incubator-spark/diff/aa56585d

Branch: refs/heads/master
Commit: aa56585d2148b3ced506d2fff89da0858300928c
Parents: f6b6f88
Author: Henry Saputra <hs...@apache.org>
Authored: Wed Jan 8 00:38:29 2014 -0800
Committer: Henry Saputra <hs...@apache.org>
Committed: Wed Jan 8 00:38:29 2014 -0800

----------------------------------------------------------------------
 core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-spark/blob/aa56585d/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala b/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala
index c8446fd..4fe3bc5 100644
--- a/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala
@@ -613,7 +613,8 @@ class PairRDDFunctions[K: ClassTag, V: ClassTag](self: RDD[(K, V)])
       // around by taking a mod. We expect that no task will be attempted 2 billion times.
       val attemptNumber = (context.attemptId % Int.MaxValue).toInt
       /* "reduce task" <split #> <attempt # = spark task #> */
-      val attemptId = newTaskAttemptID(jobtrackerID, stageId, isMap = false, context.partitionId, attemptNumber)
+      val attemptId = newTaskAttemptID(jobtrackerID, stageId, isMap = false, context.partitionId,
+        attemptNumber)
       val hadoopContext = newTaskAttemptContext(wrappedConf.value, attemptId)
       val format = outputFormatClass.newInstance
       val committer = format.getOutputCommitter(hadoopContext)


[3/3] git commit: Merge pull request #357 from hsaputra/set_boolean_paramname

Posted by rx...@apache.org.
Merge pull request #357 from hsaputra/set_boolean_paramname

Set boolean param name for calls to SparkHadoopMapReduceUtil.newTaskAttemptID

Set boolean param name for calls to SparkHadoopMapReduceUtil.newTaskAttemptID to make it clear which param is being set.
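
Beyond readability, the named argument is checked against the parameter list at compile time, so a misspelled or stale name fails fast. A tiny illustration against the signature visible in the diffs above (hypothetical call sites, not Spark code):

    // newTaskAttemptID(jtIdentifier: String, jobId: Int, isMap: Boolean, taskId: Int, attemptId: Int)
    newTaskAttemptID(jobtrackerID, stageId, isMap = false, 0, 0)   // compiles
    // newTaskAttemptID(jobtrackerID, stageId, isMp = false, 0, 0) // error: unknown parameter name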


Project: http://git-wip-us.apache.org/repos/asf/incubator-spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-spark/commit/56ebfeaa
Tree: http://git-wip-us.apache.org/repos/asf/incubator-spark/tree/56ebfeaa
Diff: http://git-wip-us.apache.org/repos/asf/incubator-spark/diff/56ebfeaa

Branch: refs/heads/master
Commit: 56ebfeaa526edb7555c2c0889ada3e014ecbad09
Parents: bdeaeaf aa56585
Author: Reynold Xin <rx...@apache.org>
Authored: Wed Jan 8 11:50:06 2014 -0800
Committer: Reynold Xin <rx...@apache.org>
Committed: Wed Jan 8 11:50:06 2014 -0800

----------------------------------------------------------------------
 core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala     | 2 +-
 core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala | 5 +++--
 2 files changed, 4 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-spark/blob/56ebfeaa/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala
----------------------------------------------------------------------