You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@spark.apache.org by rx...@apache.org on 2016/02/11 22:31:18 UTC
spark git commit: Revert "[SPARK-13279] Remove O(n^2) operation from scheduler."
Repository: spark
Updated Branches:
refs/heads/master 50fa6fd1b -> c86009ceb
Revert "[SPARK-13279] Remove O(n^2) operation from scheduler."
This reverts commit 50fa6fd1b365d5db7e2b2c59624a365cef0d1696.
Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/c86009ce
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/c86009ce
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/c86009ce
Branch: refs/heads/master
Commit: c86009ceb9613201b41319245526a13b1f0b5451
Parents: 50fa6fd
Author: Reynold Xin <rx...@databricks.com>
Authored: Thu Feb 11 13:31:13 2016 -0800
Committer: Reynold Xin <rx...@databricks.com>
Committed: Thu Feb 11 13:31:13 2016 -0800
----------------------------------------------------------------------
.../org/apache/spark/scheduler/TaskSetManager.scala | 15 ++++++---------
1 file changed, 6 insertions(+), 9 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/spark/blob/c86009ce/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala b/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala
index 4b19beb..cf97877 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala
@@ -114,14 +114,9 @@ private[spark] class TaskSetManager(
// treated as stacks, in which new tasks are added to the end of the
// ArrayBuffer and removed from the end. This makes it faster to detect
// tasks that repeatedly fail because whenever a task failed, it is put
- // back at the head of the stack. These collections may contain duplicates
- // for two reasons:
- // (1): Tasks are only removed lazily; when a task is launched, it remains
- // in all the pending lists except the one that it was launched from.
- // (2): Tasks may be re-added to these lists multiple times as a result
- // of failures.
- // Duplicates are handled in dequeueTaskFromList, which ensures that a
- // task hasn't already started running before launching it.
+ // back at the head of the stack. They are also only cleaned up lazily;
+ // when a task is launched, it remains in all the pending lists except
+ // the one that it was launched from, but gets removed from them later.
private val pendingTasksForExecutor = new HashMap[String, ArrayBuffer[Int]]
// Set of pending tasks for each host. Similar to pendingTasksForExecutor,
@@ -186,7 +181,9 @@ private[spark] class TaskSetManager(
private def addPendingTask(index: Int) {
// Utility method that adds `index` to a list only if it's not already there
def addTo(list: ArrayBuffer[Int]) {
- list += index
+ if (!list.contains(index)) {
+ list += index
+ }
}
for (loc <- tasks(index).preferredLocations) {
---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@spark.apache.org
For additional commands, e-mail: commits-help@spark.apache.org