You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@spark.apache.org by an...@apache.org on 2015/06/04 01:54:48 UTC
spark git commit: [SPARK-8088] don't attempt to lower number of executors by 0
Repository: spark
Updated Branches:
refs/heads/master 566cb5947 -> 51898b515
[SPARK-8088] don't attempt to lower number of executors by 0
Author: Ryan Williams <ry...@gmail.com>
Closes #6624 from ryan-williams/execs and squashes the following commits:
b6f71d4 [Ryan Williams] don't attempt to lower number of executors by 0
Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/51898b51
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/51898b51
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/51898b51
Branch: refs/heads/master
Commit: 51898b5158ac7e7e67b0539bc062c9c16ce9a7ce
Parents: 566cb59
Author: Ryan Williams <ry...@gmail.com>
Authored: Wed Jun 3 16:54:46 2015 -0700
Committer: Andrew Or <an...@databricks.com>
Committed: Wed Jun 3 16:54:46 2015 -0700
----------------------------------------------------------------------
.../org/apache/spark/ExecutorAllocationManager.scala | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/spark/blob/51898b51/core/src/main/scala/org/apache/spark/ExecutorAllocationManager.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/ExecutorAllocationManager.scala b/core/src/main/scala/org/apache/spark/ExecutorAllocationManager.scala
index 9514604..f7323a4 100644
--- a/core/src/main/scala/org/apache/spark/ExecutorAllocationManager.scala
+++ b/core/src/main/scala/org/apache/spark/ExecutorAllocationManager.scala
@@ -266,10 +266,14 @@ private[spark] class ExecutorAllocationManager(
// executors and inform the cluster manager to cancel the extra pending requests
val oldNumExecutorsTarget = numExecutorsTarget
numExecutorsTarget = math.max(maxNeeded, minNumExecutors)
- client.requestTotalExecutors(numExecutorsTarget)
numExecutorsToAdd = 1
- logInfo(s"Lowering target number of executors to $numExecutorsTarget because " +
- s"not all requests are actually needed (previously $oldNumExecutorsTarget)")
+
+ // If the new target has not changed, avoid sending a message to the cluster manager
+ if (numExecutorsTarget < oldNumExecutorsTarget) {
+ client.requestTotalExecutors(numExecutorsTarget)
+ logInfo(s"Lowering target number of executors to $numExecutorsTarget (previously " +
+ s"$oldNumExecutorsTarget) because not all requested executors are actually needed")
+ }
numExecutorsTarget - oldNumExecutorsTarget
} else if (addTime != NOT_SET && now >= addTime) {
val delta = addExecutors(maxNeeded)
---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@spark.apache.org
For additional commands, e-mail: commits-help@spark.apache.org