Posted to commits@spark.apache.org by pw...@apache.org on 2014/01/11 01:26:00 UTC

[43/50] git commit: Use AtomicInteger for numRunningTasks

Use AtomicInteger for numRunningTasks


Project: http://git-wip-us.apache.org/repos/asf/incubator-spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-spark/commit/4de9c955
Tree: http://git-wip-us.apache.org/repos/asf/incubator-spark/tree/4de9c955
Diff: http://git-wip-us.apache.org/repos/asf/incubator-spark/diff/4de9c955

Branch: refs/heads/master
Commit: 4de9c9554ca6464b806496dbffe0ba99c0ae6b45
Parents: 2db7884
Author: Andrew Or <an...@gmail.com>
Authored: Sat Jan 4 11:16:30 2014 -0800
Committer: Andrew Or <an...@gmail.com>
Committed: Sat Jan 4 11:16:30 2014 -0800

----------------------------------------------------------------------
 .../main/scala/org/apache/spark/SparkEnv.scala   | 19 +++++++------------
 1 file changed, 7 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-spark/blob/4de9c955/core/src/main/scala/org/apache/spark/SparkEnv.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/SparkEnv.scala b/core/src/main/scala/org/apache/spark/SparkEnv.scala
index 224b5c1..b581c7b 100644
--- a/core/src/main/scala/org/apache/spark/SparkEnv.scala
+++ b/core/src/main/scala/org/apache/spark/SparkEnv.scala
@@ -17,8 +17,9 @@
 
 package org.apache.spark
 
-import collection.mutable
-import serializer.Serializer
+import java.util.concurrent.atomic.AtomicInteger
+
+import scala.collection.mutable
 
 import akka.actor._
 import akka.remote.RemoteActorRefProvider
@@ -60,7 +61,7 @@ class SparkEnv private[spark] (
   private val pythonWorkers = mutable.HashMap[(String, Map[String, String]), PythonWorkerFactory]()
 
   // Number of tasks currently running across all threads
-  @volatile private var _numRunningTasks = 0
+  private val _numRunningTasks = new AtomicInteger(0)
 
   // A general, soft-reference map for metadata needed during HadoopRDD split computation
   // (e.g., HadoopFileRDD uses this to cache JobConfs and InputFormats).
@@ -93,15 +94,9 @@ class SparkEnv private[spark] (
   /**
    * Return the number of tasks currently running across all threads
    */
-  def numRunningTasks: Int = _numRunningTasks
-
-  def incrementNumRunningTasks() = synchronized {
-    _numRunningTasks += 1
-  }
-
-  def decrementNumRunningTasks() = synchronized {
-    _numRunningTasks -= 1
-  }
+  def numRunningTasks: Int = _numRunningTasks.intValue()
+  def incrementNumRunningTasks(): Int = _numRunningTasks.incrementAndGet()
+  def decrementNumRunningTasks(): Int = _numRunningTasks.decrementAndGet()
 }
 
 object SparkEnv extends Logging {
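
For context (not part of the commit): the change above replaces a @volatile var guarded by
synchronized increment/decrement methods with a single AtomicInteger, whose incrementAndGet()
and decrementAndGet() perform the read-modify-write atomically without a lock. Below is a
minimal standalone Scala sketch of the same pattern; the object and method names
(RunningTaskCounterDemo etc.) are illustrative only and do not exist in the Spark tree.

import java.util.concurrent.atomic.AtomicInteger

object RunningTaskCounterDemo {
  // Shared counter, mirroring SparkEnv's _numRunningTasks after this commit.
  private val numRunningTasks = new AtomicInteger(0)

  // Atomic read-modify-write; no synchronized block needed.
  def incrementNumRunningTasks(): Int = numRunningTasks.incrementAndGet()
  def decrementNumRunningTasks(): Int = numRunningTasks.decrementAndGet()

  def main(args: Array[String]): Unit = {
    // Simulate many tasks starting and finishing concurrently.
    val threads = (1 to 8).map { _ =>
      new Thread(new Runnable {
        def run(): Unit = {
          for (_ <- 1 to 10000) {
            incrementNumRunningTasks()
            decrementNumRunningTasks()
          }
        }
      })
    }
    threads.foreach(_.start())
    threads.foreach(_.join())

    // Every increment is paired with a decrement, so the final count is 0.
    println(s"numRunningTasks = ${numRunningTasks.intValue()}")
  }
}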