Posted to commits@openwhisk.apache.org by sl...@apache.org on 2019/07/09 12:32:00 UTC

[incubator-openwhisk] branch master updated: Add action timeout limit to invoker assignment message in load balancer / create singleton limits (#4537)

This is an automated email from the ASF dual-hosted git repository.

slange pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-openwhisk.git


The following commit(s) were added to refs/heads/master by this push:
     new b0d48c8  Add action timeout limit to invoker assignment message in load balancer / create singleton limits (#4537)
b0d48c8 is described below

commit b0d48c87816f2e7c04ad9b9f9b4844a6e064047f
Author: Sven Lange-Last <sv...@de.ibm.com>
AuthorDate: Tue Jul 9 14:31:53 2019 +0200

    Add action timeout limit to invoker assignment message in load balancer / create singleton limits (#4537)
    
    * Add information on whether the action memory / time limits are standard (i.e. default) or not (see the sketch after the file list below).
    * Restructure and shorten the message to reduce log volume despite the added information.
    * The code uses the term "scheduling an activation to an invoker"; change the message to use this term.
    * Also add more information to the message that indicates that an activation could not be scheduled to an invoker because no usable invokers are available. The additional information helps to identify which particular activation was affected.
    * Correct the comment on the permissible range for ConcurrencyLimit values.
    * Issue an error instead of a warning log entry if the controller's invoker shard memory size is too low.
    
    
    Co-authored-by: Sugandha Agrawal <su...@gmail.com>
    Co-authored-by: Rodric Rabbah <ro...@gmail.com>
---
 .../openwhisk/core/entity/ConcurrencyLimit.scala   | 19 +++---
 .../apache/openwhisk/core/entity/LogLimit.scala    | 20 ++++---
 .../apache/openwhisk/core/entity/MemoryLimit.scala | 16 +++--
 .../apache/openwhisk/core/entity/TimeLimit.scala   |  6 +-
 .../core/loadBalancer/InvokerSupervision.scala     |  2 +-
 .../ShardingContainerPoolBalancer.scala            | 24 +++++---
 .../openwhisk/core/invoker/InvokerReactive.scala   |  2 +-
 .../containerpool/test/ContainerPoolTests.scala    | 59 +++++++++---------
 .../core/controller/test/ActionsApiTests.scala     |  6 +-
 .../openwhisk/core/entity/test/SchemaTests.scala   | 36 +++++------
 .../openwhisk/core/limits/ActionLimitsTests.scala  | 70 +++++++++++-----------
 .../openwhisk/core/limits/ConcurrencyTests.scala   |  4 +-
 .../test/ShardingContainerPoolBalancerTests.scala  | 18 +++---
 13 files changed, 157 insertions(+), 125 deletions(-)
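
For context on the "std" / "non-std" memory and time limit tags added to the scheduling log line (first bullet of the commit message above), here is a minimal, self-contained Scala sketch of the classification. The types, default values and log wording are illustrative stand-ins only; the actual code compares against the MemoryLimit() / TimeLimit() singletons introduced in this commit.

object LimitInfoSketch {
  // Minimal stand-ins for the real entity types; names and defaults are assumptions.
  case class MemoryLimit(megabytes: Int)
  case class TimeLimit(millis: Long)

  val standardMemory = MemoryLimit(256) // assumed default of 256 MB
  val standardTime = TimeLimit(60000)   // assumed default of 60 s

  /** Classifies a limit as "std" when it equals the configured default, "non-std" otherwise. */
  def info[A](limit: A, standard: A): String =
    if (limit == standard) "std" else "non-std"

  def main(args: Array[String]): Unit = {
    val actionMemory = MemoryLimit(512)
    val actionTime = standardTime
    // Mirrors the shape of the shortened log line added by this commit.
    println(
      s"mem limit ${actionMemory.megabytes} MB (${info(actionMemory, standardMemory)}), " +
        s"time limit ${actionTime.millis} ms (${info(actionTime, standardTime)})")
  }
}

Because the standard limits are precomputed singletons, the equality checks above are cheap enough for the hot scheduling path, which is why the commit adds standardMemoryLimit, standardTimeLimit and friends instead of constructing a fresh default limit per activation.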

diff --git a/common/scala/src/main/scala/org/apache/openwhisk/core/entity/ConcurrencyLimit.scala b/common/scala/src/main/scala/org/apache/openwhisk/core/entity/ConcurrencyLimit.scala
index 1370f6f..5824dd5 100644
--- a/common/scala/src/main/scala/org/apache/openwhisk/core/entity/ConcurrencyLimit.scala
+++ b/common/scala/src/main/scala/org/apache/openwhisk/core/entity/ConcurrencyLimit.scala
@@ -29,7 +29,8 @@ case class ConcurrencyLimitConfig(min: Int, max: Int, std: Int)
 
 /**
  * ConcurrencyLimit encapsulates allowed concurrency in a single container for an action. The limit must be within a
- * permissible range (by default [1, 500]).
+ * permissible range (by default [1, 1]). This default range was chosen intentionally to reflect that concurrency
+ * is disabled by default.
  *
  * It is a value type (hence == is .equals, immutable and cannot be assigned null).
  * The constructor is private so that argument requirements are checked and normalized
@@ -45,12 +46,16 @@ protected[core] object ConcurrencyLimit extends ArgNormalizer[ConcurrencyLimit]
   private val concurrencyConfig =
     loadConfigWithFallbackOrThrow[ConcurrencyLimitConfig](config, ConfigKeys.concurrencyLimit)
 
-  protected[core] val minConcurrent: Int = concurrencyConfig.min
-  protected[core] val maxConcurrent: Int = concurrencyConfig.max
-  protected[core] val stdConcurrent: Int = concurrencyConfig.std
+  /** These values are set once at the beginning. Dynamic configuration updates are not supported at the moment. */
+  protected[core] val MIN_CONCURRENT: Int = concurrencyConfig.min
+  protected[core] val MAX_CONCURRENT: Int = concurrencyConfig.max
+  protected[core] val STD_CONCURRENT: Int = concurrencyConfig.std
+
+  /** A singleton ConcurrencyLimit with default value */
+  protected[core] val standardConcurrencyLimit = ConcurrencyLimit(STD_CONCURRENT)
 
   /** Gets ConcurrencyLimit with default value */
-  protected[core] def apply(): ConcurrencyLimit = ConcurrencyLimit(stdConcurrent)
+  protected[core] def apply(): ConcurrencyLimit = standardConcurrencyLimit
 
   /**
    * Creates ConcurrencyLimit for limit, iff limit is within permissible range.
@@ -61,8 +66,8 @@ protected[core] object ConcurrencyLimit extends ArgNormalizer[ConcurrencyLimit]
    */
   @throws[IllegalArgumentException]
   protected[core] def apply(concurrency: Int): ConcurrencyLimit = {
-    require(concurrency >= minConcurrent, s"concurrency $concurrency below allowed threshold of $minConcurrent")
-    require(concurrency <= maxConcurrent, s"concurrency $concurrency exceeds allowed threshold of $maxConcurrent")
+    require(concurrency >= MIN_CONCURRENT, s"concurrency $concurrency below allowed threshold of $MIN_CONCURRENT")
+    require(concurrency <= MAX_CONCURRENT, s"concurrency $concurrency exceeds allowed threshold of $MAX_CONCURRENT")
     new ConcurrencyLimit(concurrency)
   }
 
diff --git a/common/scala/src/main/scala/org/apache/openwhisk/core/entity/LogLimit.scala b/common/scala/src/main/scala/org/apache/openwhisk/core/entity/LogLimit.scala
index be4b54c..c750394 100644
--- a/common/scala/src/main/scala/org/apache/openwhisk/core/entity/LogLimit.scala
+++ b/common/scala/src/main/scala/org/apache/openwhisk/core/entity/LogLimit.scala
@@ -37,9 +37,9 @@ case class LogLimitConfig(min: ByteSize, max: ByteSize, std: ByteSize)
  * before creating a new instance.
  *
  * Argument type is Int because of JSON deserializer vs. <code>ByteSize</code> and
- * compatibility with <code>MemoryLimit</code>
+ * compatibility with <code>MemoryLimit</code>.
  *
- * @param megabytes the memory limit in megabytes for the action
+ * @param megabytes the log limit in megabytes for the action
  */
 protected[core] class LogLimit private (val megabytes: Int) extends AnyVal {
   protected[core] def asMegaBytes: ByteSize = megabytes.megabytes
@@ -48,12 +48,16 @@ protected[core] class LogLimit private (val megabytes: Int) extends AnyVal {
 protected[core] object LogLimit extends ArgNormalizer[LogLimit] {
   val config = loadConfigOrThrow[MemoryLimitConfig](ConfigKeys.logLimit)
 
-  protected[core] val minLogSize: ByteSize = config.min
-  protected[core] val maxLogSize: ByteSize = config.max
-  protected[core] val stdLogSize: ByteSize = config.std
+  /** These values are set once at the beginning. Dynamic configuration updates are not supported at the moment. */
+  protected[core] val MIN_LOGSIZE: ByteSize = config.min
+  protected[core] val MAX_LOGSIZE: ByteSize = config.max
+  protected[core] val STD_LOGSIZE: ByteSize = config.std
+
+  /** A singleton LogLimit with default value */
+  protected[core] val standardLogLimit = LogLimit(STD_LOGSIZE)
 
   /** Gets LogLimit with default log limit */
-  protected[core] def apply(): LogLimit = LogLimit(stdLogSize)
+  protected[core] def apply(): LogLimit = standardLogLimit
 
   /**
    * Creates LogLimit for limit. Only the default limit is allowed currently.
@@ -64,8 +68,8 @@ protected[core] object LogLimit extends ArgNormalizer[LogLimit] {
    */
   @throws[IllegalArgumentException]
   protected[core] def apply(megabytes: ByteSize): LogLimit = {
-    require(megabytes >= minLogSize, s"log size $megabytes below allowed threshold of $minLogSize")
-    require(megabytes <= maxLogSize, s"log size $megabytes exceeds allowed threshold of $maxLogSize")
+    require(megabytes >= MIN_LOGSIZE, s"log size $megabytes below allowed threshold of $MIN_LOGSIZE")
+    require(megabytes <= MAX_LOGSIZE, s"log size $megabytes exceeds allowed threshold of $MAX_LOGSIZE")
     new LogLimit(megabytes.toMB.toInt)
   }
 
diff --git a/common/scala/src/main/scala/org/apache/openwhisk/core/entity/MemoryLimit.scala b/common/scala/src/main/scala/org/apache/openwhisk/core/entity/MemoryLimit.scala
index 928a31b..015c317 100644
--- a/common/scala/src/main/scala/org/apache/openwhisk/core/entity/MemoryLimit.scala
+++ b/common/scala/src/main/scala/org/apache/openwhisk/core/entity/MemoryLimit.scala
@@ -44,12 +44,16 @@ protected[entity] class MemoryLimit private (val megabytes: Int) extends AnyVal
 protected[core] object MemoryLimit extends ArgNormalizer[MemoryLimit] {
   val config = loadConfigOrThrow[MemoryLimitConfig](ConfigKeys.memory)
 
-  protected[core] val minMemory: ByteSize = config.min
-  protected[core] val maxMemory: ByteSize = config.max
-  protected[core] val stdMemory: ByteSize = config.std
+  /** These values are set once at the beginning. Dynamic configuration updates are not supported at the moment. */
+  protected[core] val MIN_MEMORY: ByteSize = config.min
+  protected[core] val MAX_MEMORY: ByteSize = config.max
+  protected[core] val STD_MEMORY: ByteSize = config.std
+
+  /** A singleton MemoryLimit with default value */
+  protected[core] val standardMemoryLimit = MemoryLimit(STD_MEMORY)
 
   /** Gets MemoryLimit with default value */
-  protected[core] def apply(): MemoryLimit = MemoryLimit(stdMemory)
+  protected[core] def apply(): MemoryLimit = standardMemoryLimit
 
   /**
    * Creates MemoryLimit for limit, iff limit is within permissible range.
@@ -60,8 +64,8 @@ protected[core] object MemoryLimit extends ArgNormalizer[MemoryLimit] {
    */
   @throws[IllegalArgumentException]
   protected[core] def apply(megabytes: ByteSize): MemoryLimit = {
-    require(megabytes >= minMemory, s"memory $megabytes below allowed threshold of $minMemory")
-    require(megabytes <= maxMemory, s"memory $megabytes exceeds allowed threshold of $maxMemory")
+    require(megabytes >= MIN_MEMORY, s"memory $megabytes below allowed threshold of $MIN_MEMORY")
+    require(megabytes <= MAX_MEMORY, s"memory $megabytes exceeds allowed threshold of $MAX_MEMORY")
     new MemoryLimit(megabytes.toMB.toInt)
   }
 
diff --git a/common/scala/src/main/scala/org/apache/openwhisk/core/entity/TimeLimit.scala b/common/scala/src/main/scala/org/apache/openwhisk/core/entity/TimeLimit.scala
index 59a643a..7c74178 100644
--- a/common/scala/src/main/scala/org/apache/openwhisk/core/entity/TimeLimit.scala
+++ b/common/scala/src/main/scala/org/apache/openwhisk/core/entity/TimeLimit.scala
@@ -49,12 +49,16 @@ case class TimeLimitConfig(max: FiniteDuration, min: FiniteDuration, std: Finite
 protected[core] object TimeLimit extends ArgNormalizer[TimeLimit] {
   val config = loadConfigOrThrow[TimeLimitConfig](ConfigKeys.timeLimit)
 
+  /** These values are set once at the beginning. Dynamic configuration updates are not supported at the moment. */
   protected[core] val MIN_DURATION: FiniteDuration = config.min
   protected[core] val MAX_DURATION: FiniteDuration = config.max
   protected[core] val STD_DURATION: FiniteDuration = config.std
 
+  /** A singleton TimeLimit with default value */
+  protected[core] val standardTimeLimit = TimeLimit(STD_DURATION)
+
   /** Gets TimeLimit with default duration */
-  protected[core] def apply(): TimeLimit = TimeLimit(STD_DURATION)
+  protected[core] def apply(): TimeLimit = standardTimeLimit
 
   /**
    * Creates TimeLimit for duration, iff duration is within permissible range.
diff --git a/core/controller/src/main/scala/org/apache/openwhisk/core/loadBalancer/InvokerSupervision.scala b/core/controller/src/main/scala/org/apache/openwhisk/core/loadBalancer/InvokerSupervision.scala
index 736a6ab..56f04af 100644
--- a/core/controller/src/main/scala/org/apache/openwhisk/core/loadBalancer/InvokerSupervision.scala
+++ b/core/controller/src/main/scala/org/apache/openwhisk/core/loadBalancer/InvokerSupervision.scala
@@ -277,7 +277,7 @@ object InvokerPool {
         namespace = healthActionIdentity.namespace.name.toPath,
         name = EntityName(s"invokerHealthTestAction${i.asString}"),
         exec = CodeExecAsString(manifest, """function main(params) { return params; }""", None),
-        limits = ActionLimits(memory = MemoryLimit(MemoryLimit.minMemory)))
+        limits = ActionLimits(memory = MemoryLimit(MemoryLimit.MIN_MEMORY)))
     }
 }
 
diff --git a/core/controller/src/main/scala/org/apache/openwhisk/core/loadBalancer/ShardingContainerPoolBalancer.scala b/core/controller/src/main/scala/org/apache/openwhisk/core/loadBalancer/ShardingContainerPoolBalancer.scala
index 46274d9..4a57400 100644
--- a/core/controller/src/main/scala/org/apache/openwhisk/core/loadBalancer/ShardingContainerPoolBalancer.scala
+++ b/core/controller/src/main/scala/org/apache/openwhisk/core/loadBalancer/ShardingContainerPoolBalancer.scala
@@ -289,9 +289,14 @@ class ShardingContainerPoolBalancer(
 
     chosen
       .map { invoker =>
+        // MemoryLimit() and TimeLimit() return singletons - they should be fast enough to be used here
+        val memoryLimit = action.limits.memory
+        val memoryLimitInfo = if (memoryLimit == MemoryLimit()) { "std" } else { "non-std" }
+        val timeLimit = action.limits.timeout
+        val timeLimitInfo = if (timeLimit == TimeLimit()) { "std" } else { "non-std" }
         logging.info(
           this,
-          s"activation ${msg.activationId} for '${msg.action.asString}' ($actionType) by namespace '${msg.user.namespace.name.asString}' with memory limit ${action.limits.memory.megabytes}MB assigned to $invoker")
+          s"scheduled activation ${msg.activationId}, action '${msg.action.asString}' ($actionType), ns '${msg.user.namespace.name.asString}', mem limit ${memoryLimit.megabytes} MB (${memoryLimitInfo}), time limit ${timeLimit.duration.toMillis} ms (${timeLimitInfo}) to ${invoker}")
         val activationResult = setupActivation(msg, action, invoker)
         sendActivationToInvoker(messageProducer, msg, invoker).map(_ => activationResult)
       }
@@ -302,7 +307,9 @@ class ShardingContainerPoolBalancer(
           agg + (curr.status -> count)
         }
 
-        logging.error(this, s"failed to schedule $actionType action, invokers to use: $invokerStates")
+        logging.error(
+          this,
+          s"failed to schedule activation ${msg.activationId}, action '${msg.action.asString}' ($actionType), ns '${msg.user.namespace.name.asString}' - invokers to use: $invokerStates")
         Future.failed(LoadBalancerException("No invokers available"))
       }
   }
@@ -474,14 +481,17 @@ case class ShardingContainerPoolBalancerState(
    * @return calculated invoker slot
    */
   private def getInvokerSlot(memory: ByteSize): ByteSize = {
-    val newTreshold = if (memory / _clusterSize < MemoryLimit.minMemory) {
-      logging.warn(
+    val invokerShardMemorySize = memory / _clusterSize
+    val newTreshold = if (invokerShardMemorySize < MemoryLimit.MIN_MEMORY) {
+      logging.error(
         this,
-        s"registered controllers: ${_clusterSize}: the slots per invoker fall below the min memory of one action.")(
+        s"registered controllers: calculated controller's invoker shard memory size falls below the min memory of one action. "
+          + s"Setting to min memory. Expect invoker overloads. Cluster size ${_clusterSize}, invoker user memory size ${memory.toMB.MB}, "
+          + s"min action memory size ${MemoryLimit.MIN_MEMORY.toMB.MB}, calculated shard size ${invokerShardMemorySize.toMB.MB}.")(
         TransactionId.loadbalancer)
-      MemoryLimit.minMemory
+      MemoryLimit.MIN_MEMORY
     } else {
-      memory / _clusterSize
+      invokerShardMemorySize
     }
     newTreshold
   }
diff --git a/core/invoker/src/main/scala/org/apache/openwhisk/core/invoker/InvokerReactive.scala b/core/invoker/src/main/scala/org/apache/openwhisk/core/invoker/InvokerReactive.scala
index 66c7189..db28703 100644
--- a/core/invoker/src/main/scala/org/apache/openwhisk/core/invoker/InvokerReactive.scala
+++ b/core/invoker/src/main/scala/org/apache/openwhisk/core/invoker/InvokerReactive.scala
@@ -131,7 +131,7 @@ class InvokerReactive(
 
   /** Initialize message consumers */
   private val topic = s"invoker${instance.toInt}"
-  private val maximumContainers = (poolConfig.userMemory / MemoryLimit.minMemory).toInt
+  private val maximumContainers = (poolConfig.userMemory / MemoryLimit.MIN_MEMORY).toInt
   private val msgProvider = SpiLoader.get[MessagingProvider]
 
   //number of peeked messages - increasing the concurrentPeekFactor improves concurrent usage, but adds risk for message loss in case of crash
diff --git a/tests/src/test/scala/org/apache/openwhisk/core/containerpool/test/ContainerPoolTests.scala b/tests/src/test/scala/org/apache/openwhisk/core/containerpool/test/ContainerPoolTests.scala
index 431b3f2..8eacda9 100644
--- a/tests/src/test/scala/org/apache/openwhisk/core/containerpool/test/ContainerPoolTests.scala
+++ b/tests/src/test/scala/org/apache/openwhisk/core/containerpool/test/ContainerPoolTests.scala
@@ -96,7 +96,7 @@ class ContainerPoolTests
   val largeAction =
     action.copy(
       name = EntityName("largeAction"),
-      limits = ActionLimits(memory = MemoryLimit(MemoryLimit.stdMemory * 2)))
+      limits = ActionLimits(memory = MemoryLimit(MemoryLimit.STD_MEMORY * 2)))
 
   val runMessage = createRunMessage(action, invocationNamespace)
   val runMessageLarge = createRunMessage(largeAction, invocationNamespace)
@@ -139,7 +139,7 @@ class ContainerPoolTests
     val (containers, factory) = testContainers(2)
     val feed = TestProbe()
     // Actions are created with default memory limit (MemoryLimit.stdMemory). This means 4 actions can be scheduled.
-    val pool = system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.stdMemory * 4), feed.ref))
+    val pool = system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.STD_MEMORY * 4), feed.ref))
 
     pool ! runMessage
     containers(0).expectMsg(runMessage)
@@ -154,7 +154,7 @@ class ContainerPoolTests
     val (containers, factory) = testContainers(2)
     val feed = TestProbe()
     // Actions are created with default memory limit (MemoryLimit.stdMemory). This means 4 actions can be scheduled.
-    val pool = system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.stdMemory * 4), feed.ref))
+    val pool = system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.STD_MEMORY * 4), feed.ref))
 
     pool ! runMessage
     containers(0).expectMsg(runMessage)
@@ -170,7 +170,7 @@ class ContainerPoolTests
     val feed = TestProbe()
 
     // Actions are created with default memory limit (MemoryLimit.stdMemory). This means 4 actions can be scheduled.
-    val pool = system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.stdMemory * 4), feed.ref))
+    val pool = system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.STD_MEMORY * 4), feed.ref))
     pool ! runMessage
     containers(0).expectMsg(runMessage)
     // Note that the container doesn't respond, thus it's not free to take work
@@ -184,7 +184,7 @@ class ContainerPoolTests
     val feed = TestProbe()
 
     // a pool with only 1 slot
-    val pool = system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.stdMemory), feed.ref))
+    val pool = system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.STD_MEMORY), feed.ref))
     pool ! runMessage
     containers(0).expectMsg(runMessage)
     containers(0).send(pool, NeedWork(warmedData()))
@@ -222,7 +222,7 @@ class ContainerPoolTests
     val feed = TestProbe()
 
     // a pool with only 1 active slot but 2 slots in total
-    val pool = system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.stdMemory * 2), feed.ref))
+    val pool = system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.STD_MEMORY * 2), feed.ref))
 
     // Run the first container
     pool ! runMessage
@@ -248,7 +248,7 @@ class ContainerPoolTests
     val feed = TestProbe()
 
     // a pool with only 1 slot
-    val pool = system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.stdMemory), feed.ref))
+    val pool = system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.STD_MEMORY), feed.ref))
     pool ! runMessage
     containers(0).expectMsg(runMessage)
     containers(0).send(pool, NeedWork(warmedData()))
@@ -263,7 +263,7 @@ class ContainerPoolTests
     val feed = TestProbe()
 
     // a pool with only 1 slot
-    val pool = system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.stdMemory), feed.ref))
+    val pool = system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.STD_MEMORY), feed.ref))
     pool ! runMessage
     containers(0).expectMsg(runMessage)
     containers(0).send(pool, RescheduleJob) // emulate container failure ...
@@ -276,7 +276,7 @@ class ContainerPoolTests
     val (containers, factory) = testContainers(2)
     val feed = TestProbe()
 
-    val pool = system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.stdMemory * 2), feed.ref))
+    val pool = system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.STD_MEMORY * 2), feed.ref))
 
     // Start first action
     pool ! runMessage // 1 * stdMemory taken
@@ -321,7 +321,7 @@ class ContainerPoolTests
     val pool =
       system.actorOf(
         ContainerPool
-          .props(factory, poolConfig(MemoryLimit.stdMemory), feed.ref, List(PrewarmingConfig(1, exec, memoryLimit))))
+          .props(factory, poolConfig(MemoryLimit.STD_MEMORY), feed.ref, List(PrewarmingConfig(1, exec, memoryLimit))))
     containers(0).expectMsg(Start(exec, memoryLimit))
     containers(0).send(pool, NeedWork(preWarmedData(exec.kind)))
     pool ! runMessage
@@ -338,7 +338,7 @@ class ContainerPoolTests
       ContainerPool
         .props(
           factory,
-          poolConfig(MemoryLimit.stdMemory),
+          poolConfig(MemoryLimit.STD_MEMORY),
           feed.ref,
           List(PrewarmingConfig(1, alternativeExec, memoryLimit))))
     containers(0).expectMsg(Start(alternativeExec, memoryLimit)) // container0 was prewarmed
@@ -354,8 +354,13 @@ class ContainerPoolTests
     val alternativeLimit = 128.MB
 
     val pool =
-      system.actorOf(ContainerPool
-        .props(factory, poolConfig(MemoryLimit.stdMemory), feed.ref, List(PrewarmingConfig(1, exec, alternativeLimit))))
+      system.actorOf(
+        ContainerPool
+          .props(
+            factory,
+            poolConfig(MemoryLimit.STD_MEMORY),
+            feed.ref,
+            List(PrewarmingConfig(1, exec, alternativeLimit))))
     containers(0).expectMsg(Start(exec, alternativeLimit)) // container0 was prewarmed
     containers(0).send(pool, NeedWork(preWarmedData(exec.kind, alternativeLimit)))
     pool ! runMessage
@@ -369,7 +374,7 @@ class ContainerPoolTests
     val (containers, factory) = testContainers(2)
     val feed = TestProbe()
 
-    val pool = system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.stdMemory * 4), feed.ref))
+    val pool = system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.STD_MEMORY * 4), feed.ref))
 
     // container0 is created and used
     pool ! runMessage
@@ -399,7 +404,7 @@ class ContainerPoolTests
 
     // Pool with 512 MB usermemory
     val pool =
-      system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.stdMemory * 2), feed.ref))
+      system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.STD_MEMORY * 2), feed.ref))
 
     // Send action that blocks the pool
     pool ! runMessageLarge
@@ -431,7 +436,7 @@ class ContainerPoolTests
     val feed = TestProbe()
 
     // Pool with 512 MB usermemory
-    val pool = system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.stdMemory * 2), feed.ref))
+    val pool = system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.STD_MEMORY * 2), feed.ref))
 
     // Send 4 actions to the ContainerPool (Action 0, Action 2 and Action 3 with each 265 MB and Action 1 with 512 MB)
     pool ! runMessage
@@ -485,7 +490,7 @@ class ContainerPoolTests
     val (containers, factory) = testContainers(2)
     val feed = TestProbe()
 
-    val pool = system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.stdMemory * 4), feed.ref))
+    val pool = system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.STD_MEMORY * 4), feed.ref))
 
     // container0 is created and used
     pool ! runMessageConcurrent
@@ -509,7 +514,7 @@ class ContainerPoolTests
     val (containers, factory) = testContainers(2)
     val feed = TestProbe()
 
-    val pool = system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.stdMemory * 4), feed.ref))
+    val pool = system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.STD_MEMORY * 4), feed.ref))
 
     // container0 is created and used
     pool ! runMessageConcurrent
@@ -525,7 +530,7 @@ class ContainerPoolTests
     val (containers, factory) = testContainers(2)
     val feed = TestProbe()
 
-    val pool = system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.stdMemory * 4), feed.ref))
+    val pool = system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.STD_MEMORY * 4), feed.ref))
 
     // container0 is created and used
     pool ! runMessageConcurrent
@@ -755,24 +760,24 @@ class ContainerPoolObjectTests extends FlatSpec with Matchers with MockFactory {
   behavior of "ContainerPool remove()"
 
   it should "not provide a container if pool is empty" in {
-    ContainerPool.remove(Map.empty, MemoryLimit.stdMemory) shouldBe List.empty
+    ContainerPool.remove(Map.empty, MemoryLimit.STD_MEMORY) shouldBe List.empty
   }
 
   it should "not provide a container from busy pool with non-warm containers" in {
     val pool = Map('none -> noData(), 'pre -> preWarmedData())
-    ContainerPool.remove(pool, MemoryLimit.stdMemory) shouldBe List.empty
+    ContainerPool.remove(pool, MemoryLimit.STD_MEMORY) shouldBe List.empty
   }
 
   it should "not provide a container from pool if there is not enough capacity" in {
     val pool = Map('first -> warmedData())
 
-    ContainerPool.remove(pool, MemoryLimit.stdMemory * 2) shouldBe List.empty
+    ContainerPool.remove(pool, MemoryLimit.STD_MEMORY * 2) shouldBe List.empty
   }
 
   it should "provide a container from pool with one single free container" in {
     val data = warmedData()
     val pool = Map('warm -> data)
-    ContainerPool.remove(pool, MemoryLimit.stdMemory) shouldBe List('warm)
+    ContainerPool.remove(pool, MemoryLimit.STD_MEMORY) shouldBe List('warm)
   }
 
   it should "provide oldest container from busy pool with multiple containers" in {
@@ -783,7 +788,7 @@ class ContainerPoolObjectTests extends FlatSpec with Matchers with MockFactory {
 
     val pool = Map('first -> first, 'second -> second, 'oldest -> oldest)
 
-    ContainerPool.remove(pool, MemoryLimit.stdMemory) shouldBe List('oldest)
+    ContainerPool.remove(pool, MemoryLimit.STD_MEMORY) shouldBe List('oldest)
   }
 
   it should "provide a list of the oldest containers from pool, if several containers have to be removed" in {
@@ -795,7 +800,7 @@ class ContainerPoolObjectTests extends FlatSpec with Matchers with MockFactory {
 
     val pool = Map('first -> first, 'second -> second, 'third -> third, 'oldest -> oldest)
 
-    ContainerPool.remove(pool, MemoryLimit.stdMemory * 2) shouldBe List('oldest, 'first)
+    ContainerPool.remove(pool, MemoryLimit.STD_MEMORY * 2) shouldBe List('oldest, 'first)
   }
 
   it should "provide oldest container (excluding concurrently busy) from busy pool with multiple containers" in {
@@ -805,8 +810,8 @@ class ContainerPoolObjectTests extends FlatSpec with Matchers with MockFactory {
     val oldest = warmedData(namespace = commonNamespace, lastUsed = Instant.ofEpochMilli(0), active = 3)
 
     var pool = Map('first -> first, 'second -> second, 'oldest -> oldest)
-    ContainerPool.remove(pool, MemoryLimit.stdMemory) shouldBe List('first)
+    ContainerPool.remove(pool, MemoryLimit.STD_MEMORY) shouldBe List('first)
     pool = pool - 'first
-    ContainerPool.remove(pool, MemoryLimit.stdMemory) shouldBe List('second)
+    ContainerPool.remove(pool, MemoryLimit.STD_MEMORY) shouldBe List('second)
   }
 }
diff --git a/tests/src/test/scala/org/apache/openwhisk/core/controller/test/ActionsApiTests.scala b/tests/src/test/scala/org/apache/openwhisk/core/controller/test/ActionsApiTests.scala
index 20e3cc4..1d1c47b 100644
--- a/tests/src/test/scala/org/apache/openwhisk/core/controller/test/ActionsApiTests.scala
+++ b/tests/src/test/scala/org/apache/openwhisk/core/controller/test/ActionsApiTests.scala
@@ -1292,9 +1292,9 @@ class ActionsApiTests extends ControllerTestCommon with WhiskActionsApi {
       Some(
         ActionLimitsOption(
           Some(TimeLimit(TimeLimit.MAX_DURATION)),
-          Some(MemoryLimit(MemoryLimit.maxMemory)),
-          Some(LogLimit(LogLimit.maxLogSize)),
-          Some(ConcurrencyLimit(ConcurrencyLimit.maxConcurrent)))))
+          Some(MemoryLimit(MemoryLimit.MAX_MEMORY)),
+          Some(LogLimit(LogLimit.MAX_LOGSIZE)),
+          Some(ConcurrencyLimit(ConcurrencyLimit.MAX_CONCURRENT)))))
     put(entityStore, action)
     Put(s"$collectionPath/${action.name}?overwrite=true", content) ~> Route.seal(routes(creds)) ~> check {
       deleteAction(action.docid)
diff --git a/tests/src/test/scala/org/apache/openwhisk/core/entity/test/SchemaTests.scala b/tests/src/test/scala/org/apache/openwhisk/core/entity/test/SchemaTests.scala
index 044e44c..fa8eaaf 100644
--- a/tests/src/test/scala/org/apache/openwhisk/core/entity/test/SchemaTests.scala
+++ b/tests/src/test/scala/org/apache/openwhisk/core/entity/test/SchemaTests.scala
@@ -698,18 +698,18 @@ class SchemaTests extends FlatSpec with BeforeAndAfter with ExecHelpers with Mat
     val json = Seq[JsValue](
       JsObject(
         "timeout" -> TimeLimit.STD_DURATION.toMillis.toInt.toJson,
-        "memory" -> MemoryLimit.stdMemory.toMB.toInt.toJson,
-        "logs" -> LogLimit.stdLogSize.toMB.toInt.toJson,
-        "concurrency" -> ConcurrencyLimit.stdConcurrent.toInt.toJson),
+        "memory" -> MemoryLimit.STD_MEMORY.toMB.toInt.toJson,
+        "logs" -> LogLimit.STD_LOGSIZE.toMB.toInt.toJson,
+        "concurrency" -> ConcurrencyLimit.STD_CONCURRENT.toInt.toJson),
       JsObject(
         "timeout" -> TimeLimit.STD_DURATION.toMillis.toInt.toJson,
-        "memory" -> MemoryLimit.stdMemory.toMB.toInt.toJson,
-        "logs" -> LogLimit.stdLogSize.toMB.toInt.toJson,
-        "concurrency" -> ConcurrencyLimit.stdConcurrent.toInt.toJson,
+        "memory" -> MemoryLimit.STD_MEMORY.toMB.toInt.toJson,
+        "logs" -> LogLimit.STD_LOGSIZE.toMB.toInt.toJson,
+        "concurrency" -> ConcurrencyLimit.STD_CONCURRENT.toInt.toJson,
         "foo" -> "bar".toJson),
       JsObject(
         "timeout" -> TimeLimit.STD_DURATION.toMillis.toInt.toJson,
-        "memory" -> MemoryLimit.stdMemory.toMB.toInt.toJson))
+        "memory" -> MemoryLimit.STD_MEMORY.toMB.toInt.toJson))
     val limits = json.map(ActionLimits.serdes.read)
     assert(limits(0) == ActionLimits())
     assert(limits(1) == ActionLimits())
@@ -725,19 +725,19 @@ class SchemaTests extends FlatSpec with BeforeAndAfter with ExecHelpers with Mat
       JsObject.empty,
       JsNull,
       JsObject("timeout" -> TimeLimit.STD_DURATION.toMillis.toInt.toJson),
-      JsObject("memory" -> MemoryLimit.stdMemory.toMB.toInt.toJson),
-      JsObject("logs" -> (LogLimit.stdLogSize.toMB.toInt + 1).toJson),
+      JsObject("memory" -> MemoryLimit.STD_MEMORY.toMB.toInt.toJson),
+      JsObject("logs" -> (LogLimit.STD_LOGSIZE.toMB.toInt + 1).toJson),
       JsObject(
         "TIMEOUT" -> TimeLimit.STD_DURATION.toMillis.toInt.toJson,
-        "MEMORY" -> MemoryLimit.stdMemory.toMB.toInt.toJson),
+        "MEMORY" -> MemoryLimit.STD_MEMORY.toMB.toInt.toJson),
       JsObject(
         "timeout" -> (TimeLimit.STD_DURATION.toMillis.toDouble + .01).toJson,
-        "memory" -> (MemoryLimit.stdMemory.toMB.toDouble + .01).toJson),
+        "memory" -> (MemoryLimit.STD_MEMORY.toMB.toDouble + .01).toJson),
       JsObject("timeout" -> null, "memory" -> null),
       JsObject("timeout" -> JsNull, "memory" -> JsNull),
       JsObject(
         "timeout" -> TimeLimit.STD_DURATION.toMillis.toString.toJson,
-        "memory" -> MemoryLimit.stdMemory.toMB.toInt.toString.toJson))
+        "memory" -> MemoryLimit.STD_MEMORY.toMB.toInt.toString.toJson))
 
     limits.foreach { p =>
       a[DeserializationException] should be thrownBy ActionLimits.serdes.read(p)
@@ -773,17 +773,17 @@ class SchemaTests extends FlatSpec with BeforeAndAfter with ExecHelpers with Mat
       LogLimit())
     an[IllegalArgumentException] should be thrownBy ActionLimits(
       TimeLimit(),
-      MemoryLimit(MemoryLimit.minMemory - 1.B),
+      MemoryLimit(MemoryLimit.MIN_MEMORY - 1.B),
       LogLimit())
     an[IllegalArgumentException] should be thrownBy ActionLimits(
       TimeLimit(),
       MemoryLimit(),
-      LogLimit(LogLimit.minLogSize - 1.B))
+      LogLimit(LogLimit.MIN_LOGSIZE - 1.B))
     an[IllegalArgumentException] should be thrownBy ActionLimits(
       TimeLimit(),
       MemoryLimit(),
       LogLimit(),
-      ConcurrencyLimit(ConcurrencyLimit.minConcurrent - 1))
+      ConcurrencyLimit(ConcurrencyLimit.MIN_CONCURRENT - 1))
 
     an[IllegalArgumentException] should be thrownBy ActionLimits(
       TimeLimit(TimeLimit.MAX_DURATION + 1.millisecond),
@@ -791,17 +791,17 @@ class SchemaTests extends FlatSpec with BeforeAndAfter with ExecHelpers with Mat
       LogLimit())
     an[IllegalArgumentException] should be thrownBy ActionLimits(
       TimeLimit(),
-      MemoryLimit(MemoryLimit.maxMemory + 1.B),
+      MemoryLimit(MemoryLimit.MAX_MEMORY + 1.B),
       LogLimit())
     an[IllegalArgumentException] should be thrownBy ActionLimits(
       TimeLimit(),
       MemoryLimit(),
-      LogLimit(LogLimit.maxLogSize + 1.B))
+      LogLimit(LogLimit.MAX_LOGSIZE + 1.B))
     an[IllegalArgumentException] should be thrownBy ActionLimits(
       TimeLimit(),
       MemoryLimit(),
       LogLimit(),
-      ConcurrencyLimit(ConcurrencyLimit.maxConcurrent + 1))
+      ConcurrencyLimit(ConcurrencyLimit.MAX_CONCURRENT + 1))
   }
 
   it should "parse activation id as uuid" in {
diff --git a/tests/src/test/scala/org/apache/openwhisk/core/limits/ActionLimitsTests.scala b/tests/src/test/scala/org/apache/openwhisk/core/limits/ActionLimitsTests.scala
index 14acb9a..62d3508 100644
--- a/tests/src/test/scala/org/apache/openwhisk/core/limits/ActionLimitsTests.scala
+++ b/tests/src/test/scala/org/apache/openwhisk/core/limits/ActionLimitsTests.scala
@@ -97,32 +97,32 @@ class ActionLimitsTests extends TestHelpers with WskTestHelpers with WskActorSys
     }
 
     val toMemoryString = memory match {
-      case None                                   => "None"
-      case Some(MemoryLimit.minMemory)            => s"${MemoryLimit.minMemory.toMB.MB} (= min)"
-      case Some(MemoryLimit.stdMemory)            => s"${MemoryLimit.stdMemory.toMB.MB} (= std)"
-      case Some(MemoryLimit.maxMemory)            => s"${MemoryLimit.maxMemory.toMB.MB} (= max)"
-      case Some(m) if (m < MemoryLimit.minMemory) => s"${m.toMB.MB} (< min)"
-      case Some(m) if (m > MemoryLimit.maxMemory) => s"${m.toMB.MB} (> max)"
-      case Some(m)                                => s"${m.toMB.MB} (allowed)"
+      case None                                    => "None"
+      case Some(MemoryLimit.MIN_MEMORY)            => s"${MemoryLimit.MIN_MEMORY.toMB.MB} (= min)"
+      case Some(MemoryLimit.STD_MEMORY)            => s"${MemoryLimit.STD_MEMORY.toMB.MB} (= std)"
+      case Some(MemoryLimit.MAX_MEMORY)            => s"${MemoryLimit.MAX_MEMORY.toMB.MB} (= max)"
+      case Some(m) if (m < MemoryLimit.MIN_MEMORY) => s"${m.toMB.MB} (< min)"
+      case Some(m) if (m > MemoryLimit.MAX_MEMORY) => s"${m.toMB.MB} (> max)"
+      case Some(m)                                 => s"${m.toMB.MB} (allowed)"
     }
 
     val toLogsString = logs match {
-      case None                                 => "None"
-      case Some(LogLimit.minLogSize)            => s"${LogLimit.minLogSize} (= min)"
-      case Some(LogLimit.stdLogSize)            => s"${LogLimit.stdLogSize} (= std)"
-      case Some(LogLimit.maxLogSize)            => s"${LogLimit.maxLogSize} (= max)"
-      case Some(l) if (l < LogLimit.minLogSize) => s"${l} (< min)"
-      case Some(l) if (l > LogLimit.maxLogSize) => s"${l} (> max)"
-      case Some(l)                              => s"${l} (allowed)"
+      case None                                  => "None"
+      case Some(LogLimit.MIN_LOGSIZE)            => s"${LogLimit.MIN_LOGSIZE} (= min)"
+      case Some(LogLimit.STD_LOGSIZE)            => s"${LogLimit.STD_LOGSIZE} (= std)"
+      case Some(LogLimit.MAX_LOGSIZE)            => s"${LogLimit.MAX_LOGSIZE} (= max)"
+      case Some(l) if (l < LogLimit.MIN_LOGSIZE) => s"${l} (< min)"
+      case Some(l) if (l > LogLimit.MAX_LOGSIZE) => s"${l} (> max)"
+      case Some(l)                               => s"${l} (allowed)"
     }
     val toConcurrencyString = concurrency match {
-      case None                                            => "None"
-      case Some(ConcurrencyLimit.minConcurrent)            => s"${ConcurrencyLimit.minConcurrent} (= min)"
-      case Some(ConcurrencyLimit.stdConcurrent)            => s"${ConcurrencyLimit.stdConcurrent} (= std)"
-      case Some(ConcurrencyLimit.maxConcurrent)            => s"${ConcurrencyLimit.maxConcurrent} (= max)"
-      case Some(c) if (c < ConcurrencyLimit.minConcurrent) => s"${c} (< min)"
-      case Some(c) if (c > ConcurrencyLimit.maxConcurrent) => s"${c} (> max)"
-      case Some(c)                                         => s"${c} (allowed)"
+      case None                                             => "None"
+      case Some(ConcurrencyLimit.MIN_CONCURRENT)            => s"${ConcurrencyLimit.MIN_CONCURRENT} (= min)"
+      case Some(ConcurrencyLimit.STD_CONCURRENT)            => s"${ConcurrencyLimit.STD_CONCURRENT} (= std)"
+      case Some(ConcurrencyLimit.MAX_CONCURRENT)            => s"${ConcurrencyLimit.MAX_CONCURRENT} (= max)"
+      case Some(c) if (c < ConcurrencyLimit.MIN_CONCURRENT) => s"${c} (< min)"
+      case Some(c) if (c > ConcurrencyLimit.MAX_CONCURRENT) => s"${c} (> max)"
+      case Some(c)                                          => s"${c} (allowed)"
     }
     val toExpectedResultString: String = if (ec == SUCCESS_EXIT) "allow" else "reject"
   }
@@ -132,12 +132,12 @@ class ActionLimitsTests extends TestHelpers with WskTestHelpers with WskActorSys
   val perms = { // Assert for valid permutations that the values are set correctly
     for {
       time <- Seq(None, Some(TimeLimit.MIN_DURATION), Some(TimeLimit.MAX_DURATION))
-      mem <- Seq(None, Some(MemoryLimit.minMemory), Some(MemoryLimit.maxMemory))
-      log <- Seq(None, Some(LogLimit.minLogSize), Some(LogLimit.maxLogSize))
-      concurrency <- if (!concurrencyEnabled || (ConcurrencyLimit.minConcurrent == ConcurrencyLimit.maxConcurrent)) {
-        Seq(None, Some(ConcurrencyLimit.minConcurrent))
+      mem <- Seq(None, Some(MemoryLimit.MIN_MEMORY), Some(MemoryLimit.MAX_MEMORY))
+      log <- Seq(None, Some(LogLimit.MIN_LOGSIZE), Some(LogLimit.MAX_LOGSIZE))
+      concurrency <- if (!concurrencyEnabled || (ConcurrencyLimit.MIN_CONCURRENT == ConcurrencyLimit.MAX_CONCURRENT)) {
+        Seq(None, Some(ConcurrencyLimit.MIN_CONCURRENT))
       } else {
-        Seq(None, Some(ConcurrencyLimit.minConcurrent), Some(ConcurrencyLimit.maxConcurrent))
+        Seq(None, Some(ConcurrencyLimit.MIN_CONCURRENT), Some(ConcurrencyLimit.MAX_CONCURRENT))
       }
     } yield PermutationTestParameter(time, mem, log, concurrency)
   } ++
@@ -148,9 +148,9 @@ class ActionLimitsTests extends TestHelpers with WskTestHelpers with WskActorSys
       PermutationTestParameter(Some(TimeLimit.MAX_DURATION * 10), None, None, None, BAD_REQUEST), // timeout that is much higher than allowed
       PermutationTestParameter(None, Some(0.MB), None, None, BAD_REQUEST), // memory limit that is lower than allowed
       PermutationTestParameter(None, None, None, Some(0), BAD_REQUEST), // concurrency limit that is lower than allowed
-      PermutationTestParameter(None, Some(MemoryLimit.maxMemory + 1.MB), None, None, BAD_REQUEST), // memory limit that is slightly higher than allowed
-      PermutationTestParameter(None, Some((MemoryLimit.maxMemory.toMB * 5).MB), None, None, BAD_REQUEST), // memory limit that is much higher than allowed
-      PermutationTestParameter(None, None, Some((LogLimit.maxLogSize.toMB * 5).MB), None, BAD_REQUEST), // log size limit that is much higher than allowed
+      PermutationTestParameter(None, Some(MemoryLimit.MAX_MEMORY + 1.MB), None, None, BAD_REQUEST), // memory limit that is slightly higher than allowed
+      PermutationTestParameter(None, Some((MemoryLimit.MAX_MEMORY.toMB * 5).MB), None, None, BAD_REQUEST), // memory limit that is much higher than allowed
+      PermutationTestParameter(None, None, Some((LogLimit.MAX_LOGSIZE.toMB * 5).MB), None, BAD_REQUEST), // log size limit that is much higher than allowed
       PermutationTestParameter(None, None, None, Some(Int.MaxValue), BAD_REQUEST)) // concurrency limit that is much higher than allowed
 
   /**
@@ -170,9 +170,9 @@ class ActionLimitsTests extends TestHelpers with WskTestHelpers with WskActorSys
       // Limits to assert, standard values if CLI omits certain values
       val limits = JsObject(
         "timeout" -> parm.timeout.getOrElse(TimeLimit.STD_DURATION).toMillis.toJson,
-        "memory" -> parm.memory.getOrElse(MemoryLimit.stdMemory).toMB.toInt.toJson,
-        "logs" -> parm.logs.getOrElse(LogLimit.stdLogSize).toMB.toInt.toJson,
-        "concurrency" -> parm.concurrency.getOrElse(ConcurrencyLimit.stdConcurrent).toJson)
+        "memory" -> parm.memory.getOrElse(MemoryLimit.STD_MEMORY).toMB.toInt.toJson,
+        "logs" -> parm.logs.getOrElse(LogLimit.STD_LOGSIZE).toMB.toInt.toJson,
+        "concurrency" -> parm.concurrency.getOrElse(ConcurrencyLimit.STD_CONCURRENT).toJson)
 
       val name = "ActionLimitTests-" + Instant.now.toEpochMilli
       val createResult = assetHelper.withCleaner(wsk.action, name, confirmDelete = (parm.ec == SUCCESS_EXIT)) {
@@ -436,7 +436,7 @@ class ActionLimitsTests extends TestHelpers with WskTestHelpers with WskActorSys
 
   it should "be able to run a memory intensive actions" in withAssetCleaner(wskprops) { (wp, assetHelper) =>
     val name = "TestNodeJsInvokeHighMemory"
-    val allowedMemory = MemoryLimit.maxMemory
+    val allowedMemory = MemoryLimit.MAX_MEMORY
     assetHelper.withCleaner(wsk.action, name, confirmDelete = true) {
       val actionName = TestUtils.getTestActionFilename("memoryWithGC.js")
       (action, _) =>
@@ -453,13 +453,13 @@ class ActionLimitsTests extends TestHelpers with WskTestHelpers with WskActorSys
   it should "be aborted when exceeding its memory limits" in withAssetCleaner(wskprops) { (wp, assetHelper) =>
     val name = "TestNodeJsMemoryExceeding"
     assetHelper.withCleaner(wsk.action, name, confirmDelete = true) {
-      val allowedMemory = MemoryLimit.minMemory
+      val allowedMemory = MemoryLimit.MIN_MEMORY
       val actionName = TestUtils.getTestActionFilename("memoryWithGC.js")
       (action, _) =>
         action.create(name, Some(actionName), memory = Some(allowedMemory))
     }
 
-    val payload = MemoryLimit.minMemory.toMB * 2
+    val payload = MemoryLimit.MIN_MEMORY.toMB * 2
     val run = wsk.action.invoke(name, Map("payload" -> payload.toJson))
     withActivation(wsk.activation, run) {
       _.response.result.get.fields("error") shouldBe Messages.memoryExhausted.toJson
diff --git a/tests/src/test/scala/org/apache/openwhisk/core/limits/ConcurrencyTests.scala b/tests/src/test/scala/org/apache/openwhisk/core/limits/ConcurrencyTests.scala
index eb0e110..d0a43cf 100644
--- a/tests/src/test/scala/org/apache/openwhisk/core/limits/ConcurrencyTests.scala
+++ b/tests/src/test/scala/org/apache/openwhisk/core/limits/ConcurrencyTests.scala
@@ -75,7 +75,7 @@ class ConcurrencyTests extends TestHelpers with WskTestHelpers with WskActorSyst
 
       //read configs to determine max concurrency support - currently based on single invoker and invokerUserMemory config
       val busyThreshold =
-        (loadConfigOrThrow[ContainerPoolConfig](ConfigKeys.containerPool).userMemory / MemoryLimit.stdMemory).toInt
+        (loadConfigOrThrow[ContainerPoolConfig](ConfigKeys.containerPool).userMemory / MemoryLimit.STD_MEMORY).toInt
 
       //run maximum allowed concurrent actions via Futures
       val requestCount = busyThreshold
@@ -124,7 +124,7 @@ class ConcurrencyTests extends TestHelpers with WskTestHelpers with WskActorSyst
 
       //read configs to determine max concurrency support - currently based on single invoker and invokerUserMemory config
       val busyThreshold =
-        (loadConfigOrThrow[ContainerPoolConfig](ConfigKeys.containerPool).userMemory / MemoryLimit.stdMemory).toInt
+        (loadConfigOrThrow[ContainerPoolConfig](ConfigKeys.containerPool).userMemory / MemoryLimit.STD_MEMORY).toInt
 
       //run maximum allowed concurrent actions via Futures
       val requestCount = busyThreshold
diff --git a/tests/src/test/scala/org/apache/openwhisk/core/loadBalancer/test/ShardingContainerPoolBalancerTests.scala b/tests/src/test/scala/org/apache/openwhisk/core/loadBalancer/test/ShardingContainerPoolBalancerTests.scala
index 42ddbcf..2447c08 100644
--- a/tests/src/test/scala/org/apache/openwhisk/core/loadBalancer/test/ShardingContainerPoolBalancerTests.scala
+++ b/tests/src/test/scala/org/apache/openwhisk/core/loadBalancer/test/ShardingContainerPoolBalancerTests.scala
@@ -101,7 +101,7 @@ class ShardingContainerPoolBalancerTests
   it should "update invoker's state, growing the slots data and keeping valid old data" in {
     // start empty
     val slots = 10
-    val memoryPerSlot = MemoryLimit.minMemory
+    val memoryPerSlot = MemoryLimit.MIN_MEMORY
     val memory = memoryPerSlot * slots
     val state = ShardingContainerPoolBalancerState()(lbConfig(0.5))
     state.invokers shouldBe 'empty
@@ -148,7 +148,7 @@ class ShardingContainerPoolBalancerTests
       val state = ShardingContainerPoolBalancerState()(lbConfig(bf))
 
       (1 to 100).toSeq.foreach { i =>
-        state.updateInvokers((1 to i).map(_ => healthy(1, MemoryLimit.stdMemory)))
+        state.updateInvokers((1 to i).map(_ => healthy(1, MemoryLimit.STD_MEMORY)))
 
         withClue(s"invoker count $bf $i:") {
           state.managedInvokers.length should be <= i
@@ -174,7 +174,7 @@ class ShardingContainerPoolBalancerTests
 
     val state = ShardingContainerPoolBalancerState()(lbConfig(1.0, Some(1.0)))
     (1 to 100).foreach { i =>
-      state.updateInvokers((1 to i).map(_ => healthy(1, MemoryLimit.stdMemory)))
+      state.updateInvokers((1 to i).map(_ => healthy(1, MemoryLimit.STD_MEMORY)))
     }
 
     state.managedInvokers should have size 100
@@ -185,7 +185,7 @@ class ShardingContainerPoolBalancerTests
 
   it should "update the cluster size, adjusting the invoker slots accordingly" in {
     val slots = 10
-    val memoryPerSlot = MemoryLimit.minMemory
+    val memoryPerSlot = MemoryLimit.MIN_MEMORY
     val memory = memoryPerSlot * slots
     val state = ShardingContainerPoolBalancerState()(lbConfig(0.5))
     state.updateInvokers(IndexedSeq(healthy(0, memory), healthy(1, memory * 2)))
@@ -203,7 +203,7 @@ class ShardingContainerPoolBalancerTests
 
   it should "fallback to a size of 1 (alone) if cluster size is < 1" in {
     val slots = 10
-    val memoryPerSlot = MemoryLimit.minMemory
+    val memoryPerSlot = MemoryLimit.MIN_MEMORY
     val memory = memoryPerSlot * slots
     val state = ShardingContainerPoolBalancerState()(lbConfig(0.5))
     state.updateInvokers(IndexedSeq(healthy(0, memory)))
@@ -222,7 +222,7 @@ class ShardingContainerPoolBalancerTests
 
   it should "set the threshold to 1 if the cluster is bigger than there are slots on 1 invoker" in {
     val slots = 10
-    val memoryPerSlot = MemoryLimit.minMemory
+    val memoryPerSlot = MemoryLimit.MIN_MEMORY
     val memory = memoryPerSlot * slots
     val state = ShardingContainerPoolBalancerState()(lbConfig(0.5))
     state.updateInvokers(IndexedSeq(healthy(0, memory)))
@@ -231,7 +231,7 @@ class ShardingContainerPoolBalancerTests
 
     state.updateCluster(20)
 
-    state.invokerSlots.head.availablePermits shouldBe MemoryLimit.minMemory.toMB
+    state.invokerSlots.head.availablePermits shouldBe MemoryLimit.MIN_MEMORY.toMB
   }
   val namespace = EntityPath("testspace")
   val name = EntityName("testname")
@@ -247,7 +247,7 @@ class ShardingContainerPoolBalancerTests
       fqn,
       IndexedSeq.empty,
       IndexedSeq.empty,
-      MemoryLimit.minMemory.toMB.toInt,
+      MemoryLimit.MIN_MEMORY.toMB.toInt,
       index = 0,
       step = 2) shouldBe None
   }
@@ -262,7 +262,7 @@ class ShardingContainerPoolBalancerTests
       fqn,
       invokers,
       invokerSlots,
-      MemoryLimit.minMemory.toMB.toInt,
+      MemoryLimit.MIN_MEMORY.toMB.toInt,
       index = 0,
       step = 2) shouldBe None
   }
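
As a postscript to the getInvokerSlot change in ShardingContainerPoolBalancer.scala above (error instead of warning when the per-controller shard falls below the smallest action memory), here is a hedged, standalone Scala sketch with a worked example. The ByteSize stand-in and the concrete numbers are assumptions for illustration, not values from the OpenWhisk configuration.

object InvokerShardSketch {
  // Simplified stand-in for the ByteSize used in the real code.
  final case class ByteSize(mb: Long) {
    def /(divisor: Int): ByteSize = ByteSize(mb / divisor)
    def <(other: ByteSize): Boolean = mb < other.mb
    override def toString: String = s"$mb MB"
  }

  val minActionMemory = ByteSize(128) // assumed MemoryLimit.MIN_MEMORY of 128 MB

  /** Each controller gets memory / clusterSize; if that falls below the smallest
    * possible action, clamp to the minimum (the real code now logs an error here). */
  def invokerSlot(invokerUserMemory: ByteSize, clusterSize: Int): ByteSize = {
    val shard = invokerUserMemory / clusterSize
    if (shard < minActionMemory) {
      println(
        s"error: calculated shard $shard is below min action memory $minActionMemory; " +
          s"setting to min memory, expect invoker overloads")
      minActionMemory
    } else shard
  }

  def main(args: Array[String]): Unit = {
    // 2048 MB of invoker user memory split across 20 controllers gives 102 MB per shard,
    // below the assumed 128 MB minimum, so each controller claims 128 MB and the invoker
    // is oversubscribed (20 * 128 MB = 2560 MB > 2048 MB) - hence the new error log.
    println(invokerSlot(ByteSize(2048), clusterSize = 20)) // prints the error, then 128 MB
    println(invokerSlot(ByteSize(2048), clusterSize = 4))  // 512 MB, no clamping needed
  }
}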