Posted to commits@stratos.apache.org by la...@apache.org on 2014/12/26 16:31:09 UTC

[2/3] stratos git commit: send scaling-over-max event when the required instance count is higher than the max

send scaling-over-max event when the required instance count is higher than the max
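For orientation, the decision this commit changes can be read as the
following standalone sketch. It is a hedged illustration only: the names
here (ScaleUpDecision, Outcome, evaluate) are hypothetical stand-ins, not
the Stratos API, and the real checks run inside the Drools rule shown in
the diff below. The new behavior: when the stat-based required member
count exceeds the cluster max, the rule still fills the cluster up to max,
but now also notifies the parent monitor for possible group scaling or
application bursting.

    // Hedged sketch, not the actual Stratos API: all names below are
    // hypothetical stand-ins for the checks made in the scaling.drl rule.
    public final class ScaleUpDecision {

        /** What one scale-up evaluation decides. */
        public record Outcome(int additionalInstances, boolean notifyParentOverMax) {}

        public static Outcome evaluate(int requiredInstances,
                                       int nonTerminatedMembers,
                                       int clusterMaxMembers) {
            if (nonTerminatedMembers >= clusterMaxMembers) {
                // Already at max: spawn nothing, escalate to the parent monitor
                // (pre-existing behavior in the else branch of the rule).
                return new Outcome(0, true);
            }
            if (requiredInstances > clusterMaxMembers) {
                // New in this commit: over-max demand fills the cluster up to
                // max AND notifies the parent (group scaling / app bursting).
                return new Outcome(clusterMaxMembers - nonTerminatedMembers, true);
            }
            // Normal case: spawn only the shortfall.
            return new Outcome(requiredInstances - nonTerminatedMembers, false);
        }

        public static void main(String[] args) {
            System.out.println(evaluate(10, 4, 6)); // over max: spawn 2, notify parent
            System.out.println(evaluate(5, 4, 6));  // within max: spawn 1, no notification
        }
    }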


Project: http://git-wip-us.apache.org/repos/asf/stratos/repo
Commit: http://git-wip-us.apache.org/repos/asf/stratos/commit/cf7b808a
Tree: http://git-wip-us.apache.org/repos/asf/stratos/tree/cf7b808a
Diff: http://git-wip-us.apache.org/repos/asf/stratos/diff/cf7b808a

Branch: refs/heads/master
Commit: cf7b808a229f2965777a0787a67b60a9f3e8766e
Parents: d8a52fc
Author: Lahiru Sandaruwan <la...@apache.org>
Authored: Fri Dec 26 18:33:10 2014 +0530
Committer: Lahiru Sandaruwan <la...@apache.org>
Committed: Fri Dec 26 20:50:02 2014 +0530

----------------------------------------------------------------------
 .../src/main/conf/drools/scaling.drl            | 21 ++++++++++++--------
 1 file changed, 13 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/stratos/blob/cf7b808a/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl
----------------------------------------------------------------------
diff --git a/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl b/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl
index 4260426..4aa81f7 100644
--- a/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl
+++ b/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl
@@ -92,8 +92,8 @@ dialect "mvel"
         averageRequestsServedPerInstance : Float() from  clusterInstanceContext.getAverageRequestsServedPerInstance()
 
         numberOfInstancesReuquiredBasedOnRif : Integer() from delegator.getNumberOfInstancesRequiredBasedOnRif(rifPredictedValue, requestsServedPerInstance, averageRequestsServedPerInstance, arspiReset)
-        numberOfInstancesReuquiredBasedOnMemoryConsumption : Integer() from delegator.getNumberOfInstancesRequiredBasedOnMemoryConsumption(mcThreshold, mcPredictedValue, clusterInstanceContext.getActiveMemberCount())
-        numberOfInstancesReuquiredBasedOnLoadAverage : Integer() from delegator.getNumberOfInstancesRequiredBasedOnLoadAverage(laThreshold, laPredictedValue, clusterInstanceContext.getActiveMemberCount())
+        numberOfInstancesReuquiredBasedOnMemoryConsumption : Integer() from delegator.getNumberOfInstancesRequiredBasedOnMemoryConsumption(mcThreshold, mcPredictedValue, activeInstancesCount)
+        numberOfInstancesReuquiredBasedOnLoadAverage : Integer() from delegator.getNumberOfInstancesRequiredBasedOnLoadAverage(laThreshold, laPredictedValue, activeInstancesCount)
 
         numberOfRequiredInstances : Integer() from delegator.getMaxNumberOfInstancesRequired(numberOfInstancesReuquiredBasedOnRif, numberOfInstancesReuquiredBasedOnMemoryConsumption ,mcReset ,numberOfInstancesReuquiredBasedOnLoadAverage, laReset) 
 
@@ -119,19 +119,24 @@ dialect "mvel"
 
 	    log.debug("[scaling] Number of required instances based on stats: " + numberOfRequiredInstances + " [active instances count] " + activeInstancesCount + " [network-partition] " + clusterInstanceContext.getNetworkPartitionId() + " [cluster] " + clusterId);
 
-        int nonTerminatedMemberCount = clusterInstanceContext.getNonTerminatedMemberCount();
+        int nonTerminatedMembers = clusterInstanceContext.getNonTerminatedMemberCount();
         if(scaleUp){
 
             int clusterMaxMembers = clusterInstanceContext.getMaxInstanceCount();
-            if (nonTerminatedMemberCount < clusterMaxMembers) {
+            if (nonTerminatedMembers < clusterMaxMembers) {
 
                 int additionalInstances = 0;
                 if(clusterMaxMembers < numberOfRequiredInstances){
 
-                    additionalInstances = clusterMaxMembers - nonTerminatedMemberCount;
+                    additionalInstances = clusterMaxMembers - nonTerminatedMembers;
+                    log.info("[scale-up] Required member count based on stat based scaling is higher than max, hence" +
+                            " notifying to parent for possible group scaling or app bursting." +
+                            " [cluster] " + clusterId + " [instance id] " + clusterInstanceContext.getId() +
+                            " [max] " + clusterMaxMembers);
+                    delegator.delegateScalingOverMaxNotification(clusterId, clusterInstanceContext.getNetworkPartitionId(), clusterInstanceContext.getId());
                 } else {
 
-                    additionalInstances = numberOfRequiredInstances - nonTerminatedMemberCount;
+                    additionalInstances = numberOfRequiredInstances - nonTerminatedMembers;
                 }
 
                 clusterInstanceContext.resetScaleDownRequestsCount();
@@ -162,7 +167,7 @@ dialect "mvel"
                     }
                 }
             } else {
-                log.info("[scale-up] Max is reached, hence not scaling up cluster monitor itself and" +
+                log.info("[scale-up] Trying to scale up over max, hence not scaling up cluster itself and" +
                        " notifying to parent for possible group scaling or app bursting." +
                        " [cluster] " + clusterId + " [instance id] " + clusterInstanceContext.getId() +
                         " [max] " + clusterMaxMembers);
@@ -170,7 +175,7 @@ dialect "mvel"
             }
         } else if(scaleDown){
 
-            if(nonTerminatedMemberCount > clusterInstanceContext.getMinInstanceCount){
+            if(nonTerminatedMembers > clusterInstanceContext.getMinInstanceCount()){
 
                 log.debug("[scale-down] Decided to Scale down [cluster] " + clusterId);
                 if(clusterInstanceContext.getScaleDownRequestsCount() > 2 ){