Posted to commits@stratos.apache.org by la...@apache.org on 2014/12/26 16:31:08 UTC
[1/3] stratos git commit: improve scaling up logic in scaling rule
Repository: stratos
Updated Branches:
refs/heads/master 9cd4e89b1 -> 35f156d48
improve scaling up logic in scaling rule
Project: http://git-wip-us.apache.org/repos/asf/stratos/repo
Commit: http://git-wip-us.apache.org/repos/asf/stratos/commit/d8a52fc0
Tree: http://git-wip-us.apache.org/repos/asf/stratos/tree/d8a52fc0
Diff: http://git-wip-us.apache.org/repos/asf/stratos/diff/d8a52fc0
Branch: refs/heads/master
Commit: d8a52fc04d05ca91c3c223ed9ca94de6e330635c
Parents: 9cd4e89
Author: Lahiru Sandaruwan <la...@apache.org>
Authored: Fri Dec 26 14:18:28 2014 +0530
Committer: Lahiru Sandaruwan <la...@apache.org>
Committed: Fri Dec 26 20:50:01 2014 +0530
----------------------------------------------------------------------
.../src/main/conf/drools/scaling.drl | 20 ++++++++++++++++----
1 file changed, 16 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/stratos/blob/d8a52fc0/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl
----------------------------------------------------------------------
diff --git a/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl b/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl
index 3af79c0..4260426 100644
--- a/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl
+++ b/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl
@@ -119,9 +119,21 @@ dialect "mvel"
log.debug("[scaling] Number of required instances based on stats: " + numberOfRequiredInstances + " [active instances count] " + activeInstancesCount + " [network-partition] " + clusterInstanceContext.getNetworkPartitionId() + " [cluster] " + clusterId);
+ int nonTerminatedMemberCount = clusterInstanceContext.getNonTerminatedMemberCount();
if(scaleUp){
- if (clusterInstanceContext.getNonTerminatedMemberCount() < clusterInstanceContext.getMaxInstanceCount()) {
- int additionalInstances = numberOfRequiredInstances - activeInstancesCount ;
+
+ int clusterMaxMembers = clusterInstanceContext.getMaxInstanceCount();
+ if (nonTerminatedMemberCount < clusterMaxMembers) {
+
+ int additionalInstances = 0;
+ if(clusterMaxMembers < numberOfRequiredInstances){
+
+ additionalInstances = clusterMaxMembers - nonTerminatedMemberCount;
+ } else {
+
+ additionalInstances = numberOfRequiredInstances - nonTerminatedMemberCount;
+ }
+
clusterInstanceContext.resetScaleDownRequestsCount();
log.debug("[scale-up] " + " has scaling dependents " + clusterInstanceContext.hasScalingDependants() + " [cluster] " + clusterId );
@@ -153,12 +165,12 @@ dialect "mvel"
log.info("[scale-up] Max is reached, hence not scaling up cluster monitor itself and
notifying to parent for possible group scaling or app bursting.
[cluster] " + clusterId + " [instance id]" + clusterInstanceContext.getId() +
- " [max] " + clusterInstanceContext.getMaxInstanceCount());
+ " [max] " + clusterMaxMembers);
delegator.delegateScalingOverMaxNotification(clusterId, clusterInstanceContext.getNetworkPartitionId(), clusterInstanceContext.getId());
}
} else if(scaleDown){
- if(clusterInstanceContext.getNonTerminatedMemberCount() > clusterInstanceContext.getMinInstanceCount){
+ if(nonTerminatedMemberCount > clusterInstanceContext.getMinInstanceCount){
log.debug("[scale-down] Decided to Scale down [cluster] " + clusterId);
if(clusterInstanceContext.getScaleDownRequestsCount() > 2 ){
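
For readers skimming the diff: the scale-up arithmetic this commit introduces can be sketched in plain Java as below. The class and helper names are illustrative only (the rule reads these values from ClusterInstanceContext in scaling.drl); the sketch just shows how the additional-instance count is now capped at the cluster maximum.

// Sketch only: illustrative names, not the Stratos API.
public class ScaleUpMathSketch {

    // Mirrors the new branch in scaling.drl: never request more members than
    // the cluster maximum allows.
    static int additionalInstances(int nonTerminatedMembers,
                                   int maxInstanceCount,
                                   int requiredInstances) {
        if (nonTerminatedMembers >= maxInstanceCount) {
            return 0; // already at max; the rule only notifies the parent in this case
        }
        if (maxInstanceCount < requiredInstances) {
            return maxInstanceCount - nonTerminatedMembers; // cap at the cluster max
        }
        return requiredInstances - nonTerminatedMembers;
    }

    public static void main(String[] args) {
        System.out.println(additionalInstances(2, 5, 8)); // stats ask for 8, max 5 -> start 3
        System.out.println(additionalInstances(2, 5, 4)); // stats ask for 4        -> start 2
    }
}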
[2/3] stratos git commit: send scaling over max event when required number is higher than max
Posted by la...@apache.org.
send scaling over max event when required number is higher than max
Project: http://git-wip-us.apache.org/repos/asf/stratos/repo
Commit: http://git-wip-us.apache.org/repos/asf/stratos/commit/cf7b808a
Tree: http://git-wip-us.apache.org/repos/asf/stratos/tree/cf7b808a
Diff: http://git-wip-us.apache.org/repos/asf/stratos/diff/cf7b808a
Branch: refs/heads/master
Commit: cf7b808a229f2965777a0787a67b60a9f3e8766e
Parents: d8a52fc
Author: Lahiru Sandaruwan <la...@apache.org>
Authored: Fri Dec 26 18:33:10 2014 +0530
Committer: Lahiru Sandaruwan <la...@apache.org>
Committed: Fri Dec 26 20:50:02 2014 +0530
----------------------------------------------------------------------
.../src/main/conf/drools/scaling.drl | 21 ++++++++++++--------
1 file changed, 13 insertions(+), 8 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/stratos/blob/cf7b808a/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl
----------------------------------------------------------------------
diff --git a/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl b/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl
index 4260426..4aa81f7 100644
--- a/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl
+++ b/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl
@@ -92,8 +92,8 @@ dialect "mvel"
averageRequestsServedPerInstance : Float() from clusterInstanceContext.getAverageRequestsServedPerInstance()
numberOfInstancesReuquiredBasedOnRif : Integer() from delegator.getNumberOfInstancesRequiredBasedOnRif(rifPredictedValue, requestsServedPerInstance, averageRequestsServedPerInstance, arspiReset)
- numberOfInstancesReuquiredBasedOnMemoryConsumption : Integer() from delegator.getNumberOfInstancesRequiredBasedOnMemoryConsumption(mcThreshold, mcPredictedValue, clusterInstanceContext.getActiveMemberCount())
- numberOfInstancesReuquiredBasedOnLoadAverage : Integer() from delegator.getNumberOfInstancesRequiredBasedOnLoadAverage(laThreshold, laPredictedValue, clusterInstanceContext.getActiveMemberCount())
+ numberOfInstancesReuquiredBasedOnMemoryConsumption : Integer() from delegator.getNumberOfInstancesRequiredBasedOnMemoryConsumption(mcThreshold, mcPredictedValue, activeInstancesCount)
+ numberOfInstancesReuquiredBasedOnLoadAverage : Integer() from delegator.getNumberOfInstancesRequiredBasedOnLoadAverage(laThreshold, laPredictedValue, activeInstancesCount)
numberOfRequiredInstances : Integer() from delegator.getMaxNumberOfInstancesRequired(numberOfInstancesReuquiredBasedOnRif, numberOfInstancesReuquiredBasedOnMemoryConsumption ,mcReset ,numberOfInstancesReuquiredBasedOnLoadAverage, laReset)
@@ -119,19 +119,24 @@ dialect "mvel"
log.debug("[scaling] Number of required instances based on stats: " + numberOfRequiredInstances + " [active instances count] " + activeInstancesCount + " [network-partition] " + clusterInstanceContext.getNetworkPartitionId() + " [cluster] " + clusterId);
- int nonTerminatedMemberCount = clusterInstanceContext.getNonTerminatedMemberCount();
+ int nonTerminatedMembers = clusterInstanceContext.getNonTerminatedMemberCount();
if(scaleUp){
int clusterMaxMembers = clusterInstanceContext.getMaxInstanceCount();
- if (nonTerminatedMemberCount < clusterMaxMembers) {
+ if (nonTerminatedMembers < clusterMaxMembers) {
int additionalInstances = 0;
if(clusterMaxMembers < numberOfRequiredInstances){
- additionalInstances = clusterMaxMembers - nonTerminatedMemberCount;
+ additionalInstances = clusterMaxMembers - nonTerminatedMembers;
+ log.info("[scale-up] Required member count based on stat based scaling is higher than max, hence
+ notifying to parent for possible group scaling or app bursting.
+ [cluster] " + clusterId + " [instance id]" + clusterInstanceContext.getId() +
+ " [max] " + clusterMaxMembers);
+ delegator.delegateScalingOverMaxNotification(clusterId, clusterInstanceContext.getNetworkPartitionId(), clusterInstanceContext.getId());
} else {
- additionalInstances = numberOfRequiredInstances - nonTerminatedMemberCount;
+ additionalInstances = numberOfRequiredInstances - nonTerminatedMembers;
}
clusterInstanceContext.resetScaleDownRequestsCount();
@@ -162,7 +167,7 @@ dialect "mvel"
}
}
} else {
- log.info("[scale-up] Max is reached, hence not scaling up cluster monitor itself and
+ log.info("[scale-up] Trying to scale up over max, hence not scaling up cluster itself and
notifying to parent for possible group scaling or app bursting.
[cluster] " + clusterId + " [instance id]" + clusterInstanceContext.getId() +
" [max] " + clusterMaxMembers);
@@ -170,7 +175,7 @@ dialect "mvel"
}
} else if(scaleDown){
- if(nonTerminatedMemberCount > clusterInstanceContext.getMinInstanceCount){
+ if(nonTerminatedMembers > clusterInstanceContext.getMinInstanceCount){
log.debug("[scale-down] Decided to Scale down [cluster] " + clusterId);
if(clusterInstanceContext.getScaleDownRequestsCount() > 2 ){
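
A hedged sketch of the behaviour added here, again in plain Java with illustrative names: when the stat-based requirement exceeds the cluster max, the rule now both scales up to the max and raises the over-max notification so the parent monitor can consider group scaling or application bursting. The Notifier interface below is only a stand-in for the delegator's delegateScalingOverMaxNotification call in scaling.drl.

// Sketch only: Notifier stands in for the delegator used in scaling.drl.
public class OverMaxNotificationSketch {

    interface Notifier {
        void scalingOverMax(String clusterId, String networkPartitionId, String instanceId);
    }

    static int decideScaleUp(int nonTerminated, int max, int required,
                             String clusterId, String networkPartitionId, String instanceId,
                             Notifier notifier) {
        if (nonTerminated >= max) {
            // At or over max: do not scale the cluster itself, only notify the parent.
            notifier.scalingOverMax(clusterId, networkPartitionId, instanceId);
            return 0;
        }
        if (max < required) {
            // Requirement above max: scale up to max AND notify the parent so it can
            // consider group scaling or application bursting.
            notifier.scalingOverMax(clusterId, networkPartitionId, instanceId);
            return max - nonTerminated;
        }
        return required - nonTerminated;
    }

    public static void main(String[] args) {
        Notifier log = (cluster, partition, instance) ->
                System.out.println("over-max notification: " + cluster + "/" + partition + "/" + instance);
        System.out.println(decideScaleUp(2, 5, 8, "c1", "np1", "i1", log)); // notifies, prints 3
        System.out.println(decideScaleUp(3, 5, 4, "c1", "np1", "i1", log)); // no notification, prints 1
    }
}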
[3/3] stratos git commit: Limiting the instance count to max in dependent scaling up
Posted by la...@apache.org.
Limiting the instance count to max in dependent scaling up
Project: http://git-wip-us.apache.org/repos/asf/stratos/repo
Commit: http://git-wip-us.apache.org/repos/asf/stratos/commit/35f156d4
Tree: http://git-wip-us.apache.org/repos/asf/stratos/tree/35f156d4
Diff: http://git-wip-us.apache.org/repos/asf/stratos/diff/35f156d4
Branch: refs/heads/master
Commit: 35f156d484a1d3c013234b69a66b6bcacc404310
Parents: cf7b808
Author: Lahiru Sandaruwan <la...@apache.org>
Authored: Fri Dec 26 20:49:08 2014 +0530
Committer: Lahiru Sandaruwan <la...@apache.org>
Committed: Fri Dec 26 20:50:02 2014 +0530
----------------------------------------------------------------------
.../src/main/conf/drools/dependent-scaling.drl | 75 +++++++++++++-------
1 file changed, 49 insertions(+), 26 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/stratos/blob/35f156d4/products/stratos/modules/distribution/src/main/conf/drools/dependent-scaling.drl
----------------------------------------------------------------------
diff --git a/products/stratos/modules/distribution/src/main/conf/drools/dependent-scaling.drl b/products/stratos/modules/distribution/src/main/conf/drools/dependent-scaling.drl
index 0d98d0e..b225958 100644
--- a/products/stratos/modules/distribution/src/main/conf/drools/dependent-scaling.drl
+++ b/products/stratos/modules/distribution/src/main/conf/drools/dependent-scaling.drl
@@ -43,25 +43,42 @@ dialect "mvel"
clusterInstanceContext : ClusterInstanceContext ()
autoscaleAlgorithm : AutoscaleAlgorithm() from delegator.getAutoscaleAlgorithm(algorithmName)
+ nonTerminatedMembers : Integer() from clusterInstanceContext.getNonTerminatedMemberCount()
+
eval(log.debug("Running dependent scaling rule: [network-partition] " + clusterInstanceContext.getNetworkPartitionId() + " [cluster-instance] " + clusterInstanceContext.getId()))
- scaleUp : Boolean() from (clusterInstanceContext.getNonTerminatedMemberCount() < roundedRequiredInstanceCount )
- scaleDown : Boolean() from (clusterInstanceContext.getNonTerminatedMemberCount() > roundedRequiredInstanceCount )
+ scaleUp : Boolean() from (nonTerminatedMembers < roundedRequiredInstanceCount )
+ scaleDown : Boolean() from (nonTerminatedMembers > roundedRequiredInstanceCount )
then
if(scaleUp) {
+
int clusterMaxMembers = clusterInstanceContext.getMaxInstanceCount();
- int currentMemberCount = clusterInstanceContext.getNonTerminatedMemberCount();
- int additionalInstances = roundedRequiredInstanceCount - currentMemberCount;
- int count = 0;
- boolean partitionsAvailable = true;
+ if (nonTerminatedMembers < clusterInstanceContext.getMaxInstanceCount()) {
+
+ int additionalInstances = 0;
+ if(clusterInstanceContext.getMaxInstanceCount() < roundedRequiredInstanceCount){
+
+ additionalInstances = clusterInstanceContext.getMaxInstanceCount() - nonTerminatedMembers;
+ } else {
+
+ additionalInstances = roundedRequiredInstanceCount - nonTerminatedMembers;
+ log.info("[scale-up] Required member count based on dependecy scaling is higher than max, hence
+ notifying to parent for possible group scaling or app bursting.
+ [cluster] " + clusterId + " [instance id]" + clusterInstanceContext.getId() +
+ " [max] " + clusterMaxMembers);
+ delegator.delegateScalingOverMaxNotification(clusterId, clusterInstanceContext.getNetworkPartitionId(), clusterInstanceContext.getId());
+ }
+
+ int count = 0;
+ boolean partitionsAvailable = true;
- log.debug("[dependent-scale] is running for [cluster] " + clusterId +
- " [cluster-instance] " + clusterInstanceContext.getId() + " max member count is: " +
- clusterMaxMembers + " current member count is: " + currentMemberCount);
+ log.debug("[dependent-scale] is running for [cluster] " + clusterId +
+ " [cluster-instance] " + clusterInstanceContext.getId() + " max member count is: " +
+ clusterMaxMembers + " current member count is: " + nonTerminatedMembers);
+
+ while(count != additionalInstances && partitionsAvailable) {
- while(count != additionalInstances && partitionsAvailable) {
- if(currentMemberCount < clusterMaxMembers) {
ClusterLevelPartitionContext partitionContext = (ClusterLevelPartitionContext)autoscaleAlgorithm.getNextScaleUpPartitionContext(clusterInstanceContext.getPartitionCtxtsAsAnArray());
if(partitionContext != null) {
@@ -72,27 +89,33 @@ dialect "mvel"
} else {
partitionsAvailable = false;
}
- } else {
- partitionsAvailable = false;
}
- }
- if(!partitionsAvailable) {
- if(clusterInstanceContext.isInGroupScalingEnabledSubtree()){
- delegator.delegateScalingOverMaxNotification(clusterId,
- clusterInstanceContext.getNetworkPartitionId(),
- clusterInstanceContext.getId());
- log.info("[dependency-scale][dependent-max-notification] partition is not
- available for [scale-up]. Hence notifying the parent for group scaling" );
- } else {
- log.warn("[dependency-scale][dependent-max-notification] partition is not
- available for [scale-up]. All resources are exhausted.
- Please enable group-scaling for further scaleup" );
+
+ if(!partitionsAvailable) {
+ if(clusterInstanceContext.isInGroupScalingEnabledSubtree()){
+ delegator.delegateScalingOverMaxNotification(clusterId,
+ clusterInstanceContext.getNetworkPartitionId(),
+ clusterInstanceContext.getId());
+ log.info("[dependency-scale][dependent-max-notification] partition is not
+ available for [scale-up]. Hence notifying the parent for group scaling" );
+ } else {
+ log.warn("[dependency-scale][dependent-max-notification] partition is not
+ available for [scale-up]. All resources are exhausted.
+ Please enable group-scaling for further scaleup" );
+ }
+
}
+ } else {
+ log.info("[scale-up] Trying to scale up over max, hence not scaling up cluster itself and
+ notifying to parent for possible group scaling or app bursting.
+ [cluster] " + clusterId + " [instance id]" + clusterInstanceContext.getId() +
+ " [max] " + clusterMaxMembers);
+ delegator.delegateScalingOverMaxNotification(clusterId, clusterInstanceContext.getNetworkPartitionId(), clusterInstanceContext.getId());
}
} else if (scaleDown) {
- int redundantInstances = clusterInstanceContext.getNonTerminatedMemberCount() - roundedRequiredInstanceCount;
+ int redundantInstances = nonTerminatedMembers - roundedRequiredInstanceCount;
int count = 0;
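
To summarise the dependent-scaling change, here is an illustrative Java sketch (hypothetical class names, not the Stratos API) of the flow the new rule's log messages describe: cap the additional-instance count at the cluster maximum, raise the over-max notification when the dependency requirement exceeds that maximum, then walk the partitions until the extra members are placed or no partition has room left.

import java.util.ArrayList;
import java.util.List;

// Sketch only: Partition is a hypothetical stand-in for a ClusterLevelPartitionContext.
public class DependentScaleUpSketch {

    static class Partition {
        int freeSlots;
        Partition(int freeSlots) { this.freeSlots = freeSlots; }
        boolean hasRoom() { return freeSlots > 0; }
        void startInstance() { freeSlots--; System.out.println("started instance"); }
    }

    static void scaleUp(int nonTerminated, int max, int requiredRounded, List<Partition> partitions) {
        if (nonTerminated >= max) {
            System.out.println("at/over max: notify parent for group scaling / app bursting");
            return;
        }
        // Cap the number of new instances at the cluster maximum; when the rounded
        // requirement exceeds the max, the parent is also told via the over-max event.
        int additional;
        if (max < requiredRounded) {
            additional = max - nonTerminated;
            System.out.println("requirement above max: notify parent (over-max event)");
        } else {
            additional = requiredRounded - nonTerminated;
        }

        int count = 0;
        boolean partitionsAvailable = true;
        while (count != additional && partitionsAvailable) {
            Partition next = partitions.stream().filter(Partition::hasRoom).findFirst().orElse(null);
            if (next != null) {
                next.startInstance();
                count++;
            } else {
                partitionsAvailable = false;
            }
        }
        if (!partitionsAvailable) {
            // Mirrors the rule: request group scaling from the parent when enabled,
            // otherwise warn that all resources are exhausted.
            System.out.println("no partition available: notify parent or warn");
        }
    }

    public static void main(String[] args) {
        List<Partition> partitions = new ArrayList<>();
        partitions.add(new Partition(1));
        partitions.add(new Partition(1));
        // 1 running, max 4, dependency asks for 6 -> capped at 3 extra, limited by partition room
        scaleUp(1, 4, 6, partitions);
    }
}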