Posted to commits@stratos.apache.org by la...@apache.org on 2015/03/17 19:00:03 UTC
[42/50] [abbrv] stratos git commit: Fix indentation of scaling.drl file to adhere to the line limit

Fix indentation of scaling.drl file to adhere to the line limit
Project: http://git-wip-us.apache.org/repos/asf/stratos/repo
Commit: http://git-wip-us.apache.org/repos/asf/stratos/commit/8ad5fef2
Tree: http://git-wip-us.apache.org/repos/asf/stratos/tree/8ad5fef2
Diff: http://git-wip-us.apache.org/repos/asf/stratos/diff/8ad5fef2
Branch: refs/heads/master-deployment-policy-fix-merge
Commit: 8ad5fef242496e56d0a849280f8871df930062e5
Parents: ce64e75
Author: Lahiru Sandaruwan <la...@apache.org>
Authored: Tue Mar 17 11:47:01 2015 +0530
Committer: Lahiru Sandaruwan <la...@apache.org>
Committed: Tue Mar 17 11:47:01 2015 +0530
----------------------------------------------------------------------
.../src/main/conf/drools/scaling.drl | 69 +++++++++++++-------
1 file changed, 47 insertions(+), 22 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/stratos/blob/8ad5fef2/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl
----------------------------------------------------------------------
diff --git a/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl b/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl
index f37c773..2f71ce3 100644
--- a/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl
+++ b/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl
@@ -64,8 +64,10 @@ dialect "mvel"
loadThresholds : LoadThresholds() from autoscalePolicy.getLoadThresholds()
partitionAlgorithm : PartitionAlgorithm() from delegator.getPartitionAlgorithm(algorithmName)
- eval(log.debug("Running scale up rule: [network-partition] " + clusterInstanceContext.getNetworkPartitionId() + " [cluster] " + clusterId))
- eval(log.debug("[scaling] [network-partition] " + clusterInstanceContext.getNetworkPartitionId() + " [cluster] " + clusterId + " Algorithm name: " + algorithmName))
+ eval(log.debug("Running scale up rule: [network-partition] " + clusterInstanceContext.getNetworkPartitionId() +
+ " [cluster] " + clusterId))
+ eval(log.debug("[scaling] [network-partition] " + clusterInstanceContext.getNetworkPartitionId() + " [cluster] "
+ + clusterId + " Algorithm name: " + algorithmName))
rifThreshold : Float() from loadThresholds.getRequestsInFlightThreshold()
@@ -89,11 +91,17 @@ dialect "mvel"
requestsServedPerInstance : Float() from clusterInstanceContext.getRequestsServedPerInstance()
averageRequestsServedPerInstance : Float() from clusterInstanceContext.getAverageRequestsServedPerInstance()
- numberOfInstancesReuquiredBasedOnRif : Integer() from delegator.getNumberOfInstancesRequiredBasedOnRif(rifPredictedValue, requestsServedPerInstance, averageRequestsServedPerInstance, arspiReset)
- numberOfInstancesReuquiredBasedOnMemoryConsumption : Integer() from delegator.getNumberOfInstancesRequiredBasedOnMemoryConsumption(mcThreshold, mcPredictedValue, minInstancesCount, maxInstancesCount)
- numberOfInstancesReuquiredBasedOnLoadAverage : Integer() from delegator.getNumberOfInstancesRequiredBasedOnLoadAverage(laThreshold, laPredictedValue, minInstancesCount)
+ numberOfInstancesReuquiredBasedOnRif : Integer() from delegator.getNumberOfInstancesRequiredBasedOnRif(
+ rifPredictedValue, requestsServedPerInstance, averageRequestsServedPerInstance, arspiReset)
+ numberOfInstancesReuquiredBasedOnMemoryConsumption : Integer() from
+ delegator.getNumberOfInstancesRequiredBasedOnMemoryConsumption(mcThreshold, mcPredictedValue, minInstancesCount,
+ maxInstancesCount)
+ numberOfInstancesReuquiredBasedOnLoadAverage : Integer() from
+ delegator.getNumberOfInstancesRequiredBasedOnLoadAverage(laThreshold, laPredictedValue, minInstancesCount)
- numberOfRequiredInstances : Integer() from delegator.getMaxNumberOfInstancesRequired(numberOfInstancesReuquiredBasedOnRif, numberOfInstancesReuquiredBasedOnMemoryConsumption ,mcReset ,numberOfInstancesReuquiredBasedOnLoadAverage, laReset)
+ numberOfRequiredInstances : Integer() from delegator.getMaxNumberOfInstancesRequired(
+ numberOfInstancesReuquiredBasedOnRif, numberOfInstancesReuquiredBasedOnMemoryConsumption, mcReset,
+ numberOfInstancesReuquiredBasedOnLoadAverage, laReset)
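
For anyone following the rule logic rather than the whitespace: the bindings rewrapped above feed a max-of-metrics decision. A minimal Java sketch of what getMaxNumberOfInstancesRequired() plausibly computes follows; the method name and argument order are confirmed by the diff, but the body and the meaning of the reset flags are assumptions (reset == true is taken here to mean the metric's stats were refreshed this cycle):

int getMaxNumberOfInstancesRequired(int basedOnRif,
        int basedOnMemoryConsumption, boolean mcReset,
        int basedOnLoadAverage, boolean laReset) {
    // The cluster must satisfy its most demanding metric, so take the
    // maximum of the per-metric counts. A metric is only considered
    // when its stats arrived this cycle (assumed meaning of the flag).
    int required = basedOnRif;
    if (mcReset) {
        required = Math.max(required, basedOnMemoryConsumption);
    }
    if (laReset) {
        required = Math.max(required, basedOnLoadAverage);
    }
    return required;
}
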
@@ -115,7 +123,9 @@ dialect "mvel"
then
- log.debug("[scaling] Number of required instances based on stats: " + numberOfRequiredInstances + " [active instances count] " + activeInstancesCount + " [network-partition] " + clusterInstanceContext.getNetworkPartitionId() + " [cluster] " + clusterId);
+ log.debug("[scaling] Number of required instances based on stats: " + numberOfRequiredInstances + " " +
+ "[active instances count] " + activeInstancesCount + " [network-partition] " +
+ clusterInstanceContext.getNetworkPartitionId() + " [cluster] " + clusterId);
int nonTerminatedMembers = clusterInstanceContext.getNonTerminatedMemberCount();
if(scaleUp){
@@ -132,7 +142,8 @@ dialect "mvel"
+ " [instance id]" + clusterInstanceContext.getId() + " [max] " + clusterMaxMembers
+ " [number of required instances] " + numberOfRequiredInstances
+ " [additional instances to be created] " + additionalInstances);
- delegator.delegateScalingOverMaxNotification(clusterId, clusterInstanceContext.getNetworkPartitionId(), clusterInstanceContext.getId());
+ delegator.delegateScalingOverMaxNotification(clusterId, clusterInstanceContext.getNetworkPartitionId(),
+ clusterInstanceContext.getId());
} else {
additionalInstances = numberOfRequiredInstances - nonTerminatedMembers;
@@ -140,11 +151,13 @@ dialect "mvel"
clusterInstanceContext.resetScaleDownRequestsCount();
- log.debug("[scale-up] " + " [has scaling dependents] " + clusterInstanceContext.hasScalingDependants() + " [cluster] " + clusterId );
+ log.debug("[scale-up] " + " [has scaling dependents] " + clusterInstanceContext.hasScalingDependants() +
+ " [cluster] " + clusterId );
if(clusterInstanceContext.hasScalingDependants()) {
log.debug("[scale-up] Notifying dependencies [cluster] " + clusterId);
- delegator.delegateScalingDependencyNotification(clusterId, clusterInstanceContext.getNetworkPartitionId(), clusterInstanceContext.getId(), numberOfRequiredInstances, clusterInstanceContext.getMinInstanceCount());
+ delegator.delegateScalingDependencyNotification(clusterId, clusterInstanceContext.getNetworkPartitionId(),
+ clusterInstanceContext.getId(), numberOfRequiredInstances, clusterInstanceContext.getMinInstanceCount());
} else {
boolean partitionsAvailable = true;
@@ -152,7 +165,8 @@ dialect "mvel"
while(count != additionalInstances && partitionsAvailable){
- ClusterLevelPartitionContext partitionContext = (ClusterLevelPartitionContext)partitionAlgorithm.getNextScaleUpPartitionContext(clusterInstanceContext.getPartitionCtxtsAsAnArray());
+ ClusterLevelPartitionContext partitionContext = (ClusterLevelPartitionContext)
+ partitionAlgorithm.getNextScaleUpPartitionContext(clusterInstanceContext.getPartitionCtxtsAsAnArray());
if(partitionContext != null){
log.info("[scale-up] Partition available, hence trying to spawn an instance to scale up!" );
@@ -171,7 +185,8 @@ dialect "mvel"
notifying to parent for possible group scaling or app bursting.
[cluster] " + clusterId + " [instance id]" + clusterInstanceContext.getId() +
" [max] " + clusterMaxMembers);
- delegator.delegateScalingOverMaxNotification(clusterId, clusterInstanceContext.getNetworkPartitionId(), clusterInstanceContext.getId());
+ delegator.delegateScalingOverMaxNotification(clusterId, clusterInstanceContext.getNetworkPartitionId(),
+ clusterInstanceContext.getId());
}
} else if(scaleDown){
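
Both delegateScalingOverMaxNotification call sites rewrapped in this commit belong to the same clamp, which the context lines spell out: when the stats demand more members than the cluster maximum, the rule creates only what head-room allows and escalates to the parent monitor, which may group-scale or burst the application. Pieced together from the context:

int additionalInstances;
if (numberOfRequiredInstances > clusterMaxMembers) {
    // Cannot grow past the cluster max: spawn up to the max and notify
    // the parent for possible group scaling or app bursting.
    additionalInstances = clusterMaxMembers - nonTerminatedMembers;
    delegator.delegateScalingOverMaxNotification(clusterId,
            clusterInstanceContext.getNetworkPartitionId(),
            clusterInstanceContext.getId());
} else {
    additionalInstances = numberOfRequiredInstances - nonTerminatedMembers;
}
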
@@ -180,18 +195,21 @@ dialect "mvel"
log.debug("[scale-down] Decided to Scale down [cluster] " + clusterId);
if(clusterInstanceContext.getScaleDownRequestsCount() > 2 ){
- log.debug("[scale-down] Reached scale down requests threshold [cluster] " + clusterId + " Count " + clusterInstanceContext.getScaleDownRequestsCount());
+ log.debug("[scale-down] Reached scale down requests threshold [cluster] " + clusterId + " Count " +
+ clusterInstanceContext.getScaleDownRequestsCount());
if(clusterInstanceContext.hasScalingDependants()) {
log.debug("[scale-up] Notifying dependencies [cluster] " + clusterId);
- delegator.delegateScalingDependencyNotification(clusterId, clusterInstanceContext.getNetworkPartitionId(), clusterInstanceContext.getId(), numberOfRequiredInstances, clusterInstanceContext.getMinInstanceCount());
+ delegator.delegateScalingDependencyNotification(clusterId, clusterInstanceContext.getNetworkPartitionId(),
+ clusterInstanceContext.getId(), numberOfRequiredInstances, clusterInstanceContext.getMinInstanceCount());
} else{
MemberStatsContext selectedMemberStatsContext = null;
double lowestOverallLoad = 0.0;
boolean foundAValue = false;
- ClusterLevelPartitionContext partitionContext = (ClusterLevelPartitionContext) partitionAlgorithm.getNextScaleDownPartitionContext((clusterInstanceContext.getPartitionCtxtsAsAnArray()));
+ ClusterLevelPartitionContext partitionContext = (ClusterLevelPartitionContext)
+ partitionAlgorithm.getNextScaleDownPartitionContext((clusterInstanceContext.getPartitionCtxtsAsAnArray()));
if(partitionContext != null){
log.info("[scale-down] Partition available to scale down ");
// log.debug("[scale-down] " + " [partition] " + partition.getId() + " [cluster] " + clusterId);
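
Worth noting in the hunk above: scale-down is debounced, so a single quiet sample never terminates a member. Only after more than two consecutive scale-down requests does the rule go on to pick a victim; otherwise it just bumps the counter. A standalone sketch, where selectScaleDownCandidate() is a hypothetical helper standing in for the member-selection block in the next hunk:

if (clusterInstanceContext.getScaleDownRequestsCount() > 2) {
    // Threshold crossed: choose the member with the lowest predicted
    // load, as shown in the next hunk.
    selectScaleDownCandidate();    // hypothetical helper
} else {
    // Not convinced yet; remember this request for the next cycle.
    clusterInstanceContext.increaseScaleDownRequestsCount();
}
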
@@ -212,15 +230,19 @@ dialect "mvel"
MemoryConsumption memoryConsumption = memberStatsContext.getMemoryConsumption();
log.debug("[scale-down] " + " [partition] " + partitionContext.getPartitionId() + " [cluster] "
- + clusterId + " [member] " + memberStatsContext.getMemberId() + " Memory consumption: " + memoryConsumption);
+ + clusterId + " [member] " + memberStatsContext.getMemberId() + " Memory consumption: " +
+ memoryConsumption);
- double predictedCpu = delegator.getPredictedValueForNextMinute(loadAverage.getAverage(),loadAverage.getGradient(),loadAverage.getSecondDerivative(), 1);
+ double predictedCpu = delegator.getPredictedValueForNextMinute(loadAverage.getAverage(),
+ loadAverage.getGradient(),loadAverage.getSecondDerivative(), 1);
log.debug("[scale-down] " + " [partition] " + partitionContext.getPartitionId() + " [cluster] "
+ clusterId + " [member] " + memberStatsContext.getMemberId() + " Predicted CPU: " + predictedCpu);
- double predictedMemoryConsumption = delegator.getPredictedValueForNextMinute(memoryConsumption.getAverage(),memoryConsumption.getGradient(),memoryConsumption.getSecondDerivative(), 1);
+ double predictedMemoryConsumption = delegator.getPredictedValueForNextMinute(
+ memoryConsumption.getAverage(),memoryConsumption.getGradient(),memoryConsumption.getSecondDerivative(), 1);
log.debug("[scale-down] " + " [partition] " + partitionContext.getPartitionId() + " [cluster] "
- + clusterId + " [member] " + memberStatsContext.getMemberId() + " Predicted memory consumption: " + predictedMemoryConsumption);
+ + clusterId + " [member] " + memberStatsContext.getMemberId() + " Predicted memory consumption: " +
+ predictedMemoryConsumption);
double overallLoad = (predictedCpu + predictedMemoryConsumption) / 2;
log.debug("[scale-down] " + " [partition] " + partitionContext.getPartitionId() + " [cluster] "
@@ -249,15 +271,18 @@ dialect "mvel"
}
}
} else{
- log.debug("[scale-down] Not reached scale down requests threshold. " + clusterId + " Count " + clusterInstanceContext.getScaleDownRequestsCount());
+ log.debug("[scale-down] Not reached scale down requests threshold. " + clusterId + " Count " +
+ clusterInstanceContext.getScaleDownRequestsCount());
clusterInstanceContext.increaseScaleDownRequestsCount();
}
} else {
- log.info("[scale-down] Min is reached, hence not scaling down [cluster] " + clusterId + " [instance id]" + clusterInstanceContext.getId());
+ log.info("[scale-down] Min is reached, hence not scaling down [cluster] " + clusterId + " [instance id]"
+ + clusterInstanceContext.getId());
if(clusterInstanceContext.isInGroupScalingEnabledSubtree()){
- delegator.delegateScalingDownBeyondMinNotification(clusterId, clusterInstanceContext.getNetworkPartitionId(), clusterInstanceContext.getId());
+ delegator.delegateScalingDownBeyondMinNotification(clusterId, clusterInstanceContext.getNetworkPartitionId(),
+ clusterInstanceContext.getId());
}
}
} else{