Posted to commits@stratos.apache.org by la...@apache.org on 2014/12/22 20:21:30 UTC

stratos git commit: adding scale dependent event in scale down case

Repository: stratos
Updated Branches:
  refs/heads/master 38f553d6d -> f97428f06


adding scale dependent event in scale down case


Project: http://git-wip-us.apache.org/repos/asf/stratos/repo
Commit: http://git-wip-us.apache.org/repos/asf/stratos/commit/f97428f0
Tree: http://git-wip-us.apache.org/repos/asf/stratos/tree/f97428f0
Diff: http://git-wip-us.apache.org/repos/asf/stratos/diff/f97428f0

Branch: refs/heads/master
Commit: f97428f062bdbbcc32a2d303e766187dac849c6a
Parents: 38f553d
Author: Lahiru Sandaruwan <la...@apache.org>
Authored: Tue Dec 23 00:46:16 2014 +0530
Committer: Lahiru Sandaruwan <la...@apache.org>
Committed: Tue Dec 23 00:46:24 2014 +0530

----------------------------------------------------------------------
 .../monitor/cluster/ClusterMonitor.java         |   3 +-
 .../src/main/conf/drools/scaling.drl            | 120 ++++++++++---------
 2 files changed, 64 insertions(+), 59 deletions(-)
----------------------------------------------------------------------
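Before the diff itself, a minimal sketch of the control flow this commit introduces in the scale-down rule, written as plain Java for readability. Only the calls that appear in the diff below (getScaleDownRequestsCount, hasScalingDependants, delegateScalingDependencyNotification, delegateTerminate) reflect the real rule; the ScaleDownDeps interface and every other name here are illustrative stand-ins, not Stratos APIs.

// Illustrative stand-in for the scale-down branch added to scaling.drl below.
interface ScaleDownDeps {
    int getScaleDownRequestsCount();
    boolean hasScalingDependants();
    void notifyDependants(int requiredInstances, int minInstanceCount); // ~ delegateScalingDependencyNotification
    void terminateLowestLoadedMember();                                 // ~ delegateTerminate on the selected member
}

class ScaleDownFlow {
    static void onScaleDown(ScaleDownDeps ctx, int requiredInstances, int minInstanceCount) {
        if (ctx.getScaleDownRequestsCount() > 2) {   // threshold lowered from 5 to 2 in this commit
            if (ctx.hasScalingDependants()) {
                // New in this commit: notify dependent monitors so they can scale in step,
                // instead of terminating a member of this cluster directly.
                ctx.notifyDependants(requiredInstances, minInstanceCount);
            } else {
                // Unchanged behaviour: select the member with the lowest predicted
                // CPU/memory load and terminate it.
                ctx.terminateLowestLoadedMember();
            }
        }
    }
}
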


http://git-wip-us.apache.org/repos/asf/stratos/blob/f97428f0/components/org.apache.stratos.autoscaler/src/main/java/org/apache/stratos/autoscaler/monitor/cluster/ClusterMonitor.java
----------------------------------------------------------------------
diff --git a/components/org.apache.stratos.autoscaler/src/main/java/org/apache/stratos/autoscaler/monitor/cluster/ClusterMonitor.java b/components/org.apache.stratos.autoscaler/src/main/java/org/apache/stratos/autoscaler/monitor/cluster/ClusterMonitor.java
index e97b516..f9ac6f0 100644
--- a/components/org.apache.stratos.autoscaler/src/main/java/org/apache/stratos/autoscaler/monitor/cluster/ClusterMonitor.java
+++ b/components/org.apache.stratos.autoscaler/src/main/java/org/apache/stratos/autoscaler/monitor/cluster/ClusterMonitor.java
@@ -395,7 +395,8 @@ public class ClusterMonitor extends AbstractClusterMonitor {
 
         log.info("Parent scaling event received to [cluster]: " + this.getClusterId()
                     + ", [network partition]: " + scalingEvent.getNetworkPartitionId()
-                    + ", [event] " + scalingEvent.getId() + ", [group instance] " + scalingEvent.getInstanceId());
+                    + ", [event] " + scalingEvent.getId() + ", [group instance] " + scalingEvent.getInstanceId()
+                    + ", [factor] " + scalingEvent.getFactor());
 
 
         this.scalingFactorBasedOnDependencies = scalingEvent.getFactor();

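The hunk above only extends the log message to include the factor carried by the parent scaling event before it is stored in scalingFactorBasedOnDependencies. As a self-contained illustration (made-up numbers, not Stratos code) of what that factor encodes, here is the required-instances over min-instance-count relationship used by the factor computation removed from scaling.drl below:

// Standalone example only: shows the factor = required / min relationship used by the
// autoscaler rules, and how a monitor receiving the factor could recover the target
// instance count. All values are made up.
public class ScalingFactorExample {
    public static void main(String[] args) {
        int minInstanceCount = 4;
        int numberOfRequiredInstances = 2;   // scale-down target below the current member count

        // Factor as the scaling rule computed it (the cast avoids integer division):
        float factor = (float) numberOfRequiredInstances / minInstanceCount;   // 0.5

        // A monitor receiving the event can recover the target instance count:
        int targetInstances = Math.round(factor * minInstanceCount);           // 2

        System.out.println("factor=" + factor + ", target instances=" + targetInstances);
    }
}
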
http://git-wip-us.apache.org/repos/asf/stratos/blob/f97428f0/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl
----------------------------------------------------------------------
diff --git a/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl b/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl
index 8135230..3af79c0 100644
--- a/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl
+++ b/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl
@@ -160,71 +160,75 @@ dialect "mvel"
 
             if(clusterInstanceContext.getNonTerminatedMemberCount() > clusterInstanceContext.getMinInstanceCount){
 
-
-                float factor = numberOfRequiredInstances / clusterInstanceContext.getMinInstanceCount();
-    //            delegator.delegateScalingDependencyNotification(clusterId, clusterInstanceContext.getNetworkPartitionId(), factor);
-
                 log.debug("[scale-down] Decided to Scale down [cluster] " + clusterId);
-                if(clusterInstanceContext.getScaleDownRequestsCount() > 5 ){
+                if(clusterInstanceContext.getScaleDownRequestsCount() > 2 ){
 
                     log.debug("[scale-down] Reached scale down requests threshold [cluster] " + clusterId + " Count " + clusterInstanceContext.getScaleDownRequestsCount());
-                    MemberStatsContext selectedMemberStatsContext = null;
-                    double lowestOverallLoad = 0.0;
-                    boolean foundAValue = false;
-                    ClusterLevelPartitionContext partitionContext =  (ClusterLevelPartitionContext) autoscaleAlgorithm.getNextScaleDownPartitionContext((clusterInstanceContext.getPartitionCtxtsAsAnArray()));
-                    if(partitionContext != null){
-                        log.info("[scale-down] Partition available to scale down ");
-    //                    log.debug("[scale-down] " + " [partition] " + partition.getId() + " [cluster] " + clusterId);
-    //                    partitionContext = clusterInstanceContext.getPartitionCtxt(partition.getId());
-    //
-
-                        // In partition context member stat context, all the primary members need to be
-                        // avoided being selected as the member to terminated
-
-
-                        for(MemberStatsContext memberStatsContext: partitionContext.getMemberStatsContexts().values()){
-
-                            if( !primaryMembers.contains(memberStatsContext.getMemberId()) ) {
-
-                            LoadAverage loadAverage = memberStatsContext.getLoadAverage();
-                            log.debug("[scale-down] " + " [cluster] "
-                                + clusterId + " [member] " + memberStatsContext.getMemberId() + " Load average: " + loadAverage);
-
-                            MemoryConsumption memoryConsumption = memberStatsContext.getMemoryConsumption();
-                            log.debug("[scale-down] " + " [partition] " + partitionContext.getPartitionId() + " [cluster] "
-                                + clusterId + " [member] " + memberStatsContext.getMemberId() + " Memory consumption: " + memoryConsumption);
-
-                            double predictedCpu = delegator.getPredictedValueForNextMinute(loadAverage.getAverage(),loadAverage.getGradient(),loadAverage.getSecondDerivative(), 1);
-                            log.debug("[scale-down] " + " [partition] " + partitionContext.getPartitionId() + " [cluster] "
-                                + clusterId + " [member] " + memberStatsContext.getMemberId() + " Predicted CPU: " + predictedCpu);
-
-                            double predictedMemoryConsumption = delegator.getPredictedValueForNextMinute(memoryConsumption.getAverage(),memoryConsumption.getGradient(),memoryConsumption.getSecondDerivative(), 1);
-                            log.debug("[scale-down] " + " [partition] " + partitionContext.getPartitionId() + " [cluster] "
-                                + clusterId + " [member] " + memberStatsContext.getMemberId() + " Predicted memory consumption: " + predictedMemoryConsumption);
-
-                            double overallLoad = (predictedCpu + predictedMemoryConsumption) / 2;
-                            log.debug("[scale-down] " + " [partition] " + partitionContext.getPartitionId() + " [cluster] "
-                                + clusterId + " [member] " + memberStatsContext.getMemberId() + " Overall load: " + overallLoad);
-
-                            if(!foundAValue){
-                                foundAValue = true;
-                                selectedMemberStatsContext = memberStatsContext;
-                                lowestOverallLoad = overallLoad;
-                            } else if(overallLoad < lowestOverallLoad){
-                                selectedMemberStatsContext = memberStatsContext;
-                                lowestOverallLoad = overallLoad;
-                            }
 
+                    if(clusterInstanceContext.hasScalingDependants()) {
 
-                          }
+                        log.debug("[scale-down] Notifying dependencies [cluster] " + clusterId);
+                        delegator.delegateScalingDependencyNotification(clusterId, clusterInstanceContext.getNetworkPartitionId(), clusterInstanceContext.getId(), numberOfRequiredInstances, clusterInstanceContext.getMinInstanceCount());
+                    } else{
 
-                        }
-                        if(selectedMemberStatsContext != null) {
-                            log.info("[scale-down] Trying to terminating an instace to scale down!" );
-                            log.debug("[scale-down] " + " [partition] " + partitionContext.getPartitionId() + " [cluster] "
-                                + clusterId + " Member with lowest overall load: " + selectedMemberStatsContext.getMemberId());
+                        MemberStatsContext selectedMemberStatsContext = null;
+                        double lowestOverallLoad = 0.0;
+                        boolean foundAValue = false;
+                        ClusterLevelPartitionContext partitionContext =  (ClusterLevelPartitionContext) autoscaleAlgorithm.getNextScaleDownPartitionContext((clusterInstanceContext.getPartitionCtxtsAsAnArray()));
+                        if(partitionContext != null){
+                            log.info("[scale-down] Partition available to scale down ");
+        //                    log.debug("[scale-down] " + " [partition] " + partition.getId() + " [cluster] " + clusterId);
+        //                    partitionContext = clusterInstanceContext.getPartitionCtxt(partition.getId());
+        //
+
+                            // Primary members in the partition's member stats contexts must not be
+                            // selected as the member to terminate.
+
+
+                            for(MemberStatsContext memberStatsContext: partitionContext.getMemberStatsContexts().values()){
+
+                                if( !primaryMembers.contains(memberStatsContext.getMemberId()) ) {
+
+                                LoadAverage loadAverage = memberStatsContext.getLoadAverage();
+                                log.debug("[scale-down] " + " [cluster] "
+                                    + clusterId + " [member] " + memberStatsContext.getMemberId() + " Load average: " + loadAverage);
 
-                            delegator.delegateTerminate(partitionContext, selectedMemberStatsContext.getMemberId());
+                                MemoryConsumption memoryConsumption = memberStatsContext.getMemoryConsumption();
+                                log.debug("[scale-down] " + " [partition] " + partitionContext.getPartitionId() + " [cluster] "
+                                    + clusterId + " [member] " + memberStatsContext.getMemberId() + " Memory consumption: " + memoryConsumption);
+
+                                double predictedCpu = delegator.getPredictedValueForNextMinute(loadAverage.getAverage(),loadAverage.getGradient(),loadAverage.getSecondDerivative(), 1);
+                                log.debug("[scale-down] " + " [partition] " + partitionContext.getPartitionId() + " [cluster] "
+                                    + clusterId + " [member] " + memberStatsContext.getMemberId() + " Predicted CPU: " + predictedCpu);
+
+                                double predictedMemoryConsumption = delegator.getPredictedValueForNextMinute(memoryConsumption.getAverage(),memoryConsumption.getGradient(),memoryConsumption.getSecondDerivative(), 1);
+                                log.debug("[scale-down] " + " [partition] " + partitionContext.getPartitionId() + " [cluster] "
+                                    + clusterId + " [member] " + memberStatsContext.getMemberId() + " Predicted memory consumption: " + predictedMemoryConsumption);
+
+                                double overallLoad = (predictedCpu + predictedMemoryConsumption) / 2;
+                                log.debug("[scale-down] " + " [partition] " + partitionContext.getPartitionId() + " [cluster] "
+                                    + clusterId + " [member] " + memberStatsContext.getMemberId() + " Overall load: " + overallLoad);
+
+                                if(!foundAValue){
+                                    foundAValue = true;
+                                    selectedMemberStatsContext = memberStatsContext;
+                                    lowestOverallLoad = overallLoad;
+                                } else if(overallLoad < lowestOverallLoad){
+                                    selectedMemberStatsContext = memberStatsContext;
+                                    lowestOverallLoad = overallLoad;
+                                }
+
+
+                              }
+
+                            }
+                            if(selectedMemberStatsContext != null) {
+                                log.info("[scale-down] Trying to terminate an instance to scale down!");
+                                log.debug("[scale-down] " + " [partition] " + partitionContext.getPartitionId() + " [cluster] "
+                                    + clusterId + " Member with lowest overall load: " + selectedMemberStatsContext.getMemberId());
+
+                                delegator.delegateTerminate(partitionContext, selectedMemberStatsContext.getMemberId());
+                            }
                         }
                     }
                 } else{