Posted to commits@stratos.apache.org by la...@apache.org on 2015/03/17 18:59:58 UTC
[37/50] [abbrv] stratos git commit: Improve/adjust few logs in Drools files

Improve/adjust few logs in Drools files
Project: http://git-wip-us.apache.org/repos/asf/stratos/repo
Commit: http://git-wip-us.apache.org/repos/asf/stratos/commit/6743601d
Tree: http://git-wip-us.apache.org/repos/asf/stratos/tree/6743601d
Diff: http://git-wip-us.apache.org/repos/asf/stratos/diff/6743601d
Branch: refs/heads/master-deployment-policy-fix-merge
Commit: 6743601daf36605224bf78f9c702ec43fe0e353b
Parents: d11ed72
Author: Lahiru Sandaruwan <la...@apache.org>
Authored: Mon Mar 16 14:55:19 2015 +0530
Committer: Lahiru Sandaruwan <la...@apache.org>
Committed: Mon Mar 16 14:55:19 2015 +0530
----------------------------------------------------------------------
.../src/test/resources/autoscaler-old.drl | 1 +
.../src/main/conf/drools/dependent-scaling.drl | 30 ++++++++++----------
.../src/main/conf/drools/obsoletecheck.drl | 2 +-
.../src/main/conf/drools/scaling.drl | 18 ++++++------
4 files changed, 26 insertions(+), 25 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/stratos/blob/6743601d/components/org.apache.stratos.autoscaler/src/test/resources/autoscaler-old.drl
----------------------------------------------------------------------
diff --git a/components/org.apache.stratos.autoscaler/src/test/resources/autoscaler-old.drl b/components/org.apache.stratos.autoscaler/src/test/resources/autoscaler-old.drl
index bc045f7..378db09 100644
--- a/components/org.apache.stratos.autoscaler/src/test/resources/autoscaler-old.drl
+++ b/components/org.apache.stratos.autoscaler/src/test/resources/autoscaler-old.drl
@@ -1,3 +1,4 @@
+
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
http://git-wip-us.apache.org/repos/asf/stratos/blob/6743601d/products/stratos/modules/distribution/src/main/conf/drools/dependent-scaling.drl
----------------------------------------------------------------------
diff --git a/products/stratos/modules/distribution/src/main/conf/drools/dependent-scaling.drl b/products/stratos/modules/distribution/src/main/conf/drools/dependent-scaling.drl
index 336833a..56e9164 100644
--- a/products/stratos/modules/distribution/src/main/conf/drools/dependent-scaling.drl
+++ b/products/stratos/modules/distribution/src/main/conf/drools/dependent-scaling.drl
@@ -63,7 +63,7 @@ dialect "mvel"
} else {
additionalInstances = roundedRequiredInstanceCount - nonTerminatedMembers;
- log.info("[scale-up] Required member count based on dependecy scaling is higher than max, hence
+ log.info("[dependency-scaling] [scale-up] Required member count based on dependecy scaling is higher than max, hence
notifying to parent for possible group scaling or app bursting.
[cluster] " + clusterId + " [instance id]" + clusterInstanceContext.getId() +
" [max] " + clusterMaxMembers);
@@ -82,8 +82,8 @@ dialect "mvel"
ClusterLevelPartitionContext partitionContext = (ClusterLevelPartitionContext)partitionAlgorithm.getNextScaleUpPartitionContext(clusterInstanceContext.getPartitionCtxtsAsAnArray());
if(partitionContext != null) {
- log.info("[dependency-scale][scale-up] Partition available, hence trying to spawn an instance to scale up!" );
- log.debug("[dependency-scale][scale-up] " + " [partition] " + partitionContext.getPartitionId() + " [cluster] " + clusterId );
+ log.info("[dependency-scale] [scale-up] Partition available, hence trying to spawn an instance to scale up!" );
+ log.debug("[dependency-scale] [scale-up] " + " [partition] " + partitionContext.getPartitionId() + " [cluster] " + clusterId );
delegator.delegateSpawn(partitionContext, clusterId, clusterInstanceContext.getId(), isPrimary);
count++;
} else {
@@ -96,10 +96,10 @@ dialect "mvel"
delegator.delegateScalingOverMaxNotification(clusterId,
clusterInstanceContext.getNetworkPartitionId(),
clusterInstanceContext.getId());
- log.info("[dependency-scale][dependent-max-notification] partition is not
+ log.info("[dependency-scale] [dependent-max-notification] partition is not
available for [scale-up]. Hence notifying the parent for group scaling" );
} else {
- log.warn("[dependency-scale][dependent-max-notification] partition is not
+ log.warn("[dependency-scale] [dependent-max-notification] partition is not
available for [scale-up]. All resources are exhausted.
Please enable group-scaling for further scaleup" );
}
@@ -107,7 +107,7 @@ dialect "mvel"
}
} else {
- log.info("[scale-up] Trying to scale up over max, hence not scaling up cluster itself and
+ log.info("[dependency-scale] [scale-up] Trying to scale up over max, hence not scaling up cluster itself and
notifying to parent for possible group scaling or app bursting.
[cluster] " + clusterId + " [instance id]" + clusterInstanceContext.getId() +
" [max] " + clusterMaxMembers);
@@ -125,31 +125,31 @@ dialect "mvel"
boolean foundAValue = false;
ClusterLevelPartitionContext partitionContext = (ClusterLevelPartitionContext)partitionAlgorithm.getNextScaleDownPartitionContext(clusterInstanceContext.getPartitionCtxtsAsAnArray());
if(partitionContext != null){
- log.info("[dependency-scale][scale-down] Partition available to scale down, hence trying to terminate an instance to scale down!" );
- log.debug("[dependency-scale][scale-down] " + " [partition] " + partitionContext.getPartitionId() + " [cluster] " + clusterId );
+ log.info("[dependency-scale] [scale-down] Partition available to scale down, hence trying to terminate an instance to scale down!" );
+ log.debug("[dependency-scale] [scale-down] " + " [partition] " + partitionContext.getPartitionId() + " [cluster] " + clusterId );
for(MemberStatsContext memberStatsContext: partitionContext.getMemberStatsContexts().values()){
if( !primaryMembers.contains(memberStatsContext.getMemberId()) ) {
LoadAverage loadAverage = memberStatsContext.getLoadAverage();
- log.debug("[dependency-scale][scale-down] " + " [cluster] "
+ log.debug("[dependency-scale] [scale-down] " + " [cluster] "
+ clusterId + " [member] " + memberStatsContext.getMemberId() + " Load average: " + loadAverage);
MemoryConsumption memoryConsumption = memberStatsContext.getMemoryConsumption();
- log.debug("[dependency-scale][scale-down] " + " [partition] " + partitionContext.getPartitionId() + " [cluster] "
+ log.debug("[dependency-scale] [scale-down] " + " [partition] " + partitionContext.getPartitionId() + " [cluster] "
+ clusterId + " [member] " + memberStatsContext.getMemberId() + " Memory consumption: " + memoryConsumption);
double predictedCpu = delegator.getPredictedValueForNextMinute(loadAverage.getAverage(),loadAverage.getGradient(),loadAverage.getSecondDerivative(), 1);
- log.debug("[dependency-scale][scale-down] " + " [partition] " + partitionContext.getPartitionId() + " [cluster] "
+ log.debug("[dependency-scale] [scale-down] " + " [partition] " + partitionContext.getPartitionId() + " [cluster] "
+ clusterId + " [member] " + memberStatsContext.getMemberId() + " Predicted CPU: " + predictedCpu);
double predictedMemoryConsumption = delegator.getPredictedValueForNextMinute(memoryConsumption.getAverage(),memoryConsumption.getGradient(),memoryConsumption.getSecondDerivative(), 1);
- log.debug("[dependency-scale][scale-down] " + " [partition] " + partitionContext.getPartitionId() + " [cluster] "
+ log.debug("[dependency-scale] [scale-down] " + " [partition] " + partitionContext.getPartitionId() + " [cluster] "
+ clusterId + " [member] " + memberStatsContext.getMemberId() + " Predicted memory consumption: " + predictedMemoryConsumption);
double overallLoad = (predictedCpu + predictedMemoryConsumption) / 2;
- log.debug("[dependency-scale][scale-down] " + " [partition] " + partitionContext.getPartitionId() + " [cluster] "
+ log.debug("[dependency-scale] [scale-down] " + " [partition] " + partitionContext.getPartitionId() + " [cluster] "
+ clusterId + " [member] " + memberStatsContext.getMemberId() + " Overall load: " + overallLoad);
if(!foundAValue){
@@ -166,8 +166,8 @@ dialect "mvel"
}
if(selectedMemberStatsContext != null) {
- log.info("[dependency-scale][scale-down] Trying to terminating an instace to scale down!" );
- log.debug("[dependency-scale][scale-down] " + " [partition] " + partitionContext.getPartitionId() + " [cluster] "
+ log.info("[dependency-scale] [scale-down] Trying to terminating an instace to scale down!" );
+ log.debug("[dependency-scale] [scale-down] " + " [partition] " + partitionContext.getPartitionId() + " [cluster] "
+ clusterId + " Member with lowest overall load: " + selectedMemberStatsContext.getMemberId());
delegator.delegateTerminate(partitionContext, selectedMemberStatsContext.getMemberId());
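Taken together, the dependent-scaling.drl hunks apply one convention: every log message leads with space-separated bracketed tags, "[component] [action]", followed by "[key] value" pairs, so related entries can be pulled out of the autoscaler log with a single grep. A minimal sketch of the convention, assuming a log global and a clusterId value as used in the file above (the global declarations and rule name here are illustrative, not the actual file contents):

    global org.apache.commons.logging.Log log;   // assumed; the rules above log through a shared Log object
    global java.lang.String clusterId;           // illustrative; in the real rules clusterId is bound in the LHS

    rule "Illustrate the log-tag convention"
    dialect "mvel"
    when
        // conditions elided; this rule only demonstrates message formatting
    then
        // "[component] [action]" first, then "[key] value" pairs, all single-space separated
        log.info("[dependency-scale] [scale-up] Partition available, hence trying to spawn an instance to scale up!"
                + " [cluster] " + clusterId);
    end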
http://git-wip-us.apache.org/repos/asf/stratos/blob/6743601d/products/stratos/modules/distribution/src/main/conf/drools/obsoletecheck.drl
----------------------------------------------------------------------
diff --git a/products/stratos/modules/distribution/src/main/conf/drools/obsoletecheck.drl b/products/stratos/modules/distribution/src/main/conf/drools/obsoletecheck.drl
index 82b9d3a..320b07b 100755
--- a/products/stratos/modules/distribution/src/main/conf/drools/obsoletecheck.drl
+++ b/products/stratos/modules/distribution/src/main/conf/drools/obsoletecheck.drl
@@ -70,7 +70,7 @@ dialect "mvel"
eval(log.debug("[instance-cleanup-check] [network-partition] " + ctxt.getNetworkPartitionId() + " [partition] " + ctxt.getPartitionId() + " [cluster] " + clusterId + " Pending termination member count: " + ctxt.getTerminationPendingMembers().size()))
eval(ctxt.getTerminationPendingMembers().size() > 0)
member : MemberContext() from ctxt.getTerminationPendingMembers()
- eval(log.debug("[instance-cleanup-check ] [network-partition] " + ctxt.getNetworkPartitionId() + " [partition] " + ctxt.getPartitionId() + " [cluster] " + clusterId + " Member id: " + member.getMemberId()))
+ eval(log.debug("[instance-cleanup-check] [network-partition] " + ctxt.getNetworkPartitionId() + " [partition] " + ctxt.getPartitionId() + " [cluster] " + clusterId + " Member id: " + member.getMemberId()))
then
delegator.delegateInstanceCleanup(member.getMemberId());
end
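The obsoletecheck.drl change only normalizes a tag ("[instance-cleanup-check ]" carried a stray space before the closing bracket). The pattern around it is worth noting: the log.debug(...) calls are wrapped in eval(...) so the trace fires while the conditions are being evaluated, before the consequence runs. A condensed sketch of that pattern, with the fact types and the delegator global taken from the hunk above (network-partition details dropped for brevity):

    rule "Clean up members pending termination"
    dialect "mvel"
    when
        ctxt : ClusterLevelPartitionContext()
        // eval() around log.debug traces the condition evaluation itself
        eval(log.debug("[instance-cleanup-check] [partition] " + ctxt.getPartitionId()
                + " Pending termination member count: " + ctxt.getTerminationPendingMembers().size()))
        eval(ctxt.getTerminationPendingMembers().size() > 0)
        member : MemberContext() from ctxt.getTerminationPendingMembers()
    then
        delegator.delegateInstanceCleanup(member.getMemberId());
    end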
http://git-wip-us.apache.org/repos/asf/stratos/blob/6743601d/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl
----------------------------------------------------------------------
diff --git a/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl b/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl
index 0ece190..f37c773 100644
--- a/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl
+++ b/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl
@@ -100,18 +100,18 @@ dialect "mvel"
scaleUp : Boolean() from (activeInstancesCount < numberOfRequiredInstances)
scaleDown : Boolean() from (activeInstancesCount > numberOfRequiredInstances)
- eval(log.debug("[scaling] " + " [cluster] " + clusterId + " RIF Resetted?: " + rifReset))
- eval(log.debug("[scaling] " + " [cluster] " + clusterId + " RIF predicted value: " + rifPredictedValue))
- eval(log.debug("[scaling] " + " [cluster] " + clusterId + " RIF threshold: " + rifThreshold))
+ eval(log.debug("[scaling] " + "[cluster] " + clusterId + " RIF Resetted?: " + rifReset))
+ eval(log.debug("[scaling] " + "[cluster] " + clusterId + " RIF predicted value: " + rifPredictedValue))
+ eval(log.debug("[scaling] " + "[cluster] " + clusterId + " RIF threshold: " + rifThreshold))
- eval(log.debug("[scaling] " + " [cluster] " + clusterId + " MC predicted value: " + mcPredictedValue))
- eval(log.debug("[scaling] " + " [cluster] " + clusterId + " MC threshold: " + mcThreshold))
+ eval(log.debug("[scaling] " + "[cluster] " + clusterId + " MC predicted value: " + mcPredictedValue))
+ eval(log.debug("[scaling] " + "[cluster] " + clusterId + " MC threshold: " + mcThreshold))
- eval(log.debug("[scaling] " + " [cluster] " + clusterId + " LA predicted value: " + laPredictedValue))
- eval(log.debug("[scaling] " + " [cluster] " + clusterId + " LA threshold: " + laThreshold))
+ eval(log.debug("[scaling] " + "[cluster] " + clusterId + " LA predicted value: " + laPredictedValue))
+ eval(log.debug("[scaling] " + "[cluster] " + clusterId + " LA threshold: " + laThreshold))
- eval(log.debug("[scaling] " + " [cluster] " + clusterId + " Scale-up action: " + scaleUp))
- eval(log.debug("[scaling] " + " [cluster] " + clusterId + " Scale-down action: " + scaleDown))
+ eval(log.debug("[scaling] " + "[cluster] " + clusterId + " Scale-up action: " + scaleUp))
+ eval(log.debug("[scaling] " + "[cluster] " + clusterId + " Scale-down action: " + scaleDown))
then
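The scaling.drl hunks fix doubled spacing rather than tags: concatenating "[scaling] " with " [cluster] " leaves two spaces between the brackets, while "[scaling] " with "[cluster] " leaves one. A before/after sketch of the resulting messages (the cluster id and threshold are placeholder values):

    // before: "[scaling] " + " [cluster] " + clusterId + " RIF threshold: " + rifThreshold
    //   ->    [scaling]  [cluster] php-cluster-1 RIF threshold: 50.0    (double space)
    // after:  "[scaling] " + "[cluster] " + clusterId + " RIF threshold: " + rifThreshold
    //   ->    [scaling] [cluster] php-cluster-1 RIF threshold: 50.0     (single space)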