Updated Branches:
  refs/heads/master b3359961c -> 56878f061

Added more logs to scaling.drl file


Project: http://git-wip-us.apache.org/repos/asf/incubator-stratos/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-stratos/commit/56878f06
Tree: http://git-wip-us.apache.org/repos/asf/incubator-stratos/tree/56878f06
Diff: http://git-wip-us.apache.org/repos/asf/incubator-stratos/diff/56878f06

Branch: refs/heads/master
Commit: 56878f061c3cc432b3a5bfc98ed86fd30cab3961
Parents: b335996
Author: Imesh Gunaratne <[email protected]>
Authored: Fri Dec 27 10:35:30 2013 +0530
Committer: Imesh Gunaratne <[email protected]>
Committed: Fri Dec 27 10:35:30 2013 +0530

----------------------------------------------------------------------
 .../distribution/src/main/conf/scaling.drl      | 64 ++++++++++++++------
 1 file changed, 47 insertions(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-stratos/blob/56878f06/products/autoscaler/modules/distribution/src/main/conf/scaling.drl
----------------------------------------------------------------------
diff --git a/products/autoscaler/modules/distribution/src/main/conf/scaling.drl 
b/products/autoscaler/modules/distribution/src/main/conf/scaling.drl
index 3782d66..eaf225a 100644
--- a/products/autoscaler/modules/distribution/src/main/conf/scaling.drl
+++ b/products/autoscaler/modules/distribution/src/main/conf/scaling.drl
@@ -56,7 +56,7 @@ rule "Scaler-up Rule"
 dialect "mvel"
        when
         $networkPartitionContext : NetworkPartitionContext ()
-        eval(log.debug("Running scale up rule: [network-partition] " + 
$networkPartitionContext.getId()))
+        eval(log.debug("Running scale up rule: [network-partition] " + 
$networkPartitionContext.getId() + " [cluster] " + clusterId))
         $loadThresholds : LoadThresholds() from  
autoscalePolicy.getLoadThresholds()
            algorithmName : String() from 
$networkPartitionContext.getPartitionAlgorithm();
         eval(log.debug("algorithm name " + algorithmName))
@@ -68,14 +68,19 @@ dialect "mvel"
         averageLimit : Float() from  
$loadThresholds.getRequestsInFlight().getAverage()
 
         partition :  Partition() from 
autoscaleAlgorithm.getNextScaleUpPartition($networkPartitionContext, clusterId)
+        eval(log.debug("[scale-up] [network-partition] " + 
$networkPartitionContext.getId() + " Partition is null: " + (partition == 
null)))
         eval(partition != null)
            predictedValue : Double() from 
$delegator.getPredictedValueForNextMinute(lbStatAverage, lbStatGradient, 
lbStatSecondDerivative, 1)
-        eval(log.debug("predicted value: " + predictedValue))
-        eval(log.debug("average limit: " + averageLimit))
-        eval(log.debug("scale-up factor: " + $delegator.SCALE_UP_FACTOR))
-        eval(predictedValue > averageLimit * $delegator.SCALE_UP_FACTOR)
+        scaleUpAction : Boolean() from (predictedValue > averageLimit * 
$delegator.SCALE_UP_FACTOR)
+
+        eval(log.debug("[scale-up] [network-partition] " + 
$networkPartitionContext.getId() + " [partition]" + partition.getId() + " 
[cluster] " + clusterId + " Predicted value: " + predictedValue))
+        eval(log.debug("[scale-up] [network-partition] " + 
$networkPartitionContext.getId() + " [partition]" + partition.getId() + " 
[cluster] " + clusterId + " Average limit: " + averageLimit))
+        eval(log.debug("[scale-up] [network-partition] " + 
$networkPartitionContext.getId() + " [partition]" + partition.getId() + " 
[cluster] " + clusterId + " Scale-up factor: " + $delegator.SCALE_UP_FACTOR))
+        eval(log.debug("[scale-up] [network-partition] " + 
$networkPartitionContext.getId() + " [partition]" + partition.getId() + " 
[cluster] " + clusterId + " Scale-up action: " + scaleUpAction))
+
+        eval(scaleUpAction)
        then
-        log.debug("scaling up");
+        log.debug("[scale-up] [network-partition] " + 
$networkPartitionContext.getId() + " [partition]" + partition.getId() + " 
[cluster] " + clusterId + " Scaling up cluster");
         
$delegator.delegateSpawn($networkPartitionContext.getPartitionCtxt(partition.getId()),
 clusterId, lbRef);
 end
 
@@ -84,7 +89,7 @@ rule "Scaler-down Rule"
 dialect "mvel"
        when
         $networkPartitionContext : NetworkPartitionContext ()
-        eval(log.debug("Running scale down rule: [network-partition] " + 
$networkPartitionContext.getId()))
+        eval(log.debug("Running scale down rule: [network-partition] " + 
$networkPartitionContext.getId() + " [cluster] " + clusterId))
         $loadThresholds : LoadThresholds() from  
autoscalePolicy.getLoadThresholds()
         autoscaleAlgorithm : AutoscaleAlgorithm() from  
$delegator.getAutoscaleAlgorithm($networkPartitionContext.getPartitionAlgorithm())
         lbStatAverage : Float() from  
$networkPartitionContext.getAverageRequestsInFlight()
@@ -93,12 +98,18 @@ dialect "mvel"
         averageLimit : Float() from  
$loadThresholds.getRequestsInFlight().getAverage()
 
         partition :  Partition() from 
autoscaleAlgorithm.getNextScaleDownPartition($networkPartitionContext, 
clusterId)
+        eval(log.debug("[scale-down] [network-partition] " + 
$networkPartitionContext.getId() + " Partition is null: " + (partition == 
null)))
         eval(partition != null)
         predictedValue :  Float() from 
$delegator.getPredictedValueForNextMinute(lbStatAverage, lbStatGradient, 
lbStatSecondDerivative, 1)
-        eval(log.debug("predicted value: " + predictedValue))
-        eval(log.debug("average limit: " + averageLimit))
-        eval(log.debug("scale-down factor: " + $delegator.SCALE_DOWN_FACTOR))
-        eval(predictedValue < averageLimit * $delegator.SCALE_DOWN_FACTOR)
+        scaleDown : Boolean() from (predictedValue < averageLimit * 
$delegator.SCALE_DOWN_FACTOR)
+
+        eval(log.debug("[scale-down] [network-partition] " + 
$networkPartitionContext.getId() + " [partition]" + partition.getId() + " 
[cluster] " + clusterId + " Predicted value: " + predictedValue))
+        eval(log.debug("[scale-down] [network-partition] " + 
$networkPartitionContext.getId() + " [partition]" + partition.getId() + " 
[cluster] " + clusterId + " Average limit: " + averageLimit))
+        eval(log.debug("[scale-down] [network-partition] " + 
$networkPartitionContext.getId() + " [partition]" + partition.getId() + " 
[cluster] " + clusterId + " Scale-down factor: " + 
$delegator.SCALE_DOWN_FACTOR))
+        eval(log.debug("[scale-down] [network-partition] " + 
$networkPartitionContext.getId() + " [partition]" + partition.getId() + " 
[cluster] " + clusterId + " Scale-down action: " + scaleDown))
+
+        eval(scaleDown)
        then
 
         MemberStatsContext selectedMemberStatsContext = null;
@@ -108,12 +119,25 @@ dialect "mvel"
             for(MemberStatsContext memberStatsContext: 
partitionContext.getMemberStatsContexts().values()){
 
                 LoadAverage loadAverage = memberStatsContext.getLoadAverage();
+                log.debug("[scale-down] [network-partition] " + 
$networkPartitionContext.getId() + " [partition]" + partition.getId() + " 
[cluster] " + clusterId
+                    + " [member] " + memberStatsContext.getMemberId() + " Load 
average: " + loadAverage.toString());
+
                 MemoryConsumption memoryConsumption = 
memberStatsContext.getMemoryConsumption();
-                double predictedCpu
-                = 
$delegator.getPredictedValueForNextMinute(loadAverage.getAverage(),loadAverage.getGradient(),loadAverage.getSecondDerivative(),
 1);
-                double predictedMemoryConsumption
-                = 
$delegator.getPredictedValueForNextMinute(memoryConsumption.getAverage(),memoryConsumption.getGradient(),memoryConsumption.getSecondDerivative(),
 1);
+                log.debug("[scale-down] [network-partition] " + 
$networkPartitionContext.getId() + " [partition]" + partition.getId() + " 
[cluster] " + clusterId
+                    + " [member] " + memberStatsContext.getMemberId() + " 
Memory consumption: " + memoryConsumption.toString());
+
+                double predictedCpu = 
$delegator.getPredictedValueForNextMinute(loadAverage.getAverage(),loadAverage.getGradient(),loadAverage.getSecondDerivative(),
 1);
+                log.debug("[scale-down] [network-partition] " + 
$networkPartitionContext.getId() + " [partition]" + partition.getId() + " 
[cluster] " + clusterId
+                    + " [member] " + memberStatsContext.getMemberId() + " 
Predicted CPU: " + predictedCpu);
+
+                double predictedMemoryConsumption = 
$delegator.getPredictedValueForNextMinute(memoryConsumption.getAverage(),memoryConsumption.getGradient(),memoryConsumption.getSecondDerivative(),
 1);
+                log.debug("[scale-down] [network-partition] " + 
$networkPartitionContext.getId() + " [partition]" + partition.getId() + " 
[cluster] " + clusterId
+                    + " [member] " + memberStatsContext.getMemberId() + " 
Predicted memory consumption: " + predictedMemoryConsumption);
+
                 double overallLoad = (predictedCpu + 
predictedMemoryConsumption) / 2;
+                log.debug("[scale-down] [network-partition] " + 
$networkPartitionContext.getId() + " [partition]" + partition.getId() + " 
[cluster] " + clusterId
+                    + " [member] " + memberStatsContext.getMemberId() + " 
Overall load: " + overallLoad);
+
                 if(!foundAValue){
                     foundAValue = true;
                     selectedMemberStatsContext = memberStatsContext;
@@ -123,11 +147,17 @@ dialect "mvel"
                     lowestOverallLoad = overallLoad;
                 }
 
-
+                if(selectedMemberStatsContext != null) {
+                    log.debug("[scale-down] [network-partition] " + 
$networkPartitionContext.getId() + " [partition]" + partition.getId() + " 
[cluster] " + clusterId
+                        + " Member with lowest overall load: " + 
selectedMemberStatsContext.getMemberId());
+                }
             }
         }
-        if(selectedMemberStatsContext != null)
+        if(selectedMemberStatsContext != null) {
+            log.debug("[scale-down] [network-partition] " + 
$networkPartitionContext.getId() + " [partition]" + partition.getId() + " 
[cluster] " + clusterId
+                        + " Terminating member: " + 
selectedMemberStatsContext.getMemberId());
             
$delegator.delegateTerminate(selectedMemberStatsContext.getMemberId());
+        }
 end
 
 

Reply via email to