Updated Branches:
  refs/heads/master b23434ee6 -> 9b9c08113

Improving cluster monitor selection for member health stat events


Project: http://git-wip-us.apache.org/repos/asf/incubator-stratos/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-stratos/commit/4095d661
Tree: http://git-wip-us.apache.org/repos/asf/incubator-stratos/tree/4095d661
Diff: http://git-wip-us.apache.org/repos/asf/incubator-stratos/diff/4095d661

Branch: refs/heads/master
Commit: 4095d661698c22ed6a7ef7728e8d12f407e700a4
Parents: 123a16b
Author: Lahiru Sandaruwan <[email protected]>
Authored: Fri Jan 10 17:37:50 2014 +0530
Committer: Lahiru Sandaruwan <[email protected]>
Committed: Fri Jan 10 17:37:50 2014 +0530

----------------------------------------------------------------------
 .../health/AutoscalerHealthStatReceiver.java    | 25 ++++++----
 .../distribution/src/main/conf/scaling.drl      | 52 ++++++++++----------
 2 files changed, 41 insertions(+), 36 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-stratos/blob/4095d661/components/org.apache.stratos.autoscaler/src/main/java/org/apache/stratos/autoscaler/message/receiver/health/AutoscalerHealthStatReceiver.java
----------------------------------------------------------------------
diff --git 
a/components/org.apache.stratos.autoscaler/src/main/java/org/apache/stratos/autoscaler/message/receiver/health/AutoscalerHealthStatReceiver.java
 
b/components/org.apache.stratos.autoscaler/src/main/java/org/apache/stratos/autoscaler/message/receiver/health/AutoscalerHealthStatReceiver.java
index 53c2541..25cbcef 100644
--- 
a/components/org.apache.stratos.autoscaler/src/main/java/org/apache/stratos/autoscaler/message/receiver/health/AutoscalerHealthStatReceiver.java
+++ 
b/components/org.apache.stratos.autoscaler/src/main/java/org/apache/stratos/autoscaler/message/receiver/health/AutoscalerHealthStatReceiver.java
@@ -957,16 +957,21 @@ public class AutoscalerHealthStatReceiver implements 
Runnable {
             }
                return null;
         }
-        AbstractMonitor monitor = 
AutoscalerContext.getInstance().getMonitor(member.getClusterId());
-        if(null == monitor){
-            monitor = 
AutoscalerContext.getInstance().getLBMonitor(member.getClusterId());
-            if(null == monitor){
-
-                if(log.isErrorEnabled()) {
-                   log.error(String.format("Cluster monitor is not available 
for : [member] %s", memberId));
-                }
-                return null;
+        String clusterId = member.getClusterId();
+
+        AutoscalerContext asCtx = AutoscalerContext.getInstance();
+        AbstractMonitor monitor;
+
+        if(asCtx.moniterExist(clusterId)){
+            monitor = asCtx.getMonitor(clusterId);
+        }else if(asCtx.lbMoniterExist(clusterId)){
+            monitor = asCtx.getLBMonitor(clusterId);
+        }else{
+            String errMsg = "A monitor is not found for this cluster";
+            if(log.isErrorEnabled()){
+                log.error(String.format("A cluster monitor is not found in 
autoscaler context [cluster] %s", clusterId));
             }
+            throw new RuntimeException(errMsg);
         }
         String networkPartitionId = findNetworkPartitionId(memberId);
         MemberStatsContext memberStatsContext = 
monitor.getNetworkPartitionCtxt(networkPartitionId)
@@ -1000,7 +1005,7 @@ public class AutoscalerHealthStatReceiver implements 
Runnable {
             }
                return null;
         }
-        
+        String clusterId = member.getClusterId();
         AbstractMonitor monitor = 
AutoscalerContext.getInstance().getMonitor(member.getClusterId());
         if(null == monitor){
 

http://git-wip-us.apache.org/repos/asf/incubator-stratos/blob/4095d661/products/autoscaler/modules/distribution/src/main/conf/scaling.drl
----------------------------------------------------------------------
diff --git a/products/autoscaler/modules/distribution/src/main/conf/scaling.drl 
b/products/autoscaler/modules/distribution/src/main/conf/scaling.drl
index 7036d54..4656b8a 100644
--- a/products/autoscaler/modules/distribution/src/main/conf/scaling.drl
+++ b/products/autoscaler/modules/distribution/src/main/conf/scaling.drl
@@ -89,26 +89,26 @@ dialect "mvel"
 
         scaleUpAction : Boolean() from ((rifReset && (rifPredictedValue > 
rifAverageLimit * $delegator.SCALE_UP_FACTOR)) || (mcReset && (mcPredictedValue 
> mcAverageLimit * $delegator.SCALE_UP_FACTOR)) || (laReset && 
(laPredictedValue > laAverageLimit * $delegator.SCALE_UP_FACTOR)))
 
-        eval(log.debug("[scale-up] [network-partition] " + 
$networkPartitionContext.getId() + " [cluster] " + clusterId + " RIF predicted 
value: " + rifPredictedValue))
-        eval(log.debug("[scale-up] [network-partition] " + 
$networkPartitionContext.getId() + " [cluster] " + clusterId + " RIF average 
limit: " + rifAverageLimit))
+        eval(log.debug("[scale-up] " + " [cluster] " + clusterId + " RIF 
predicted value: " + rifPredictedValue))
+        eval(log.debug("[scale-up] " + " [cluster] " + clusterId + " RIF 
average limit: " + rifAverageLimit))
 
-        eval(log.debug("[scale-up] [network-partition] " + 
$networkPartitionContext.getId() + " [cluster] " + clusterId + " MC predicted 
value: " + mcPredictedValue))
-        eval(log.debug("[scale-up] [network-partition] " + 
$networkPartitionContext.getId() + " [cluster] " + clusterId + " MC average 
limit: " + mcAverageLimit))
+        eval(log.debug("[scale-up] " + " [cluster] " + clusterId + " MC 
predicted value: " + mcPredictedValue))
+        eval(log.debug("[scale-up] " + " [cluster] " + clusterId + " MC 
average limit: " + mcAverageLimit))
 
-        eval(log.debug("[scale-up] [network-partition] " + 
$networkPartitionContext.getId() + " [cluster] " + clusterId + " LA predicted 
value: " + laPredictedValue))
-        eval(log.debug("[scale-up] [network-partition] " + 
$networkPartitionContext.getId() + " [cluster] " + clusterId + " LA Average 
limit: " + laAverageLimit))
+        eval(log.debug("[scale-up] " + " [cluster] " + clusterId + " LA 
predicted value: " + laPredictedValue))
+        eval(log.debug("[scale-up] " + " [cluster] " + clusterId + " LA 
Average limit: " + laAverageLimit))
 
-        eval(log.debug("[scale-up] [network-partition] " + 
$networkPartitionContext.getId() + " [cluster] " + clusterId + " Scale-up 
factor: " + $delegator.SCALE_UP_FACTOR))
-        eval(log.debug("[scale-up] [network-partition] " + 
$networkPartitionContext.getId() + " [cluster] " + clusterId + " Scale-up 
action: " + scaleUpAction))
+        eval(log.debug("[scale-up] " + " [cluster] " + clusterId + " Scale-up 
factor: " + $delegator.SCALE_UP_FACTOR))
+        eval(log.debug("[scale-up] " + " [cluster] " + clusterId + " Scale-up 
action: " + scaleUpAction))
 
         eval(scaleUpAction)
 
         partition :  Partition() from 
autoscaleAlgorithm.getNextScaleUpPartition($networkPartitionContext, clusterId)
-        eval(log.debug("[scale-up] [network-partition] " + 
$networkPartitionContext.getId() + " Partition is null: " + (partition == 
null)))
+        eval(log.debug("[scale-up] " + " Partition available: " + (partition 
!= null)))
         eval(partition != null)
 
        then
-        log.debug("[scale-up] [network-partition] " + 
$networkPartitionContext.getId() + " [partition] " + partition.getId() + " 
[cluster] " + clusterId + " Scaling up cluster");
+        log.debug("[scale-up] " + " [partition] " + partition.getId() + " 
[cluster] " + clusterId + " Scaling up cluster");
         
$delegator.delegateSpawn($networkPartitionContext.getPartitionCtxt(partition.getId()),
 clusterId, lbRef);
 end
 
@@ -146,23 +146,23 @@ dialect "mvel"
 
         scaleDownAction : Boolean() from ((rifReset && (rifPredictedValue < 
rifAverageLimit * $delegator.SCALE_DOWN_FACTOR)) || (mcReset && 
(mcPredictedValue < mcAverageLimit * $delegator.SCALE_DOWN_FACTOR)) || (laReset 
&& (laPredictedValue < laAverageLimit * $delegator.SCALE_DOWN_FACTOR)))
 
-        eval(log.debug("[scale-down] [network-partition] " + 
$networkPartitionContext.getId() + " [cluster] " + clusterId + " RIF predicted 
value: " + rifPredictedValue))
-        eval(log.debug("[scale-down] [network-partition] " + 
$networkPartitionContext.getId() + " [cluster] " + clusterId + " RIF average 
limit: " + rifAverageLimit))
+        eval(log.debug("[scale-down] " + " [cluster] " + clusterId + " RIF 
predicted value: " + rifPredictedValue))
+        eval(log.debug("[scale-down] " + " [cluster] " + clusterId + " RIF 
average limit: " + rifAverageLimit))
 
-        eval(log.debug("[scale-down] [network-partition] " + 
$networkPartitionContext.getId() + " [cluster] " + clusterId + " MC predicted 
value: " + mcPredictedValue))
-        eval(log.debug("[scale-down] [network-partition] " + 
$networkPartitionContext.getId() + " [cluster] " + clusterId + " MC average 
limit: " + mcAverageLimit))
+        eval(log.debug("[scale-down] " + " [cluster] " + clusterId + " MC 
predicted value: " + mcPredictedValue))
+        eval(log.debug("[scale-down] " + " [cluster] " + clusterId + " MC 
average limit: " + mcAverageLimit))
 
-        eval(log.debug("[scale-down] [network-partition] " + 
$networkPartitionContext.getId() + " [cluster] " + clusterId + " LA predicted 
value: " + laPredictedValue))
-        eval(log.debug("[scale-down] [network-partition] " + 
$networkPartitionContext.getId() + " [cluster] " + clusterId + " LA Average 
limit: " + laAverageLimit))
+        eval(log.debug("[scale-down] " + " [cluster] " + clusterId + " LA 
predicted value: " + laPredictedValue))
+        eval(log.debug("[scale-down] " + " [cluster] " + clusterId + " LA 
Average limit: " + laAverageLimit))
 
-        eval(log.debug("[scale-down] [network-partition] " + 
$networkPartitionContext.getId() + " [cluster] " + clusterId + " Scale-down 
factor: " + $delegator.SCALE_DOWN_FACTOR))
-        eval(log.debug("[scale-down] [network-partition] " + 
$networkPartitionContext.getId() + " [cluster] " + clusterId + " Scale-down 
action: " + scaleDownAction))
+        eval(log.debug("[scale-down] " + " [cluster] " + clusterId + " 
Scale-down factor: " + $delegator.SCALE_DOWN_FACTOR))
+        eval(log.debug("[scale-down] " + " [cluster] " + clusterId + " 
Scale-down action: " + scaleDownAction))
 
         eval(scaleDownAction)
 
         partition :  Partition() from 
autoscaleAlgorithm.getNextScaleDownPartition($networkPartitionContext, 
clusterId)
         eval(partition != null)
-        eval(log.debug("[scale-down] [network-partition] " + 
$networkPartitionContext.getId() + " Partition is null: " + (partition == 
null)))
+        eval(log.debug("[scale-down] " + " Partition available: " + (partition 
!= null)))
 
        then
 
@@ -173,23 +173,23 @@ dialect "mvel"
             for(MemberStatsContext memberStatsContext: 
partitionContext.getMemberStatsContexts().values()){
 
                 LoadAverage loadAverage = memberStatsContext.getLoadAverage();
-                log.debug("[scale-down] [network-partition] " + 
$networkPartitionContext.getId() + " [cluster] "
+                log.debug("[scale-down] " + " [cluster] "
                     + clusterId + " [member] " + 
memberStatsContext.getMemberId() + " Load average: " + loadAverage);
 
                 MemoryConsumption memoryConsumption = 
memberStatsContext.getMemoryConsumption();
-                log.debug("[scale-down] [network-partition] " + 
$networkPartitionContext.getId() + " [partition] " + partition.getId() + " 
[cluster] "
+                log.debug("[scale-down] " + " [partition] " + 
partition.getId() + " [cluster] "
                     + clusterId + " [member] " + 
memberStatsContext.getMemberId() + " Memory consumption: " + memoryConsumption);
 
                 double predictedCpu = 
$delegator.getPredictedValueForNextMinute(loadAverage.getAverage(),loadAverage.getGradient(),loadAverage.getSecondDerivative(),
 1);
-                log.debug("[scale-down] [network-partition] " + 
$networkPartitionContext.getId() + " [partition] " + partition.getId() + " 
[cluster] "
+                log.debug("[scale-down] " + " [partition] " + 
partition.getId() + " [cluster] "
                     + clusterId + " [member] " + 
memberStatsContext.getMemberId() + " Predicted CPU: " + predictedCpu);
 
                 double predictedMemoryConsumption = 
$delegator.getPredictedValueForNextMinute(memoryConsumption.getAverage(),memoryConsumption.getGradient(),memoryConsumption.getSecondDerivative(),
 1);
-                log.debug("[scale-down] [network-partition] " + 
$networkPartitionContext.getId() + " [partition] " + partition.getId() + " 
[cluster] "
+                log.debug("[scale-down] " + " [partition] " + 
partition.getId() + " [cluster] "
                     + clusterId + " [member] " + 
memberStatsContext.getMemberId() + " Predicted memory consumption: " + 
predictedMemoryConsumption);
 
                 double overallLoad = (predictedCpu + 
predictedMemoryConsumption) / 2;
-                log.debug("[scale-down] [network-partition] " + 
$networkPartitionContext.getId() + " [partition] " + partition.getId() + " 
[cluster] "
+                log.debug("[scale-down] " + " [partition] " + 
partition.getId() + " [cluster] "
                     + clusterId + " [member] " + 
memberStatsContext.getMemberId() + " Overall load: " + overallLoad);
 
                 if(!foundAValue){
@@ -202,13 +202,13 @@ dialect "mvel"
                 }
 
                 if(selectedMemberStatsContext != null) {
-                    log.debug("[scale-down] [network-partition] " + 
$networkPartitionContext.getId() + " [partition] " + partition.getId() + " 
[cluster] "
+                    log.debug("[scale-down] " + " [partition] " + 
partition.getId() + " [cluster] "
                         + clusterId + " Member with lowest overall load: " + 
memberStatsContext.getMemberId());
                 }
             }
         }
         if(selectedMemberStatsContext != null) {
-            log.debug("[scale-down] [network-partition] " + 
$networkPartitionContext.getId() + " [partition] " + partition.getId() + " 
[cluster] " + clusterId
+            log.debug("[scale-down] " + " [partition] " + partition.getId() + 
" [cluster] " + clusterId
                 + " Terminating member: " + 
selectedMemberStatsContext.getMemberId());
             
$delegator.delegateTerminate(selectedMemberStatsContext.getMemberId());
         }

Reply via email to