Updated Branches:
  refs/heads/master a7109affe -> b48bfcd96

Delay scale down by a counter (configurable)


Project: http://git-wip-us.apache.org/repos/asf/incubator-stratos/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-stratos/commit/b48bfcd9
Tree: http://git-wip-us.apache.org/repos/asf/incubator-stratos/tree/b48bfcd9
Diff: http://git-wip-us.apache.org/repos/asf/incubator-stratos/diff/b48bfcd9

Branch: refs/heads/master
Commit: b48bfcd9644b0af5d9f2d1b65933bd340800384e
Parents: a7109af
Author: Lahiru Sandaruwan <[email protected]>
Authored: Fri Jan 17 12:48:37 2014 +0530
Committer: Lahiru Sandaruwan <[email protected]>
Committed: Fri Jan 17 12:48:37 2014 +0530

----------------------------------------------------------------------
 .../autoscaler/NetworkPartitionContext.java     | 19 ++++
 .../distribution/src/main/conf/scaling.drl      | 95 +++++++++++---------
 2 files changed, 70 insertions(+), 44 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-stratos/blob/b48bfcd9/components/org.apache.stratos.autoscaler/src/main/java/org/apache/stratos/autoscaler/NetworkPartitionContext.java
----------------------------------------------------------------------
diff --git 
a/components/org.apache.stratos.autoscaler/src/main/java/org/apache/stratos/autoscaler/NetworkPartitionContext.java
 
b/components/org.apache.stratos.autoscaler/src/main/java/org/apache/stratos/autoscaler/NetworkPartitionContext.java
index 9bb5c91..81d1b48 100644
--- 
a/components/org.apache.stratos.autoscaler/src/main/java/org/apache/stratos/autoscaler/NetworkPartitionContext.java
+++ 
b/components/org.apache.stratos.autoscaler/src/main/java/org/apache/stratos/autoscaler/NetworkPartitionContext.java
@@ -38,6 +38,9 @@ public class NetworkPartitionContext implements Serializable{
        private static final Log log = 
LogFactory.getLog(NetworkPartitionContext.class);
     private static final long serialVersionUID = 572769304374110159L;
     private final String id;
+    private boolean scaleDownAllowed = false;
+    private int scaleDownWaitCount = 5; //TODO get from a config
+    private int scaleDownRequestsCount = 0;
 
 //    private String defaultLbClusterId;
 //
@@ -490,6 +493,22 @@ public class NetworkPartitionContext implements 
Serializable{
         return 0;
     }
 
+    public int getScaleDownWaitCount() {
+        return scaleDownWaitCount;
+    }
+
+    public int getScaleDownRequestsCount() {
+        return scaleDownRequestsCount;
+    }
+
+    public void resetScaleDownRequestsCount() {
+        this.scaleDownRequestsCount = 0;
+    }
+    public void increaseScaleDownRequestsCount() {
+        this.scaleDownRequestsCount += 1;
+    }
+
+
 //    public void setPartitions(Partition[] partitions) {
 //        this.partitions = partitions;
 //        for (Partition partition: partitions){

http://git-wip-us.apache.org/repos/asf/incubator-stratos/blob/b48bfcd9/products/autoscaler/modules/distribution/src/main/conf/scaling.drl
----------------------------------------------------------------------
diff --git a/products/autoscaler/modules/distribution/src/main/conf/scaling.drl 
b/products/autoscaler/modules/distribution/src/main/conf/scaling.drl
index 39a27d3..29b7c55 100644
--- a/products/autoscaler/modules/distribution/src/main/conf/scaling.drl
+++ b/products/autoscaler/modules/distribution/src/main/conf/scaling.drl
@@ -107,6 +107,7 @@ dialect "mvel"
        then
         if(scaleUp){
 
+            $networkPartitionContext.resetScaleDownRequestsCount();
             Partition partition =  
autoscaleAlgorithm.getNextScaleUpPartition($networkPartitionContext, clusterId);
             if(partition != null){
                 log.info("[scale-up] Partition available, hence trying to 
spawn an instance to scale up!" );
@@ -115,52 +116,58 @@ dialect "mvel"
             }
         } else if(scaleDown){
 
-            MemberStatsContext selectedMemberStatsContext = null;
-            double lowestOverallLoad = 0.0;
-            boolean foundAValue = false;
-            Partition partition =  
autoscaleAlgorithm.getNextScaleDownPartition($networkPartitionContext, 
clusterId);
-            if(partition != null){
-                log.info("[scaling down] Partition available to scale down ");
-                log.debug("[scaling down] " + " [partition] " + 
partition.getId() + " [cluster] " + clusterId);
-                partitionContext = 
$networkPartitionContext.getPartitionCtxt(partition.getId());
-
-                for(MemberStatsContext memberStatsContext: 
partitionContext.getMemberStatsContexts().values()){
-
-                    LoadAverage loadAverage = 
memberStatsContext.getLoadAverage();
-                    log.debug("[scale-down] " + " [cluster] "
-                        + clusterId + " [member] " + 
memberStatsContext.getMemberId() + " Load average: " + loadAverage);
-
-                    MemoryConsumption memoryConsumption = 
memberStatsContext.getMemoryConsumption();
-                    log.debug("[scale-down] " + " [partition] " + 
partition.getId() + " [cluster] "
-                        + clusterId + " [member] " + 
memberStatsContext.getMemberId() + " Memory consumption: " + memoryConsumption);
-
-                    double predictedCpu = 
$delegator.getPredictedValueForNextMinute(loadAverage.getAverage(),loadAverage.getGradient(),loadAverage.getSecondDerivative(),
 1);
-                    log.debug("[scale-down] " + " [partition] " + 
partition.getId() + " [cluster] "
-                        + clusterId + " [member] " + 
memberStatsContext.getMemberId() + " Predicted CPU: " + predictedCpu);
-
-                    double predictedMemoryConsumption = 
$delegator.getPredictedValueForNextMinute(memoryConsumption.getAverage(),memoryConsumption.getGradient(),memoryConsumption.getSecondDerivative(),
 1);
-                    log.debug("[scale-down] " + " [partition] " + 
partition.getId() + " [cluster] "
-                        + clusterId + " [member] " + 
memberStatsContext.getMemberId() + " Predicted memory consumption: " + 
predictedMemoryConsumption);
-
-                    double overallLoad = (predictedCpu + 
predictedMemoryConsumption) / 2;
-                    log.debug("[scale-down] " + " [partition] " + 
partition.getId() + " [cluster] "
-                        + clusterId + " [member] " + 
memberStatsContext.getMemberId() + " Overall load: " + overallLoad);
-
-                    if(!foundAValue){
-                        foundAValue = true;
-                        selectedMemberStatsContext = memberStatsContext;
-                        lowestOverallLoad = overallLoad;
-                    } else if(overallLoad < lowestOverallLoad){
-                        selectedMemberStatsContext = memberStatsContext;
-                        lowestOverallLoad = overallLoad;
+            if($networkPartitionContext.getScaleDownRequestsCount() > 
$networkPartitionContext.getScaleDownWaitCount()){
+
+                $networkPartitionContext.resetScaleDownRequestsCount();
+                MemberStatsContext selectedMemberStatsContext = null;
+                double lowestOverallLoad = 0.0;
+                boolean foundAValue = false;
+                Partition partition =  
autoscaleAlgorithm.getNextScaleDownPartition($networkPartitionContext, 
clusterId);
+                if(partition != null){
+                    log.info("[scaling down] Partition available to scale down 
");
+                    log.debug("[scaling down] " + " [partition] " + 
partition.getId() + " [cluster] " + clusterId);
+                    partitionContext = 
$networkPartitionContext.getPartitionCtxt(partition.getId());
+
+                    for(MemberStatsContext memberStatsContext: 
partitionContext.getMemberStatsContexts().values()){
+
+                        LoadAverage loadAverage = 
memberStatsContext.getLoadAverage();
+                        log.debug("[scale-down] " + " [cluster] "
+                            + clusterId + " [member] " + 
memberStatsContext.getMemberId() + " Load average: " + loadAverage);
+
+                        MemoryConsumption memoryConsumption = 
memberStatsContext.getMemoryConsumption();
+                        log.debug("[scale-down] " + " [partition] " + 
partition.getId() + " [cluster] "
+                            + clusterId + " [member] " + 
memberStatsContext.getMemberId() + " Memory consumption: " + memoryConsumption);
+
+                        double predictedCpu = 
$delegator.getPredictedValueForNextMinute(loadAverage.getAverage(),loadAverage.getGradient(),loadAverage.getSecondDerivative(),
 1);
+                        log.debug("[scale-down] " + " [partition] " + 
partition.getId() + " [cluster] "
+                            + clusterId + " [member] " + 
memberStatsContext.getMemberId() + " Predicted CPU: " + predictedCpu);
+
+                        double predictedMemoryConsumption = 
$delegator.getPredictedValueForNextMinute(memoryConsumption.getAverage(),memoryConsumption.getGradient(),memoryConsumption.getSecondDerivative(),
 1);
+                        log.debug("[scale-down] " + " [partition] " + 
partition.getId() + " [cluster] "
+                            + clusterId + " [member] " + 
memberStatsContext.getMemberId() + " Predicted memory consumption: " + 
predictedMemoryConsumption);
+
+                        double overallLoad = (predictedCpu + 
predictedMemoryConsumption) / 2;
+                        log.debug("[scale-down] " + " [partition] " + 
partition.getId() + " [cluster] "
+                            + clusterId + " [member] " + 
memberStatsContext.getMemberId() + " Overall load: " + overallLoad);
+
+                        if(!foundAValue){
+                            foundAValue = true;
+                            selectedMemberStatsContext = memberStatsContext;
+                            lowestOverallLoad = overallLoad;
+                        } else if(overallLoad < lowestOverallLoad){
+                            selectedMemberStatsContext = memberStatsContext;
+                            lowestOverallLoad = overallLoad;
+                        }
                     }
-                }
-                if(selectedMemberStatsContext != null) {
-                    log.info("[scale-down] Trying to terminating an instace to 
scale down!" );
-                    log.debug("[scale-down] " + " [partition] " + 
partition.getId() + " [cluster] "
-                        + clusterId + " Member with lowest overall load: " + 
selectedMemberStatsContext.getMemberId());
+                    if(selectedMemberStatsContext != null) {
+                        log.info("[scale-down] Trying to terminating an 
instace to scale down!" );
+                        log.debug("[scale-down] " + " [partition] " + 
partition.getId() + " [cluster] "
+                            + clusterId + " Member with lowest overall load: " 
+ selectedMemberStatsContext.getMemberId());
 
-                    $delegator.delegateTerminate(partitionContext, 
selectedMemberStatsContext.getMemberId());
+                        $delegator.delegateTerminate(partitionContext, 
selectedMemberStatsContext.getMemberId());
+                    }
+                } else{
+                    $networkPartitionContext.increaseScaleDownRequestsCount();
                 }
             }
         }  else{

Reply via email to