Repository: stratos
Updated Branches:
  refs/heads/master 8742bac93 -> d5511c204


Fixing group scale down: while the group is above its minimum instance count, scale down gradually via a dependent ScalingEvent; emit ScalingDownBeyondMinEvent only once the minimum is reached.


Project: http://git-wip-us.apache.org/repos/asf/stratos/repo
Commit: http://git-wip-us.apache.org/repos/asf/stratos/commit/d5511c20
Tree: http://git-wip-us.apache.org/repos/asf/stratos/tree/d5511c20
Diff: http://git-wip-us.apache.org/repos/asf/stratos/diff/d5511c20

Branch: refs/heads/master
Commit: d5511c204a4247b4abcd43f9d9b04cf7c7c8799b
Parents: 22f46e0
Author: reka <[email protected]>
Authored: Mon Dec 22 17:39:30 2014 +0530
Committer: reka <[email protected]>
Committed: Mon Dec 22 17:45:22 2014 +0530

----------------------------------------------------------------------
 .../monitor/component/GroupMonitor.java         | 44 +++++++++++++++++---
 1 file changed, 38 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/stratos/blob/d5511c20/components/org.apache.stratos.autoscaler/src/main/java/org/apache/stratos/autoscaler/monitor/component/GroupMonitor.java
----------------------------------------------------------------------
diff --git a/components/org.apache.stratos.autoscaler/src/main/java/org/apache/stratos/autoscaler/monitor/component/GroupMonitor.java b/components/org.apache.stratos.autoscaler/src/main/java/org/apache/stratos/autoscaler/monitor/component/GroupMonitor.java
index c8a59cf..750ec7b 100644
--- a/components/org.apache.stratos.autoscaler/src/main/java/org/apache/stratos/autoscaler/monitor/component/GroupMonitor.java
+++ b/components/org.apache.stratos.autoscaler/src/main/java/org/apache/stratos/autoscaler/monitor/component/GroupMonitor.java
@@ -160,10 +160,23 @@ public class GroupMonitor extends ParentComponentMonitor {
         //all the children sent the scale down only, it will try to scale down
         if (allChildrenScaleDown) {
             if (hasScalingDependents) {
-                //Parent has to handle this scale down as by dependent scale down
-                ScalingDownBeyondMinEvent newScalingDownBeyondMinEvent = new ScalingDownBeyondMinEvent(this.id,
-                        nwPartitionContext.getId(), instanceContext.getParentInstanceId());
-                this.parent.onChildScalingDownBeyondMinEvent(newScalingDownBeyondMinEvent);
+                if (nwPartitionContext.getNonTerminatedInstancesCount() >
+                        ((GroupLevelNetworkPartitionContext)
+                                nwPartitionContext).getMinInstanceCount() ) {
+                    //Will scale down based on dependent manner
+                    float minInstances = ((GroupLevelNetworkPartitionContext)
+                            nwPartitionContext).getMinInstanceCount();
+                    float factor = (nwPartitionContext.getNonTerminatedInstancesCount() - 1)/minInstances;
+                    ScalingEvent scalingEvent = new ScalingEvent(this.id, nwPartitionContext.getId(),
+                                                        instanceContext.getId(),factor);
+                    this.parent.onChildScalingEvent(scalingEvent);
+                } else {
+                    //Parent has to handle this scale down as by dependent scale down
+                    ScalingDownBeyondMinEvent newScalingDownBeyondMinEvent = new ScalingDownBeyondMinEvent(this.id,
+                            nwPartitionContext.getId(), instanceContext.getParentInstanceId());
+                    this.parent.onChildScalingDownBeyondMinEvent(newScalingDownBeyondMinEvent);
+                }
+
             } else {
                 if (groupScalingEnabled) {
                     if (nwPartitionContext.getNonTerminatedInstancesCount() >
@@ -425,10 +438,29 @@ public class GroupMonitor extends ParentComponentMonitor {
         //Parent notification always brings up new group instances in order to keep the ratio.
         String networkPartitionId = scalingEvent.getNetworkPartitionId();
         final String parentInstanceId = scalingEvent.getInstanceId();
-
         final NetworkPartitionContext networkPartitionContext = this.networkPartitionCtxts.
                 get(networkPartitionId);
-        createGroupInstanceOnScaling(networkPartitionContext, parentInstanceId);
+
+        float factor = scalingEvent.getFactor();
+        int currentInstances = networkPartitionContext.getNonTerminatedInstancesCount();
+        float requiredInstances = factor * ((GroupLevelNetworkPartitionContext)
+                                networkPartitionContext).getMinInstanceCount();
+        float additionalInstances = requiredInstances - currentInstances;
+        if(additionalInstances >= 1) {
+            createGroupInstanceOnScaling(networkPartitionContext, parentInstanceId);
+        } else {
+            //have to scale down
+            if(networkPartitionContext.getPendingInstancesCount() != 0) {
+                ApplicationBuilder.handleGroupTerminatingEvent(appId, this.id,
+                        networkPartitionContext.getPendingInstances().get(0).getId());
+
+            } else {
+                List<InstanceContext> activeInstances = networkPartitionContext.getActiveInstances();
+                ApplicationBuilder.handleGroupTerminatingEvent(appId, this.id,
+                        activeInstances.get(activeInstances.size() - 1).toString());
+            }
+        }
+
 
     }
 

Reply via email to