Repository: stratos
Updated Branches:
  refs/heads/master a9c0bb537 -> 412d5c8da


Inject cluster instance context into scaling.drl instead of network partition
context


Project: http://git-wip-us.apache.org/repos/asf/stratos/repo
Commit: http://git-wip-us.apache.org/repos/asf/stratos/commit/c9f1d76b
Tree: http://git-wip-us.apache.org/repos/asf/stratos/tree/c9f1d76b
Diff: http://git-wip-us.apache.org/repos/asf/stratos/diff/c9f1d76b

Branch: refs/heads/master
Commit: c9f1d76b8be924a0f1575e7536eb853b49c1a2a7
Parents: a9c0bb5
Author: Lahiru Sandaruwan <[email protected]>
Authored: Wed Dec 3 17:36:30 2014 +0530
Committer: Lahiru Sandaruwan <[email protected]>
Committed: Wed Dec 3 17:36:30 2014 +0530

----------------------------------------------------------------------
 .../monitor/cluster/VMClusterMonitor.java         |  4 +++-
 .../distribution/src/main/conf/drools/scaling.drl | 18 +++++++++---------
 2 files changed, 12 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/stratos/blob/c9f1d76b/components/org.apache.stratos.autoscaler/src/main/java/org/apache/stratos/autoscaler/monitor/cluster/VMClusterMonitor.java
----------------------------------------------------------------------
diff --git 
a/components/org.apache.stratos.autoscaler/src/main/java/org/apache/stratos/autoscaler/monitor/cluster/VMClusterMonitor.java
 
b/components/org.apache.stratos.autoscaler/src/main/java/org/apache/stratos/autoscaler/monitor/cluster/VMClusterMonitor.java
index c5b0a8e..5162d4c 100644
--- 
a/components/org.apache.stratos.autoscaler/src/main/java/org/apache/stratos/autoscaler/monitor/cluster/VMClusterMonitor.java
+++ 
b/components/org.apache.stratos.autoscaler/src/main/java/org/apache/stratos/autoscaler/monitor/cluster/VMClusterMonitor.java
@@ -247,6 +247,8 @@ public class VMClusterMonitor extends 
AbstractClusterMonitor {
                                 
getScaleCheckKnowledgeSession().setGlobal("laReset", loadAverageReset);
                                 
getScaleCheckKnowledgeSession().setGlobal("isPrimary", false);
                                 
getScaleCheckKnowledgeSession().setGlobal("primaryMembers", 
primaryMemberListInClusterInstance);
+                                
getMinCheckKnowledgeSession().setGlobal("algorithmName",
+                                        
networkPartitionContext.getPartitionAlgorithm());
 
                                 if (log.isDebugEnabled()) {
                                     log.debug(String.format("Running scale 
check for network partition %s ",
@@ -255,7 +257,7 @@ public class VMClusterMonitor extends 
AbstractClusterMonitor {
                                 }
 
                                 scaleCheckFactHandle = 
AutoscalerRuleEvaluator.evaluateScaleCheck(getScaleCheckKnowledgeSession()
-                                        , scaleCheckFactHandle, 
networkPartitionContext);
+                                        , scaleCheckFactHandle, 
instanceContext);
 
                                 instanceContext.setRifReset(false);
                                 
instanceContext.setMemoryConsumptionReset(false);

http://git-wip-us.apache.org/repos/asf/stratos/blob/c9f1d76b/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl
----------------------------------------------------------------------
diff --git 
a/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl 
b/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl
index 1433a46..8523088 100644
--- a/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl
+++ b/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl
@@ -42,6 +42,7 @@ import 
org.apache.stratos.autoscaler.rule.AutoscalerRuleEvaluator;
 import 
org.apache.stratos.autoscaler.pojo.policy.deployment.partition.network.NetworkPartition;
 import org.apache.stratos.cloud.controller.stub.domain.Partition;
 import org.apache.stratos.cloud.controller.stub.domain.MemberContext;
+import org.apache.stratos.autoscaler.context.cluster.ClusterInstanceContext;
 
 import org.apache.stratos.autoscaler.pojo.policy.autoscale.LoadAverage
 import org.apache.stratos.autoscaler.pojo.policy.autoscale.MemoryConsumption
@@ -49,7 +50,6 @@ import 
org.apache.stratos.autoscaler.pojo.policy.autoscale.MemoryConsumption
 global org.apache.stratos.autoscaler.rule.RuleLog log;
 global org.apache.stratos.autoscaler.rule.RuleTasksDelegator delegator;
 global org.apache.stratos.autoscaler.pojo.policy.autoscale.AutoscalePolicy 
autoscalePolicy;
-global org.apache.stratos.autoscaler.context.cluster.ClusterInstanceContext 
clusterInstanceContext;
 global java.lang.String clusterId;
 global java.lang.String instanceId;
 global java.lang.Boolean rifReset;
@@ -58,19 +58,19 @@ global java.lang.Boolean laReset;
 global java.lang.Boolean isPrimary;
 global java.util.List primaryMembers;
 global java.lang.Boolean arspiReset;
+global java.lang.String algorithmName;
 
 rule "Scaling Rule"
 dialect "mvel"
        when
-        networkPartitionContext : ClusterLevelNetworkPartitionContext ()
+       clusterInstanceContext : ClusterInstanceContext ()
 
         loadThresholds : LoadThresholds() from  
autoscalePolicy.getLoadThresholds()
-           algorithmName : String() from 
networkPartitionContext.getPartitionAlgorithm();
         autoscaleAlgorithm : AutoscaleAlgorithm() from  
delegator.getAutoscaleAlgorithm(algorithmName)
 
-        eval(log.debug("Running scale up rule: [network-partition] " + 
networkPartitionContext.getId() + " [cluster] " + clusterId))
-        eval(log.debug("[scaling] [network-partition] " + 
networkPartitionContext.getId() + " [cluster] " + clusterId + " Algorithm name: 
" + algorithmName))
-        eval(log.debug("[scaling] [network-partition] " + 
networkPartitionContext.getId() + " [cluster] " + clusterId + " Algorithm: " + 
autoscaleAlgorithm))
+        eval(log.debug("Running scale up rule: [network-partition] " + 
clusterInstanceContext.getNetworkPartitionId() + " [cluster] " + clusterId))
+        eval(log.debug("[scaling] [network-partition] " + 
clusterInstanceContext.getNetworkPartitionId() + " [cluster] " + clusterId + " 
Algorithm name: " + algorithmName))
+        eval(log.debug("[scaling] [network-partition] " + 
clusterInstanceContext.getNetworkPartitionId() + " [cluster] " + clusterId + " 
Algorithm: " + autoscaleAlgorithm))
 
        
         rifUpperLimit : Float() from  
loadThresholds.getRequestsInFlight().getUpperLimit()
@@ -133,7 +133,7 @@ dialect "mvel"
 
             //Calculating the factor scaling
             float factor = numberOfRequiredInstances / 
clusterInstanceContext.getMinMembers();
-            delegator.delegateScalingDependencyNotification(clusterId, 
networkPartitionContext.getId(), factor);
+            delegator.delegateScalingDependencyNotification(clusterId, 
clusterInstanceContext.getId(), factor);
 
             while(count != additionalInstances){
             ClusterLevelPartitionContext partitionContext =  
(ClusterLevelPartitionContext)autoscaleAlgorithm.getNextScaleUpPartitionContext(clusterInstanceContext.getPartitionCtxtsAsAnArray());
@@ -147,7 +147,7 @@ dialect "mvel"
         } else if(scaleDown){
 
             float factor = numberOfRequiredInstances / 
clusterInstanceContext.getMinMembers();
-            delegator.delegateScalingDependencyNotification(clusterId, 
networkPartitionContext.getId(), factor);
+            delegator.delegateScalingDependencyNotification(clusterId, 
clusterInstanceContext.getNetworkPartitionId(), factor);
 
             log.debug("[scale-down] Decided to Scale down [cluster] " + 
clusterId);
             if(clusterInstanceContext.getScaleDownRequestsCount() > 5 ){
@@ -159,7 +159,7 @@ dialect "mvel"
                 if(partitionContext != null){
                     log.info("[scale-down] Partition available to scale down 
");
 //                    log.debug("[scale-down] " + " [partition] " + 
partition.getId() + " [cluster] " + clusterId);
-//                    partitionContext = 
networkPartitionContext.getPartitionCtxt(partition.getId());
+//                    partitionContext = 
clusterInstanceContext.getPartitionCtxt(partition.getId());
 //
 
                                        // In partition context member stat 
context, all the primary members need to be

Reply via email to