Adding more information to improve the scaling log

Project: http://git-wip-us.apache.org/repos/asf/stratos/repo
Commit: http://git-wip-us.apache.org/repos/asf/stratos/commit/48d9bc16
Tree: http://git-wip-us.apache.org/repos/asf/stratos/tree/48d9bc16
Diff: http://git-wip-us.apache.org/repos/asf/stratos/diff/48d9bc16

Branch: refs/heads/master
Commit: 48d9bc16dee0fabc0ffe36457f00daa4c1967e10
Parents: 7271c76
Author: Vishanth <[email protected]>
Authored: Thu May 14 20:15:15 2015 +0530
Committer: Vishanth <[email protected]>
Committed: Thu May 14 20:15:15 2015 +0530

----------------------------------------------------------------------
 .../autoscaler/monitor/cluster/ClusterMonitor.java       |  1 +
 .../distribution/src/main/conf/drools/scaling.drl        | 11 +++++++++--
 2 files changed, 10 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/stratos/blob/48d9bc16/components/org.apache.stratos.autoscaler/src/main/java/org/apache/stratos/autoscaler/monitor/cluster/ClusterMonitor.java
----------------------------------------------------------------------
diff --git a/components/org.apache.stratos.autoscaler/src/main/java/org/apache/stratos/autoscaler/monitor/cluster/ClusterMonitor.java b/components/org.apache.stratos.autoscaler/src/main/java/org/apache/stratos/autoscaler/monitor/cluster/ClusterMonitor.java
index 750daa6..64a5ab2 100644
--- a/components/org.apache.stratos.autoscaler/src/main/java/org/apache/stratos/autoscaler/monitor/cluster/ClusterMonitor.java
+++ b/components/org.apache.stratos.autoscaler/src/main/java/org/apache/stratos/autoscaler/monitor/cluster/ClusterMonitor.java
@@ -438,6 +438,7 @@ public class ClusterMonitor extends Monitor {
                                     log.info("Executing scaling rule as statistics have been reset");
                                     ClusterContext clusterContext = ClusterMonitor.this.clusterContext;
 
+                                    instanceContext.getScaleCheckKnowledgeSession().setGlobal("applicationId", getAppId());
                                     instanceContext.getScaleCheckKnowledgeSession().setGlobal("clusterId", getClusterId());
                                     instanceContext.getScaleCheckKnowledgeSession().setGlobal("rifReset", rifReset);
                                     instanceContext.getScaleCheckKnowledgeSession().setGlobal("mcReset", memoryConsumptionReset);

http://git-wip-us.apache.org/repos/asf/stratos/blob/48d9bc16/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl
----------------------------------------------------------------------
diff --git a/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl b/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl
index 1008e52..72530da 100644
--- a/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl
+++ b/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl
@@ -46,6 +46,7 @@ import org.apache.stratos.autoscaler.pojo.policy.autoscale.MemoryConsumption
 global org.apache.stratos.autoscaler.rule.RuleLog log;
 global org.apache.stratos.autoscaler.rule.RuleTasksDelegator delegator;
 global org.apache.stratos.autoscaler.pojo.policy.autoscale.AutoscalePolicy autoscalePolicy;
+global java.lang.String applicationId;
 global java.lang.String clusterId;
 global java.lang.Boolean rifReset;
 global java.lang.Boolean mcReset;
@@ -168,7 +169,10 @@ dialect "mvel"
                         if(partitionContext != null){
 
                             log.info("[scale-up] Partition available, hence trying to spawn an instance to scale up! " +
-                                " [partition] " + partitionContext.getPartitionId() +  " [cluster] " + clusterId +
+                                " [application id] " + applicationId +
+                                " [cluster] " + clusterId + " [instance id] " + clusterInstanceContext.getId() +
+                                " [network-partition] " + clusterInstanceContext.getNetworkPartitionId() +
+                                " [partition] " + partitionContext.getPartitionId() +
                                 " scaleup due to RIF: " + (rifReset && (rifPredictedValue > rifThreshold)) +
                                 " [rifPredictedValue] " + rifPredictedValue + " [rifThreshold] " + rifThreshold +
                                 " scaleup due to MC: " + (mcReset && (mcPredictedValue > mcThreshold)) +
@@ -217,7 +221,10 @@ dialect "mvel"
                         ClusterLevelPartitionContext partitionContext = (ClusterLevelPartitionContext) partitionAlgorithm.getNextScaleDownPartitionContext(clusterInstanceContext.getPartitionCtxtsAsAnArray());
                         if(partitionContext != null){
                             log.info("[scale-down] Partition available to scale down " +
-                                " [partition] " + partitionContext.getPartitionId() +  " [cluster] " + clusterId +
+                                " [application id] " + applicationId +
+                                " [cluster] " + clusterId + " [instance id] " + clusterInstanceContext.getId() +
+                                " [network-partition] " + clusterInstanceContext.getNetworkPartitionId() +
+                                " [partition] " + partitionContext.getPartitionId() +
                                 " scaledown due to RIF: " + (rifReset && (rifPredictedValue < rifThreshold)) +
                                 " [rifPredictedValue] " + rifPredictedValue + " [rifThreshold] " + rifThreshold +
                                 " scaledown due to MC: " + (mcReset && (mcPredictedValue < mcThreshold)) +

Reply via email to