Repository: stratos
Updated Branches:
  refs/heads/master 5e5d78f19 -> 94dc9ec67


Adding more information to scaleup and scaledown logs


Project: http://git-wip-us.apache.org/repos/asf/stratos/repo
Commit: http://git-wip-us.apache.org/repos/asf/stratos/commit/7271c764
Tree: http://git-wip-us.apache.org/repos/asf/stratos/tree/7271c764
Diff: http://git-wip-us.apache.org/repos/asf/stratos/diff/7271c764

Branch: refs/heads/master
Commit: 7271c7640e742a19128f32df3059cd06bdd4e104
Parents: 390e4ff
Author: Vishanth <[email protected]>
Authored: Thu May 14 19:40:02 2015 +0530
Committer: Vishanth <[email protected]>
Committed: Thu May 14 19:40:02 2015 +0530

----------------------------------------------------------------------
 .../src/main/conf/drools/scaling.drl            | 23 +++++++++++++++-----
 1 file changed, 18 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/stratos/blob/7271c764/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl
----------------------------------------------------------------------
diff --git 
a/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl 
b/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl
index 37c7e07..1008e52 100644
--- a/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl
+++ b/products/stratos/modules/distribution/src/main/conf/drools/scaling.drl
@@ -167,7 +167,15 @@ dialect "mvel"
                         ClusterLevelPartitionContext partitionContext = 
(ClusterLevelPartitionContext) 
partitionAlgorithm.getNextScaleUpPartitionContext(clusterInstanceContext.getPartitionCtxtsAsAnArray());
                         if(partitionContext != null){
 
-                            log.info("[scale-up] Partition available, hence 
trying to spawn an instance to scale up!" );
+                            log.info("[scale-up] Partition available, hence 
trying to spawn an instance to scale up! " +
+                                " [partition] " + 
partitionContext.getPartitionId() +  " [cluster] " + clusterId +
+                                " scaleup due to RIF: " + (rifReset && 
(rifPredictedValue > rifThreshold)) +
+                                " [rifPredictedValue] " + rifPredictedValue + 
" [rifThreshold] " + rifThreshold +
+                                " scaleup due to MC: " + (mcReset && 
(mcPredictedValue > mcThreshold)) +
+                                " [mcPredictedValue] " + mcPredictedValue + " 
[mcThreshold] " + mcThreshold +
+                                " scaleup due to LA: " + (laReset && 
(laPredictedValue > laThreshold)) +
+                                " [laPredictedValue] " + laPredictedValue + " 
[laThreshold] " + laThreshold);
+
                             log.debug("[scale-up] " + " [partition] " + 
partitionContext.getPartitionId() + " [cluster] " + clusterId );
                             delegator.delegateSpawn(partitionContext, 
clusterId, clusterInstanceContext.getId(), isPrimary);
                             count++;
@@ -208,10 +216,15 @@ dialect "mvel"
                         boolean foundAValue = false;
                         ClusterLevelPartitionContext partitionContext = 
(ClusterLevelPartitionContext) 
partitionAlgorithm.getNextScaleDownPartitionContext(clusterInstanceContext.getPartitionCtxtsAsAnArray());
                         if(partitionContext != null){
-                            log.info("[scale-down] Partition available to 
scale down ");
-        //                    log.debug("[scale-down] " + " [partition] " + 
partition.getId() + " [cluster] " + clusterId);
-        //                    partitionContext = 
clusterInstanceContext.getPartitionCtxt(partition.getId());
-        //
+                            log.info("[scale-down] Partition available to 
scale down " +
+                                " [partition] " + 
partitionContext.getPartitionId() +  " [cluster] " + clusterId +
+                                " scaledown due to RIF: " + (rifReset && 
(rifPredictedValue < rifThreshold)) +
+                                " [rifPredictedValue] " + rifPredictedValue + 
" [rifThreshold] " + rifThreshold +
+                                " scaledown due to MC: " + (mcReset && 
(mcPredictedValue < mcThreshold)) +
+                                " [mcPredictedValue] " + mcPredictedValue + " 
[mcThreshold] " + mcThreshold +
+                                " scaledown due to LA: " + (laReset && 
(laPredictedValue < laThreshold)) +
+                                " [laPredictedValue] " + laPredictedValue + " 
[laThreshold] " + laThreshold
+                            );
 
                             // In partition context member stat context, all 
the primary members need to be
                             // avoided being selected as the member to be 
terminated

Reply via email to