Re: [PATCH 4/13] find_busiest_group fixlets

2005-02-24 Thread Ingo Molnar

* Nick Piggin <[EMAIL PROTECTED]> wrote:

> 4/13
> 5/13

#insert <previous mail>

Acked-by: Ingo Molnar <[EMAIL PROTECTED]>

Ingo


[PATCH 4/13] find_busiest_group fixlets

2005-02-23 Thread Nick Piggin
4/13

Fix up a few small warts in the periodic multiprocessor rebalancing
code.

Signed-off-by: Nick Piggin <[EMAIL PROTECTED]>

Index: linux-2.6/kernel/sched.c
===================================================================
--- linux-2.6.orig/kernel/sched.c	2005-02-24 17:31:28.431609701 +1100
+++ linux-2.6/kernel/sched.c	2005-02-24 17:43:38.806447240 +1100
@@ -1830,13 +1830,12 @@
 	 * by pulling tasks to us.  Be careful of negative numbers as they'll
 	 * appear as very large values with unsigned longs.
 	 */
-	*imbalance = min(max_load - avg_load, avg_load - this_load);
-
 	/* How much load to actually move to equalise the imbalance */
-	*imbalance = (*imbalance * min(busiest->cpu_power, this->cpu_power))
-			/ SCHED_LOAD_SCALE;
+	*imbalance = min((max_load - avg_load) * busiest->cpu_power,
+				(avg_load - this_load) * this->cpu_power)
+			/ SCHED_LOAD_SCALE;
 
-	if (*imbalance < SCHED_LOAD_SCALE - 1) {
+	if (*imbalance < SCHED_LOAD_SCALE) {
 		unsigned long pwr_now = 0, pwr_move = 0;
 		unsigned long tmp;
 
@@ -1862,14 +1861,16 @@
 			max_load - tmp);
 
 		/* Amount of load we'd add */
-		tmp = SCHED_LOAD_SCALE*SCHED_LOAD_SCALE/this->cpu_power;
-		if (max_load < tmp)
-			tmp = max_load;
+		if (max_load*busiest->cpu_power <
+				SCHED_LOAD_SCALE*SCHED_LOAD_SCALE)
+			tmp = max_load*busiest->cpu_power/this->cpu_power;
+		else
+			tmp = SCHED_LOAD_SCALE*SCHED_LOAD_SCALE/this->cpu_power;
 		pwr_move += this->cpu_power*min(SCHED_LOAD_SCALE, this_load + tmp);
 		pwr_move /= SCHED_LOAD_SCALE;
 
-		/* Move if we gain another 8th of a CPU worth of throughput */
-		if (pwr_move < pwr_now + SCHED_LOAD_SCALE / 8)
+		/* Move if we gain throughput */
+		if (pwr_move <= pwr_now)
 			goto out_balanced;
 
 		*imbalance = 1;
@@ -1877,7 +1878,7 @@
 	}
 
 	/* Get rid of the scaling factor, rounding down as we divide */
-	*imbalance = (*imbalance + 1) / SCHED_LOAD_SCALE;
+	*imbalance = *imbalance / SCHED_LOAD_SCALE;
 
 	return busiest;
 
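
To see what the first hunk changes with concrete numbers, here is a
minimal standalone C sketch (userspace code, not kernel code; the load
and cpu_power figures below are invented for illustration, with
SCHED_LOAD_SCALE = 128 as in kernels of this era) contrasting the old
and new imbalance computations:

#include <stdio.h>

#define SCHED_LOAD_SCALE 128UL

/* Stand-in for the kernel's min() macro. */
static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Invented figures: a two-CPU busiest group somewhat above the
	 * domain average, and a single-CPU group below it. */
	unsigned long max_load      = 3 * SCHED_LOAD_SCALE;     /* 384 */
	unsigned long avg_load      = 5 * SCHED_LOAD_SCALE / 2; /* 320 */
	unsigned long this_load     = 1 * SCHED_LOAD_SCALE;     /* 128 */
	unsigned long busiest_power = 2 * SCHED_LOAD_SCALE;     /* 2 CPUs */
	unsigned long this_power    = 1 * SCHED_LOAD_SCALE;     /* 1 CPU */

	/* Old: take the min of the two load deltas first, then apply a
	 * single shared factor, the smaller of the two cpu_powers. */
	unsigned long imb_old =
		min_ul(max_load - avg_load, avg_load - this_load)
		* min_ul(busiest_power, this_power) / SCHED_LOAD_SCALE;

	/* New: weight each delta by the cpu_power of the group it
	 * describes, and only then take the min. */
	unsigned long imb_new =
		min_ul((max_load - avg_load) * busiest_power,
		       (avg_load - this_load) * this_power)
		/ SCHED_LOAD_SCALE;

	printf("imbalance: old=%lu new=%lu\n", imb_old, imb_new);

	/* The final descale changed too: dropping the "+ 1" rounds
	 * strictly down, e.g. 127 now yields 0 rather than 1; such
	 * small imbalances are handled by the small-imbalance path. */
	unsigned long imb = SCHED_LOAD_SCALE - 1;               /* 127 */
	printf("descaled:  old=%lu new=%lu\n",
	       (imb + 1) / SCHED_LOAD_SCALE, imb / SCHED_LOAD_SCALE);
	return 0;
}

With these inputs the old formula moves 64 units of load where the new
one moves 128: weighting each load delta by the cpu_power of the group
it describes, before taking the min, credits the two-CPU busiest group
with the full amount of load it can shed. The threshold and rounding
changes fit together the same way: an imbalance of at least
SCHED_LOAD_SCALE now always descales to at least one task, and anything
smaller is decided purely by the pwr_move/pwr_now throughput comparison.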

