From: Peter W. Morreale <[EMAIL PROTECTED]>

In wakeup_next_waiter(), we take the pending owner's pi_lock and only
then find out whether there is another waiter to add to the pending
owner.  We can reduce contention on the pending owner's pi_lock if we
obtain the pointer to the next waiter before taking that lock.

This patch yields a measurable increase in throughput.
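
For illustration only (not part of the patch): a minimal userspace sketch
of the same pattern, using a pthread mutex in place of the kernel pi_lock.
All names here (struct owner, top_waiter(), queue_old(), queue_new()) are
made up for the sketch.  In the real code the lookup via
rt_mutex_top_waiter() is already safe outside pendowner->pi_lock because
lock->wait_lock is held across wakeup_next_waiter().

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct waiter {
	struct waiter *next;
	int prio;		/* lower number = higher priority */
};

struct owner {
	pthread_mutex_t pi_lock;
	struct waiter *pi_waiters;	/* simplified stand-in for the pi_waiters plist */
};

/* Stand-in for rt_mutex_top_waiter(): return the highest-priority waiter. */
static struct waiter *top_waiter(struct waiter *head)
{
	struct waiter *top = NULL;

	for (struct waiter *w = head; w; w = w->next)
		if (!top || w->prio < top->prio)
			top = w;
	return top;
}

/* Old shape: the lookup is done while holding the owner's pi_lock. */
static void queue_old(struct owner *o, struct waiter *lock_waiters)
{
	pthread_mutex_lock(&o->pi_lock);
	struct waiter *next = top_waiter(lock_waiters);

	if (next) {
		next->next = o->pi_waiters;
		o->pi_waiters = next;
	}
	pthread_mutex_unlock(&o->pi_lock);
}

/* New shape: lookup first; pi_lock covers only the list update. */
static void queue_new(struct owner *o, struct waiter *lock_waiters)
{
	struct waiter *next = top_waiter(lock_waiters);

	pthread_mutex_lock(&o->pi_lock);
	if (next) {
		next->next = o->pi_waiters;
		o->pi_waiters = next;
	}
	pthread_mutex_unlock(&o->pi_lock);
}

int main(void)
{
	struct owner o = { PTHREAD_MUTEX_INITIALIZER, NULL };
	struct waiter w1 = { NULL, 10 }, w2 = { &w1, 5 };

	queue_new(&o, &w2);
	printf("queued waiter with prio %d\n", o.pi_waiters->prio);
	return 0;
}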

Signed-off-by: Peter W. Morreale <[EMAIL PROTECTED]>
---

 kernel/rtmutex.c |   14 +++++++++-----
 1 files changed, 9 insertions(+), 5 deletions(-)

diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index ea593e0..b81bbef 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -526,6 +526,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock, int savestate)
 {
        struct rt_mutex_waiter *waiter;
        struct task_struct *pendowner;
+       struct rt_mutex_waiter *next;
 
        spin_lock(&current->pi_lock);
 
@@ -587,6 +588,12 @@ static void wakeup_next_waiter(struct rt_mutex *lock, int savestate)
         * waiter with higher priority than pending-owner->normal_prio
         * is blocked on the unboosted (pending) owner.
         */
+
+       if (rt_mutex_has_waiters(lock))
+               next = rt_mutex_top_waiter(lock);
+       else
+               next = NULL;
+
        spin_lock(&pendowner->pi_lock);
 
        WARN_ON(!pendowner->pi_blocked_on);
@@ -595,12 +602,9 @@ static void wakeup_next_waiter(struct rt_mutex *lock, int savestate)
 
        pendowner->pi_blocked_on = NULL;
 
-       if (rt_mutex_has_waiters(lock)) {
-               struct rt_mutex_waiter *next;
-
-               next = rt_mutex_top_waiter(lock);
+       if (next)
                plist_add(&next->pi_list_entry, &pendowner->pi_waiters);
-       }
+
        spin_unlock(&pendowner->pi_lock);
 }
 
