[PATCH wq/for-3.6-fixes 2/3] workqueue: rename rebind_workers() to gcwq_associate() and let it handle locking and DISASSOCIATED clearing

From 0150a04271dbcc9abbb2575911fa1d72d40451bf Mon Sep 17 00:00:00 2001
From: Tejun Heo <t...@kernel.org>
Date: Thu, 6 Sep 2012 12:50:40 -0700

CPU_ONLINE used to handle the locking and the clearing of DISASSOCIATED,
with rebind_workers() doing just the rebinding.  This patch renames the
function to gcwq_associate() and lets it handle the whole onlining.  This is for
the scheduled fix of a subtle idle worker depletion issue during
CPU_ONLINE.

Note that this removes the unnecessary relock at the end of
gcwq_associate().

This patch doesn't introduce any functional difference.
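
Condensed, the resulting flow is (a sketch, not the verbatim code in
the diff below):

	static void gcwq_associate(struct global_cwq *gcwq)
	{
		gcwq_claim_management(gcwq);		/* exclude managers */
		spin_lock_irq(&gcwq->lock);
		gcwq->flags &= ~GCWQ_DISASSOCIATED;	/* CPU is ours again */

		/* rebind idle and busy workers, as before */

		spin_unlock_irq(&gcwq->lock);		/* no relock afterwards */
		gcwq_release_management(gcwq);
	}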

Signed-off-by: Tejun Heo <t...@kernel.org>
---
 kernel/workqueue.c |   29 ++++++++++++++---------------
 1 files changed, 14 insertions(+), 15 deletions(-)
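
Side note, below the cut so it stays out of the commit message: the old
tail of rebind_workers() reacquired gcwq->lock after
wait_for_completion() only so that the CPU_ONLINE path could drop it
again right away.  With the unlock folded into gcwq_associate(), both
branches of the final wait simply unlock; condensed, the new tail is:

	if (--idle_rebind.cnt) {
		spin_unlock_irq(&gcwq->lock);
		wait_for_completion(&idle_rebind.done);
	} else {
		spin_unlock_irq(&gcwq->lock);
	}

	gcwq_release_management(gcwq);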

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 63ede1f..b19170b 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -480,6 +480,8 @@ static atomic_t unbound_pool_nr_running[NR_WORKER_POOLS] = {
 };
 
 static int worker_thread(void *__worker);
+static void gcwq_claim_management(struct global_cwq *gcwq);
+static void gcwq_release_management(struct global_cwq *gcwq);
 
 static int worker_pool_pri(struct worker_pool *pool)
 {
@@ -1355,11 +1357,11 @@ static void busy_worker_rebind_fn(struct work_struct *work)
 }
 
 /**
- * rebind_workers - rebind all workers of a gcwq to the associated CPU
+ * gcwq_associate - (re)associate a gcwq to its CPU and rebind its workers
  * @gcwq: gcwq of interest
  *
- * @gcwq->cpu is coming online.  Rebind all workers to the CPU.  Rebinding
- * is different for idle and busy ones.
+ * @gcwq->cpu is coming online.  Clear %GCWQ_DISASSOCIATED and rebind all
+ * workers to the CPU.  Rebinding is different for idle and busy ones.
  *
  * The idle ones should be rebound synchronously and idle rebinding should
  * be complete before any worker starts executing work items with
@@ -1378,8 +1380,7 @@ static void busy_worker_rebind_fn(struct work_struct *work)
  * On return, all workers are guaranteed to either be bound or have rebind
  * work item scheduled.
  */
-static void rebind_workers(struct global_cwq *gcwq)
-   __releases(&gcwq->lock) __acquires(&gcwq->lock)
+static void gcwq_associate(struct global_cwq *gcwq)
 {
struct idle_rebind idle_rebind;
struct worker_pool *pool;
@@ -1387,10 +1388,10 @@ static void rebind_workers(struct global_cwq *gcwq)
struct hlist_node *pos;
int i;
 
-   lockdep_assert_held(&gcwq->lock);
+   gcwq_claim_management(gcwq);
+   spin_lock_irq(&gcwq->lock);

-   for_each_worker_pool(pool, gcwq)
-   lockdep_assert_held(&pool->manager_mutex);
+   gcwq->flags &= ~GCWQ_DISASSOCIATED;
 
/*
 * Rebind idle workers.  Interlocked both ways.  We wait for
@@ -1477,8 +1478,11 @@ retry:
if (--idle_rebind.cnt) {
spin_unlock_irq(&gcwq->lock);
wait_for_completion(&idle_rebind.done);
-   spin_lock_irq(&gcwq->lock);
+   } else {
+   spin_unlock_irq(&gcwq->lock);
}
+
+   gcwq_release_management(gcwq);
 }
 
 static struct worker *alloc_worker(void)
@@ -3496,12 +3500,7 @@ static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
 
case CPU_DOWN_FAILED:
case CPU_ONLINE:
-   gcwq_claim_management(gcwq);
-   spin_lock_irq(&gcwq->lock);
-   gcwq->flags &= ~GCWQ_DISASSOCIATED;
-   rebind_workers(gcwq);
-   spin_unlock_irq(&gcwq->lock);
-   gcwq_release_management(gcwq);
+   gcwq_associate(gcwq);
break;
}
return NOTIFY_OK;
-- 
1.7.7.3
