[PATCH 2/6] workqueue: Replace pool->attach_mutex with global wq_pool_attach_mutex

2018-05-16 Thread Tejun Heo
To improve workqueue visibility, we want to be able to access
workqueue information from worker tasks.  The per-pool attach mutex
makes that difficult because there's no way of stabilizing task ->
worker pool association without knowing the pool first.

Worker attach/detach is a slow path and there's no need for different
pools to be able to perform them concurrently.  This patch replaces
the per-pool attach_mutex with global wq_pool_attach_mutex to prepare
for visibility improvement changes.
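
For orientation, a minimal sketch of what the global mutex makes possible
(illustrative only, not part of this patch): once attach/detach is excluded
globally, and assuming the worker->pool link is maintained under this mutex
(the kind of follow-up this patch prepares for), the association can be
inspected without knowing, and therefore without locking, the pool in
advance.  report_worker_pool() is a hypothetical helper; worker->desc and
pool->id are existing workqueue-internal fields.

	static void report_worker_pool(struct worker *worker)
	{
		/* excludes worker_attach_to_pool()/worker_detach_from_pool() */
		mutex_lock(&wq_pool_attach_mutex);

		/* the "A:" protected association can't change while held */
		if (worker->pool)
			pr_info("worker %s attached to pool %d\n",
				worker->desc, worker->pool->id);

		mutex_unlock(&wq_pool_attach_mutex);
	}

With the old per-pool attach_mutex, the pool would have to be known first in
order to take its mutex, which is the chicken-and-egg problem described above.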

Signed-off-by: Tejun Heo <tj@kernel.org>
---
 kernel/workqueue.c | 41 ++++++++++++++++++++---------------------
 1 file changed, 20 insertions(+), 21 deletions(-)

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ca7959b..91fe0a6 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -66,7 +66,7 @@ enum {
 * be executing on any CPU.  The pool behaves as an unbound one.
 *
 * Note that DISASSOCIATED should be flipped only while holding
-* attach_mutex to avoid changing binding state while
+* wq_pool_attach_mutex to avoid changing binding state while
 * worker_attach_to_pool() is in progress.
 */
POOL_MANAGER_ACTIVE = 1 << 0,   /* being managed */
@@ -123,7 +123,7 @@ enum {
  *cpu or grabbing pool->lock is enough for read access.  If
  *POOL_DISASSOCIATED is set, it's identical to L.
  *
- * A: pool->attach_mutex protected.
+ * A: wq_pool_attach_mutex protected.
  *
  * PL: wq_pool_mutex protected.
  *
@@ -166,7 +166,6 @@ struct worker_pool {
/* L: hash of busy workers */
 
struct worker   *manager;   /* L: purely informational */
-	struct mutex		attach_mutex;	/* attach/detach exclusion */
	struct list_head	workers;	/* A: attached workers */
struct completion   *detach_completion; /* all workers detached */
 
@@ -297,6 +296,7 @@ static bool wq_numa_enabled;		/* unbound NUMA affinity enabled */
 static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
 
 static DEFINE_MUTEX(wq_pool_mutex);	/* protects pools and workqueues list */
+static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
 static DEFINE_SPINLOCK(wq_mayday_lock);	/* protects wq->maydays list */
 static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
 
@@ -399,14 +399,14 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
  * @worker: iteration cursor
  * @pool: worker_pool to iterate workers of
  *
- * This must be called with @pool->attach_mutex.
+ * This must be called with wq_pool_attach_mutex.
  *
  * The if/else clause exists only for the lockdep assertion and can be
  * ignored.
  */
 #define for_each_pool_worker(worker, pool) \
list_for_each_entry((worker), &(pool)->workers, node)   \
-		if (({ lockdep_assert_held(&pool->attach_mutex); false; })) { } \
+		if (({ lockdep_assert_held(&wq_pool_attach_mutex); false; })) { } \
else
 
 /**
@@ -1724,7 +1724,7 @@ static struct worker *alloc_worker(int node)
 static void worker_attach_to_pool(struct worker *worker,
				  struct worker_pool *pool)
 {
-	mutex_lock(&pool->attach_mutex);
+	mutex_lock(&wq_pool_attach_mutex);
 
/*
 * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
@@ -1733,16 +1733,16 @@ static void worker_attach_to_pool(struct worker *worker,
set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
 
/*
-* The pool->attach_mutex ensures %POOL_DISASSOCIATED remains
-* stable across this function.  See the comments above the
-* flag definition for details.
+* The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains
+* stable across this function.  See the comments above the flag
+* definition for details.
 */
if (pool->flags & POOL_DISASSOCIATED)
worker->flags |= WORKER_UNBOUND;
 
	list_add_tail(&worker->node, &pool->workers);
 
-	mutex_unlock(&pool->attach_mutex);
+	mutex_unlock(&wq_pool_attach_mutex);
 }
 
 /**
@@ -1759,11 +1759,11 @@ static void worker_detach_from_pool(struct worker *worker,
 {
struct completion *detach_completion = NULL;
 
-	mutex_lock(&pool->attach_mutex);
+	mutex_lock(&wq_pool_attach_mutex);
	list_del(&worker->node);
	if (list_empty(&pool->workers))
		detach_completion = pool->detach_completion;
-	mutex_unlock(&pool->attach_mutex);
+	mutex_unlock(&wq_pool_attach_mutex);
 
/* clear leftover flags without pool->lock after it is detached */
worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
@@ -3271,7 +3271,6 @@ static int init_worker_pool(struct worker_pool *pool)
 
 	timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0);
 
-	mutex_init(&pool->attach_mutex);
	INIT_LIST_HEAD(&pool->workers);
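
As a hedged sketch (assumed, not taken from the hunks shown above), the rule
in the first hunk's comment — flip %POOL_DISASSOCIATED only while holding
wq_pool_attach_mutex — would make a binding-state change on a CPU-hotplug
path look roughly like this; example_reassociate_pool() is a hypothetical
name, and the spin_lock_irq() usage assumes the pool->lock conventions of
this kernel version.

	static void example_reassociate_pool(struct worker_pool *pool)
	{
		/* no worker can attach to or detach from @pool meanwhile */
		mutex_lock(&wq_pool_attach_mutex);

		spin_lock_irq(&pool->lock);
		pool->flags &= ~POOL_DISASSOCIATED;	/* flip only under the mutex */
		spin_unlock_irq(&pool->lock);

		mutex_unlock(&wq_pool_attach_mutex);
	}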