[PATCH 16/31] workqueue: introduce workqueue_attrs

2013-03-01 Thread Tejun Heo
Introduce struct workqueue_attrs which carries worker attributes -
currently the nice level and allowed cpumask along with helper
routines alloc_workqueue_attrs() and free_workqueue_attrs().

Each worker_pool now carries ->attrs describing the attributes of its
workers.  All functions dealing with cpumask and nice level of workers
are updated to follow worker_pool->attrs instead of determining them
from other characteristics of the worker_pool, and init_workqueues()
is updated to set worker_pool->attrs appropriately for all standard
pools.

Note that create_worker() is updated to always perform set_user_nice()
and use set_cpus_allowed_ptr() combined with manual assertion of
PF_THREAD_BOUND instead of kthread_bind().  This simplifies handling
random attributes without affecting the outcome.

This patch doesn't introduce any behavior changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
---
 include/linux/workqueue.h |  12 ++
 kernel/workqueue.c| 103 --
 2 files changed, 93 insertions(+), 22 deletions(-)

diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 899be66..2683e8e 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -115,6 +115,15 @@ struct delayed_work {
int cpu;
 };
 
+/*
+ * A struct for workqueue attributes.  This can be used to change
+ * attributes of an unbound workqueue.
+ */
+struct workqueue_attrs {
+   int nice;   /* nice level */
+   cpumask_var_t   cpumask;/* allowed CPUs */
+};
+
 static inline struct delayed_work *to_delayed_work(struct work_struct *work)
 {
return container_of(work, struct delayed_work, work);
@@ -399,6 +408,9 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, 
int max_active,
 
 extern void destroy_workqueue(struct workqueue_struct *wq);
 
+struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask);
+void free_workqueue_attrs(struct workqueue_attrs *attrs);
+
 extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
struct work_struct *work);
 extern bool queue_work(struct workqueue_struct *wq, struct work_struct *work);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f97539b..7eba824 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -148,6 +148,8 @@ struct worker_pool {
struct mutexassoc_mutex;/* protect POOL_DISASSOCIATED */
struct ida  worker_ida; /* L: for worker IDs */
 
+   struct workqueue_attrs  *attrs; /* I: worker attributes */
+
/*
 * The current concurrency level.  As it's likely to be accessed
 * from other CPUs during try_to_wake_up(), put it in a separate
@@ -1563,14 +1565,13 @@ __acquires(&pool->lock)
 * against POOL_DISASSOCIATED.
 */
if (!(pool->flags & POOL_DISASSOCIATED))
-   set_cpus_allowed_ptr(current, get_cpu_mask(pool->cpu));
+   set_cpus_allowed_ptr(current, pool->attrs->cpumask);
 
spin_lock_irq(&pool->lock);
if (pool->flags & POOL_DISASSOCIATED)
return false;
if (task_cpu(current) == pool->cpu &&
-   cpumask_equal(&current->cpus_allowed,
- get_cpu_mask(pool->cpu)))
+   cpumask_equal(&current->cpus_allowed, pool->attrs->cpumask))
return true;
spin_unlock_irq(&pool->lock);
 
@@ -1677,7 +1678,7 @@ static void rebind_workers(struct worker_pool *pool)
 * wq doesn't really matter but let's keep @worker->pool
 * and @pwq->pool consistent for sanity.
 */
-   if (std_worker_pool_pri(worker->pool))
+   if (worker->pool->attrs->nice < 0)
wq = system_highpri_wq;
else
wq = system_wq;
@@ -1719,7 +1720,7 @@ static struct worker *alloc_worker(void)
  */
 static struct worker *create_worker(struct worker_pool *pool)
 {
-   const char *pri = std_worker_pool_pri(pool) ? "H" : "";
+   const char *pri = pool->attrs->nice < 0  ? "H" : "";
struct worker *worker = NULL;
int id = -1;
 
@@ -1749,24 +1750,23 @@ static struct worker *create_worker(struct worker_pool 
*pool)
if (IS_ERR(worker->task))
goto fail;
 
-   if (std_worker_pool_pri(pool))
-   set_user_nice(worker->task, HIGHPRI_NICE_LEVEL);
+   set_user_nice(worker->task, pool->attrs->nice);
+   set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
 
/*
-* Determine CPU binding of the new worker depending on
-* %POOL_DISASSOCIATED.  The caller is responsible for ensuring the
-* flag remains stable across this function.  See the comments
-* above the flag definition for details.
-*
-* As an unbound worker may later become a 

[PATCH 16/31] workqueue: introduce workqueue_attrs

2013-03-01 Thread Tejun Heo
Introduce struct workqueue_attrs which carries worker attributes -
currently the nice level and allowed cpumask along with helper
routines alloc_workqueue_attrs() and free_workqueue_attrs().

Each worker_pool now carries ->attrs describing the attributes of its
workers.  All functions dealing with cpumask and nice level of workers
are updated to follow worker_pool->attrs instead of determining them
from other characteristics of the worker_pool, and init_workqueues()
is updated to set worker_pool->attrs appropriately for all standard
pools.

Note that create_worker() is updated to always perform set_user_nice()
and use set_cpus_allowed_ptr() combined with manual assertion of
PF_THREAD_BOUND instead of kthread_bind().  This simplifies handling
random attributes without affecting the outcome.

This patch doesn't introduce any behavior changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
---
 include/linux/workqueue.h |  12 ++
 kernel/workqueue.c| 103 --
 2 files changed, 93 insertions(+), 22 deletions(-)

diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 899be66..2683e8e 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -115,6 +115,15 @@ struct delayed_work {
int cpu;
 };
 
+/*
+ * A struct for workqueue attributes.  This can be used to change
+ * attributes of an unbound workqueue.
+ */
+struct workqueue_attrs {
+   int nice;   /* nice level */
+   cpumask_var_t   cpumask;/* allowed CPUs */
+};
+
 static inline struct delayed_work *to_delayed_work(struct work_struct *work)
 {
return container_of(work, struct delayed_work, work);
@@ -399,6 +408,9 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, 
int max_active,
 
 extern void destroy_workqueue(struct workqueue_struct *wq);
 
+struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask);
+void free_workqueue_attrs(struct workqueue_attrs *attrs);
+
 extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
struct work_struct *work);
 extern bool queue_work(struct workqueue_struct *wq, struct work_struct *work);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f97539b..7eba824 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -148,6 +148,8 @@ struct worker_pool {
struct mutexassoc_mutex;/* protect POOL_DISASSOCIATED */
struct ida  worker_ida; /* L: for worker IDs */
 
+   struct workqueue_attrs  *attrs; /* I: worker attributes */
+
/*
 * The current concurrency level.  As it's likely to be accessed
 * from other CPUs during try_to_wake_up(), put it in a separate
@@ -1563,14 +1565,13 @@ __acquires(&pool->lock)
 * against POOL_DISASSOCIATED.
 */
if (!(pool->flags & POOL_DISASSOCIATED))
-   set_cpus_allowed_ptr(current, get_cpu_mask(pool->cpu));
+   set_cpus_allowed_ptr(current, pool->attrs->cpumask);
 
spin_lock_irq(&pool->lock);
if (pool->flags & POOL_DISASSOCIATED)
return false;
if (task_cpu(current) == pool->cpu &&
-   cpumask_equal(&current->cpus_allowed,
- get_cpu_mask(pool->cpu)))
+   cpumask_equal(&current->cpus_allowed, pool->attrs->cpumask))
return true;
spin_unlock_irq(&pool->lock);
 
@@ -1677,7 +1678,7 @@ static void rebind_workers(struct worker_pool *pool)
 * wq doesn't really matter but let's keep @worker->pool
 * and @pwq->pool consistent for sanity.
 */
-   if (std_worker_pool_pri(worker->pool))
+   if (worker->pool->attrs->nice < 0)
wq = system_highpri_wq;
else
wq = system_wq;
@@ -1719,7 +1720,7 @@ static struct worker *alloc_worker(void)
  */
 static struct worker *create_worker(struct worker_pool *pool)
 {
-   const char *pri = std_worker_pool_pri(pool) ? "H" : "";
+   const char *pri = pool->attrs->nice < 0  ? "H" : "";
struct worker *worker = NULL;
int id = -1;
 
@@ -1749,24 +1750,23 @@ static struct worker *create_worker(struct worker_pool 
*pool)
if (IS_ERR(worker->task))
goto fail;
 
-   if (std_worker_pool_pri(pool))
-   set_user_nice(worker->task, HIGHPRI_NICE_LEVEL);
+   set_user_nice(worker->task, pool->attrs->nice);
+   set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
 
/*
-* Determine CPU binding of the new worker depending on
-* %POOL_DISASSOCIATED.  The caller is responsible for ensuring the
-* flag remains stable across this function.  See the comments
-* above the flag definition for details.
-*
-* As an unbound worker may later become a