Re: [PATCH v3 04/17] workqueue: use new hashtable implementation

2012-08-22 Thread Tejun Heo
On Wed, Aug 22, 2012 at 04:26:59AM +0200, Sasha Levin wrote:
> Switch workqueues to use the new hashtable implementation. This reduces
> the amount of generic unrelated code in the workqueues.
> 
> Signed-off-by: Sasha Levin <levinsasha...@gmail.com>

Acked-by: Tejun Heo <t...@kernel.org>

Thanks.

-- 
tejun


[PATCH v3 04/17] workqueue: use new hashtable implementation

2012-08-21 Thread Sasha Levin
Switch workqueues to use the new hashtable implementation. This reduces
the amount of generic unrelated code in the workqueues.

Signed-off-by: Sasha Levin <levinsasha...@gmail.com>
---
 kernel/workqueue.c |   86 +---
 1 files changed, 15 insertions(+), 71 deletions(-)

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 11723c5..fca751e 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -41,6 +41,7 @@
 #include <linux/debug_locks.h>
 #include <linux/lockdep.h>
 #include <linux/idr.h>
+#include <linux/hashtable.h>
 
 #include "workqueue_sched.h"
 
@@ -82,8 +83,6 @@ enum {
NR_WORKER_POOLS = 2,/* # worker pools per gcwq */
 
BUSY_WORKER_HASH_ORDER  = 6,/* 64 pointers */
-   BUSY_WORKER_HASH_SIZE   = 1 << BUSY_WORKER_HASH_ORDER,
-   BUSY_WORKER_HASH_MASK   = BUSY_WORKER_HASH_SIZE - 1,
 
MAX_IDLE_WORKERS_RATIO  = 4,/* 1/4 of busy can be idle */
IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */
@@ -180,7 +179,7 @@ struct global_cwq {
unsigned intflags;  /* L: GCWQ_* flags */
 
/* workers are chained either in busy_hash or pool idle_list */
-   struct hlist_head   busy_hash[BUSY_WORKER_HASH_SIZE];
+   DEFINE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
/* L: hash of busy workers */
 
struct worker_pool  pools[2];   /* normal and highpri pools */
@@ -288,8 +287,7 @@ EXPORT_SYMBOL_GPL(system_nrt_freezable_wq);
 (pool) < &(gcwq)->pools[NR_WORKER_POOLS]; (pool)++)
 
 #define for_each_busy_worker(worker, i, pos, gcwq) \
-   for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) \
-   hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
+   hash_for_each(gcwq->busy_hash, i, pos, worker, hentry)
 
 static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
  unsigned int sw)
@@ -845,63 +843,6 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
 }
 
 /**
- * busy_worker_head - return the busy hash head for a work
- * @gcwq: gcwq of interest
- * @work: work to be hashed
- *
- * Return hash head of @gcwq for @work.
- *
- * CONTEXT:
- * spin_lock_irq(gcwq->lock).
- *
- * RETURNS:
- * Pointer to the hash head.
- */
-static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
-  struct work_struct *work)
-{
-   const int base_shift = ilog2(sizeof(struct work_struct));
-   unsigned long v = (unsigned long)work;
-
-   /* simple shift and fold hash, do we need something better? */
-   v >>= base_shift;
-   v += v >> BUSY_WORKER_HASH_ORDER;
-   v &= BUSY_WORKER_HASH_MASK;
-
-   return &gcwq->busy_hash[v];
-}
-
-/**
- * __find_worker_executing_work - find worker which is executing a work
- * @gcwq: gcwq of interest
- * @bwh: hash head as returned by busy_worker_head()
- * @work: work to find worker for
- *
- * Find a worker which is executing @work on @gcwq.  @bwh should be
- * the hash head obtained by calling busy_worker_head() with the same
- * work.
- *
- * CONTEXT:
- * spin_lock_irq(gcwq->lock).
- *
- * RETURNS:
- * Pointer to worker which is executing @work if found, NULL
- * otherwise.
- */
-static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
-  struct hlist_head *bwh,
-  struct work_struct *work)
-{
-   struct worker *worker;
-   struct hlist_node *tmp;
-
-   hlist_for_each_entry(worker, tmp, bwh, hentry)
-   if (worker->current_work == work)
-   return worker;
-   return NULL;
-}
-
-/**
  * find_worker_executing_work - find worker which is executing a work
  * @gcwq: gcwq of interest
  * @work: work to find worker for
@@ -920,8 +861,14 @@ static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
 static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
 struct work_struct *work)
 {
-   return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
-   work);
+   struct worker *worker;
+   struct hlist_node *tmp;
+
+   hash_for_each_possible(gcwq->busy_hash, worker, tmp, hentry, (unsigned long)work)
+   if (worker->current_work == work)
+   return worker;
+
+   return NULL;
 }
 
 /**
@@ -2120,7 +2067,6 @@ __acquires(&gcwq->lock)
struct cpu_workqueue_struct *cwq = get_work_cwq(work);
struct worker_pool *pool = worker->pool;
struct global_cwq *gcwq = pool->gcwq;
-   struct hlist_head *bwh = busy_worker_head(gcwq, work);
bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
work_func_t f = work->func;
int work_color;
@@ -2152,7 +2098,7 @@ 
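
For readers without the rest of the series at hand, the pattern the patch
adopts is simple: a fixed-size array of list heads, indexed by a hash of the
work_struct pointer, with lookups scanning only the matching bucket. The
sketch below is a minimal userspace analogue of that pattern, not the
linux/hashtable.h implementation itself: busy_hash, worker, current_work,
hentry and the hash order of 6 mirror the patch, while hash_ptr,
busy_hash_add and the simplified list node are hypothetical stand-ins.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define HASH_ORDER 6                         /* 64 buckets, like BUSY_WORKER_HASH_ORDER */
#define HASH_SIZE  (1UL << HASH_ORDER)

struct hnode { struct hnode *next; };        /* simplified stand-in for struct hlist_node */

struct worker {
        struct hnode hentry;                 /* chains the worker into one bucket */
        void *current_work;                  /* the key: pointer to the work item */
};

static struct hnode *busy_hash[HASH_SIZE];   /* analogue of DEFINE_HASHTABLE(busy_hash, 6) */

/* shift-and-fold of the pointer value, approximating the hash the patch removes */
static size_t hash_ptr(const void *p)
{
        uintptr_t v = (uintptr_t)p >> 4;     /* drop alignment bits, roughly base_shift */
        return (v + (v >> HASH_ORDER)) & (HASH_SIZE - 1);
}

/* analogue of hash_add(): link the worker into the bucket chosen by its key */
static void busy_hash_add(struct worker *w, void *key)
{
        struct hnode **head = &busy_hash[hash_ptr(key)];

        w->hentry.next = *head;
        *head = &w->hentry;
}

/* analogue of hash_for_each_possible(): scan only the key's bucket */
static struct worker *find_worker_executing_work(void *work)
{
        struct hnode *n;

        for (n = busy_hash[hash_ptr(work)]; n; n = n->next) {
                struct worker *w = (struct worker *)
                        ((char *)n - offsetof(struct worker, hentry));

                if (w->current_work == work)
                        return w;
        }
        return NULL;
}

int main(void)
{
        int work;                            /* any address serves as a key here */
        struct worker w = { .current_work = &work };

        busy_hash_add(&w, &work);
        printf("worker found: %s\n",
               find_worker_executing_work(&work) == &w ? "yes" : "no");
        return 0;
}

The conversion buys exactly this boilerplate for free: DEFINE_HASHTABLE()
and hash_for_each_possible() provide the bucket sizing, hashing and
single-bucket lookup generically (with the insertion side, presumably
hash_add(), in the part of the diff the archive truncates), which is why
busy_worker_head() and __find_worker_executing_work() can be deleted
outright.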
