On 2015年01月06日 06:28, Paul Gortmaker wrote:
On 14-12-19 03:43 AM, Zumeng Chen wrote:
From: Lai Jiangshan <[email protected]>

worker_idr is highly bound to managers and is always/only accessed in manager
lock context. So we don't need pool->lock for it.

Signed-off-by: Lai Jiangshan <[email protected]>
Signed-off-by: Tejun Heo <[email protected]>
(cherry picked from commit 9625ab1727743f6a164df26b7b1eeeced7380b42)
---
Even though "git cherry-pick -x <ID>" puts the upstream commit ID
in the place shown here, I think you should get in the habit of
what is done universally elsewhere for nearly all kernel patches,
and put:

   commit 9625ab1727743f6a164df26b7b1eeeced7380b42 upstream.

as the 1st line of the long log.  Then the fact that it is already
upstream is immediately obvious and not buried in a sea of SOB
lines.  Which I note, your SOB line seems to be missing from all
the ones I briefly looked at as well, so that should be fixed too.

OK, thanks Paul. I'll do it if these patches are still not in our tree :)

Cheers,
Zumeng


Paul.
--

  kernel/workqueue.c |   34 ++++++----------------------------
  1 file changed, 6 insertions(+), 28 deletions(-)

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index b4defde..701f2d2 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -124,8 +124,7 @@ enum {
   *    cpu or grabbing pool->lock is enough for read access.  If
   *    POOL_DISASSOCIATED is set, it's identical to L.
   *
- * MG: pool->manager_mutex and pool->lock protected.  Writes require both
- *     locks.  Reads can happen under either lock.
+ * M: pool->manager_mutex protected.
   *
   * PL: wq_pool_mutex protected.
   *
@@ -164,7 +163,7 @@ struct worker_pool {
        /* see manage_workers() for details on the two manager mutexes */
        struct mutex            manager_arb;    /* manager arbitration */
        struct mutex            manager_mutex;  /* manager exclusion */
-       struct idr              worker_idr;     /* MG: worker IDs and iteration 
*/
+       struct idr              worker_idr;     /* M: worker IDs and iteration 
*/
struct workqueue_attrs *attrs; /* I: worker attributes */
        struct hlist_node       hash_node;      /* PL: unbound_pool_hash node */
@@ -340,16 +339,6 @@ static void copy_workqueue_attrs(struct workqueue_attrs 
*to,
                           lockdep_is_held(&wq->mutex),                  \
                           "sched RCU or wq->mutex should be held")
-#ifdef CONFIG_LOCKDEP
-#define assert_manager_or_pool_lock(pool)                              \
-       WARN_ONCE(debug_locks &&                                        \
-                 !lockdep_is_held(&(pool)->manager_mutex) &&            \
-                 !lockdep_is_held(&(pool)->lock),                       \
-                 "pool->manager_mutex or ->lock should be held")
-#else
-#define assert_manager_or_pool_lock(pool)      do { } while (0)
-#endif
-
  #define for_each_cpu_worker_pool(pool, cpu)                           \
        for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];           \
             (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
@@ -378,14 +367,14 @@ static void copy_workqueue_attrs(struct workqueue_attrs 
*to,
   * @wi: integer used for iteration
   * @pool: worker_pool to iterate workers of
   *
- * This must be called with either @pool->manager_mutex or ->lock held.
+ * This must be called with @pool->manager_mutex.
   *
   * The if/else clause exists only for the lockdep assertion and can be
   * ignored.
   */
  #define for_each_pool_worker(worker, wi, pool)                                
\
        idr_for_each_entry(&(pool)->worker_idr, (worker), (wi))          \
-               if (({ assert_manager_or_pool_lock((pool)); false; })) { } \
+               if (({ lockdep_assert_held(&pool->manager_mutex); false; })) { 
} \
                else
/**
@@ -1718,13 +1707,7 @@ static struct worker *create_worker(struct worker_pool 
*pool)
         * ID is needed to determine kthread name.  Allocate ID first
         * without installing the pointer.
         */
-       idr_preload(GFP_KERNEL);
-       spin_lock_irq(&pool->lock);
-
-       id = idr_alloc(&pool->worker_idr, NULL, 0, 0, GFP_NOWAIT);
-
-       spin_unlock_irq(&pool->lock);
-       idr_preload_end();
+       id = idr_alloc(&pool->worker_idr, NULL, 0, 0, GFP_KERNEL);
        if (id < 0)
                goto fail;
@@ -1766,18 +1749,13 @@ static struct worker *create_worker(struct worker_pool *pool)
                worker->flags |= WORKER_UNBOUND;
/* successful, commit the pointer to idr */
-       spin_lock_irq(&pool->lock);
        idr_replace(&pool->worker_idr, worker, worker->id);
-       spin_unlock_irq(&pool->lock);
return worker; fail:
-       if (id >= 0) {
-               spin_lock_irq(&pool->lock);
+       if (id >= 0)
                idr_remove(&pool->worker_idr, id);
-               spin_unlock_irq(&pool->lock);
-       }
        kfree(worker);
        return NULL;
  }


--
_______________________________________________
linux-yocto mailing list
[email protected]
https://lists.yoctoproject.org/listinfo/linux-yocto

Reply via email to