Switch blkg_destroy() from __xa_erase() to xa_erase() so that the
blkg_array xa_lock is taken and released within blkg_destroy() itself
instead of forcing every caller to hold it across the call. This lets
us drop the lockdep assertion and the xa_lock()/xa_unlock() pairs in
blkg_destroy_all() and blkcg_destroy_blkgs().
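
For context, xa_erase() is the self-locking variant of __xa_erase();
its behaviour is roughly the following sketch (illustrative only, not
part of this patch):

	void *xa_erase(struct xarray *xa, unsigned long index)
	{
		void *entry;

		xa_lock(xa);
		entry = __xa_erase(xa, index);
		xa_unlock(xa);

		return entry;
	}

Callers still hold q->queue_lock across blkg_destroy(), as asserted by
the remaining lockdep_assert_held(); only the blkg_array locking moves
inside the erase.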

Signed-off-by: Matthew Wilcox <[email protected]>
---
 block/blk-cgroup.c | 9 +--------
 1 file changed, 1 insertion(+), 8 deletions(-)

diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index bd6eea0587fb..6962e2fc612d 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -367,7 +367,6 @@ static void blkg_destroy(struct blkcg_gq *blkg)
        int i;
 
        lockdep_assert_held(&blkg->q->queue_lock);
-       lockdep_assert_held(&blkcg->blkg_array.xa_lock);
 
        /* Something wrong if we are trying to remove same group twice */
        WARN_ON_ONCE(list_empty(&blkg->q_node));
@@ -386,7 +385,7 @@ static void blkg_destroy(struct blkcg_gq *blkg)
 
        blkg->online = false;
 
-       __xa_erase(&blkcg->blkg_array, blkg->q->id);
+       xa_erase(&blkcg->blkg_array, blkg->q->id);
        list_del_init(&blkg->q_node);
 
        /*
@@ -416,11 +415,7 @@ static void blkg_destroy_all(struct request_queue *q)
 
        spin_lock_irq(&q->queue_lock);
        list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
-               struct blkcg *blkcg = blkg->blkcg;
-
-               xa_lock(&blkcg->blkg_array);
                blkg_destroy(blkg);
-               xa_unlock(&blkcg->blkg_array);
        }
 
        q->root_blkg = NULL;
@@ -1080,9 +1075,7 @@ void blkcg_destroy_blkgs(struct blkcg *blkcg)
                struct request_queue *q = blkg->q;
 
                spin_lock_irq(&q->queue_lock);
-               xa_lock(&blkcg->blkg_array);
                blkg_destroy(blkg);
-               xa_unlock(&blkcg->blkg_array);
                spin_unlock_irq(&q->queue_lock);
        }
 }
-- 
2.20.1
