Every bio is now associated with a blkg, putting blkg_get, blkg_try_get,
and blkg_put on the hot path. Switch the refcnt in blkg over to a
percpu_ref so that gets and puts remain percpu operations in the common
case.
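
For context, the percpu_ref lifecycle adopted below is roughly the
following sketch (the "obj" names are illustrative only, not taken from
this patch):

	#include <linux/percpu-refcount.h>
	#include <linux/slab.h>

	struct obj {
		struct percpu_ref refcnt;
	};

	/* runs once the last reference is dropped after percpu_ref_kill() */
	static void obj_release(struct percpu_ref *ref)
	{
		struct obj *obj = container_of(ref, struct obj, refcnt);

		percpu_ref_exit(&obj->refcnt);
		kfree(obj);
	}

	static struct obj *obj_create(void)
	{
		struct obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

		if (!obj)
			return NULL;
		/* the ref starts in percpu mode holding one reference */
		if (percpu_ref_init(&obj->refcnt, obj_release, 0, GFP_KERNEL)) {
			kfree(obj);
			return NULL;
		}
		return obj;
	}

	/* hot path: gets and puts stay percpu until the ref is killed */
	static void obj_use(struct obj *obj)
	{
		percpu_ref_get(&obj->refcnt);
		percpu_ref_put(&obj->refcnt);
	}

	/*
	 * teardown: switch to atomic mode and drop the initial reference;
	 * obj_release() runs once all outstanding references are put.
	 */
	static void obj_destroy(struct obj *obj)
	{
		percpu_ref_kill(&obj->refcnt);
	}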

Signed-off-by: Dennis Zhou <[email protected]>
Acked-by: Tejun Heo <[email protected]>
Reviewed-by: Josef Bacik <[email protected]>
---
 block/blk-cgroup.c         | 41 ++++++++++++++++++++++++++++++++++++--
 include/linux/blk-cgroup.h | 15 +++++---------
 2 files changed, 44 insertions(+), 12 deletions(-)

diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 120f2e2835fb..2ca7611fe274 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -81,6 +81,37 @@ static void blkg_free(struct blkcg_gq *blkg)
        kfree(blkg);
 }
 
+static void __blkg_release(struct rcu_head *rcu)
+{
+       struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);
+
+       percpu_ref_exit(&blkg->refcnt);
+
+       /* release the blkcg and parent blkg refs this blkg has been holding */
+       css_put(&blkg->blkcg->css);
+       if (blkg->parent)
+               blkg_put(blkg->parent);
+
+       wb_congested_put(blkg->wb_congested);
+
+       blkg_free(blkg);
+}
+
+/*
+ * A group is RCU protected, but having an rcu lock does not mean that one
+ * can access all the fields of blkg and assume these are valid.  For
+ * example, don't try to follow throtl_data and request queue links.
+ *
+ * Having a reference to blkg under an rcu allows accesses to only values
+ * local to groups like group stats and group rate limits.
+ */
+static void blkg_release(struct percpu_ref *ref)
+{
+       struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt);
+
+       call_rcu(&blkg->rcu_head, __blkg_release);
+}
+
 /**
  * blkg_alloc - allocate a blkg
  * @blkcg: block cgroup the new blkg is associated with
@@ -107,7 +138,6 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
        blkg->q = q;
        INIT_LIST_HEAD(&blkg->q_node);
        blkg->blkcg = blkcg;
-       atomic_set(&blkg->refcnt, 1);
 
        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];
@@ -207,6 +237,11 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
                blkg_get(blkg->parent);
        }
 
+       ret = percpu_ref_init(&blkg->refcnt, blkg_release, 0,
+                             GFP_NOWAIT | __GFP_NOWARN);
+       if (ret)
+               goto err_cancel_ref;
+
        /* invoke per-policy init */
        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];
@@ -239,6 +274,8 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
        blkg_put(blkg);
        return ERR_PTR(ret);
 
+err_cancel_ref:
+       percpu_ref_exit(&blkg->refcnt);
 err_put_congested:
        wb_congested_put(wb_congested);
 err_put_css:
@@ -367,7 +404,7 @@ static void blkg_destroy(struct blkcg_gq *blkg)
         * Put the reference taken at the time of creation so that when all
         * queues are gone, group can be destroyed.
         */
-       blkg_put(blkg);
+       percpu_ref_kill(&blkg->refcnt);
 }
 
 /**
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 284819a4d122..d19ef15a673d 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -124,7 +124,7 @@ struct blkcg_gq {
        struct blkcg_gq                 *parent;
 
        /* reference count */
-       atomic_t                        refcnt;
+       struct percpu_ref               refcnt;
 
        /* is this blkg online? protected by both blkcg and q locks */
        bool                            online;
@@ -487,8 +487,7 @@ static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
  */
 static inline void blkg_get(struct blkcg_gq *blkg)
 {
-       WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
-       atomic_inc(&blkg->refcnt);
+       percpu_ref_get(&blkg->refcnt);
 }
 
 /**
@@ -500,7 +499,7 @@ static inline void blkg_get(struct blkcg_gq *blkg)
  */
 static inline struct blkcg_gq *blkg_try_get(struct blkcg_gq *blkg)
 {
-       if (atomic_inc_not_zero(&blkg->refcnt))
+       if (percpu_ref_tryget(&blkg->refcnt))
                return blkg;
        return NULL;
 }
@@ -514,23 +513,19 @@ static inline struct blkcg_gq *blkg_try_get(struct blkcg_gq *blkg)
  */
 static inline struct blkcg_gq *blkg_try_get_closest(struct blkcg_gq *blkg)
 {
-       while (!atomic_inc_not_zero(&blkg->refcnt))
+       while (!percpu_ref_tryget(&blkg->refcnt))
                blkg = blkg->parent;
 
        return blkg;
 }
 
-void __blkg_release_rcu(struct rcu_head *rcu);
-
 /**
  * blkg_put - put a blkg reference
  * @blkg: blkg to put
  */
 static inline void blkg_put(struct blkcg_gq *blkg)
 {
-       WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
-       if (atomic_dec_and_test(&blkg->refcnt))
-               call_rcu(&blkg->rcu_head, __blkg_release_rcu);
+       percpu_ref_put(&blkg->refcnt);
 }
 
 /**
-- 
2.17.1
