commit:     6b9d9c40593cd66b2df8055d546f7697b2cc8210
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Mar 11 11:18:49 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Mar 11 11:18:49 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=6b9d9c40
Linux patch 6.2.4

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |   4 ++
 1003_linux-6.2.4.patch | 123 +++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 127 insertions(+)

diff --git a/0000_README b/0000_README
index 57b81d70..f3f521f1 100644
--- a/0000_README
+++ b/0000_README
@@ -55,6 +55,10 @@ Patch:  1002_linux-6.2.3.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.2.3
 
+Patch:  1003_linux-6.2.4.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.2.4
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.
diff --git a/1003_linux-6.2.4.patch b/1003_linux-6.2.4.patch
new file mode 100644
index 00000000..e8c12ad2
--- /dev/null
+++ b/1003_linux-6.2.4.patch
@@ -0,0 +1,123 @@
+diff --git a/Makefile b/Makefile
+index eef164b4172a9..83cbbc3adbb12 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 2
+-SUBLEVEL = 3
++SUBLEVEL = 4
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index 45881f8c79130..9ac1efb053e08 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -118,32 +118,14 @@ static void blkg_free_workfn(struct work_struct *work)
+ {
+ 	struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
+ 					     free_work);
+-	struct request_queue *q = blkg->q;
+ 	int i;
+ 
+-	/*
+-	 * pd_free_fn() can also be called from blkcg_deactivate_policy(),
+-	 * in order to make sure pd_free_fn() is called in order, the deletion
+-	 * of the list blkg->q_node is delayed to here from blkg_destroy(), and
+-	 * blkcg_mutex is used to synchronize blkg_free_workfn() and
+-	 * blkcg_deactivate_policy().
+-	 */
+-	if (q)
+-		mutex_lock(&q->blkcg_mutex);
+-
+ 	for (i = 0; i < BLKCG_MAX_POLS; i++)
+ 		if (blkg->pd[i])
+ 			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
+ 
+-	if (blkg->parent)
+-		blkg_put(blkg->parent);
+-
+-	if (q) {
+-		list_del_init(&blkg->q_node);
+-		mutex_unlock(&q->blkcg_mutex);
+-		blk_put_queue(q);
+-	}
+-
++	if (blkg->q)
++		blk_put_queue(blkg->q);
+ 	free_percpu(blkg->iostat_cpu);
+ 	percpu_ref_exit(&blkg->refcnt);
+ 	kfree(blkg);
+@@ -176,6 +158,8 @@ static void __blkg_release(struct rcu_head *rcu)
+ 
+ 	/* release the blkcg and parent blkg refs this blkg has been holding */
+ 	css_put(&blkg->blkcg->css);
++	if (blkg->parent)
++		blkg_put(blkg->parent);
+ 	blkg_free(blkg);
+ }
+ 
+@@ -474,14 +458,9 @@ static void blkg_destroy(struct blkcg_gq *blkg)
+ 	lockdep_assert_held(&blkg->q->queue_lock);
+ 	lockdep_assert_held(&blkcg->lock);
+ 
+-	/*
+-	 * blkg stays on the queue list until blkg_free_workfn(), see details in
+-	 * blkg_free_workfn(), hence this function can be called from
+-	 * blkcg_destroy_blkgs() first and again from blkg_destroy_all() before
+-	 * blkg_free_workfn().
+-	 */
+-	if (hlist_unhashed(&blkg->blkcg_node))
+-		return;
++	/* Something wrong if we are trying to remove same group twice */
++	WARN_ON_ONCE(list_empty(&blkg->q_node));
++	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
+ 
+ 	for (i = 0; i < BLKCG_MAX_POLS; i++) {
+ 		struct blkcg_policy *pol = blkcg_policy[i];
+@@ -493,6 +472,7 @@ static void blkg_destroy(struct blkcg_gq *blkg)
+ 	blkg->online = false;
+ 
+ 	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
++	list_del_init(&blkg->q_node);
+ 	hlist_del_init_rcu(&blkg->blkcg_node);
+ 
+ 	/*
+@@ -1293,7 +1273,6 @@ int blkcg_init_disk(struct gendisk *disk)
+ 	int ret;
+ 
+ 	INIT_LIST_HEAD(&q->blkg_list);
+-	mutex_init(&q->blkcg_mutex);
+ 
+ 	new_blkg = blkg_alloc(&blkcg_root, disk, GFP_KERNEL);
+ 	if (!new_blkg)
+@@ -1531,7 +1510,6 @@ void blkcg_deactivate_policy(struct request_queue *q,
+ 	if (queue_is_mq(q))
+ 		blk_mq_freeze_queue(q);
+ 
+-	mutex_lock(&q->blkcg_mutex);
+ 	spin_lock_irq(&q->queue_lock);
+ 
+ 	__clear_bit(pol->plid, q->blkcg_pols);
+@@ -1550,7 +1528,6 @@ void blkcg_deactivate_policy(struct request_queue *q,
+ 	}
+ 
+ 	spin_unlock_irq(&q->queue_lock);
+-	mutex_unlock(&q->blkcg_mutex);
+ 
+ 	if (queue_is_mq(q))
+ 		blk_mq_unfreeze_queue(q);
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 10ee92db680c9..43d4e073b1115 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -484,7 +484,6 @@ struct request_queue {
+ 	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
+ 	struct blkcg_gq		*root_blkg;
+ 	struct list_head	blkg_list;
+-	struct mutex		blkcg_mutex;
+ #endif
+ 
+ 	struct queue_limits	limits;
