The smatch checker reports a "sleeping in atomic context" problem in
the following call chain:

miss_read_end_req()
   -> cache_seg_put()
      -> cache_seg_invalidate()
         -> cache_seg_gen_increase()
            -> mutex_lock(&cache_seg->ctrl_lock);
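
For reference, a minimal illustration (not the pcache code itself) of
the pattern smatch flags here: mutex_lock() may sleep, so it must not
be reachable from atomic context, for example a completion path that
runs under a spinlock. The names below are made up, and the exact
source of the atomic context in miss_read_end_req() may differ.

   #include <linux/mutex.h>
   #include <linux/spinlock.h>

   static DEFINE_MUTEX(example_lock);
   static DEFINE_SPINLOCK(example_spin);

   static void example_end_req(void)
   {
           spin_lock(&example_spin);    /* atomic context from here on */
           mutex_lock(&example_lock);   /* may sleep: not allowed here */
           mutex_unlock(&example_lock);
           spin_unlock(&example_spin);
   }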

In practice, this mutex_lock() never actually sleeps: it is only
reached when cache_seg_put() drops the last reference, and only one
caller can ever observe the refcount hit zero. That is also why the
issue never shows up in real testing.
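
To make the single-threaded claim concrete, a rough sketch of the
usual last-reference pattern, assuming cache_seg_put() ends up doing
an atomic_dec_and_test() on cache_seg->refs (the exact code in
cache_segment.c may differ):

   static void cache_seg_put(struct pcache_cache_segment *cache_seg)
   {
           /*
            * Only the one caller that drops the final reference sees
            * the counter reach zero, so the invalidate path can never
            * run concurrently with itself.
            */
           if (atomic_dec_and_test(&cache_seg->refs))
                   cache_seg_invalidate(cache_seg);
   }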

However, the code is still wrong: mutex_lock() may sleep, so it must
not be called from atomic context even if it happens not to sleep in
practice. The original purpose of ctrl_lock was to prevent read/write
conflicts on the cache segment control information. Looking at the
current usage, all control information accesses are single-threaded:
reads only occur during the init phase, where no conflicts are
possible, and writes happen once in the init phase (also
single-threaded) and once when cache_seg_put() drops the last
reference (again single-threaded).

Remove ctrl_lock entirely and add a comment above
cache_seg_ctrl_write() documenting why no locking is needed.

Signed-off-by: Dongsheng Yang <dongsheng.y...@linux.dev>
---
 drivers/md/dm-pcache/cache.h         |  1 -
 drivers/md/dm-pcache/cache_segment.c | 22 +++++++++++++++++-----
 2 files changed, 17 insertions(+), 6 deletions(-)

diff --git a/drivers/md/dm-pcache/cache.h b/drivers/md/dm-pcache/cache.h
index f005c9d9a7aa..1136d86958c8 100644
--- a/drivers/md/dm-pcache/cache.h
+++ b/drivers/md/dm-pcache/cache.h
@@ -90,7 +90,6 @@ struct pcache_cache_segment {
        u32                     gen_index;
 
        struct pcache_cache_seg_ctrl *cache_seg_ctrl;
-       struct mutex            ctrl_lock;
 };
 
 /* rbtree for cache entries */
diff --git a/drivers/md/dm-pcache/cache_segment.c b/drivers/md/dm-pcache/cache_segment.c
index 8f9534ae04c3..f0b58980806e 100644
--- a/drivers/md/dm-pcache/cache_segment.c
+++ b/drivers/md/dm-pcache/cache_segment.c
@@ -72,7 +72,6 @@ static int cache_seg_ctrl_load(struct pcache_cache_segment *cache_seg)
        struct pcache_cache_seg_gen cache_seg_gen, *cache_seg_gen_addr;
        int ret = 0;
 
-       mutex_lock(&cache_seg->ctrl_lock);
        cache_seg_gen_addr = pcache_meta_find_latest(&cache_seg_ctrl->gen->header,
                                             sizeof(struct pcache_cache_seg_gen),
                                             sizeof(struct pcache_cache_seg_gen),
@@ -93,7 +92,6 @@ static int cache_seg_ctrl_load(struct pcache_cache_segment *cache_seg)
        cache_seg->gen_seq = cache_seg_gen.header.seq;
        cache_seg->gen_index = (cache_seg_gen_addr - cache_seg_ctrl->gen);
 out:
-       mutex_unlock(&cache_seg->ctrl_lock);
 
        return ret;
 }
@@ -105,11 +103,27 @@ static inline struct pcache_cache_seg_gen *get_cache_seg_gen_addr(struct pcache_
        return (cache_seg_ctrl->gen + cache_seg->gen_index);
 }
 
+/*
+ * cache_seg_ctrl_write - write cache segment control information
+ * @cache_seg: the cache segment to update
+ *
+ * This function writes the control information of a cache segment to media.
+ *
+ * Although this updates shared control data, we intentionally do not use
+ * any locking here.  All accesses to control information are single-threaded:
+ *
+ *   - All reads occur during the init phase, where no concurrent writes
+ *     can happen.
+ *   - Writes happen once during init and once when the last reference
+ *     to the segment is dropped in cache_seg_put().
+ *
+ * Both cases are guaranteed to be single-threaded, so there is no risk
+ * of concurrent read/write races.
+ */
 static void cache_seg_ctrl_write(struct pcache_cache_segment *cache_seg)
 {
        struct pcache_cache_seg_gen cache_seg_gen;
 
-       mutex_lock(&cache_seg->ctrl_lock);
        cache_seg_gen.gen = cache_seg->gen;
        cache_seg_gen.header.seq = ++cache_seg->gen_seq;
        cache_seg_gen.header.crc = pcache_meta_crc(&cache_seg_gen.header,
@@ -119,7 +133,6 @@ static void cache_seg_ctrl_write(struct pcache_cache_segment *cache_seg)
        pmem_wmb();
 
        cache_seg->gen_index = (cache_seg->gen_index + 1) % PCACHE_META_INDEX_MAX;
-       mutex_unlock(&cache_seg->ctrl_lock);
 }
 
 static void cache_seg_ctrl_init(struct pcache_cache_segment *cache_seg)
@@ -177,7 +190,6 @@ int cache_seg_init(struct pcache_cache *cache, u32 seg_id, u32 cache_seg_id,
        spin_lock_init(&cache_seg->gen_lock);
        atomic_set(&cache_seg->refs, 0);
        mutex_init(&cache_seg->info_lock);
-       mutex_init(&cache_seg->ctrl_lock);
 
        /* init pcache_segment */
        seg_options.type = PCACHE_SEGMENT_TYPE_CACHE_DATA;
-- 
2.43.0

