For stacked atomic writes, ensure that the bottom device start sector
is aligned with both the device atomic write unit min and any atomic
write boundary. Otherwise we may permit misaligned atomic writes, which
can straddle a bottom device atomic write boundary.
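
For illustration (hypothetical numbers, not taken from this patch):
suppose the bottom device reports atomic_write_hw_boundary = 64K and is
stacked at start sector 200, i.e. offset 100K. An 8K atomic write at
offset 24K of the stacked device crosses no 64K multiple there, yet on
the bottom device it spans 124K - 132K and so straddles the boundary at
128K. Aligning the start sector to max(unit min, boundary) rules this
out.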

Rework bdev_can_atomic_write() to reuse the alignment check via a new
common helper. That helper also uses atomic_write_hw_unit_min, which is
more appropriate (than atomic_write_unit_min).
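
As a minimal sketch of the new helper's behaviour (values are
hypothetical; SECTOR_SHIFT is 9):

        struct queue_limits b = {
                .atomic_write_hw_unit_min = SZ_4K,      /* <linux/sizes.h> */
                .atomic_write_hw_boundary = SZ_64K,
        };

        /* alignment = max(4K, 64K) >> SECTOR_SHIFT = 128 sectors */
        blk_atomic_write_start_sect_aligned(256, &b);   /* aligned */
        blk_atomic_write_start_sect_aligned(200, &b);   /* misaligned */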

Fixes: d7f36dc446e89 ("block: Support atomic writes limits for stacked devices")
Signed-off-by: John Garry <john.g.ga...@oracle.com>
---
 block/blk-settings.c   |  7 +++++--
 include/linux/blkdev.h | 21 ++++++++++++---------
 2 files changed, 17 insertions(+), 11 deletions(-)

diff --git a/block/blk-settings.c b/block/blk-settings.c
index 8f09e33f41f6..a8dd5c097b8a 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -584,7 +584,7 @@ static bool blk_stack_atomic_writes_head(struct queue_limits *t,
 }
 
 static void blk_stack_atomic_writes_limits(struct queue_limits *t,
-                               struct queue_limits *b)
+                               struct queue_limits *b, sector_t start)
 {
        if (!(t->features & BLK_FEAT_ATOMIC_WRITES_STACKED))
                goto unsupported;
@@ -592,6 +592,9 @@ static void blk_stack_atomic_writes_limits(struct queue_limits *t,
        if (!b->atomic_write_unit_min)
                goto unsupported;
 
+       if (!blk_atomic_write_start_sect_aligned(start, b))
+               goto unsupported;
+
        /*
         * If atomic_write_hw_max is set, we have already stacked 1x bottom
         * device, so check for compliance.
@@ -774,7 +777,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                t->zone_write_granularity = 0;
                t->max_zone_append_sectors = 0;
        }
-       blk_stack_atomic_writes_limits(t, b);
+       blk_stack_atomic_writes_limits(t, b, start);
 
        return ret;
 }
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 378d3a1a22fc..b9776d469781 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1699,6 +1699,15 @@ struct io_comp_batch {
        void (*complete)(struct io_comp_batch *);
 };
 
+static inline bool blk_atomic_write_start_sect_aligned(sector_t sector,
+                                               struct queue_limits *limits)
+{
+       unsigned int alignment = max(limits->atomic_write_hw_unit_min,
+                               limits->atomic_write_hw_boundary);
+
+       return IS_ALIGNED(sector, alignment >> SECTOR_SHIFT);
+}
+
 static inline bool bdev_can_atomic_write(struct block_device *bdev)
 {
        struct request_queue *bd_queue = bdev->bd_queue;
@@ -1707,15 +1716,9 @@ static inline bool bdev_can_atomic_write(struct block_device *bdev)
        if (!limits->atomic_write_unit_min)
                return false;
 
-       if (bdev_is_partition(bdev)) {
-               sector_t bd_start_sect = bdev->bd_start_sect;
-               unsigned int alignment =
-                       max(limits->atomic_write_unit_min,
-                           limits->atomic_write_hw_boundary);
-
-               if (!IS_ALIGNED(bd_start_sect, alignment >> SECTOR_SHIFT))
-                       return false;
-       }
+       if (bdev_is_partition(bdev))
+               return blk_atomic_write_start_sect_aligned(bdev->bd_start_sect,
+                                                       limits);
 
        return true;
 }
-- 
2.31.1