The commit is pushed to "branch-rh9-5.14.0-427.92.1.vz9.88.x-ovz" and will 
appear at [email protected]:openvz/vzkernel.git
after rh9-5.14.0-427.92.1.vz9.88.6
------>
commit 4e659676652faba5a9e653f67ed6967e7f403fac
Author: Pavel Tikhomirov <[email protected]>
Date:   Mon Nov 10 14:58:11 2025 +0800

    md: mark dm-ploop and dm-qcow2 targets non-fua
    
    This effectively makes the dm layer handle REQ_FUA by itself, by
    sending an explicit flush instead of relying on dm-ploop and dm-qcow2
    to do that. We already had REQ_FUA support in dm-ploop, but it looks
    more robust to offload this to the generic dm code.
    
    Note: dm-ploop's FUA handling is not reverted in this patch; it just
    becomes unused, so it can be removed in a follow-up.
    
    We don't really have an existing way to force device mapper to switch to
    non-fua mode, so let's add a queue_limits->fua field to configure that.
    
    https://virtuozzo.atlassian.net/browse/VSTOR-118628
    Signed-off-by: Pavel Tikhomirov <[email protected]>
    
    Feature: dm-ploop: ploop target driver
    Feature: dm-qcow2: block device over QCOW2 files driver
---
 block/blk-settings.c         | 4 ++++
 drivers/md/dm-ploop-target.c | 2 ++
 drivers/md/dm-qcow2-target.c | 2 ++
 drivers/md/dm-table.c        | 3 ++-
 include/linux/blkdev.h       | 6 ++++++
 5 files changed, 16 insertions(+), 1 deletion(-)

diff --git a/block/blk-settings.c b/block/blk-settings.c
index 4dd59059b788e..9f647bb865ecb 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -34,6 +34,7 @@ EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
  */
 void blk_set_default_limits(struct queue_limits *lim)
 {
+       lim->fua = BLK_FUA;
        lim->max_segments = BLK_MAX_SEGMENTS;
        lim->max_discard_segments = 1;
        lim->max_integrity_segments = 0;
@@ -555,6 +556,9 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 {
        unsigned int top, bottom, alignment, ret = 0;
 
+       if (b->fua == BLK_NO_FUA)
+               t->fua = BLK_NO_FUA;
+
        t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
        t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
        t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
diff --git a/drivers/md/dm-ploop-target.c b/drivers/md/dm-ploop-target.c
index ec12c5d2a2c36..1698131aca3e1 100644
--- a/drivers/md/dm-ploop-target.c
+++ b/drivers/md/dm-ploop-target.c
@@ -661,6 +661,8 @@ static void ploop_io_hints(struct dm_target *ti, struct queue_limits *limits)
        limits->discard_granularity = CLU_SIZE(ploop);
        limits->discard_alignment = 0;
        limits->discard_misaligned = 0;
+
+       limits->fua = BLK_NO_FUA;
 }
 
 static void ploop_status(struct dm_target *ti, status_type_t type,
diff --git a/drivers/md/dm-qcow2-target.c b/drivers/md/dm-qcow2-target.c
index e4fb1f9359105..fac68db7082d5 100644
--- a/drivers/md/dm-qcow2-target.c
+++ b/drivers/md/dm-qcow2-target.c
@@ -1054,6 +1054,8 @@ static void qcow2_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
        blk_limits_io_min(limits, block_size);
        blk_limits_io_opt(limits, qcow2->clu_size);
+
+       limits->fua = BLK_NO_FUA;
 }
 
 static void qcow2_status(struct dm_target *ti, status_type_t type,
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 39d6caeeeedb2..3696001919aea 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1962,7 +1962,8 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 
        if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
                wc = true;
-               if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
+               if (limits->fua == BLK_FUA &&
+                   dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
                        fua = true;
        }
        blk_queue_write_cache(q, wc, fua);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 3009964707e61..f180283fda895 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -293,8 +293,14 @@ enum blk_bounce {
        BLK_BOUNCE_HIGH,
 };
 
+enum blk_fua {
+       BLK_FUA,
+       BLK_NO_FUA,
+};
+
 struct queue_limits {
        enum blk_bounce         bounce;
+       enum blk_fua            fua;
        unsigned long           seg_boundary_mask;
        unsigned long           virt_boundary_mask;
 
_______________________________________________
Devel mailing list
[email protected]
https://lists.openvz.org/mailman/listinfo/devel

Reply via email to