The dm-writecache target reads its metadata in the target constructor. 
However, when the target is reloaded, another active instance may still be 
running on the same device. This is the sequence of operations during a reload:

1. construct new target
2. suspend old target
3. resume new target
4. destroy old target

Metadata written by the old target between steps 1 and 2 would not be
visible to the new target.

This patch fixes the data corruption by loading the metadata in the resume
handler. The target constructor still reads the first part of the metadata
(at most one logical block) for parameter validation; the full metadata area
is read again when the target is resumed.
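
For illustration only, the following minimal userspace sketch (not kernel
code; all names in it are hypothetical) models why the metadata must be
(re)read at resume time: whatever the old target flushes while suspending is
only visible to the new target if the new target reads the device after that
point, i.e. in its resume handler rather than only in its constructor.

    #include <stdio.h>

    /* "On-disk" metadata shared by the old and the new instance. */
    static int device_metadata = 1;

    struct instance {
            int cached_metadata;
    };

    /* Target constructor: takes a private copy of the metadata. */
    static void ctr(struct instance *i)
    {
            i->cached_metadata = device_metadata;
    }

    /* Suspending the old target flushes its dirty metadata to the device. */
    static void old_target_suspend(int dirty_metadata)
    {
            device_metadata = dirty_metadata;
    }

    /* The fix: read the metadata again when the new target is resumed. */
    static void resume(struct instance *i)
    {
            i->cached_metadata = device_metadata;
    }

    int main(void)
    {
            struct instance new_target;

            ctr(&new_target);        /* 1. construct new target             */
            old_target_suspend(2);   /* 2. suspend old target (writes back) */

            /* 3. resume new target */
            printf("constructor-time copy: %d (stale)\n", new_target.cached_metadata);
            resume(&new_target);
            printf("resume-time copy:      %d (up to date)\n", new_target.cached_metadata);

            return 0;                /* 4. destroy old target               */
    }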

Signed-off-by: Mikulas Patocka <[email protected]>
Cc: [email protected]      # v4.18+
Fixes: 48debafe4f2f ("dm: add writecache target")

---
 drivers/md/dm-writecache.c |   44 ++++++++++++++++++++++++++++++--------------
 1 file changed, 30 insertions(+), 14 deletions(-)

Index: linux-2.6/drivers/md/dm-writecache.c
===================================================================
--- linux-2.6.orig/drivers/md/dm-writecache.c   2020-04-08 14:47:17.000000000 +0200
+++ linux-2.6/drivers/md/dm-writecache.c        2020-04-08 20:59:15.000000000 +0200
@@ -931,6 +931,24 @@ static int writecache_alloc_entries(stru
        return 0;
 }
 
+static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors)
+{
+       struct dm_io_region region;
+       struct dm_io_request req;
+
+       region.bdev = wc->ssd_dev->bdev;
+       region.sector = wc->start_sector;
+       region.count = n_sectors;
+       req.bi_op = REQ_OP_READ;
+       req.bi_op_flags = REQ_SYNC;
+       req.mem.type = DM_IO_VMA;
+       req.mem.ptr.vma = (char *)wc->memory_map;
+       req.client = wc->dm_io;
+       req.notify.fn = NULL;
+
+       return dm_io(&req, 1, &region, NULL);
+}
+
 static void writecache_resume(struct dm_target *ti)
 {
        struct dm_writecache *wc = ti->private;
@@ -941,8 +959,16 @@ static void writecache_resume(struct dm_
 
        wc_lock(wc);
 
-       if (WC_MODE_PMEM(wc))
+       if (WC_MODE_PMEM(wc)) {
                persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size);
+       } else {
+               r = writecache_read_metadata(wc, wc->metadata_sectors);
+               if (r) {
+                       writecache_error(wc, r, "unable to read metadata: %d", r);
+                       memset((char *)wc->memory_map + offsetof(struct wc_memory_superblock, entries), -1,
+                              (wc->metadata_sectors << SECTOR_SHIFT) - offsetof(struct wc_memory_superblock, entries));
+               }
+       }
 
        wc->tree = RB_ROOT;
        INIT_LIST_HEAD(&wc->lru);
@@ -2200,8 +2226,6 @@ invalid_optional:
                        goto bad;
                }
        } else {
-               struct dm_io_region region;
-               struct dm_io_request req;
                size_t n_blocks, n_metadata_blocks;
                uint64_t n_bitmap_bits;
 
@@ -2258,17 +2282,9 @@ invalid_optional:
                        goto bad;
                }
 
-               region.bdev = wc->ssd_dev->bdev;
-               region.sector = wc->start_sector;
-               region.count = wc->metadata_sectors;
-               req.bi_op = REQ_OP_READ;
-               req.bi_op_flags = REQ_SYNC;
-               req.mem.type = DM_IO_VMA;
-               req.mem.ptr.vma = (char *)wc->memory_map;
-               req.client = wc->dm_io;
-               req.notify.fn = NULL;
-
-               r = dm_io(&req, 1, &region, NULL);
+               r = writecache_read_metadata(wc,
+                       min((sector_t)bdev_logical_block_size(wc->ssd_dev->bdev) >> SECTOR_SHIFT,
+                           (sector_t)wc->metadata_sectors));
                if (r) {
                        ti->error = "Unable to read metadata";
                        goto bad;
