In the truncate or hole-punch path in dax, we clear out sub-page ranges.
If these sub-page ranges are sector-aligned and sector-sized, we can do the
zeroing through the driver instead so that error-clearing is handled
automatically.

For sub-sector ranges, we still have to rely on clear_pmem and risk
tripping over errors.
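
Roughly, the aligned path boils down to the following (an illustrative
sketch only, not the hunk below; zero_through_driver() is a made-up
helper name, and 'sector' is assumed to be the first 512-byte sector of
the page being zeroed):

static int zero_through_driver(struct block_device *bdev, sector_t sector,
			       unsigned int offset, unsigned int length)
{
	unsigned int lbs = bdev_logical_block_size(bdev);

	/* only whole logical blocks can go through the block layer */
	if (!IS_ALIGNED(offset, lbs) || !IS_ALIGNED(length, lbs))
		return -EINVAL;	/* caller falls back to clear_pmem() */

	/* blkdev_issue_zeroout() counts in 512-byte sectors */
	return blkdev_issue_zeroout(bdev, sector + (offset >> 9),
				    length >> 9, GFP_NOFS, true);
}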

Cc: Matthew Wilcox <[email protected]>
Cc: Dan Williams <[email protected]>
Cc: Ross Zwisler <[email protected]>
Cc: Jeff Moyer <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Cc: Dave Chinner <[email protected]>
Cc: Jan Kara <[email protected]>
Signed-off-by: Vishal Verma <[email protected]>
---
 fs/dax.c | 30 +++++++++++++++++++++++++-----
 1 file changed, 25 insertions(+), 5 deletions(-)

diff --git a/fs/dax.c b/fs/dax.c
index 5948d9b..d8c974e 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1196,6 +1196,20 @@ out:
 }
 EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
 
+static bool dax_range_is_aligned(struct block_device *bdev,
+                                struct blk_dax_ctl *dax, unsigned int offset,
+                                unsigned int length)
+{
+       unsigned short sector_size = bdev_logical_block_size(bdev);
+
+       if (((u64)dax->addr + offset) % sector_size)
+               return false;
+       if (length % sector_size)
+               return false;
+
+       return true;
+}
+
 /**
  * dax_zero_page_range - zero a range within a page of a DAX file
  * @inode: The file being truncated
@@ -1240,11 +1254,17 @@ int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
                        .size = PAGE_SIZE,
                };
 
-               if (dax_map_atomic(bdev, &dax) < 0)
-                       return PTR_ERR(dax.addr);
-               clear_pmem(dax.addr + offset, length);
-               wmb_pmem();
-               dax_unmap_atomic(bdev, &dax);
+               if (dax_range_is_aligned(bdev, &dax, offset, length))
+                       return blkdev_issue_zeroout(bdev,
+                                       dax.sector + (offset >> 9),
+                                       length >> 9, GFP_NOFS, true);
+               else {
+                       if (dax_map_atomic(bdev, &dax) < 0)
+                               return PTR_ERR(dax.addr);
+                       clear_pmem(dax.addr + offset, length);
+                       wmb_pmem();
+                       dax_unmap_atomic(bdev, &dax);
+               }
        }
 
        return 0;
-- 
2.5.5
