Call dax_copy_edges() from each dax actor function to perform copy-on-write
(COW) when the IOMAP_COW iomap type is set.

Signed-off-by: Shiyang Ruan <[email protected]>
---
 fs/dax.c | 39 +++++++++++++++++++++++++++++++++------
 1 file changed, 33 insertions(+), 6 deletions(-)
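Note (illustration only, not part of the patch): dax_copy_edges() copies
only the bytes of the surrounding page- or PMD-sized block that lie
outside the written range [pos, pos + length), so a partial write to a
COW extent does not lose the old head/tail data. Below is a minimal
userspace sketch of that arithmetic; the names (copy_edges) and layout
are assumptions for illustration, not the kernel implementation:

#include <stdio.h>
#include <string.h>

/*
 * Copy the "edges" of the block containing the write range
 * [pos, pos + len): the head before pos and the tail after pos + len.
 * dst and src point at the start of the block containing pos, and blk
 * (the block size) must be a power of two, mirroring PAGE_SIZE/PMD_SIZE.
 */
static void copy_edges(char *dst, const char *src,
		       size_t pos, size_t len, size_t blk)
{
	size_t blk_start = pos & ~(blk - 1);
	size_t head = pos - blk_start;			/* bytes before pos */
	size_t end = pos + len;
	size_t blk_end = (end + blk - 1) & ~(blk - 1);	/* round_up(end, blk) */

	if (head)					/* unaligned head */
		memcpy(dst, src, head);
	if (end < blk_end)				/* unaligned tail */
		memcpy(dst + (end - blk_start),
		       src + (end - blk_start), blk_end - end);
}

int main(void)
{
	char src[17] = "AAAAAAAAAAAAAAAA";
	char dst[17] = "................";

	/* New data lands in bytes 5..10; edges 0..4 and 11..15 are
	 * preserved by copying them from the old (source) extent. */
	copy_edges(dst, src, 5, 6, 16);
	printf("%s\n", dst);		/* prints AAAAA......AAAAA */
	return 0;
}

The bool pmd parameter added below simply selects PMD_SIZE instead of
PAGE_SIZE as the block size for this rounding.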

diff --git a/fs/dax.c b/fs/dax.c
index 450baafe2ea4..084cc21d47a4 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1098,11 +1098,12 @@ EXPORT_SYMBOL_GPL(__dax_zero_page_range);
  *                 offset/offset+length are not page aligned.
  */
 static int dax_copy_edges(struct inode *inode, loff_t pos, loff_t length,
-                         struct iomap *srcmap, void *daddr)
+                         struct iomap *srcmap, void *daddr, bool pmd)
 {
-       unsigned offset = pos & (PAGE_SIZE - 1);
+       size_t page_size = pmd ? PMD_SIZE : PAGE_SIZE;
+       unsigned offset = pos & (page_size - 1);
        loff_t end = pos + length;
-       loff_t pg_end = round_up(end, PAGE_SIZE);
+       loff_t pg_end = round_up(end, page_size);
        void *saddr = 0;
        int ret = 0;
 
@@ -1153,7 +1154,8 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
                        return iov_iter_zero(min(length, end - pos), iter);
        }
 
-       if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
+       if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED &&
+                        iomap->type != IOMAP_COW))
                return -EIO;
 
        /*
@@ -1192,6 +1194,13 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
                        break;
                }
 
+               if (iomap->type == IOMAP_COW) {
+                       ret = dax_copy_edges(inode, pos, length, srcmap, kaddr,
+                                            false);
+                       if (ret)
+                               break;
+               }
+
                map_len = PFN_PHYS(map_len);
                kaddr += offset;
                map_len -= offset;
@@ -1300,6 +1309,7 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
        vm_fault_t ret = 0;
        void *entry;
        pfn_t pfn;
+       void *kaddr;
 
        trace_dax_pte_fault(inode, vmf, ret);
        /*
@@ -1380,19 +1390,27 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
        sync = dax_fault_is_synchronous(flags, vma, &iomap);
 
        switch (iomap.type) {
+       case IOMAP_COW:
        case IOMAP_MAPPED:
                if (iomap.flags & IOMAP_F_NEW) {
                        count_vm_event(PGMAJFAULT);
                        count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
                        major = VM_FAULT_MAJOR;
                }
-               error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn, NULL);
+               error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn, &kaddr);
                if (error < 0)
                        goto error_finish_iomap;
 
                entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
                                                 0, write && !sync);
 
+               if (iomap.type == IOMAP_COW) {
+                       error = dax_copy_edges(inode, pos, PAGE_SIZE, &srcmap,
+                                              kaddr, false);
+                       if (error)
+                               goto error_finish_iomap;
+               }
+
                /*
                 * If we are doing synchronous page fault and inode needs fsync,
                 * we can insert PTE into page tables only after that happens.
@@ -1523,6 +1541,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
        loff_t pos;
        int error;
        pfn_t pfn;
+       void *kaddr;
 
        /*
         * Check whether offset isn't beyond end of file now. Caller is
@@ -1602,14 +1621,22 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
        sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap);
 
        switch (iomap.type) {
+       case IOMAP_COW:
        case IOMAP_MAPPED:
-               error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn, NULL);
+               error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn, &kaddr);
                if (error < 0)
                        goto finish_iomap;
 
                entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
                                                DAX_PMD, write && !sync);
 
+               if (iomap.type == IOMAP_COW) {
+                       error = dax_copy_edges(inode, pos, PMD_SIZE, &srcmap,
+                                              kaddr, true);
+                       if (error)
+                               goto unlock_entry;
+               }
+
                /*
                 * If we are doing synchronous page fault and inode needs fsync,
                 * we can insert PMD into page tables only after that happens.
-- 
2.17.0


