RE: [PATCH v3 06/10] fsdax: Add dax_iomap_cow_copy() for dax_iomap_zero

2021-04-01 Thread ruansy.f...@fujitsu.com


> -----Original Message-----
> From: Ritesh Harjani 
> Sent: Thursday, April 1, 2021 2:45 PM
> Subject: Re: [PATCH v3 06/10] fsdax: Add dax_iomap_cow_copy() for
> dax_iomap_zero
> 
> On 21/03/19 09:52AM, Shiyang Ruan wrote:
> > Punching a hole on a reflinked file needs dax_copy_edge() too.  Otherwise,
> > data in the unaligned area will not be correct.  So, add the srcmap to
> > dax_iomap_zero() and replace the memset() with dax_copy_edge().
> >
> > Signed-off-by: Shiyang Ruan 
> > ---
> >  fs/dax.c               | 9 +++++++--
> >  fs/iomap/buffered-io.c | 2 +-
> >  include/linux/dax.h    | 3 ++-
> >  3 files changed, 10 insertions(+), 4 deletions(-)
> >
> > diff --git a/fs/dax.c b/fs/dax.c
> > index cfe513eb111e..348297b38f76 100644
> > --- a/fs/dax.c
> > +++ b/fs/dax.c
> > @@ -1174,7 +1174,8 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
> >  }
> >  #endif /* CONFIG_FS_DAX_PMD */
> >
> > -s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap)
> > +s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap,
> > +   struct iomap *srcmap)
> 
> Do we know why dax_iomap_zero() operates on a PAGE_SIZE range?
> IIUC, dax_zero_page_range() can take nr_pages as a parameter, but we still
> always use one page at a time. Why is that?

I think we can handle more than one page here.  The length can be more than one 
PAGE_SIZE.
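
Roughly, the aligned path could then look like this (just a sketch against
the current helpers, not something this patch changes; the surrounding
size/page_aligned setup would need updating as well):

	if (page_aligned) {
		/* Hand the whole aligned range to the device in one call. */
		size_t nr_pages = length >> PAGE_SHIFT;

		rc = dax_zero_page_range(iomap->dax_dev, pgoff, nr_pages);
	}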

> 
> >  {
> > sector_t sector = iomap_sector(iomap, pos & PAGE_MASK);
> > pgoff_t pgoff;
> > @@ -1204,7 +1205,11 @@ s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap)
> > }
> >
> > if (!page_aligned) {
> > -   memset(kaddr + offset, 0, size);
> > +   if (iomap->addr != srcmap->addr)
> > +   dax_iomap_cow_copy(offset, size, PAGE_SIZE, srcmap,
> > +  kaddr, true);
> > +   else
> > +   memset(kaddr + offset, 0, size);
> > dax_flush(iomap->dax_dev, kaddr + offset, size);
> > }
> > dax_read_unlock(id);
> >
> 
> Maybe the above could be simplified to this?
> 
> 	if (page_aligned) {
> 		rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
> 	} else {
> 		rc = dax_direct_access(iomap->dax_dev, pgoff, 1, &kaddr, NULL);

This looks good.  We need to add a check for rc here.
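For example, mirroring the error handling the current dax_iomap_zero()
already has after dax_direct_access() (sketch):

		rc = dax_direct_access(iomap->dax_dev, pgoff, 1, &kaddr, NULL);
		if (rc < 0) {
			/* Don't touch kaddr if mapping the page failed. */
			dax_read_unlock(id);
			return rc;
		}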

> 		if (iomap->addr != srcmap->addr)
> 			dax_iomap_cow_copy(offset, size, PAGE_SIZE, srcmap,
> 					   kaddr, true);
> 		else
> 			memset(kaddr + offset, 0, size);
> 		dax_flush(iomap->dax_dev, kaddr + offset, size);
> 	}
> 
> 	dax_read_unlock(id);
> 	return rc < 0 ? rc : size;
> 
> Other than that looks good.
> Feel free to add.
> Reviewed-by: Ritesh Harjani 
> 

--
Thanks,
Ruan Shiyang.



Re: [PATCH v3 06/10] fsdax: Add dax_iomap_cow_copy() for dax_iomap_zero

2021-04-01 Thread Ritesh Harjani
On 21/03/19 09:52AM, Shiyang Ruan wrote:
> Punching a hole on a reflinked file needs dax_copy_edge() too.  Otherwise,
> data in the unaligned area will not be correct.  So, add the srcmap to
> dax_iomap_zero() and replace the memset() with dax_copy_edge().
>
> Signed-off-by: Shiyang Ruan 
> ---
>  fs/dax.c               | 9 +++++++--
>  fs/iomap/buffered-io.c | 2 +-
>  include/linux/dax.h    | 3 ++-
>  3 files changed, 10 insertions(+), 4 deletions(-)
>
> diff --git a/fs/dax.c b/fs/dax.c
> index cfe513eb111e..348297b38f76 100644
> --- a/fs/dax.c
> +++ b/fs/dax.c
> @@ -1174,7 +1174,8 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
>  }
>  #endif /* CONFIG_FS_DAX_PMD */
>
> -s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap)
> +s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap,
> + struct iomap *srcmap)

Do we know why dax_iomap_zero() operates on a PAGE_SIZE range?
IIUC, dax_zero_page_range() can take nr_pages as a parameter, but we still
always use one page at a time. Why is that?

>  {
>   sector_t sector = iomap_sector(iomap, pos & PAGE_MASK);
>   pgoff_t pgoff;
> @@ -1204,7 +1205,11 @@ s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap)
>   }
>
>   if (!page_aligned) {
> - memset(kaddr + offset, 0, size);
> + if (iomap->addr != srcmap->addr)
> + dax_iomap_cow_copy(offset, size, PAGE_SIZE, srcmap,
> +kaddr, true);
> + else
> + memset(kaddr + offset, 0, size);
>   dax_flush(iomap->dax_dev, kaddr + offset, size);
>   }
>   dax_read_unlock(id);
>

Maybe the above could be simplified to this?

	if (page_aligned) {
		rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
	} else {
		rc = dax_direct_access(iomap->dax_dev, pgoff, 1, &kaddr, NULL);
		if (iomap->addr != srcmap->addr)
			dax_iomap_cow_copy(offset, size, PAGE_SIZE, srcmap,
					   kaddr, true);
		else
			memset(kaddr + offset, 0, size);
		dax_flush(iomap->dax_dev, kaddr + offset, size);
	}

	dax_read_unlock(id);
	return rc < 0 ? rc : size;

Other than that looks good.
Feel free to add.
Reviewed-by: Ritesh Harjani 




[PATCH v3 06/10] fsdax: Add dax_iomap_cow_copy() for dax_iomap_zero

2021-03-18 Thread Shiyang Ruan
Punching a hole on a reflinked file needs dax_copy_edge() too.  Otherwise,
data in the unaligned area will not be correct.  So, add the srcmap to
dax_iomap_zero() and replace the memset() with dax_copy_edge().
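
As background, the partial-page case here looks roughly like this (an
illustrative sketch with a hypothetical helper name, not the
dax_iomap_cow_copy() helper this patch calls): when the hole does not cover
the whole page and the block is shared, the bytes outside the punched range
have to be copied from the source extent (srcmap) into the newly allocated
extent, and only the punched range itself is zeroed.

/*
 * Sketch only: conceptual handling of one partial page when punching a hole
 * on a reflinked file.  'src' maps the old shared extent, 'dst' the newly
 * allocated one; 'offset'/'size' describe the punched range within the page.
 */
static void cow_zero_partial_page(void *dst, const void *src,
				  unsigned int offset, unsigned int size)
{
	memcpy(dst, src, offset);			/* head, before the hole */
	memset(dst + offset, 0, size);			/* the punched range */
	memcpy(dst + offset + size, src + offset + size,
	       PAGE_SIZE - offset - size);		/* tail, after the hole */
}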

Signed-off-by: Shiyang Ruan 
---
 fs/dax.c               | 9 +++++++--
 fs/iomap/buffered-io.c | 2 +-
 include/linux/dax.h    | 3 ++-
 3 files changed, 10 insertions(+), 4 deletions(-)

diff --git a/fs/dax.c b/fs/dax.c
index cfe513eb111e..348297b38f76 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1174,7 +1174,8 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
 }
 #endif /* CONFIG_FS_DAX_PMD */
 
-s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap)
+s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap,
+		struct iomap *srcmap)
 {
 	sector_t sector = iomap_sector(iomap, pos & PAGE_MASK);
 	pgoff_t pgoff;
@@ -1204,7 +1205,11 @@ s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap)
 	}
 
 	if (!page_aligned) {
-		memset(kaddr + offset, 0, size);
+		if (iomap->addr != srcmap->addr)
+			dax_iomap_cow_copy(offset, size, PAGE_SIZE, srcmap,
+					   kaddr, true);
+		else
+			memset(kaddr + offset, 0, size);
 		dax_flush(iomap->dax_dev, kaddr + offset, size);
 	}
 	dax_read_unlock(id);
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 16a1e82e3aeb..d754b1f1a05d 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -933,7 +933,7 @@ static loff_t iomap_zero_range_actor(struct inode *inode, loff_t pos,
 		s64 bytes;
 
 		if (IS_DAX(inode))
-			bytes = dax_iomap_zero(pos, length, iomap);
+			bytes = dax_iomap_zero(pos, length, iomap, srcmap);
 		else
 			bytes = iomap_zero(inode, pos, length, iomap, srcmap);
 		if (bytes < 0)
diff --git a/include/linux/dax.h b/include/linux/dax.h
index b52f084aa643..3275e01ed33d 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -237,7 +237,8 @@ vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
 int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index);
-s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap);
+s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap,
+		struct iomap *srcmap);
 static inline bool dax_mapping(struct address_space *mapping)
 {
return mapping->host && IS_DAX(mapping->host);
-- 
2.30.1