Re: [PATCH v2 9/9] iomap: Change calling convention for zeroing

2020-09-17 Thread Darrick J. Wong
On Thu, Sep 17, 2020 at 11:11:15PM +0100, Matthew Wilcox wrote:
> On Thu, Sep 17, 2020 at 03:05:00PM -0700, Darrick J. Wong wrote:
> > > -static loff_t
> > > -iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
> > > - void *data, struct iomap *iomap, struct iomap *srcmap)
> > > +static loff_t iomap_zero_range_actor(struct inode *inode, loff_t pos,
> > > + loff_t length, void *data, struct iomap *iomap,
> > 
> > Any reason not to change @length and the return value to s64?
> 
> Because it's an actor, passed to iomap_apply, so its types have to match.
> I can change that, but it'll be a separate patch series.

Ah, right.  I seem to have forgotten that. :(

Carry on.
Reviewed-by: Darrick J. Wong 

--D


Re: [PATCH v2 9/9] iomap: Change calling convention for zeroing

2020-09-17 Thread Matthew Wilcox
On Thu, Sep 17, 2020 at 03:05:00PM -0700, Darrick J. Wong wrote:
> > -static loff_t
> > -iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
> > -   void *data, struct iomap *iomap, struct iomap *srcmap)
> > +static loff_t iomap_zero_range_actor(struct inode *inode, loff_t pos,
> > +   loff_t length, void *data, struct iomap *iomap,
> 
> Any reason not to change @length and the return value to s64?

Because it's an actor, passed to iomap_apply, so its types have to match.
I can change that, but it'll be a separate patch series.
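
For reference, the actor plumbing this has to match (a sketch from memory;
see include/linux/iomap.h in this series for the authoritative version):

	typedef loff_t (*iomap_actor_t)(struct inode *inode, loff_t pos,
			loff_t len, void *data, struct iomap *iomap,
			struct iomap *srcmap);

	loff_t iomap_apply(struct inode *inode, loff_t pos, loff_t length,
			unsigned flags, const struct iomap_ops *ops,
			void *data, iomap_actor_t actor);

Changing only iomap_zero_range_actor() to take/return s64 would stop it
matching iomap_actor_t, so all the actors and iomap_apply() itself would
have to move together.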


Re: [PATCH v2 9/9] iomap: Change calling convention for zeroing

2020-09-17 Thread Darrick J. Wong
On Fri, Sep 11, 2020 at 12:47:07AM +0100, Matthew Wilcox (Oracle) wrote:
> Pass the full length to iomap_zero() and dax_iomap_zero(), and have
> them return how many bytes they actually handled.  This is preparatory
> work for handling THP, although it looks like DAX could actually take
> advantage of it if there's a larger contiguous area.
> 
> Signed-off-by: Matthew Wilcox (Oracle) 
> ---
>  fs/dax.c   | 13 ++---
>  fs/iomap/buffered-io.c | 33 +++--
>  include/linux/dax.h|  3 +--
>  3 files changed, 22 insertions(+), 27 deletions(-)
> 
> diff --git a/fs/dax.c b/fs/dax.c
> index 994ab66a9907..6ad346352a8c 100644
> --- a/fs/dax.c
> +++ b/fs/dax.c
> @@ -1037,18 +1037,18 @@ static vm_fault_t dax_load_hole(struct xa_state *xas,
>   return ret;
>  }
>  
> -int dax_iomap_zero(loff_t pos, unsigned offset, unsigned size,
> -struct iomap *iomap)
> +s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap)
>  {
>   sector_t sector = iomap_sector(iomap, pos & PAGE_MASK);
>   pgoff_t pgoff;
>   long rc, id;
>   void *kaddr;
>   bool page_aligned = false;
> -
> + unsigned offset = offset_in_page(pos);
> + unsigned size = min_t(u64, PAGE_SIZE - offset, length);
>  
>   if (IS_ALIGNED(sector << SECTOR_SHIFT, PAGE_SIZE) &&
> - IS_ALIGNED(size, PAGE_SIZE))
> + (size == PAGE_SIZE))
>   page_aligned = true;
>  
>   rc = bdev_dax_pgoff(iomap->bdev, sector, PAGE_SIZE, &pgoff);
> @@ -1058,8 +1058,7 @@ int dax_iomap_zero(loff_t pos, unsigned offset, unsigned size,
>   id = dax_read_lock();
>  
>   if (page_aligned)
> - rc = dax_zero_page_range(iomap->dax_dev, pgoff,
> -  size >> PAGE_SHIFT);
> + rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
>   else
>   rc = dax_direct_access(iomap->dax_dev, pgoff, 1, &kaddr, NULL);
>   if (rc < 0) {
> @@ -1072,7 +1071,7 @@ int dax_iomap_zero(loff_t pos, unsigned offset, unsigned size,
>   dax_flush(iomap->dax_dev, kaddr + offset, size);
>   }
>   dax_read_unlock(id);
> - return 0;
> + return size;
>  }
>  
>  static loff_t
> diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
> index cb25a7b70401..3e1eb40a73fd 100644
> --- a/fs/iomap/buffered-io.c
> +++ b/fs/iomap/buffered-io.c
> @@ -898,11 +898,13 @@ iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
>  }
>  EXPORT_SYMBOL_GPL(iomap_file_unshare);
>  
> -static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
> - unsigned bytes, struct iomap *iomap, struct iomap *srcmap)
> +static s64 iomap_zero(struct inode *inode, loff_t pos, u64 length,
> + struct iomap *iomap, struct iomap *srcmap)
>  {
>   struct page *page;
>   int status;
> + unsigned offset = offset_in_page(pos);
> + unsigned bytes = min_t(u64, PAGE_SIZE - offset, length);
>  
>   status = iomap_write_begin(inode, pos, bytes, 0, &page, iomap, srcmap);
>   if (status)
> @@ -914,38 +916,33 @@ static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
>   return iomap_write_end(inode, pos, bytes, bytes, page, iomap, srcmap);
>  }
>  
> -static loff_t
> -iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
> - void *data, struct iomap *iomap, struct iomap *srcmap)
> +static loff_t iomap_zero_range_actor(struct inode *inode, loff_t pos,
> + loff_t length, void *data, struct iomap *iomap,

Any reason not to change @length and the return value to s64?

--D

> + struct iomap *srcmap)
>  {
>   bool *did_zero = data;
>   loff_t written = 0;
> - int status;
>  
>   /* already zeroed?  we're done. */
>   if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
> - return count;
> + return length;
>  
>   do {
> - unsigned offset, bytes;
> -
> - offset = offset_in_page(pos);
> - bytes = min_t(loff_t, PAGE_SIZE - offset, count);
> + s64 bytes;
>  
>   if (IS_DAX(inode))
> - status = dax_iomap_zero(pos, offset, bytes, iomap);
> + bytes = dax_iomap_zero(pos, length, iomap);
>   else
> - status = iomap_zero(inode, pos, offset, bytes, iomap,
> - srcmap);
> - if (status < 0)
> - return status;
> + bytes = iomap_zero(inode, pos, length, iomap, srcmap);
> + if (bytes < 0)
> + return bytes;
>  
>   pos += bytes;
> - count -= bytes;
> + length -= bytes;
>   written += bytes;
>   if (did_zero)
>   *did_zero = true;
> - } while (count > 0);
> + } while (length > 0);
>  
>   return written;
>  }
> diff --git 

Re: [PATCH v2 9/9] iomap: Change calling convention for zeroing

2020-09-11 Thread Christoph Hellwig
On Fri, Sep 11, 2020 at 12:47:07AM +0100, Matthew Wilcox (Oracle) wrote:
> Pass the full length to iomap_zero() and dax_iomap_zero(), and have
> them return how many bytes they actually handled.  This is preparatory
> work for handling THP, although it looks like DAX could actually take
> advantage of it if there's a larger contiguous area.

Looks good,

Reviewed-by: Christoph Hellwig 


[PATCH v2 9/9] iomap: Change calling convention for zeroing

2020-09-10 Thread Matthew Wilcox (Oracle)
Pass the full length to iomap_zero() and dax_iomap_zero(), and have
them return how many bytes they actually handled.  This is preparatory
work for handling THP, although it looks like DAX could actually take
advantage of it if there's a larger contiguous area.
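
For illustration only (not part of this patch), here is roughly how DAX
could exploit a larger page-aligned run once the caller accepts a byte
count back.  The function name is made up and it just reuses the helpers
dax_iomap_zero() already calls:

static s64 dax_iomap_zero_large(loff_t pos, u64 length, struct iomap *iomap)
{
	sector_t sector = iomap_sector(iomap, pos);
	unsigned long npages = length >> PAGE_SHIFT;
	pgoff_t pgoff;
	long rc;
	int id;

	/* Only the fully page-aligned case is sketched here. */
	if (!IS_ALIGNED(pos, PAGE_SIZE) || !npages)
		return dax_iomap_zero(pos, length, iomap);

	rc = bdev_dax_pgoff(iomap->bdev, sector, npages << PAGE_SHIFT, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();
	rc = dax_zero_page_range(iomap->dax_dev, pgoff, npages);
	dax_read_unlock(id);
	if (rc < 0)
		return rc;

	/* Tell the actor loop how far to advance. */
	return (s64)npages << PAGE_SHIFT;
}

The loop in iomap_zero_range_actor() below would then advance by however
many pages were zeroed per call, with no further changes.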

Signed-off-by: Matthew Wilcox (Oracle) 
---
 fs/dax.c   | 13 ++---
 fs/iomap/buffered-io.c | 33 +++--
 include/linux/dax.h|  3 +--
 3 files changed, 22 insertions(+), 27 deletions(-)

diff --git a/fs/dax.c b/fs/dax.c
index 994ab66a9907..6ad346352a8c 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1037,18 +1037,18 @@ static vm_fault_t dax_load_hole(struct xa_state *xas,
return ret;
 }
 
-int dax_iomap_zero(loff_t pos, unsigned offset, unsigned size,
-  struct iomap *iomap)
+s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap)
 {
sector_t sector = iomap_sector(iomap, pos & PAGE_MASK);
pgoff_t pgoff;
long rc, id;
void *kaddr;
bool page_aligned = false;
-
+   unsigned offset = offset_in_page(pos);
+   unsigned size = min_t(u64, PAGE_SIZE - offset, length);
 
if (IS_ALIGNED(sector << SECTOR_SHIFT, PAGE_SIZE) &&
-   IS_ALIGNED(size, PAGE_SIZE))
+   (size == PAGE_SIZE))
page_aligned = true;
 
	rc = bdev_dax_pgoff(iomap->bdev, sector, PAGE_SIZE, &pgoff);
@@ -1058,8 +1058,7 @@ int dax_iomap_zero(loff_t pos, unsigned offset, unsigned size,
id = dax_read_lock();
 
if (page_aligned)
-   rc = dax_zero_page_range(iomap->dax_dev, pgoff,
-size >> PAGE_SHIFT);
+   rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
else
	rc = dax_direct_access(iomap->dax_dev, pgoff, 1, &kaddr, NULL);
if (rc < 0) {
@@ -1072,7 +1071,7 @@ int dax_iomap_zero(loff_t pos, unsigned offset, unsigned size,
dax_flush(iomap->dax_dev, kaddr + offset, size);
}
dax_read_unlock(id);
-   return 0;
+   return size;
 }
 
 static loff_t
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index cb25a7b70401..3e1eb40a73fd 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -898,11 +898,13 @@ iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
 }
 EXPORT_SYMBOL_GPL(iomap_file_unshare);
 
-static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
-   unsigned bytes, struct iomap *iomap, struct iomap *srcmap)
+static s64 iomap_zero(struct inode *inode, loff_t pos, u64 length,
+   struct iomap *iomap, struct iomap *srcmap)
 {
struct page *page;
int status;
+   unsigned offset = offset_in_page(pos);
+   unsigned bytes = min_t(u64, PAGE_SIZE - offset, length);
 
	status = iomap_write_begin(inode, pos, bytes, 0, &page, iomap, srcmap);
if (status)
@@ -914,38 +916,33 @@ static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
return iomap_write_end(inode, pos, bytes, bytes, page, iomap, srcmap);
 }
 
-static loff_t
-iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
-   void *data, struct iomap *iomap, struct iomap *srcmap)
+static loff_t iomap_zero_range_actor(struct inode *inode, loff_t pos,
+   loff_t length, void *data, struct iomap *iomap,
+   struct iomap *srcmap)
 {
bool *did_zero = data;
loff_t written = 0;
-   int status;
 
/* already zeroed?  we're done. */
if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
-   return count;
+   return length;
 
do {
-   unsigned offset, bytes;
-
-   offset = offset_in_page(pos);
-   bytes = min_t(loff_t, PAGE_SIZE - offset, count);
+   s64 bytes;
 
if (IS_DAX(inode))
-   status = dax_iomap_zero(pos, offset, bytes, iomap);
+   bytes = dax_iomap_zero(pos, length, iomap);
else
-   status = iomap_zero(inode, pos, offset, bytes, iomap,
-   srcmap);
-   if (status < 0)
-   return status;
+   bytes = iomap_zero(inode, pos, length, iomap, srcmap);
+   if (bytes < 0)
+   return bytes;
 
pos += bytes;
-   count -= bytes;
+   length -= bytes;
written += bytes;
if (did_zero)
*did_zero = true;
-   } while (count > 0);
+   } while (length > 0);
 
return written;
 }
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 6904d4e0b2e0..951a851a0481 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -214,8 +214,7 @@ vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
 int dax_delete_mapping_entry(struct address_space