Re: [PATCH v2 6/9] iomap: Convert read_count to read_bytes_pending

2020-09-17 Thread Darrick J. Wong
On Fri, Sep 11, 2020 at 12:47:04AM +0100, Matthew Wilcox (Oracle) wrote:
> Instead of counting bio segments, count the number of bytes submitted.
> This insulates us from the block layer's definition of what a 'same page'
> is, which is not necessarily clear once THPs are involved.
> 
> Signed-off-by: Matthew Wilcox (Oracle) 
> ---
>  fs/iomap/buffered-io.c | 41 ++++++++++++-----------------------------
>  1 file changed, 12 insertions(+), 29 deletions(-)
> 
> diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
> index 9670c096b83e..1cf976a8e55c 100644
> --- a/fs/iomap/buffered-io.c
> +++ b/fs/iomap/buffered-io.c
> @@ -26,7 +26,7 @@
>   * to track sub-page uptodate status and I/O completions.
>   */
>  struct iomap_page {
> - atomic_t read_count;
> + atomic_t read_bytes_pending;
>   atomic_t write_count;
>   spinlock_t  uptodate_lock;
>   unsigned long   uptodate[];
> @@ -72,7 +72,7 @@ iomap_page_release(struct page *page)
>  
>   if (!iop)
>   return;
> - WARN_ON_ONCE(atomic_read(&iop->read_count));
> + WARN_ON_ONCE(atomic_read(&iop->read_bytes_pending));
>   WARN_ON_ONCE(atomic_read(&iop->write_count));
>   WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=
>   PageUptodate(page));
>  @@ -167,13 +167,6 @@ iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
>   SetPageUptodate(page);
>  }
>  
> -static void
> -iomap_read_finish(struct iomap_page *iop, struct page *page)
> -{
> - if (!iop || atomic_dec_and_test(&iop->read_count))
> - unlock_page(page);
> -}
> -
>  static void
>  iomap_read_page_end_io(struct bio_vec *bvec, int error)
>  {
> @@ -187,7 +180,8 @@ iomap_read_page_end_io(struct bio_vec *bvec, int error)
>   iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
>   }
>  
> - iomap_read_finish(iop, page);
> + if (!iop || atomic_sub_and_test(bvec->bv_len, &iop->read_bytes_pending))
> + unlock_page(page);
>  }
>  
>  static void
>  @@ -267,30 +261,19 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
>   }
>  
>   ctx->cur_page_in_bio = true;
> + if (iop)
> + atomic_add(plen, &iop->read_bytes_pending);
>  
> - /*
> -  * Try to merge into a previous segment if we can.
> -  */
> + /* Try to merge into a previous segment if we can */
>   sector = iomap_sector(iomap, pos);
> - if (ctx->bio && bio_end_sector(ctx->bio) == sector)
> + if (ctx->bio && bio_end_sector(ctx->bio) == sector) {
> + if (__bio_try_merge_page(ctx->bio, page, plen, poff,
> + &same_page))
> + goto done;
>   is_contig = true;
> -
> - if (is_contig &&
> - __bio_try_merge_page(ctx->bio, page, plen, poff, &same_page)) {
> - if (!same_page && iop)
> - atomic_inc(&iop->read_count);
> - goto done;
>   }
>  
> - /*
> -  * If we start a new segment we need to increase the read count, and we
> -  * need to do so before submitting any previous full bio to make sure
> -  * that we don't prematurely unlock the page.
> -  */
> - if (iop)
> - atomic_inc(&iop->read_count);
> -
> - if (!ctx->bio || !is_contig || bio_full(ctx->bio, plen)) {
> + if (!is_contig || bio_full(ctx->bio, plen)) {
>   gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
>   gfp_t orig_gfp = gfp;
>   int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;
> -- 
> 2.28.0
> 

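The "same page" coupling the commit message refers to is visible in the old code quoted above: read_count was only bumped for a merged range when __bio_try_merge_page() reported that the range was not on the same page, so iomap's unlock accounting hinged on the block layer's page-identity answer. Below is a toy model of that dependency, illustrative only: the names are invented, C11 atomics stand in for the kernel's atomic_t, and none of this is kernel code.

/*
 * Toy model of the old segment-counting rule (invented names; not kernel
 * code). Whether a merged range contributes another expected completion
 * depends entirely on the block layer's same_page verdict.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct page_demo {
	atomic_uint read_count;		/* completions still expected */
};

/* Old rule: a range merged into an existing bvec adds an expected
 * completion only if the block layer says it landed on a new page. */
static void account_merged_range(struct page_demo *page, bool same_page)
{
	if (!same_page)
		atomic_fetch_add(&page->read_count, 1);
}

static void complete_bvec(struct page_demo *page)
{
	/* atomic_fetch_sub() returns the old value, so the result hit
	 * zero exactly when the old value was 1. */
	if (atomic_fetch_sub(&page->read_count, 1) == 1)
		printf("page unlocked\n");
}

int main(void)
{
	struct page_demo page = { .read_count = 1 };	/* first segment */

	/*
	 * A second range merges into the same bvec. The merged bvec will
	 * complete exactly once, so the count only balances if same_page
	 * is answered true here - and with THPs, where one "page" spans
	 * many small pages, that answer is exactly what becomes unclear.
	 */
	account_merged_range(&page, true);
	complete_bvec(&page);
	return 0;
}

With byte counting, the question never arises: each submitted byte is added exactly once and subtracted exactly once, however the ranges are merged into segments.
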

Re: [PATCH v2 6/9] iomap: Convert read_count to read_bytes_pending

2020-09-10 Thread Christoph Hellwig
On Fri, Sep 11, 2020 at 12:47:04AM +0100, Matthew Wilcox (Oracle) wrote:
> Instead of counting bio segments, count the number of bytes submitted.
> This insulates us from the block layer's definition of what a 'same page'
> is, which is not necessarily clear once THPs are involved.
> 
> Signed-off-by: Matthew Wilcox (Oracle) 

Looks good,

Reviewed-by: Christoph Hellwig 


[PATCH v2 6/9] iomap: Convert read_count to read_bytes_pending

2020-09-10 Thread Matthew Wilcox (Oracle)
Instead of counting bio segments, count the number of bytes submitted.
This insulates us from the block layer's definition of what a 'same page'
is, which is not necessarily clear once THPs are involved.

Signed-off-by: Matthew Wilcox (Oracle) 
---
 fs/iomap/buffered-io.c | 41 ++++++++++++-----------------------------
 1 file changed, 12 insertions(+), 29 deletions(-)

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 9670c096b83e..1cf976a8e55c 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -26,7 +26,7 @@
  * to track sub-page uptodate status and I/O completions.
  */
 struct iomap_page {
-   atomic_t read_count;
+   atomic_t read_bytes_pending;
    atomic_t write_count;
spinlock_t  uptodate_lock;
unsigned long   uptodate[];
@@ -72,7 +72,7 @@ iomap_page_release(struct page *page)
 
if (!iop)
return;
-   WARN_ON_ONCE(atomic_read(&iop->read_count));
+   WARN_ON_ONCE(atomic_read(&iop->read_bytes_pending));
    WARN_ON_ONCE(atomic_read(&iop->write_count));
WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=
PageUptodate(page));
@@ -167,13 +167,6 @@ iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
SetPageUptodate(page);
 }
 
-static void
-iomap_read_finish(struct iomap_page *iop, struct page *page)
-{
-   if (!iop || atomic_dec_and_test(&iop->read_count))
-   unlock_page(page);
-}
-
 static void
 iomap_read_page_end_io(struct bio_vec *bvec, int error)
 {
@@ -187,7 +180,8 @@ iomap_read_page_end_io(struct bio_vec *bvec, int error)
iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
}
 
-   iomap_read_finish(iop, page);
+   if (!iop || atomic_sub_and_test(bvec->bv_len, &iop->read_bytes_pending))
+   unlock_page(page);
 }
 
 static void
@@ -267,30 +261,19 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
}
 
ctx->cur_page_in_bio = true;
+   if (iop)
+   atomic_add(plen, &iop->read_bytes_pending);
 
-   /*
-* Try to merge into a previous segment if we can.
-*/
+   /* Try to merge into a previous segment if we can */
sector = iomap_sector(iomap, pos);
-   if (ctx->bio && bio_end_sector(ctx->bio) == sector)
+   if (ctx->bio && bio_end_sector(ctx->bio) == sector) {
+   if (__bio_try_merge_page(ctx->bio, page, plen, poff,
+   &same_page))
+   goto done;
is_contig = true;
-
-   if (is_contig &&
-   __bio_try_merge_page(ctx->bio, page, plen, poff, &same_page)) {
-   if (!same_page && iop)
-   atomic_inc(&iop->read_count);
-   goto done;
}
 
-   /*
-* If we start a new segment we need to increase the read count, and we
-* need to do so before submitting any previous full bio to make sure
-* that we don't prematurely unlock the page.
-*/
-   if (iop)
-   atomic_inc(&iop->read_count);
-
-   if (!ctx->bio || !is_contig || bio_full(ctx->bio, plen)) {
+   if (!is_contig || bio_full(ctx->bio, plen)) {
gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
gfp_t orig_gfp = gfp;
int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;
-- 
2.28.0
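
The counting scheme the patch adopts can also be modelled in isolation. The sketch below is illustrative only, not kernel code: struct page_demo and the function names are invented, and C11 atomics stand in for the kernel's atomic_t. Every byte is added to the per-page counter before its bio is submitted, each completion subtracts the bytes its bvec carried, and the page unlocks exactly once, when the counter returns to zero.

/*
 * Standalone model of the read_bytes_pending scheme (illustrative only:
 * struct page_demo and the function names are invented, and C11 atomics
 * stand in for the kernel's atomic_t).
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct page_demo {
	atomic_uint read_bytes_pending;
	bool locked;
};

/*
 * Submission side: account for every byte before the bio is sent, so a
 * completion cannot drop the counter to zero while more I/O for the same
 * page is still being set up.
 */
static void submit_range(struct page_demo *page, unsigned int plen)
{
	atomic_fetch_add(&page->read_bytes_pending, plen);
}

/*
 * Completion side: models atomic_sub_and_test(), which returns true when
 * the subtraction leaves the counter at zero - only then has every byte
 * submitted for this page been accounted for and the page may unlock.
 */
static void complete_range(struct page_demo *page, unsigned int bv_len)
{
	if (atomic_fetch_sub(&page->read_bytes_pending, bv_len) == bv_len) {
		page->locked = false;
		printf("page unlocked\n");
	}
}

int main(void)
{
	struct page_demo page = { .read_bytes_pending = 0, .locked = true };

	/* Two sub-page reads; whether the block layer merges them into one
	 * segment or two changes nothing about the accounting. */
	submit_range(&page, 2048);
	submit_range(&page, 2048);

	complete_range(&page, 2048);	/* 2048 bytes still pending */
	complete_range(&page, 2048);	/* reaches zero, unlocks */
	return 0;
}

The == bv_len check works because atomic_fetch_sub() returns the counter's value from before the subtraction; the kernel's atomic_sub_and_test() expresses the same "did this drop it to zero" condition directly.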