Add a desired folio order field to file_ra_state so that readahead can
perform non-zero-order folio allocations. The new field defaults to
zero, so behavior is unchanged unless a caller explicitly sets it.
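
As a rough illustration (not part of this patch; the open hook and the
order value below are hypothetical), a filesystem could opt in by
setting the field on the file's readahead state at open time:

	/* Hypothetical sketch: request order-2 (16KB with 4KB pages)
	 * folios for readahead on files opened through this hook.
	 */
	static int myfs_file_open(struct inode *inode, struct file *file)
	{
		file->f_ra.desired_order = 2;
		return generic_file_open(inode, file);
	}

The readahead paths changed below then seed ra->order from
desired_order instead of forcing it to zero, and the effective order
remains subject to whatever bounds page_cache_ra_order() applies for
the mapping.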

Cc: David Hildenbrand <[email protected]>
Cc: Matthew Wilcox (Oracle) <[email protected]>
Signed-off-by: Jaegeuk Kim <[email protected]>
---
 include/linux/fs.h | 1 +
 mm/filemap.c       | 2 +-
 mm/readahead.c     | 2 +-
 3 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/include/linux/fs.h b/include/linux/fs.h
index c895146c1444..8233b166139b 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1167,6 +1167,7 @@ struct file_ra_state {
        unsigned int async_size;
        unsigned int ra_pages;
        unsigned short order;
+       unsigned short desired_order;
        unsigned short mmap_miss;
        loff_t prev_pos;
 };
diff --git a/mm/filemap.c b/mm/filemap.c
index 13f0259d993c..6aa0e26c5d17 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3332,7 +3332,7 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
                ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2);
                ra->size = ra->ra_pages;
                ra->async_size = ra->ra_pages / 4;
-               ra->order = 0;
+               ra->order = ra->desired_order;
        }
 
        fpin = maybe_unlock_mmap_for_io(vmf, fpin);
diff --git a/mm/readahead.c b/mm/readahead.c
index 3a4b5d58eeb6..5194211dfdee 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -621,7 +621,7 @@ void page_cache_sync_ra(struct readahead_control *ractl,
        ra->size = min(contig_count + req_count, max_pages);
        ra->async_size = 1;
 readit:
-       ra->order = 0;
+       ra->order = ra->desired_order;
        ractl->_index = ra->start;
        page_cache_ra_order(ractl, ra);
 }
-- 
2.52.0.487.g5c8c507ade-goog


