That way file systems don't have to check for non-contiguous pages
and work around them.  It also kicks off I/O earlier, allowing it to
finish earlier and reducing latency.
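
For illustration, here is a minimal userspace sketch of the batching
pattern used below (submit_batch() and the cached[] map are
hypothetical stand-ins for read_pages() and the page cache lookup,
not kernel interfaces):

#include <stdbool.h>
#include <stdio.h>

#define WINDOW 16

static void submit_batch(unsigned long start, unsigned long nr)
{
	/* Stand-in for read_pages(): real code would kick off I/O here. */
	printf("submit %lu page(s) starting at %lu\n", nr, start);
}

int main(void)
{
	/* Pages 5-7 and 12 are already cached (the "holes"). */
	bool cached[WINDOW] = { [5] = true, [6] = true, [7] = true,
				[12] = true };
	unsigned long start = 0, nr_pages = 0;

	for (unsigned long i = 0; i < WINDOW; i++) {
		if (cached[i]) {
			/*
			 * Hole: submit the contiguous run built so far
			 * before continuing with the next batch.
			 */
			if (nr_pages)
				submit_batch(start, nr_pages);
			nr_pages = 0;
			continue;
		}
		if (!nr_pages)
			start = i;
		nr_pages++;
	}
	/* Submit whatever is left at the end of the window. */
	if (nr_pages)
		submit_batch(start, nr_pages);
	return 0;
}

Run against the window above, this submits three contiguous runs
(0-4, 8-11 and 13-15) instead of one sixteen-page list with holes.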

Signed-off-by: Christoph Hellwig <[email protected]>
---
 mm/readahead.c | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/mm/readahead.c b/mm/readahead.c
index fa4d4b767130..044ab0c137cc 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -177,8 +177,18 @@ unsigned int __do_page_cache_readahead(struct address_space *mapping,
                rcu_read_lock();
                page = radix_tree_lookup(&mapping->i_pages, page_offset);
                rcu_read_unlock();
-               if (page && !radix_tree_exceptional_entry(page))
+               if (page && !radix_tree_exceptional_entry(page)) {
+                       /*
+                        * Page already present?  Kick off the current batch of
+                        * contiguous pages before continuing with the next
+                        * batch.
+                        */
+                       if (nr_pages)
+                               read_pages(mapping, filp, &page_pool, nr_pages,
+                                               gfp_mask);
+                       nr_pages = 0;
                        continue;
+               }
 
                page = __page_cache_alloc(gfp_mask);
                if (!page)
-- 
2.17.0
