Previously, we used ra_sum_pages() to pre-read as many contiguous
pages as possible, and if allocating more pages failed, an ENOMEM
error was reported upstream even though some pages had already been
allocated. In fact, we can use the pages that are available to do
part of the job and handle the rest in the following cycle, reporting
ENOMEM upstream only if we really cannot allocate any page at all.
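
For illustration only, here is a standalone userspace sketch of this
"return how many we actually got" convention (the function name
alloc_batch and the fixed 4096-byte size are hypothetical and not
part of the patch):

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Allocate up to nrpages buffers; stop at the first failure and
	 * report how many succeeded instead of failing the whole batch. */
	static int alloc_batch(void **bufs, int nrpages)
	{
		int i;

		for (i = 0; i < nrpages; i++) {
			bufs[i] = calloc(1, 4096);
			if (!bufs[i])
				break;		/* keep what we already have */
		}
		return i;			/* number actually allocated */
	}

	int main(void)
	{
		void *bufs[8];
		int got = alloc_batch(bufs, 8);

		if (!got)
			return ENOMEM;		/* only total failure is fatal */

		printf("working with %d of 8 buffers\n", got);
		while (got--)
			free(bufs[got]);
		return 0;
	}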

Another fix is to stop processing the remaining pages from page_list
once an EIO occurs while reading one of them.
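
A minimal userspace sketch of that caller-side pattern (struct rbuf
and consume_all are made-up stand-ins for the page list walk; the
point is that the first error is remembered while every buffer is
still released):

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct rbuf {
		char *data;
		int uptodate;			/* stand-in for PageUptodate() */
	};

	static int consume_all(struct rbuf *bufs, int n)
	{
		int err = 0;

		for (int i = 0; i < n; i++) {
			if (!err && !bufs[i].uptodate)
				err = -EIO;	/* remember the first failure */
			free(bufs[i].data);	/* always release the buffer */
		}
		return err;
	}

	int main(void)
	{
		struct rbuf bufs[3] = {
			{ calloc(1, 8), 1 },
			{ calloc(1, 8), 0 },	/* simulated read error */
			{ calloc(1, 8), 1 },
		};

		printf("consume_all returned %d\n", consume_all(bufs, 3));
		return 0;
	}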

Signed-off-by: Gu Zheng <guz.f...@cn.fujitsu.com>
---
 fs/f2fs/node.c    |   44 ++++++++++++++++++++------------------------
 fs/f2fs/segment.c |    7 +++++--
 2 files changed, 25 insertions(+), 26 deletions(-)

diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 8787469..4b7861d 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1588,15 +1588,8 @@ static int ra_sum_pages(struct f2fs_sb_info *sbi, struct list_head *pages,
        for (; page_idx < start + nrpages; page_idx++) {
                /* alloc temporal page for read node summary info*/
                page = alloc_page(GFP_F2FS_ZERO);
-               if (!page) {
-                       struct page *tmp;
-                       list_for_each_entry_safe(page, tmp, pages, lru) {
-                               list_del(&page->lru);
-                               unlock_page(page);
-                               __free_pages(page, 0);
-                       }
-                       return -ENOMEM;
-               }
+               if (!page)
+                       break;
 
                lock_page(page);
                page->index = page_idx;
@@ -1607,7 +1600,8 @@ static int ra_sum_pages(struct f2fs_sb_info *sbi, struct list_head *pages,
                f2fs_submit_page_mbio(sbi, page, page->index, &fio);
 
        f2fs_submit_merged_bio(sbi, META, READ);
-       return 0;
+
+       return page_idx - start;
 }
 
 int restore_node_summary(struct f2fs_sb_info *sbi,
@@ -1630,28 +1624,30 @@ int restore_node_summary(struct f2fs_sb_info *sbi,
                nrpages = min(last_offset - i, bio_blocks);
 
                /* read ahead node pages */
-               err = ra_sum_pages(sbi, &page_list, addr, nrpages);
-               if (err)
-                       return err;
+               nrpages = ra_sum_pages(sbi, &page_list, addr, nrpages);
+               if (!nrpages)
+                       return -ENOMEM;
 
                list_for_each_entry_safe(page, tmp, &page_list, lru) {
-
-                       lock_page(page);
-                       if (unlikely(!PageUptodate(page))) {
-                               err = -EIO;
-                       } else {
-                               rn = F2FS_NODE(page);
-                               sum_entry->nid = rn->footer.nid;
-                               sum_entry->version = 0;
-                               sum_entry->ofs_in_node = 0;
-                               sum_entry++;
+                       if (!err) {
+                               lock_page(page);
+                               if (unlikely(!PageUptodate(page))) {
+                                       err = -EIO;
+                               } else {
+                                       rn = F2FS_NODE(page);
+                                       sum_entry->nid = rn->footer.nid;
+                                       sum_entry->version = 0;
+                                       sum_entry->ofs_in_node = 0;
+                                       sum_entry++;
+                               }
+                               unlock_page(page);
                        }
 
                        list_del(&page->lru);
-                       unlock_page(page);
                        __free_pages(page, 0);
                }
        }
+
        return err;
 }
 
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 199c964..b3f8431 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -1160,9 +1160,12 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
                                ns->ofs_in_node = 0;
                        }
                } else {
-                       if (restore_node_summary(sbi, segno, sum)) {
+                       int err;
+
+                       err = restore_node_summary(sbi, segno, sum);
+                       if (err) {
                                f2fs_put_page(new, 1);
-                               return -EINVAL;
+                               return err;
                        }
                }
        }
-- 
1.7.7

