RE: [f2fs-dev] [PATCH 4/5] f2fs: optimize restore_node_summary slightly
Hi, > -Original Message- > From: Jaegeuk Kim [mailto:jaegeuk@samsung.com] > Sent: Monday, March 10, 2014 1:38 PM > To: Chao Yu > Cc: 'Gu Zheng'; 'linux-kernel'; 'f2fs' > Subject: RE: [f2fs-dev] [PATCH 4/5] f2fs: optimize restore_node_summary > slightly > > Hi, > > 2014-03-10 (월), 13:13 +0800, Chao Yu: > > Hi Gu, Kim: > > > > One more comment. > > > > > -Original Message- > > > From: Jaegeuk Kim [mailto:jaegeuk@samsung.com] > > > Sent: Monday, March 10, 2014 12:46 PM > > > To: Gu Zheng > > > Cc: linux-kernel; f2fs > > > Subject: Re: [f2fs-dev] [PATCH 4/5] f2fs: optimize restore_node_summary > > > slightly > > > > > > Hi Gu, > > > > > > 2014-03-07 (금), 18:43 +0800, Gu Zheng: > > > > Previously, we ra_sum_pages to pre-read contiguous pages as more > > > > as possible, and if we fail to alloc more pages, an ENOMEM error > > > > will be reported upstream, even though we have alloced some pages > > > > yet. In fact, we can use the available pages to do the job partly, > > > > and continue the rest in the following circle. Only reporting ENOMEM > > > > upstream if we really can not alloc any available page. > > > > > > > > And another fix is ignoring dealing with the following pages if an > > > > EIO occurs when reading page from page_list. 
> > > > > > > > Signed-off-by: Gu Zheng > > > > Reviewed-by: Chao Yu > > > > > > --- > > > > fs/f2fs/node.c| 44 > > > > fs/f2fs/segment.c |7 +-- > > > > 2 files changed, 25 insertions(+), 26 deletions(-) > > > > > > > > diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c > > > > index 8787469..4b7861d 100644 > > > > --- a/fs/f2fs/node.c > > > > +++ b/fs/f2fs/node.c > > > > @@ -1588,15 +1588,8 @@ static int ra_sum_pages(struct f2fs_sb_info > > > > *sbi, struct list_head > > > *pages, > > > > for (; page_idx < start + nrpages; page_idx++) { > > > > /* alloc temporal page for read node summary info*/ > > > > page = alloc_page(GFP_F2FS_ZERO); > > > > - if (!page) { > > > > - struct page *tmp; > > > > - list_for_each_entry_safe(page, tmp, pages, lru) > > > > { > > > > - list_del(>lru); > > > > - unlock_page(page); > > > > - __free_pages(page, 0); > > > > - } > > > > - return -ENOMEM; > > > > - } > > > > + if (!page) > > > > + break; > > > > > > > > lock_page(page); > > > > page->index = page_idx; > > > > @@ -1607,7 +1600,8 @@ static int ra_sum_pages(struct f2fs_sb_info *sbi, > > > > struct list_head > > > *pages, > > > > f2fs_submit_page_mbio(sbi, page, page->index, ); > > > > > > > > f2fs_submit_merged_bio(sbi, META, READ); > > > > - return 0; > > > > + > > > > + return page_idx - start; > > > > } > > > > > > > > int restore_node_summary(struct f2fs_sb_info *sbi, > > > > @@ -1630,28 +1624,30 @@ int restore_node_summary(struct f2fs_sb_info > > > > *sbi, > > > > nrpages = min(last_offset - i, bio_blocks); > > > > > > > > /* read ahead node pages */ > > > > - err = ra_sum_pages(sbi, _list, addr, nrpages); > > > > - if (err) > > > > - return err; > > > > + nrpages = ra_sum_pages(sbi, _list, addr, nrpages); > > > > + if (!nrpages) > > > > + return -ENOMEM; > > > > > > > > list_for_each_entry_safe(page, tmp, _list, lru) { > > > > - > > > > > > Here we can just add: > > > if (err) > > > goto skip; > > > lock_page(); > > > ... > > > unlock_page(); > > > skip: > &g
RE: [f2fs-dev] [PATCH 4/5] f2fs: optimize restore_node_summary slightly
Hi, -Original Message- From: Jaegeuk Kim [mailto:jaegeuk@samsung.com] Sent: Monday, March 10, 2014 1:38 PM To: Chao Yu Cc: 'Gu Zheng'; 'linux-kernel'; 'f2fs' Subject: RE: [f2fs-dev] [PATCH 4/5] f2fs: optimize restore_node_summary slightly Hi, 2014-03-10 (월), 13:13 +0800, Chao Yu: Hi Gu, Kim: One more comment. -Original Message- From: Jaegeuk Kim [mailto:jaegeuk@samsung.com] Sent: Monday, March 10, 2014 12:46 PM To: Gu Zheng Cc: linux-kernel; f2fs Subject: Re: [f2fs-dev] [PATCH 4/5] f2fs: optimize restore_node_summary slightly Hi Gu, 2014-03-07 (금), 18:43 +0800, Gu Zheng: Previously, we ra_sum_pages to pre-read contiguous pages as more as possible, and if we fail to alloc more pages, an ENOMEM error will be reported upstream, even though we have alloced some pages yet. In fact, we can use the available pages to do the job partly, and continue the rest in the following circle. Only reporting ENOMEM upstream if we really can not alloc any available page. And another fix is ignoring dealing with the following pages if an EIO occurs when reading page from page_list. 
Signed-off-by: Gu Zheng guz.f...@cn.fujitsu.com Reviewed-by: Chao Yu chao2...@samsung.com --- fs/f2fs/node.c| 44 fs/f2fs/segment.c |7 +-- 2 files changed, 25 insertions(+), 26 deletions(-) diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 8787469..4b7861d 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -1588,15 +1588,8 @@ static int ra_sum_pages(struct f2fs_sb_info *sbi, struct list_head *pages, for (; page_idx start + nrpages; page_idx++) { /* alloc temporal page for read node summary info*/ page = alloc_page(GFP_F2FS_ZERO); - if (!page) { - struct page *tmp; - list_for_each_entry_safe(page, tmp, pages, lru) { - list_del(page-lru); - unlock_page(page); - __free_pages(page, 0); - } - return -ENOMEM; - } + if (!page) + break; lock_page(page); page-index = page_idx; @@ -1607,7 +1600,8 @@ static int ra_sum_pages(struct f2fs_sb_info *sbi, struct list_head *pages, f2fs_submit_page_mbio(sbi, page, page-index, fio); f2fs_submit_merged_bio(sbi, META, READ); - return 0; + + return page_idx - start; } int restore_node_summary(struct f2fs_sb_info *sbi, @@ -1630,28 +1624,30 @@ int restore_node_summary(struct f2fs_sb_info *sbi, nrpages = min(last_offset - i, bio_blocks); /* read ahead node pages */ - err = ra_sum_pages(sbi, page_list, addr, nrpages); - if (err) - return err; + nrpages = ra_sum_pages(sbi, page_list, addr, nrpages); + if (!nrpages) + return -ENOMEM; list_for_each_entry_safe(page, tmp, page_list, lru) { - Here we can just add: if (err) goto skip; lock_page(); ... unlock_page(); skip: list_del(); __free_pages(); IMO, it's more neat, so if you have any objection, let me know. Otherwise, I'll handle this by myself. 
:) Thanks, - lock_page(page); - if (unlikely(!PageUptodate(page))) { - err = -EIO; - } else { - rn = F2FS_NODE(page); - sum_entry->nid = rn->footer.nid; - sum_entry->version = 0; - sum_entry->ofs_in_node = 0; - sum_entry++; + if (!err) { If we skip here, then in the next round we will fill these summary page entries with wrong info, because we skip the code 'sum_entry++;'. There is no next round. Once err = -EIO, there is no route to make err = 0. Ah, you're right. Only the old code has this problem; this new patch fixes it well. Thanks for pointing it out. :) Thanks. + lock_page(page); + if (unlikely(!PageUptodate(page))) { + err = -EIO; + } else { + rn = F2FS_NODE
RE: [f2fs-dev] [PATCH 4/5] f2fs: optimize restore_node_summary slightly
Hi, 2014-03-10 (월), 13:13 +0800, Chao Yu: > Hi Gu, Kim: > > One more comment. > > > -Original Message- > > From: Jaegeuk Kim [mailto:jaegeuk@samsung.com] > > Sent: Monday, March 10, 2014 12:46 PM > > To: Gu Zheng > > Cc: linux-kernel; f2fs > > Subject: Re: [f2fs-dev] [PATCH 4/5] f2fs: optimize restore_node_summary > > slightly > > > > Hi Gu, > > > > 2014-03-07 (금), 18:43 +0800, Gu Zheng: > > > Previously, we ra_sum_pages to pre-read contiguous pages as more > > > as possible, and if we fail to alloc more pages, an ENOMEM error > > > will be reported upstream, even though we have alloced some pages > > > yet. In fact, we can use the available pages to do the job partly, > > > and continue the rest in the following circle. Only reporting ENOMEM > > > upstream if we really can not alloc any available page. > > > > > > And another fix is ignoring dealing with the following pages if an > > > EIO occurs when reading page from page_list. > > > > > > Signed-off-by: Gu Zheng > > Reviewed-by: Chao Yu > > > > --- > > > fs/f2fs/node.c| 44 > > > fs/f2fs/segment.c |7 +-- > > > 2 files changed, 25 insertions(+), 26 deletions(-) > > > > > > diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c > > > index 8787469..4b7861d 100644 > > > --- a/fs/f2fs/node.c > > > +++ b/fs/f2fs/node.c > > > @@ -1588,15 +1588,8 @@ static int ra_sum_pages(struct f2fs_sb_info *sbi, > > > struct list_head > > *pages, > > > for (; page_idx < start + nrpages; page_idx++) { > > > /* alloc temporal page for read node summary info*/ > > > page = alloc_page(GFP_F2FS_ZERO); > > > - if (!page) { > > > - struct page *tmp; > > > - list_for_each_entry_safe(page, tmp, pages, lru) { > > > - list_del(>lru); > > > - unlock_page(page); > > > - __free_pages(page, 0); > > > - } > > > - return -ENOMEM; > > > - } > > > + if (!page) > > > + break; > > > > > > lock_page(page); > > > page->index = page_idx; > > > @@ -1607,7 +1600,8 @@ static int ra_sum_pages(struct f2fs_sb_info *sbi, > > > struct list_head > > *pages, > > > 
f2fs_submit_page_mbio(sbi, page, page->index, ); > > > > > > f2fs_submit_merged_bio(sbi, META, READ); > > > - return 0; > > > + > > > + return page_idx - start; > > > } > > > > > > int restore_node_summary(struct f2fs_sb_info *sbi, > > > @@ -1630,28 +1624,30 @@ int restore_node_summary(struct f2fs_sb_info *sbi, > > > nrpages = min(last_offset - i, bio_blocks); > > > > > > /* read ahead node pages */ > > > - err = ra_sum_pages(sbi, _list, addr, nrpages); > > > - if (err) > > > - return err; > > > + nrpages = ra_sum_pages(sbi, _list, addr, nrpages); > > > + if (!nrpages) > > > + return -ENOMEM; > > > > > > list_for_each_entry_safe(page, tmp, _list, lru) { > > > - > > > > Here we can just add: > > if (err) > > goto skip; > > lock_page(); > > ... > > unlock_page(); > > skip: > > list_del(); > > __free_pages(); > > > > IMO, it's more neat, so if you have any objection, let me know. > > Otherwise, I'll handle this by myself. :) > > Thanks, > > > > > - lock_page(page); > > > - if (unlikely(!PageUptodate(page))) { > > > - err = -EIO; > > > - } else { > > > - rn = F2FS_NODE(page); > > > - sum_entry->nid = rn->footer.nid; > > > - sum_entry->version = 0; > > > - sum_entry->ofs_in_node = 0; > > > - sum_entry++; > > > + if (!err) { > > If we skip here, next round we will fill these summary page entries with > wrong info because we skip t
RE: [f2fs-dev] [PATCH 4/5] f2fs: optimize restore_node_summary slightly
Hi Gu, Kim: One more comment. > -Original Message- > From: Jaegeuk Kim [mailto:jaegeuk@samsung.com] > Sent: Monday, March 10, 2014 12:46 PM > To: Gu Zheng > Cc: linux-kernel; f2fs > Subject: Re: [f2fs-dev] [PATCH 4/5] f2fs: optimize restore_node_summary > slightly > > Hi Gu, > > 2014-03-07 (금), 18:43 +0800, Gu Zheng: > > Previously, we ra_sum_pages to pre-read contiguous pages as more > > as possible, and if we fail to alloc more pages, an ENOMEM error > > will be reported upstream, even though we have alloced some pages > > yet. In fact, we can use the available pages to do the job partly, > > and continue the rest in the following circle. Only reporting ENOMEM > > upstream if we really can not alloc any available page. > > > > And another fix is ignoring dealing with the following pages if an > > EIO occurs when reading page from page_list. > > > > Signed-off-by: Gu Zheng Reviewed-by: Chao Yu > > --- > > fs/f2fs/node.c| 44 > > fs/f2fs/segment.c |7 +-- > > 2 files changed, 25 insertions(+), 26 deletions(-) > > > > diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c > > index 8787469..4b7861d 100644 > > --- a/fs/f2fs/node.c > > +++ b/fs/f2fs/node.c > > @@ -1588,15 +1588,8 @@ static int ra_sum_pages(struct f2fs_sb_info *sbi, > > struct list_head > *pages, > > for (; page_idx < start + nrpages; page_idx++) { > > /* alloc temporal page for read node summary info*/ > > page = alloc_page(GFP_F2FS_ZERO); > > - if (!page) { > > - struct page *tmp; > > - list_for_each_entry_safe(page, tmp, pages, lru) { > > - list_del(>lru); > > - unlock_page(page); > > - __free_pages(page, 0); > > - } > > - return -ENOMEM; > > - } > > + if (!page) > > + break; > > > > lock_page(page); > > page->index = page_idx; > > @@ -1607,7 +1600,8 @@ static int ra_sum_pages(struct f2fs_sb_info *sbi, > > struct list_head > *pages, > > f2fs_submit_page_mbio(sbi, page, page->index, ); > > > > f2fs_submit_merged_bio(sbi, META, READ); > > - return 0; > > + > > + return page_idx - start; > > } > > > > int 
restore_node_summary(struct f2fs_sb_info *sbi, > > @@ -1630,28 +1624,30 @@ int restore_node_summary(struct f2fs_sb_info *sbi, > > nrpages = min(last_offset - i, bio_blocks); > > > > /* read ahead node pages */ > > - err = ra_sum_pages(sbi, _list, addr, nrpages); > > - if (err) > > - return err; > > + nrpages = ra_sum_pages(sbi, _list, addr, nrpages); > > + if (!nrpages) > > + return -ENOMEM; > > > > list_for_each_entry_safe(page, tmp, _list, lru) { > > - > > Here we can just add: > if (err) > goto skip; > lock_page(); > ... > unlock_page(); > skip: > list_del(); > __free_pages(); > > IMO, it's more neat, so if you have any objection, let me know. > Otherwise, I'll handle this by myself. :) > Thanks, > > > - lock_page(page); > > - if (unlikely(!PageUptodate(page))) { > > - err = -EIO; > > - } else { > > - rn = F2FS_NODE(page); > > - sum_entry->nid = rn->footer.nid; > > - sum_entry->version = 0; > > - sum_entry->ofs_in_node = 0; > > - sum_entry++; > > + if (!err) { If we skip here, next round we will fill these summary page entries with wrong info because we skip the code 'sum_entry++;'. > > + lock_page(page); > > + if (unlikely(!PageUptodate(page))) { > > + err = -EIO; > > + } else { > > + rn = F2FS_NODE(page); > > + sum_entry->nid = rn->footer.nid; > > + sum_entry->version = 0; > > +
RE: [f2fs-dev] [PATCH 4/5] f2fs: optimize restore_node_summary slightly
Hi Gu, Kim: One more comment. -Original Message- From: Jaegeuk Kim [mailto:jaegeuk@samsung.com] Sent: Monday, March 10, 2014 12:46 PM To: Gu Zheng Cc: linux-kernel; f2fs Subject: Re: [f2fs-dev] [PATCH 4/5] f2fs: optimize restore_node_summary slightly Hi Gu, 2014-03-07 (금), 18:43 +0800, Gu Zheng: Previously, we ra_sum_pages to pre-read contiguous pages as more as possible, and if we fail to alloc more pages, an ENOMEM error will be reported upstream, even though we have alloced some pages yet. In fact, we can use the available pages to do the job partly, and continue the rest in the following circle. Only reporting ENOMEM upstream if we really can not alloc any available page. And another fix is ignoring dealing with the following pages if an EIO occurs when reading page from page_list. Signed-off-by: Gu Zheng guz.f...@cn.fujitsu.com Reviewed-by: Chao Yu chao2...@samsung.com --- fs/f2fs/node.c| 44 fs/f2fs/segment.c |7 +-- 2 files changed, 25 insertions(+), 26 deletions(-) diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 8787469..4b7861d 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -1588,15 +1588,8 @@ static int ra_sum_pages(struct f2fs_sb_info *sbi, struct list_head *pages, for (; page_idx start + nrpages; page_idx++) { /* alloc temporal page for read node summary info*/ page = alloc_page(GFP_F2FS_ZERO); - if (!page) { - struct page *tmp; - list_for_each_entry_safe(page, tmp, pages, lru) { - list_del(page-lru); - unlock_page(page); - __free_pages(page, 0); - } - return -ENOMEM; - } + if (!page) + break; lock_page(page); page-index = page_idx; @@ -1607,7 +1600,8 @@ static int ra_sum_pages(struct f2fs_sb_info *sbi, struct list_head *pages, f2fs_submit_page_mbio(sbi, page, page-index, fio); f2fs_submit_merged_bio(sbi, META, READ); - return 0; + + return page_idx - start; } int restore_node_summary(struct f2fs_sb_info *sbi, @@ -1630,28 +1624,30 @@ int restore_node_summary(struct f2fs_sb_info *sbi, nrpages = min(last_offset - i, bio_blocks); /* read ahead node 
pages */ - err = ra_sum_pages(sbi, page_list, addr, nrpages); - if (err) - return err; + nrpages = ra_sum_pages(sbi, page_list, addr, nrpages); + if (!nrpages) + return -ENOMEM; list_for_each_entry_safe(page, tmp, page_list, lru) { - Here we can just add: if (err) goto skip; lock_page(); ... unlock_page(); skip: list_del(); __free_pages(); IMO, it's more neat, so if you have any objection, let me know. Otherwise, I'll handle this by myself. :) Thanks, - lock_page(page); - if (unlikely(!PageUptodate(page))) { - err = -EIO; - } else { - rn = F2FS_NODE(page); - sum_entry-nid = rn-footer.nid; - sum_entry-version = 0; - sum_entry-ofs_in_node = 0; - sum_entry++; + if (!err) { If we skip here, next round we will fill these summary page entries with wrong info because we skip the code 'sum_entry++;'. + lock_page(page); + if (unlikely(!PageUptodate(page))) { + err = -EIO; + } else { + rn = F2FS_NODE(page); + sum_entry-nid = rn-footer.nid; + sum_entry-version = 0; + sum_entry-ofs_in_node = 0; + sum_entry++; + } + unlock_page(page); } list_del(page-lru); - unlock_page(page); __free_pages(page, 0); } Maybe we should add code here. if (err) return err; } + return err; } diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 199c964..b3f8431 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -1160,9 +1160,12 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type
RE: [f2fs-dev] [PATCH 4/5] f2fs: optimize restore_node_summary slightly
Hi, 2014-03-10 (월), 13:13 +0800, Chao Yu: Hi Gu, Kim: One more comment. -Original Message- From: Jaegeuk Kim [mailto:jaegeuk@samsung.com] Sent: Monday, March 10, 2014 12:46 PM To: Gu Zheng Cc: linux-kernel; f2fs Subject: Re: [f2fs-dev] [PATCH 4/5] f2fs: optimize restore_node_summary slightly Hi Gu, 2014-03-07 (금), 18:43 +0800, Gu Zheng: Previously, we ra_sum_pages to pre-read contiguous pages as more as possible, and if we fail to alloc more pages, an ENOMEM error will be reported upstream, even though we have alloced some pages yet. In fact, we can use the available pages to do the job partly, and continue the rest in the following circle. Only reporting ENOMEM upstream if we really can not alloc any available page. And another fix is ignoring dealing with the following pages if an EIO occurs when reading page from page_list. Signed-off-by: Gu Zheng guz.f...@cn.fujitsu.com Reviewed-by: Chao Yu chao2...@samsung.com --- fs/f2fs/node.c| 44 fs/f2fs/segment.c |7 +-- 2 files changed, 25 insertions(+), 26 deletions(-) diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 8787469..4b7861d 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -1588,15 +1588,8 @@ static int ra_sum_pages(struct f2fs_sb_info *sbi, struct list_head *pages, for (; page_idx start + nrpages; page_idx++) { /* alloc temporal page for read node summary info*/ page = alloc_page(GFP_F2FS_ZERO); - if (!page) { - struct page *tmp; - list_for_each_entry_safe(page, tmp, pages, lru) { - list_del(page-lru); - unlock_page(page); - __free_pages(page, 0); - } - return -ENOMEM; - } + if (!page) + break; lock_page(page); page-index = page_idx; @@ -1607,7 +1600,8 @@ static int ra_sum_pages(struct f2fs_sb_info *sbi, struct list_head *pages, f2fs_submit_page_mbio(sbi, page, page-index, fio); f2fs_submit_merged_bio(sbi, META, READ); - return 0; + + return page_idx - start; } int restore_node_summary(struct f2fs_sb_info *sbi, @@ -1630,28 +1624,30 @@ int restore_node_summary(struct f2fs_sb_info *sbi, nrpages = 
min(last_offset - i, bio_blocks); /* read ahead node pages */ - err = ra_sum_pages(sbi, &page_list, addr, nrpages); - if (err) - return err; + nrpages = ra_sum_pages(sbi, &page_list, addr, nrpages); + if (!nrpages) + return -ENOMEM; list_for_each_entry_safe(page, tmp, &page_list, lru) { - Here we can just add: if (err) goto skip; lock_page(); ... unlock_page(); skip: list_del(); __free_pages(); IMO, it's neater, so if you have any objection, let me know. Otherwise, I'll handle this by myself. :) Thanks, - lock_page(page); - if (unlikely(!PageUptodate(page))) { - err = -EIO; - } else { - rn = F2FS_NODE(page); - sum_entry->nid = rn->footer.nid; - sum_entry->version = 0; - sum_entry->ofs_in_node = 0; - sum_entry++; + if (!err) { If we skip here, then in the next round we will fill these summary page entries with wrong info, because we skip the code 'sum_entry++;'. There is no next round. Once err = -EIO, there is no route to make err = 0. + lock_page(page); + if (unlikely(!PageUptodate(page))) { + err = -EIO; + } else { + rn = F2FS_NODE(page); + sum_entry->nid = rn->footer.nid; + sum_entry->version = 0; + sum_entry->ofs_in_node = 0; + sum_entry++; + } + unlock_page(page); } list_del(&page->lru); - unlock_page(page); __free_pages(page, 0); } Maybe we should add code here. if (err) return err; This can reduce unnecessary loop executions. I'll add this. Thanks, } + return err; } diff --git a/fs/f2fs/segment.c b/fs/f2fs