RE: [f2fs-dev] [PATCH 07/10] f2fs: use meta_inode cache to improve roll-forward speed

2014-09-23 Thread Chao Yu
Hi Jaegeuk,

> -Original Message-
> From: Jaegeuk Kim [mailto:jaeg...@kernel.org]
> Sent: Tuesday, September 23, 2014 12:47 PM
> To: Chao Yu
> Cc: linux-kernel@vger.kernel.org; linux-fsde...@vger.kernel.org;
> linux-f2fs-de...@lists.sourceforge.net
> Subject: Re: [f2fs-dev] [PATCH 07/10] f2fs: use meta_inode cache to improve 
> roll-forward speed
> 
> Hi Chao,
> 
> On Mon, Sep 22, 2014 at 10:36:25AM +0800, Chao Yu wrote:
> > Hi Jaegeuk,
> >
> > > -Original Message-
> > > From: Jaegeuk Kim [mailto:jaeg...@kernel.org]
> > > Sent: Monday, September 15, 2014 6:14 AM
> > > To: linux-kernel@vger.kernel.org; linux-fsde...@vger.kernel.org;
> > > linux-f2fs-de...@lists.sourceforge.net
> > > Cc: Jaegeuk Kim
> > > Subject: [f2fs-dev] [PATCH 07/10] f2fs: use meta_inode cache to improve 
> > > roll-forward speed
> > >
> > > Previously, all the dnode pages had to be read during roll-forward
> > > recovery.
> > > Even worse, the whole chain was traversed twice.
> > > This patch removes those redundant and costly reads by using the page
> > > cache of meta_inode together with the readahead function.
> > >
> > > Signed-off-by: Jaegeuk Kim 
> > > ---
> > >  fs/f2fs/checkpoint.c | 11 --
> > >  fs/f2fs/f2fs.h   |  5 +++--
> > >  fs/f2fs/recovery.c   | 59 
> > > +---
> > >  fs/f2fs/segment.h|  5 +++--
> > >  4 files changed, 43 insertions(+), 37 deletions(-)
> > >
> > > diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
> > > index 7262d99..d1ed889 100644
> > > --- a/fs/f2fs/checkpoint.c
> > > +++ b/fs/f2fs/checkpoint.c
> > > @@ -82,6 +82,8 @@ static inline int get_max_meta_blks(struct f2fs_sb_info 
> > > *sbi, int type)
> > >   case META_SSA:
> > >   case META_CP:
> > >   return 0;
> > > + case META_POR:
> > > + return SM_I(sbi)->main_blkaddr + sbi->user_block_count;
> >
> > Here we would skip the virtual over-provision segments, so it's better to
> > use TOTAL_BLKS(sbi).
> >
> > >   default:
> > >   BUG();
> > >   }
> > > @@ -90,11 +92,11 @@ static inline int get_max_meta_blks(struct 
> > > f2fs_sb_info *sbi, int type)
> > >  /*
> > >   * Readahead CP/NAT/SIT/SSA pages
> > >   */
> > > -int ra_meta_pages(struct f2fs_sb_info *sbi, int start, int nrpages, int 
> > > type)
> > > +int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, 
> > > int type)
> > >  {
> > >   block_t prev_blk_addr = 0;
> > >   struct page *page;
> > > - int blkno = start;
> > > + block_t blkno = start;
> > >   int max_blks = get_max_meta_blks(sbi, type);
> > >
> > >   struct f2fs_io_info fio = {
> > > @@ -128,6 +130,11 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, int 
> > > start, int nrpages,
> int
> > > type)
> > >   /* get ssa/cp block addr */
> > >   blk_addr = blkno;
> > >   break;
> > > + case META_POR:
> > > + if (unlikely(blkno >= max_blks))
> > > + goto out;
> > > + blk_addr = blkno;
> > > + break;
> >
> > The actual modification in the patch merged into the f2fs dev branch is as
> > follows:
> >
> > - /* get ssa/cp block addr */
> > + case META_POR:
> > + if (blkno >= max_blks || blkno < min_blks)
> > + goto out;
> >
> > IMHO, it's better to verify the boundary separately for META_{SSA,CP,POR},
> > with unlikely().
> > What do you think?
> 
> Not bad.
> Could you check the v2 below?
> 
> >
> > >   default:
> > >   BUG();
> > >   }
> > > diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
> > > index 4f84d2a..48d7d46 100644
> > > --- a/fs/f2fs/f2fs.h
> > > +++ b/fs/f2fs/f2fs.h
> > > @@ -103,7 +103,8 @@ enum {
> > >   META_CP,
> > >   META_NAT,
> > >   META_SIT,
> > > - META_SSA
> > > + META_SSA,
> > > + META_POR,
> > >  };
> > >
> > >  /* for the list of ino */
> > > @@ -1291,7 +1292,7 @@ void destroy_segment_manager_caches(void);
> > >   */
> > >  struct page *grab_meta_page(struct f2fs_sb_info *, pgoff_t);
> > > 
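
For reference, the two review points above combine into something like the
following sketch of get_max_meta_blks() and the ra_meta_pages() switch. This
is only a sketch of the suggestion, not the merged code; in particular,
min_blks is assumed to be SM_I(sbi)->main_blkaddr, which the thread quotes
but never defines.

	/* get_max_meta_blks(): bound META_POR by the whole device,
	 * including the virtual over-provision segments */
	case META_POR:
		return TOTAL_BLKS(sbi);

	/* ra_meta_pages(): validate each type's boundary separately,
	 * marking the bail-out path with unlikely() */
	switch (type) {
	case META_SSA:
	case META_CP:
		/* ssa/cp block addresses are used as-is */
		blk_addr = blkno;
		break;
	case META_POR:
		if (unlikely(blkno >= max_blks || blkno < min_blks))
			goto out;
		blk_addr = blkno;
		break;
	default:
		BUG();
	}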

Re: [f2fs-dev] [PATCH 07/10] f2fs: use meta_inode cache to improve roll-forward speed

2014-09-22 Thread Jaegeuk Kim
Hi Chao,

On Mon, Sep 22, 2014 at 10:36:25AM +0800, Chao Yu wrote:
> Hi Jaegeuk,
> 
> > -Original Message-
> > From: Jaegeuk Kim [mailto:jaeg...@kernel.org]
> > Sent: Monday, September 15, 2014 6:14 AM
> > To: linux-kernel@vger.kernel.org; linux-fsde...@vger.kernel.org;
> > linux-f2fs-de...@lists.sourceforge.net
> > Cc: Jaegeuk Kim
> > Subject: [f2fs-dev] [PATCH 07/10] f2fs: use meta_inode cache to improve 
> > roll-forward speed
> > 
> > Previously, all the dnode pages had to be read during roll-forward
> > recovery.
> > Even worse, the whole chain was traversed twice.
> > This patch removes those redundant and costly reads by using the page
> > cache of meta_inode together with the readahead function.
> > 
> > Signed-off-by: Jaegeuk Kim 
> > ---
> >  fs/f2fs/checkpoint.c | 11 --
> >  fs/f2fs/f2fs.h   |  5 +++--
> >  fs/f2fs/recovery.c   | 59 
> > +---
> >  fs/f2fs/segment.h|  5 +++--
> >  4 files changed, 43 insertions(+), 37 deletions(-)
> > 
> > diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
> > index 7262d99..d1ed889 100644
> > --- a/fs/f2fs/checkpoint.c
> > +++ b/fs/f2fs/checkpoint.c
> > @@ -82,6 +82,8 @@ static inline int get_max_meta_blks(struct f2fs_sb_info 
> > *sbi, int type)
> > case META_SSA:
> > case META_CP:
> > return 0;
> > +   case META_POR:
> > +   return SM_I(sbi)->main_blkaddr + sbi->user_block_count;
> 
> Here we would skip the virtual over-provision segments, so it's better to
> use TOTAL_BLKS(sbi).
> 
> > default:
> > BUG();
> > }
> > @@ -90,11 +92,11 @@ static inline int get_max_meta_blks(struct f2fs_sb_info 
> > *sbi, int type)
> >  /*
> >   * Readahead CP/NAT/SIT/SSA pages
> >   */
> > -int ra_meta_pages(struct f2fs_sb_info *sbi, int start, int nrpages, int 
> > type)
> > +int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, 
> > int type)
> >  {
> > block_t prev_blk_addr = 0;
> > struct page *page;
> > -   int blkno = start;
> > +   block_t blkno = start;
> > int max_blks = get_max_meta_blks(sbi, type);
> > 
> > struct f2fs_io_info fio = {
> > @@ -128,6 +130,11 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, int start, 
> > int nrpages, int
> > type)
> > /* get ssa/cp block addr */
> > blk_addr = blkno;
> > break;
> > +   case META_POR:
> > +   if (unlikely(blkno >= max_blks))
> > +   goto out;
> > +   blk_addr = blkno;
> > +   break;
> 
> The actual modification in the patch merged into the f2fs dev branch is as
> follows:
> 
> - /* get ssa/cp block addr */
> + case META_POR:
> + if (blkno >= max_blks || blkno < min_blks)
> + goto out;
> 
> IMHO, it's better to verify the boundary separately for META_{SSA,CP,POR},
> with unlikely().
> What do you think?

Not bad.
Could you check the v2 below?

> 
> > default:
> > BUG();
> > }
> > diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
> > index 4f84d2a..48d7d46 100644
> > --- a/fs/f2fs/f2fs.h
> > +++ b/fs/f2fs/f2fs.h
> > @@ -103,7 +103,8 @@ enum {
> > META_CP,
> > META_NAT,
> > META_SIT,
> > -   META_SSA
> > +   META_SSA,
> > +   META_POR,
> >  };
> > 
> >  /* for the list of ino */
> > @@ -1291,7 +1292,7 @@ void destroy_segment_manager_caches(void);
> >   */
> >  struct page *grab_meta_page(struct f2fs_sb_info *, pgoff_t);
> >  struct page *get_meta_page(struct f2fs_sb_info *, pgoff_t);
> > -int ra_meta_pages(struct f2fs_sb_info *, int, int, int);
> > +int ra_meta_pages(struct f2fs_sb_info *, block_t, int, int);
> >  long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long);
> >  void add_dirty_inode(struct f2fs_sb_info *, nid_t, int type);
> >  void remove_dirty_inode(struct f2fs_sb_info *, nid_t, int type);
> > diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
> > index 3736728..6f7fbfa 100644
> > --- a/fs/f2fs/recovery.c
> > +++ b/fs/f2fs/recovery.c
> > @@ -173,7 +173,7 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, 
> > struct list_head
> > *head)
> >  {
> > unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
> > struct curseg_info *curseg;
> > -   struct page *page;
> > +   struct page *page = NULL;
> > block_t blkaddr;
> > int err = 0;
> > 
> > @@ -181,20 +181,19 @@ static int find_fsync_dnodes(struct f2fs_sb_info 
> > *sbi, struct list_head
> > *head)
> > curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
> > blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
> > 
> > -   /* read node page */
> > -   page = alloc_page(GFP_F2FS_ZERO);
> > -   if (!page)
> > -   return -ENOMEM;
> > -   lock_page(page);
> > -
> > while (1) {
> > struct fsync_inode_entry *entry;
> > 
> > -   err = f2fs_submit_page_bio(sbi, page, blkaddr, READ_SYNC);
> > -   if (err)
> > -   return err;
> > +   if (blkaddr < SM_I(sbi)->main_blkaddr ||
> > +   blkaddr > TOTAL_BLKS(sbi))
> 
> blkaddr >= TOTAL_BLKS(sbi) ?

Right.

> 
> > +   break;
> > 
> > -   lock_page(page);
> > +   page = get_meta_page(sbi, blkaddr);
> > +   if 
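
The question Chao raises here is an off-by-one on the upper bound:
TOTAL_BLKS(sbi) is a block count, so the last valid block address is
TOTAL_BLKS(sbi) - 1 and the exit test needs >=, not >. With the "Right."
above, the loop guard would read as in this sketch:

	/* stop recovery once the chain leaves the main area;
	 * TOTAL_BLKS(sbi) is a count, hence the >= test */
	if (blkaddr < SM_I(sbi)->main_blkaddr ||
			blkaddr >= TOTAL_BLKS(sbi))
		break;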

RE: [f2fs-dev] [PATCH 07/10] f2fs: use meta_inode cache to improve roll-forward speed

2014-09-21 Thread Chao Yu
Hi Jaegeuk,

> -Original Message-
> From: Jaegeuk Kim [mailto:jaeg...@kernel.org]
> Sent: Monday, September 15, 2014 6:14 AM
> To: linux-kernel@vger.kernel.org; linux-fsde...@vger.kernel.org;
> linux-f2fs-de...@lists.sourceforge.net
> Cc: Jaegeuk Kim
> Subject: [f2fs-dev] [PATCH 07/10] f2fs: use meta_inode cache to improve 
> roll-forward speed
> 
> Previously, all the dnode pages had to be read during roll-forward
> recovery.
> Even worse, the whole chain was traversed twice.
> This patch removes those redundant and costly reads by using the page
> cache of meta_inode together with the readahead function.
> 
> Signed-off-by: Jaegeuk Kim 
> ---
>  fs/f2fs/checkpoint.c | 11 --
>  fs/f2fs/f2fs.h   |  5 +++--
>  fs/f2fs/recovery.c   | 59 
> +---
>  fs/f2fs/segment.h|  5 +++--
>  4 files changed, 43 insertions(+), 37 deletions(-)
> 
> diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
> index 7262d99..d1ed889 100644
> --- a/fs/f2fs/checkpoint.c
> +++ b/fs/f2fs/checkpoint.c
> @@ -82,6 +82,8 @@ static inline int get_max_meta_blks(struct f2fs_sb_info 
> *sbi, int type)
>   case META_SSA:
>   case META_CP:
>   return 0;
> + case META_POR:
> + return SM_I(sbi)->main_blkaddr + sbi->user_block_count;

Here we would skip the virtual over-provision segments, so it's better to
use TOTAL_BLKS(sbi).

>   default:
>   BUG();
>   }
> @@ -90,11 +92,11 @@ static inline int get_max_meta_blks(struct f2fs_sb_info 
> *sbi, int type)
>  /*
>   * Readahead CP/NAT/SIT/SSA pages
>   */
> -int ra_meta_pages(struct f2fs_sb_info *sbi, int start, int nrpages, int type)
> +int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, int 
> type)
>  {
>   block_t prev_blk_addr = 0;
>   struct page *page;
> - int blkno = start;
> + block_t blkno = start;
>   int max_blks = get_max_meta_blks(sbi, type);
> 
>   struct f2fs_io_info fio = {
> @@ -128,6 +130,11 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, int start, 
> int nrpages, int
> type)
>   /* get ssa/cp block addr */
>   blk_addr = blkno;
>   break;
> + case META_POR:
> + if (unlikely(blkno >= max_blks))
> + goto out;
> + blk_addr = blkno;
> + break;

The actual modification in the patch merged into the f2fs dev branch is as
follows:

- /* get ssa/cp block addr */
+ case META_POR:
+ if (blkno >= max_blks || blkno < min_blks)
+ goto out;

IMHO, it's better to verify the boundary separately for META_{SSA,CP,POR},
with unlikely().
What do you think?

>   default:
>   BUG();
>   }
> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
> index 4f84d2a..48d7d46 100644
> --- a/fs/f2fs/f2fs.h
> +++ b/fs/f2fs/f2fs.h
> @@ -103,7 +103,8 @@ enum {
>   META_CP,
>   META_NAT,
>   META_SIT,
> - META_SSA
> + META_SSA,
> + META_POR,
>  };
> 
>  /* for the list of ino */
> @@ -1291,7 +1292,7 @@ void destroy_segment_manager_caches(void);
>   */
>  struct page *grab_meta_page(struct f2fs_sb_info *, pgoff_t);
>  struct page *get_meta_page(struct f2fs_sb_info *, pgoff_t);
> -int ra_meta_pages(struct f2fs_sb_info *, int, int, int);
> +int ra_meta_pages(struct f2fs_sb_info *, block_t, int, int);
>  long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long);
>  void add_dirty_inode(struct f2fs_sb_info *, nid_t, int type);
>  void remove_dirty_inode(struct f2fs_sb_info *, nid_t, int type);
> diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
> index 3736728..6f7fbfa 100644
> --- a/fs/f2fs/recovery.c
> +++ b/fs/f2fs/recovery.c
> @@ -173,7 +173,7 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, 
> struct list_head
> *head)
>  {
>   unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
>   struct curseg_info *curseg;
> - struct page *page;
> + struct page *page = NULL;
>   block_t blkaddr;
>   int err = 0;
> 
> @@ -181,20 +181,19 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, 
> struct list_head
> *head)
>   curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
>   blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
> 
> - /* read node page */
> - page = alloc_page(GFP_F2FS_ZERO);
> - if (!page)
> - return -ENOMEM;
> - lock_page(page);
> -
>   while (1) {
>   struct fsync_inode_entry *entry;
> 
> - err = f2fs_submit_page_bio(sbi, page, blkaddr, READ_SYNC);
> - if (err)
> - return err;
> + if (blkaddr < SM_I(sbi)->main_blkaddr ||
> + blkaddr > TOTAL_BLKS(sbi))

blkaddr >= TOTAL_BLKS(sbi) ?

> + break;
> 
> - lock_page(page);
> + page = get_meta_page(sbi, blkaddr);
> + if (IS_ERR(page))
> + return PTR_ERR(page);
> +
> + ra_meta_pages(sbi, next_blkaddr_of_node(page),
> +
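
Putting the thread together, the reworked find_fsync_dnodes() walks the
warm-node chain through the meta_inode page cache and keeps readahead running
ahead of the walk, instead of re-submitting a synchronous bio into a single
privately allocated page. Below is a condensed sketch of the resulting loop;
the readahead window size is truncated in the archive, so nr_ra_blocks is a
placeholder, and the f2fs_put_page() cleanup and fsync-list bookkeeping are
assumed from context rather than quoted.

	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		/* stop once the chain leaves the main area */
		if (blkaddr < SM_I(sbi)->main_blkaddr ||
				blkaddr >= TOTAL_BLKS(sbi))
			break;

		/* read through the meta_inode page cache instead of
		 * allocating a private page and submitting a bio */
		page = get_meta_page(sbi, blkaddr);
		if (IS_ERR(page))
			return PTR_ERR(page);

		/* prefetch the upcoming part of the node chain
		 * (nr_ra_blocks is a placeholder window size) */
		ra_meta_pages(sbi, next_blkaddr_of_node(page),
				nr_ra_blocks, META_POR);

		/* ... verify the checkpoint version and add/update the
		 * fsync_inode_entry for this node (elided) ... */

		/* follow the chain to the next fsynced node */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);
	}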
