Re: [PATCH 3/8] staging: erofs: add error handling for xattr submodule

2018-08-13 Thread Chao Yu
Hi Xiang,

On 2018/8/13 10:36, Gao Xiang wrote:
> Hi Chao,
> 
> On 2018/8/13 10:00, Chao Yu wrote:
>> On 2018/8/12 22:01, Chao Yu wrote:
>>> From: Gao Xiang 
>>>
>>> This patch enhances the missing error handling code for
>>> xattr submodule, which improves the stability for the rare cases.
>>>
>>> Signed-off-by: Gao Xiang 
>>> Reviewed-by: Chao Yu 
>>> Signed-off-by: Chao Yu 
>>> ---
>>>  drivers/staging/erofs/internal.h |   6 +-
>>>  drivers/staging/erofs/xattr.c| 120 ---
>>>  2 files changed, 83 insertions(+), 43 deletions(-)
>>>
>>> diff --git a/drivers/staging/erofs/internal.h 
>>> b/drivers/staging/erofs/internal.h
>>> index a756abeff7e9..8951e01216e3 100644
>>> --- a/drivers/staging/erofs/internal.h
>>> +++ b/drivers/staging/erofs/internal.h
>>> @@ -485,10 +485,10 @@ struct erofs_map_blocks_iter {
>>>  
>>>  
>>>  static inline struct page *
>>> -erofs_get_inline_page_nofail(struct inode *inode,
>>> -erofs_blk_t blkaddr)
>>> +erofs_get_inline_page(struct inode *inode,
>>> + erofs_blk_t blkaddr)
>>>  {
>>> -   return erofs_get_meta_page_nofail(inode->i_sb,
>>> +   return erofs_get_meta_page(inode->i_sb,
>>> blkaddr, S_ISDIR(inode->i_mode));
>>>  }
>>>  
>>> diff --git a/drivers/staging/erofs/xattr.c b/drivers/staging/erofs/xattr.c
>>> index 2593c856b079..1b5815fc70db 100644
>>> --- a/drivers/staging/erofs/xattr.c
>>> +++ b/drivers/staging/erofs/xattr.c
>>> @@ -24,16 +24,25 @@ struct xattr_iter {
>>>  
>>>  static inline void xattr_iter_end(struct xattr_iter *it, bool atomic)
>>>  {
>>> -   /* only init_inode_xattrs use non-atomic once */
>>> +   /* the only user of kunmap() is 'init_inode_xattrs' */
>>> if (unlikely(!atomic))
>>> kunmap(it->page);
>>> else
>>> kunmap_atomic(it->kaddr);
>>> +
>>> unlock_page(it->page);
>>> put_page(it->page);
>>>  }
>>>  
>>> -static void init_inode_xattrs(struct inode *inode)
>>> +static inline void xattr_iter_end_final(struct xattr_iter *it)
>>> +{
>>> +   if (unlikely(it->page == NULL))
>>> +   return;
>>> +
>>> +   xattr_iter_end(it, true);
>>> +}
>>> +
>>> +static int init_inode_xattrs(struct inode *inode)
>>>  {
>>> struct xattr_iter it;
>>> unsigned i;
>>> @@ -44,7 +53,7 @@ static void init_inode_xattrs(struct inode *inode)
>>> bool atomic_map;
>>>  
>>> if (likely(inode_has_inited_xattr(inode)))
>>> -   return;
>>> +   return 0;
>>>  
>>> vi = EROFS_V(inode);
>>> BUG_ON(!vi->xattr_isize);
>>> @@ -54,8 +63,9 @@ static void init_inode_xattrs(struct inode *inode)
>>> it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize);
>>> it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);
>>>  
>>> -   it.page = erofs_get_inline_page_nofail(inode, it.blkaddr);
>>> -   BUG_ON(IS_ERR(it.page));
>>> +   it.page = erofs_get_inline_page(inode, it.blkaddr);
>>> +   if (IS_ERR(it.page))
>>> +   return PTR_ERR(it.page);
>>>  
>>> /* read in shared xattr array (non-atomic, see kmalloc below) */
>>> it.kaddr = kmap(it.page);
>>> @@ -64,9 +74,12 @@ static void init_inode_xattrs(struct inode *inode)
>>> ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs);
>>>  
>>> vi->xattr_shared_count = ih->h_shared_count;
>>> -   vi->xattr_shared_xattrs = (unsigned *)kmalloc_array(
>>> -   vi->xattr_shared_count, sizeof(unsigned),
>>> -   GFP_KERNEL | __GFP_NOFAIL);
>>> +   vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
>>> +   sizeof(unsigned), GFP_KERNEL);
>>> +   if (unlikely(vi->xattr_shared_xattrs == NULL)) {
>>> +   xattr_iter_end(&it, atomic_map);
>>> +   return -ENOMEM;
>>> +   }
>>>  
>>> /* let's skip ibody header */
>>> it.ofs += sizeof(struct erofs_xattr_ibody_header);
>>> @@ -77,9 +90,10 @@ static void init_inode_xattrs(struct inode *inode)
>>> BUG_ON(it.ofs != EROFS_BLKSIZ);
>>> xattr_iter_end(&it, atomic_map);
>>>  
>>> -   it.page = erofs_get_meta_page_nofail(sb,
>>> +   it.page = erofs_get_meta_page(sb,
>>> ++it.blkaddr, S_ISDIR(inode->i_mode));
>>> -   BUG_ON(IS_ERR(it.page));
>>> +   if (IS_ERR(it.page))
>>> +   return PTR_ERR(it.page);
>>>  
>>> it.kaddr = kmap_atomic(it.page);
>>> atomic_map = true;
>>> @@ -92,6 +106,7 @@ static void init_inode_xattrs(struct inode *inode)
>>> xattr_iter_end(&it, atomic_map);
>>>  
>>> inode_set_inited_xattr(inode);
>>> +   return 0;
>>>  }
>>>  
>>>  struct xattr_iter_handlers {
>>> @@ -101,19 +116,24 @@ struct xattr_iter_handlers {
>>> void (*value)(struct xattr_iter *, unsigned, char *, unsigned);
>>>  };
>>>  
>>> -static void xattr_iter_fixup(struct xattr_iter *it)
>>> +static inline int xattr_iter_fixup(struct xattr_

Re: [PATCH 2/8] staging: erofs: separate erofs_get_meta_page

2018-08-13 Thread Dan Carpenter
On Sun, Aug 12, 2018 at 10:01:44PM +0800, Chao Yu wrote:
> --- a/drivers/staging/erofs/data.c
> +++ b/drivers/staging/erofs/data.c
> @@ -39,31 +39,44 @@ static inline void read_endio(struct bio *bio)
>  }
>  
>  /* prio -- true is used for dir */
> -struct page *erofs_get_meta_page(struct super_block *sb,
> - erofs_blk_t blkaddr, bool prio)
> +struct page *__erofs_get_meta_page(struct super_block *sb,
> + erofs_blk_t blkaddr, bool prio, bool nofail)
>  {
> - struct inode *bd_inode = sb->s_bdev->bd_inode;
> - struct address_space *mapping = bd_inode->i_mapping;
> + struct inode *const bd_inode = sb->s_bdev->bd_inode;
> + struct address_space *const mapping = bd_inode->i_mapping;
> + /* prefer retrying in the allocator to blindly looping below */
> + const gfp_t gfp = mapping_gfp_constraint(mapping, ~__GFP_FS) |
> + (nofail ? __GFP_NOFAIL : 0);
> + unsigned int io_retries = nofail ? EROFS_IO_MAX_RETRIES_NOFAIL : 0;
>   struct page *page;
>  
>  repeat:
> - page = find_or_create_page(mapping, blkaddr,
> - /*
> -  * Prefer looping in the allocator rather than here,
> -  * at least that code knows what it's doing.
> -  */
> - mapping_gfp_constraint(mapping, ~__GFP_FS) | __GFP_NOFAIL);
> -
> - BUG_ON(!page || !PageLocked(page));
> + page = find_or_create_page(mapping, blkaddr, gfp);
> + if (unlikely(page == NULL)) {
> + DBG_BUGON(nofail);
> + return ERR_PTR(-ENOMEM);
> + }
> + DBG_BUGON(!PageLocked(page));
>  
>   if (!PageUptodate(page)) {
>   struct bio *bio;
>   int err;
>  
> - bio = erofs_grab_bio(sb, blkaddr, 1, read_endio, true);
> + bio = erofs_grab_bio(sb, blkaddr, 1, read_endio, nofail);
> + if (unlikely(bio == NULL)) {
> + DBG_BUGON(nofail);
> + err = -ENOMEM;
> +err_out:
> + unlock_page(page);
> + put_page(page);
> + return ERR_PTR(err);


Put this err_out stuff at the bottom of the function so that we don't
have to do backward hops to get to it.

> + }
>  
>   err = bio_add_page(bio, page, PAGE_SIZE, 0);
> - BUG_ON(err != PAGE_SIZE);
> + if (unlikely(err != PAGE_SIZE)) {
> + err = -EFAULT;
> + goto err_out;

Like this.  Generally avoid backwards hops if you can.

> + }
>  
>   __submit_bio(bio, REQ_OP_READ,
>   REQ_META | (prio ? REQ_PRIO : 0));
> @@ -72,6 +85,7 @@ struct page *erofs_get_meta_page(struct super_block *sb,
>  
>   /* the page has been truncated by others? */
>   if (unlikely(page->mapping != mapping)) {
> +unlock_repeat:

The question mark in the "truncated by others?" is a little concerning.
It's slightly weird that we don't check "io_retries" on this path.

>   unlock_page(page);
>   put_page(page);
>   goto repeat;
> @@ -79,10 +93,12 @@ struct page *erofs_get_meta_page(struct super_block *sb,
>  
>   /* more likely a read error */
>   if (unlikely(!PageUptodate(page))) {
> - unlock_page(page);
> - put_page(page);
> -
> - page = ERR_PTR(-EIO);
> + if (io_retries) {
> + --io_retries;
> + goto unlock_repeat;
> + }
> + err = -EIO;
> + goto err_out;

regards,
dan carpenter



Re: [PATCH 2/8] staging: erofs: separate erofs_get_meta_page

2018-08-13 Thread Gao Xiang
Hi Dan,

On 2018/8/13 19:04, Dan Carpenter wrote:
> On Sun, Aug 12, 2018 at 10:01:44PM +0800, Chao Yu wrote:
>> --- a/drivers/staging/erofs/data.c
>> +++ b/drivers/staging/erofs/data.c
>> @@ -39,31 +39,44 @@ static inline void read_endio(struct bio *bio)
>>  }
>>  
>>  /* prio -- true is used for dir */
>> -struct page *erofs_get_meta_page(struct super_block *sb,
>> -erofs_blk_t blkaddr, bool prio)
>> +struct page *__erofs_get_meta_page(struct super_block *sb,
>> +erofs_blk_t blkaddr, bool prio, bool nofail)
>>  {
>> -struct inode *bd_inode = sb->s_bdev->bd_inode;
>> -struct address_space *mapping = bd_inode->i_mapping;
>> +struct inode *const bd_inode = sb->s_bdev->bd_inode;
>> +struct address_space *const mapping = bd_inode->i_mapping;
>> +/* prefer retrying in the allocator to blindly looping below */
>> +const gfp_t gfp = mapping_gfp_constraint(mapping, ~__GFP_FS) |
>> +(nofail ? __GFP_NOFAIL : 0);
>> +unsigned int io_retries = nofail ? EROFS_IO_MAX_RETRIES_NOFAIL : 0;
>>  struct page *page;
>>  
>>  repeat:
>> -page = find_or_create_page(mapping, blkaddr,
>> -/*
>> - * Prefer looping in the allocator rather than here,
>> - * at least that code knows what it's doing.
>> - */
>> -mapping_gfp_constraint(mapping, ~__GFP_FS) | __GFP_NOFAIL);
>> -
>> -BUG_ON(!page || !PageLocked(page));
>> +page = find_or_create_page(mapping, blkaddr, gfp);
>> +if (unlikely(page == NULL)) {
>> +DBG_BUGON(nofail);
>> +return ERR_PTR(-ENOMEM);
>> +}
>> +DBG_BUGON(!PageLocked(page));
>>  
>>  if (!PageUptodate(page)) {
>>  struct bio *bio;
>>  int err;
>>  
>> -bio = erofs_grab_bio(sb, blkaddr, 1, read_endio, true);
>> +bio = erofs_grab_bio(sb, blkaddr, 1, read_endio, nofail);
>> +if (unlikely(bio == NULL)) {
>> +DBG_BUGON(nofail);
>> +err = -ENOMEM;
>> +err_out:
>> +unlock_page(page);
>> +put_page(page);
>> +return ERR_PTR(err);
> 
> Put this err_out stuff at the bottom of the function so that we don't
> have to do backward hops to get to it.
> 
>> +}
>>  
>>  err = bio_add_page(bio, page, PAGE_SIZE, 0);
>> -BUG_ON(err != PAGE_SIZE);
>> +if (unlikely(err != PAGE_SIZE)) {
>> +err = -EFAULT;
>> +goto err_out;
> 
> Like this.  Generally avoid backwards hops if you can.
> 
>> +}
>>  
>>  __submit_bio(bio, REQ_OP_READ,
>>  REQ_META | (prio ? REQ_PRIO : 0));
>> @@ -72,6 +85,7 @@ struct page *erofs_get_meta_page(struct super_block *sb,
>>  
>>  /* the page has been truncated by others? */
>>  if (unlikely(page->mapping != mapping)) {
>> +unlock_repeat:
> The question mark in the "truncated by others?" is a little concerning.
> It's slightly weird that we don't check "io_retries" on this path.
>
Thanks for your reply.

erofs uses the shared block device bd_inode->mapping for its metadata, which is
visible to other code rather than to erofs internally only, so I think it needs
to guard against potential truncation by others; similar comments can also be
found at

- pagecache_get_page
1557 /* Has the page been truncated? */
1558 if (unlikely(page->mapping != mapping)) {
1559 unlock_page(page);
1560 put_page(page);
1561 goto repeat;
1562 }

- filemap_fault
2535 /* Did it get truncated? */
2536 if (unlikely(page->mapping != mapping)) {
2537 unlock_page(page);
2538 put_page(page);
2539 goto retry_find;
2540 }

I think it is somewhat necessary — do you have any suggestions about this?
Thanks in advance.

"It's slightly weird that we don't check "io_retries" on this path"
I think you are right, let me rethink about it and send the next patch...

Thanks,
Gao Xiang



Re: [PATCH 4/8] staging: erofs: cleanup z_erofs_vle_work_{lookup, register}

2018-08-13 Thread Dan Carpenter
On Sun, Aug 12, 2018 at 10:01:46PM +0800, Chao Yu wrote:
> From: Gao Xiang 
> 
> This patch introduces 'struct z_erofs_vle_work_finder' to clean up
> arguments of z_erofs_vle_work_lookup and z_erofs_vle_work_register.
> 
> Signed-off-by: Gao Xiang 
> Reviewed-by: Chao Yu 
> Signed-off-by: Chao Yu 
> ---
>  drivers/staging/erofs/unzip_vle.c | 89 ---
>  1 file changed, 47 insertions(+), 42 deletions(-)
> 
> diff --git a/drivers/staging/erofs/unzip_vle.c 
> b/drivers/staging/erofs/unzip_vle.c
> index b2e05e2b4116..5032b3b05de1 100644
> --- a/drivers/staging/erofs/unzip_vle.c
> +++ b/drivers/staging/erofs/unzip_vle.c
> @@ -271,36 +271,42 @@ static inline bool try_to_claim_workgroup(
>   return true;/* lucky, I am the followee :) */
>  }
>  
> +struct z_erofs_vle_work_finder {
> + struct super_block *sb;
> + pgoff_t idx;
> + unsigned pageofs;
> +
> + struct z_erofs_vle_workgroup **grp_ret;
> + enum z_erofs_vle_work_role *role;
> + z_erofs_vle_owned_workgrp_t *owned_head;
> + bool *hosted;
> +};
> +
>  static struct z_erofs_vle_work *
> -z_erofs_vle_work_lookup(struct super_block *sb,
> - pgoff_t idx, unsigned pageofs,
> - struct z_erofs_vle_workgroup **grp_ret,
> - enum z_erofs_vle_work_role *role,
> - z_erofs_vle_owned_workgrp_t *owned_head,
> - bool *hosted)
> +z_erofs_vle_work_lookup(const struct z_erofs_vle_work_finder *f)
>  {
>   bool tag, primary;
>   struct erofs_workgroup *egrp;
>   struct z_erofs_vle_workgroup *grp;
>   struct z_erofs_vle_work *work;
>  
> - egrp = erofs_find_workgroup(sb, idx, &tag);
> + egrp = erofs_find_workgroup(f->sb, f->idx, &tag);
>   if (egrp == NULL) {
> - *grp_ret = NULL;
> + *f->grp_ret = NULL;

All these pointers to pointer seem a bit messy.  Just do this:

struct z_erofs_vle_workgroup *grp;

Then replace "grp" in z_erofs_vle_work_iter_begin() with finder.grp;

regards,
dan carpenter



Re: [PATCH 3/8] staging: erofs: add error handling for xattr submodule

2018-08-13 Thread Dan Carpenter
> -static void xattr_iter_fixup(struct xattr_iter *it)
> +static inline int xattr_iter_fixup(struct xattr_iter *it)
>  {
> - if (unlikely(it->ofs >= EROFS_BLKSIZ)) {
> - xattr_iter_end(it, true);
> + if (likely(it->ofs < EROFS_BLKSIZ))
> + return 0;
>  
> - it->blkaddr += erofs_blknr(it->ofs);
> - it->page = erofs_get_meta_page_nofail(it->sb,
> - it->blkaddr, false);
> - BUG_ON(IS_ERR(it->page));
> + xattr_iter_end(it, true);
>  
> - it->kaddr = kmap_atomic(it->page);
> - it->ofs = erofs_blkoff(it->ofs);
> + it->blkaddr += erofs_blknr(it->ofs);
> +
> + it->page = erofs_get_meta_page(it->sb, it->blkaddr, false);
> + if (IS_ERR(it->page)) {
> + it->page = NULL;
> + return PTR_ERR(it->page);

This is returning PTR_ERR(NULL) which is success.  There is a smatch
test for this and maybe other static checkers as well so it would have
been caught very quickly.

>   }
> +
> + it->kaddr = kmap_atomic(it->page);
> + it->ofs = erofs_blkoff(it->ofs);
> + return 0;
>  }
>  
>  static int inline_xattr_iter_begin(struct xattr_iter *it,
> @@ -134,22 +154,25 @@ static int inline_xattr_iter_begin(struct xattr_iter 
> *it,
>   it->blkaddr = erofs_blknr(iloc(sbi, vi->nid) + inline_xattr_ofs);
>   it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs);
>  
> - it->page = erofs_get_inline_page_nofail(inode, it->blkaddr);
> - BUG_ON(IS_ERR(it->page));
> - it->kaddr = kmap_atomic(it->page);
> + it->page = erofs_get_inline_page(inode, it->blkaddr);
> + if (IS_ERR(it->page))
> + return PTR_ERR(it->page);
>  
> + it->kaddr = kmap_atomic(it->page);
>   return vi->xattr_isize - xattr_header_sz;
>  }
>  
>  static int xattr_foreach(struct xattr_iter *it,
> - struct xattr_iter_handlers *op, unsigned *tlimit)
> + const struct xattr_iter_handlers *op, unsigned *tlimit)
>  {
>   struct erofs_xattr_entry entry;
>   unsigned value_sz, processed, slice;
>   int err;
>  
>   /* 0. fixup blkaddr, ofs, ipage */
> - xattr_iter_fixup(it);
> + err = xattr_iter_fixup(it);
> + if (unlikely(err))
> + return err;
>  
>   /*
>* 1. read xattr entry to the memory,
> @@ -181,7 +204,9 @@ static int xattr_foreach(struct xattr_iter *it,
>   if (it->ofs >= EROFS_BLKSIZ) {
>   BUG_ON(it->ofs > EROFS_BLKSIZ);
>  
> - xattr_iter_fixup(it);
> + err = xattr_iter_fixup(it);
> + if (unlikely(err))
> + goto out;
>   it->ofs = 0;
>   }
>  
> @@ -213,7 +238,10 @@ static int xattr_foreach(struct xattr_iter *it,
>   while (processed < value_sz) {
>   if (it->ofs >= EROFS_BLKSIZ) {
>   BUG_ON(it->ofs > EROFS_BLKSIZ);
> - xattr_iter_fixup(it);
> +
> + err = xattr_iter_fixup(it);
> + if (unlikely(err))
> + goto out;
>   it->ofs = 0;
>   }
>  
> @@ -273,7 +301,7 @@ static void xattr_copyvalue(struct xattr_iter *_it,
>   memcpy(it->buffer + processed, buf, len);
>  }
>  
> -static struct xattr_iter_handlers find_xattr_handlers = {
> +static const struct xattr_iter_handlers find_xattr_handlers = {
>   .entry = xattr_entrymatch,
>   .name = xattr_namematch,
>   .alloc_buffer = xattr_checkbuffer,
> @@ -294,8 +322,11 @@ static int inline_getxattr(struct inode *inode, struct 
> getxattr_iter *it)
>   ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);
>   if (ret >= 0)
>   break;
> +
> + if (unlikely(ret != -ENOATTR))  /* -ENOMEM, -EIO, etc. */

I have held off commenting on all the likely/unlikely annotations we
are adding because I don't know what the fast paths are in this code.
However, this is clearly an error path here, not on a fast path.

Generally the rule on likely/unlikely is that they hurt readability so
we should only add them if it makes a difference in benchmarking.

> + break;
>   }
> - xattr_iter_end(&it->it, true);
> + xattr_iter_end_final(&it->it);
>  
>   return ret < 0 ? ret : it->buffer_size;
>  }
> @@ -318,9 +349,10 @@ static int shared_getxattr(struct inode *inode, struct 
> getxattr_iter *it)
>   if (i)
>   xattr_iter_end(&it->it, true);
>  
> - it->it.page = erofs_get_meta_page_nofail(sb,
> - blkaddr, false);
> - BUG_ON(IS_ERR(it->it.page));
> + it->it.page = erofs_get_meta_page(sb, blkaddr, false);
> + if (IS_ERR(it->it.page))
> + return PTR_ERR(it->it.page);
> +
>  

Re: [PATCH 6/8] staging: erofs: fix vle_decompressed_index_clusterofs

2018-08-13 Thread Dan Carpenter
> -static inline unsigned
> -vle_compressed_index_clusterofs(unsigned clustersize,
> - struct z_erofs_vle_decompressed_index *di)
> +static inline int
> +vle_decompressed_index_clusterofs(unsigned int *clusterofs,

Not related to your patch, but don't make functions inline.  Leave it to
the compiler to decide.

regards,
dan carpenter




[PATCH] staging: erofs: add int to usigned

2018-08-13 Thread Leon Imhof
Fix coding style issue "Prefer 'unsigned int' to bare use of 'unsigned'"
detected by checkpatch.pl

Signed-off-by: Leon Imhof 
---
 drivers/staging/erofs/data.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/staging/erofs/data.c b/drivers/staging/erofs/data.c
index ac263a180253..2a401216007a 100644
--- a/drivers/staging/erofs/data.c
+++ b/drivers/staging/erofs/data.c
@@ -337,7 +337,7 @@ static int erofs_raw_access_readpage(struct file *file, 
struct page *page)
 
 static int erofs_raw_access_readpages(struct file *filp,
struct address_space *mapping,
-   struct list_head *pages, unsigned nr_pages)
+   struct list_head *pages, unsigned int nr_pages)
 {
erofs_off_t last_block;
struct bio *bio = NULL;
-- 
2.18.0



Re: [PATCH 3/8] staging: erofs: add error handling for xattr submodule

2018-08-13 Thread Gao Xiang
Hi Dan,

On 2018/8/13 19:47, Dan Carpenter wrote:
>> -static void xattr_iter_fixup(struct xattr_iter *it)
>> +static inline int xattr_iter_fixup(struct xattr_iter *it)
>>  {
>> -if (unlikely(it->ofs >= EROFS_BLKSIZ)) {
>> -xattr_iter_end(it, true);
>> +if (likely(it->ofs < EROFS_BLKSIZ))
>> +return 0;
>>  
>> -it->blkaddr += erofs_blknr(it->ofs);
>> -it->page = erofs_get_meta_page_nofail(it->sb,
>> -it->blkaddr, false);
>> -BUG_ON(IS_ERR(it->page));
>> +xattr_iter_end(it, true);
>>  
>> -it->kaddr = kmap_atomic(it->page);
>> -it->ofs = erofs_blkoff(it->ofs);
>> +it->blkaddr += erofs_blknr(it->ofs);
>> +
>> +it->page = erofs_get_meta_page(it->sb, it->blkaddr, false);
>> +if (IS_ERR(it->page)) {
>> +it->page = NULL;
>> +return PTR_ERR(it->page);
> 
> This is returning PTR_ERR(NULL) which is success.  There is a smatch
> test for this and maybe other static checkers as well so it would have
> been caught very quickly.
> 

I am sorry about this. It has already fixed in patch v2.

>>  }
>> +
>> +it->kaddr = kmap_atomic(it->page);
>> +it->ofs = erofs_blkoff(it->ofs);
>> +return 0;
>>  }
>>  
>>  static int inline_xattr_iter_begin(struct xattr_iter *it,
>> @@ -134,22 +154,25 @@ static int inline_xattr_iter_begin(struct xattr_iter 
>> *it,
>>  it->blkaddr = erofs_blknr(iloc(sbi, vi->nid) + inline_xattr_ofs);
>>  it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs);
>>  
>> -it->page = erofs_get_inline_page_nofail(inode, it->blkaddr);
>> -BUG_ON(IS_ERR(it->page));
>> -it->kaddr = kmap_atomic(it->page);
>> +it->page = erofs_get_inline_page(inode, it->blkaddr);
>> +if (IS_ERR(it->page))
>> +return PTR_ERR(it->page);
>>  
>> +it->kaddr = kmap_atomic(it->page);
>>  return vi->xattr_isize - xattr_header_sz;
>>  }
>>  
>>  static int xattr_foreach(struct xattr_iter *it,
>> -struct xattr_iter_handlers *op, unsigned *tlimit)
>> +const struct xattr_iter_handlers *op, unsigned *tlimit)
>>  {
>>  struct erofs_xattr_entry entry;
>>  unsigned value_sz, processed, slice;
>>  int err;
>>  
>>  /* 0. fixup blkaddr, ofs, ipage */
>> -xattr_iter_fixup(it);
>> +err = xattr_iter_fixup(it);
>> +if (unlikely(err))
>> +return err;
>>  
>>  /*
>>   * 1. read xattr entry to the memory,
>> @@ -181,7 +204,9 @@ static int xattr_foreach(struct xattr_iter *it,
>>  if (it->ofs >= EROFS_BLKSIZ) {
>>  BUG_ON(it->ofs > EROFS_BLKSIZ);
>>  
>> -xattr_iter_fixup(it);
>> +err = xattr_iter_fixup(it);
>> +if (unlikely(err))
>> +goto out;
>>  it->ofs = 0;
>>  }
>>  
>> @@ -213,7 +238,10 @@ static int xattr_foreach(struct xattr_iter *it,
>>  while (processed < value_sz) {
>>  if (it->ofs >= EROFS_BLKSIZ) {
>>  BUG_ON(it->ofs > EROFS_BLKSIZ);
>> -xattr_iter_fixup(it);
>> +
>> +err = xattr_iter_fixup(it);
>> +if (unlikely(err))
>> +goto out;
>>  it->ofs = 0;
>>  }
>>  
>> @@ -273,7 +301,7 @@ static void xattr_copyvalue(struct xattr_iter *_it,
>>  memcpy(it->buffer + processed, buf, len);
>>  }
>>  
>> -static struct xattr_iter_handlers find_xattr_handlers = {
>> +static const struct xattr_iter_handlers find_xattr_handlers = {
>>  .entry = xattr_entrymatch,
>>  .name = xattr_namematch,
>>  .alloc_buffer = xattr_checkbuffer,
>> @@ -294,8 +322,11 @@ static int inline_getxattr(struct inode *inode, struct 
>> getxattr_iter *it)
>>  ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);
>>  if (ret >= 0)
>>  break;
>> +
>> +if (unlikely(ret != -ENOATTR))  /* -ENOMEM, -EIO, etc. */
> 
> I have held off commenting on all the likely/unlikely annotations we
> are adding because I don't know what the fast paths are in this code.
> However, this is clearly an error path here, not on a fast path.
> 
> Generally the rule on likely/unlikely is that they hurt readability so
> we should only add them if it makes a difference in benchmarking.
> 

In my opinion, return values other than 0 and ENOATTR(ENODATA) rarely happen,
so they should be in the slow path...

>> +break;
>>  }
>> -xattr_iter_end(&it->it, true);
>> +xattr_iter_end_final(&it->it);
>>  
>>  return ret < 0 ? ret : it->buffer_size;
>>  }
>> @@ -318,9 +349,10 @@ static int shared_getxattr(struct inode *inode, struct 
>> getxattr_iter *it)
>>  if (i)
>>  xattr_iter_end(&it->it, true);
>>  
>> -it->it.page = erofs_get_meta_page_nofail(sb,
>> -  

Re: [PATCH 3/8] staging: erofs: add error handling for xattr submodule

2018-08-13 Thread Dan Carpenter
On Mon, Aug 13, 2018 at 08:17:27PM +0800, Gao Xiang wrote:
> > /* we assume that ofs is aligned with 4 bytes */
> > it->ofs = EROFS_XATTR_ALIGN(it->ofs);
> > return err;
> > 

This might be cleaner if we wrote:

return (err < 0) ? err : 0;

The callers all treat zero and one the same so there isn't any reason
to propagate the 1 back.

regards,
dan carpenter



Re: [PATCH 2/8] staging: erofs: separate erofs_get_meta_page

2018-08-13 Thread Chao Yu
On 2018/8/13 19:04, Dan Carpenter wrote:
> On Sun, Aug 12, 2018 at 10:01:44PM +0800, Chao Yu wrote:
>> --- a/drivers/staging/erofs/data.c
>> +++ b/drivers/staging/erofs/data.c
>> @@ -39,31 +39,44 @@ static inline void read_endio(struct bio *bio)
>>  }
>>  
>>  /* prio -- true is used for dir */
>> -struct page *erofs_get_meta_page(struct super_block *sb,
>> -erofs_blk_t blkaddr, bool prio)
>> +struct page *__erofs_get_meta_page(struct super_block *sb,
>> +erofs_blk_t blkaddr, bool prio, bool nofail)
>>  {
>> -struct inode *bd_inode = sb->s_bdev->bd_inode;
>> -struct address_space *mapping = bd_inode->i_mapping;
>> +struct inode *const bd_inode = sb->s_bdev->bd_inode;
>> +struct address_space *const mapping = bd_inode->i_mapping;
>> +/* prefer retrying in the allocator to blindly looping below */
>> +const gfp_t gfp = mapping_gfp_constraint(mapping, ~__GFP_FS) |
>> +(nofail ? __GFP_NOFAIL : 0);
>> +unsigned int io_retries = nofail ? EROFS_IO_MAX_RETRIES_NOFAIL : 0;
>>  struct page *page;
>>  
>>  repeat:
>> -page = find_or_create_page(mapping, blkaddr,
>> -/*
>> - * Prefer looping in the allocator rather than here,
>> - * at least that code knows what it's doing.
>> - */
>> -mapping_gfp_constraint(mapping, ~__GFP_FS) | __GFP_NOFAIL);
>> -
>> -BUG_ON(!page || !PageLocked(page));
>> +page = find_or_create_page(mapping, blkaddr, gfp);
>> +if (unlikely(page == NULL)) {
>> +DBG_BUGON(nofail);
>> +return ERR_PTR(-ENOMEM);
>> +}
>> +DBG_BUGON(!PageLocked(page));
>>  
>>  if (!PageUptodate(page)) {
>>  struct bio *bio;
>>  int err;
>>  
>> -bio = erofs_grab_bio(sb, blkaddr, 1, read_endio, true);
>> +bio = erofs_grab_bio(sb, blkaddr, 1, read_endio, nofail);
>> +if (unlikely(bio == NULL)) {
>> +DBG_BUGON(nofail);
>> +err = -ENOMEM;
>> +err_out:
>> +unlock_page(page);
>> +put_page(page);
>> +return ERR_PTR(err);
> 
> 
> Put this err_out stuff at the bottom of the function so that we don't
> have to do backward hops to get to it.

Agreed, we can move error path at the bottom of this function.

> 
>> +}
>>  
>>  err = bio_add_page(bio, page, PAGE_SIZE, 0);
>> -BUG_ON(err != PAGE_SIZE);
>> +if (unlikely(err != PAGE_SIZE)) {
>> +err = -EFAULT;
>> +goto err_out;
> 
> Like this.  Generally avoid backwards hops if you can.
> 
>> +}
>>  
>>  __submit_bio(bio, REQ_OP_READ,
>>  REQ_META | (prio ? REQ_PRIO : 0));
>> @@ -72,6 +85,7 @@ struct page *erofs_get_meta_page(struct super_block *sb,
>>  
>>  /* the page has been truncated by others? */
>>  if (unlikely(page->mapping != mapping)) {
>> +unlock_repeat:
> 
> The question mark in the "truncated by others?" is a little concerning.

Yup, we can remove the question mark.

> It's slightly weird that we don't check "io_retries" on this path.

We don't need to cover this path since io_retries is used only for counting
retry attempts when an IO error occurs.

Thanks,

> 
>>  unlock_page(page);
>>  put_page(page);
>>  goto repeat;
>> @@ -79,10 +93,12 @@ struct page *erofs_get_meta_page(struct super_block *sb,
>>  
>>  /* more likely a read error */
>>  if (unlikely(!PageUptodate(page))) {
>> -unlock_page(page);
>> -put_page(page);
>> -
>> -page = ERR_PTR(-EIO);
>> +if (io_retries) {
>> +--io_retries;
>> +goto unlock_repeat;
>> +}
>> +err = -EIO;
>> +goto err_out;
> 
> regards,
> dan carpenter
> 


Re: [PATCH 4/8] staging: erofs: cleanup z_erofs_vle_work_{lookup, register}

2018-08-13 Thread Gao Xiang
Hi Dan,

On 2018/8/13 20:00, Dan Carpenter wrote:
> On Sun, Aug 12, 2018 at 10:01:46PM +0800, Chao Yu wrote:
>> From: Gao Xiang 
>>
>> This patch introduces 'struct z_erofs_vle_work_finder' to clean up
>> arguments of z_erofs_vle_work_lookup and z_erofs_vle_work_register.
>>
>> Signed-off-by: Gao Xiang 
>> Reviewed-by: Chao Yu 
>> Signed-off-by: Chao Yu 
>> ---
>>  drivers/staging/erofs/unzip_vle.c | 89 ---
>>  1 file changed, 47 insertions(+), 42 deletions(-)
>>
>> diff --git a/drivers/staging/erofs/unzip_vle.c 
>> b/drivers/staging/erofs/unzip_vle.c
>> index b2e05e2b4116..5032b3b05de1 100644
>> --- a/drivers/staging/erofs/unzip_vle.c
>> +++ b/drivers/staging/erofs/unzip_vle.c
>> @@ -271,36 +271,42 @@ static inline bool try_to_claim_workgroup(
>>  return true;/* lucky, I am the followee :) */
>>  }
>>  
>> +struct z_erofs_vle_work_finder {
>> +struct super_block *sb;
>> +pgoff_t idx;
>> +unsigned pageofs;
>> +
>> +struct z_erofs_vle_workgroup **grp_ret;
>> +enum z_erofs_vle_work_role *role;
>> +z_erofs_vle_owned_workgrp_t *owned_head;
>> +bool *hosted;
>> +};
>> +
>>  static struct z_erofs_vle_work *
>> -z_erofs_vle_work_lookup(struct super_block *sb,
>> -pgoff_t idx, unsigned pageofs,
>> -struct z_erofs_vle_workgroup **grp_ret,
>> -enum z_erofs_vle_work_role *role,
>> -z_erofs_vle_owned_workgrp_t *owned_head,
>> -bool *hosted)
>> +z_erofs_vle_work_lookup(const struct z_erofs_vle_work_finder *f)
>>  {
>>  bool tag, primary;
>>  struct erofs_workgroup *egrp;
>>  struct z_erofs_vle_workgroup *grp;
>>  struct z_erofs_vle_work *work;
>>  
>> -egrp = erofs_find_workgroup(sb, idx, &tag);
>> +egrp = erofs_find_workgroup(f->sb, f->idx, &tag);
>>  if (egrp == NULL) {
>> -*grp_ret = NULL;
>> +*f->grp_ret = NULL;
> 
> All these pointers to pointer seem a bit messy.  Just do this:
> 
>   struct z_erofs_vle_workgroup *grp;
> 
> Then replace "grp" in z_erofs_vle_work_iter_begin() with finder.grp;
> 

I wrote this because I am not sure of all compiler behaviors.

Notice that the struct `struct z_erofs_vle_work_finder' has been all marked as 
const.

If I use `struct z_erofs_vle_workgroup *grp;' and drop the `const' decorator,
compilers could do some re-read operations because its value could potentially
be changed by its caller at the same time.

Thanks,
Gao Xiang

> regards,
> dan carpenter
> 


Re: [PATCH 3/8] staging: erofs: add error handling for xattr submodule

2018-08-13 Thread Dan Carpenter
On Mon, Aug 13, 2018 at 08:17:27PM +0800, Gao Xiang wrote:
> >> @@ -294,8 +322,11 @@ static int inline_getxattr(struct inode *inode, 
> >> struct getxattr_iter *it)
> >>ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);
> >>if (ret >= 0)
> >>break;
> >> +
> >> +  if (unlikely(ret != -ENOATTR))  /* -ENOMEM, -EIO, etc. */
> > 
> > I have held off commenting on all the likely/unlikely annotations we
> > are adding because I don't know what the fast paths are in this code.
> > However, this is clearly an error path here, not on a fast path.
> > 
> > Generally the rule on likely/unlikely is that they hurt readability so
> > we should only add them if it makes a difference in benchmarking.
> > 
> 
> In my opinion, return values other than 0 and ENOATTR(ENODATA) rarely happens,
> it should be in the slow path...
> 

What I'm trying to say is please stop adding so many likely/unlikely
annotations.  You should only add them if you have the benchmark data to
show that it really is required.


regards,
dan carpenter



Re: [PATCH 3/8] staging: erofs: add error handling for xattr submodule

2018-08-13 Thread Chao Yu
On 2018/8/13 20:17, Gao Xiang wrote:
>> Generally the rule on likely/unlikely is that they hurt readability so
>> we should only add them if it makes a difference in benchmarking.
>>
> 
> In my opinion, return values other than 0 and ENOATTR(ENODATA) rarely happens,
> it should be in the slow path...

Hi Dan, thanks for the comments.

IMO, we should check and clean up all likely/unlikely in erofs, to make sure
they are used in the right place.

Thanks,


Re: [PATCH 3/8] staging: erofs: add error handling for xattr submodule

2018-08-13 Thread Gao Xiang
Hi Dan,

On 2018/8/13 20:40, Dan Carpenter wrote:
> On Mon, Aug 13, 2018 at 08:17:27PM +0800, Gao Xiang wrote:
 @@ -294,8 +322,11 @@ static int inline_getxattr(struct inode *inode, 
 struct getxattr_iter *it)
ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);
if (ret >= 0)
break;
 +
 +  if (unlikely(ret != -ENOATTR))  /* -ENOMEM, -EIO, etc. */
>>> I have held off commenting on all the likely/unlikely annotations we
>>> are adding because I don't know what the fast paths are in this code.
>>> However, this is clearly an error path here, not on a fast path.
>>>
>>> Generally the rule on likely/unlikely is that they hurt readability so
>>> we should only add them if it makes a difference in benchmarking.
>>>
>> In my opinion, return values other than 0 and ENOATTR(ENODATA) rarely 
>> happens,
>> it should be in the slow path...
>>
> What I'm trying to say is please stop adding so many likely/unlikely
> annotations.  You should only add them if you have the benchmark data to
> show that it really is required.
> 
> 
OK, I'll follow your suggestion.

I could say it is my personal code tendency(style).
If you don't like it/think them useless, I will remove them all. 

Thanks for your suggestion.

Thanks,
Gao Xiang

> regards,
> dan carpenter
> 


Re: [PATCH 6/8] staging: erofs: fix vle_decompressed_index_clusterofs

2018-08-13 Thread Gao Xiang
Hi Dan,

On 2018/8/13 20:03, Dan Carpenter wrote:
>> -static inline unsigned
>> -vle_compressed_index_clusterofs(unsigned clustersize,
>> -struct z_erofs_vle_decompressed_index *di)
>> +static inline int
>> +vle_decompressed_index_clusterofs(unsigned int *clusterofs,
> 
> Not related to your patch, but don't make functions inline.  Leave it to
> the compiler to decide.

OK, thanks for your suggestion.

Thanks,
Gao Xiang

> 
> regards,
> dan carpenter
> 
> 


Re: [PATCH 4/8] staging: erofs: cleanup z_erofs_vle_work_{lookup, register}

2018-08-13 Thread Dan Carpenter
Yeah.  You'd have to remove the const.

Anyway, on looking at it more, I guess this patch is fine for now.  We
will probably end up changing z_erofs_vle_work_lookup() and
z_erofs_vle_work_register() some more in the future.

regards,
dan carpenter



Re: [PATCH] staging: erofs: add int to usigned

2018-08-13 Thread Gao Xiang
Hi Leon,

On 2018/8/13 20:09, Leon Imhof wrote:
> Fix coding style issue "Prefer 'unsigned int' to bare use of 'unsigned'"
> detected by checkpatch.pl
> 
> Signed-off-by: Leon Imhof 

Looks fine,

Reviewed-by: Gao Xiang 

(BTW, could you fix the title of this commit? Thanks.)

> ---
>  drivers/staging/erofs/data.c | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
> 
> diff --git a/drivers/staging/erofs/data.c b/drivers/staging/erofs/data.c
> index ac263a180253..2a401216007a 100644
> --- a/drivers/staging/erofs/data.c
> +++ b/drivers/staging/erofs/data.c
> @@ -337,7 +337,7 @@ static int erofs_raw_access_readpage(struct file *file, 
> struct page *page)
>  
>  static int erofs_raw_access_readpages(struct file *filp,
>   struct address_space *mapping,
> - struct list_head *pages, unsigned nr_pages)
> + struct list_head *pages, unsigned int nr_pages)
>  {
>   erofs_off_t last_block;
>   struct bio *bio = NULL;
> 


Re: [PATCH 4/8] staging: erofs: cleanup z_erofs_vle_work_{lookup, register}

2018-08-13 Thread Gao Xiang
Hi Dan,

On 2018/8/13 21:05, Dan Carpenter wrote:
> Yeah.  You'd have to remove the const.
> 
> Anyway, on looking at it more, I guess this patch is fine for now.  We
> will probably end up changing z_erofs_vle_work_lookup() and
> z_erofs_vle_work_register() some more in the future.
> 

Thanks for review, I personally tend to leave this patch unmodified for now :( 
.

The struct is written so that it is either read-only or assigned and read at 
once, letting compilers notice that
(all modifications are local so compilers can handle them safely)...

Thanks,
Gao Xiang

> regards,
> dan carpenter
> 


Re: [PATCH 3/8] staging: erofs: add error handling for xattr submodule

2018-08-13 Thread Gao Xiang
Hi Dan,

On 2018/8/13 20:25, Dan Carpenter wrote:
> On Mon, Aug 13, 2018 at 08:17:27PM +0800, Gao Xiang wrote:
>>> /* we assume that ofs is aligned with 4 bytes */
>>> it->ofs = EROFS_XATTR_ALIGN(it->ofs);
>>> return err;
>>>
> This might be cleaner if we wrote:
> 
>   return (err < 0) ? err : 0;
> 
> The callers all treat zero and one the same so there isn't any reason
> to propagate the 1 back.
> 

Thanks, I will recheck all callers and fix as you suggested.
But it is better to fix them in an independent patch. :) I will send a new 
patch later.

Thanks,
Gao Xiang

> regards,
> dan carpenter
> 


Re: [PATCH 3/8] staging: erofs: add error handling for xattr submodule

2018-08-13 Thread Dan Carpenter
> But it is better to fix them in an independent patch. :)

Yah.  Of course.  This was completely unrelated.

regards,
dan carpenter


[PATCH] staging: erofs: change 'unsigned' to 'unsigned int'

2018-08-13 Thread Leon Imhof
Fix coding style issue "Prefer 'unsigned int' to bare use of 'unsigned'"
detected by checkpatch.pl

Signed-off-by: Leon Imhof 
---
 drivers/staging/erofs/data.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/staging/erofs/data.c b/drivers/staging/erofs/data.c
index ac263a180253..2a401216007a 100644
--- a/drivers/staging/erofs/data.c
+++ b/drivers/staging/erofs/data.c
@@ -337,7 +337,7 @@ static int erofs_raw_access_readpage(struct file *file, 
struct page *page)
 
 static int erofs_raw_access_readpages(struct file *filp,
struct address_space *mapping,
-   struct list_head *pages, unsigned nr_pages)
+   struct list_head *pages, unsigned int nr_pages)
 {
erofs_off_t last_block;
struct bio *bio = NULL;
-- 
2.18.0



Re: [PATCH] staging: erofs: change 'unsigned' to 'unsigned int'

2018-08-13 Thread Gao Xiang via Linux-erofs
Hi Leon,

On 2018/8/13 23:20, Leon Imhof wrote:
> Fix coding style issue "Prefer 'unsigned int' to bare use of 'unsigned'"
> detected by checkpatch.pl
> 
> Signed-off-by: Leon Imhof 
> ---

Thanks for the patch, could be better if use `[PATCH v2]' to indicate
that you send a new version of `[PATCH] staging: erofs: add int to usigned'.

Reviewed-by: Gao Xiang 

Thanks,
Gao Xiang

>  drivers/staging/erofs/data.c | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
> 
> diff --git a/drivers/staging/erofs/data.c b/drivers/staging/erofs/data.c
> index ac263a180253..2a401216007a 100644
> --- a/drivers/staging/erofs/data.c
> +++ b/drivers/staging/erofs/data.c
> @@ -337,7 +337,7 @@ static int erofs_raw_access_readpage(struct file *file, 
> struct page *page)
>  
>  static int erofs_raw_access_readpages(struct file *filp,
>   struct address_space *mapping,
> - struct list_head *pages, unsigned nr_pages)
> + struct list_head *pages, unsigned int nr_pages)
>  {
>   erofs_off_t last_block;
>   struct bio *bio = NULL;
> 


[PREVIEW] [PATCH RESEND staging-next 1/8] staging: erofs: introduce erofs_grab_bio

2018-08-13 Thread Gao Xiang via Linux-erofs
From: Gao Xiang 

this patch renames prepare_bio to erofs_grab_bio, and
adds a nofail option in order to retry in the bio allocator
under memory pressure.

Signed-off-by: Gao Xiang 
Reviewed-by: Chao Yu 
Signed-off-by: Chao Yu 
---
 drivers/staging/erofs/data.c  | 12 ++--
 drivers/staging/erofs/internal.h  | 36 ++--
 drivers/staging/erofs/unzip_vle.c |  4 ++--
 3 files changed, 30 insertions(+), 22 deletions(-)

diff --git a/drivers/staging/erofs/data.c b/drivers/staging/erofs/data.c
index ac263a1..e0c046d 100644
--- a/drivers/staging/erofs/data.c
+++ b/drivers/staging/erofs/data.c
@@ -60,7 +60,8 @@ struct page *erofs_get_meta_page(struct super_block *sb,
struct bio *bio;
int err;
 
-   bio = prepare_bio(sb, blkaddr, 1, read_endio);
+   bio = erofs_grab_bio(sb, blkaddr, 1, read_endio, true);
+
err = bio_add_page(bio, page, PAGE_SIZE, 0);
BUG_ON(err != PAGE_SIZE);
 
@@ -278,7 +279,14 @@ static inline struct bio *erofs_read_raw_page(
if (nblocks > BIO_MAX_PAGES)
nblocks = BIO_MAX_PAGES;
 
-   bio = prepare_bio(inode->i_sb, blknr, nblocks, read_endio);
+   bio = erofs_grab_bio(inode->i_sb,
+   blknr, nblocks, read_endio, false);
+
+   if (IS_ERR(bio)) {
+   err = PTR_ERR(bio);
+   bio = NULL;
+   goto err_out;
+   }
}
 
err = bio_add_page(bio, page, PAGE_SIZE, 0);
diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h
index 367b39f..1353b3f 100644
--- a/drivers/staging/erofs/internal.h
+++ b/drivers/staging/erofs/internal.h
@@ -420,26 +420,26 @@ struct erofs_map_blocks {
 #define EROFS_GET_BLOCKS_RAW0x0001
 
 /* data.c */
-static inline struct bio *prepare_bio(
-   struct super_block *sb,
-   erofs_blk_t blkaddr, unsigned nr_pages,
-   bio_end_io_t endio)
+static inline struct bio *
+erofs_grab_bio(struct super_block *sb,
+  erofs_blk_t blkaddr, unsigned int nr_pages,
+  bio_end_io_t endio, bool nofail)
 {
-   gfp_t gfp = GFP_NOIO;
-   struct bio *bio = bio_alloc(gfp, nr_pages);
-
-   if (unlikely(bio == NULL) &&
-   (current->flags & PF_MEMALLOC)) {
-   do {
-   nr_pages /= 2;
-   if (unlikely(!nr_pages)) {
-   bio = bio_alloc(gfp | __GFP_NOFAIL, 1);
-   BUG_ON(bio == NULL);
-   break;
+   const gfp_t gfp = GFP_NOIO;
+   struct bio *bio;
+
+   do {
+   if (nr_pages == 1) {
+   bio = bio_alloc(gfp | (nofail ? __GFP_NOFAIL : 0), 1);
+   if (unlikely(bio == NULL)) {
+   DBG_BUGON(nofail);
+   return ERR_PTR(-ENOMEM);
}
-   bio = bio_alloc(gfp, nr_pages);
-   } while (bio == NULL);
-   }
+   break;
+   }
+   bio = bio_alloc(gfp, nr_pages);
+   nr_pages /= 2;
+   } while (unlikely(bio == NULL));
 
bio->bi_end_io = endio;
bio_set_dev(bio, sb->s_bdev);
diff --git a/drivers/staging/erofs/unzip_vle.c 
b/drivers/staging/erofs/unzip_vle.c
index 8721f0a..375c171 100644
--- a/drivers/staging/erofs/unzip_vle.c
+++ b/drivers/staging/erofs/unzip_vle.c
@@ -1213,8 +1213,8 @@ static bool z_erofs_vle_submit_all(struct super_block *sb,
}
 
if (bio == NULL) {
-   bio = prepare_bio(sb, first_index + i,
-   BIO_MAX_PAGES, z_erofs_vle_read_endio);
+   bio = erofs_grab_bio(sb, first_index + i,
+   BIO_MAX_PAGES, z_erofs_vle_read_endio, true);
bio->bi_private = tagptr_cast_ptr(bi_private);
 
++nr_bios;
-- 
2.7.4



[PREVIEW] [PATCH v3 staging-next 2/8] staging: erofs: separate erofs_get_meta_page

2018-08-13 Thread Gao Xiang via Linux-erofs
From: Gao Xiang 

This patch separates 'erofs_get_meta_page' into 'erofs_get_meta_page'
and 'erofs_get_meta_page_nofail'. The second one ensures that it
should not fail under memory pressure and should make best efforts
if IO errors occur.

It also adds auxiliary variables in order to comply with the 80-character limit.

Signed-off-by: Gao Xiang 
Reviewed-by: Chao Yu 
Signed-off-by: Chao Yu 
---
change log v3:
 - fix as pointed out by Dan Carpenter :
1) Put err_out stuff at the bottom of the function;
2) Drop the question mark in the "truncated by others?";

 - it is unsafe to introduce retry count for the truncated repeat path.

 drivers/staging/erofs/Kconfig |  9 +++
 drivers/staging/erofs/data.c  | 56 ++-
 drivers/staging/erofs/internal.h  | 30 +
 drivers/staging/erofs/unzip_vle.c | 12 +
 drivers/staging/erofs/xattr.c | 23 +---
 5 files changed, 92 insertions(+), 38 deletions(-)

diff --git a/drivers/staging/erofs/Kconfig b/drivers/staging/erofs/Kconfig
index 96f6149..7f209b3 100644
--- a/drivers/staging/erofs/Kconfig
+++ b/drivers/staging/erofs/Kconfig
@@ -78,6 +78,15 @@ config EROFS_FAULT_INJECTION
  Test EROFS to inject faults such as ENOMEM, EIO, and so on.
  If unsure, say N.
 
+config EROFS_FS_IO_MAX_RETRIES
+   int "EROFS IO Maximum Retries"
+   depends on EROFS_FS
+   default "5"
+   help
+ Maximum retry count of IO Errors.
+
+ If unsure, leave the default value (5 retries, 6 IOs at most).
+
 config EROFS_FS_ZIP
bool "EROFS Data Compresssion Support"
depends on EROFS_FS
diff --git a/drivers/staging/erofs/data.c b/drivers/staging/erofs/data.c
index e0c046d..96775de 100644
--- a/drivers/staging/erofs/data.c
+++ b/drivers/staging/erofs/data.c
@@ -39,39 +39,50 @@ static inline void read_endio(struct bio *bio)
 }
 
 /* prio -- true is used for dir */
-struct page *erofs_get_meta_page(struct super_block *sb,
-   erofs_blk_t blkaddr, bool prio)
+struct page *__erofs_get_meta_page(struct super_block *sb,
+   erofs_blk_t blkaddr, bool prio, bool nofail)
 {
-   struct inode *bd_inode = sb->s_bdev->bd_inode;
-   struct address_space *mapping = bd_inode->i_mapping;
+   struct inode *const bd_inode = sb->s_bdev->bd_inode;
+   struct address_space *const mapping = bd_inode->i_mapping;
+   /* prefer retrying in the allocator to blindly looping below */
+   const gfp_t gfp = mapping_gfp_constraint(mapping, ~__GFP_FS) |
+   (nofail ? __GFP_NOFAIL : 0);
+   unsigned int io_retries = nofail ? EROFS_IO_MAX_RETRIES_NOFAIL : 0;
struct page *page;
 
 repeat:
-   page = find_or_create_page(mapping, blkaddr,
-   /*
-* Prefer looping in the allocator rather than here,
-* at least that code knows what it's doing.
-*/
-   mapping_gfp_constraint(mapping, ~__GFP_FS) | __GFP_NOFAIL);
-
-   BUG_ON(!page || !PageLocked(page));
+   page = find_or_create_page(mapping, blkaddr, gfp);
+   if (unlikely(page == NULL)) {
+   DBG_BUGON(nofail);
+   return ERR_PTR(-ENOMEM);
+   }
+   DBG_BUGON(!PageLocked(page));
 
if (!PageUptodate(page)) {
struct bio *bio;
int err;
 
-   bio = erofs_grab_bio(sb, blkaddr, 1, read_endio, true);
+   bio = erofs_grab_bio(sb, blkaddr, 1, read_endio, nofail);
+   if (unlikely(bio == NULL)) {
+   DBG_BUGON(nofail);
+   err = -ENOMEM;
+   goto err_out;
+   }
 
err = bio_add_page(bio, page, PAGE_SIZE, 0);
-   BUG_ON(err != PAGE_SIZE);
+   if (unlikely(err != PAGE_SIZE)) {
+   err = -EFAULT;
+   goto err_out;
+   }
 
__submit_bio(bio, REQ_OP_READ,
REQ_META | (prio ? REQ_PRIO : 0));
 
lock_page(page);
 
-   /* the page has been truncated by others? */
+   /* this page has been truncated by others */
if (unlikely(page->mapping != mapping)) {
+unlock_repeat:
unlock_page(page);
put_page(page);
goto repeat;
@@ -79,13 +90,20 @@ struct page *erofs_get_meta_page(struct super_block *sb,
 
/* more likely a read error */
if (unlikely(!PageUptodate(page))) {
-   unlock_page(page);
-   put_page(page);
-
-   page = ERR_PTR(-EIO);
+   if (io_retries) {
+   --io_retries;
+   goto unlock_repeat;
+   }
+   err = -EIO;
+   goto err_out;
}
}
return page;
+
+err_out:
+   unlock_

[PREVIEW] [PATCH v2 staging-next 3/8] staging: erofs: add error handling for xattr submodule

2018-08-13 Thread Gao Xiang via Linux-erofs
From: Gao Xiang 

This patch enhances the missing error handling code for
xattr submodule, which improves the stability for the rare cases.

Signed-off-by: Gao Xiang 
Reviewed-by: Chao Yu 
Signed-off-by: Chao Yu 
---
change log v2:
 - fix as pointed out by Dan Carpenter :
1) drop all likely/unlikely hints in this patch;

 - documentation and an attempt to avoid propagating `return 1' to
   callers of xattr_foreach will be sent independently in the later
   patchset.

 drivers/staging/erofs/internal.h |   6 +-
 drivers/staging/erofs/xattr.c| 122 ++-
 2 files changed, 85 insertions(+), 43 deletions(-)

diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h
index a756abe..8951e01 100644
--- a/drivers/staging/erofs/internal.h
+++ b/drivers/staging/erofs/internal.h
@@ -485,10 +485,10 @@ struct erofs_map_blocks_iter {
 
 
 static inline struct page *
-erofs_get_inline_page_nofail(struct inode *inode,
-erofs_blk_t blkaddr)
+erofs_get_inline_page(struct inode *inode,
+ erofs_blk_t blkaddr)
 {
-   return erofs_get_meta_page_nofail(inode->i_sb,
+   return erofs_get_meta_page(inode->i_sb,
blkaddr, S_ISDIR(inode->i_mode));
 }
 
diff --git a/drivers/staging/erofs/xattr.c b/drivers/staging/erofs/xattr.c
index 2593c85..2eca6ed 100644
--- a/drivers/staging/erofs/xattr.c
+++ b/drivers/staging/erofs/xattr.c
@@ -24,16 +24,25 @@ struct xattr_iter {
 
 static inline void xattr_iter_end(struct xattr_iter *it, bool atomic)
 {
-   /* only init_inode_xattrs use non-atomic once */
+   /* the only user of kunmap() is 'init_inode_xattrs' */
if (unlikely(!atomic))
kunmap(it->page);
else
kunmap_atomic(it->kaddr);
+
unlock_page(it->page);
put_page(it->page);
 }
 
-static void init_inode_xattrs(struct inode *inode)
+static inline void xattr_iter_end_final(struct xattr_iter *it)
+{
+   if (it->page == NULL)
+   return;
+
+   xattr_iter_end(it, true);
+}
+
+static int init_inode_xattrs(struct inode *inode)
 {
struct xattr_iter it;
unsigned i;
@@ -44,7 +53,7 @@ static void init_inode_xattrs(struct inode *inode)
bool atomic_map;
 
if (likely(inode_has_inited_xattr(inode)))
-   return;
+   return 0;
 
vi = EROFS_V(inode);
BUG_ON(!vi->xattr_isize);
@@ -54,8 +63,9 @@ static void init_inode_xattrs(struct inode *inode)
it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize);
it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);
 
-   it.page = erofs_get_inline_page_nofail(inode, it.blkaddr);
-   BUG_ON(IS_ERR(it.page));
+   it.page = erofs_get_inline_page(inode, it.blkaddr);
+   if (IS_ERR(it.page))
+   return PTR_ERR(it.page);
 
/* read in shared xattr array (non-atomic, see kmalloc below) */
it.kaddr = kmap(it.page);
@@ -64,9 +74,12 @@ static void init_inode_xattrs(struct inode *inode)
ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs);
 
vi->xattr_shared_count = ih->h_shared_count;
-   vi->xattr_shared_xattrs = (unsigned *)kmalloc_array(
-   vi->xattr_shared_count, sizeof(unsigned),
-   GFP_KERNEL | __GFP_NOFAIL);
+   vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
+   sizeof(uint), GFP_KERNEL);
+   if (vi->xattr_shared_xattrs == NULL) {
+   xattr_iter_end(&it, atomic_map);
+   return -ENOMEM;
+   }
 
/* let's skip ibody header */
it.ofs += sizeof(struct erofs_xattr_ibody_header);
@@ -77,9 +90,10 @@ static void init_inode_xattrs(struct inode *inode)
BUG_ON(it.ofs != EROFS_BLKSIZ);
xattr_iter_end(&it, atomic_map);
 
-   it.page = erofs_get_meta_page_nofail(sb,
+   it.page = erofs_get_meta_page(sb,
++it.blkaddr, S_ISDIR(inode->i_mode));
-   BUG_ON(IS_ERR(it.page));
+   if (IS_ERR(it.page))
+   return PTR_ERR(it.page);
 
it.kaddr = kmap_atomic(it.page);
atomic_map = true;
@@ -92,6 +106,7 @@ static void init_inode_xattrs(struct inode *inode)
xattr_iter_end(&it, atomic_map);
 
inode_set_inited_xattr(inode);
+   return 0;
 }
 
 struct xattr_iter_handlers {
@@ -101,19 +116,26 @@ struct xattr_iter_handlers {
void (*value)(struct xattr_iter *, unsigned, char *, unsigned);
 };
 
-static void xattr_iter_fixup(struct xattr_iter *it)
+static inline int xattr_iter_fixup(struct xattr_iter *it)
 {
-   if (unlikely(it->ofs >= EROFS_BLKSIZ)) {
-   xattr_iter_end(it, true);
+   if (it->ofs < EROFS_BLKSIZ)
+   return 0;
+
+   xattr_i

[PREVIEW] [PATCH RESEND staging-next 4/8] staging: erofs: cleanup z_erofs_vle_work_{lookup, register}

2018-08-13 Thread Gao Xiang via Linux-erofs
From: Gao Xiang 

This patch introduces 'struct z_erofs_vle_work_finder' to clean up
arguments of z_erofs_vle_work_lookup and z_erofs_vle_work_register.

Signed-off-by: Gao Xiang 
Reviewed-by: Chao Yu 
Signed-off-by: Chao Yu 
---
 drivers/staging/erofs/unzip_vle.c | 89 +--
 1 file changed, 47 insertions(+), 42 deletions(-)

diff --git a/drivers/staging/erofs/unzip_vle.c 
b/drivers/staging/erofs/unzip_vle.c
index b2e05e2..5032b3b 100644
--- a/drivers/staging/erofs/unzip_vle.c
+++ b/drivers/staging/erofs/unzip_vle.c
@@ -271,36 +271,42 @@ static inline bool try_to_claim_workgroup(
return true;/* lucky, I am the followee :) */
 }
 
+struct z_erofs_vle_work_finder {
+   struct super_block *sb;
+   pgoff_t idx;
+   unsigned pageofs;
+
+   struct z_erofs_vle_workgroup **grp_ret;
+   enum z_erofs_vle_work_role *role;
+   z_erofs_vle_owned_workgrp_t *owned_head;
+   bool *hosted;
+};
+
 static struct z_erofs_vle_work *
-z_erofs_vle_work_lookup(struct super_block *sb,
-   pgoff_t idx, unsigned pageofs,
-   struct z_erofs_vle_workgroup **grp_ret,
-   enum z_erofs_vle_work_role *role,
-   z_erofs_vle_owned_workgrp_t *owned_head,
-   bool *hosted)
+z_erofs_vle_work_lookup(const struct z_erofs_vle_work_finder *f)
 {
bool tag, primary;
struct erofs_workgroup *egrp;
struct z_erofs_vle_workgroup *grp;
struct z_erofs_vle_work *work;
 
-   egrp = erofs_find_workgroup(sb, idx, &tag);
+   egrp = erofs_find_workgroup(f->sb, f->idx, &tag);
if (egrp == NULL) {
-   *grp_ret = NULL;
+   *f->grp_ret = NULL;
return NULL;
}
 
-   *grp_ret = grp = container_of(egrp,
-   struct z_erofs_vle_workgroup, obj);
+   grp = container_of(egrp, struct z_erofs_vle_workgroup, obj);
+   *f->grp_ret = grp;
 
 #ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
-   work = z_erofs_vle_grab_work(grp, pageofs);
+   work = z_erofs_vle_grab_work(grp, f->pageofs);
primary = true;
 #else
BUG();
 #endif
 
-   DBG_BUGON(work->pageofs != pageofs);
+   DBG_BUGON(work->pageofs != f->pageofs);
 
/*
 * lock must be taken first to avoid grp->next == NIL between
@@ -340,29 +346,24 @@ z_erofs_vle_work_lookup(struct super_block *sb,
 */
mutex_lock(&work->lock);
 
-   *hosted = false;
+   *f->hosted = false;
if (!primary)
-   *role = Z_EROFS_VLE_WORK_SECONDARY;
+   *f->role = Z_EROFS_VLE_WORK_SECONDARY;
/* claim the workgroup if possible */
-   else if (try_to_claim_workgroup(grp, owned_head, hosted))
-   *role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
+   else if (try_to_claim_workgroup(grp, f->owned_head, f->hosted))
+   *f->role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
else
-   *role = Z_EROFS_VLE_WORK_PRIMARY;
+   *f->role = Z_EROFS_VLE_WORK_PRIMARY;
 
return work;
 }
 
 static struct z_erofs_vle_work *
-z_erofs_vle_work_register(struct super_block *sb,
- struct z_erofs_vle_workgroup **grp_ret,
- struct erofs_map_blocks *map,
- pgoff_t index, unsigned pageofs,
- enum z_erofs_vle_work_role *role,
- z_erofs_vle_owned_workgrp_t *owned_head,
- bool *hosted)
+z_erofs_vle_work_register(const struct z_erofs_vle_work_finder *f,
+ struct erofs_map_blocks *map)
 {
-   bool newgrp = false;
-   struct z_erofs_vle_workgroup *grp = *grp_ret;
+   bool gnew = false;
+   struct z_erofs_vle_workgroup *grp = *f->grp_ret;
struct z_erofs_vle_work *work;
 
 #ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
@@ -376,7 +377,7 @@ z_erofs_vle_work_register(struct super_block *sb,
if (unlikely(grp == NULL))
return ERR_PTR(-ENOMEM);
 
-   grp->obj.index = index;
+   grp->obj.index = f->idx;
grp->llen = map->m_llen;
 
z_erofs_vle_set_workgrp_fmt(grp,
@@ -386,13 +387,13 @@ z_erofs_vle_work_register(struct super_block *sb,
atomic_set(&grp->obj.refcount, 1);
 
/* new workgrps have been claimed as type 1 */
-   WRITE_ONCE(grp->next, *owned_head);
+   WRITE_ONCE(grp->next, *f->owned_head);
/* primary and followed work for all new workgrps */
-   *role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
+   *f->role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
/* it should be submitted by ourselves */
-   *hosted = true;
+   *f->hosted = true;
 
-   newgrp = true;
+   gnew = true;
 #ifdef CONFIG_EROFS_FS_ZIP_MULTIREF
 skip:
/* currently unimplemented */
@@ -400,12 +401,12 @@ z_erofs_vle_work_register(struct super_block *sb,
 #else
work = z_erofs_vle_grab_primary_work(grp);
 #

[PREVIEW] [PATCH RESEND staging-next 7/8] staging: erofs: fix integer overflow on 32-bit platform

2018-08-13 Thread Gao Xiang via Linux-erofs
From: Gao Xiang 

This patch fixes integer overflow on multiplication
of 32-bit `lcn' in z_erofs_map_blocks_iter.

Signed-off-by: Gao Xiang 
Reviewed-by: Chao Yu 
Signed-off-by: Chao Yu 
---
 drivers/staging/erofs/unzip_vle.c | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/staging/erofs/unzip_vle.c 
b/drivers/staging/erofs/unzip_vle.c
index e31393a..dab8921 100644
--- a/drivers/staging/erofs/unzip_vle.c
+++ b/drivers/staging/erofs/unzip_vle.c
@@ -1513,7 +1513,7 @@ static erofs_off_t vle_get_logical_extent_head(
*flags ^= EROFS_MAP_ZIPPED;
case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
/* clustersize should be a power of two */
-   ofs = ((unsigned long long)lcn << clusterbits) +
+   ofs = ((u64)lcn << clusterbits) +
(le16_to_cpu(di->di_clusterofs) & (clustersize - 1));
*pcn = le32_to_cpu(di->di_u.blkaddr);
break;
@@ -1595,7 +1595,7 @@ int z_erofs_map_blocks_iter(struct inode *inode,
/* by default, compressed */
map->m_flags |= EROFS_MAP_ZIPPED;
 
-   end = (u64)(lcn + 1) * clustersize;
+   end = ((u64)lcn + 1) * clustersize;
 
cluster_type = vle_cluster_type(di);
 
@@ -1611,7 +1611,7 @@ int z_erofs_map_blocks_iter(struct inode *inode,
}
 
if (ofs_rem > logical_cluster_ofs) {
-   ofs = lcn * clustersize | logical_cluster_ofs;
+   ofs = (u64)lcn * clustersize | logical_cluster_ofs;
pcn = le32_to_cpu(di->di_u.blkaddr);
break;
}
@@ -1623,7 +1623,7 @@ int z_erofs_map_blocks_iter(struct inode *inode,
err = -EIO;
goto unmap_out;
}
-   end = (lcn-- * clustersize) | logical_cluster_ofs;
+   end = ((u64)lcn-- * clustersize) | logical_cluster_ofs;
/* fallthrough */
case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
/* get the correspoinding first chunk */
-- 
2.7.4



[PREVIEW] [PATCH RESEND staging-next 8/8] staging: erofs: fix compression mapping beyond EOF

2018-08-13 Thread Gao Xiang via Linux-erofs
From: Gao Xiang 

Logical address of EOF LTP mapping should start at
`inode->i_size' rather than `inode->i_size - 1' to
`m_la(in)', fix it.

Signed-off-by: Gao Xiang 
Reviewed-by: Chao Yu 
Signed-off-by: Chao Yu 
---
 drivers/staging/erofs/unzip_vle.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/staging/erofs/unzip_vle.c 
b/drivers/staging/erofs/unzip_vle.c
index dab8921..6c4c928 100644
--- a/drivers/staging/erofs/unzip_vle.c
+++ b/drivers/staging/erofs/unzip_vle.c
@@ -1548,7 +1548,7 @@ int z_erofs_map_blocks_iter(struct inode *inode,
if (unlikely(map->m_la >= inode->i_size)) {
BUG_ON(!initial);
map->m_llen = map->m_la + 1 - inode->i_size;
-   map->m_la = inode->i_size - 1;
+   map->m_la = inode->i_size;
map->m_flags = 0;
goto out;
}
-- 
2.7.4



[PREVIEW] [PATCH v2 staging-next 6/8] staging: erofs: fix vle_decompressed_index_clusterofs

2018-08-13 Thread Gao Xiang via Linux-erofs
From: Gao Xiang 

This patch adds error handing code, and fixes a missing
endian conversion in vle_decompressed_index_clusterofs.

Signed-off-by: Gao Xiang 
Reviewed-by: Chao Yu 
Signed-off-by: Chao Yu 
---
change log v2:
 - fix as pointed out by Dan Carpenter :
 don't make functions inline

 drivers/staging/erofs/unzip_vle.c | 26 +++---
 1 file changed, 15 insertions(+), 11 deletions(-)

diff --git a/drivers/staging/erofs/unzip_vle.c 
b/drivers/staging/erofs/unzip_vle.c
index f597f07..e31393a 100644
--- a/drivers/staging/erofs/unzip_vle.c
+++ b/drivers/staging/erofs/unzip_vle.c
@@ -1419,24 +1419,24 @@ const struct address_space_operations 
z_erofs_vle_normalaccess_aops = {
 #define vle_cluster_type(di)   \
__vle_cluster_type((di)->di_advise)
 
-static inline unsigned
-vle_compressed_index_clusterofs(unsigned clustersize,
-   struct z_erofs_vle_decompressed_index *di)
+static int
+vle_decompressed_index_clusterofs(unsigned int *clusterofs,
+ unsigned int clustersize,
+ struct z_erofs_vle_decompressed_index *di)
 {
-   debugln("%s, vle=%pK, advise=%x (type %u), clusterofs=%x blkaddr=%x",
-   __func__, di, di->di_advise, vle_cluster_type(di),
-   di->di_clusterofs, di->di_u.blkaddr);
-
switch (vle_cluster_type(di)) {
case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
+   *clusterofs = clustersize;
break;
case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
-   return di->di_clusterofs;
+   *clusterofs = le16_to_cpu(di->di_clusterofs);
+   break;
default:
-   BUG_ON(1);
+   DBG_BUGON(1);
+   return -EIO;
}
-   return clustersize;
+   return 0;
 }
 
 static inline erofs_blk_t
@@ -1581,7 +1581,11 @@ int z_erofs_map_blocks_iter(struct inode *inode,
debugln("%s, lcn %u e_blkaddr %u e_blkoff %u", __func__, lcn,
e_blkaddr, vle_extent_blkoff(inode, lcn));
 
-   logical_cluster_ofs = vle_compressed_index_clusterofs(clustersize, di);
+   err = vle_decompressed_index_clusterofs(&logical_cluster_ofs,
+   clustersize, di);
+   if (unlikely(err))
+   goto unmap_out;
+
if (!initial) {
/* [walking mode] 'map' has been already initialized */
map->m_llen += logical_cluster_ofs;
-- 
2.7.4



[PREVIEW] [PATCH RESEND staging-next 5/8] staging: erofs: rearrange vle clustertype definitions

2018-08-13 Thread Gao Xiang via Linux-erofs
From: Gao Xiang 

This patch moves vle clustertype definitions to erofs_fs.h
since they are part of on-disk format.

It also adds compile time check for Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS

Signed-off-by: Gao Xiang 
Reviewed-by: Chao Yu 
Signed-off-by: Chao Yu 
---
 drivers/staging/erofs/erofs_fs.h  | 11 +++
 drivers/staging/erofs/unzip_vle.c |  8 
 2 files changed, 11 insertions(+), 8 deletions(-)

diff --git a/drivers/staging/erofs/erofs_fs.h b/drivers/staging/erofs/erofs_fs.h
index 2f8e2bf..d4bffa2 100644
--- a/drivers/staging/erofs/erofs_fs.h
+++ b/drivers/staging/erofs/erofs_fs.h
@@ -202,6 +202,14 @@ struct erofs_extent_header {
  *di_u.delta[1] = distance to its corresponding tail cluster
  *(di_advise could be 0, 1 or 2)
  */
+enum {
+   Z_EROFS_VLE_CLUSTER_TYPE_PLAIN,
+   Z_EROFS_VLE_CLUSTER_TYPE_HEAD,
+   Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD,
+   Z_EROFS_VLE_CLUSTER_TYPE_RESERVED,
+   Z_EROFS_VLE_CLUSTER_TYPE_MAX
+};
+
 #define Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS2
 #define Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT 0
 
@@ -260,6 +268,9 @@ static inline void 
erofs_check_ondisk_layout_definitions(void)
BUILD_BUG_ON(sizeof(struct erofs_extent_header) != 16);
BUILD_BUG_ON(sizeof(struct z_erofs_vle_decompressed_index) != 8);
BUILD_BUG_ON(sizeof(struct erofs_dirent) != 12);
+
+   BUILD_BUG_ON(BIT(Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) <
+Z_EROFS_VLE_CLUSTER_TYPE_MAX - 1);
 }
 
 #endif
diff --git a/drivers/staging/erofs/unzip_vle.c 
b/drivers/staging/erofs/unzip_vle.c
index 5032b3b..f597f07 100644
--- a/drivers/staging/erofs/unzip_vle.c
+++ b/drivers/staging/erofs/unzip_vle.c
@@ -1416,14 +1416,6 @@ const struct address_space_operations 
z_erofs_vle_normalaccess_aops = {
 #define __vle_cluster_type(advise) __vle_cluster_advise(advise, \
Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT, Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS)
 
-enum {
-   Z_EROFS_VLE_CLUSTER_TYPE_PLAIN,
-   Z_EROFS_VLE_CLUSTER_TYPE_HEAD,
-   Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD,
-   Z_EROFS_VLE_CLUSTER_TYPE_RESERVED,
-   Z_EROFS_VLE_CLUSTER_TYPE_MAX
-};
-
 #define vle_cluster_type(di)   \
__vle_cluster_type((di)->di_advise)
 
-- 
2.7.4



[PREVIEW] [PATCH v3 staging-next 3/8] staging: erofs: add error handling for xattr submodule

2018-08-13 Thread Gao Xiang via Linux-erofs
From: Gao Xiang 

This patch enhances the missing error handling code for
xattr submodule, which improves the stability for the rare cases.

Signed-off-by: Gao Xiang 
Reviewed-by: Chao Yu 
Signed-off-by: Chao Yu 
---
change log v3:
 - fix bare use of 'unsigned *' at drivers/staging/erofs/xattr.c:168.

change log v2:
 - fix as pointed out by Dan Carpenter :
1) drop all likely/unlikely hints in this patch;

 - documentation and an attempt to avoid propagating `return 1' to
   callers of xattr_foreach will be sent independently in the later
   patchset.

 drivers/staging/erofs/internal.h |   6 +-
 drivers/staging/erofs/xattr.c| 122 ++-
 2 files changed, 85 insertions(+), 43 deletions(-)

diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h
index a756abe..8951e01 100644
--- a/drivers/staging/erofs/internal.h
+++ b/drivers/staging/erofs/internal.h
@@ -485,10 +485,10 @@ struct erofs_map_blocks_iter {
 
 
 static inline struct page *
-erofs_get_inline_page_nofail(struct inode *inode,
-erofs_blk_t blkaddr)
+erofs_get_inline_page(struct inode *inode,
+ erofs_blk_t blkaddr)
 {
-   return erofs_get_meta_page_nofail(inode->i_sb,
+   return erofs_get_meta_page(inode->i_sb,
blkaddr, S_ISDIR(inode->i_mode));
 }
 
diff --git a/drivers/staging/erofs/xattr.c b/drivers/staging/erofs/xattr.c
index 2593c85..2eca6ed 100644
--- a/drivers/staging/erofs/xattr.c
+++ b/drivers/staging/erofs/xattr.c
@@ -24,16 +24,25 @@ struct xattr_iter {
 
 static inline void xattr_iter_end(struct xattr_iter *it, bool atomic)
 {
-   /* only init_inode_xattrs use non-atomic once */
+   /* the only user of kunmap() is 'init_inode_xattrs' */
if (unlikely(!atomic))
kunmap(it->page);
else
kunmap_atomic(it->kaddr);
+
unlock_page(it->page);
put_page(it->page);
 }
 
-static void init_inode_xattrs(struct inode *inode)
+static inline void xattr_iter_end_final(struct xattr_iter *it)
+{
+   if (it->page == NULL)
+   return;
+
+   xattr_iter_end(it, true);
+}
+
+static int init_inode_xattrs(struct inode *inode)
 {
struct xattr_iter it;
unsigned i;
@@ -44,7 +53,7 @@ static void init_inode_xattrs(struct inode *inode)
bool atomic_map;
 
if (likely(inode_has_inited_xattr(inode)))
-   return;
+   return 0;
 
vi = EROFS_V(inode);
BUG_ON(!vi->xattr_isize);
@@ -54,8 +63,9 @@ static void init_inode_xattrs(struct inode *inode)
it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize);
it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);
 
-   it.page = erofs_get_inline_page_nofail(inode, it.blkaddr);
-   BUG_ON(IS_ERR(it.page));
+   it.page = erofs_get_inline_page(inode, it.blkaddr);
+   if (IS_ERR(it.page))
+   return PTR_ERR(it.page);
 
/* read in shared xattr array (non-atomic, see kmalloc below) */
it.kaddr = kmap(it.page);
@@ -64,9 +74,12 @@ static void init_inode_xattrs(struct inode *inode)
ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs);
 
vi->xattr_shared_count = ih->h_shared_count;
-   vi->xattr_shared_xattrs = (unsigned *)kmalloc_array(
-   vi->xattr_shared_count, sizeof(unsigned),
-   GFP_KERNEL | __GFP_NOFAIL);
+   vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
+   sizeof(uint), GFP_KERNEL);
+   if (vi->xattr_shared_xattrs == NULL) {
+   xattr_iter_end(&it, atomic_map);
+   return -ENOMEM;
+   }
 
/* let's skip ibody header */
it.ofs += sizeof(struct erofs_xattr_ibody_header);
@@ -77,9 +90,10 @@ static void init_inode_xattrs(struct inode *inode)
BUG_ON(it.ofs != EROFS_BLKSIZ);
xattr_iter_end(&it, atomic_map);
 
-   it.page = erofs_get_meta_page_nofail(sb,
+   it.page = erofs_get_meta_page(sb,
++it.blkaddr, S_ISDIR(inode->i_mode));
-   BUG_ON(IS_ERR(it.page));
+   if (IS_ERR(it.page))
+   return PTR_ERR(it.page);
 
it.kaddr = kmap_atomic(it.page);
atomic_map = true;
@@ -92,6 +106,7 @@ static void init_inode_xattrs(struct inode *inode)
xattr_iter_end(&it, atomic_map);
 
inode_set_inited_xattr(inode);
+   return 0;
 }
 
 struct xattr_iter_handlers {
@@ -101,19 +116,26 @@ struct xattr_iter_handlers {
void (*value)(struct xattr_iter *, unsigned, char *, unsigned);
 };
 
-static void xattr_iter_fixup(struct xattr_iter *it)
+static inline int xattr_iter_fixup(struct xattr_iter *it)
 {
-   if (unlikely(it->ofs >= EROFS_BLKSIZ)) {
-   xattr_iter_end(it, 

[PREVIEW] [PATCH 1/2] staging: erofs: add some comments for xattr subsystem

2018-08-13 Thread Gao Xiang via Linux-erofs
As Dan Carpenter pointed out, it is better to document what the
return values of the callbacks in `struct xattr_iter_handlers'
mean, and why it->ofs is increased regardless of success or
failure in `xattr_foreach'.

Suggested-by: Dan Carpenter 
Signed-off-by: Gao Xiang 
---
 drivers/staging/erofs/xattr.c | 13 -
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/drivers/staging/erofs/xattr.c b/drivers/staging/erofs/xattr.c
index 2eca6ed..4bb7d9e 100644
--- a/drivers/staging/erofs/xattr.c
+++ b/drivers/staging/erofs/xattr.c
@@ -109,6 +109,13 @@ static int init_inode_xattrs(struct inode *inode)
return 0;
 }
 
+/*
+ * the general idea for these return values is
+ * if0 is returned, go on processing the current xattr;
+ *   1 (> 0) is returned, skip this round to process the next xattr;
+ *-err (< 0) is returned, an error (maybe ENOXATTR) occurred
+ *and need to be handled
+ */
 struct xattr_iter_handlers {
int (*entry)(struct xattr_iter *, struct erofs_xattr_entry *);
int (*name)(struct xattr_iter *, unsigned, char *, unsigned);
@@ -164,6 +171,10 @@ static int inline_xattr_iter_begin(struct xattr_iter *it,
return vi->xattr_isize - xattr_header_sz;
 }
 
+/*
+ * Regardless of success or failure, `xattr_foreach' will end up with
+ * `ofs' pointing to the next xattr item rather than an arbitrary position.
+ */
 static int xattr_foreach(struct xattr_iter *it,
const struct xattr_iter_handlers *op, unsigned *tlimit)
 {
@@ -255,7 +266,7 @@ static int xattr_foreach(struct xattr_iter *it,
}
 
 out:
-   /* we assume that ofs is aligned with 4 bytes */
+   /* xattrs should be 4-byte aligned (on-disk constraint) */
it->ofs = EROFS_XATTR_ALIGN(it->ofs);
return err;
 }
-- 
2.7.4



[PREVIEW] [RFC PATCH 2/2] staging: erofs: simplify return value of xattr_foreach

2018-08-13 Thread Gao Xiang via Linux-erofs
As Dan Carpenter pointed out, there is no need to propagate positive
return values back to its callers.

Suggested-by: Dan Carpenter 
Signed-off-by: Gao Xiang 
---

If there is some error, please help point it out.
And any comments are welcomed. :)

Thanks,
Gao Xiang

 drivers/staging/erofs/xattr.c | 24 +---
 1 file changed, 9 insertions(+), 15 deletions(-)

diff --git a/drivers/staging/erofs/xattr.c b/drivers/staging/erofs/xattr.c
index 4bb7d9e..93a6559 100644
--- a/drivers/staging/erofs/xattr.c
+++ b/drivers/staging/erofs/xattr.c
@@ -268,7 +268,7 @@ static int xattr_foreach(struct xattr_iter *it,
 out:
/* xattrs should be 4-byte aligned (on-disk constraint) */
it->ofs = EROFS_XATTR_ALIGN(it->ofs);
-   return err;
+   return err < 0 ? err : 0;
 }
 
 struct getxattr_iter {
@@ -333,15 +333,12 @@ static int inline_getxattr(struct inode *inode, struct 
getxattr_iter *it)
remaining = ret;
while (remaining) {
ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);
-   if (ret >= 0)
-   break;
-
-   if (ret != -ENOATTR)/* -ENOMEM, -EIO, etc. */
+   if (ret != -ENOATTR)
break;
}
xattr_iter_end_final(&it->it);
 
-   return ret < 0 ? ret : it->buffer_size;
+   return ret ? ret : it->buffer_size;
 }
 
 static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
@@ -371,16 +368,13 @@ static int shared_getxattr(struct inode *inode, struct 
getxattr_iter *it)
}
 
ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL);
-   if (ret >= 0)
-   break;
-
-   if (ret != -ENOATTR)/* -ENOMEM, -EIO, etc. */
+   if (ret != -ENOATTR)
break;
}
if (vi->xattr_shared_count)
xattr_iter_end_final(&it->it);
 
-   return ret < 0 ? ret : it->buffer_size;
+   return ret ? ret : it->buffer_size;
 }
 
 static bool erofs_xattr_user_list(struct dentry *dentry)
@@ -567,11 +561,11 @@ static int inline_listxattr(struct listxattr_iter *it)
remaining = ret;
while (remaining) {
ret = xattr_foreach(&it->it, &list_xattr_handlers, &remaining);
-   if (ret < 0)
+   if (ret)
break;
}
xattr_iter_end_final(&it->it);
-   return ret < 0 ? ret : it->buffer_ofs;
+   return ret ? ret : it->buffer_ofs;
 }
 
 static int shared_listxattr(struct listxattr_iter *it)
@@ -601,13 +595,13 @@ static int shared_listxattr(struct listxattr_iter *it)
}
 
ret = xattr_foreach(&it->it, &list_xattr_handlers, NULL);
-   if (ret < 0)
+   if (ret)
break;
}
if (vi->xattr_shared_count)
xattr_iter_end_final(&it->it);
 
-   return ret < 0 ? ret : it->buffer_ofs;
+   return ret ? ret : it->buffer_ofs;
 }
 
 ssize_t erofs_listxattr(struct dentry *dentry,
-- 
2.7.4



Re: [PREVIEW] [PATCH RESEND staging-next 1/8] staging: erofs: introduce erofs_grab_bio

2018-08-13 Thread Gao Xiang
Hi Chao,

Could you please review again whether these `[PREVIEW]' patches are OK...
And could you help send them to the staging mailing list and LKML when you are free...

Thanks in advance...

Thanks,
Gao Xiang


Re: [PREVIEW] [PATCH RESEND staging-next 1/8] staging: erofs: introduce erofs_grab_bio

2018-08-13 Thread Gao Xiang
Hi Chao,

(sorry about the email client...)

Could you please review again whether these `[PREVIEW]' patches are OK...
And could you help send them to the staging mailing list and LKML when you are free...

Thanks in advance...

Thanks,
Gao Xiang