Currently, shmem_charge() updates the inode and shmem_inode_info before
verifying that used_blocks will not exceed max_blocks, and undoes the
update when the limit would be exceeded. Switch the order: verify the
blocks count first, and only then update the inode and shmem_inode_info,
so the rollback path is no longer needed.
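
For illustration, a condensed userspace model of the reordered flow is
below. It is only a sketch: the struct, the acct_block()/unacct_block()
helpers and the plain counters stand in for the real inode and
shmem_inode_info fields, shmem_acct_block()/shmem_unacct_blocks() and
the percpu counter; the point is just that the checks that can fail run
before any state is modified.

/*
 * Simplified model of the reordered charge path: account, check the
 * block limit, and only then touch the per-inode bookkeeping, so no
 * undo of the inode/shmem_inode_info updates is ever required.
 */
#include <stdbool.h>
#include <stdio.h>

struct dummy_inode {
	long alloced;	/* models shmem_inode_info::alloced */
	long i_blocks;	/* models inode::i_blocks           */
	long nrpages;	/* models i_mapping->nrpages        */
};

static long used_blocks;
static long max_blocks = 8;	/* models sbinfo->max_blocks */

static bool acct_block(long pages)   { (void)pages; return true; }
static void unacct_block(long pages) { (void)pages; }

static bool charge(struct dummy_inode *inode, long pages)
{
	if (!acct_block(pages))
		return false;

	/* verify the limit *before* modifying any inode state */
	if (max_blocks && used_blocks + pages > max_blocks) {
		unacct_block(pages);
		return false;
	}
	used_blocks += pages;

	/* only now update the per-inode bookkeeping */
	inode->alloced  += pages;
	inode->i_blocks += pages;
	inode->nrpages  += pages;
	return true;
}

int main(void)
{
	struct dummy_inode inode = { 0 };

	printf("charge 4: %d\n", charge(&inode, 4));	/* fits, succeeds  */
	printf("charge 8: %d\n", charge(&inode, 8));	/* over limit, fails */
	return 0;
}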

Signed-off-by: Mike Rapoport <r...@linux.vnet.ibm.com>
---
 mm/shmem.c | 25 ++++++++++++-------------
 1 file changed, 12 insertions(+), 13 deletions(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index e67d6ba..40a43ae 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -265,6 +265,14 @@ bool shmem_charge(struct inode *inode, long pages)
 
        if (shmem_acct_block(info->flags, pages))
                return false;
+
+       if (sbinfo->max_blocks) {
+               if (percpu_counter_compare(&sbinfo->used_blocks,
+                                          sbinfo->max_blocks - pages) > 0)
+                       goto unacct;
+               percpu_counter_add(&sbinfo->used_blocks, pages);
+       }
+
        spin_lock_irqsave(&info->lock, flags);
        info->alloced += pages;
        inode->i_blocks += pages * BLOCKS_PER_PAGE;
@@ -272,20 +280,11 @@ bool shmem_charge(struct inode *inode, long pages)
        spin_unlock_irqrestore(&info->lock, flags);
        inode->i_mapping->nrpages += pages;
 
-       if (!sbinfo->max_blocks)
-               return true;
-       if (percpu_counter_compare(&sbinfo->used_blocks,
-                               sbinfo->max_blocks - pages) > 0) {
-               inode->i_mapping->nrpages -= pages;
-               spin_lock_irqsave(&info->lock, flags);
-               info->alloced -= pages;
-               shmem_recalc_inode(inode);
-               spin_unlock_irqrestore(&info->lock, flags);
-               shmem_unacct_blocks(info->flags, pages);
-               return false;
-       }
-       percpu_counter_add(&sbinfo->used_blocks, pages);
        return true;
+
+unacct:
+       shmem_unacct_blocks(info->flags, pages);
+       return false;
 }
 
 void shmem_uncharge(struct inode *inode, long pages)
-- 
2.7.4
