The commit is pushed to "branch-rh7-3.10.0-327.10.1.vz7.12.x-ovz" and will 
appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-327.10.1.vz7.12.7
------>
commit 11ebbfd6d14abc7bbd38cf0af5a1ebd3f504517e
Author: Maxim Patlasov <[email protected]>
Date:   Tue Apr 12 14:24:14 2016 +0400

    cbt: factor out alloc_page
    
    Patchset description:
    
    cbt: fix page allocation and add blk_cbt_map_copy_once() helper
    
    The first part of the patch set fixes a bug: it is not valid to call
    alloc_page() with irqs disabled (the disabling is hidden inside on_each_cpu() internals).
    
    The remaining part (last patch) implements blk_cbt_map_copy_once() --
    a helper for ploop making a private "snapshot" copy of CBT mask.
    
    https://jira.sw.ru/browse/PSBM-45000
    
    Maxim Patlasov (5):
          cbt: factor out alloc_page
          cbt: introduce CBT_PAGE_MISSED
          cbt: make __blk_cbt_set() smarter
          cbt: fix page allocation
          cbt: add blk_cbt_map_copy_once() helper
    
    =======================
    This patch description:
    
    The patch moves the logic of allocating a new page for cbt->map[idx] into
    a separate function. It will be used in further patches.
    
    Signed-off-by: Maxim Patlasov <[email protected]>
    Acked-by: Dmitry Monakhov <[email protected]>
---
 block/blk-cbt.c | 98 ++++++++++++++++++++++++++++++++++++---------------------
 1 file changed, 62 insertions(+), 36 deletions(-)

diff --git a/block/blk-cbt.c b/block/blk-cbt.c
index b66f513..08cefd8 100644
--- a/block/blk-cbt.c
+++ b/block/blk-cbt.c
@@ -76,6 +76,61 @@ static void set_bits(void *bm, int cur, int len, bool is_set)
        }
 }
 
+/*
+ * Return values:
+ * 0 if OK,
+ * -EAGAIN if cbt was updated,
+ * -EBADF if cbt is dead,
+ * -ENOMEM if alloc_page failed.
+ */
+static int cbt_page_alloc(struct cbt_info  **cbt_pp, unsigned long idx,
+                         int in_rcu)
+{
+       struct cbt_info  *cbt = *cbt_pp;
+       struct page *page;
+
+       /* Page not allocated yet. Synchronization required */
+       spin_lock_irq(&cbt->lock);
+       if (likely(!test_bit(CBT_DEAD, &cbt->flags))) {
+               cbt->count++;
+       } else {
+               struct cbt_info *new = rcu_dereference(cbt->queue->cbt);
+
+               spin_unlock_irq(&cbt->lock);
+               /* was cbt updated ? */
+               if (new != cbt) {
+                       *cbt_pp = new;
+                       return -EAGAIN;
+               } else {
+                       return -EBADF;
+               }
+       }
+       spin_unlock_irq(&cbt->lock);
+       if (in_rcu)
+               rcu_read_unlock();
+       page = alloc_page(GFP_NOIO|__GFP_ZERO);
+       if (in_rcu)
+               rcu_read_lock();
+       spin_lock_irq(&cbt->lock);
+       if (unlikely(!cbt->count-- && test_bit(CBT_DEAD, &cbt->flags))) {
+               spin_unlock_irq(&cbt->lock);
+               call_rcu(&cbt->rcu, &cbt_release_callback);
+               if (page)
+                       __free_page(page);
+               return -EBADF;
+       }
+       if (unlikely(!page)) {
+               set_bit(CBT_ERROR, &cbt->flags);
+               spin_unlock_irq(&cbt->lock);
+               return -ENOMEM;
+       }
+       cbt->map[idx] = page;
+       page = NULL;
+       spin_unlock_irq(&cbt->lock);
+
+       return 0;
+}
+
 static int __blk_cbt_set(struct cbt_info  *cbt, blkcnt_t block,
                          blkcnt_t count, bool in_rcu, bool set)
 {
@@ -95,6 +150,7 @@ static int __blk_cbt_set(struct cbt_info  *cbt, blkcnt_t 
block,
                unsigned long off = block & (BITS_PER_PAGE -1);
                unsigned long len = min_t(unsigned long, BITS_PER_PAGE - off,
                                          count);
+               int ret;
 
                page = cbt->map[idx];
                if (page) {
@@ -112,44 +168,14 @@ static int __blk_cbt_set(struct cbt_info  *cbt, blkcnt_t 
block,
                                continue;
                        }
                }
-               /* Page not allocated yet. Synchronization required */
-               spin_lock_irq(&cbt->lock);
-               if (likely(!test_bit(CBT_DEAD, &cbt->flags))) {
-                       cbt->count++;
-               } else {
-                       struct cbt_info *new = rcu_dereference(cbt->queue->cbt);
 
-                       spin_unlock_irq(&cbt->lock);
-                       /* was cbt updated ? */
-                       if (new != cbt) {
-                               cbt = new;
-                               continue;
-                       } else {
-                               break;
-                       }
-               }
-               spin_unlock_irq(&cbt->lock);
-               if (in_rcu)
-                       rcu_read_unlock();
-               page = alloc_page(GFP_NOIO|__GFP_ZERO);
-               if (in_rcu)
-                       rcu_read_lock();
-               spin_lock_irq(&cbt->lock);
-               if (unlikely(!cbt->count-- && test_bit(CBT_DEAD, &cbt->flags))) 
{
-                       spin_unlock_irq(&cbt->lock);
-                       call_rcu(&cbt->rcu, &cbt_release_callback);
-                       if (page)
-                               __free_page(page);
+               ret = cbt_page_alloc(&cbt, idx, in_rcu);
+               if (ret == -EAGAIN) /* new cbt */
+                       continue;
+               else if (ret == -EBADF) /* dead cbt */
                        break;
-               }
-               if (unlikely(!page)) {
-                       set_bit(CBT_ERROR, &cbt->flags);
-                       spin_unlock_irq(&cbt->lock);
-                       return -ENOMEM;
-               }
-               cbt->map[idx] = page;
-               page = NULL;
-               spin_unlock_irq(&cbt->lock);
+               else if (ret)
+                       return ret;
        }
        return 0;
 }
_______________________________________________
Devel mailing list
[email protected]
https://lists.openvz.org/mailman/listinfo/devel

Reply via email to