This prevents the lockdep report (and the potential deadlock it warns
about) if we get unlucky while traversing the queues.
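
For context, a minimal sketch of why the _bh lock variants matter here
(the helper name is made up for illustration and is not part of this
patch): the receive path can queue skbs into the same sk_buff_head from
softirq context, so taking the queue lock with plain spin_lock() in
process context can deadlock if a softirq interrupts the lock holder on
the same CPU and tries to take the lock again. Disabling bottom halves
around the critical section avoids that:

	#include <linux/skbuff.h>
	#include <linux/spinlock.h>

	/* Hypothetical helper illustrating the locking pattern used below. */
	static int count_queued_skbs(struct sk_buff_head *queue)
	{
		struct sk_buff *skb;
		int count = 0;

		/* Disable bottom halves so the softirq receive path cannot
		 * run on this CPU and contend for queue->lock while we
		 * hold it. */
		spin_lock_bh(&queue->lock);
		skb_queue_walk(queue, skb)
			count++;
		spin_unlock_bh(&queue->lock);

		return count;
	}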

Signed-off-by: Dan Smith <[email protected]>
---
 net/checkpoint.c |    8 ++++----
 1 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/net/checkpoint.c b/net/checkpoint.c
index 0eb8860..9197671 100644
--- a/net/checkpoint.c
+++ b/net/checkpoint.c
@@ -44,10 +44,10 @@ static int sock_copy_buffers(struct sk_buff_head *from,
 
        *total_bytes = 0;
 
-       spin_lock(&from->lock);
+       spin_lock_bh(&from->lock);
        skb_queue_walk(from, skb)
                count1++;
-       spin_unlock(&from->lock);
+       spin_unlock_bh(&from->lock);
 
        skbs = kzalloc(sizeof(*skbs) * count1, GFP_KERNEL);
        if (!skbs)
@@ -60,7 +60,7 @@ static int sock_copy_buffers(struct sk_buff_head *from,
        }
 
        i = 0;
-       spin_lock(&from->lock);
+       spin_lock_bh(&from->lock);
        skb_queue_walk(from, skb) {
                if (++count2 > count1)
                        break; /* The queue changed as we read it */
@@ -72,7 +72,7 @@ static int sock_copy_buffers(struct sk_buff_head *from,
                *total_bytes += skb->len;
                i++;
        }
-       spin_unlock(&from->lock);
+       spin_unlock_bh(&from->lock);
 
        if (count1 != count2)
                goto err;
-- 
1.6.2.5
