On 03/14/2016 04:19 PM, Dmitry Safonov wrote:
From: Peter Zijlstra <[email protected]>
Peter, sorry for including you in this backport mail; I forgot to use
--suppress-cc=author. Apologies for the noise.

Instead of using comments in an attempt at getting the locking right,
use proper assertions that actively warn you if you got it wrong.

Also add extra braces in a few sites to comply with coding-style.
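
For context (not part of the patch itself), the pattern applied throughout
is roughly the sketch below, with made-up struct and function names. With
CONFIG_LOCKDEP enabled, lockdep_assert_held() warns at runtime when the
caller does not hold the expected lock, whereas a "must be held" comment
can silently go stale:

#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/spinlock.h>

/* Hypothetical node, mirroring kmem_cache_node's list_lock + full list. */
struct example_node {
	spinlock_t		list_lock;
	struct list_head	full;
};

static void example_add_full(struct example_node *n, struct list_head *item)
{
	/* Splats if the caller forgot to take list_lock (lockdep builds only). */
	lockdep_assert_held(&n->list_lock);

	list_add(item, &n->full);
}

Callers are expected to wrap such helpers in
spin_lock_irqsave(&n->list_lock, flags) / spin_unlock_irqrestore(), exactly
as __slab_free() already does.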

Signed-off-by: Peter Zijlstra <[email protected]>
Signed-off-by: Pekka Enberg <[email protected]>

[ported from ms, resolved conflict with commit 345c905d13a4
("slub: Make cpu partial slab support configurable")]
Signed-off-by: Dmitry Safonov <[email protected]>
---
  mm/slub.c | 40 ++++++++++++++++++++--------------------
  1 file changed, 20 insertions(+), 20 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 2f07a63..93068da 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -956,23 +956,22 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
/*
   * Tracking of fully allocated slabs for debugging purposes.
- *
- * list_lock must be held.
   */
  static void add_full(struct kmem_cache *s,
        struct kmem_cache_node *n, struct page *page)
  {
+       lockdep_assert_held(&n->list_lock);
+
        if (!(s->flags & SLAB_STORE_USER))
                return;
        list_add(&page->lru, &n->full);
  }
-/*
- * list_lock must be held.
- */
-static void remove_full(struct kmem_cache *s, struct page *page)
+static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
  {
+       lockdep_assert_held(&n->list_lock);
+
        if (!(s->flags & SLAB_STORE_USER))
                return;
@@ -1220,7 +1219,8 @@ static inline int check_object(struct kmem_cache *s, struct page *page,
                        void *object, u8 val) { return 1; }
  static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
                                        struct page *page) {}
-static inline void remove_full(struct kmem_cache *s, struct page *page) {}
+static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
+                                       struct page *page) {}
  static inline unsigned long kmem_cache_flags(unsigned long object_size,
        unsigned long flags, const char *name,
        void (*ctor)(void *))
@@ -1507,12 +1507,12 @@ static void discard_slab(struct kmem_cache *s, struct page *page)
/*
   * Management of partially allocated slabs.
- *
- * list_lock must be held.
   */
  static inline void add_partial(struct kmem_cache_node *n,
                                struct page *page, int tail)
  {
+       lockdep_assert_held(&n->list_lock);
+
        n->nr_partial++;
        if (tail == DEACTIVATE_TO_TAIL)
                list_add_tail(&page->lru, &n->partial);
@@ -1520,12 +1520,11 @@ static inline void add_partial(struct kmem_cache_node *n,
                list_add(&page->lru, &n->partial);
  }
-/*
- * list_lock must be held.
- */
  static inline void remove_partial(struct kmem_cache_node *n,
                                        struct page *page)
  {
+       lockdep_assert_held(&n->list_lock);
+
        list_del(&page->lru);
        n->nr_partial--;
  }
@@ -1535,8 +1534,6 @@ static inline void remove_partial(struct kmem_cache_node *n,
   * return the pointer to the freelist.
   *
   * Returns a list of objects or NULL if it fails.
- *
- * Must hold list_lock since we modify the partial list.
   */
  static inline void *acquire_slab(struct kmem_cache *s,
                struct kmem_cache_node *n, struct page *page,
@@ -1546,6 +1543,8 @@ static inline void *acquire_slab(struct kmem_cache *s,
        unsigned long counters;
        struct page new;
+       lockdep_assert_held(&n->list_lock);
+
        /*
         * Zap the freelist and set the frozen bit.
         * The old freelist is the list of objects for the
@@ -1893,7 +1892,7 @@ redo:
                else if (l == M_FULL)
-                       remove_full(s, page);
+                       remove_full(s, n, page);
                if (m == M_PARTIAL) {
@@ -2559,7 +2558,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
                new.inuse--;
                if ((!new.inuse || !prior) && !was_frozen) {
-                       if (!kmem_cache_debug(s) && !prior)
+                       if (!kmem_cache_debug(s) && !prior) {
                                /*
                                 * Slab was on no list before and will be partially empty
@@ -2567,7 +2566,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
                                 */
                                new.frozen = 1;
-                       else { /* Needs to be taken off a list */
+                       } else { /* Needs to be taken off a list */
                                n = get_node(s, page_to_nid(page));
                                /*
@@ -2615,7 +2614,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
         * then add it.
         */
        if (kmem_cache_debug(s) && unlikely(!prior)) {
-               remove_full(s, page);
+               remove_full(s, n, page);
                add_partial(n, page, DEACTIVATE_TO_TAIL);
                stat(s, FREE_ADD_PARTIAL);
        }
@@ -2629,9 +2628,10 @@ slab_empty:
                 */
                remove_partial(n, page);
                stat(s, FREE_REMOVE_PARTIAL);
-       } else
+       } else {
                /* Slab must be on the full list */
-               remove_full(s, page);
+               remove_full(s, n, page);
+       }
        spin_unlock_irqrestore(&n->list_lock, flags);
        stat(s, FREE_SLAB);


--
Regards,
Dmitry Safonov

_______________________________________________
Devel mailing list
[email protected]
https://lists.openvz.org/mailman/listinfo/devel