Re: [PATCH v5 6/7] slab: Use slab_list instead of lru

2019-04-03, Roman Gushchin
On Wed, Apr 03, 2019 at 10:05:44AM +1100, Tobin C. Harding wrote:
> Currently we use the page->lru list for maintaining lists of slabs.  We
> have a list in the page structure (slab_list) that can be used for this
> purpose.  Doing so makes the code cleaner since we are not overloading
> the lru list.
> 
> Use the slab_list instead of the lru list for maintaining lists of
> slabs.
> 
> Signed-off-by: Tobin C. Harding 

Reviewed-by: Roman Gushchin 


Re: [PATCH v5 6/7] slab: Use slab_list instead of lru

2019-04-03, Christopher Lameter


Acked-by: Christoph Lameter 


[PATCH v5 6/7] slab: Use slab_list instead of lru

2019-04-02, Tobin C. Harding
Currently we use the page->lru list for maintaining lists of slabs.  We
have a list in the page structure (slab_list) that can be used for this
purpose.  Doing so makes the code cleaner since we are not overloading
the lru list.

Use the slab_list instead of the lru list for maintaining lists of
slabs.
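
For context, slab_list shares storage with lru inside the union in
struct page, so no layout or behaviour changes here; the patch only
makes the intent of the code explicit.  An abridged sketch of the
relevant part of struct page (trimmed from include/linux/mm_types.h;
see the header for the full definition):

	struct page {
		unsigned long flags;
		union {
			struct {	/* Page cache and anonymous pages */
				struct list_head lru;
				/* ... */
			};
			struct {	/* slab, slob and slub */
				union {
					struct list_head slab_list;	/* uses lru */
					/* ... partial-page fields ... */
				};
				struct kmem_cache *slab_cache;
				/* ... */
			};
			/* ... other users of this union ... */
		};
		/* ... */
	};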

Signed-off-by: Tobin C. Harding 
---
 mm/slab.c | 49 +++++++++++++++++++++++++------------------------
 1 file changed, 25 insertions(+), 24 deletions(-)
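
The conversion is mechanical because the kernel's intrusive list
helpers (list_add(), list_del(), list_for_each_entry(), ...) take the
link member by name, so only the member name at each call site changes.
Below is a minimal standalone sketch of that pattern in plain C; the
toy_page type and the stripped-down list helpers are illustrative
stand-ins, not the kernel implementation.

	/*
	 * Userspace sketch: intrusive lists address the link member by
	 * name, so renaming the member is purely mechanical.
	 */
	#include <stddef.h>
	#include <stdio.h>

	struct list_head { struct list_head *prev, *next; };

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	static void list_init(struct list_head *head)
	{
		head->prev = head->next = head;
	}

	static void list_add_tail(struct list_head *new, struct list_head *head)
	{
		new->prev = head->prev;
		new->next = head;
		head->prev->next = new;
		head->prev = new;
	}

	/* Toy stand-in for struct page: one link, two possible names. */
	struct toy_page {
		union {
			struct list_head lru;
			struct list_head slab_list;	/* same bytes as lru */
		};
		int id;
	};

	int main(void)
	{
		struct list_head slabs_free;
		struct toy_page a = { .id = 1 }, b = { .id = 2 };
		struct list_head *p;

		list_init(&slabs_free);
		list_add_tail(&a.slab_list, &slabs_free);
		list_add_tail(&b.slab_list, &slabs_free);

		/* Walk via slab_list, as the patched slab code now does. */
		for (p = slabs_free.next; p != &slabs_free; p = p->next)
			printf("page %d\n",
			       container_of(p, struct toy_page, slab_list)->id);

		return 0;
	}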

diff --git a/mm/slab.c b/mm/slab.c
index 329bfe67f2ca..09e2a0131338 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1710,8 +1710,8 @@ static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
 {
struct page *page, *n;
 
-   list_for_each_entry_safe(page, n, list, lru) {
-   list_del(&page->lru);
+   list_for_each_entry_safe(page, n, list, slab_list) {
+   list_del(&page->slab_list);
slab_destroy(cachep, page);
}
 }
@@ -2267,8 +2267,8 @@ static int drain_freelist(struct kmem_cache *cache,
goto out;
}
 
-   page = list_entry(p, struct page, lru);
-   list_del(&page->lru);
+   page = list_entry(p, struct page, slab_list);
+   list_del(&page->slab_list);
n->free_slabs--;
n->total_slabs--;
/*
@@ -2728,13 +2728,13 @@ static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
if (!page)
return;
 
-   INIT_LIST_HEAD(&page->lru);
+   INIT_LIST_HEAD(&page->slab_list);
n = get_node(cachep, page_to_nid(page));
 
spin_lock(&n->list_lock);
n->total_slabs++;
if (!page->active) {
-   list_add_tail(&page->lru, &(n->slabs_free));
+   list_add_tail(&page->slab_list, &n->slabs_free);
n->free_slabs++;
} else
fixup_slab_list(cachep, n, page, &list);
@@ -2843,9 +2843,9 @@ static inline void fixup_slab_list(struct kmem_cache *cachep,
void **list)
 {
/* move slabp to correct slabp list: */
-   list_del(&page->lru);
+   list_del(&page->slab_list);
if (page->active == cachep->num) {
-   list_add(&page->lru, &n->slabs_full);
+   list_add(&page->slab_list, &n->slabs_full);
if (OBJFREELIST_SLAB(cachep)) {
 #if DEBUG
/* Poisoning will be done without holding the lock */
@@ -2859,7 +2859,7 @@ static inline void fixup_slab_list(struct kmem_cache *cachep,
page->freelist = NULL;
}
} else
-   list_add(&page->lru, &n->slabs_partial);
+   list_add(&page->slab_list, &n->slabs_partial);
 }
 
 /* Try to find non-pfmemalloc slab if needed */
@@ -2882,20 +2882,20 @@ static noinline struct page *get_valid_first_slab(struct kmem_cache_node *n,
}
 
/* Move pfmemalloc slab to the end of list to speed up next search */
-   list_del(&page->lru);
+   list_del(&page->slab_list);
if (!page->active) {
-   list_add_tail(&page->lru, &n->slabs_free);
+   list_add_tail(&page->slab_list, &n->slabs_free);
n->free_slabs++;
} else
-   list_add_tail(&page->lru, &n->slabs_partial);
+   list_add_tail(&page->slab_list, &n->slabs_partial);
 
-   list_for_each_entry(page, &n->slabs_partial, lru) {
+   list_for_each_entry(page, &n->slabs_partial, slab_list) {
if (!PageSlabPfmemalloc(page))
return page;
}
 
n->free_touched = 1;
-   list_for_each_entry(page, &n->slabs_free, lru) {
+   list_for_each_entry(page, &n->slabs_free, slab_list) {
if (!PageSlabPfmemalloc(page)) {
n->free_slabs--;
return page;
@@ -2910,11 +2910,12 @@ static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
struct page *page;
 
assert_spin_locked(&n->list_lock);
-   page = list_first_entry_or_null(&n->slabs_partial, struct page, lru);
+   page = list_first_entry_or_null(&n->slabs_partial, struct page,
+   slab_list);
if (!page) {
n->free_touched = 1;
page = list_first_entry_or_null(&n->slabs_free, struct page,
-   lru);
+   slab_list);
if (page)
n->free_slabs--;
}
@@ -3415,29 +3416,29 @@ static void free_block(struct kmem_cache *cachep, void **objpp,
objp = objpp[i];
 
page = virt_to_head_page(objp);
-   list_del(&page->lru);
+   list_del(&page->slab_list);
check_spinlock_acquired_node(cachep, node);
slab_put_obj(cachep, page, objp);