We don't have to add a freeable page to the lru list and then remove
it again. This change saves a couple of list operations and makes the
page movement clearer.
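
In outline (a condensed, hand-written sketch of the two flows; locking,
the unevictable case and the compound-page case are elided; names as in
the diff below):

  /* old flow: move every page onto the lru list first */
  SetPageLRU(page);
  list_move(&page->lru, &lruvec->lists[lru]);
  if (put_page_testzero(page)) {
          /* freeable after all: undo the add we just did */
          __ClearPageLRU(page);
          del_page_from_lru_list(page, lruvec, lru);
  }

  /* new flow: drop the reference first, add only pages that stay */
  list_del(&page->lru);
  SetPageLRU(page);
  if (unlikely(put_page_testzero(page))) {
          __ClearPageLRU(page);
          continue;       /* free it; the lru list was never touched */
  }
  list_add(&page->lru, &lruvec->lists[lru]);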

The SetPageLRU needs to be kept before put_page_testzero for list
integrity. Otherwise:
 #0 move_pages_to_lru              #1 release_pages
 if !put_page_testzero
                                   if (put_page_testzero())
                                     !PageLRU //skip lru_lock
                                       list_add(&page->lru,)
   list_add(&page->lru,) //corrupt
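
For reference, the release_pages() side looks roughly like this (a
simplified paraphrase of mm/swap.c, with the refcount batching and
lru_lock relocking details elided):

  if (put_page_testzero(page)) {
          if (PageLRU(page)) {
                  spin_lock_irqsave(&pgdat->lru_lock, flags);
                  __ClearPageLRU(page);
                  del_page_from_lru_list(page, lruvec, page_off_lru(page));
                  spin_unlock_irqrestore(&pgdat->lru_lock, flags);
          }
          /* !PageLRU: lru_lock was never taken */
          list_add(&page->lru, &pages_to_free);
  }

So if #0 dropped its reference before setting PageLRU, #1 could see the
final refcount with !PageLRU, skip lru_lock, and put the page on its
private pages_to_free list while #0 goes on to list_add() it to the
lruvec: both sides manipulate page->lru and the list is corrupted.
Keeping SetPageLRU first forces any concurrent freeing through lru_lock.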

[a...@linux-foundation.org: coding style fixes]
Signed-off-by: Alex Shi <alex....@linux.alibaba.com>
Cc: Andrew Morton <a...@linux-foundation.org>
Cc: Johannes Weiner <han...@cmpxchg.org>
Cc: Tejun Heo <t...@kernel.org>
Cc: Matthew Wilcox <wi...@infradead.org>
Cc: Hugh Dickins <hu...@google.com>
Cc: linux...@kvack.org
Cc: linux-kernel@vger.kernel.org
---
 mm/vmscan.c | 37 ++++++++++++++++++++++++-------------
 1 file changed, 24 insertions(+), 13 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 749d239c62b2..ddb29d813d77 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1856,26 +1856,29 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
        while (!list_empty(list)) {
                page = lru_to_page(list);
                VM_BUG_ON_PAGE(PageLRU(page), page);
+               list_del(&page->lru);
                if (unlikely(!page_evictable(page))) {
-                       list_del(&page->lru);
                        spin_unlock_irq(&pgdat->lru_lock);
                        putback_lru_page(page);
                        spin_lock_irq(&pgdat->lru_lock);
                        continue;
                }
-               lruvec = mem_cgroup_page_lruvec(page, pgdat);
 
+               /*
+                * The SetPageLRU needs to be kept here for list integrity.
+                * Otherwise:
+                *   #0 move_pages_to_lru             #1 release_pages
+                *   if !put_page_testzero
+                *                                    if (put_page_testzero())
+                *                                      !PageLRU //skip lru_lock
+                *                                        list_add(&page->lru,)
+                *   list_add(&page->lru,) //corrupt
+                */
                SetPageLRU(page);
-               lru = page_lru(page);
 
-               nr_pages = hpage_nr_pages(page);
-               update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
-               list_move(&page->lru, &lruvec->lists[lru]);
-
-               if (put_page_testzero(page)) {
+               if (unlikely(put_page_testzero(page))) {
                        __ClearPageLRU(page);
                        __ClearPageActive(page);
-                       del_page_from_lru_list(page, lruvec, lru);
 
                        if (unlikely(PageCompound(page))) {
                                spin_unlock_irq(&pgdat->lru_lock);
@@ -1883,11 +1886,19 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
                                spin_lock_irq(&pgdat->lru_lock);
                        } else
                                list_add(&page->lru, &pages_to_free);
-               } else {
-                       nr_moved += nr_pages;
-                       if (PageActive(page))
-                               workingset_age_nonresident(lruvec, nr_pages);
+
+                       continue;
                }
+
+               lruvec = mem_cgroup_page_lruvec(page, pgdat);
+               lru = page_lru(page);
+               nr_pages = hpage_nr_pages(page);
+
+               update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
+               list_add(&page->lru, &lruvec->lists[lru]);
+               nr_moved += nr_pages;
+               if (PageActive(page))
+                       workingset_age_nonresident(lruvec, nr_pages);
        }
 
        /*
-- 
1.8.3.1
