This is a trivial but worthwhile cleanup patch. There should be no
side effects except that page->lru is temporarily poisoned after the
page is deleted from the old list but before it is added to the new
one in move_pages_to_lru(). This is not a problem, since nothing reads
the poisoned pointers in that window.
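
For reference, a minimal sketch of the window in question (only the
relevant calls from the new move_pages_to_lru() are shown; list_del()
is the standard <linux/list.h> helper, which writes the poison values):

	list_del(&page->lru);	/* lru.next/lru.prev now hold
				 * LIST_POISON1/LIST_POISON2 */
	...			/* evictability check, lruvec lookup;
				 * page->lru is not read here */
	SetPageLRU(page);
	add_page_to_lru_list(page, lruvec, page_lru(page));
				/* list_add() overwrites the poison */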

Signed-off-by: Yu Zhao <[email protected]>
---
 mm/swap.c   |  4 +---
 mm/vmscan.c | 14 ++++----------
 2 files changed, 5 insertions(+), 13 deletions(-)

diff --git a/mm/swap.c b/mm/swap.c
index 40bf20a75278..2735ecf0f566 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -597,11 +597,9 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
 {
        if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
            !PageSwapCache(page) && !PageUnevictable(page)) {
-               bool active = PageActive(page);
                int nr_pages = thp_nr_pages(page);
 
-               del_page_from_lru_list(page, lruvec,
-                                      LRU_INACTIVE_ANON + active);
+               del_page_from_lru_list(page, lruvec, page_lru(page));
                ClearPageActive(page);
                ClearPageReferenced(page);
                /*
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 99e1796eb833..b479ced26cd3 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1845,13 +1845,12 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
        int nr_pages, nr_moved = 0;
        LIST_HEAD(pages_to_free);
        struct page *page;
-       enum lru_list lru;
 
        while (!list_empty(list)) {
                page = lru_to_page(list);
                VM_BUG_ON_PAGE(PageLRU(page), page);
+               list_del(&page->lru);
                if (unlikely(!page_evictable(page))) {
-                       list_del(&page->lru);
                        spin_unlock_irq(&pgdat->lru_lock);
                        putback_lru_page(page);
                        spin_lock_irq(&pgdat->lru_lock);
@@ -1860,16 +1859,10 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
                lruvec = mem_cgroup_page_lruvec(page, pgdat);
 
                SetPageLRU(page);
-               lru = page_lru(page);
-
-               nr_pages = thp_nr_pages(page);
-               update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
-               list_move(&page->lru, &lruvec->lists[lru]);
+               add_page_to_lru_list(page, lruvec, page_lru(page));
 
                if (put_page_testzero(page)) {
-                       __ClearPageLRU(page);
-                       __ClearPageActive(page);
-                       del_page_from_lru_list(page, lruvec, lru);
+                       del_page_from_lru_list(page, lruvec, page_off_lru(page));
 
                        if (unlikely(PageCompound(page))) {
                                spin_unlock_irq(&pgdat->lru_lock);
@@ -1878,6 +1871,7 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
                        } else
                                list_add(&page->lru, &pages_to_free);
                } else {
+                       nr_pages = thp_nr_pages(page);
                        nr_moved += nr_pages;
                        if (PageActive(page))
                                workingset_age_nonresident(lruvec, nr_pages);
-- 
2.28.0.402.g5ffc5be6b7-goog
