This patch replaces the only open-coded __ClearPageActive() with
page_off_lru(). There are no open-coded __ClearPageUnevictable()s.

Before this patch, we have:
        __ClearPageActive()
        add_page_to_lru_list()

After this patch, we have:
        page_off_lru()
                if PageUnevictable()
                        __ClearPageUnevictable()
                else if PageActive()
                        __ClearPageActive()
        add_page_to_lru_list()

Checking PageUnevictable() shouldn't be a problem because these two
flags are mutually exclusive. Leaking either will trigger bad_page().

Signed-off-by: Yu Zhao <yuz...@google.com>
---
 mm/vmscan.c | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 503fc5e1fe32..f257d2f61574 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1845,7 +1845,6 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
        int nr_pages, nr_moved = 0;
        LIST_HEAD(pages_to_free);
        struct page *page;
-       enum lru_list lru;
 
        while (!list_empty(list)) {
                page = lru_to_page(list);
@@ -1860,14 +1859,11 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
                lruvec = mem_cgroup_page_lruvec(page, pgdat);
 
                SetPageLRU(page);
-               lru = page_lru(page);
-
                add_page_to_lru_list(page, lruvec, lru);
 
                if (put_page_testzero(page)) {
                        __ClearPageLRU(page);
-                       __ClearPageActive(page);
-                       del_page_from_lru_list(page, lruvec, lru);
+                       del_page_from_lru_list(page, lruvec, page_off_lru(page));
 
                        if (unlikely(PageCompound(page))) {
                                spin_unlock_irq(&pgdat->lru_lock);
-- 
2.28.0.681.g6f77f65b4e-goog

Reply via email to