The speculative page fault handler runs without holding the mmap_sem and
calls lru_cache_add_active_or_unevictable(), but in that context the vma's
vm_flags are not guaranteed to remain constant.

Introduce __lru_cache_add_active_or_unevictable() which takes the vma flags
value as a parameter instead of the vma pointer.

Signed-off-by: Laurent Dufour <lduf...@linux.vnet.ibm.com>
---
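A minimal usage sketch for context (not part of this patch): the speculative
fault path is expected to snapshot the vma flags into vmf->vma_flags while the
vma is being validated (done elsewhere in this series) and then pass that
snapshot to the new helper, so the LRU decision no longer depends on a
vma->vm_flags value that may change underneath us. The exact snapshot point
and the use of READ_ONCE() below are assumptions for illustration only:

	/* Illustration only: take the flags snapshot while the vma is validated. */
	vmf->vma_flags = READ_ONCE(vmf->vma->vm_flags);

	/* Later, use the snapshot instead of dereferencing the vma again. */
	__lru_cache_add_active_or_unevictable(page, vmf->vma_flags);
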
 include/linux/swap.h | 10 ++++++++--
 mm/memory.c          |  8 ++++----
 mm/swap.c            |  6 +++---
 3 files changed, 15 insertions(+), 9 deletions(-)

diff --git a/include/linux/swap.h b/include/linux/swap.h
index a1a3f4ed94ce..99377b66ea93 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -337,8 +337,14 @@ extern void deactivate_file_page(struct page *page);
 extern void mark_page_lazyfree(struct page *page);
 extern void swap_setup(void);
 
-extern void lru_cache_add_active_or_unevictable(struct page *page,
-                                               struct vm_area_struct *vma);
+extern void __lru_cache_add_active_or_unevictable(struct page *page,
+                                               unsigned long vma_flags);
+
+static inline void lru_cache_add_active_or_unevictable(struct page *page,
+                                               struct vm_area_struct *vma)
+{
+       __lru_cache_add_active_or_unevictable(page, vma->vm_flags);
+}
 
 /* linux/mm/vmscan.c */
 extern unsigned long zone_reclaimable_pages(struct zone *zone);
diff --git a/mm/memory.c b/mm/memory.c
index e4c0f08b78e8..cbd7e5c3a42f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2540,7 +2540,7 @@ static int wp_page_copy(struct vm_fault *vmf)
                ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
                page_add_new_anon_rmap(new_page, vma, vmf->address, false);
                mem_cgroup_commit_charge(new_page, memcg, false, false);
-               lru_cache_add_active_or_unevictable(new_page, vma);
+               __lru_cache_add_active_or_unevictable(new_page, vmf->vma_flags);
                /*
                 * We call the notify macro here because, when using secondary
                 * mmu page tables (such as kvm shadow page tables), we want the
@@ -3082,7 +3082,7 @@ int do_swap_page(struct vm_fault *vmf)
        if (unlikely(page != swapcache && swapcache)) {
                page_add_new_anon_rmap(page, vma, vmf->address, false);
                mem_cgroup_commit_charge(page, memcg, false, false);
-               lru_cache_add_active_or_unevictable(page, vma);
+               __lru_cache_add_active_or_unevictable(page, vmf->vma_flags);
        } else {
                do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
                mem_cgroup_commit_charge(page, memcg, true, false);
@@ -3232,7 +3232,7 @@ static int do_anonymous_page(struct vm_fault *vmf)
        inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
        page_add_new_anon_rmap(page, vma, vmf->address, false);
        mem_cgroup_commit_charge(page, memcg, false, false);
-       lru_cache_add_active_or_unevictable(page, vma);
+       __lru_cache_add_active_or_unevictable(page, vmf->vma_flags);
 setpte:
        set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
 
@@ -3484,7 +3484,7 @@ int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
                inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
                page_add_new_anon_rmap(page, vma, vmf->address, false);
                mem_cgroup_commit_charge(page, memcg, false, false);
-               lru_cache_add_active_or_unevictable(page, vma);
+               __lru_cache_add_active_or_unevictable(page, vmf->vma_flags);
        } else {
                inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
                page_add_file_rmap(page, false);
diff --git a/mm/swap.c b/mm/swap.c
index 566cfb9fdaf3..7e25a74397b9 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -455,12 +455,12 @@ void lru_cache_add(struct page *page)
  * directly back onto it's zone's unevictable list, it does NOT use a
  * per cpu pagevec.
  */
-void lru_cache_add_active_or_unevictable(struct page *page,
-                                        struct vm_area_struct *vma)
+void __lru_cache_add_active_or_unevictable(struct page *page,
+                                          unsigned long vma_flags)
 {
        VM_BUG_ON_PAGE(PageLRU(page), page);
 
-       if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
+       if (likely((vma_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
                SetPageActive(page);
        else if (!TestSetPageMlocked(page)) {
                /*
-- 
2.7.4
