Here is the version of the patch to protect executable pages that is on
its way upstream.

-------- Original Message --------
Subject: [PATCH -mm] vmscan: make mapped executable pages the first class citizen
Date: Thu, 7 May 2009 20:11:01 +0800
From: Wu Fengguang <fengguang...@intel.com>
To: Andrew Morton <a...@linux-foundation.org>
CC: Peter Zijlstra <pet...@infradead.org>, Rik van Riel <r...@redhat.com>, "linux-ker...@vger.kernel.org" <linux-ker...@vger.kernel.org>, "ty...@mit.edu" <ty...@mit.edu>, "linux...@kvack.org" <linux...@kvack.org>, Elladan <ella...@eskimo.com>, Nick Piggin <npig...@suse.de>, Johannes Weiner <han...@cmpxchg.org>, Christoph Lameter <c...@linux-foundation.org>, KOSAKI Motohiro <kosaki.motoh...@jp.fujitsu.com>
References: <20090430072057.ga4...@eskimo.com> <20090430174536.d0f438dd.a...@linux-foundation.org> <20090430205936.0f8b2...@riellaptop.surriel.com> <20090430181340.6f07421d.a...@linux-foundation.org> <20090430215034.4748e...@riellaptop.surriel.com> <20090430195439.e02edc26.a...@linux-foundation.org> <49fb01c1.6050...@redhat.com> <20090501123541.7983a8ae.a...@linux-foundation.org> <20090503031539.gc5...@localhost> <1241432635.7620.4732.ca...@twins>

Introduce AS_EXEC to mark executables and their linked libraries, and to
protect their referenced active pages from being deactivated.

CC: Elladan <ella...@eskimo.com>
CC: Nick Piggin <npig...@suse.de>
CC: Johannes Weiner <han...@cmpxchg.org>
CC: Christoph Lameter <c...@linux-foundation.org>
CC: KOSAKI Motohiro <kosaki.motoh...@jp.fujitsu.com>
Acked-by: Peter Zijlstra <pet...@infradead.org>
Acked-by: Rik van Riel <r...@redhat.com>
Signed-off-by: Wu Fengguang <fengguang...@intel.com>
---
 include/linux/pagemap.h |    1 +
 mm/mmap.c               |    2 ++
 mm/nommu.c              |    2 ++
 mm/vmscan.c             |   35 +++++++++++++++++++++++++++++++++--
 4 files changed, 38 insertions(+), 2 deletions(-)

--- linux.orig/include/linux/pagemap.h
+++ linux/include/linux/pagemap.h
@@ -25,6 +25,7 @@ enum mapping_flags {
 #ifdef CONFIG_UNEVICTABLE_LRU
        AS_UNEVICTABLE  = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */
 #endif
+       AS_EXEC         = __GFP_BITS_SHIFT + 4, /* mapped PROT_EXEC somewhere */
 };

static inline void mapping_set_error(struct address_space *mapping, int error)
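
For reference, the AS_* values index bits in mapping->flags, sharing the word
with the gfp mask that occupies the low __GFP_BITS_SHIFT bits. If one wanted
accessors in the style of the existing mapping_set_error() and
mapping_set_unevictable() helpers, they would look roughly like the sketch
below; the patch itself simply calls set_bit()/test_bit() at the call sites
shown in the following hunks.

/* Hypothetical accessors, for illustration only -- not part of the patch. */
static inline void mapping_set_exec(struct address_space *mapping)
{
	set_bit(AS_EXEC, &mapping->flags);
}

static inline int mapping_exec(struct address_space *mapping)
{
	return test_bit(AS_EXEC, &mapping->flags);
}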
--- linux.orig/mm/mmap.c
+++ linux/mm/mmap.c
@@ -1194,6 +1194,8 @@ munmap_back:
                        goto unmap_and_free_vma;
                if (vm_flags & VM_EXECUTABLE)
                        added_exe_file_vma(mm);
+               if (vm_flags & VM_EXEC)
+                       set_bit(AS_EXEC, &file->f_mapping->flags);
        } else if (vm_flags & VM_SHARED) {
                error = shmem_zero_setup(vma);
                if (error)
--- linux.orig/mm/nommu.c
+++ linux/mm/nommu.c
@@ -1224,6 +1224,8 @@ unsigned long do_mmap_pgoff(struct file
                        added_exe_file_vma(current->mm);
                        vma->vm_mm = current->mm;
                }
+               if (vm_flags & VM_EXEC)
+                       set_bit(AS_EXEC, &file->f_mapping->flags);
        }

        down_write(&nommu_region_sem);
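
Both mmap paths (mm/mmap.c on MMU kernels, mm/nommu.c otherwise) set AS_EXEC
on the file's address_space whenever the file is mapped with VM_EXEC, and
nothing in the patch ever clears the bit, so once any process has mapped a
file executable all of its page cache pages get the protection added below in
vmscan. From user space, VM_EXEC corresponds to passing PROT_EXEC to mmap().
A minimal stand-alone sketch of such a mapping (the path is just an example;
any executable or shared library will do):

/* exec_map.c: map a file PROT_EXEC, which gives the vma VM_EXEC and,
 * with this patch, sets AS_EXEC on the file's mapping. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/bin/ls";	/* example file */
	struct stat st;
	void *map;
	int fd = open(path, O_RDONLY);

	if (fd < 0 || fstat(fd, &st) < 0) {
		perror(path);
		return EXIT_FAILURE;
	}

	/* PROT_EXEC is the user-visible side of VM_EXEC. */
	map = mmap(NULL, st.st_size, PROT_READ | PROT_EXEC, MAP_PRIVATE, fd, 0);
	if (map == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}

	printf("mapped %s PROT_EXEC at %p\n", path, map);
	munmap(map, st.st_size);
	close(fd);
	return EXIT_SUCCESS;
}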
--- linux.orig/mm/vmscan.c
+++ linux/mm/vmscan.c
@@ -1230,6 +1230,7 @@ static void shrink_active_list(unsigned
        unsigned long pgmoved;
        unsigned long pgscanned;
        LIST_HEAD(l_hold);      /* The pages which were snipped off */
+       LIST_HEAD(l_active);
        LIST_HEAD(l_inactive);
        struct page *page;
        struct pagevec pvec;
@@ -1269,8 +1270,15 @@ static void shrink_active_list(unsigned

                /* page_referenced clears PageReferenced */
                if (page_mapping_inuse(page) &&
-                   page_referenced(page, 0, sc->mem_cgroup))
+                   page_referenced(page, 0, sc->mem_cgroup)) {
+                       struct address_space *mapping = page_mapping(page);
+
                        pgmoved++;
+                       if (mapping && test_bit(AS_EXEC, &mapping->flags)) {
+                               list_add(&page->lru, &l_active);
+                               continue;
+                       }
+               }

                list_add(&page->lru, &l_inactive);
        }
@@ -1279,7 +1287,6 @@ static void shrink_active_list(unsigned
         * Move the pages to the [file or anon] inactive list.
         */
        pagevec_init(&pvec, 1);
-       lru = LRU_BASE + file * LRU_FILE;

        spin_lock_irq(&zone->lru_lock);
        /*
@@ -1291,6 +1298,7 @@ static void shrink_active_list(unsigned
        reclaim_stat->recent_rotated[!!file] += pgmoved;

        pgmoved = 0;  /* count pages moved to inactive list */
+       lru = LRU_BASE + file * LRU_FILE;
        while (!list_empty(&l_inactive)) {
                page = lru_to_page(&l_inactive);
                prefetchw_prev_lru_page(page, &l_inactive, flags);
@@ -1313,6 +1321,29 @@ static void shrink_active_list(unsigned
        __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
        __count_zone_vm_events(PGREFILL, zone, pgscanned);
        __count_vm_events(PGDEACTIVATE, pgmoved);
+
+       pgmoved = 0;  /* count pages moved back to active list */
+       lru = LRU_ACTIVE + file * LRU_FILE;
+       while (!list_empty(&l_active)) {
+               page = lru_to_page(&l_active);
+               prefetchw_prev_lru_page(page, &l_active, flags);
+               VM_BUG_ON(PageLRU(page));
+               SetPageLRU(page);
+               VM_BUG_ON(!PageActive(page));
+
+               list_move(&page->lru, &zone->lru[lru].list);
+               mem_cgroup_add_lru_list(page, lru);
+               pgmoved++;
+               if (!pagevec_add(&pvec, page)) {
+                       spin_unlock_irq(&zone->lru_lock);
+                       if (buffer_heads_over_limit)
+                               pagevec_strip(&pvec);
+                       __pagevec_release(&pvec);
+                       spin_lock_irq(&zone->lru_lock);
+               }
+       }
+       __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
+
        spin_unlock_irq(&zone->lru_lock);
        if (buffer_heads_over_limit)
                pagevec_strip(&pvec);
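
In short, shrink_active_list() now sorts the scanned pages onto two lists
instead of one: referenced pages whose mapping has AS_EXEC set go to the new
l_active list and are put straight back on the active LRU by the second loop
added above, while everything else continues to l_inactive and is
deactivated. A stand-alone model of that decision (page_referenced(),
page_mapping_inuse() and the AS_EXEC test are reduced to booleans here; this
illustrates the policy only, it is not kernel code):

#include <stdbool.h>
#include <stdio.h>

struct scanned_page {
	bool referenced;	/* what page_referenced() would report   */
	bool mapping_inuse;	/* what page_mapping_inuse() would report */
	bool mapping_exec;	/* AS_EXEC set on page_mapping(page)?     */
};

enum destination { TO_INACTIVE, KEEP_ACTIVE };

/* Mirrors the triage in the patched loop: only referenced pages of an
 * in-use executable mapping stay active; everything else is deactivated. */
static enum destination triage(const struct scanned_page *p)
{
	if (p->mapping_inuse && p->referenced && p->mapping_exec)
		return KEEP_ACTIVE;
	return TO_INACTIVE;
}

int main(void)
{
	const struct scanned_page lib_text  = { true,  true, true  };
	const struct scanned_page file_data = { true,  true, false };
	const struct scanned_page idle_text = { false, true, true  };

	printf("referenced exec page:   %s\n",
	       triage(&lib_text)  == KEEP_ACTIVE ? "stays active" : "deactivated");
	printf("referenced data page:   %s\n",
	       triage(&file_data) == KEEP_ACTIVE ? "stays active" : "deactivated");
	printf("unreferenced exec page: %s\n",
	       triage(&idle_text) == KEEP_ACTIVE ? "stays active" : "deactivated");
	return 0;
}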

