Author: alc
Date: Wed Sep 18 07:25:04 2019
New Revision: 352475
URL: https://svnweb.freebsd.org/changeset/base/352475

Log:
  MFC r350463
    In pmap_advise(), when we encounter a superpage mapping, we first demote
    the mapping and then destroy one of the 4 KB page mappings so that there
    is a potential trigger for repromotion.  Currently, we destroy the first
    4 KB page mapping that falls within the (current) superpage mapping or the
    virtual address range [sva, eva).  However, I have found empirically that
    destroying the last 4 KB mapping produces slightly better results,
    specifically, more promotions and fewer failed promotion attempts.
    Accordingly, this revision changes pmap_advise() to destroy the last 4 KB
    page mapping.  It also replaces some nearby uses of boolean_t with bool.

Modified:
  stable/12/sys/amd64/amd64/pmap.c
  stable/12/sys/arm64/arm64/pmap.c
  stable/12/sys/i386/i386/pmap.c
Directory Properties:
  stable/12/   (props changed)

Modified: stable/12/sys/amd64/amd64/pmap.c
==============================================================================
--- stable/12/sys/amd64/amd64/pmap.c    Wed Sep 18 07:24:33 2019        
(r352474)
+++ stable/12/sys/amd64/amd64/pmap.c    Wed Sep 18 07:25:04 2019        
(r352475)
@@ -7462,7 +7462,7 @@ pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t 
        pt_entry_t *pte, PG_A, PG_G, PG_M, PG_RW, PG_V;
        vm_offset_t va, va_next;
        vm_page_t m;
-       boolean_t anychanged;
+       bool anychanged;
 
        if (advice != MADV_DONTNEED && advice != MADV_FREE)
                return;
@@ -7481,7 +7481,7 @@ pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t 
        PG_M = pmap_modified_bit(pmap);
        PG_V = pmap_valid_bit(pmap);
        PG_RW = pmap_rw_bit(pmap);
-       anychanged = FALSE;
+       anychanged = false;
        pmap_delayed_invl_start();
        PMAP_LOCK(pmap);
        for (; sva < eva; sva = va_next) {
@@ -7523,17 +7523,25 @@ pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t 
                        /*
                         * Unless the page mappings are wired, remove the
                         * mapping to a single page so that a subsequent
-                        * access may repromote.  Since the underlying page
-                        * table page is fully populated, this removal never
-                        * frees a page table page.
+                        * access may repromote.  Choosing the last page
+                        * within the address range [sva, min(va_next, eva))
+                        * generally results in more repromotions.  Since the
+                        * underlying page table page is fully populated, this
+                        * removal never frees a page table page.
                         */
                        if ((oldpde & PG_W) == 0) {
-                               pte = pmap_pde_to_pte(pde, sva);
+                               va = eva;
+                               if (va > va_next)
+                                       va = va_next;
+                               va -= PAGE_SIZE;
+                               KASSERT(va >= sva,
+                                   ("pmap_advise: no address gap"));
+                               pte = pmap_pde_to_pte(pde, va);
                                KASSERT((*pte & PG_V) != 0,
                                    ("pmap_advise: invalid PTE"));
-                               pmap_remove_pte(pmap, pte, sva, *pde, NULL,
+                               pmap_remove_pte(pmap, pte, va, *pde, NULL,
                                    &lock);
-                               anychanged = TRUE;
+                               anychanged = true;
                        }
                        if (lock != NULL)
                                rw_wunlock(lock);
@@ -7565,7 +7573,7 @@ pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t 
                                if (va == va_next)
                                        va = sva;
                        } else
-                               anychanged = TRUE;
+                               anychanged = true;
                        continue;
 maybe_invlrng:
                        if (va != va_next) {

Modified: stable/12/sys/arm64/arm64/pmap.c
==============================================================================
--- stable/12/sys/arm64/arm64/pmap.c    Wed Sep 18 07:24:33 2019        
(r352474)
+++ stable/12/sys/arm64/arm64/pmap.c    Wed Sep 18 07:25:04 2019        
(r352475)
@@ -4882,15 +4882,23 @@ pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t 
                        /*
                         * Unless the page mappings are wired, remove the
                         * mapping to a single page so that a subsequent
-                        * access may repromote.  Since the underlying page
-                        * table page is fully populated, this removal never
-                        * frees a page table page.
+                        * access may repromote.  Choosing the last page
+                        * within the address range [sva, min(va_next, eva))
+                        * generally results in more repromotions.  Since the
+                        * underlying page table page is fully populated, this
+                        * removal never frees a page table page.
                         */
                        if ((oldl2 & ATTR_SW_WIRED) == 0) {
-                               l3 = pmap_l2_to_l3(l2, sva);
+                               va = eva;
+                               if (va > va_next)
+                                       va = va_next;
+                               va -= PAGE_SIZE;
+                               KASSERT(va >= sva,
+                                   ("pmap_advise: no address gap"));
+                               l3 = pmap_l2_to_l3(l2, va);
                                KASSERT(pmap_load(l3) != 0,
                                    ("pmap_advise: invalid PTE"));
-                               pmap_remove_l3(pmap, l3, sva, pmap_load(l2),
+                               pmap_remove_l3(pmap, l3, va, pmap_load(l2),
                                    NULL, &lock);
                        }
                        if (lock != NULL)

Modified: stable/12/sys/i386/i386/pmap.c
==============================================================================
--- stable/12/sys/i386/i386/pmap.c      Wed Sep 18 07:24:33 2019        
(r352474)
+++ stable/12/sys/i386/i386/pmap.c      Wed Sep 18 07:25:04 2019        
(r352475)
@@ -5246,19 +5246,19 @@ pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t 
        pt_entry_t *pte;
        vm_offset_t va, pdnxt;
        vm_page_t m;
-       boolean_t anychanged, pv_lists_locked;
+       bool anychanged, pv_lists_locked;
 
        if (advice != MADV_DONTNEED && advice != MADV_FREE)
                return;
        if (pmap_is_current(pmap))
-               pv_lists_locked = FALSE;
+               pv_lists_locked = false;
        else {
-               pv_lists_locked = TRUE;
+               pv_lists_locked = true;
 resume:
                rw_wlock(&pvh_global_lock);
                sched_pin();
        }
-       anychanged = FALSE;
+       anychanged = false;
        PMAP_LOCK(pmap);
        for (; sva < eva; sva = pdnxt) {
                pdnxt = (sva + NBPDR) & ~PDRMASK;
@@ -5272,7 +5272,7 @@ resume:
                        if ((oldpde & PG_MANAGED) == 0)
                                continue;
                        if (!pv_lists_locked) {
-                               pv_lists_locked = TRUE;
+                               pv_lists_locked = true;
                                if (!rw_try_wlock(&pvh_global_lock)) {
                                        if (anychanged)
                                                pmap_invalidate_all(pmap);
@@ -5291,16 +5291,24 @@ resume:
                        /*
                         * Unless the page mappings are wired, remove the
                         * mapping to a single page so that a subsequent
-                        * access may repromote.  Since the underlying page
-                        * table page is fully populated, this removal never
-                        * frees a page table page.
+                        * access may repromote.  Choosing the last page
+                        * within the address range [sva, min(pdnxt, eva))
+                        * generally results in more repromotions.  Since the
+                        * underlying page table page is fully populated, this
+                        * removal never frees a page table page.
                         */
                        if ((oldpde & PG_W) == 0) {
-                               pte = pmap_pte_quick(pmap, sva);
+                               va = eva;
+                               if (va > pdnxt)
+                                       va = pdnxt;
+                               va -= PAGE_SIZE;
+                               KASSERT(va >= sva,
+                                   ("pmap_advise: no address gap"));
+                               pte = pmap_pte_quick(pmap, va);
                                KASSERT((*pte & PG_V) != 0,
                                    ("pmap_advise: invalid PTE"));
-                               pmap_remove_pte(pmap, pte, sva, NULL);
-                               anychanged = TRUE;
+                               pmap_remove_pte(pmap, pte, va, NULL);
+                               anychanged = true;
                        }
                }
                if (pdnxt > eva)
@@ -5329,7 +5337,7 @@ resume:
                                if (va == pdnxt)
                                        va = sva;
                        } else
-                               anychanged = TRUE;
+                               anychanged = true;
                        continue;
 maybe_invlrng:
                        if (va != pdnxt) {
_______________________________________________
svn-src-all@freebsd.org mailing list
https://lists.freebsd.org/mailman/listinfo/svn-src-all
To unsubscribe, send any mail to "svn-src-all-unsubscribe@freebsd.org"

Reply via email to