Author: alc
Date: Sat Oct  1 19:30:28 2016
New Revision: 306558
URL: https://svnweb.freebsd.org/changeset/base/306558

Log:
  MFC r305213,305319,305398
    As an optimization to the machine-independent layer, change the machine-
    dependent pmap_ts_referenced() so that it updates the page's dirty field
    if a modified bit is found while counting reference bits.  This
    opportunistic update can be performed at low cost and can eliminate the
    need for some future calls to pmap_is_modified() by the machine-
    independent layer.
  
    Replace the number 4 in sparc64's pmap_ts_referenced() with
    PMAP_TS_REFERENCED_MAX, as we've done elsewhere, e.g., on amd64.
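
    For context on the payoff: vm_page_dirty() sets the page's dirty
    field to VM_PAGE_BITS_ALL, so a later dirtiness check in the
    machine-independent layer can short-circuit without walking the
    page's PV list.  A minimal consumer-side sketch, modeled on
    vm_page_test_dirty() in sys/vm/vm_page.c (simplified; the helper
    name is hypothetical and the locking assertion is omitted):

        /*
         * If pmap_ts_referenced() already dirtied the page, m->dirty is
         * VM_PAGE_BITS_ALL and the comparatively expensive
         * pmap_is_modified() PV-list walk is skipped entirely.
         */
        static void
        page_update_dirty_sketch(vm_page_t m)
        {
                if (m->dirty != VM_PAGE_BITS_ALL && pmap_is_modified(m))
                        vm_page_dirty(m);
        }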

Modified:
  stable/11/sys/amd64/amd64/pmap.c
  stable/11/sys/arm/arm/pmap-v6.c
  stable/11/sys/i386/i386/pmap.c
  stable/11/sys/sparc64/sparc64/pmap.c
Directory Properties:
  stable/11/   (props changed)

Modified: stable/11/sys/amd64/amd64/pmap.c
==============================================================================
--- stable/11/sys/amd64/amd64/pmap.c    Sat Oct  1 17:57:32 2016        (r306557)
+++ stable/11/sys/amd64/amd64/pmap.c    Sat Oct  1 19:30:28 2016        (r306558)
@@ -5831,6 +5831,14 @@ safe_to_clear_referenced(pmap_t pmap, pt
  *     should be tested and standardized at some point in the future for
  *     optimal aging of shared pages.
  *
+ *     As an optimization, update the page's dirty field if a modified bit is
+ *     found while counting reference bits.  This opportunistic update can be
+ *     performed at low cost and can eliminate the need for some future calls
+ *     to pmap_is_modified().  However, since this function stops after
+ *     finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
+ *     dirty pages.  Those dirty pages will only be detected by a future call
+ *     to pmap_is_modified().
+ *
  *     A DI block is not needed within this function, because
  *     invalidations are performed before the PV list lock is
  *     released.
@@ -5843,7 +5851,7 @@ pmap_ts_referenced(vm_page_t m)
        pmap_t pmap;
        struct rwlock *lock;
        pd_entry_t oldpde, *pde;
-       pt_entry_t *pte, PG_A;
+       pt_entry_t *pte, PG_A, PG_M, PG_RW;
        vm_offset_t va;
        vm_paddr_t pa;
        int cleared, md_gen, not_cleared, pvh_gen;
@@ -5878,9 +5886,19 @@ retry:
                        }
                }
                PG_A = pmap_accessed_bit(pmap);
+               PG_M = pmap_modified_bit(pmap);
+               PG_RW = pmap_rw_bit(pmap);
                va = pv->pv_va;
                pde = pmap_pde(pmap, pv->pv_va);
                oldpde = *pde;
+               if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
+                       /*
+                        * Although "oldpde" is mapping a 2MB page, because
+                        * this function is called at a 4KB page granularity,
+                        * we only update the 4KB page under test.
+                        */
+                       vm_page_dirty(m);
+               }
                if ((*pde & PG_A) != 0) {
                        /*
                         * Since this reference bit is shared by 512 4KB
@@ -5974,11 +5992,15 @@ small_mappings:
                        }
                }
                PG_A = pmap_accessed_bit(pmap);
+               PG_M = pmap_modified_bit(pmap);
+               PG_RW = pmap_rw_bit(pmap);
                pde = pmap_pde(pmap, pv->pv_va);
                KASSERT((*pde & PG_PS) == 0,
                    ("pmap_ts_referenced: found a 2mpage in page %p's pv list",
                    m));
                pte = pmap_pde_to_pte(pde, pv->pv_va);
+               if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
+                       vm_page_dirty(m);
                if ((*pte & PG_A) != 0) {
                        if (safe_to_clear_referenced(pmap, *pte)) {
                                atomic_clear_long(pte, PG_A);

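A note on the test added above: a mapping is counted as dirty only when
PG_M and PG_RW are both set, the same combined test this pmap already
applies in, e.g., pmap_protect() before write-protecting a mapping.  On
amd64 the PG_A/PG_M/PG_RW values are looked up per pmap because
EPT-format page tables (used for VT-x guests) place these bits
differently from ordinary page tables.  The idiom as a predicate (a
sketch with a hypothetical helper name; the pmap open-codes it):

        /*
         * A mapping is dirty only if it is writable and the hardware
         * has set the modified bit; PG_M alone is not taken as
         * authoritative on a mapping that is not currently writable.
         */
        static __inline bool
        pte_dirty_sketch(pt_entry_t pte, pt_entry_t PG_M, pt_entry_t PG_RW)
        {
                return ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW));
        }
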
Modified: stable/11/sys/arm/arm/pmap-v6.c
==============================================================================
--- stable/11/sys/arm/arm/pmap-v6.c     Sat Oct  1 17:57:32 2016        (r306557)
+++ stable/11/sys/arm/arm/pmap-v6.c     Sat Oct  1 19:30:28 2016        (r306558)
@@ -5178,6 +5178,14 @@ pmap_is_referenced(vm_page_t m)
  *     XXX: The exact number of bits to check and clear is a matter that
  *     should be tested and standardized at some point in the future for
  *     optimal aging of shared pages.
+ *
+ *     As an optimization, update the page's dirty field if a modified bit is
+ *     found while counting reference bits.  This opportunistic update can be
+ *     performed at low cost and can eliminate the need for some future calls
+ *     to pmap_is_modified().  However, since this function stops after
+ *     finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
+ *     dirty pages.  Those dirty pages will only be detected by a future call
+ *     to pmap_is_modified().
  */
 int
 pmap_ts_referenced(vm_page_t m)
@@ -5186,7 +5194,7 @@ pmap_ts_referenced(vm_page_t m)
        pv_entry_t pv, pvf;
        pmap_t pmap;
        pt1_entry_t  *pte1p, opte1;
-       pt2_entry_t *pte2p;
+       pt2_entry_t *pte2p, opte2;
        vm_paddr_t pa;
        int rtval = 0;
 
@@ -5205,6 +5213,14 @@ pmap_ts_referenced(vm_page_t m)
                PMAP_LOCK(pmap);
                pte1p = pmap_pte1(pmap, pv->pv_va);
                opte1 = pte1_load(pte1p);
+               if (pte1_is_dirty(opte1)) {
+                       /*
+                        * Although "opte1" is mapping a 1MB page, because
+                        * this function is called at a 4KB page granularity,
+                        * we only update the 4KB page under test.
+                        */
+                       vm_page_dirty(m);
+               }
                if ((opte1 & PTE1_A) != 0) {
                        /*
                         * Since this reference bit is shared by 256 4KB pages,
@@ -5253,7 +5269,10 @@ small_mappings:
                    ("%s: not found a link in page %p's pv list", __func__, m));
 
                pte2p = pmap_pte2_quick(pmap, pv->pv_va);
-               if ((pte2_load(pte2p) & PTE2_A) != 0) {
+               opte2 = pte2_load(pte2p);
+               if (pte2_is_dirty(opte2))
+                       vm_page_dirty(m);
+               if ((opte2 & PTE2_A) != 0) {
                        pte2_clear_bit(pte2p, PTE2_A);
                        pmap_tlb_flush(pmap, pv->pv_va);
                        rtval++;

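Unlike amd64 and i386, ARMv6/v7 hardware does not maintain a dirty bit,
so pmap-v6 emulates one in software and wraps the test in
pte1_is_dirty()/pte2_is_dirty() rather than open-coding a bit mask.  A
plausible shape for the 4KB predicate, assuming the PTE2_NM ("not
modified") and PTE2_RO encoding this pmap uses for its modified-bit
emulation (the real definition lives in the pmap-v6 headers):

        /*
         * Sketch only: a mapping is dirty when it is writable (PTE2_RO
         * clear) and has taken a write (PTE2_NM clear); clean pages
         * are kept read-only until the first write fault so that the
         * fault handler can record the modification.
         */
        static __inline boolean_t
        pte2_is_dirty_sketch(pt2_entry_t pte2)
        {
                return ((pte2 & (PTE2_NM | PTE2_RO)) == 0);
        }
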
Modified: stable/11/sys/i386/i386/pmap.c
==============================================================================
--- stable/11/sys/i386/i386/pmap.c      Sat Oct  1 17:57:32 2016        (r306557)
+++ stable/11/sys/i386/i386/pmap.c      Sat Oct  1 19:30:28 2016        (r306558)
@@ -4802,6 +4802,14 @@ retry:
  *     XXX: The exact number of bits to check and clear is a matter that
  *     should be tested and standardized at some point in the future for
  *     optimal aging of shared pages.
+ *
+ *     As an optimization, update the page's dirty field if a modified bit is
+ *     found while counting reference bits.  This opportunistic update can be
+ *     performed at low cost and can eliminate the need for some future calls
+ *     to pmap_is_modified().  However, since this function stops after
+ *     finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
+ *     dirty pages.  Those dirty pages will only be detected by a future call
+ *     to pmap_is_modified().
  */
 int
 pmap_ts_referenced(vm_page_t m)
@@ -4828,6 +4836,14 @@ pmap_ts_referenced(vm_page_t m)
                pmap = PV_PMAP(pv);
                PMAP_LOCK(pmap);
                pde = pmap_pde(pmap, pv->pv_va);
+               if ((*pde & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
+                       /*
+                        * Although "*pde" is mapping a 2/4MB page, because
+                        * this function is called at a 4KB page granularity,
+                        * we only update the 4KB page under test.
+                        */
+                       vm_page_dirty(m);
+               }
                if ((*pde & PG_A) != 0) {
                        /*
                         * Since this reference bit is shared by either 1024
@@ -4876,6 +4892,8 @@ small_mappings:
                    ("pmap_ts_referenced: found a 4mpage in page %p's pv list",
                    m));
                pte = pmap_pte_quick(pmap, pv->pv_va);
+               if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
+                       vm_page_dirty(m);
                if ((*pte & PG_A) != 0) {
                        atomic_clear_int((u_int *)pte, PG_A);
                        pmap_invalidate_page(pmap, pv->pv_va);

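The i386 hunks mirror the amd64 ones, with two small differences: PG_M
and PG_RW are compile-time constants here, since i386 has no EPT page
table format to accommodate, and the superpage comment reads "2/4MB"
because a page-directory entry maps 2MB under PAE and 4MB otherwise.
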
Modified: stable/11/sys/sparc64/sparc64/pmap.c
==============================================================================
--- stable/11/sys/sparc64/sparc64/pmap.c        Sat Oct  1 17:57:32 2016        (r306557)
+++ stable/11/sys/sparc64/sparc64/pmap.c        Sat Oct  1 19:30:28 2016        (r306558)
@@ -2106,6 +2106,8 @@ pmap_page_is_mapped(vm_page_t m)
        return (rv);
 }
 
+#define        PMAP_TS_REFERENCED_MAX  5
+
 /*
  * Return a count of reference bits for a page, clearing those bits.
  * It is not necessary for every reference bit to be cleared, but it
@@ -2115,6 +2117,14 @@ pmap_page_is_mapped(vm_page_t m)
  * XXX: The exact number of bits to check and clear is a matter that
  * should be tested and standardized at some point in the future for
  * optimal aging of shared pages.
+ *
+ * As an optimization, update the page's dirty field if a modified bit is
+ * found while counting reference bits.  This opportunistic update can be
+ * performed at low cost and can eliminate the need for some future calls
+ * to pmap_is_modified().  However, since this function stops after
+ * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
+ * dirty pages.  Those dirty pages will only be detected by a future call
+ * to pmap_is_modified().
  */
 int
 pmap_ts_referenced(vm_page_t m)
@@ -2138,7 +2148,10 @@ pmap_ts_referenced(vm_page_t m)
                        if ((tp->tte_data & TD_PV) == 0)
                                continue;
                        data = atomic_clear_long(&tp->tte_data, TD_REF);
-                       if ((data & TD_REF) != 0 && ++count > 4)
+                       if ((data & TD_W) != 0)
+                               vm_page_dirty(m);
+                       if ((data & TD_REF) != 0 && ++count >=
+                           PMAP_TS_REFERENCED_MAX)
                                break;
                } while ((tp = tpn) != NULL && tp != tpf);
        }
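
Two details in the sparc64 hunk are worth spelling out.  First, the
change from "++count > 4" to "++count >= PMAP_TS_REFERENCED_MAX" is
behavior-preserving: with PMAP_TS_REFERENCED_MAX defined as 5, both
predicates stop the loop on the fifth reference bit found.  Second, a
single-bit test on TD_W suffices here because sparc64's pmap sets the
TTE's write-enable bit only once a write has actually occurred, so TD_W
doubles as the modified indicator, whereas amd64 and i386 must test
PG_M and PG_RW together.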