Author: alc
Date: Sat Jan  4 19:50:25 2020
New Revision: 356354
URL: https://svnweb.freebsd.org/changeset/base/356354

Log:
  When a copy-on-write fault occurs, pmap_enter() is called on to replace the
  mapping to the old read-only page with a mapping to the new read-write page.
  To destroy the old mapping, pmap_enter() must destroy its page table and PV
  entries and invalidate its TLB entry.  This change simply invalidates that
  TLB entry a little earlier, specifically, on amd64 and arm64, before the PV
  list lock is held.
  
  Reviewed by:  kib, markj
  MFC after:    1 week
  Differential Revision:        https://reviews.freebsd.org/D23027

Modified:
  head/sys/amd64/amd64/pmap.c
  head/sys/arm64/arm64/pmap.c
  head/sys/i386/i386/pmap.c

Modified: head/sys/amd64/amd64/pmap.c
==============================================================================
--- head/sys/amd64/amd64/pmap.c Sat Jan  4 19:29:25 2020        (r356353)
+++ head/sys/amd64/amd64/pmap.c Sat Jan  4 19:50:25 2020        (r356354)
@@ -6131,8 +6131,10 @@ retry:
                         */
                        if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
                                vm_page_dirty(om);
-                       if ((origpte & PG_A) != 0)
+                       if ((origpte & PG_A) != 0) {
+                               pmap_invalidate_page(pmap, va);
                                vm_page_aflag_set(om, PGA_REFERENCED);
+                       }
                        CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
                        pv = pmap_pvh_remove(&om->md, pmap, va);
                        KASSERT(pv != NULL,
@@ -6144,9 +6146,13 @@ retry:
                            ((om->flags & PG_FICTITIOUS) != 0 ||
                            TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
                                vm_page_aflag_clear(om, PGA_WRITEABLE);
-               }
-               if ((origpte & PG_A) != 0)
+               } else {
+                       /*
+                        * Since this mapping is unmanaged, assume that PG_A
+                        * is set.
+                        */
                        pmap_invalidate_page(pmap, va);
+               }
                origpte = 0;
        } else {
                /*

Modified: head/sys/arm64/arm64/pmap.c
==============================================================================
--- head/sys/arm64/arm64/pmap.c Sat Jan  4 19:29:25 2020        (r356353)
+++ head/sys/arm64/arm64/pmap.c Sat Jan  4 19:50:25 2020        (r356354)
@@ -3449,8 +3449,10 @@ havel3:
                         */
                        if (pmap_pte_dirty(orig_l3))
                                vm_page_dirty(om);
-                       if ((orig_l3 & ATTR_AF) != 0)
+                       if ((orig_l3 & ATTR_AF) != 0) {
+                               pmap_invalidate_page(pmap, va);
                                vm_page_aflag_set(om, PGA_REFERENCED);
+                       }
                        CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
                        pv = pmap_pvh_remove(&om->md, pmap, va);
                        if ((m->oflags & VPO_UNMANAGED) != 0)
@@ -3460,8 +3462,11 @@ havel3:
                            ((om->flags & PG_FICTITIOUS) != 0 ||
                            TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
                                vm_page_aflag_clear(om, PGA_WRITEABLE);
+               } else {
+                       KASSERT((orig_l3 & ATTR_AF) != 0,
+                           ("pmap_enter: unmanaged mapping lacks ATTR_AF"));
+                       pmap_invalidate_page(pmap, va);
                }
-               pmap_invalidate_page(pmap, va);
                orig_l3 = 0;
        } else {
                /*

Modified: head/sys/i386/i386/pmap.c
==============================================================================
--- head/sys/i386/i386/pmap.c   Sat Jan  4 19:29:25 2020        (r356353)
+++ head/sys/i386/i386/pmap.c   Sat Jan  4 19:50:25 2020        (r356354)
@@ -3798,8 +3798,10 @@ __CONCAT(PMTYPE, enter)(pmap_t pmap, vm_offset_t va, v
                         */
                        if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
                                vm_page_dirty(om);
-                       if ((origpte & PG_A) != 0)
+                       if ((origpte & PG_A) != 0) {
+                               pmap_invalidate_page_int(pmap, va);
                                vm_page_aflag_set(om, PGA_REFERENCED);
+                       }
                        pv = pmap_pvh_remove(&om->md, pmap, va);
                        KASSERT(pv != NULL,
                            ("pmap_enter: no PV entry for %#x", va));
@@ -3810,9 +3812,13 @@ __CONCAT(PMTYPE, enter)(pmap_t pmap, vm_offset_t va, v
                            ((om->flags & PG_FICTITIOUS) != 0 ||
                            TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
                                vm_page_aflag_clear(om, PGA_WRITEABLE);
-               }
-               if ((origpte & PG_A) != 0)
+               } else {
+                       /*
+                        * Since this mapping is unmanaged, assume that PG_A
+                        * is set.
+                        */
                        pmap_invalidate_page_int(pmap, va);
+               }
                origpte = 0;
        } else {
                /*
_______________________________________________
svn-src-all@freebsd.org mailing list
https://lists.freebsd.org/mailman/listinfo/svn-src-all
To unsubscribe, send any mail to "svn-src-all-unsubscribe@freebsd.org"

Reply via email to