Author: mjg
Date: Fri Jul 24 23:44:35 2020
New Revision: 363500
URL: https://svnweb.freebsd.org/changeset/base/363500

Log:
  MFC r352874:
  
      amd64 pmap: batch chunk removal in pmap_remove_pages
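
  In outline: pmap_remove_pages() previously called free_pv_chunk() for
  each fully freed PV chunk, taking the global pv_chunks_mutex once per
  chunk.  The change below collects those chunks on a local pv_chunklist
  and unlinks them all from the global pv_chunks LRU under a single lock
  acquisition via the new free_pv_chunk_batch().  A minimal userspace
  sketch of the pattern follows the diff.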

Modified:
  stable/12/sys/amd64/amd64/pmap.c
Directory Properties:
  stable/12/   (props changed)

Modified: stable/12/sys/amd64/amd64/pmap.c
==============================================================================
--- stable/12/sys/amd64/amd64/pmap.c    Fri Jul 24 23:17:09 2020        (r363499)
+++ stable/12/sys/amd64/amd64/pmap.c    Fri Jul 24 23:44:35 2020        (r363500)
@@ -1111,7 +1111,10 @@ static caddr_t crashdumpmap;
 #define        MAPDEV_SETATTR          0x00000002      /* Modify existing attrs. */
 #define        MAPDEV_ASSERTVALID      0x00000004      /* Assert mapping validity. */
 
+TAILQ_HEAD(pv_chunklist, pv_chunk);
+
 static void    free_pv_chunk(struct pv_chunk *pc);
+static void    free_pv_chunk_batch(struct pv_chunklist *batch);
 static void    free_pv_entry(pmap_t pmap, pv_entry_t pv);
 static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp);
 static int     popcnt_pc_map_pq(uint64_t *map);
@@ -4313,13 +4316,10 @@ free_pv_entry(pmap_t pmap, pv_entry_t pv)
 }
 
 static void
-free_pv_chunk(struct pv_chunk *pc)
+free_pv_chunk_dequeued(struct pv_chunk *pc)
 {
        vm_page_t m;
 
-       mtx_lock(&pv_chunks_mutex);
-       TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
-       mtx_unlock(&pv_chunks_mutex);
        PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
        PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
        PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
@@ -4330,6 +4330,35 @@ free_pv_chunk(struct pv_chunk *pc)
        vm_page_free(m);
 }
 
+static void
+free_pv_chunk(struct pv_chunk *pc)
+{
+
+       mtx_lock(&pv_chunks_mutex);
+       TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
+       mtx_unlock(&pv_chunks_mutex);
+       free_pv_chunk_dequeued(pc);
+}
+
+static void
+free_pv_chunk_batch(struct pv_chunklist *batch)
+{
+       struct pv_chunk *pc, *npc;
+
+       if (TAILQ_EMPTY(batch))
+               return;
+
+       mtx_lock(&pv_chunks_mutex);
+       TAILQ_FOREACH(pc, batch, pc_list) {
+               TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
+       }
+       mtx_unlock(&pv_chunks_mutex);
+
+       TAILQ_FOREACH_SAFE(pc, batch, pc_list, npc) {
+               free_pv_chunk_dequeued(pc);
+       }
+}
+
 /*
  * Returns a new PV entry, allocating a new PV chunk from the system when
  * needed.  If this PV chunk allocation fails and a PV list lock pointer was
@@ -6946,6 +6975,7 @@ pmap_remove_pages(pmap_t pmap)
        pt_entry_t *pte, tpte;
        pt_entry_t PG_M, PG_RW, PG_V;
        struct spglist free;
+       struct pv_chunklist free_chunks;
        vm_page_t m, mpte, mt;
        pv_entry_t pv;
        struct md_page *pvh;
@@ -6981,6 +7011,7 @@ pmap_remove_pages(pmap_t pmap)
        PG_V = pmap_valid_bit(pmap);
        PG_RW = pmap_rw_bit(pmap);
 
+       TAILQ_INIT(&free_chunks);
        SLIST_INIT(&free);
        PMAP_LOCK(pmap);
        TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
@@ -7108,13 +7139,14 @@ pmap_remove_pages(pmap_t pmap)
                PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
                if (allfree) {
                        TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
-                       free_pv_chunk(pc);
+                       TAILQ_INSERT_TAIL(&free_chunks, pc, pc_list);
                }
        }
        if (lock != NULL)
                rw_wunlock(lock);
        pmap_invalidate_all(pmap);
        pmap_pkru_deassign_all(pmap);
+       free_pv_chunk_batch(&free_chunks);
        PMAP_UNLOCK(pmap);
        vm_page_free_pages_toq(&free, true);
 }
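
For reference, a minimal userspace sketch of the batching pattern the
diff introduces, using BSD <sys/queue.h> and a pthread mutex standing in
for the kernel's mtx(9) pv_chunks_mutex.  The names here (chunk,
chunklist, global_lru, lru_lock, drain_batch) are hypothetical
illustrations, not the kernel API:

#include <sys/queue.h>
#include <stdlib.h>
#include <pthread.h>

struct chunk {
        TAILQ_ENTRY(chunk) c_list;      /* linkage for a local batch */
        TAILQ_ENTRY(chunk) c_lru;       /* linkage for the global LRU */
};

TAILQ_HEAD(chunklist, chunk);

static struct chunklist global_lru = TAILQ_HEAD_INITIALIZER(global_lru);
static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Unlink every batched chunk from the global LRU under one lock
 * acquisition, then free the chunks with the lock dropped; compare
 * the per-chunk lock/unlock that a free_pv_chunk()-style path does.
 */
static void
drain_batch(struct chunklist *batch)
{
        struct chunk *c, *nc;

        if (TAILQ_EMPTY(batch))
                return;
        pthread_mutex_lock(&lru_lock);
        TAILQ_FOREACH(c, batch, c_list)
                TAILQ_REMOVE(&global_lru, c, c_lru);
        pthread_mutex_unlock(&lru_lock);
        TAILQ_FOREACH_SAFE(c, batch, c_list, nc)
                free(c);
}

int
main(void)
{
        struct chunklist batch = TAILQ_HEAD_INITIALIZER(batch);
        struct chunk *c;
        int i;

        /* Queue a few chunks on both the global LRU and a local batch. */
        for (i = 0; i < 4; i++) {
                if ((c = malloc(sizeof(*c))) == NULL)
                        return (1);
                TAILQ_INSERT_TAIL(&global_lru, c, c_lru);
                TAILQ_INSERT_TAIL(&batch, c, c_list);
        }
        drain_batch(&batch);
        return (TAILQ_EMPTY(&global_lru) ? 0 : 1);
}

As in the kernel change, the list surgery happens once under the lock
and the actual frees run with the lock dropped.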