> Date: Fri, 19 Mar 2021 09:19:21 +0100
> From: Martin Pieuchot <m...@openbsd.org>
> 
> On 18/03/21(Thu) 16:49, Mark Kettenis wrote:
> > > Date: Thu, 18 Mar 2021 09:26:14 +0100
> > > From: Martin Pieuchot <m...@openbsd.org>
> > > 
> > > Diff below only touches comments in sys/uvm.  It reverts the commit from
> > > 2014 that turned three-line comments into one-line comments and syncs
> > > some more blocks with NetBSD -current.  This helps reduce the diff with
> > > NetBSD.
> > > 
> > > ok?
> > 
> > A few nits below where I think you change the comments in a way that
> > is misleading because our implementation differs from NetBSD.
> 
> Thanks, updated diff below.

Thanks.  I probably should have said that, with those points addressed,
this is ok with me.


> Index: uvm/uvm_addr.c
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_addr.c,v
> retrieving revision 1.29
> diff -u -p -r1.29 uvm_addr.c
> --- uvm/uvm_addr.c    22 Sep 2020 14:31:08 -0000      1.29
> +++ uvm/uvm_addr.c    18 Mar 2021 08:26:49 -0000
> @@ -65,7 +65,9 @@ struct uaddr_rnd_state {
>  #endif
>  };
>  
> -/* Definition of a pivot in pivot selector. */
> +/*
> + * Definition of a pivot in pivot selector.
> + */
>  struct uaddr_pivot {
>       vaddr_t                          addr;  /* End of prev. allocation. */
>       int                              expire;/* Best before date. */
> @@ -87,7 +89,11 @@ struct uaddr_pivot_state {
>  extern const struct uvm_addr_functions uaddr_kernel_functions;
>  struct uvm_addr_state uaddr_kbootstrap;
>  
> -/* Support functions. */
> +
> +/*
> + * Support functions.
> + */
> +
>  #ifndef SMALL_KERNEL
>  struct vm_map_entry  *uvm_addr_entrybyspace(struct uaddr_free_rbtree*,
>                           vsize_t);
> @@ -236,7 +242,9 @@ uvm_addr_fitspace(vaddr_t *min_result, v
>       if (fspace - before_gap - after_gap < sz)
>               return ENOMEM;
>  
> -     /* Calculate lowest address. */
> +     /*
> +      * Calculate lowest address.
> +      */
>       low_addr += before_gap;
>       low_addr = uvm_addr_align_forward(tmp = low_addr, align, offset);
>       if (low_addr < tmp)     /* Overflow during alignment. */
> @@ -244,7 +252,9 @@ uvm_addr_fitspace(vaddr_t *min_result, v
>       if (high_addr - after_gap - sz < low_addr)
>               return ENOMEM;
>  
> -     /* Calculate highest address. */
> +     /*
> +      * Calculate highest address.
> +      */
>       high_addr -= after_gap + sz;
>       high_addr = uvm_addr_align_backward(tmp = high_addr, align, offset);
>       if (high_addr > tmp)    /* Overflow during alignment. */
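
A side note for readers following the patch: the "overflow during alignment"
checks work because aligning can only move the address in one direction, so a
wrap-around shows up as the comparison going the wrong way.  Below is a minimal
userland sketch of the forward case; align_forward() is only an illustration
(the offset argument of uvm_addr_align_forward() is dropped and a power-of-two
alignment is assumed), not the kernel helper.

#include <stdint.h>
#include <stdio.h>

typedef uintptr_t vaddr_t;

/* Round addr up to a multiple of align (align must be a power of two). */
static vaddr_t
align_forward(vaddr_t addr, vaddr_t align)
{
        return (addr + (align - 1)) & ~(align - 1);     /* may wrap to 0 */
}

int
main(void)
{
        vaddr_t tmp, low = (vaddr_t)-3;         /* near the top of the space */

        low = align_forward(tmp = low, 16);
        if (low < tmp)                          /* wrapped: no usable range */
                printf("overflow during alignment\n");
        return 0;
}

The backward case is symmetric: rounding down can only decrease the address,
so "high_addr > tmp" after alignment means the subtraction wrapped.
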
> @@ -341,7 +351,9 @@ uvm_addr_linsearch(struct vm_map *map, s
>           (before_gap & PAGE_MASK) == 0 && (after_gap & PAGE_MASK) == 0);
>       KASSERT(high + sz > high); /* Check for overflow. */
>  
> -     /* Hint magic. */
> +     /*
> +      * Hint magic.
> +      */
>       if (hint == 0)
>               hint = (direction == 1 ? low : high);
>       else if (hint > high) {
> @@ -463,6 +475,7 @@ uaddr_destroy(struct uvm_addr_state *uad
>   * If hint is set, search will start at the hint position.
>   * Only searches forward.
>   */
> +
>  const struct uvm_addr_functions uaddr_lin_functions = {
>       .uaddr_select = &uaddr_lin_select,
>       .uaddr_destroy = &uaddr_destroy,
> @@ -489,7 +502,9 @@ uaddr_lin_select(struct vm_map *map, str
>  {
>       vaddr_t guard_sz;
>  
> -     /* Deal with guardpages: search for space with one extra page. */
> +     /*
> +      * Deal with guardpages: search for space with one extra page.
> +      */
>       guard_sz = ((map->flags & VM_MAP_GUARDPAGES) == 0 ? 0 : PAGE_SIZE);
>  
>       if (uaddr->uaddr_maxaddr - uaddr->uaddr_minaddr - guard_sz < sz)
> @@ -716,6 +731,7 @@ uaddr_rnd_print(struct uvm_addr_state *u
>  /*
>   * Kernel allocation bootstrap logic.
>   */
> +
>  const struct uvm_addr_functions uaddr_kernel_functions = {
>       .uaddr_select = &uaddr_kbootstrap_select,
>       .uaddr_destroy = &uaddr_kbootstrap_destroy,
> @@ -839,7 +855,9 @@ uaddr_bestfit_select(struct vm_map *map,
>       if (entry == NULL)
>               return ENOMEM;
>  
> -     /* Walk the tree until we find an entry that fits.  */
> +     /*
> +      * Walk the tree until we find an entry that fits.
> +      */
>       while (uvm_addr_fitspace(&min, &max,
>           VMMAP_FREE_START(entry), VMMAP_FREE_END(entry),
>           sz, align, offset, 0, guardsz) != 0) {
> @@ -848,7 +866,9 @@ uaddr_bestfit_select(struct vm_map *map,
>                       return ENOMEM;
>       }
>  
> -     /* Return the address that generates the least fragmentation. */
> +     /*
> +      * Return the address that generates the least fragmentation.
> +      */
>       *entry_out = entry;
>       *addr_out = (min - VMMAP_FREE_START(entry) <=
>           VMMAP_FREE_END(entry) - guardsz - sz - max ?
> @@ -1128,7 +1148,9 @@ uaddr_pivot_select(struct vm_map *map, s
>       if (pivot->addr == 0 || pivot->entry == NULL || pivot->expire == 0)
>               goto expired;   /* Pivot is invalid (null or expired). */
>  
> -     /* Attempt to use the pivot to map the entry. */
> +     /*
> +      * Attempt to use the pivot to map the entry.
> +      */
>       entry = pivot->entry;
>       if (pivot->dir > 0) {
>               if (uvm_addr_fitspace(&min, &max,
> Index: uvm/uvm_amap.c
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_amap.c,v
> retrieving revision 1.87
> diff -u -p -r1.87 uvm_amap.c
> --- uvm/uvm_amap.c    19 Jan 2021 13:21:36 -0000      1.87
> +++ uvm/uvm_amap.c    19 Mar 2021 08:10:45 -0000
> @@ -188,7 +188,7 @@ amap_chunk_free(struct vm_amap *amap, st
>   * when enabled, an array of ints is allocated for the pprefs.  this
>   * array is allocated only when a partial reference is added to the
>   * map (either by unmapping part of the amap, or gaining a reference
> - * to only a part of an amap).  if the malloc of the array fails
> + * to only a part of an amap).  if the allocation of the array fails
>   * (M_NOWAIT), then we set the array pointer to PPREF_NONE to indicate
>   * that we tried to do ppref's but couldn't alloc the array so just
>   * give up (after all, this is an optional feature!).
> @@ -209,12 +209,14 @@ amap_chunk_free(struct vm_amap *amap, st
>   * chunk.    note that the "plus one" part is needed because a reference
>   * count of zero is neither positive or negative (need a way to tell
>   * if we've got one zero or a bunch of them).
> - * 
> + *
>   * here are some in-line functions to help us.
>   */
>  
>  /*
>   * pp_getreflen: get the reference and length for a specific offset
> + *
> + * => ppref's amap must be locked
>   */
>  static inline void
>  pp_getreflen(int *ppref, int offset, int *refp, int *lenp)
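
For readers following along, the <ref, len> encoding that the comment above
alludes to is worth spelling out.  This is my reading of the scheme, as a
self-contained userland sketch (pp_set()/pp_get() are illustrations, not the
kernel's pp_setreflen()/pp_getreflen()): a positive cell stores ref + 1 for a
run of length one, a negative cell stores -(ref + 1) with the run length in
the next cell, so a cell value of zero never occurs and a reference count of
zero stays distinguishable.

#include <assert.h>
#include <stdio.h>

static void
pp_set(int *ppref, int offset, int ref, int len)
{
        if (len == 1) {
                ppref[offset] = ref + 1;
        } else {
                ppref[offset] = -(ref + 1);     /* negative marks a run */
                ppref[offset + 1] = len;        /* run length in next cell */
        }
}

static void
pp_get(const int *ppref, int offset, int *refp, int *lenp)
{
        if (ppref[offset] > 0) {                /* single-slot run */
                *refp = ppref[offset] - 1;
                *lenp = 1;
        } else {                                /* multi-slot run */
                *refp = -ppref[offset] - 1;
                *lenp = ppref[offset + 1];
        }
}

int
main(void)
{
        int ppref[8] = { 0 }, ref, len;

        pp_set(ppref, 0, 0, 4);         /* refcount 0 over 4 slots */
        pp_get(ppref, 0, &ref, &len);
        assert(ref == 0 && len == 4);   /* the "plus one" keeps 0 unambiguous */
        printf("ref %d len %d\n", ref, len);
        return 0;
}
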
> @@ -231,6 +233,8 @@ pp_getreflen(int *ppref, int offset, int
>  
>  /*
>   * pp_setreflen: set the reference and length for a specific offset
> + *
> + * => ppref's amap must be locked
>   */
>  static inline void
>  pp_setreflen(int *ppref, int offset, int ref, int len)
> @@ -242,7 +246,7 @@ pp_setreflen(int *ppref, int offset, int
>               ppref[offset+1] = len;
>       }
>  }
> -#endif
> +#endif /* UVM_AMAP_PPREF */
>  
>  /*
>   * amap_init: called at boot time to init global amap data structures
> @@ -276,8 +280,9 @@ amap_init(void)
>  }
>  
>  /*
> - * amap_alloc1: internal function that allocates an amap, but does not
> - *   init the overlay.
> + * amap_alloc1: allocate an amap, but do not initialise the overlay.
> + *
> + * => Note: lock is not set.
>   */
>  static inline struct vm_amap *
>  amap_alloc1(int slots, int waitf, int lazyalloc)
> @@ -408,6 +413,7 @@ amap_lock_alloc(struct vm_amap *amap)
>   *
>   * => caller should ensure sz is a multiple of PAGE_SIZE
>   * => reference count to new amap is set to one
> + * => new amap is returned unlocked
>   */
>  struct vm_amap *
>  amap_alloc(vaddr_t sz, int waitf, int lazyalloc)
> @@ -432,6 +438,7 @@ amap_alloc(vaddr_t sz, int waitf, int la
>  /*
>   * amap_free: free an amap
>   *
> + * => the amap must be unlocked
>   * => the amap should have a zero reference count and be empty
>   */
>  void
> @@ -466,11 +473,9 @@ amap_free(struct vm_amap *amap)
>  /*
>   * amap_wipeout: wipeout all anon's in an amap; then free the amap!
>   *
> - * => called from amap_unref when the final reference to an amap is
> - *   discarded (i.e. when reference count == 1)
> + * => Called from amap_unref(), when reference count drops to zero.
>   * => amap must be locked.
>   */
> -
>  void
>  amap_wipeout(struct vm_amap *amap)
>  {
> @@ -483,7 +488,9 @@ amap_wipeout(struct vm_amap *amap)
>       KASSERT(amap->am_ref == 0);
>  
>       if (__predict_false((amap->am_flags & AMAP_SWAPOFF) != 0)) {
> -             /* amap_swap_off will call us again. */
> +             /*
> +              * Note: amap_swap_off() will call us again.
> +              */
>               amap_unlock(amap);
>               return;
>       }
> @@ -503,12 +510,11 @@ amap_wipeout(struct vm_amap *amap)
>                               panic("amap_wipeout: corrupt amap");
>                       KASSERT(anon->an_lock == amap->am_lock);
>  
> +                     /*
> +                      * Drop the reference.
> +                      */
>                       refs = --anon->an_ref;
>                       if (refs == 0) {
> -                             /*
> -                              * we had the last reference to a vm_anon.
> -                              * free it.
> -                              */
>                               uvm_anfree_list(anon, &pgl);
>                       }
>               }
> @@ -516,7 +522,9 @@ amap_wipeout(struct vm_amap *amap)
>       /* free the pages */
>       uvm_pglistfree(&pgl);
>  
> -     /* now we free the map */
> +     /*
> +      * Finally, destroy the amap.
> +      */
>       amap->am_ref = 0;       /* ... was one */
>       amap->am_nused = 0;
>       amap_unlock(amap);
> @@ -526,10 +534,10 @@ amap_wipeout(struct vm_amap *amap)
>  /*
>   * amap_copy: ensure that a map entry's "needs_copy" flag is false
>   *   by copying the amap if necessary.
> - * 
> + *
>   * => an entry with a null amap pointer will get a new (blank) one.
> - * => the map that the map entry blocks to must be locked by caller.
> - * => the amap (if any) currently attached to the entry must be unlocked.
> + * => the map that the map entry belongs to must be locked by caller.
> + * => the amap currently attached to "entry" (if any) must be unlocked.
>   * => if canchunk is true, then we may clip the entry into a chunk
>   * => "startva" and "endva" are used only if canchunk is true.  they are
>   *     used to limit chunking (e.g. if you have a large space that you
> @@ -550,14 +558,16 @@ amap_copy(struct vm_map *map, struct vm_
>  
>       KASSERT(map != kernel_map);             /* we use sleeping locks */
>  
> -     /* is there a map to copy?   if not, create one from scratch. */
> +     /*
> +      * Is there an amap to copy?  If not, create one.
> +      */
>       if (entry->aref.ar_amap == NULL) {
>               /*
> -              * check to see if we have a large amap that we can
> -              * chunk.  we align startva/endva to chunk-sized
> +              * Check to see if we have a large amap that we can
> +              * chunk.  We align startva/endva to chunk-sized
>                * boundaries and then clip to them.
>                *
> -              * if we cannot chunk the amap, allocate it in a way
> +              * If we cannot chunk the amap, allocate it in a way
>                * that makes it grow or shrink dynamically with
>                * the number of slots.
>                */
> @@ -584,17 +594,21 @@ amap_copy(struct vm_map *map, struct vm_
>       }
>  
>       /*
> -      * first check and see if we are the only map entry
> -      * referencing the amap we currently have.  if so, then we can
> -      * just take it over rather than copying it.  the value can only
> -      * be one if we have the only reference to the amap
> +      * First check and see if we are the only map entry referencing
> +      * the amap we currently have.  If so, then just take it over instead
> +      * of copying it.  Note that we are reading am_ref without lock held
> +      * as the value can only be one if we have the only reference
> +      * to the amap (via our locked map).  If the value is greater than
> +      * one, then allocate a new amap and re-check the value.
>        */
>       if (entry->aref.ar_amap->am_ref == 1) {
>               entry->etype &= ~UVM_ET_NEEDSCOPY;
>               return;
>       }
>  
> -     /* looks like we need to copy the map. */
> +     /*
> +      * Allocate a new amap (note: not initialised, etc).
> +      */
>       AMAP_B2SLOT(slots, entry->end - entry->start);
>       if (!UVM_AMAP_SMALL(entry->aref.ar_amap) &&
>           entry->aref.ar_amap->am_hashshift != 0)
> @@ -607,20 +621,22 @@ amap_copy(struct vm_map *map, struct vm_
>       amap_lock(srcamap);
>  
>       /*
> -      * need to double check reference count now.  the reference count
> -      * could have changed while we were in malloc.  if the reference count
> -      * dropped down to one we take over the old map rather than
> -      * copying the amap.
> +      * Re-check the reference count with the lock held.  If it has
> +      * dropped to one - we can take over the existing map.
>        */
> -     if (srcamap->am_ref == 1) {             /* take it over? */
> +     if (srcamap->am_ref == 1) {
> +             /* Just take over the existing amap. */
>               entry->etype &= ~UVM_ET_NEEDSCOPY;
>               amap_unlock(srcamap);
> -             amap->am_ref--;         /* drop final reference to map */
> -             amap_free(amap);        /* dispose of new (unused) amap */
> +             /* Destroy the new (unused) amap. */
> +             amap->am_ref--;
> +             amap_free(amap);
>               return;
>       }
>  
> -     /* we must copy it now. */
> +     /*
> +      * Copy the slots.
> +      */
>       for (lcv = 0; lcv < slots; lcv += n) {
>               srcslot = entry->aref.ar_pageoff + lcv;
>               i = UVM_AMAP_SLOTIDX(lcv);
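
The unlocked am_ref check followed by the locked re-check deserves a second
look, so here is a toy version of the same dance.  It is only a sketch: a
pthread mutex stands in for the amap lock, the slot copying is left out, and
none of the names correspond to kernel symbols.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
        pthread_mutex_t lock;
        int             refs;
};

static struct obj *
maybe_copy(struct obj *src)
{
        struct obj *new;

        if (src->refs == 1)             /* unlocked read: 1 means we are alone */
                return src;             /* just take it over */

        new = malloc(sizeof(*new));     /* allocate before taking the lock */
        if (new == NULL)
                return NULL;
        pthread_mutex_init(&new->lock, NULL);
        new->refs = 1;

        pthread_mutex_lock(&src->lock);
        if (src->refs == 1) {           /* re-check: it may have dropped */
                pthread_mutex_unlock(&src->lock);
                free(new);              /* new copy not needed after all */
                return src;
        }
        src->refs--;                    /* stays above zero, nothing to free */
        pthread_mutex_unlock(&src->lock);
        return new;                     /* caller now works on the copy */
}

int
main(void)
{
        struct obj o = { PTHREAD_MUTEX_INITIALIZER, 2 };
        struct obj *res = maybe_copy(&o);

        printf("%s\n", res == &o ? "took over" : "copied");
        if (res != &o)
                free(res);
        return 0;
}
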
> @@ -659,10 +675,9 @@ amap_copy(struct vm_map *map, struct vm_
>       }
>  
>       /*
> -      * drop our reference to the old amap (srcamap).
> -      * we know that the reference count on srcamap is greater than
> -      * one (we checked above), so there is no way we could drop
> -      * the count to zero.  [and no need to worry about freeing it]
> +      * Drop our reference to the old amap (srcamap) and unlock.
> +      * Since the reference count on srcamap is greater than one
> +      * (we checked above), it cannot drop to zero while it is locked.
>        */
>       srcamap->am_ref--;
>       KASSERT(srcamap->am_ref > 0);
> @@ -690,7 +705,9 @@ amap_copy(struct vm_map *map, struct vm_
>       if (amap->am_lock == NULL)
>               amap_lock_alloc(amap);
>  
> -     /* install new amap. */
> +     /*
> +      * Install new amap.
> +      */
>       entry->aref.ar_pageoff = 0;
>       entry->aref.ar_amap = amap;
>       entry->etype &= ~UVM_ET_NEEDSCOPY;
> @@ -723,9 +740,9 @@ amap_cow_now(struct vm_map *map, struct 
>       struct vm_amap_chunk *chunk;
>  
>       /*
> -      * note that if we wait, we must ReStart the "lcv" for loop because
> -      * some other process could reorder the anon's in the
> -      * am_anon[] array on us.
> +      * note that if we unlock the amap then we must ReStart the "lcv" for
> +      * loop because some other process could reorder the anon's in the
> +      * am_anon[] array on us while the lock is dropped.
>        */
>  ReStart:
>       amap_lock(amap);
> @@ -739,7 +756,10 @@ ReStart:
>                       pg = anon->an_page;
>                       KASSERT(anon->an_lock == amap->am_lock);
>  
> -                     /* page must be resident since parent is wired */
> +                     /*
> +                      * The old page must be resident since the parent is
> +                      * wired.
> +                      */
>                       KASSERT(pg != NULL);
>  
>                       /*
> @@ -750,7 +770,7 @@ ReStart:
>                               continue;
>  
>                       /*
> -                      * if the page is busy then we have to wait for
> +                      * If the page is busy, then we have to unlock, wait for
>                        * it and then restart.
>                        */
>                       if (pg->pg_flags & PG_BUSY) {
> @@ -760,7 +780,10 @@ ReStart:
>                               goto ReStart;
>                       }
>  
> -                     /* ok, time to do a copy-on-write to a new anon */
> +                     /*
> +                      * Perform a copy-on-write.
> +                      * First - get a new anon and a page.
> +                      */
>                       nanon = uvm_analloc();
>                       if (nanon != NULL) {
>                               /* the new anon will share the amap's lock */
> @@ -783,18 +806,18 @@ ReStart:
>                       }
>  
>                       /*
> -                      * got it... now we can copy the data and replace anon
> -                      * with our new one...
> +                      * Copy the data and replace anon with the new one.
> +                      * Also, set up its lock (share it with the amap's lock).
>                        */
> -                     uvm_pagecopy(pg, npg);          /* old -> new */
> -                     anon->an_ref--;                 /* can't drop to zero */
> +                     uvm_pagecopy(pg, npg);
> +                     anon->an_ref--;
>                       KASSERT(anon->an_ref > 0);
> -                     chunk->ac_anon[slot] = nanon;   /* replace */
> +                     chunk->ac_anon[slot] = nanon;
>  
>                       /*
> -                      * drop PG_BUSY on new page ... since we have had its
> -                      * owner locked the whole time it can't be
> -                      * PG_RELEASED | PG_WANTED.
> +                      * Drop PG_BUSY on new page.  Since its owner was write
> +                      * locked all this time - it cannot be PG_RELEASED or
> +                      * PG_WANTED.
>                        */
>                       atomic_clearbits_int(&npg->pg_flags, PG_BUSY|PG_FAKE);
>                       UVM_PAGE_OWN(npg, NULL);
> @@ -810,6 +833,8 @@ ReStart:
>   * amap_splitref: split a single reference into two separate references
>   *
>   * => called from uvm_map's clip routines
> + * => origref's map should be locked
> + * => origref->ar_amap should be unlocked (we will lock)
>   */
>  void
>  amap_splitref(struct vm_aref *origref, struct vm_aref *splitref, vaddr_t offset)
> @@ -824,12 +849,11 @@ amap_splitref(struct vm_aref *origref, s
>  
>       amap_lock(amap);
>  
> -     /* now: we have a valid am_mapped array. */
>       if (amap->am_nslot - origref->ar_pageoff - leftslots <= 0)
>               panic("amap_splitref: map size check failed");
>  
>  #ifdef UVM_AMAP_PPREF
> -        /* Establish ppref before we add a duplicate reference to the amap. */
> +     /* Establish ppref before we add a duplicate reference to the amap. */
>       if (amap->am_ppref == NULL)
>               amap_pp_establish(amap);
>  #endif
> @@ -844,7 +868,9 @@ amap_splitref(struct vm_aref *origref, s
>  #ifdef UVM_AMAP_PPREF
>  
>  /*
> - * amap_pp_establish: add a ppref array to an amap, if possible
> + * amap_pp_establish: add a ppref array to an amap, if possible.
> + *
> + * => amap should be locked by caller
>   */
>  void
>  amap_pp_establish(struct vm_amap *amap)
> @@ -854,13 +880,12 @@ amap_pp_establish(struct vm_amap *amap)
>       amap->am_ppref = mallocarray(amap->am_nslot, sizeof(int),
>           M_UVMAMAP, M_NOWAIT|M_ZERO);
>  
> -     /* if we fail then we just won't use ppref for this amap */
>       if (amap->am_ppref == NULL) {
> -             amap->am_ppref = PPREF_NONE;    /* not using it */
> +             /* Failure - just do not use ppref. */
> +             amap->am_ppref = PPREF_NONE;
>               return;
>       }
>  
> -     /* init ppref */
>       pp_setreflen(amap->am_ppref, 0, amap->am_ref, amap->am_nslot);
>  }
>  
> @@ -868,7 +893,8 @@ amap_pp_establish(struct vm_amap *amap)
>   * amap_pp_adjref: adjust reference count to a part of an amap using the
>   * per-page reference count array.
>   *
> - * => caller must check that ppref != PPREF_NONE before calling
> + * => caller must check that ppref != PPREF_NONE before calling.
> + * => map and amap must be locked.
>   */
>  void
>  amap_pp_adjref(struct vm_amap *amap, int curslot, vsize_t slotlen, int adjval)
> @@ -883,8 +909,7 @@ amap_pp_adjref(struct vm_amap *amap, int
>       prevlcv = 0;
>  
>       /*
> -      * first advance to the correct place in the ppref array,
> -      * fragment if needed.
> +      * Advance to the correct place in the array, fragment if needed.
>        */
>       for (lcv = 0 ; lcv < curslot ; lcv += len) {
>               pp_getreflen(ppref, lcv, &ref, &len);
> @@ -898,17 +923,17 @@ amap_pp_adjref(struct vm_amap *amap, int
>       if (lcv != 0)
>               pp_getreflen(ppref, prevlcv, &prevref, &prevlen);
>       else {
> -             /* Ensure that the "prevref == ref" test below always
> -              * fails, since we're starting from the beginning of
> -              * the ppref array; that is, there is no previous
> -              * chunk.  
> +             /*
> +              * Ensure that the "prevref == ref" test below always
> +              * fails, since we are starting from the beginning of
> +              * the ppref array; that is, there is no previous chunk.
>                */
>               prevref = -1;
>               prevlen = 0;
>       }
>  
>       /*
> -      * now adjust reference counts in range.  merge the first
> +      * Now adjust reference counts in range.  Merge the first
>        * changed entry with the last unchanged entry if possible.
>        */
>       if (lcv != curslot)
> @@ -972,12 +997,19 @@ amap_wiperange_chunk(struct vm_amap *ama
>               if (refs == 0) {
>                       uvm_anfree(anon);
>               }
> -     }
> +
> +             /*
> +              * done with this anon, next ...!
> +              */
> +
> +     }       /* end of 'for' loop */
>  }
>  
>  /*
> - * amap_wiperange: wipe out a range of an amap
> - * [different from amap_wipeout because the amap is kept intact]
> + * amap_wiperange: wipe out a range of an amap.
> + * Note: different from amap_wipeout because the amap is kept intact.
> + *
> + * => Both map and amap must be locked by caller.
>   */
>  void
>  amap_wiperange(struct vm_amap *amap, int slotoff, int slots)
> @@ -991,8 +1023,8 @@ amap_wiperange(struct vm_amap *amap, int
>       endbucket = UVM_AMAP_BUCKET(amap, slotoff + slots - 1);
>  
>       /*
> -      * we can either traverse the amap by am_chunks or by am_buckets
> -      * depending on which is cheaper.    decide now.
> +      * We can either traverse the amap by am_chunks or by am_buckets.
> +      * Determine which way is less expensive.
>        */
>       if (UVM_AMAP_SMALL(amap))
>               amap_wiperange_chunk(amap, &amap->am_small, slotoff, slots);
> @@ -1110,7 +1142,9 @@ nextamap:
>  }
>  
>  /*
> - * amap_lookup: look up a page in an amap
> + * amap_lookup: look up a page in an amap.
> + *
> + * => amap should be locked by caller.
>   */
>  struct vm_anon *
>  amap_lookup(struct vm_aref *aref, vaddr_t offset)
> @@ -1131,8 +1165,9 @@ amap_lookup(struct vm_aref *aref, vaddr_
>  }
>  
>  /*
> - * amap_lookups: look up a range of pages in an amap
> + * amap_lookups: look up a range of pages in an amap.
>   *
> + * => amap should be locked by caller.
>   * => XXXCDC: this interface is biased toward array-based amaps.  fix.
>   */
>  void
> @@ -1184,9 +1219,10 @@ amap_populate(struct vm_aref *aref, vadd
>  }
>  
>  /*
> - * amap_add: add (or replace) a page to an amap
> + * amap_add: add (or replace) a page to an amap.
>   *
> - * => returns 0 if adding the page was successful or 1 when not.
> + * => amap should be locked by caller.
> + * => anon must have the lock associated with this amap.
>   */
>  int
>  amap_add(struct vm_aref *aref, vaddr_t offset, struct vm_anon *anon,
> @@ -1228,7 +1264,9 @@ amap_add(struct vm_aref *aref, vaddr_t o
>  }
>  
>  /*
> - * amap_unadd: remove a page from an amap
> + * amap_unadd: remove a page from an amap.
> + *
> + * => amap should be locked by caller.
>   */
>  void
>  amap_unadd(struct vm_aref *aref, vaddr_t offset)
> Index: uvm/uvm_anon.c
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_anon.c,v
> retrieving revision 1.52
> diff -u -p -r1.52 uvm_anon.c
> --- uvm/uvm_anon.c    4 Mar 2021 09:00:03 -0000       1.52
> +++ uvm/uvm_anon.c    18 Mar 2021 08:26:49 -0000
> @@ -42,9 +42,6 @@
>  
>  struct pool uvm_anon_pool;
>  
> -/*
> - * allocate anons
> - */
>  void
>  uvm_anon_init(void)
>  {
> @@ -54,7 +51,9 @@ uvm_anon_init(void)
>  }
>  
>  /*
> - * allocate an anon
> + * uvm_analloc: allocate a new anon.
> + *
> + * => anon will have no lock associated.
>   */
>  struct vm_anon *
>  uvm_analloc(void)
> @@ -93,12 +92,10 @@ uvm_anfree_list(struct vm_anon *anon, st
>               KASSERT(anon->an_lock != NULL);
>  
>               /*
> -              * if page is busy then we just mark it as released (who ever
> -              * has it busy must check for this when they wake up). if the
> -              * page is not busy then we can free it now.
> +              * If the page is busy, mark it as PG_RELEASED, so
> +              * that uvm_anon_release(9) would release it later.
>                */
>               if ((pg->pg_flags & PG_BUSY) != 0) {
> -                     /* tell them to dump it when done */
>                       atomic_setbits_int(&pg->pg_flags, PG_RELEASED);
>                       rw_obj_hold(anon->an_lock);
>                       return;
> @@ -127,13 +124,11 @@ uvm_anfree_list(struct vm_anon *anon, st
>       }
>       anon->an_lock = NULL;
>  
> -     /* free any swap resources. */
> -     uvm_anon_dropswap(anon);
> -
>       /*
> -      * now that we've stripped the data areas from the anon, free the anon
> -      * itself!
> +      * Free any swap resources, leave a page replacement hint.
>        */
> +     uvm_anon_dropswap(anon);
> +
>       KASSERT(anon->an_page == NULL);
>       KASSERT(anon->an_swslot == 0);
>  
> @@ -154,9 +149,10 @@ uvm_anwait(void)
>  }
>  
>  /*
> - * fetch an anon's page.
> + * uvm_anon_pagein: fetch an anon's page.
>   *
> - * => returns TRUE if pagein was aborted due to lack of memory.
> + * => anon must be locked, and is unlocked upon return.
> + * => returns true if pagein was aborted due to lack of memory.
>   */
>  
>  boolean_t
> @@ -168,20 +164,26 @@ uvm_anon_pagein(struct vm_amap *amap, st
>       KASSERT(rw_write_held(anon->an_lock));
>       KASSERT(anon->an_lock == amap->am_lock);
>  
> +     /*
> +      * Get the page of the anon.
> +      */
>       rv = uvmfault_anonget(NULL, amap, anon);
>  
>       switch (rv) {
>       case VM_PAGER_OK:
>               KASSERT(rw_write_held(anon->an_lock));
>               break;
> +
>       case VM_PAGER_ERROR:
>       case VM_PAGER_REFAULT:
> +
>               /*
> -              * nothing more to do on errors.
> -              * VM_PAGER_REFAULT can only mean that the anon was freed,
> -              * so again there's nothing to do.
> +              * Nothing more to do on errors.
> +              * VM_PAGER_REFAULT means that the anon was freed.
>                */
> +
>               return FALSE;
> +
>       default:
>  #ifdef DIAGNOSTIC
>               panic("anon_pagein: uvmfault_anonget -> %d", rv);
> @@ -191,8 +193,7 @@ uvm_anon_pagein(struct vm_amap *amap, st
>       }
>  
>       /*
> -      * ok, we've got the page now.
> -      * mark it as dirty, clear its swslot and un-busy it.
> +      * Mark the page as dirty and clear its swslot.
>        */
>       pg = anon->an_page;
>       if (anon->an_swslot > 0) {
> @@ -201,7 +202,9 @@ uvm_anon_pagein(struct vm_amap *amap, st
>       anon->an_swslot = 0;
>       atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
>  
> -     /* deactivate the page (to put it on a page queue) */
> +     /*
> +      * Deactivate the page (to put it on a page queue).
> +      */
>       pmap_clear_reference(pg);
>       pmap_page_protect(pg, PROT_NONE);
>       uvm_lock_pageq();
> @@ -213,7 +216,7 @@ uvm_anon_pagein(struct vm_amap *amap, st
>  }
>  
>  /*
> - * uvm_anon_dropswap:  release any swap resources from this anon.
> + * uvm_anon_dropswap: release any swap resources from this anon.
>   *
>   * => anon must be locked or have a reference count of 0.
>   */
> Index: uvm/uvm_aobj.c
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_aobj.c,v
> retrieving revision 1.91
> diff -u -p -r1.91 uvm_aobj.c
> --- uvm/uvm_aobj.c    4 Mar 2021 09:00:03 -0000       1.91
> +++ uvm/uvm_aobj.c    19 Mar 2021 08:12:52 -0000
> @@ -48,56 +48,45 @@
>  #include <uvm/uvm.h>
>  
>  /*
> - * an aobj manages anonymous-memory backed uvm_objects.   in addition
> - * to keeping the list of resident pages, it also keeps a list of
> - * allocated swap blocks.  depending on the size of the aobj this list
> - * of allocated swap blocks is either stored in an array (small objects)
> - * or in a hash table (large objects).
> + * An anonymous UVM object (aobj) manages anonymous memory.  In addition to
> + * keeping the list of resident pages, it may also keep a list of allocated
> + * swap blocks.  Depending on the size of the object, this list is either
> + * stored in an array (small objects) or in a hash table (large objects).
>   */
>  
>  /*
> - * local structures
> - */
> -
> -/*
> - * for hash tables, we break the address space of the aobj into blocks
> - * of UAO_SWHASH_CLUSTER_SIZE pages.   we require the cluster size to
> - * be a power of two.
> + * Note: for hash tables, we break the address space of the aobj into blocks
> + * of UAO_SWHASH_CLUSTER_SIZE pages, which shall be a power of two.
>   */
>  #define UAO_SWHASH_CLUSTER_SHIFT 4
>  #define UAO_SWHASH_CLUSTER_SIZE (1 << UAO_SWHASH_CLUSTER_SHIFT)
>  
> -/* get the "tag" for this page index */
> +/* Get the "tag" for this page index. */
>  #define UAO_SWHASH_ELT_TAG(PAGEIDX) \
>       ((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT)
>  
> -/* given an ELT and a page index, find the swap slot */
> +/* Given an ELT and a page index, find the swap slot. */
>  #define UAO_SWHASH_ELT_PAGESLOT_IDX(PAGEIDX) \
>       ((PAGEIDX) & (UAO_SWHASH_CLUSTER_SIZE - 1))
>  #define UAO_SWHASH_ELT_PAGESLOT(ELT, PAGEIDX) \
>       ((ELT)->slots[(PAGEIDX) & (UAO_SWHASH_CLUSTER_SIZE - 1)])
>  
> -/* given an ELT, return its pageidx base */
> +/* Given an ELT, return its pageidx base. */
>  #define UAO_SWHASH_ELT_PAGEIDX_BASE(ELT) \
>       ((ELT)->tag << UAO_SWHASH_CLUSTER_SHIFT)
>  
> -/*
> - * the swhash hash function
> - */
> +/* The hash function. */
>  #define UAO_SWHASH_HASH(AOBJ, PAGEIDX) \
>       (&(AOBJ)->u_swhash[(((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT) \
>                           & (AOBJ)->u_swhashmask)])
>  
>  /*
> - * the swhash threshold determines if we will use an array or a
> + * The threshold which determines whether we will use an array or a
>   * hash table to store the list of allocated swap blocks.
>   */
> -
>  #define UAO_SWHASH_THRESHOLD (UAO_SWHASH_CLUSTER_SIZE * 4)
>  
> -/*
> - * the number of buckets in a swhash, with an upper bound
> - */
> +/* The number of buckets in a hash, with an upper bound. */
>  #define UAO_SWHASH_MAXBUCKETS 256
>  #define UAO_SWHASH_BUCKETS(pages) \
>       (min((pages) >> UAO_SWHASH_CLUSTER_SHIFT, UAO_SWHASH_MAXBUCKETS))
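
Since the clustering macros above are the heart of the swhash, a small
userland illustration of how a page index splits up may help.  The cluster
constants mirror the ones in the diff; the hash mask value is made up for
the example.

#include <stdio.h>

#define CLUSTER_SHIFT   4
#define CLUSTER_SIZE    (1 << CLUSTER_SHIFT)   /* must be a power of two */

int
main(void)
{
        int pageidx = 77;
        unsigned long hashmask = 15;            /* buckets - 1, example only */
        int tag = pageidx >> CLUSTER_SHIFT;             /* UAO_SWHASH_ELT_TAG */
        int slot = pageidx & (CLUSTER_SIZE - 1);        /* ..._PAGESLOT_IDX */
        unsigned long bucket =
            (pageidx >> CLUSTER_SHIFT) & hashmask;      /* UAO_SWHASH_HASH */

        /* All pages sharing a tag land in one hash element of
         * CLUSTER_SIZE slots, so one element covers 16 page indexes. */
        printf("pageidx %d -> tag %d, slot %d, bucket %lu\n",
            pageidx, tag, slot, bucket);
        return 0;
}
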
> @@ -149,14 +138,8 @@ struct uvm_aobj {
>       LIST_ENTRY(uvm_aobj) u_list;    /* global list of aobjs */
>  };
>  
> -/*
> - * uvm_aobj_pool: pool of uvm_aobj structures
> - */
>  struct pool uvm_aobj_pool;
>  
> -/*
> - * local functions
> - */
>  static struct uao_swhash_elt *uao_find_swhash_elt(struct uvm_aobj *, int,
>                                    boolean_t);
>  static int                    uao_find_swslot(struct uvm_aobj *, int);
> @@ -223,17 +206,20 @@ uao_find_swhash_elt(struct uvm_aobj *aob
>       swhash = UAO_SWHASH_HASH(aobj, pageidx); /* first hash to get bucket */
>       page_tag = UAO_SWHASH_ELT_TAG(pageidx); /* tag to search for */
>  
> -     /* now search the bucket for the requested tag */
> +     /*
> +      * now search the bucket for the requested tag
> +      */
>       LIST_FOREACH(elt, swhash, list) {
>               if (elt->tag == page_tag)
>                       return(elt);
>       }
>  
> -     /* fail now if we are not allowed to create a new entry in the bucket */
>       if (!create)
>               return NULL;
>  
> -     /* allocate a new entry for the bucket and init/insert it in */
> +     /*
> +      * allocate a new entry for the bucket and init/insert it in
> +      */
>       elt = pool_get(&uao_swhash_elt_pool, PR_NOWAIT | PR_ZERO);
>       /*
>        * XXX We cannot sleep here as the hash table might disappear
> @@ -258,11 +244,15 @@ inline static int
>  uao_find_swslot(struct uvm_aobj *aobj, int pageidx)
>  {
>  
> -     /* if noswap flag is set, then we never return a slot */
> +     /*
> +      * if noswap flag is set, then we never return a slot
> +      */
>       if (aobj->u_flags & UAO_FLAG_NOSWAP)
>               return(0);
>  
> -     /* if hashing, look in hash table.  */
> +     /*
> +      * if hashing, look in hash table.
> +      */
>       if (aobj->u_pages > UAO_SWHASH_THRESHOLD) {
>               struct uao_swhash_elt *elt =
>                   uao_find_swhash_elt(aobj, pageidx, FALSE);
> @@ -273,7 +263,9 @@ uao_find_swslot(struct uvm_aobj *aobj, i
>                       return(0);
>       }
>  
> -     /* otherwise, look in the array */
> +     /*
> +      * otherwise, look in the array
> +      */
>       return(aobj->u_swslots[pageidx]);
>  }
>  
> @@ -281,6 +273,8 @@ uao_find_swslot(struct uvm_aobj *aobj, i
>   * uao_set_swslot: set the swap slot for a page in an aobj.
>   *
>   * => setting a slot to zero frees the slot
> + * => we return the old slot number, or -1 if we failed to allocate
> + *    memory to record the new slot number
>   */
>  int
>  uao_set_swslot(struct uvm_object *uobj, int pageidx, int slot)
> @@ -290,18 +284,21 @@ uao_set_swslot(struct uvm_object *uobj, 
>  
>       KERNEL_ASSERT_LOCKED();
>  
> -     /* if noswap flag is set, then we can't set a slot */
> +     /*
> +      * if noswap flag is set, then we can't set a slot
> +      */
>       if (aobj->u_flags & UAO_FLAG_NOSWAP) {
>               if (slot == 0)
>                       return(0);              /* a clear is ok */
>  
>               /* but a set is not */
>               printf("uao_set_swslot: uobj = %p\n", uobj);
> -             panic("uao_set_swslot: attempt to set a slot"
> -                 " on a NOSWAP object");
> +         panic("uao_set_swslot: attempt to set a slot on a NOSWAP object");
>       }
>  
> -     /* are we using a hash table?  if so, add it in the hash.  */
> +     /*
> +      * are we using a hash table?  if so, add it in the hash.
> +      */
>       if (aobj->u_pages > UAO_SWHASH_THRESHOLD) {
>               /*
>                * Avoid allocating an entry just to free it again if
> @@ -322,12 +319,11 @@ uao_set_swslot(struct uvm_object *uobj, 
>                * now adjust the elt's reference counter and free it if we've
>                * dropped it to zero.
>                */
> -             /* an allocation? */
>               if (slot) {
>                       if (oldslot == 0)
>                               elt->count++;
> -             } else {                /* freeing slot ... */
> -                     if (oldslot)    /* to be safe */
> +             } else {
> +                     if (oldslot)
>                               elt->count--;
>  
>                       if (elt->count == 0) {
> @@ -335,7 +331,7 @@ uao_set_swslot(struct uvm_object *uobj, 
>                               pool_put(&uao_swhash_elt_pool, elt);
>                       }
>               }
> -     } else { 
> +     } else {
>               /* we are using an array */
>               oldslot = aobj->u_swslots[pageidx];
>               aobj->u_swslots[pageidx] = slot;
> @@ -393,12 +389,15 @@ uao_free(struct uvm_aobj *aobj)
>       } else {
>               int i;
>  
> -             /* free the array */
> +             /*
> +              * free the array
> +              */
>               for (i = 0; i < aobj->u_pages; i++) {
>                       int slot = aobj->u_swslots[i];
>  
>                       if (slot) {
>                               uvm_swap_free(slot, 1);
> +
>                               /* this page is no longer only in swap. */
>                               atomic_dec_int(&uvmexp.swpgonly);
>                       }
> @@ -406,7 +405,9 @@ uao_free(struct uvm_aobj *aobj)
>               free(aobj->u_swslots, M_UVMAOBJ, aobj->u_pages * sizeof(int));
>       }
>  
> -     /* finally free the aobj itself */
> +     /*
> +      * finally free the aobj itself
> +      */
>       pool_put(&uvm_aobj_pool, aobj);
>  }
>  
> @@ -702,36 +703,39 @@ uao_grow(struct uvm_object *uobj, int pa
>  struct uvm_object *
>  uao_create(vsize_t size, int flags)
>  {
> -     static struct uvm_aobj kernel_object_store; /* home of kernel_object */
> -     static int kobj_alloced = 0;                    /* not allocated yet */
> +     static struct uvm_aobj kernel_object_store;
> +     static int kobj_alloced = 0;
>       int pages = round_page(size) >> PAGE_SHIFT;
>       int refs = UVM_OBJ_KERN;
>       int mflags;
>       struct uvm_aobj *aobj;
>  
> -     /* malloc a new aobj unless we are asked for the kernel object */
> -     if (flags & UAO_FLAG_KERNOBJ) {         /* want kernel object? */
> +     /*
> +      * Allocate a new aobj, unless kernel object is requested.
> +      */
> +     if (flags & UAO_FLAG_KERNOBJ) {
>               if (kobj_alloced)
>                       panic("uao_create: kernel object already allocated");
>  
>               aobj = &kernel_object_store;
>               aobj->u_pages = pages;
> -             aobj->u_flags = UAO_FLAG_NOSWAP;        /* no swap to start */
> -             /* we are special, we never die */
> +             aobj->u_flags = UAO_FLAG_NOSWAP;
>               kobj_alloced = UAO_FLAG_KERNOBJ;
>       } else if (flags & UAO_FLAG_KERNSWAP) {
>               aobj = &kernel_object_store;
>               if (kobj_alloced != UAO_FLAG_KERNOBJ)
>                   panic("uao_create: asked to enable swap on kernel object");
>               kobj_alloced = UAO_FLAG_KERNSWAP;
> -     } else {        /* normal object */
> +     } else {
>               aobj = pool_get(&uvm_aobj_pool, PR_WAITOK);
>               aobj->u_pages = pages;
> -             aobj->u_flags = 0;              /* normal object */
> -             refs = 1;                       /* normal object so 1 ref */
> +             aobj->u_flags = 0;
> +             refs = 1;
>       }
>  
> -     /* allocate hash/array if necessary */
> +     /*
> +      * allocate hash/array if necessary
> +      */
>       if (flags == 0 || (flags & (UAO_FLAG_KERNSWAP | UAO_FLAG_CANFAIL))) {
>               if (flags)
>                       mflags = M_NOWAIT;
> @@ -768,9 +772,14 @@ uao_create(vsize_t size, int flags)
>               }
>       }
>  
> +     /*
> +      * Initialise UVM object.
> +      */
>       uvm_objinit(&aobj->u_obj, &aobj_pager, refs);
>  
> -     /* now that aobj is ready, add it to the global list */
> +     /*
> +      * now that aobj is ready, add it to the global list
> +      */
>       mtx_enter(&uao_list_lock);
>       LIST_INSERT_HEAD(&uao_list, aobj, u_list);
>       mtx_leave(&uao_list_lock);
> @@ -799,7 +808,7 @@ uao_init(void)
>  }
>  
>  /*
> - * uao_reference: add a ref to an aobj
> + * uao_reference: hold a reference to an anonymous UVM object.
>   */
>  void
>  uao_reference(struct uvm_object *uobj)
> @@ -808,23 +817,20 @@ uao_reference(struct uvm_object *uobj)
>       uao_reference_locked(uobj);
>  }
>  
> -/*
> - * uao_reference_locked: add a ref to an aobj
> - */
>  void
>  uao_reference_locked(struct uvm_object *uobj)
>  {
>  
> -     /* kernel_object already has plenty of references, leave it alone. */
> +     /* Kernel object is persistent. */
>       if (UVM_OBJ_IS_KERN_OBJECT(uobj))
>               return;
>  
> -     uobj->uo_refs++;                /* bump! */
> +     uobj->uo_refs++;
>  }
>  
>  
>  /*
> - * uao_detach: drop a reference to an aobj
> + * uao_detach: drop a reference to an anonymous UVM object.
>   */
>  void
>  uao_detach(struct uvm_object *uobj)
> @@ -845,26 +851,34 @@ uao_detach_locked(struct uvm_object *uob
>       struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
>       struct vm_page *pg;
>  
> -     /* detaching from kernel_object is a noop. */
> +     /*
> +      * Detaching from kernel_object is a NOP.
> +      */
>       if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
>               return;
>       }
>  
> -     uobj->uo_refs--;                                /* drop ref! */
> -     if (uobj->uo_refs) {                            /* still more refs? */
> +     /*
> +      * Drop the reference.  If it was the last one, destroy the object.
> +      */
> +     uobj->uo_refs--;
> +     if (uobj->uo_refs) {
>               return;
>       }
>  
> -     /* remove the aobj from the global list. */
> +     /*
> +      * Remove the aobj from the global list.
> +      */
>       mtx_enter(&uao_list_lock);
>       LIST_REMOVE(aobj, u_list);
>       mtx_leave(&uao_list_lock);
>  
>       /*
> -      * Free all pages left in the object. If they're busy, wait
> -      * for them to become available before we kill it.
> -      * Release swap resources then free the page.
> -      */
> +      * Free all the pages left in the aobj.  For each page, when the
> +      * page is no longer busy (and thus after any disk I/O that it is
> +      * involved in is complete), release any swap resources and free
> +      * the page itself.
> +      */
>       uvm_lock_pageq();
>       while((pg = RBT_ROOT(uvm_objtree, &uobj->memt)) != NULL) {
>               if (pg->pg_flags & PG_BUSY) {
> @@ -880,12 +894,14 @@ uao_detach_locked(struct uvm_object *uob
>       }
>       uvm_unlock_pageq();
>  
> -     /* finally, free the rest. */
> +     /*
> +      * Finally, free the anonymous UVM object itself.
> +      */
>       uao_free(aobj);
>  }
>  
>  /*
> - * uao_flush: "flush" pages out of a uvm object
> + * uao_flush: flush pages out of a uvm object
>   *
>   * => if PGO_CLEANIT is not set, then we will not block.
>   * => if PGO_ALLPAGE is set, then all pages in the object are valid targets
> @@ -958,15 +974,11 @@ uao_flush(struct uvm_object *uobj, voff_
>                       /* FALLTHROUGH */
>               case PGO_DEACTIVATE:
>   deactivate_it:
> -                     /* skip the page if it's wired */
>                       if (pp->wire_count != 0)
>                               continue;
>  
>                       uvm_lock_pageq();
> -                     /* zap all mappings for the page. */
>                       pmap_page_protect(pp, PROT_NONE);
> -
> -                     /* ...and deactivate the page. */
>                       uvm_pagedeactivate(pp);
>                       uvm_unlock_pageq();
>  
> @@ -983,9 +995,16 @@ uao_flush(struct uvm_object *uobj, voff_
>                       if (pp->wire_count != 0)
>                               continue;
>  
> -                     /* zap all mappings for the page. */
> +                     /*
> +                      * free the swap slot and the page.
> +                      */
>                       pmap_page_protect(pp, PROT_NONE);
>  
> +                     /*
> +                      * freeing swapslot here is not strictly necessary.
> +                      * however, leaving it here doesn't save much
> +                      * because we need to update swap accounting anyway.
> +                      */
>                       uao_dropswap(uobj, pp->offset >> PAGE_SHIFT);
>                       uvm_lock_pageq();
>                       uvm_pagefree(pp);
> @@ -1029,12 +1048,17 @@ uao_get(struct uvm_object *uobj, voff_t 
>  
>       KERNEL_ASSERT_LOCKED();
>  
> -     /* get number of pages */
> +     /*
> +      * get number of pages
> +      */
>       maxpages = *npagesp;
>  
> -     /* step 1: handled the case where fault data structures are locked. */
>       if (flags & PGO_LOCKED) {
> -             /* step 1a: get pages that are already resident. */
> +             /*
> +              * step 1a: get pages that are already resident.   only do
> +              * this if the data structures are locked (i.e. the first
> +              * time through).
> +              */
>  
>               done = TRUE;    /* be optimistic */
>               gotpages = 0;   /* # of pages we got so far */
> @@ -1065,7 +1089,9 @@ uao_get(struct uvm_object *uobj, voff_t 
>                               }
>                       }
>  
> -                     /* to be useful must get a non-busy page */
> +                     /*
> +                      * to be useful must get a non-busy page
> +                      */
>                       if (ptmp == NULL ||
>                           (ptmp->pg_flags & PG_BUSY) != 0) {
>                               if (lcv == centeridx ||
> @@ -1076,10 +1102,8 @@ uao_get(struct uvm_object *uobj, voff_t 
>                       }
>  
>                       /*
> -                      * useful page: busy it and plug it in our
> -                      * result array
> +                      * useful page: plug it in our result array
>                        */
> -                     /* caller must un-busy this page */
>                       atomic_setbits_int(&ptmp->pg_flags, PG_BUSY);
>                       UVM_PAGE_OWN(ptmp, "uao_get1");
>                       pps[lcv] = ptmp;
> @@ -1146,8 +1170,7 @@ uao_get(struct uvm_object *uobj, voff_t 
>                               /* out of RAM? */
>                               if (ptmp == NULL) {
>                                       uvm_wait("uao_getpage");
> -                                     /* goto top of pps while loop */
> -                                     continue;       
> +                                     continue;
>                               }
>  
>                               /*
> @@ -1169,12 +1192,10 @@ uao_get(struct uvm_object *uobj, voff_t 
>                               tsleep_nsec(ptmp, PVM, "uao_get", INFSLP);
>                               continue;       /* goto top of pps while loop */
>                       }
> -                     
> -                     /* 
> -                      * if we get here then the page has become resident and
> -                      * unbusy between steps 1 and 2.  we busy it now (so we
> -                      * own it) and set pps[lcv] (so that we exit the while
> -                      * loop).
> +
> +                     /*
> +                      * if we get here then the page is resident and
> +                      * unbusy.  we busy it now (so we own it).
>                        */
>                       /* we own it, caller must un-busy */
>                       atomic_setbits_int(&ptmp->pg_flags, PG_BUSY);
> @@ -1200,10 +1221,14 @@ uao_get(struct uvm_object *uobj, voff_t 
>                       /* page hasn't existed before, just zero it. */
>                       uvm_pagezero(ptmp);
>               } else {
> -                     /* page in the swapped-out page. */
> +                     /*
> +                      * page in the swapped-out page.
> +                      */
>                       rv = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
>  
> -                     /* I/O done.  check for errors. */
> +                     /*
> +                      * I/O done.  check for errors.
> +                      */
>                       if (rv != VM_PAGER_OK) {
>                               /*
>                                * remove the swap slot from the aobj
> @@ -1228,18 +1253,16 @@ uao_get(struct uvm_object *uobj, voff_t 
>                       }
>               }
>  
> -             /* 
> +             /*
>                * we got the page!   clear the fake flag (indicates valid
>                * data now in page) and plug into our result array.   note
> -              * that page is still busy.   
> +              * that page is still busy.
>                *
>                * it is the callers job to:
>                * => check if the page is released
>                * => unbusy the page
>                * => activate the page
>                */
> -
> -             /* data is valid ... */
>               atomic_clearbits_int(&ptmp->pg_flags, PG_FAKE);
>               pmap_clear_modify(ptmp);                /* ... and clean */
>               pps[lcv] = ptmp;
> @@ -1274,7 +1297,9 @@ uao_swap_off(int startslot, int endslot)
>  {
>       struct uvm_aobj *aobj, *nextaobj, *prevaobj = NULL;
>  
> -     /* walk the list of all aobjs. */
> +     /*
> +      * Walk the list of all anonymous UVM objects.
> +      */
>       mtx_enter(&uao_list_lock);
>  
>       for (aobj = LIST_FIRST(&uao_list);
> @@ -1325,7 +1350,9 @@ uao_swap_off(int startslot, int endslot)
>               prevaobj = aobj;
>       }
>  
> -     /* done with traversal, unlock the list */
> +     /*
> +      * done with traversal, unlock the list
> +      */
>       mtx_leave(&uao_list_lock);
>       if (prevaobj) {
>               uao_detach_locked(&prevaobj->u_obj);
> @@ -1357,8 +1384,10 @@ restart:
>                               for (i = 0; i < UAO_SWHASH_CLUSTER_SIZE; i++) {
>                                       int slot = elt->slots[i];
>  
> -                                     /* if slot isn't in range, skip it. */
> -                                     if (slot < startslot || 
> +                                     /*
> +                                      * if the slot isn't in range, skip it.
> +                                      */
> +                                     if (slot < startslot ||
>                                           slot >= endslot) {
>                                               continue;
>                                       }
> @@ -1384,12 +1413,16 @@ restart:
>               for (i = 0; i < aobj->u_pages; i++) {
>                       int slot = aobj->u_swslots[i];
>  
> -                     /* if the slot isn't in range, skip it */
> +                     /*
> +                      * if the slot isn't in range, skip it
> +                      */
>                       if (slot < startslot || slot >= endslot) {
>                               continue;
>                       }
>  
> -                     /* process the page.  */
> +                     /*
> +                      * process the page.
> +                      */
>                       rv = uao_pagein_page(aobj, i);
>                       if (rv) {
>                               return rv;
> @@ -1401,8 +1434,9 @@ restart:
>  }
>  
>  /*
> - * page in a page from an aobj.  used for swap_off.
> - * returns TRUE if pagein was aborted due to lack of memory.
> + * uao_pagein_page: page in a single page from an anonymous UVM object.
> + *
> + * => Returns TRUE if pagein was aborted due to lack of memory.
>   */
>  static boolean_t
>  uao_pagein_page(struct uvm_aobj *aobj, int pageidx)
> @@ -1438,7 +1472,9 @@ uao_pagein_page(struct uvm_aobj *aobj, i
>       atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_CLEAN|PG_FAKE);
>       UVM_PAGE_OWN(pg, NULL);
>  
> -     /* deactivate the page (to put it on a page queue). */
> +     /*
> +      * deactivate the page (to put it on a page queue).
> +      */
>       pmap_clear_reference(pg);
>       uvm_lock_pageq();
>       uvm_pagedeactivate(pg);
> Index: uvm/uvm_device.c
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_device.c,v
> retrieving revision 1.60
> diff -u -p -r1.60 uvm_device.c
> --- uvm/uvm_device.c  6 Nov 2020 11:52:39 -0000       1.60
> +++ uvm/uvm_device.c  19 Mar 2021 08:14:42 -0000
> @@ -79,6 +79,11 @@ const struct uvm_pagerops uvm_deviceops 
>  };
>  
>  /*
> + * the ops!
> + */
> +
> +
> +/*
>   * udv_attach
>   *
>   * get a VM object that is associated with a device.   allocate a new
> @@ -97,14 +102,18 @@ udv_attach(dev_t device, vm_prot_t acces
>       struct uvm_object *obj;
>  #endif
>  
> -     /* before we do anything, ensure this device supports mmap */
> +     /*
> +      * before we do anything, ensure this device supports mmap
> +      */
>       mapfn = cdevsw[major(device)].d_mmap;
>       if (mapfn == NULL ||
>           mapfn == (paddr_t (*)(dev_t, off_t, int)) enodev ||
>           mapfn == (paddr_t (*)(dev_t, off_t, int)) nullop)
>               return(NULL);
>  
> -     /* Negative offsets on the object are not allowed. */
> +     /*
> +      * Negative offsets on the object are not allowed.
> +      */
>       if (off < 0)
>               return(NULL);
>  
> @@ -126,16 +135,22 @@ udv_attach(dev_t device, vm_prot_t acces
>               off += PAGE_SIZE; size -= PAGE_SIZE;
>       }
>  
> -     /* keep looping until we get it */
> +     /*
> +      * keep looping until we get it
> +      */
>       for (;;) {
> -             /* first, attempt to find it on the main list */
> +             /*
> +              * first, attempt to find it on the main list
> +              */
>               mtx_enter(&udv_lock);
>               LIST_FOREACH(lcv, &udv_list, u_list) {
>                       if (device == lcv->u_device)
>                               break;
>               }
>  
> -             /* got it on main list.  put a hold on it and unlock udv_lock. */
> +             /*
> +              * got it on main list.  put a hold on it and unlock udv_lock.
> +              */
>               if (lcv) {
>                       /*
>                        * if someone else has a hold on it, sleep and start
> @@ -153,7 +168,9 @@ udv_attach(dev_t device, vm_prot_t acces
>                       lcv->u_flags |= UVM_DEVICE_HOLD;
>                       mtx_leave(&udv_lock);
>  
> -                     /* bump reference count, unhold, return. */
> +                     /*
> +                      * bump reference count, unhold, return.
> +                      */
>                       lcv->u_obj.uo_refs++;
>  
>                       mtx_enter(&udv_lock);
> @@ -164,7 +181,9 @@ udv_attach(dev_t device, vm_prot_t acces
>                       return(&lcv->u_obj);
>               }
>  
> -             /* did not find it on main list.   need to malloc a new one. */
> +             /*
> +              * Did not find it on main list.  Need to allocate a new one.
> +              */
>               mtx_leave(&udv_lock);
>               /* NOTE: we could sleep in the following malloc() */
>               udv = malloc(sizeof(*udv), M_TEMP, M_WAITOK);
> @@ -229,7 +248,9 @@ udv_detach(struct uvm_object *uobj)
>  
>       KERNEL_ASSERT_LOCKED();
>  
> -     /* loop until done */
> +     /*
> +      * loop until done
> +      */
>  again:
>       if (uobj->uo_refs > 1) {
>               uobj->uo_refs--;
> @@ -237,7 +258,9 @@ again:
>       }
>       KASSERT(uobj->uo_npages == 0 && RBT_EMPTY(uvm_objtree, &uobj->memt));
>  
> -     /* is it being held?   if so, wait until others are done. */
> +     /*
> +      * is it being held?   if so, wait until others are done.
> +      */
>       mtx_enter(&udv_lock);
>       if (udv->u_flags & UVM_DEVICE_HOLD) {
>               udv->u_flags |= UVM_DEVICE_WANTED;
> @@ -250,7 +273,9 @@ again:
>               goto again;
>       }
>  
> -     /* got it!   nuke it now. */
> +     /*
> +      * got it!   nuke it now.
> +      */
>       LIST_REMOVE(udv, u_list);
>       if (udv->u_flags & UVM_DEVICE_WANTED)
>               wakeup(udv);
> @@ -310,7 +335,9 @@ udv_fault(struct uvm_faultinfo *ufi, vad
>               return(VM_PAGER_ERROR);
>       }
>  
> -     /* get device map function. */
> +     /*
> +      * get device map function.
> +      */
>       device = udv->u_device;
>       mapfn = cdevsw[major(device)].d_mmap;
>  
> @@ -325,7 +352,9 @@ udv_fault(struct uvm_faultinfo *ufi, vad
>       /* pmap va = vaddr (virtual address of pps[0]) */
>       curr_va = vaddr;
>  
> -     /* loop over the page range entering in as needed */
> +     /*
> +      * loop over the page range entering in as needed
> +      */
>       retval = VM_PAGER_OK;
>       for (lcv = 0 ; lcv < npages ; lcv++, curr_offset += PAGE_SIZE,
>           curr_va += PAGE_SIZE) {
> Index: uvm/uvm_fault.c
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_fault.c,v
> retrieving revision 1.118
> diff -u -p -r1.118 uvm_fault.c
> --- uvm/uvm_fault.c   12 Mar 2021 14:15:49 -0000      1.118
> +++ uvm/uvm_fault.c   19 Mar 2021 08:15:32 -0000
> @@ -230,24 +230,32 @@ static void
>  uvmfault_amapcopy(struct uvm_faultinfo *ufi)
>  {
>       for (;;) {
> -             /* no mapping?  give up. */
> +             /*
> +              * no mapping?  give up.
> +              */
>               if (uvmfault_lookup(ufi, TRUE) == FALSE)
>                       return;
>  
> -             /* copy if needed. */
> +             /*
> +              * copy if needed.
> +              */
>               if (UVM_ET_ISNEEDSCOPY(ufi->entry))
>                       amap_copy(ufi->map, ufi->entry, M_NOWAIT,
>                               UVM_ET_ISSTACK(ufi->entry) ? FALSE : TRUE,
>                               ufi->orig_rvaddr, ufi->orig_rvaddr + 1);
>  
> -             /* didn't work?  must be out of RAM.  sleep. */
> +             /*
> +              * didn't work?  must be out of RAM.   unlock and sleep.
> +              */
>               if (UVM_ET_ISNEEDSCOPY(ufi->entry)) {
>                       uvmfault_unlockmaps(ufi, TRUE);
>                       uvm_wait("fltamapcopy");
>                       continue;
>               }
>  
> -             /* got it! */
> +             /*
> +              * got it!   unlock and return.
> +              */
>               uvmfault_unlockmaps(ufi, TRUE);
>               return;
>       }
> @@ -341,11 +349,11 @@ uvmfault_anonget(struct uvm_faultinfo *u
>                               uvmfault_unlockall(ufi, amap, NULL);
>  
>                               /*
> -                              * we are passing a PG_BUSY+PG_FAKE+PG_CLEAN
> -                              * page into the uvm_swap_get function with
> -                              * all data structures unlocked.  note that
> -                              * it is ok to read an_swslot here because
> -                              * we hold PG_BUSY on the page.
> +                              * Pass a PG_BUSY+PG_FAKE+PG_CLEAN page into
> +                              * the uvm_swap_get() function with all data
> +                              * structures unlocked.  Note that it is OK
> +                              * to read an_swslot here, because we hold
> +                              * PG_BUSY on the page.
>                                */
>                               counters_inc(uvmexp_counters, pageins);
>                               error = uvm_swap_get(pg, anon->an_swslot,
> @@ -393,6 +401,9 @@ uvmfault_anonget(struct uvm_faultinfo *u
>                       if (pg->pg_flags & PG_RELEASED) {
>                               pmap_page_protect(pg, PROT_NONE);
>                               KASSERT(anon->an_ref == 0);
> +                             /*
> +                              * Released while we had unlocked amap.
> +                              */
>                               if (locked)
>                                       uvmfault_unlockall(ufi, NULL, NULL);
>                               uvm_anon_release(anon); /* frees page for us */
> @@ -418,7 +429,7 @@ uvmfault_anonget(struct uvm_faultinfo *u
>                               /*
>                                * Note: page was never !PG_BUSY, so it
>                                * cannot be mapped and thus no need to
> -                              * pmap_page_protect it...
> +                              * pmap_page_protect() it.
>                                */
>                               uvm_lock_pageq();
>                               uvm_pagefree(pg);
> @@ -432,8 +443,7 @@ uvmfault_anonget(struct uvm_faultinfo *u
>                       }
>  
>                       /*
> -                      * must be OK, clear modify (already PG_CLEAN)
> -                      * and activate
> +                      * We have successfully read the page, activate it.
>                        */
>                       pmap_clear_modify(pg);
>                       uvm_lock_pageq();
> @@ -776,7 +786,9 @@ uvm_fault_check(struct uvm_faultinfo *uf
>               if (amap)
>                       uvmfault_anonflush(*ranons, nback);
>  
> -             /* flush object? */
> +             /*
> +              * flush object?
> +              */
>               if (uobj) {
>                       voff_t uoff;
>  
> @@ -843,7 +855,7 @@ uvm_fault_upper_lookup(struct uvm_faulti
>               }
>  
>               /*
> -              * unmapped or center page. check if any anon at this level.
> +              * unmapped or center page.   check if any anon at this level.
>                */
>               if (amap == NULL || anons[lcv] == NULL) {
>                       pages[lcv] = NULL;
> @@ -903,6 +915,7 @@ uvm_fault_upper(struct uvm_faultinfo *uf
>       struct vm_page *pg = NULL;
>       int error, ret;
>  
> +     /* locked: maps(read), amap, anon */
>       KASSERT(rw_write_held(amap->am_lock));
>       KASSERT(anon->an_lock == amap->am_lock);
>  
> @@ -1037,7 +1050,9 @@ uvm_fault_upper(struct uvm_faultinfo *uf
>               return ERESTART;
>       }
>  
> -     /* ... update the page queues. */
> +     /*
> +      * ... update the page queues.
> +      */
>       uvm_lock_pageq();
>  
>       if (fault_type == VM_FAULT_WIRE) {
> @@ -1181,6 +1196,7 @@ uvm_fault_lower(struct uvm_faultinfo *uf
>        * (PGO_LOCKED).
>        */
>       if (uobj == NULL) {
> +             /* zero fill; don't care neighbor pages */
>               uobjpage = NULL;
>       } else {
>               uobjpage = uvm_fault_lower_lookup(ufi, flt, pages);
> @@ -1236,7 +1252,9 @@ uvm_fault_lower(struct uvm_faultinfo *uf
>                   0, flt->access_type & MASK(ufi->entry), ufi->entry->advice,
>                   PGO_SYNCIO);
>  
> -             /* recover from I/O */
> +             /*
> +              * recover from I/O
> +              */
>               if (result != VM_PAGER_OK) {
>                       KASSERT(result != VM_PAGER_PEND);
>  
> @@ -1343,7 +1361,9 @@ uvm_fault_lower(struct uvm_faultinfo *uf
>                * out of memory resources?
>                */
>               if (anon == NULL || pg == NULL) {
> -                     /* arg!  must unbusy our page and fail or sleep. */
> +                     /*
> +                      * arg!  must unbusy our page and fail or sleep.
> +                      */
>                       if (uobjpage != PGO_DONTCARE) {
>                               uvm_lock_pageq();
>                               uvm_pageactivate(uobjpage);
> @@ -1378,7 +1398,9 @@ uvm_fault_lower(struct uvm_faultinfo *uf
>                       return ERESTART;
>               }
>  
> -             /* fill in the data */
> +             /*
> +              * fill in the data
> +              */
>               if (uobjpage != PGO_DONTCARE) {
>                       counters_inc(uvmexp_counters, flt_prcopy);
>                       /* copy page [pg now dirty] */
> @@ -1559,9 +1581,12 @@ uvm_fault_unwire_locked(vm_map_t map, va
>        * the PAs from the pmap.   we also lock out the page daemon so that
>        * we can call uvm_pageunwire.
>        */
> +
>       uvm_lock_pageq();
>  
> -     /* find the beginning map entry for the region. */
> +     /*
> +      * find the beginning map entry for the region.
> +      */
>       KASSERT(start >= vm_map_min(map) && end <= vm_map_max(map));
>       if (uvm_map_lookup_entry(map, start, &entry) == FALSE)
>               panic("uvm_fault_unwire_locked: address not in map");
> @@ -1570,7 +1595,9 @@ uvm_fault_unwire_locked(vm_map_t map, va
>               if (pmap_extract(pmap, va, &pa) == FALSE)
>                       continue;
>  
> -             /* find the map entry for the current address. */
> +             /*
> +              * find the map entry for the current address.
> +              */
>               KASSERT(va >= entry->start);
>               while (va >= entry->end) {
>                       next = RBT_NEXT(uvm_map_addr, entry);
> @@ -1578,7 +1605,9 @@ uvm_fault_unwire_locked(vm_map_t map, va
>                       entry = next;
>               }
>  
> -             /* if the entry is no longer wired, tell the pmap. */
> +             /*
> +              * if the entry is no longer wired, tell the pmap.
> +              */
>               if (VM_MAPENT_ISWIRED(entry) == 0)
>                       pmap_unwire(pmap, va);
>  
> @@ -1646,7 +1675,9 @@ uvmfault_lookup(struct uvm_faultinfo *uf
>  {
>       vm_map_t tmpmap;
>  
> -     /* init ufi values for lookup. */
> +     /*
> +      * init ufi values for lookup.
> +      */
>       ufi->map = ufi->orig_map;
>       ufi->size = ufi->orig_size;
>  
> @@ -1688,11 +1719,14 @@ uvmfault_lookup(struct uvm_faultinfo *uf
>                       continue;
>               }
>  
> -             /* got it! */
> +             /*
> +              * got it!
> +              */
>               ufi->mapv = ufi->map->timestamp;
>               return(TRUE);
>  
> -     }
> +     }       /* while loop */
> +
>       /*NOTREACHED*/
>  }
>  
> Index: uvm/uvm_init.c
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_init.c,v
> retrieving revision 1.41
> diff -u -p -r1.41 uvm_init.c
> --- uvm/uvm_init.c    28 Dec 2020 14:01:23 -0000      1.41
> +++ uvm/uvm_init.c    19 Mar 2021 08:11:36 -0000
> @@ -69,41 +69,41 @@ vaddr_t vm_min_kernel_address;
>  /*
>   * uvm_init: init the VM system.   called from kern/init_main.c.
>   */
> +
>  void
>  uvm_init(void)
>  {
>       vaddr_t kvm_start, kvm_end;
>  
> -     /* step 0: ensure that the hardware set the page size */
> +     /*
> +      * Ensure that the hardware set the page size.
> +      */
>       if (uvmexp.pagesize == 0) {
>               panic("uvm_init: page size not set");
>       }
> -
> -     /* step 1: set up stats. */
>       averunnable.fscale = FSCALE;
>  
>       /*
> -      * step 2: init the page sub-system.  this includes allocating the
> -      * vm_page structures, and setting up all the page queues (and
> -      * locks).  available memory will be put in the "free" queue.
> -      * kvm_start and kvm_end will be set to the area of kernel virtual
> -      * memory which is available for general use.
> +      * Init the page sub-system.  This includes allocating the vm_page
> +      * structures, and setting up all the page queues (and locks).
> +      * Available memory will be put in the "free" queue, kvm_start and
> +      * kvm_end will be set to the area of kernel virtual memory which
> +      * is available for general use.
>        */
>       uvm_page_init(&kvm_start, &kvm_end);
>  
>       /*
> -      * step 3: init the map sub-system.  allocates the static pool of
> -      * vm_map_entry structures that are used for "special" kernel maps
> -      * (e.g. kernel_map, kmem_map, etc...).
> +      * Init the map sub-system.
> +      *
> +      * Allocates the static pool of vm_map_entry structures that are
> +      * used for "special" kernel maps (e.g. kernel_map, kmem_map, etc...).
>        */
>       uvm_map_init();
>  
>       /*
> -      * step 4: setup the kernel's virtual memory data structures.  this
> -      * includes setting up the kernel_map/kernel_object and the kmem_map/
> -      * kmem_object.
> +      * Setup the kernel's virtual memory data structures.  This includes
> +      * setting up the kernel_map/kernel_object.
>        */
> -
>       uvm_km_init(vm_min_kernel_address, kvm_start, kvm_end);
>  
>       /*
> @@ -112,7 +112,7 @@ uvm_init(void)
>       uvmfault_init();
>  
>       /*
> -      * step 5: init the pmap module.   the pmap module is free to allocate
> +      * Init the pmap module.  The pmap module is free to allocate
>        * memory for its private use (e.g. pvlists).
>        */
>       pmap_init();
> @@ -123,8 +123,8 @@ uvm_init(void)
>       uvm_km_page_init();
>  
>       /*
> -      * step 7: init the kernel memory allocator.   after this call the
> -      * kernel memory allocator (malloc) can be used.
> +      * Make kernel memory allocators ready for use.
> +      * After this call the malloc memory allocator can be used.
>        */
>       kmeminit();
>  
> @@ -134,7 +134,7 @@ uvm_init(void)
>       dma_alloc_init();
>  
>       /*
> -      * step 8: init all pagers and the pager_map.
> +      * Init all pagers and the pager_map.
>        */
>       uvm_pager_init();
>  
> @@ -172,7 +172,7 @@ uvm_init(void)
>               panic("uvm_init: cannot reserve dead beef @0x%x", DEADBEEF1);
>  #endif
>       /*
> -      * init anonymous memory systems
> +      * Init anonymous memory systems.
>        */
>       uvm_anon_init();
>  
> Index: uvm/uvm_io.c
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_io.c,v
> retrieving revision 1.26
> diff -u -p -r1.26 uvm_io.c
> --- uvm/uvm_io.c      9 Jan 2016 11:34:27 -0000       1.26
> +++ uvm/uvm_io.c      19 Mar 2021 08:16:04 -0000
> @@ -87,9 +87,14 @@ uvm_io(vm_map_t map, struct uio *uio, in
>       if (flags & UVM_IO_FIXPROT)
>               extractflags |= UVM_EXTRACT_FIXPROT;
>  
> -     /* step 1: main loop...  while we've got data to move */
> +     /*
> +      * step 1: main loop...  while we've got data to move
> +      */
>       for (/*null*/; togo > 0 ; pageoffset = 0) {
> -             /* step 2: extract mappings from the map into kernel_map */
> +
> +             /*
> +              * step 2: extract mappings from the map into kernel_map
> +              */
>               error = uvm_map_extract(map, baseva, chunksz, &kva,
>                   extractflags);
>               if (error) {
> @@ -105,7 +110,9 @@ uvm_io(vm_map_t map, struct uio *uio, in
>                       break;
>               }
>  
> -             /* step 3: move a chunk of data */
> +             /*
> +              * step 3: move a chunk of data
> +              */
>               sz = chunksz - pageoffset;
>               if (sz > togo)
>                       sz = togo;
> @@ -113,7 +120,10 @@ uvm_io(vm_map_t map, struct uio *uio, in
>               togo -= sz;
>               baseva += chunksz;
>  
> -             /* step 4: unmap the area of kernel memory */
> +
> +             /*
> +              * step 4: unmap the area of kernel memory
> +              */
>               vm_map_lock(kernel_map);
>               TAILQ_INIT(&dead_entries);
>               uvm_unmap_remove(kernel_map, kva, kva+chunksz,
> @@ -121,10 +131,6 @@ uvm_io(vm_map_t map, struct uio *uio, in
>               vm_map_unlock(kernel_map);
>               uvm_unmap_detach(&dead_entries, AMAP_REFALL);
>  
> -             /*
> -              * We defer checking the error return from uiomove until
> -              * here so that we won't leak memory.
> -              */
>               if (error)
>                       break;
>       }
> Index: uvm/uvm_km.c
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_km.c,v
> retrieving revision 1.141
> diff -u -p -r1.141 uvm_km.c
> --- uvm/uvm_km.c      12 Mar 2021 14:15:49 -0000      1.141
> +++ uvm/uvm_km.c      19 Mar 2021 08:16:26 -0000
> @@ -223,7 +223,9 @@ uvm_km_suballoc(struct vm_map *map, vadd
>               uvm_map_setup(submap, vm_map_pmap(map), *min, *max, flags);
>       }
>  
> -     /* now let uvm_map_submap plug in it...  */
> +     /*
> +      * now let uvm_map_submap plug in it...
> +      */
>       if (uvm_map_submap(map, *min, *max, submap) != 0)
>               panic("uvm_km_suballoc: submap allocation failed");
>  
> @@ -541,8 +543,9 @@ uvm_km_valloc_align(struct vm_map *map, 
>       size = round_page(size);
>       kva = vm_map_min(map);          /* hint */
>  
> -     /* allocate some virtual space, demand filled by kernel_object. */
> -
> +     /*
> +      * allocate some virtual space.  will be demand filled by kernel_object.
> +      */
>       if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object,
>           UVM_UNKNOWN_OFFSET, align,
>           UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
> 
