Use more KASSERT()s instead of the "if (x) panic()" idiom for sanity checks, and add a couple of local variables to reduce the difference with NetBSD and help with upcoming locking.
ok? Index: uvm/uvm_amap.c =================================================================== RCS file: /cvs/src/sys/uvm/uvm_amap.c,v retrieving revision 1.84 diff -u -p -r1.84 uvm_amap.c --- uvm/uvm_amap.c 25 Sep 2020 08:04:48 -0000 1.84 +++ uvm/uvm_amap.c 1 Oct 2020 12:13:23 -0000 @@ -460,8 +460,7 @@ amap_wipeout(struct vm_amap *amap) map ^= 1 << slot; anon = chunk->ac_anon[slot]; - if (anon == NULL || anon->an_ref == 0) - panic("amap_wipeout: corrupt amap"); + KASSERT(anon != NULL && anon->an_ref != 0); refs = --anon->an_ref; if (refs == 0) { @@ -669,9 +668,7 @@ ReStart: pg = anon->an_page; /* page must be resident since parent is wired */ - if (pg == NULL) - panic("amap_cow_now: non-resident wired page" - " in anon %p", anon); + KASSERT(pg != NULL); /* * if the anon ref count is one, we are safe (the child @@ -740,24 +737,23 @@ ReStart: void amap_splitref(struct vm_aref *origref, struct vm_aref *splitref, vaddr_t offset) { + struct vm_amap *amap = origref->ar_amap; int leftslots; AMAP_B2SLOT(leftslots, offset); - if (leftslots == 0) - panic("amap_splitref: split at zero offset"); + KASSERT(leftslots != 0); - /* now: we have a valid am_mapped array. */ - if (origref->ar_amap->am_nslot - origref->ar_pageoff - leftslots <= 0) - panic("amap_splitref: map size check failed"); + KASSERT(amap->am_nslot - origref->ar_pageoff - leftslots > 0); #ifdef UVM_AMAP_PPREF - /* establish ppref before we add a duplicate reference to the amap */ - if (origref->ar_amap->am_ppref == NULL) - amap_pp_establish(origref->ar_amap); + /* Establish ppref before we add a duplicate reference to the amap. */ + if (amap->am_ppref == NULL) + amap_pp_establish(amap); #endif - splitref->ar_amap = origref->ar_amap; - splitref->ar_amap->am_ref++; /* not a share reference */ + /* Note: not a share reference. 
*/ + amap->am_ref++; + splitref->ar_amap = amap; splitref->ar_pageoff = origref->ar_pageoff + leftslots; } @@ -828,9 +824,7 @@ amap_pp_adjref(struct vm_amap *amap, int * now adjust reference counts in range. merge the first * changed entry with the last unchanged entry if possible. */ - if (lcv != curslot) - panic("amap_pp_adjref: overshot target"); - + KASSERT(lcv == curslot); for (/* lcv already set */; lcv < stopslot ; lcv += len) { pp_getreflen(ppref, lcv, &ref, &len); if (lcv + len > stopslot) { /* goes past end? */ @@ -840,8 +834,7 @@ amap_pp_adjref(struct vm_amap *amap, int len = stopslot - lcv; } ref += adjval; - if (ref < 0) - panic("amap_pp_adjref: negative reference count"); + KASSERT(ref >= 0); if (lcv == prevlcv + prevlen && ref == prevref) { pp_setreflen(ppref, prevlcv, ref, prevlen + len); } else { @@ -1104,20 +1097,17 @@ amap_add(struct vm_aref *aref, vaddr_t o slot = UVM_AMAP_SLOTIDX(slot); if (replace) { - if (chunk->ac_anon[slot] == NULL) - panic("amap_add: replacing null anon"); - if (chunk->ac_anon[slot]->an_page != NULL && - (amap->am_flags & AMAP_SHARED) != 0) { - pmap_page_protect(chunk->ac_anon[slot]->an_page, - PROT_NONE); + struct vm_anon *oanon = chunk->ac_anon[slot]; + + KASSERT(oanon != NULL); + if (oanon->an_page && (amap->am_flags & AMAP_SHARED) != 0) { + pmap_page_protect(oanon->an_page, PROT_NONE); /* * XXX: suppose page is supposed to be wired somewhere? 
*/ } } else { /* !replace */ - if (chunk->ac_anon[slot] != NULL) - panic("amap_add: slot in use"); - + KASSERT(chunk->ac_anon[slot] == NULL); chunk->ac_usedmap |= 1 << slot; amap->am_nused++; } @@ -1140,12 +1130,10 @@ amap_unadd(struct vm_aref *aref, vaddr_t slot += aref->ar_pageoff; KASSERT(slot < amap->am_nslot); chunk = amap_chunk_get(amap, slot, 0, PR_NOWAIT); - if (chunk == NULL) - panic("amap_unadd: chunk for slot %d not present", slot); + KASSERT(chunk != NULL); slot = UVM_AMAP_SLOTIDX(slot); - if (chunk->ac_anon[slot] == NULL) - panic("amap_unadd: nothing there"); + KASSERT(chunk->ac_anon[slot] != NULL); chunk->ac_anon[slot] = NULL; chunk->ac_usedmap &= ~(1 << slot);