amap operations are already serialized by their own lock, so it is possible to free them without holding the KERNEL_LOCK(). This has been tested by many as part of the UVM unlocking diff.
ok? Index: uvm/uvm_map.c =================================================================== RCS file: /cvs/src/sys/uvm/uvm_map.c,v retrieving revision 1.277 diff -u -p -r1.277 uvm_map.c --- uvm/uvm_map.c 17 Jun 2021 16:10:39 -0000 1.277 +++ uvm/uvm_map.c 1 Oct 2021 17:02:29 -0000 @@ -1570,9 +1570,15 @@ uvm_unmap_detach(struct uvm_map_deadq *d int waitok = flags & UVM_PLA_WAITOK; TAILQ_FOREACH_SAFE(entry, deadq, dfree.deadq, tmp) { + /* Drop reference to amap, if we've got one. */ + if (entry->aref.ar_amap) + amap_unref(entry->aref.ar_amap, + entry->aref.ar_pageoff, + atop(entry->end - entry->start), + flags & AMAP_REFALL); + /* Skip entries for which we have to grab the kernel lock. */ - if (entry->aref.ar_amap || UVM_ET_ISSUBMAP(entry) || - UVM_ET_ISOBJ(entry)) + if (UVM_ET_ISSUBMAP(entry) || UVM_ET_ISOBJ(entry)) continue; TAILQ_REMOVE(deadq, entry, dfree.deadq); @@ -1586,13 +1592,6 @@ uvm_unmap_detach(struct uvm_map_deadq *d while ((entry = TAILQ_FIRST(deadq)) != NULL) { if (waitok) uvm_pause(); - /* Drop reference to amap, if we've got one. */ - if (entry->aref.ar_amap) - amap_unref(entry->aref.ar_amap, - entry->aref.ar_pageoff, - atop(entry->end - entry->start), - flags & AMAP_REFALL); - /* Drop reference to our backing object, if we've got one. */ if (UVM_ET_ISSUBMAP(entry)) { /* ... unlikely to happen, but play it safe */