> Date: Sun, 24 Oct 2021 15:14:14 +0100
> From: Martin Pieuchot <[email protected]>
>
> On 24/10/21(Sun) 14:49, Martin Pieuchot wrote:
> > Here's another small tweak I could extract from the UVM unlocking diff.
> > This doesn't introduce any functional change. uvm_km_pgremove() is used
> > in only one place.
>
> Updated diff that also moves pmap_kremove() into the intrsafe variant
> for coherency, as pointed out by kettenis@.  This also reduces the
> differences with NetBSD.
>
> ok?
ok kettenis@
> Index: uvm/uvm_km.c
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_km.c,v
> retrieving revision 1.145
> diff -u -p -r1.145 uvm_km.c
> --- uvm/uvm_km.c 15 Jun 2021 16:38:09 -0000 1.145
> +++ uvm/uvm_km.c 24 Oct 2021 14:08:42 -0000
> @@ -239,8 +239,10 @@ uvm_km_suballoc(struct vm_map *map, vadd
> * the pages right away. (this gets called from uvm_unmap_...).
> */
> void
> -uvm_km_pgremove(struct uvm_object *uobj, vaddr_t start, vaddr_t end)
> +uvm_km_pgremove(struct uvm_object *uobj, vaddr_t startva, vaddr_t endva)
> {
> + const voff_t start = startva - vm_map_min(kernel_map);
> + const voff_t end = endva - vm_map_min(kernel_map);
> struct vm_page *pp;
> voff_t curoff;
> int slot;
> @@ -248,6 +250,7 @@ uvm_km_pgremove(struct uvm_object *uobj,
>
> KASSERT(UVM_OBJ_IS_AOBJ(uobj));
>
> + pmap_remove(pmap_kernel(), startva, endva);
> for (curoff = start ; curoff < end ; curoff += PAGE_SIZE) {
> pp = uvm_pagelookup(uobj, curoff);
> if (pp && pp->pg_flags & PG_BUSY) {
> @@ -301,6 +304,7 @@ uvm_km_pgremove_intrsafe(vaddr_t start,
> panic("uvm_km_pgremove_intrsafe: no page");
> uvm_pagefree(pg);
> }
> + pmap_kremove(start, end - start);
> }
>
> /*
> Index: uvm/uvm_map.c
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_map.c,v
> retrieving revision 1.278
> diff -u -p -r1.278 uvm_map.c
> --- uvm/uvm_map.c 5 Oct 2021 15:37:21 -0000 1.278
> +++ uvm/uvm_map.c 24 Oct 2021 14:09:13 -0000
> @@ -2116,8 +2116,8 @@ uvm_unmap_kill_entry(struct vm_map *map,
> /* Nothing to be done for holes. */
> } else if (map->flags & VM_MAP_INTRSAFE) {
> KASSERT(vm_map_pmap(map) == pmap_kernel());
> +
> uvm_km_pgremove_intrsafe(entry->start, entry->end);
> - pmap_kremove(entry->start, entry->end - entry->start);
> } else if (UVM_ET_ISOBJ(entry) &&
> UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj)) {
> KASSERT(vm_map_pmap(map) == pmap_kernel());
> @@ -2155,10 +2155,8 @@ uvm_unmap_kill_entry(struct vm_map *map,
> * from the object. offsets are always relative
> * to vm_map_min(kernel_map).
> */
> - pmap_remove(pmap_kernel(), entry->start, entry->end);
> - uvm_km_pgremove(entry->object.uvm_obj,
> - entry->start - vm_map_min(kernel_map),
> - entry->end - vm_map_min(kernel_map));
> + uvm_km_pgremove(entry->object.uvm_obj, entry->start,
> + entry->end);
>
> /*
> * null out kernel_object reference, we've just
>
>