I'm looking for testers for this diff. It should be safe to run on amd64, i386 and sparc64, but it has been reported to lock up i386 machines, and I can't reproduce that on any of my own systems, so I'm looking for help. Specifically, I'm looking for people who can build a kernel with this diff and with the MP_LOCKDEBUG option enabled (uncommented) in their GENERIC.MP config, run it on an MP machine, and put some load on it to see whether it locks up and/or panics.
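For anyone willing to test but unsure of the procedure, it looks roughly like this (a sketch only: amd64 paths shown, the diff file name is just a placeholder, and the exact build steps may differ slightly on your tree; the diff applies relative to sys/uvm):

    # apply the diff
    cd /usr/src/sys/uvm
    patch < uvm-mplock.diff        # placeholder file name

    # in your kernel config, make sure this line is uncommented:
    #   option  MP_LOCKDEBUG

    # build and install the MP kernel
    cd /usr/src/sys/arch/amd64/conf
    config GENERIC.MP
    cd ../compile/GENERIC.MP
    make && make install

Then reboot into the new kernel and put the machine under sustained load (a parallel build of the source tree works well) while watching for lockups or panics.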
Being able to move forward with this would make OpenBSD run significantly better on MP systems.

Thanks,

Mark


Index: uvm_addr.c
===================================================================
RCS file: /home/cvs/src/sys/uvm/uvm_addr.c,v
retrieving revision 1.13
diff -u -p -r1.13 uvm_addr.c
--- uvm_addr.c	30 Mar 2015 21:08:40 -0000	1.13
+++ uvm_addr.c	4 Apr 2015 11:08:49 -0000
@@ -287,14 +287,19 @@ uvm_addr_init(void)
 {
 	pool_init(&uaddr_pool, sizeof(struct uvm_addr_state), 0, 0, PR_WAITOK,
 	    "uaddr", NULL);
+	pool_setipl(&uaddr_pool, IPL_VM);
 	pool_init(&uaddr_hint_pool, sizeof(struct uaddr_hint_state), 0, 0,
 	    PR_WAITOK, "uaddrhint", NULL);
+	pool_setipl(&uaddr_hint_pool, IPL_VM);
 	pool_init(&uaddr_bestfit_pool, sizeof(struct uaddr_bestfit_state), 0, 0,
 	    PR_WAITOK, "uaddrbest", NULL);
+	pool_setipl(&uaddr_bestfit_pool, IPL_VM);
 	pool_init(&uaddr_pivot_pool, sizeof(struct uaddr_pivot_state), 0, 0,
 	    PR_WAITOK, "uaddrpivot", NULL);
+	pool_setipl(&uaddr_pivot_pool, IPL_VM);
 	pool_init(&uaddr_rnd_pool, sizeof(struct uaddr_rnd_state), 0, 0,
 	    PR_WAITOK, "uaddrrnd", NULL);
+	pool_setipl(&uaddr_rnd_pool, IPL_VM);
 
 	uaddr_kbootstrap.uaddr_minaddr = PAGE_SIZE;
 	uaddr_kbootstrap.uaddr_maxaddr = -(vaddr_t)PAGE_SIZE;
Index: uvm_map.c
===================================================================
RCS file: /home/cvs/src/sys/uvm/uvm_map.c,v
retrieving revision 1.191
diff -u -p -r1.191 uvm_map.c
--- uvm_map.c	23 Apr 2015 00:49:37 -0000	1.191
+++ uvm_map.c	28 Apr 2015 20:55:03 -0000
@@ -1842,8 +1842,10 @@ uvm_unmap_kill_entry(struct vm_map *map,
 {
 	/* Unwire removed map entry. */
 	if (VM_MAPENT_ISWIRED(entry)) {
+		KERNEL_LOCK();
 		entry->wired_count = 0;
 		uvm_fault_unwire_locked(map, entry->start, entry->end);
+		KERNEL_UNLOCK();
 	}
 
 	/* Entry-type specific code. */
@@ -2422,18 +2424,20 @@ void
 uvm_map_teardown(struct vm_map *map)
 {
 	struct uvm_map_deadq	 dead_entries;
-	int			 i, waitok = 0;
 	struct vm_map_entry	*entry, *tmp;
 #ifdef VMMAP_DEBUG
 	size_t			 numq, numt;
 #endif
+	int			 i;
 
-	if ((map->flags & VM_MAP_INTRSAFE) == 0)
-		waitok = 1;
-	if (waitok) {
-		if (rw_enter(&map->lock, RW_NOSLEEP | RW_WRITE) != 0)
-			panic("uvm_map_teardown: rw_enter failed on free map");
-	}
+	KERNEL_ASSERT_LOCKED();
+	KERNEL_UNLOCK();
+	KERNEL_ASSERT_UNLOCKED();
+
+	KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
+
+	if (rw_enter(&map->lock, RW_NOSLEEP | RW_WRITE) != 0)
+		panic("uvm_map_teardown: rw_enter failed on free map");
 
 	/* Remove address selectors. */
 	uvm_addr_destroy(map->uaddr_exe);
@@ -2466,8 +2470,7 @@ uvm_map_teardown(struct vm_map *map)
 	if ((entry = RB_ROOT(&map->addr)) != NULL)
 		DEAD_ENTRY_PUSH(&dead_entries, entry);
 	while (entry != NULL) {
-		if (waitok)
-			uvm_pause();
+		sched_pause();
 		uvm_unmap_kill_entry(map, entry);
 		if ((tmp = RB_LEFT(entry, daddrs.addr_entry)) != NULL)
 			DEAD_ENTRY_PUSH(&dead_entries, tmp);
@@ -2477,8 +2480,7 @@ uvm_map_teardown(struct vm_map *map)
 		entry = TAILQ_NEXT(entry, dfree.deadq);
 	}
 
-	if (waitok)
-		rw_exit(&map->lock);
+	rw_exit(&map->lock);
 
 #ifdef VMMAP_DEBUG
 	numt = numq = 0;
@@ -2488,7 +2490,10 @@ uvm_map_teardown(struct vm_map *map)
 		numq++;
 	KASSERT(numt == numq);
 #endif
-	uvm_unmap_detach(&dead_entries, waitok ? UVM_PLA_WAITOK : 0);
+	uvm_unmap_detach(&dead_entries, UVM_PLA_WAITOK);
+
+	KERNEL_LOCK();
+	pmap_destroy(map->pmap);
 	map->pmap = NULL;
 }
 
@@ -3185,6 +3190,8 @@ void
 uvmspace_init(struct vmspace *vm, struct pmap *pmap, vaddr_t min, vaddr_t max,
     boolean_t pageable, boolean_t remove_holes)
 {
+	KASSERT(pmap == NULL || pmap == pmap_kernel());
+
 	if (pmap)
 		pmap_reference(pmap);
 	else