When a userland program triggers a fault or does a syscall, its SP and/or
PC are checked to be sure they are in expected regions. The result of
this check is cached in "struct proc" so that a full lookup isn't
always necessary.
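For reference, the cache is the small `p_inentry' structure embedded in
"struct proc" (sketched here from memory; see sys/proc.h for the
authoritative definition):

struct p_inentry {
	u_long	ie_serial;	/* serial of the cached lookup */
	vaddr_t	ie_start;	/* start of the cached map entry */
	vaddr_t	ie_end;		/* end of the cached map entry */
};

The recheck is cheap: if the serial still matches and the address falls
inside [ie_start, ie_end), no lookup is needed.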
Currently, when the cache is outdated, the KERNEL_LOCK() is taken before
doing the lookup. This shouldn't be necessary: uvm_map_lookup_entry()
is protected by the `vm_map_lock', which is also taken before the lookup.
This function is already called w/o the KERNEL_LOCK(), for example in the
futex code, so it should be safe to push the lock down there.
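To illustrate why, here is a simplified sketch of the existing
uvm_map_inentry_fix() (the extra return codes of the check function are
omitted): the lookup and the cache refresh already happen entirely under
the map's read lock, so the KERNEL_LOCK() around the call adds nothing:

boolean_t
uvm_map_inentry_fix(struct proc *p, struct p_inentry *ie, vaddr_t addr,
    int (*fn)(vm_map_entry_t), u_long serial)
{
	struct vm_map *map = &p->p_vmspace->vm_map;
	vm_map_entry_t entry;

	if (addr < map->min_offset || addr >= map->max_offset)
		return (FALSE);

	/* The `vm_map_lock' (read mode) serializes the lookup. */
	vm_map_lock_read(map);
	if (!uvm_map_lookup_entry(map, trunc_page(addr), &entry) ||
	    (*fn)(entry) == 0) {
		vm_map_unlock_read(map);
		return (FALSE);
	}
	/* Refresh the cache while the entry cannot change under us. */
	ie->ie_start = entry->start;
	ie->ie_end = entry->end;
	ie->ie_serial = serial;
	vm_map_unlock_read(map);
	return (TRUE);
}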
Removing the KERNEL_LOCK() from this code path reduces the overall time
spent spinning when executing syscalls from 30% to 25% when building
libc on my 16-CPU sparc64.
Even if it doesn't improve performance significantly, I'd like to
have more code exercising the `vm_map_lock', because it's one of the
pieces needed to unlock uvm_fault().
While here, document that `p_spinentry' and `p_pcinentry' are owned by
the current process and don't need any locking.
Tests, comments and oks welcome :o)
Index: uvm/uvm_map.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_map.c,v
retrieving revision 1.247
diff -u -p -r1.247 uvm_map.c
--- uvm/uvm_map.c 9 Sep 2019 20:02:26 -0000 1.247
+++ uvm/uvm_map.c 31 Oct 2019 21:45:51 -0000
@@ -158,6 +158,10 @@ int uvm_map_findspace(struct vm_map*,
vsize_t uvm_map_addr_augment_get(struct vm_map_entry*);
void uvm_map_addr_augment(struct vm_map_entry*);
+int uvm_map_inentry_recheck(u_long, vaddr_t,
+ struct p_inentry *);
+boolean_t uvm_map_inentry_fix(struct proc *, struct p_inentry *,
+ vaddr_t, int (*)(vm_map_entry_t), u_long);
/*
* Tree management functions.
*/
@@ -1868,16 +1872,16 @@ uvm_map_inentry(struct proc *p, struct p
boolean_t ok = TRUE;
if (uvm_map_inentry_recheck(serial, addr, ie)) {
- KERNEL_LOCK();
ok = uvm_map_inentry_fix(p, ie, addr, fn, serial);
if (!ok) {
printf(fmt, p->p_p->ps_comm, p->p_p->ps_pid, p->p_tid,
addr, ie->ie_start, ie->ie_end);
+ KERNEL_LOCK();
p->p_p->ps_acflag |= AMAP;
sv.sival_ptr = (void *)PROC_PC(p);
trapsignal(p, SIGSEGV, 0, SEGV_ACCERR, sv);
+ KERNEL_UNLOCK();
}
- KERNEL_UNLOCK();
}
return (ok);
}
Index: uvm/uvm_map.h
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_map.h,v
retrieving revision 1.62
diff -u -p -r1.62 uvm_map.h
--- uvm/uvm_map.h 14 Jun 2019 05:52:43 -0000 1.62
+++ uvm/uvm_map.h 31 Oct 2019 21:46:00 -0000
@@ -414,12 +414,8 @@ void uvm_unmap_remove(struct vm_map*, v
struct p_inentry;
-int uvm_map_inentry_recheck(u_long serial, vaddr_t,
- struct p_inentry *);
int uvm_map_inentry_sp(vm_map_entry_t);
int uvm_map_inentry_pc(vm_map_entry_t);
-boolean_t uvm_map_inentry_fix(struct proc *, struct p_inentry *,
- vaddr_t addr, int (*fn)(vm_map_entry_t), u_long serial);
boolean_t uvm_map_inentry(struct proc *, struct p_inentry *, vaddr_t addr,
const char *fmt, int (*fn)(vm_map_entry_t), u_long serial);
Index: sys/proc.h
===================================================================
RCS file: /cvs/src/sys/sys/proc.h,v
retrieving revision 1.275
diff -u -p -r1.275 proc.h
--- sys/proc.h 22 Oct 2019 21:19:22 -0000 1.275
+++ sys/proc.h 31 Oct 2019 21:27:43 -0000
@@ -318,6 +318,7 @@ struct p_inentry {
* I immutable after creation
* s scheduler lock
* l read only reference, see lim_read_enter()
+ * o owned (read/modified) by the current proc
*/
struct proc {
TAILQ_ENTRY(proc) p_runq; /* [s] current run/sleep queue */
@@ -332,8 +333,8 @@ struct proc {
/* substructures: */
struct filedesc *p_fd; /* copy of p_p->ps_fd */
struct vmspace *p_vmspace; /* [I] copy of p_p->ps_vmspace */
- struct p_inentry p_spinentry;
- struct p_inentry p_pcinentry;
+ struct p_inentry p_spinentry; /* [o] */
+ struct p_inentry p_pcinentry; /* [o] */
struct plimit *p_limit; /* [l] read ref. of p_p->ps_limit */
int p_flag; /* P_* flags. */