Module Name:    src
Committed By:   ad
Date:           Sun Jan 12 18:37:10 UTC 2020

Modified Files:
        src/sys/kern: kern_rwlock.c vfs_vnops.c
        src/sys/sys: lwp.h

Log Message:
- Shuffle some items around in struct lwp to save space.  Remove an unused
  item or two.

- For lockstat, get a useful callsite for vnode locks (caller to vn_lock()).


To generate a diff of this commit:
cvs rdiff -u -r1.59 -r1.60 src/sys/kern/kern_rwlock.c
cvs rdiff -u -r1.204 -r1.205 src/sys/kern/vfs_vnops.c
cvs rdiff -u -r1.193 -r1.194 src/sys/sys/lwp.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/kern/kern_rwlock.c
diff -u src/sys/kern/kern_rwlock.c:1.59 src/sys/kern/kern_rwlock.c:1.60
--- src/sys/kern/kern_rwlock.c:1.59	Mon Dec  9 21:02:10 2019
+++ src/sys/kern/kern_rwlock.c	Sun Jan 12 18:37:10 2020
@@ -1,7 +1,8 @@
-/*	$NetBSD: kern_rwlock.c,v 1.59 2019/12/09 21:02:10 ad Exp $	*/
+/*	$NetBSD: kern_rwlock.c,v 1.60 2020/01/12 18:37:10 ad Exp $	*/
 
 /*-
- * Copyright (c) 2002, 2006, 2007, 2008, 2009, 2019 The NetBSD Foundation, Inc.
+ * Copyright (c) 2002, 2006, 2007, 2008, 2009, 2019, 2020
+ *     The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
@@ -38,7 +39,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_rwlock.c,v 1.59 2019/12/09 21:02:10 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_rwlock.c,v 1.60 2020/01/12 18:37:10 ad Exp $");
 
 #define	__RWLOCK_PRIVATE
 
@@ -420,9 +421,13 @@ rw_vector_enter(krwlock_t *rw, const krw
 	}
 	KPREEMPT_ENABLE(curlwp);
 
-	LOCKSTAT_EVENT(lsflag, rw, LB_RWLOCK |
-	    (op == RW_WRITER ? LB_SLEEP1 : LB_SLEEP2), slpcnt, slptime);
-	LOCKSTAT_EVENT(lsflag, rw, LB_RWLOCK | LB_SPIN, spincnt, spintime);
+	LOCKSTAT_EVENT_RA(lsflag, rw, LB_RWLOCK |
+	    (op == RW_WRITER ? LB_SLEEP1 : LB_SLEEP2), slpcnt, slptime,
+	    (l->l_rwcallsite != 0 ? l->l_rwcallsite :
+	      (uintptr_t)__builtin_return_address(0)));
+	LOCKSTAT_EVENT_RA(lsflag, rw, LB_RWLOCK | LB_SPIN, spincnt, spintime,
+	    (l->l_rwcallsite != 0 ? l->l_rwcallsite :
+	      (uintptr_t)__builtin_return_address(0)));
 	LOCKSTAT_EXIT(lsflag);
 
 	RW_DASSERT(rw, (op != RW_READER && RW_OWNER(rw) == curthread) ||

Index: src/sys/kern/vfs_vnops.c
diff -u src/sys/kern/vfs_vnops.c:1.204 src/sys/kern/vfs_vnops.c:1.205
--- src/sys/kern/vfs_vnops.c:1.204	Mon Dec 16 22:47:54 2019
+++ src/sys/kern/vfs_vnops.c	Sun Jan 12 18:37:10 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: vfs_vnops.c,v 1.204 2019/12/16 22:47:54 ad Exp $	*/
+/*	$NetBSD: vfs_vnops.c,v 1.205 2020/01/12 18:37:10 ad Exp $	*/
 
 /*-
  * Copyright (c) 2009 The NetBSD Foundation, Inc.
@@ -66,7 +66,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vfs_vnops.c,v 1.204 2019/12/16 22:47:54 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vfs_vnops.c,v 1.205 2020/01/12 18:37:10 ad Exp $");
 
 #include "veriexec.h"
 
@@ -1030,6 +1030,7 @@ vn_mmap(struct file *fp, off_t *offp, si
 int
 vn_lock(struct vnode *vp, int flags)
 {
+	struct lwp *l;
 	int error;
 
 #if 0
@@ -1044,10 +1045,17 @@ vn_lock(struct vnode *vp, int flags)
 		WAPBL_JUNLOCK_ASSERT(wapbl_vptomp(vp));
 #endif
 
+	/* Get a more useful report for lockstat. */
+	l = curlwp;
+	KASSERT(l->l_rwcallsite == 0);
+	l->l_rwcallsite = (uintptr_t)__builtin_return_address(0);	
+
 	error = VOP_LOCK(vp, flags);
 	if ((flags & LK_RETRY) != 0 && error == ENOENT)
 		error = VOP_LOCK(vp, flags);
 
+	l->l_rwcallsite = 0;
+
 	KASSERT((flags & LK_RETRY) == 0 || (flags & LK_NOWAIT) != 0 ||
 	    error == 0);
 

Index: src/sys/sys/lwp.h
diff -u src/sys/sys/lwp.h:1.193 src/sys/sys/lwp.h:1.194
--- src/sys/sys/lwp.h:1.193	Wed Jan  8 17:38:43 2020
+++ src/sys/sys/lwp.h	Sun Jan 12 18:37:10 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: lwp.h,v 1.193 2020/01/08 17:38:43 ad Exp $	*/
+/*	$NetBSD: lwp.h,v 1.194 2020/01/12 18:37:10 ad Exp $	*/
 
 /*
  * Copyright (c) 2001, 2006, 2007, 2008, 2009, 2010, 2019
@@ -92,17 +92,17 @@ struct lwp {
 	kmutex_t * volatile l_mutex;	/* l: ptr to mutex on sched state */
 	void		*l_addr;	/* l: PCB address; use lwp_getpcb() */
 	struct mdlwp	l_md;		/* l: machine-dependent fields. */
-	int		l_flag;		/* l: misc flag values */
-	int		l_stat;		/* l: overall LWP status */
 	struct bintime 	l_rtime;	/* l: real time */
 	struct bintime	l_stime;	/* l: start time (while ONPROC) */
+	int		l_flag;		/* l: misc flag values */
 	u_int		l_swtime;	/* l: time swapped in or out */
 	u_int		l_rticks;	/* l: Saved start time of run */
 	u_int		l_rticksum;	/* l: Sum of ticks spent running */
 	u_int		l_slpticks;	/* l: Saved start time of sleep */
 	u_int		l_slpticksum;	/* l: Sum of ticks spent sleeping */
 	int		l_biglocks;	/* l: biglock count before sleep */
-	int		l_class;	/* l: scheduling class */
+	short		l_stat;		/* l: overall LWP status */
+	short		l_class;	/* l: scheduling class */
 	int		l_kpriority;	/* !: has kernel priority boost */
 	pri_t		l_kpribase;	/* !: kernel priority base level */
 	pri_t		l_priority;	/* l: scheduler priority */
@@ -110,13 +110,13 @@ struct lwp {
 	pri_t		l_protectprio;	/* l: for PTHREAD_PRIO_PROTECT */
 	pri_t		l_auxprio;	/* l: max(inherit,protect) priority */
 	int		l_protectdepth;	/* l: for PTHREAD_PRIO_PROTECT */
-	SLIST_HEAD(, turnstile) l_pi_lenders; /* l: ts lending us priority */
-	volatile uint64_t l_ncsw;	/* l: total context switches */
-	volatile uint64_t l_nivcsw;	/* l: involuntary context switches */
 	u_int		l_cpticks;	/* (: Ticks of CPU time */
+	psetid_t	l_psid;		/* l: assigned processor-set ID */
 	fixpt_t		l_pctcpu;	/* p: %cpu during l_swtime */
 	fixpt_t		l_estcpu;	/* l: cpu time for SCHED_4BSD */
-	psetid_t	l_psid;		/* l: assigned processor-set ID */
+	volatile uint64_t l_ncsw;	/* l: total context switches */
+	volatile uint64_t l_nivcsw;	/* l: involuntary context switches */
+	SLIST_HEAD(, turnstile) l_pi_lenders; /* l: ts lending us priority */
 	struct cpu_info *l_target_cpu;	/* l: target CPU to migrate */
 	struct lwpctl	*l_lwpctl;	/* p: lwpctl block kernel address */
 	struct lcpage	*l_lcpage;	/* p: lwpctl containing page */
@@ -129,11 +129,9 @@ struct lwp {
 	wchan_t		l_wchan;	/* l: sleep address */
 	const char	*l_wmesg;	/* l: reason for sleep */
 	struct sleepq	*l_sleepq;	/* l: current sleep queue */
-	int		l_sleeperr;	/* !: error before unblock */
-	u_int		l_slptime;	/* l: time since last blocked */
 	callout_t	l_timeout_ch;	/* !: callout for tsleep */
-	u_int		l_emap_gen;	/* !: emap generation number */
 	kcondvar_t	l_waitcv;	/* a: vfork() wait */
+	u_int		l_slptime;	/* l: time since last blocked */
 	bool		l_vforkwaiting;	/* a: vfork() waiting */
 
 #if PCU_UNIT_COUNT > 0
@@ -142,21 +140,21 @@ struct lwp {
 #endif
 
 	/* Process level and global state, misc. */
+	lwpid_t		l_lid;		/* (: LWP identifier; local to proc */
 	LIST_ENTRY(lwp)	l_list;		/* a: entry on list of all LWPs */
 	void		*l_ctxlink;	/* p: uc_link {get,set}context */
 	struct proc	*l_proc;	/* p: parent process */
 	LIST_ENTRY(lwp)	l_sibling;	/* p: entry on proc's list of LWPs */
+	char		*l_name;	/* (: name, optional */
 	lwpid_t		l_waiter;	/* p: first LWP waiting on us */
 	lwpid_t 	l_waitingfor;	/* p: specific LWP we are waiting on */
 	int		l_prflag;	/* p: process level flags */
 	u_int		l_refcnt;	/* p: reference count on this LWP */
-	lwpid_t		l_lid;		/* (: LWP identifier; local to proc */
-	char		*l_name;	/* (: name, optional */
 
 	/* State of select() or poll(). */
 	int		l_selflag;	/* S: polling state flags */
-	SLIST_HEAD(,selinfo) l_selwait;	/* S: descriptors waited on */
 	int		l_selret;	/* S: return value of select/poll */
+	SLIST_HEAD(,selinfo) l_selwait;	/* S: descriptors waited on */
 	uintptr_t	l_selrec;	/* !: argument for selrecord() */
 	struct selcluster *l_selcluster;/* !: associated cluster data */
 	void *		l_selbits;	/* (: select() bit-field */
@@ -184,7 +182,6 @@ struct lwp {
 	struct filedesc	*l_fd;		/* !: cached copy of proc::p_fd */
 	void		*l_emuldata;	/* !: kernel lwp-private data */
 	struct fstrans_lwp_info *l_fstrans; /* (: fstrans private data */
-	u_int		l_cv_signalled;	/* c: restarted by cv_signal() */
 	u_short		l_shlocks;	/* !: lockdebug: shared locks held */
 	u_short		l_exlocks;	/* !: lockdebug: excl. locks held */
 	u_short		l_psrefs;	/* !: count of psref held */
@@ -199,6 +196,7 @@ struct lwp {
 	uintptr_t	l_pfailaddr;	/* !: for kernel preemption */
 	uintptr_t	l_pfaillock;	/* !: for kernel preemption */
 	_TAILQ_HEAD(,struct lockdebug,volatile) l_ld_locks;/* !: locks held by LWP */
+	uintptr_t	l_rwcallsite;	/* !: rwlock actual callsite */
 	int		l_tcgen;	/* !: for timecounter removal */
 
 	/* These are only used by 'options SYSCALL_TIMES'. */

Reply via email to the author.