Re: Help me testing the netlock

2016-10-14 Thread Simon Mages
Hi,

i did some performance tests in current with and without your diff.

There is no difference in performance.

I will try to do performance tests with current on a regular basis now.

2016-10-05 10:15 GMT+02:00, Martin Pieuchot :
> On 10/04/16 16:44, Martin Pieuchot wrote:
>> On 10/03/16 16:43, Martin Pieuchot wrote:
>>> Diff below introduces a single write lock that will be used to serialize
>>> access to ip_output().
>>>
>>> This lock will be then split in multiple readers and writers to allow
>>> multiple forwarding paths to run in parallel of each others but still
>>> serialized with the socket layer.
>>>
>>> I'm currently looking for people wanting to run this diff and try to
>>> break it.  In other words, your machine might panic with it and if it
>>> does report the panic to me so the diff can be improved.
>>>
>>> I tested NFS v2 and v3 so I'm quite confident, but I might have missed
>>> some obvious stuff.
>>
>> Updated diff attached including a fix for syn_cache_timer(), problem
>> reported by Chris Jackman.
>
> Thanks to all testers!
>
> Here's a newer version that includes a fix for rt_timer_timer() also
> found by Chris Jackman.
>
>



Re: Help me testing the netlock

2016-10-06 Thread David Hill
Hi Martin -

As soon as machine boots and first packet hits the VPN, it hits the
assertion:
rw_status() == RW_WRITE

(hand-typed)

TID PID UID PRFLAGS PFLAGS  CPU COMMAND
*78967  78967   0   0x14000 0x200   1   crypto

__assert at 0x25
ip_output() at 0x843
ipsp_process_done() at 0x2ad
esp_output_cb at 0x135
taskq_thnread at 0x6c



Re: Help me testing the netlock

2016-10-06 Thread Lampshade
> > panic: rw_enter: netlock locking against myself
> 
> Your tree is not up-to-date, you're missing sys/net/route.c r1.332.

My bad.
With updated sources I have tested this for a few minutes
and I haven't encountered any kernel panic.
CVS sync, wget to download base system from mirror,
browsing web with Chromium (with privoxy and relayd).
I have amd64 on laptop.



Re: Help me testing the netlock

2016-10-06 Thread Martin Pieuchot

On 10/05/16 20:52, Lampshade wrote:

panic: rw_enter: netlock locking against myself


Your tree is not up-to-date, you're missing sys/net/route.c r1.332.

rt_timer_timer() is now called from the softclock thread.



Re: Help me testing the netlock

2016-10-05 Thread Lampshade
panic: rw_enter: netlock locking against myself

 TIDPID UIDPRFLAGS
 12705 12705 1041 0x23
*62630 62630 1000 0x32
   
PFLAGS CPU COMMAND
0  1chrome
0  0 Xorg

panic()
rw_enter()
rw_enter_write()
rw_enter_timer()
timeout_run()
softlock()
softintr_dispatch()
Xsoftlock()
---interrupt---
param.c at 0x8
Bad frame pointer: 0x800032d78 b14

Above was similar with ddbcpu 0
Following machine
ddbcpu 1

x86_ipi_handler()
Xresume_lapic_ipi_
---interrupt---
__mp_lock()
syscall()
---syscall (number 27)---
end of kernel



Re: Help me testing the netlock

2016-10-05 Thread Martin Pieuchot

On 10/04/16 16:44, Martin Pieuchot wrote:

On 10/03/16 16:43, Martin Pieuchot wrote:

Diff below introduces a single write lock that will be used to serialize
access to ip_output().

This lock will be then split in multiple readers and writers to allow
multiple forwarding paths to run in parallel of each others but still
serialized with the socket layer.

I'm currently looking for people wanting to run this diff and try to
break it.  In other words, your machine might panic with it and if it
does report the panic to me so the diff can be improved.

I tested NFS v2 and v3 so I'm quite confident, but I might have missed
some obvious stuff.


Updated diff attached including a fix for syn_cache_timer(), problem
reported by Chris Jackman.


Thanks to all testers!

Here's a newer version that includes a fix for rt_timer_timer() also
found by Chris Jackman.

Index: kern/kern_rwlock.c
===
RCS file: /cvs/src/sys/kern/kern_rwlock.c,v
retrieving revision 1.27
diff -u -p -r1.27 kern_rwlock.c
--- kern/kern_rwlock.c	14 Mar 2015 07:33:42 -	1.27
+++ kern/kern_rwlock.c	5 Oct 2016 08:11:09 -
@@ -98,6 +98,12 @@ rw_enter_read(struct rwlock *rwl)
 		membar_enter();
 }
 
+#if 1
+#include 
+#include 
+#include 
+#endif
+
 void
 rw_enter_write(struct rwlock *rwl)
 {
@@ -108,6 +114,15 @@ rw_enter_write(struct rwlock *rwl)
 		rw_enter(rwl, RW_WRITE);
 	else
 		membar_enter();
+
+#if 1
+	if ((rwl == ) && (splassert_ctl == 3)) {
+		printf("ENTER::%d::", cpu_number());
+		db_stack_trace_print(
+		(db_expr_t)__builtin_frame_address(1),
+		TRUE, 1, "", printf);
+	}
+#endif
 }
 
 void
@@ -129,6 +144,15 @@ rw_exit_write(struct rwlock *rwl)
 	unsigned long owner = rwl->rwl_owner;
 
 	rw_assert_wrlock(rwl);
+
+#if 1
+	if ((rwl == ) && (splassert_ctl == 3)) {
+		printf("EXIT::%d::", cpu_number());
+		db_stack_trace_print(
+		(db_expr_t)__builtin_frame_address(1),
+		TRUE, 1, "", printf);
+	}
+#endif
 
 	membar_exit();
 	if (__predict_false((owner & RWLOCK_WAIT) ||
Index: kern/sys_socket.c
===
RCS file: /cvs/src/sys/kern/sys_socket.c,v
retrieving revision 1.21
diff -u -p -r1.21 sys_socket.c
--- kern/sys_socket.c	5 Dec 2015 10:11:53 -	1.21
+++ kern/sys_socket.c	5 Oct 2016 08:11:09 -
@@ -131,8 +131,10 @@ soo_poll(struct file *fp, int events, st
 {
 	struct socket *so = fp->f_data;
 	int revents = 0;
-	int s = splsoftnet();
+	int s;
 
+	rw_enter_write();
+	s = splsoftnet();
 	if (events & (POLLIN | POLLRDNORM)) {
 		if (soreadable(so))
 			revents |= events & (POLLIN | POLLRDNORM);
@@ -159,6 +161,7 @@ soo_poll(struct file *fp, int events, st
 		}
 	}
 	splx(s);
+	rw_exit_write();
 	return (revents);
 }
 
Index: kern/uipc_socket.c
===
RCS file: /cvs/src/sys/kern/uipc_socket.c,v
retrieving revision 1.161
diff -u -p -r1.161 uipc_socket.c
--- kern/uipc_socket.c	20 Sep 2016 14:27:43 -	1.161
+++ kern/uipc_socket.c	5 Oct 2016 08:11:10 -
@@ -123,6 +123,7 @@ socreate(int dom, struct socket **aso, i
 		return (EPROTONOSUPPORT);
 	if (prp->pr_type != type)
 		return (EPROTOTYPE);
+	rw_enter_write();
 	s = splsoftnet();
 	so = pool_get(_pool, PR_WAITOK | PR_ZERO);
 	TAILQ_INIT(>so_q0);
@@ -142,9 +143,11 @@ socreate(int dom, struct socket **aso, i
 		so->so_state |= SS_NOFDREF;
 		sofree(so);
 		splx(s);
+		rw_exit_write();
 		return (error);
 	}
 	splx(s);
+	rw_exit_write();
 	*aso = so;
 	return (0);
 }
@@ -152,11 +155,13 @@ socreate(int dom, struct socket **aso, i
 int
 sobind(struct socket *so, struct mbuf *nam, struct proc *p)
 {
-	int s = splsoftnet();
-	int error;
+	int s, error;
 
+	rw_enter_write();
+	s = splsoftnet();
 	error = (*so->so_proto->pr_usrreq)(so, PRU_BIND, NULL, nam, NULL, p);
 	splx(s);
+	rw_exit_write();
 	return (error);
 }
 
@@ -171,11 +176,13 @@ solisten(struct socket *so, int backlog)
 	if (isspliced(so) || issplicedback(so))
 		return (EOPNOTSUPP);
 #endif /* SOCKET_SPLICE */
+	rw_enter_write();
 	s = splsoftnet();
 	error = (*so->so_proto->pr_usrreq)(so, PRU_LISTEN, NULL, NULL, NULL,
 	curproc);
 	if (error) {
 		splx(s);
+		rw_exit_write();
 		return (error);
 	}
 	if (TAILQ_FIRST(>so_q) == NULL)
@@ -186,6 +193,7 @@ solisten(struct socket *so, int backlog)
 		backlog = sominconn;
 	so->so_qlimit = backlog;
 	splx(s);
+	rw_exit_write();
 	return (0);
 }
 
@@ -196,6 +204,7 @@ solisten(struct socket *so, int backlog)
 void
 sofree(struct socket *so)
 {
+	rw_assert_wrlock();
 	splsoftassert(IPL_SOFTNET);
 
 	if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0)
@@ -234,9 +243,10 @@ int
 soclose(struct socket *so)
 {
 	struct socket *so2;
-	int s = splsoftnet();		/* conservative */
-	int error = 0;
+	int s, error = 0;
 
+	rw_enter_write();
+	s = splsoftnet();		/* conservative */
 	if (so->so_options & SO_ACCEPTCONN) {
 		while ((so2 = TAILQ_FIRST(>so_q0)) != NULL) {
 			(void) soqremque(so2, 0);
@@ 

Re: Help me testing the netlock

2016-10-05 Thread Mike Larkin
On Tue, Oct 04, 2016 at 04:44:29PM +0200, Martin Pieuchot wrote:
> On 10/03/16 16:43, Martin Pieuchot wrote:
> > Diff below introduces a single write lock that will be used to serialize
> > access to ip_output().
> > 
> > This lock will be then split in multiple readers and writers to allow
> > multiple forwarding paths to run in parallel of each others but still
> > serialized with the socket layer.
> > 
> > I'm currently looking for people wanting to run this diff and try to
> > break it.  In other words, your machine might panic with it and if it
> > does report the panic to me so the diff can be improved.
> > 
> > I tested NFS v2 and v3 so I'm quite confident, but I might have missed
> > some obvious stuff.
> 
> Updated diff attached including a fix for syn_cache_timer(), problem
> reported by Chris Jackman.
> 

So far, so good, on i386 and amd64 vmm(4) VMs. booted, did a pkg_add 
upgrade, and cvsync.

No issues seen so far.

-ml

> Index: kern/kern_rwlock.c
> ===
> RCS file: /cvs/src/sys/kern/kern_rwlock.c,v
> retrieving revision 1.27
> diff -u -p -r1.27 kern_rwlock.c
> --- kern/kern_rwlock.c14 Mar 2015 07:33:42 -  1.27
> +++ kern/kern_rwlock.c4 Oct 2016 14:40:29 -
> @@ -98,6 +98,12 @@ rw_enter_read(struct rwlock *rwl)
>   membar_enter();
>  }
>  
> +#if 1
> +#include 
> +#include 
> +#include 
> +#endif
> +
>  void
>  rw_enter_write(struct rwlock *rwl)
>  {
> @@ -108,6 +114,15 @@ rw_enter_write(struct rwlock *rwl)
>   rw_enter(rwl, RW_WRITE);
>   else
>   membar_enter();
> +
> +#if 1
> + if ((rwl == ) && (splassert_ctl == 3)) {
> + printf("ENTER::%d::", cpu_number());
> + db_stack_trace_print(
> + (db_expr_t)__builtin_frame_address(1),
> + TRUE, 1, "", printf);
> + }
> +#endif
>  }
>  
>  void
> @@ -129,6 +144,15 @@ rw_exit_write(struct rwlock *rwl)
>   unsigned long owner = rwl->rwl_owner;
>  
>   rw_assert_wrlock(rwl);
> +
> +#if 1
> + if ((rwl == ) && (splassert_ctl == 3)) {
> + printf("EXIT::%d::", cpu_number());
> + db_stack_trace_print(
> + (db_expr_t)__builtin_frame_address(1),
> + TRUE, 1, "", printf);
> + }
> +#endif
>  
>   membar_exit();
>   if (__predict_false((owner & RWLOCK_WAIT) ||
> Index: kern/sys_socket.c
> ===
> RCS file: /cvs/src/sys/kern/sys_socket.c,v
> retrieving revision 1.21
> diff -u -p -r1.21 sys_socket.c
> --- kern/sys_socket.c 5 Dec 2015 10:11:53 -   1.21
> +++ kern/sys_socket.c 4 Oct 2016 14:40:29 -
> @@ -131,8 +131,10 @@ soo_poll(struct file *fp, int events, st
>  {
>   struct socket *so = fp->f_data;
>   int revents = 0;
> - int s = splsoftnet();
> + int s;
>  
> + rw_enter_write();
> + s = splsoftnet();
>   if (events & (POLLIN | POLLRDNORM)) {
>   if (soreadable(so))
>   revents |= events & (POLLIN | POLLRDNORM);
> @@ -159,6 +161,7 @@ soo_poll(struct file *fp, int events, st
>   }
>   }
>   splx(s);
> + rw_exit_write();
>   return (revents);
>  }
>  
> Index: kern/uipc_socket.c
> ===
> RCS file: /cvs/src/sys/kern/uipc_socket.c,v
> retrieving revision 1.161
> diff -u -p -r1.161 uipc_socket.c
> --- kern/uipc_socket.c20 Sep 2016 14:27:43 -  1.161
> +++ kern/uipc_socket.c4 Oct 2016 14:40:29 -
> @@ -123,6 +123,7 @@ socreate(int dom, struct socket **aso, i
>   return (EPROTONOSUPPORT);
>   if (prp->pr_type != type)
>   return (EPROTOTYPE);
> + rw_enter_write();
>   s = splsoftnet();
>   so = pool_get(_pool, PR_WAITOK | PR_ZERO);
>   TAILQ_INIT(>so_q0);
> @@ -142,9 +143,11 @@ socreate(int dom, struct socket **aso, i
>   so->so_state |= SS_NOFDREF;
>   sofree(so);
>   splx(s);
> + rw_exit_write();
>   return (error);
>   }
>   splx(s);
> + rw_exit_write();
>   *aso = so;
>   return (0);
>  }
> @@ -152,11 +155,13 @@ socreate(int dom, struct socket **aso, i
>  int
>  sobind(struct socket *so, struct mbuf *nam, struct proc *p)
>  {
> - int s = splsoftnet();
> - int error;
> + int s, error;
>  
> + rw_enter_write();
> + s = splsoftnet();
>   error = (*so->so_proto->pr_usrreq)(so, PRU_BIND, NULL, nam, NULL, p);
>   splx(s);
> + rw_exit_write();
>   return (error);
>  }
>  
> @@ -171,11 +176,13 @@ solisten(struct socket *so, int backlog)
>   if (isspliced(so) || issplicedback(so))
>   return (EOPNOTSUPP);
>  #endif /* SOCKET_SPLICE */
> + rw_enter_write();
>   s = splsoftnet();
>   error = (*so->so_proto->pr_usrreq)(so, PRU_LISTEN, NULL, NULL, NULL,
>   curproc);
>   

Re: Help me testing the netlock

2016-10-04 Thread Martin Pieuchot

On 10/03/16 16:43, Martin Pieuchot wrote:

Diff below introduces a single write lock that will be used to serialize
access to ip_output().

This lock will be then split in multiple readers and writers to allow
multiple forwarding paths to run in parallel of each others but still
serialized with the socket layer.

I'm currently looking for people wanting to run this diff and try to
break it.  In other words, your machine might panic with it and if it
does report the panic to me so the diff can be improved.

I tested NFS v2 and v3 so I'm quite confident, but I might have missed
some obvious stuff.


Updated diff attached including a fix for syn_cache_timer(), problem
reported by Chris Jackman.

Index: kern/kern_rwlock.c
===
RCS file: /cvs/src/sys/kern/kern_rwlock.c,v
retrieving revision 1.27
diff -u -p -r1.27 kern_rwlock.c
--- kern/kern_rwlock.c	14 Mar 2015 07:33:42 -	1.27
+++ kern/kern_rwlock.c	4 Oct 2016 14:40:29 -
@@ -98,6 +98,12 @@ rw_enter_read(struct rwlock *rwl)
 		membar_enter();
 }
 
+#if 1
+#include 
+#include 
+#include 
+#endif
+
 void
 rw_enter_write(struct rwlock *rwl)
 {
@@ -108,6 +114,15 @@ rw_enter_write(struct rwlock *rwl)
 		rw_enter(rwl, RW_WRITE);
 	else
 		membar_enter();
+
+#if 1
+	if ((rwl == ) && (splassert_ctl == 3)) {
+		printf("ENTER::%d::", cpu_number());
+		db_stack_trace_print(
+		(db_expr_t)__builtin_frame_address(1),
+		TRUE, 1, "", printf);
+	}
+#endif
 }
 
 void
@@ -129,6 +144,15 @@ rw_exit_write(struct rwlock *rwl)
 	unsigned long owner = rwl->rwl_owner;
 
 	rw_assert_wrlock(rwl);
+
+#if 1
+	if ((rwl == ) && (splassert_ctl == 3)) {
+		printf("EXIT::%d::", cpu_number());
+		db_stack_trace_print(
+		(db_expr_t)__builtin_frame_address(1),
+		TRUE, 1, "", printf);
+	}
+#endif
 
 	membar_exit();
 	if (__predict_false((owner & RWLOCK_WAIT) ||
Index: kern/sys_socket.c
===
RCS file: /cvs/src/sys/kern/sys_socket.c,v
retrieving revision 1.21
diff -u -p -r1.21 sys_socket.c
--- kern/sys_socket.c	5 Dec 2015 10:11:53 -	1.21
+++ kern/sys_socket.c	4 Oct 2016 14:40:29 -
@@ -131,8 +131,10 @@ soo_poll(struct file *fp, int events, st
 {
 	struct socket *so = fp->f_data;
 	int revents = 0;
-	int s = splsoftnet();
+	int s;
 
+	rw_enter_write();
+	s = splsoftnet();
 	if (events & (POLLIN | POLLRDNORM)) {
 		if (soreadable(so))
 			revents |= events & (POLLIN | POLLRDNORM);
@@ -159,6 +161,7 @@ soo_poll(struct file *fp, int events, st
 		}
 	}
 	splx(s);
+	rw_exit_write();
 	return (revents);
 }
 
Index: kern/uipc_socket.c
===
RCS file: /cvs/src/sys/kern/uipc_socket.c,v
retrieving revision 1.161
diff -u -p -r1.161 uipc_socket.c
--- kern/uipc_socket.c	20 Sep 2016 14:27:43 -	1.161
+++ kern/uipc_socket.c	4 Oct 2016 14:40:29 -
@@ -123,6 +123,7 @@ socreate(int dom, struct socket **aso, i
 		return (EPROTONOSUPPORT);
 	if (prp->pr_type != type)
 		return (EPROTOTYPE);
+	rw_enter_write();
 	s = splsoftnet();
 	so = pool_get(_pool, PR_WAITOK | PR_ZERO);
 	TAILQ_INIT(>so_q0);
@@ -142,9 +143,11 @@ socreate(int dom, struct socket **aso, i
 		so->so_state |= SS_NOFDREF;
 		sofree(so);
 		splx(s);
+		rw_exit_write();
 		return (error);
 	}
 	splx(s);
+	rw_exit_write();
 	*aso = so;
 	return (0);
 }
@@ -152,11 +155,13 @@ socreate(int dom, struct socket **aso, i
 int
 sobind(struct socket *so, struct mbuf *nam, struct proc *p)
 {
-	int s = splsoftnet();
-	int error;
+	int s, error;
 
+	rw_enter_write();
+	s = splsoftnet();
 	error = (*so->so_proto->pr_usrreq)(so, PRU_BIND, NULL, nam, NULL, p);
 	splx(s);
+	rw_exit_write();
 	return (error);
 }
 
@@ -171,11 +176,13 @@ solisten(struct socket *so, int backlog)
 	if (isspliced(so) || issplicedback(so))
 		return (EOPNOTSUPP);
 #endif /* SOCKET_SPLICE */
+	rw_enter_write();
 	s = splsoftnet();
 	error = (*so->so_proto->pr_usrreq)(so, PRU_LISTEN, NULL, NULL, NULL,
 	curproc);
 	if (error) {
 		splx(s);
+		rw_exit_write();
 		return (error);
 	}
 	if (TAILQ_FIRST(>so_q) == NULL)
@@ -186,6 +193,7 @@ solisten(struct socket *so, int backlog)
 		backlog = sominconn;
 	so->so_qlimit = backlog;
 	splx(s);
+	rw_exit_write();
 	return (0);
 }
 
@@ -196,6 +204,7 @@ solisten(struct socket *so, int backlog)
 void
 sofree(struct socket *so)
 {
+	rw_assert_wrlock();
 	splsoftassert(IPL_SOFTNET);
 
 	if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0)
@@ -234,9 +243,10 @@ int
 soclose(struct socket *so)
 {
 	struct socket *so2;
-	int s = splsoftnet();		/* conservative */
-	int error = 0;
+	int s, error = 0;
 
+	rw_enter_write();
+	s = splsoftnet();		/* conservative */
 	if (so->so_options & SO_ACCEPTCONN) {
 		while ((so2 = TAILQ_FIRST(>so_q0)) != NULL) {
 			(void) soqremque(so2, 0);
@@ -260,7 +270,7 @@ soclose(struct socket *so)
 			(so->so_state & SS_NBIO))
 goto drop;
 			while (so->so_state & SS_ISCONNECTED) {
-error = 

Help me testing the netlock

2016-10-03 Thread Martin Pieuchot
Diff below introduces a single write lock that will be used to serialize
access to ip_output().

This lock will be then split in multiple readers and writers to allow
multiple forwarding paths to run in parallel of each others but still
serialized with the socket layer.

I'm currently looking for people wanting to run this diff and try to
break it.  In other words, your machine might panic with it and if it
does report the panic to me so the diff can be improved.

I tested NFS v2 and v3 so I'm quite confident, but I might have missed
some obvious stuff.

PS: This diff includes the timeout_set_proc() diff I just sent.

Index: kern/kern_rwlock.c
===
RCS file: /cvs/src/sys/kern/kern_rwlock.c,v
retrieving revision 1.27
diff -u -p -r1.27 kern_rwlock.c
--- kern/kern_rwlock.c  14 Mar 2015 07:33:42 -  1.27
+++ kern/kern_rwlock.c  3 Oct 2016 12:59:16 -
@@ -98,6 +98,12 @@ rw_enter_read(struct rwlock *rwl)
membar_enter();
 }
 
+#if 1
+#include 
+#include 
+#include 
+#endif
+
 void
 rw_enter_write(struct rwlock *rwl)
 {
@@ -108,6 +114,15 @@ rw_enter_write(struct rwlock *rwl)
rw_enter(rwl, RW_WRITE);
else
membar_enter();
+
+#if 1
+   if ((rwl == ) && (splassert_ctl == 3)) {
+   printf("ENTER::%d::", cpu_number());
+   db_stack_trace_print(
+   (db_expr_t)__builtin_frame_address(1),
+   TRUE, 1, "", printf);
+   }
+#endif
 }
 
 void
@@ -129,6 +144,15 @@ rw_exit_write(struct rwlock *rwl)
unsigned long owner = rwl->rwl_owner;
 
rw_assert_wrlock(rwl);
+
+#if 1
+   if ((rwl == ) && (splassert_ctl == 3)) {
+   printf("EXIT::%d::", cpu_number());
+   db_stack_trace_print(
+   (db_expr_t)__builtin_frame_address(1),
+   TRUE, 1, "", printf);
+   }
+#endif
 
membar_exit();
if (__predict_false((owner & RWLOCK_WAIT) ||
Index: kern/sys_socket.c
===
RCS file: /cvs/src/sys/kern/sys_socket.c,v
retrieving revision 1.21
diff -u -p -r1.21 sys_socket.c
--- kern/sys_socket.c   5 Dec 2015 10:11:53 -   1.21
+++ kern/sys_socket.c   3 Oct 2016 12:59:16 -
@@ -131,8 +131,10 @@ soo_poll(struct file *fp, int events, st
 {
struct socket *so = fp->f_data;
int revents = 0;
-   int s = splsoftnet();
+   int s;
 
+   rw_enter_write();
+   s = splsoftnet();
if (events & (POLLIN | POLLRDNORM)) {
if (soreadable(so))
revents |= events & (POLLIN | POLLRDNORM);
@@ -159,6 +161,7 @@ soo_poll(struct file *fp, int events, st
}
}
splx(s);
+   rw_exit_write();
return (revents);
 }
 
Index: kern/uipc_socket.c
===
RCS file: /cvs/src/sys/kern/uipc_socket.c,v
retrieving revision 1.161
diff -u -p -r1.161 uipc_socket.c
--- kern/uipc_socket.c  20 Sep 2016 14:27:43 -  1.161
+++ kern/uipc_socket.c  3 Oct 2016 12:59:16 -
@@ -123,6 +123,7 @@ socreate(int dom, struct socket **aso, i
return (EPROTONOSUPPORT);
if (prp->pr_type != type)
return (EPROTOTYPE);
+   rw_enter_write();
s = splsoftnet();
so = pool_get(_pool, PR_WAITOK | PR_ZERO);
TAILQ_INIT(>so_q0);
@@ -142,9 +143,11 @@ socreate(int dom, struct socket **aso, i
so->so_state |= SS_NOFDREF;
sofree(so);
splx(s);
+   rw_exit_write();
return (error);
}
splx(s);
+   rw_exit_write();
*aso = so;
return (0);
 }
@@ -152,11 +155,13 @@ socreate(int dom, struct socket **aso, i
 int
 sobind(struct socket *so, struct mbuf *nam, struct proc *p)
 {
-   int s = splsoftnet();
-   int error;
+   int s, error;
 
+   rw_enter_write();
+   s = splsoftnet();
error = (*so->so_proto->pr_usrreq)(so, PRU_BIND, NULL, nam, NULL, p);
splx(s);
+   rw_exit_write();
return (error);
 }
 
@@ -171,11 +176,13 @@ solisten(struct socket *so, int backlog)
if (isspliced(so) || issplicedback(so))
return (EOPNOTSUPP);
 #endif /* SOCKET_SPLICE */
+   rw_enter_write();
s = splsoftnet();
error = (*so->so_proto->pr_usrreq)(so, PRU_LISTEN, NULL, NULL, NULL,
curproc);
if (error) {
splx(s);
+   rw_exit_write();
return (error);
}
if (TAILQ_FIRST(>so_q) == NULL)
@@ -186,6 +193,7 @@ solisten(struct socket *so, int backlog)
backlog = sominconn;
so->so_qlimit = backlog;
splx(s);
+   rw_exit_write();
return (0);
 }
 
@@ -196,6 +204,7 @@ solisten(struct socket *so, int backlog)
 void
 sofree(struct socket *so)
 {
+