This adds a timeout_barrier() function, which works like all the other
barriers we have (intr_barrier(), ifq_barrier(), etc.).

My use case for this is a network driver I'm working on, which uses
a timeout to refill the rx ring if no mbufs are available in the
system, but needs to wait for that timeout to finish using the rx ring
structures before they're freed when the interface is being shut
down.

So, in the abstract: if you want to free something that a timeout refers
to, you need to wait for the timeout to stop using it before you
free it. If you're freeing without holding the kernel lock with
splsoftclock raised, you can't know whether the timeout is currently
running or not. If you're using a timeout that gets handled in the
softclock thread, and it is sleeping, you can't free it yet either.

does that make sense?

Index: share/man/man9/timeout.9
===================================================================
RCS file: /cvs/src/share/man/man9/timeout.9,v
retrieving revision 1.44
diff -u -p -r1.44 timeout.9
--- share/man/man9/timeout.9    22 Sep 2016 12:55:24 -0000      1.44
+++ share/man/man9/timeout.9    17 Nov 2017 04:19:31 -0000
@@ -38,6 +38,7 @@
 .Nm timeout_add_ts ,
 .Nm timeout_add_bt ,
 .Nm timeout_del ,
+.Nm timeout_barrier ,
 .Nm timeout_pending ,
 .Nm timeout_initialized ,
 .Nm timeout_triggered ,
@@ -54,6 +55,8 @@
 .Fn timeout_add "struct timeout *to" "int ticks"
 .Ft int
 .Fn timeout_del "struct timeout *to"
+.Ft void
+.Fn timeout_barrier "struct timeout *to"
 .Ft int
 .Fn timeout_pending "struct timeout *to"
 .Ft int
@@ -153,6 +156,11 @@ will cancel the timeout in the argument
 If the timeout has already executed or has never been added
 the call will have no effect.
 .Pp
+.Fn timeout_barrier
+ensures that any current execution of the timeout in the argument
+.Fa to
+has completed before returning.
+.Pp
 The
 .Fn timeout_pending
 macro can be used to check if a timeout is scheduled to run.
@@ -217,6 +225,9 @@ context.
 can be called during autoconf, from process context, or from any
 interrupt context at or below
 .Dv IPL_CLOCK .
+.Pp
+.Fn timeout_barrier
+can be called from process context.
 .Pp
 When the timeout runs, the
 .Fa fn
Index: sys/sys/timeout.h
===================================================================
RCS file: /cvs/src/sys/sys/timeout.h,v
retrieving revision 1.26
diff -u -p -r1.26 timeout.h
--- sys/sys/timeout.h   22 Sep 2016 12:55:24 -0000      1.26
+++ sys/sys/timeout.h   17 Nov 2017 04:19:31 -0000
@@ -99,6 +99,7 @@ int timeout_add_msec(struct timeout *, i
 int timeout_add_usec(struct timeout *, int);
 int timeout_add_nsec(struct timeout *, int);
 int timeout_del(struct timeout *);
+void timeout_barrier(struct timeout *);
 
 void timeout_startup(void);
 void timeout_adjust_ticks(int);
Index: sys/kern/kern_timeout.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_timeout.c,v
retrieving revision 1.50
diff -u -p -r1.50 kern_timeout.c
--- sys/kern/kern_timeout.c     3 Oct 2016 11:54:29 -0000       1.50
+++ sys/kern/kern_timeout.c     17 Nov 2017 04:19:31 -0000
@@ -324,6 +324,60 @@ timeout_del(struct timeout *to)
        return (ret);
 }
 
+void   timeout_proc_barrier(void *);
+
+/*
+ * Wait until any current execution of the timeout "to" has completed.
+ * Can only be called from process context, as it may sleep.
+ */
+void
+timeout_barrier(struct timeout *to)
+{
+       if (!ISSET(to->to_flags, TIMEOUT_NEEDPROCCTX)) {
+               /*
+                * The timeout runs from softclock() under the kernel
+                * lock at IPL_SOFTCLOCK, so briefly taking both is
+                * enough to let any current execution finish.
+                */
+               KERNEL_LOCK();
+               splx(splsoftclock());
+               KERNEL_UNLOCK();
+       } else {
+               int wait = 1;
+               struct timeout barrier;
+               struct sleep_state sls;
+
+               timeout_set(&barrier, timeout_proc_barrier, &wait);
+
+               mtx_enter(&timeout_mutex);
+               /*
+                * Queue the barrier timeout itself (not "to") behind
+                * any work already pending in the softclock thread,
+                * and mark it consistently with other queued timeouts.
+                */
+               SET(barrier.to_flags, TIMEOUT_ONQUEUE);
+               CIRCQ_INSERT(&barrier.to_list, &timeout_proc);
+               mtx_leave(&timeout_mutex);
+
+               wakeup_one(&timeout_proc);
+
+               while (wait) {
+                       sleep_setup(&sls, &wait, PSWP, "tmobar");
+                       sleep_finish(&sls, wait);
+               }
+       }
+}
+
+void
+timeout_proc_barrier(void *arg)
+{
+       int *wait = arg;
+
+       *wait = 0;      /* release the thread sleeping in timeout_barrier() */
+
+       wakeup_one(wait);
+}
+
 /*
  * This is called from hardclock() once every tick.
  * We return !0 if we need to schedule a softclock.

Reply via email to