Nothing uses it anymore, and I don't think it's useful either.

For those who don't know what it did, it marked the threads used by a
taskq so the scheduler knew they shouldn't sleep. This was used in the
early stages of the mpsafe network stack changes to mark the softnet
taskqs as nonsleeping so we could turn that back into an interrupt
context with fewer issues. It's pretty obvious now that softnets are
going to remain as threads though.

ok?

Index: share/man/man9/task_add.9
===================================================================
RCS file: /cvs/src/share/man/man9/task_add.9,v
retrieving revision 1.18
diff -u -p -r1.18 task_add.9
--- share/man/man9/task_add.9   16 Dec 2018 03:40:12 -0000      1.18
+++ share/man/man9/task_add.9   29 Mar 2019 04:26:01 -0000
@@ -81,9 +81,7 @@ argument:
 .Bl -tag -width xxx -offset indent
 .It Dv TASKQ_MPSAFE
 The threads servicing the taskq will be run without the kernel big lock.
-.It Dv TASKQ_CANTSLEEP
-The tasks run via the taskq cannot sleep.
 .El
 .Pp
 .Fn taskq_destroy
 causes the resources associated with a previously created taskq to be freed.
Index: sys/sys/task.h
===================================================================
RCS file: /cvs/src/sys/sys/task.h,v
retrieving revision 1.13
diff -u -p -r1.13 task.h
--- sys/sys/task.h      16 Dec 2018 03:36:02 -0000      1.13
+++ sys/sys/task.h      29 Mar 2019 04:26:01 -0000
@@ -35,7 +35,6 @@ struct task {
 TAILQ_HEAD(task_list, task);
 
 #define TASKQ_MPSAFE           (1 << 0)
-#define TASKQ_CANTSLEEP                (1 << 1)
 
 #define TASK_INITIALIZER(_f, _a)  {{ NULL, NULL }, (_f), (_a), 0 }
 
Index: sys/kern/kern_task.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_task.c,v
retrieving revision 1.23
diff -u -p -r1.23 kern_task.c
--- sys/kern/kern_task.c        16 Dec 2018 03:36:02 -0000      1.23
+++ sys/kern/kern_task.c        29 Mar 2019 04:26:01 -0000
@@ -59,9 +59,6 @@ struct taskq taskq_sys_mp = {
        TAILQ_HEAD_INITIALIZER(taskq_sys_mp.tq_worklist)
 };
 
-typedef int (*sleepfn)(const volatile void *, struct mutex *, int,
-    const char *, int);
-
 struct taskq *const systq = &taskq_sys;
 struct taskq *const systqmp = &taskq_sys_mp;
 
@@ -70,7 +67,7 @@ void  taskq_create_thread(void *);
 void   taskq_barrier_task(void *);
 int    taskq_sleep(const volatile void *, struct mutex *, int,
            const char *, int);
-int    taskq_next_work(struct taskq *, struct task *, sleepfn);
+int    taskq_next_work(struct taskq *, struct task *);
 void   taskq_thread(void *);
 
 void
@@ -246,21 +243,7 @@ task_del(struct taskq *tq, struct task *
 }
 
 int
-taskq_sleep(const volatile void *ident, struct mutex *mtx, int priority,
-    const char *wmesg, int tmo)
-{
-       u_int *flags = &curproc->p_flag;
-       int rv;
-
-       atomic_clearbits_int(flags, P_CANTSLEEP);
-       rv = msleep(ident, mtx, priority, wmesg, tmo);
-       atomic_setbits_int(flags, P_CANTSLEEP);
-
-       return (tmo);
-}
-
-int
-taskq_next_work(struct taskq *tq, struct task *work, sleepfn tqsleep)
+taskq_next_work(struct taskq *tq, struct task *work)
 {
        struct task *next;
 
@@ -271,7 +254,7 @@ taskq_next_work(struct taskq *tq, struct
                        return (0);
                }
 
-               tqsleep(tq, &tq->tq_mtx, PWAIT, "bored", 0);
+               msleep(tq, &tq->tq_mtx, PWAIT, "bored", 0);
        }
 
        TAILQ_REMOVE(&tq->tq_worklist, next, t_entry);
@@ -291,7 +274,6 @@ taskq_next_work(struct taskq *tq, struct
 void
 taskq_thread(void *xtq)
 {
-       sleepfn tqsleep = msleep;
        struct taskq *tq = xtq;
        struct task work;
        int last;
@@ -299,12 +281,7 @@ taskq_thread(void *xtq)
        if (ISSET(tq->tq_flags, TASKQ_MPSAFE))
                KERNEL_UNLOCK();
 
-       if (ISSET(tq->tq_flags, TASKQ_CANTSLEEP)) {
-               tqsleep = taskq_sleep;
-               atomic_setbits_int(&curproc->p_flag, P_CANTSLEEP);
-       }
-
-       while (taskq_next_work(tq, &work, tqsleep)) {
+       while (taskq_next_work(tq, &work)) {
                (*work.t_func)(work.t_arg);
                sched_pause(yield);
        }
@@ -312,9 +289,6 @@ taskq_thread(void *xtq)
        mtx_enter(&tq->tq_mtx);
        last = (--tq->tq_running == 0);
        mtx_leave(&tq->tq_mtx);
-
-       if (ISSET(tq->tq_flags, TASKQ_CANTSLEEP))
-               atomic_clearbits_int(&curproc->p_flag, P_CANTSLEEP);
 
        if (ISSET(tq->tq_flags, TASKQ_MPSAFE))
                KERNEL_LOCK();

Reply via email to