Module Name:    src
Committed By:   riastradh
Date:           Sun Dec 19 12:42:25 UTC 2021

Modified Files:
        src/sys/external/bsd/drm2/dist/drm/scheduler: sched_main.c
        src/sys/external/bsd/drm2/include/linux: kthread.h
        src/sys/external/bsd/drm2/linux: linux_kthread.c

Log Message:
drm: Rework Linux `kthread' abstraction to avoid race to sleep.

Requires passing in the caller's lock and condvar to kthread_run, but
for the one existing user this appears not to be an onerous requirement.


To generate a diff of this commit:
cvs rdiff -u -r1.7 -r1.8 \
    src/sys/external/bsd/drm2/dist/drm/scheduler/sched_main.c
cvs rdiff -u -r1.3 -r1.4 src/sys/external/bsd/drm2/include/linux/kthread.h
cvs rdiff -u -r1.5 -r1.6 src/sys/external/bsd/drm2/linux/linux_kthread.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/external/bsd/drm2/dist/drm/scheduler/sched_main.c
diff -u src/sys/external/bsd/drm2/dist/drm/scheduler/sched_main.c:1.7 src/sys/external/bsd/drm2/dist/drm/scheduler/sched_main.c:1.8
--- src/sys/external/bsd/drm2/dist/drm/scheduler/sched_main.c:1.7	Sun Dec 19 12:41:44 2021
+++ src/sys/external/bsd/drm2/dist/drm/scheduler/sched_main.c	Sun Dec 19 12:42:25 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: sched_main.c,v 1.7 2021/12/19 12:41:44 riastradh Exp $	*/
+/*	$NetBSD: sched_main.c,v 1.8 2021/12/19 12:42:25 riastradh Exp $	*/
 
 /*
  * Copyright 2015 Advanced Micro Devices, Inc.
@@ -47,7 +47,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: sched_main.c,v 1.7 2021/12/19 12:41:44 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: sched_main.c,v 1.8 2021/12/19 12:42:25 riastradh Exp $");
 
 #include <linux/kthread.h>
 #include <linux/wait.h>
@@ -857,7 +857,8 @@ int drm_sched_init(struct drm_gpu_schedu
 	atomic64_set(&sched->job_id_count, 0);
 
 	/* Each scheduler will run on a seperate kernel thread */
-	sched->thread = kthread_run(drm_sched_main, sched, sched->name);
+	sched->thread = kthread_run(drm_sched_main, sched, sched->name,
+	    &sched->job_list_lock, &sched->wake_up_worker);
 	if (IS_ERR(sched->thread)) {
 		ret = PTR_ERR(sched->thread);
 		sched->thread = NULL;

Index: src/sys/external/bsd/drm2/include/linux/kthread.h
diff -u src/sys/external/bsd/drm2/include/linux/kthread.h:1.3 src/sys/external/bsd/drm2/include/linux/kthread.h:1.4
--- src/sys/external/bsd/drm2/include/linux/kthread.h:1.3	Sun Dec 19 12:23:07 2021
+++ src/sys/external/bsd/drm2/include/linux/kthread.h	Sun Dec 19 12:42:25 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: kthread.h,v 1.3 2021/12/19 12:23:07 riastradh Exp $	*/
+/*	$NetBSD: kthread.h,v 1.4 2021/12/19 12:42:25 riastradh Exp $	*/
 
 /*-
  * Copyright (c) 2013 The NetBSD Foundation, Inc.
@@ -32,6 +32,10 @@
 #ifndef _LINUX_KTHREAD_H_
 #define _LINUX_KTHREAD_H_
 
+#include <linux/spinlock.h>
+
+#include <drm/drm_wait_netbsd.h>
+
 struct task_struct;
 
 #define	__kthread_should_park	linux___kthread_should_park
@@ -43,7 +47,8 @@ struct task_struct;
 #define	kthread_stop		linux_kthread_stop
 #define	kthread_unpark		linux_kthread_unpark
 
-struct task_struct *kthread_run(int (*)(void *), void *, const char *);
+struct task_struct *kthread_run(int (*)(void *), void *, const char *,
+    spinlock_t *, drm_waitqueue_t *);
 
 int kthread_stop(struct task_struct *);
 int kthread_should_stop(void);

Index: src/sys/external/bsd/drm2/linux/linux_kthread.c
diff -u src/sys/external/bsd/drm2/linux/linux_kthread.c:1.5 src/sys/external/bsd/drm2/linux/linux_kthread.c:1.6
--- src/sys/external/bsd/drm2/linux/linux_kthread.c:1.5	Sun Dec 19 12:42:14 2021
+++ src/sys/external/bsd/drm2/linux/linux_kthread.c	Sun Dec 19 12:42:25 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: linux_kthread.c,v 1.5 2021/12/19 12:42:14 riastradh Exp $	*/
+/*	$NetBSD: linux_kthread.c,v 1.6 2021/12/19 12:42:25 riastradh Exp $	*/
 
 /*-
  * Copyright (c) 2021 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: linux_kthread.c,v 1.5 2021/12/19 12:42:14 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: linux_kthread.c,v 1.6 2021/12/19 12:42:25 riastradh Exp $");
 
 #include <sys/types.h>
 
@@ -42,6 +42,9 @@ __KERNEL_RCSID(0, "$NetBSD: linux_kthrea
 #include <sys/specificdata.h>
 
 #include <linux/kthread.h>
+#include <linux/spinlock.h>
+
+#include <drm/drm_wait_netbsd.h>
 
 struct task_struct {
 	kmutex_t	kt_lock;
@@ -52,6 +55,8 @@ struct task_struct {
 
 	int		(*kt_func)(void *);
 	void		*kt_cookie;
+	spinlock_t	*kt_interlock;
+	drm_waitqueue_t	*kt_wq;
 	struct lwp	*kt_lwp;
 };
 
@@ -109,7 +114,8 @@ linux_kthread_start(void *cookie)
 }
 
 static struct task_struct *
-kthread_alloc(int (*func)(void *), void *cookie)
+kthread_alloc(int (*func)(void *), void *cookie, spinlock_t *interlock,
+    drm_waitqueue_t *wq)
 {
 	struct task_struct *T;
 
@@ -120,6 +126,8 @@ kthread_alloc(int (*func)(void *), void 
 
 	T->kt_func = func;
 	T->kt_cookie = cookie;
+	T->kt_interlock = interlock;
+	T->kt_wq = wq;
 
 	return T;
 }
@@ -134,12 +142,13 @@ kthread_free(struct task_struct *T)
 }
 
 struct task_struct *
-kthread_run(int (*func)(void *), void *cookie, const char *name)
+kthread_run(int (*func)(void *), void *cookie, const char *name,
+    spinlock_t *interlock, drm_waitqueue_t *wq)
 {
 	struct task_struct *T;
 	int error;
 
-	T = kthread_alloc(func, cookie);
+	T = kthread_alloc(func, cookie, interlock, wq);
 	error = kthread_create(PRI_NONE, KTHREAD_MPSAFE|KTHREAD_MUSTJOIN, NULL,
 	    linux_kthread_start, T, &T->kt_lwp, "%s", name);
 	if (error) {
@@ -150,59 +159,35 @@ kthread_run(int (*func)(void *), void *c
 	return T;
 }
 
-/*
- * lwp_kick(l)
- *
- *	Cause l to wake up if it is asleep, no matter what condvar or
- *	other wchan it's asleep on.  This logic is like sleepq_timeout,
- *	but without setting LW_STIMO.  This is not a general-purpose
- *	mechanism -- don't go around using this instead of condvars.
- */
-static void
-lwp_kick(struct lwp *l)
-{
-
-	lwp_lock(l);
-	if (l->l_wchan == NULL) {
-		/* Not sleeping, so no need to wake up.  */
-		lwp_unlock(l);
-	} else {
-		/*
-		 * Sleeping, so wake it up.  lwp_unsleep has the side
-		 * effect of unlocking l when we pass unlock=true.
-		 */
-		lwp_unsleep(l, /*unlock*/true);
-	}
-}
-
 int
 kthread_stop(struct task_struct *T)
 {
-	struct lwp *l;
 	int ret;
 
+	/* Lock order: interlock, then kthread lock.  */
+	spin_lock(T->kt_interlock);
+	mutex_enter(&T->kt_lock);
+
 	/*
 	 * Notify the thread that it's stopping, and wake it if it's
-	 * parked.
+	 * parked or sleeping on its own waitqueue.
 	 */
-	mutex_enter(&T->kt_lock);
 	T->kt_shouldpark = false;
 	T->kt_shouldstop = true;
 	cv_broadcast(&T->kt_cv);
+	DRM_SPIN_WAKEUP_ALL(T->kt_wq, T->kt_interlock);
+
+	/* Release the locks.  */
 	mutex_exit(&T->kt_lock);
+	spin_unlock(T->kt_interlock);
 
-	/*
-	 * Kick the lwp in case it's waiting on anything else, and then
-	 * wait for it to complete.  It is the thread's obligation to
-	 * check kthread_shouldstop before sleeping again.
-	 */
-	l = T->kt_lwp;
-	KASSERT(l != curlwp);
-	lwp_kick(l);
-	ret = kthread_join(l);
+	/* Wait for the (NetBSD) kthread to exit.  */
+	ret = kthread_join(T->kt_lwp);
 
+	/* Free the (Linux) kthread.  */
 	kthread_free(T);
 
+	/* Return what the thread returned.  */
 	return ret;
 }
 
@@ -222,8 +207,9 @@ kthread_should_stop(void)
 void
 kthread_park(struct task_struct *T)
 {
-	struct lwp *l;
 
+	/* Lock order: interlock, then kthread lock.  */
+	spin_lock(T->kt_interlock);
 	mutex_enter(&T->kt_lock);
 
 	/* Caller must not ask to park if they've already asked to stop.  */
@@ -232,22 +218,26 @@ kthread_park(struct task_struct *T)
 	/* Ask the thread to park.  */
 	T->kt_shouldpark = true;
 
-	/* Don't wait for ourselves -- Linux allows this semantics.  */
-	if ((l = T->kt_lwp) == curlwp)
-		goto out;
-
 	/*
-	 * If the thread is asleep for any reason, give it a spurious
-	 * wakeup.  The thread is responsible for checking
-	 * kthread_shouldpark before sleeping.
+	 * Ensure the thread is not sleeping on its condvar.  After
+	 * this point, we are done with the interlock, which we must
+	 * not hold while we wait on the kthread condvar.
 	 */
-	lwp_kick(l);
+	DRM_SPIN_WAKEUP_ALL(T->kt_wq, T->kt_interlock);
+	spin_unlock(T->kt_interlock);
 
-	/* Wait until the thread has issued kthread_parkme.  */
-	while (!T->kt_parked)
-		cv_wait(&T->kt_cv, &T->kt_lock);
+	/*
+	 * Wait until the thread has issued kthread_parkme, unless we
+	 * are already the thread, which Linux allows and interprets to
+	 * mean don't wait.
+	 */
+	if (T->kt_lwp != curlwp) {
+		while (!T->kt_parked)
+			cv_wait(&T->kt_cv, &T->kt_lock);
+	}
 
-out:	mutex_exit(&T->kt_lock);
+	/* Release the kthread lock too.  */
+	mutex_exit(&T->kt_lock);
 }
 
 void

Reply via email to