Module Name:    src
Committed By:   riastradh
Date:           Wed Feb 12 16:01:00 UTC 2020

Modified Files:
        src/sys/arch/mips/adm5120/dev: ahci.c
        src/sys/dev/usb: ehci.c motg.c ohci.c uhci.c usbdi.c usbdi.h usbdivar.h
            xhci.c
        src/sys/external/bsd/dwc2: dwc2.c dwc2var.h

Log Message:
Factor out HCI-independent xfer completion logic.

New API for HCI drivers to synchronize hardware completion
interrupts, synchronous aborts, and asynchronous timeouts:

- When submitting an xfer to hardware, call
  usbd_xfer_schedule_timeout(xfer).

- On HCI completion interrupt for xfer completion:

        if (!usbd_xfer_trycomplete(xfer))
                return;         /* timed out or aborted, ignore it */

- In upm_abort methods, call usbd_xfer_abort(xfer).

For HCI drivers that use this API (not needed in drivers that don't,
or for xfers like root intr xfers that don't use it):

- New ubm_abortx method serves role of former *hci_abort_xfer, but
  without any logic for wrangling timeouts/callouts/tasks -- caller
  in usbd_xfer_abort has already handled them.

- New ubm_dying method, returns true if the device is in the process
  of detaching, used by the timeout logic.

Converted and tested:
- ehci
- ohci

Converted and compile-tested:
- ahci (XXX did this ever work?)
- dwc2
- motg (XXX missing usbd_xfer_schedule_timeout in motg_*_start?)
- uhci
- xhci

Not changed:

- slhci (sys/dev/ic/sl811hs.c) -- doesn't use a separate per-xfer
  callout for timeouts (XXX but maybe should?)

- ugenhc (sys/rump/dev/lib/libugenhc/ugenhc.c) -- doesn't manage its
  own transfer timeouts

- vhci -- times transfers out only on detach; could be adapted easily
  if we wanted to use the xfer->ux_callout


To generate a diff of this commit:
cvs rdiff -u -r1.17 -r1.18 src/sys/arch/mips/adm5120/dev/ahci.c
cvs rdiff -u -r1.270 -r1.271 src/sys/dev/usb/ehci.c
cvs rdiff -u -r1.25 -r1.26 src/sys/dev/usb/motg.c
cvs rdiff -u -r1.292 -r1.293 src/sys/dev/usb/ohci.c
cvs rdiff -u -r1.288 -r1.289 src/sys/dev/usb/uhci.c
cvs rdiff -u -r1.191 -r1.192 src/sys/dev/usb/usbdi.c
cvs rdiff -u -r1.100 -r1.101 src/sys/dev/usb/usbdi.h
cvs rdiff -u -r1.121 -r1.122 src/sys/dev/usb/usbdivar.h
cvs rdiff -u -r1.115 -r1.116 src/sys/dev/usb/xhci.c
cvs rdiff -u -r1.66 -r1.67 src/sys/external/bsd/dwc2/dwc2.c
cvs rdiff -u -r1.6 -r1.7 src/sys/external/bsd/dwc2/dwc2var.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/mips/adm5120/dev/ahci.c
diff -u src/sys/arch/mips/adm5120/dev/ahci.c:1.17 src/sys/arch/mips/adm5120/dev/ahci.c:1.18
--- src/sys/arch/mips/adm5120/dev/ahci.c:1.17	Sun Feb 17 04:17:52 2019
+++ src/sys/arch/mips/adm5120/dev/ahci.c	Wed Feb 12 16:01:00 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: ahci.c,v 1.17 2019/02/17 04:17:52 rin Exp $	*/
+/*	$NetBSD: ahci.c,v 1.18 2020/02/12 16:01:00 riastradh Exp $	*/
 
 /*-
  * Copyright (c) 2007 Ruslan Ermilov and Vsevolod Lobko.
@@ -64,7 +64,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: ahci.c,v 1.17 2019/02/17 04:17:52 rin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: ahci.c,v 1.18 2020/02/12 16:01:00 riastradh Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -98,6 +98,7 @@ static void		ahci_poll_device(void *arg)
 static struct usbd_xfer *
 			ahci_allocx(struct usbd_bus *, unsigned int);
 static void		ahci_freex(struct usbd_bus *, struct usbd_xfer *);
+static void		ahci_abortx(struct usbd_xfer *);
 
 static void		ahci_get_lock(struct usbd_bus *, kmutex_t **);
 static int		ahci_roothub_ctrl(struct usbd_bus *, usb_device_request_t *,
@@ -136,7 +137,6 @@ static void		ahci_device_bulk_done(struc
 static int		ahci_transaction(struct ahci_softc *,
 	struct usbd_pipe *, uint8_t, int, u_char *, uint8_t);
 static void		ahci_noop(struct usbd_pipe *);
-static void		ahci_abort_xfer(struct usbd_xfer *, usbd_status);
 static void		ahci_device_clear_toggle(struct usbd_pipe *);
 
 extern int usbdebug;
@@ -169,6 +169,7 @@ struct usbd_bus_methods ahci_bus_methods
 	.ubm_dopoll = ahci_poll,
 	.ubm_allocx = ahci_allocx,
 	.ubm_freex = ahci_freex,
+	.ubm_abortx = ahci_abortx,
 	.ubm_getlock = ahci_get_lock,
 	.ubm_rhctrl = ahci_roothub_ctrl,
 };
@@ -919,7 +920,7 @@ static void
 ahci_device_ctrl_abort(struct usbd_xfer *xfer)
 {
 	DPRINTF(D_TRACE, ("Cab "));
-	ahci_abort_xfer(xfer, USBD_CANCELLED);
+	usbd_xfer_abort(xfer);
 }
 
 static void
@@ -1031,7 +1032,7 @@ ahci_device_intr_abort(struct usbd_xfer 
 	} else {
 		printf("%s: sx == NULL!\n", __func__);
 	}
-	ahci_abort_xfer(xfer, USBD_CANCELLED);
+	usbd_xfer_abort(xfer);
 }
 
 static void
@@ -1247,7 +1248,7 @@ static void
 ahci_device_bulk_abort(struct usbd_xfer *xfer)
 {
 	DPRINTF(D_TRACE, ("Bab "));
-	ahci_abort_xfer(xfer, USBD_CANCELLED);
+	usbd_xfer_abort(xfer);
 }
 
 static void
@@ -1377,11 +1378,15 @@ ahci_transaction(struct ahci_softc *sc, 
 #endif
 }
 
-void
-ahci_abort_xfer(struct usbd_xfer *xfer, usbd_status status)
+static void
+ahci_abortx(struct usbd_xfer *xfer)
 {
-	xfer->ux_status = status;
-	usb_transfer_complete(xfer);
+	/*
+	 * XXX This is totally busted; there's no way it can possibly
+	 * work!  All transfers are busy-waited, it seems, so there is
+	 * no opportunity to abort.
+	 */
+	KASSERT(xfer->ux_status != USBD_IN_PROGRESS);
 }
 
 void

Index: src/sys/dev/usb/ehci.c
diff -u src/sys/dev/usb/ehci.c:1.270 src/sys/dev/usb/ehci.c:1.271
--- src/sys/dev/usb/ehci.c:1.270	Wed Feb 12 16:00:34 2020
+++ src/sys/dev/usb/ehci.c	Wed Feb 12 16:01:00 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: ehci.c,v 1.270 2020/02/12 16:00:34 riastradh Exp $ */
+/*	$NetBSD: ehci.c,v 1.271 2020/02/12 16:01:00 riastradh Exp $ */
 
 /*
  * Copyright (c) 2004-2012 The NetBSD Foundation, Inc.
@@ -53,7 +53,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: ehci.c,v 1.270 2020/02/12 16:00:34 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: ehci.c,v 1.271 2020/02/12 16:01:00 riastradh Exp $");
 
 #include "ohci.h"
 #include "uhci.h"
@@ -166,11 +166,6 @@ Static void		ehci_check_itd_intr(ehci_so
 Static void		ehci_check_sitd_intr(ehci_softc_t *, struct ehci_xfer *,
 			    ex_completeq_t *);
 Static void		ehci_idone(struct ehci_xfer *, ex_completeq_t *);
-Static void		ehci_timeout(void *);
-Static void		ehci_timeout_task(void *);
-Static bool		ehci_probe_timeout(struct usbd_xfer *);
-Static void		ehci_schedule_timeout(struct usbd_xfer *);
-Static void		ehci_cancel_timeout_async(struct usbd_xfer *);
 Static void		ehci_intrlist_timeout(void *);
 Static void		ehci_doorbell(void *);
 Static void		ehci_pcd(void *);
@@ -180,6 +175,7 @@ Static struct usbd_xfer *
 Static void		ehci_freex(struct usbd_bus *, struct usbd_xfer *);
 
 Static void		ehci_get_lock(struct usbd_bus *, kmutex_t **);
+Static bool		ehci_dying(struct usbd_bus *);
 Static int		ehci_roothub_ctrl(struct usbd_bus *,
 			    usb_device_request_t *, void *, int);
 
@@ -281,7 +277,7 @@ Static void		ehci_set_qh_qtd(ehci_soft_q
 Static void		ehci_sync_hc(ehci_softc_t *);
 
 Static void		ehci_close_pipe(struct usbd_pipe *, ehci_soft_qh_t *);
-Static void		ehci_abort_xfer(struct usbd_xfer *, usbd_status);
+Static void		ehci_abortx(struct usbd_xfer *);
 
 #ifdef EHCI_DEBUG
 Static ehci_softc_t 	*theehci;
@@ -322,6 +318,8 @@ Static const struct usbd_bus_methods ehc
 	.ubm_dopoll =	ehci_poll,
 	.ubm_allocx =	ehci_allocx,
 	.ubm_freex =	ehci_freex,
+	.ubm_abortx =	ehci_abortx,
+	.ubm_dying =	ehci_dying,
 	.ubm_getlock =	ehci_get_lock,
 	.ubm_rhctrl =	ehci_roothub_ctrl,
 };
@@ -1049,22 +1047,11 @@ ehci_idone(struct ehci_xfer *ex, ex_comp
 	DPRINTF("ex=%#jx", (uintptr_t)ex, 0, 0, 0);
 
 	/*
-	 * If software has completed it, either by cancellation
-	 * or timeout, drop it on the floor.
+	 * Try to claim this xfer for completion.  If it has already
+	 * completed or aborted, drop it on the floor.
 	 */
-	if (xfer->ux_status != USBD_IN_PROGRESS) {
-		KASSERT(xfer->ux_status == USBD_CANCELLED ||
-		    xfer->ux_status == USBD_TIMEOUT);
-		DPRINTF("aborted xfer=%#jx", (uintptr_t)xfer, 0, 0, 0);
+	if (!usbd_xfer_trycomplete(xfer))
 		return;
-	}
-
-	/*
-	 * We are completing the xfer.  Cancel the timeout if we can,
-	 * but only asynchronously.  See ehci_cancel_timeout_async for
-	 * why we need not wait for the callout or task here.
-	 */
-	ehci_cancel_timeout_async(xfer);
 
 #ifdef DIAGNOSTIC
 #ifdef EHCI_DEBUG
@@ -1539,9 +1526,6 @@ ehci_allocx(struct usbd_bus *bus, unsign
 	if (xfer != NULL) {
 		memset(xfer, 0, sizeof(struct ehci_xfer));
 
-		/* Initialise this always so we can call remove on it. */
-		usb_init_task(&xfer->ux_aborttask, ehci_timeout_task, xfer,
-		    USB_TASKQ_MPSAFE);
 #ifdef DIAGNOSTIC
 		struct ehci_xfer *ex = EHCI_XFER2EXFER(xfer);
 		ex->ex_isdone = true;
@@ -1569,6 +1553,14 @@ ehci_freex(struct usbd_bus *bus, struct 
 	pool_cache_put(sc->sc_xferpool, xfer);
 }
 
+Static bool
+ehci_dying(struct usbd_bus *bus)
+{
+	struct ehci_softc *sc = EHCI_BUS2SC(bus);
+
+	return sc->sc_dying;
+}
+
 Static void
 ehci_get_lock(struct usbd_bus *bus, kmutex_t **lock)
 {
@@ -3201,22 +3193,12 @@ ehci_close_pipe(struct usbd_pipe *pipe, 
 }
 
 /*
- * Cancel or timeout a device request.  We have two cases to deal with
- *
- * 1) A driver wants to stop scheduled or inflight transfers
- * 2) A transfer has timed out
- *
- * have (partially) happened since the hardware runs concurrently.
- *
- * Transfer state is protected by the bus lock and we set the transfer status
- * as soon as either of the above happens (with bus lock held).
- *
- * Then we arrange for the hardware to tells us that it is not still
+ * Arrange for the hardware to tell us that it is not still
  * processing the TDs by setting the QH halted bit and wait for the ehci
  * door bell
  */
 Static void
-ehci_abort_xfer(struct usbd_xfer *xfer, usbd_status status)
+ehci_abortx(struct usbd_xfer *xfer)
 {
 	EHCIHIST_FUNC(); EHCIHIST_CALLED();
 	struct ehci_pipe *epipe = EHCI_XFER2EPIPE(xfer);
@@ -3228,48 +3210,14 @@ ehci_abort_xfer(struct usbd_xfer *xfer, 
 	uint32_t qhstatus;
 	int hit;
 
-	KASSERTMSG((status == USBD_CANCELLED || status == USBD_TIMEOUT),
-	    "invalid status for abort: %d", (int)status);
-
 	DPRINTF("xfer=%#jx pipe=%#jx", (uintptr_t)xfer, (uintptr_t)epipe, 0, 0);
 
 	KASSERT(mutex_owned(&sc->sc_lock));
 	ASSERT_SLEEPABLE();
 
-	/*
-	 * Nobody else can set this status: only one caller can time
-	 * out, and only one caller can synchronously abort.  So the
-	 * status can't be the status we're trying to set this to.
-	 */
-	KASSERT(xfer->ux_status != status);
-
-	/*
-	 * If host controller or timer interrupt has completed it, too
-	 * late to abort.  Forget about it.
-	 */
-	if (xfer->ux_status != USBD_IN_PROGRESS)
-		return;
-
-	if (status == USBD_CANCELLED) {
-		/*
-		 * We are synchronously aborting.  Cancel the timeout
-		 * if we can, but only asynchronously.  See
-		 * ehci_cancel_timeout_async for why we need not wait
-		 * for the callout or task here.
-		 */
-		ehci_cancel_timeout_async(xfer);
-	} else {
-		/*
-		 * Otherwise, we are timing out.  No action needed to
-		 * cancel the timeout because we _are_ the timeout.
-		 * This case should happen only via the timeout task,
-		 * invoked via the callout.
-		 */
-		KASSERT(status == USBD_TIMEOUT);
-	}
-
-	/* We beat everyone else.  Claim the status.  */
-	xfer->ux_status = status;
+	KASSERTMSG((xfer->ux_status == USBD_CANCELLED ||
+		xfer->ux_status == USBD_TIMEOUT),
+	    "bad abort status: %d", xfer->ux_status);
 
 	/*
 	 * If we're dying, skip the hardware action and just notify the
@@ -3477,291 +3425,6 @@ dying:
 	KASSERT(mutex_owned(&sc->sc_lock));
 }
 
-/*
- * ehci_timeout(xfer)
- *
- *	Called at IPL_SOFTCLOCK when too much time has elapsed waiting
- *	for xfer to complete.  Since we can't abort the xfer at
- *	IPL_SOFTCLOCK, defer to a usb_task to run it in thread context,
- *	unless the xfer has completed or aborted concurrently -- and if
- *	the xfer has also been resubmitted, take care of rescheduling
- *	the callout.
- */
-Static void
-ehci_timeout(void *addr)
-{
-	EHCIHIST_FUNC(); EHCIHIST_CALLED();
-	struct usbd_xfer *xfer = addr;
-	ehci_softc_t *sc = EHCI_XFER2SC(xfer);
-	struct usbd_device *dev = xfer->ux_pipe->up_dev;
-
-	DPRINTF("xfer %#jx", (uintptr_t)xfer, 0, 0, 0);
-#ifdef EHCI_DEBUG
-	if (ehcidebug >= 2) {
-		struct usbd_pipe *pipe = xfer->ux_pipe;
-		usbd_dump_pipe(pipe);
-	}
-#endif
-
-	/* Acquire the lock so we can transition the timeout state.  */
-	mutex_enter(&sc->sc_lock);
-
-	/*
-	 * Use ehci_probe_timeout to check whether the timeout is still
-	 * valid, or to reschedule the callout if necessary.  If it is
-	 * still valid, schedule the task.
-	 */
-	if (ehci_probe_timeout(xfer))
-		usb_add_task(dev, &xfer->ux_aborttask, USB_TASKQ_HC);
-
-	/*
-	 * Notify ehci_cancel_timeout_async that we may have scheduled
-	 * the task.  This causes callout_invoking to return false in
-	 * ehci_cancel_timeout_async so that it can tell which stage in
-	 * the callout->task->abort process we're at.
-	 */
-	callout_ack(&xfer->ux_callout);
-
-	/* All done -- release the lock.  */
-	mutex_exit(&sc->sc_lock);
-}
-
-/*
- * ehci_timeout_task(xfer)
- *
- *	Called in thread context when too much time has elapsed waiting
- *	for xfer to complete.  Issue ehci_abort_xfer(USBD_TIMEOUT),
- *	unless the xfer has completed or aborted concurrently -- and if
- *	the xfer has also been resubmitted, take care of rescheduling
- *	the callout.
- */
-Static void
-ehci_timeout_task(void *addr)
-{
-	struct usbd_xfer *xfer = addr;
-	ehci_softc_t *sc = EHCI_XFER2SC(xfer);
-
-	EHCIHIST_FUNC(); EHCIHIST_CALLED();
-
-	DPRINTF("xfer=%#jx", (uintptr_t)xfer, 0, 0, 0);
-
-	/* Acquire the lock so we can transition the timeout state.  */
-	mutex_enter(&sc->sc_lock);
-
-	/*
-	 * Use ehci_probe_timeout to check whether the timeout is still
-	 * valid, or to reschedule the callout if necessary.  If it is
-	 * still valid, schedule the task.
-	 */
-	if (ehci_probe_timeout(xfer))
-		ehci_abort_xfer(xfer, USBD_TIMEOUT);
-
-	/* All done -- release the lock.  */
-	mutex_exit(&sc->sc_lock);
-}
-
-/*
- * ehci_probe_timeout(xfer)
- *
- *	Probe the status of xfer's timeout.  Acknowledge and process a
- *	request to reschedule.  Return true if the timeout is still
- *	valid and the caller should take further action (queueing a
- *	task or aborting the xfer), false if it must stop here.
- */
-Static bool
-ehci_probe_timeout(struct usbd_xfer *xfer)
-{
-	EHCIHIST_FUNC(); EHCIHIST_CALLED();
-	struct ehci_softc *sc = EHCI_XFER2SC(xfer);
-	bool valid;
-
-	KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
-
-	/* The timeout must be set.  */
-	KASSERT(xfer->ux_timeout_set);
-
-	/*
-	 * Neither callout nor task may be pending; they execute
-	 * alternately in lock step.
-	 */
-	KASSERT(!callout_pending(&xfer->ux_callout));
-	KASSERT(!usb_task_pending(xfer->ux_pipe->up_dev, &xfer->ux_aborttask));
-
-	/* There are a few cases... */
-	if (sc->sc_dying) {
-		/* Host controller dying.  Drop it all on the floor.  */
-		xfer->ux_timeout_set = false;
-		xfer->ux_timeout_reset = false;
-		valid = false;
-	} else if (xfer->ux_timeout_reset) {
-		/*
-		 * The xfer completed _and_ got resubmitted while we
-		 * waited for the lock.  Acknowledge the request to
-		 * reschedule, and reschedule it if there is a timeout
-		 * and the bus is not polling.
-		 */
-		xfer->ux_timeout_reset = false;
-		if (xfer->ux_timeout && !sc->sc_bus.ub_usepolling) {
-			KASSERT(xfer->ux_timeout_set);
-			callout_schedule(&xfer->ux_callout,
-			    mstohz(xfer->ux_timeout));
-		} else {
-			/* No more callout or task scheduled.  */
-			xfer->ux_timeout_set = false;
-		}
-		valid = false;
-	} else if (xfer->ux_status != USBD_IN_PROGRESS) {
-		/*
-		 * The xfer has completed by hardware completion or by
-		 * software abort, and has not been resubmitted, so the
-		 * timeout must be unset, and is no longer valid for
-		 * the caller.
-		 */
-		xfer->ux_timeout_set = false;
-		valid = false;
-	} else {
-		/*
-		 * The xfer has not yet completed, so the timeout is
-		 * valid.
-		 */
-		valid = true;
-	}
-
-	/* Any reset must have been processed.  */
-	KASSERT(!xfer->ux_timeout_reset);
-
-	/*
-	 * Either we claim the timeout is set, or the callout is idle.
-	 * If the timeout is still set, we may be handing off to the
-	 * task instead, so this is an if but not an iff.
-	 */
-	KASSERT(xfer->ux_timeout_set || !callout_pending(&xfer->ux_callout));
-
-	/*
-	 * The task must be idle now.
-	 *
-	 * - If the caller is the callout, _and_ the timeout is still
-	 *   valid, the caller will schedule it, but it hasn't been
-	 *   scheduled yet.  (If the timeout is not valid, the task
-	 *   should not be scheduled.)
-	 *
-	 * - If the caller is the task, it cannot be scheduled again
-	 *   until the callout runs again, which won't happen until we
-	 *   next release the lock.
-	 */
-	KASSERT(!usb_task_pending(xfer->ux_pipe->up_dev, &xfer->ux_aborttask));
-
-	KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
-
-	return valid;
-}
-
-/*
- * ehci_schedule_timeout(xfer)
- *
- *	Ensure that xfer has a timeout.  If the callout is already
- *	queued or the task is already running, request that they
- *	reschedule the callout.  If not, and if we're not polling,
- *	schedule the callout anew.
- */
-Static void
-ehci_schedule_timeout(struct usbd_xfer *xfer)
-{
-	EHCIHIST_FUNC(); EHCIHIST_CALLED();
-	ehci_softc_t *sc = EHCI_XFER2SC(xfer);
-
-	KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
-
-	if (xfer->ux_timeout_set) {
-		/*
-		 * Callout or task has fired from a prior completed
-		 * xfer but has not yet noticed that the xfer is done.
-		 * Ask it to reschedule itself to ux_timeout.
-		 */
-		xfer->ux_timeout_reset = true;
-	} else if (xfer->ux_timeout && !sc->sc_bus.ub_usepolling) {
-		/* Callout is not scheduled.  Schedule it.  */
-		KASSERT(!callout_pending(&xfer->ux_callout));
-		callout_reset(&xfer->ux_callout, mstohz(xfer->ux_timeout),
-		    ehci_timeout, xfer);
-		xfer->ux_timeout_set = true;
-	}
-
-	KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
-}
-
-/*
- * ehci_cancel_timeout_async(xfer)
- *
- *	Cancel the callout and the task of xfer, which have not yet run
- *	to completion, but don't wait for the callout or task to finish
- *	running.
- *
- *	If they have already fired, at worst they are waiting for the
- *	bus lock.  They will see that the xfer is no longer in progress
- *	and give up, or they will see that the xfer has been
- *	resubmitted with a new timeout and reschedule the callout.
- *
- *	If a resubmitted request completed so fast that the callout
- *	didn't have time to process a timer reset, just cancel the
- *	timer reset.
- */
-Static void
-ehci_cancel_timeout_async(struct usbd_xfer *xfer)
-{
-	EHCIHIST_FUNC(); EHCIHIST_CALLED();
-	struct ehci_softc *sc = EHCI_XFER2SC(xfer);
-
-	KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
-
-	KASSERT(xfer->ux_timeout_set);
-	xfer->ux_timeout_reset = false;
-	if (!callout_stop(&xfer->ux_callout)) {
-		/*
-		 * We stopped the callout before it ran.  The timeout
-		 * is no longer set.
-		 */
-		xfer->ux_timeout_set = false;
-	} else if (callout_invoking(&xfer->ux_callout)) {
-		/*
-		 * The callout has begun to run but it has not yet
-		 * acquired the lock and called callout_ack.  The task
-		 * cannot be queued yet, and the callout cannot have
-		 * been rescheduled yet.
-		 *
-		 * By the time the callout acquires the lock, we will
-		 * have transitioned from USBD_IN_PROGRESS to a
-		 * completed status, and possibly also resubmitted the
-		 * xfer and set xfer->ux_timeout_reset = true.  In both
-		 * cases, the callout will DTRT, so no further action
-		 * is needed here.
-		 */
-	} else if (usb_rem_task(xfer->ux_pipe->up_dev, &xfer->ux_aborttask)) {
-		/*
-		 * The callout had fired and scheduled the task, but we
-		 * stopped the task before it could run.  The timeout
-		 * is therefore no longer set -- the next resubmission
-		 * of the xfer must schedule a new timeout.
-		 *
-		 * The callout should not be be pending at this point:
-		 * it is scheduled only under the lock, and only when
-		 * xfer->ux_timeout_set is false, or by the callout or
-		 * task itself when xfer->ux_timeout_reset is true.
-		 */
-		xfer->ux_timeout_set = false;
-	}
-
-	/*
-	 * The callout cannot be scheduled and the task cannot be
-	 * queued at this point.  Either we cancelled them, or they are
-	 * already running and waiting for the bus lock.
-	 */
-	KASSERT(!callout_pending(&xfer->ux_callout));
-	KASSERT(!usb_task_pending(xfer->ux_pipe->up_dev, &xfer->ux_aborttask));
-
-	KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
-}
-
 /************************/
 
 Static int
@@ -4003,7 +3666,7 @@ ehci_device_ctrl_start(struct usbd_xfer 
 
 	/* Insert qTD in QH list - also does usb_syncmem(sqh) */
 	ehci_set_qh_qtd(sqh, setup);
-	ehci_schedule_timeout(xfer);
+	usbd_xfer_schedule_timeout(xfer);
 	ehci_add_intr_list(sc, exfer);
 	xfer->ux_status = USBD_IN_PROGRESS;
 	if (!polling)
@@ -4054,7 +3717,7 @@ ehci_device_ctrl_abort(struct usbd_xfer 
 	EHCIHIST_FUNC(); EHCIHIST_CALLED();
 
 	DPRINTF("xfer=%#jx", (uintptr_t)xfer, 0, 0, 0);
-	ehci_abort_xfer(xfer, USBD_CANCELLED);
+	usbd_xfer_abort(xfer);
 }
 
 /* Close a device control pipe. */
@@ -4201,7 +3864,7 @@ ehci_device_bulk_start(struct usbd_xfer 
 
 	/* also does usb_syncmem(sqh) */
 	ehci_set_qh_qtd(sqh, exfer->ex_sqtdstart);
-	ehci_schedule_timeout(xfer);
+	usbd_xfer_schedule_timeout(xfer);
 	ehci_add_intr_list(sc, exfer);
 	xfer->ux_status = USBD_IN_PROGRESS;
 	if (!polling)
@@ -4232,7 +3895,7 @@ ehci_device_bulk_abort(struct usbd_xfer 
 	EHCIHIST_FUNC(); EHCIHIST_CALLED();
 
 	DPRINTF("xfer %#jx", (uintptr_t)xfer, 0, 0, 0);
-	ehci_abort_xfer(xfer, USBD_CANCELLED);
+	usbd_xfer_abort(xfer);
 }
 
 /*
@@ -4417,7 +4080,7 @@ ehci_device_intr_start(struct usbd_xfer 
 
 	/* also does usb_syncmem(sqh) */
 	ehci_set_qh_qtd(sqh, exfer->ex_sqtdstart);
-	ehci_schedule_timeout(xfer);
+	usbd_xfer_schedule_timeout(xfer);
 	ehci_add_intr_list(sc, exfer);
 	xfer->ux_status = USBD_IN_PROGRESS;
 	if (!polling)
@@ -4451,7 +4114,7 @@ ehci_device_intr_abort(struct usbd_xfer 
 	 *       async doorbell. That's dependent on the async list, wheras
 	 *       intr xfers are periodic, should not use this?
 	 */
-	ehci_abort_xfer(xfer, USBD_CANCELLED);
+	usbd_xfer_abort(xfer);
 }
 
 Static void

Index: src/sys/dev/usb/motg.c
diff -u src/sys/dev/usb/motg.c:1.25 src/sys/dev/usb/motg.c:1.26
--- src/sys/dev/usb/motg.c:1.25	Sun Feb 17 04:17:52 2019
+++ src/sys/dev/usb/motg.c	Wed Feb 12 16:01:00 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: motg.c,v 1.25 2019/02/17 04:17:52 rin Exp $	*/
+/*	$NetBSD: motg.c,v 1.26 2020/02/12 16:01:00 riastradh Exp $	*/
 
 /*
  * Copyright (c) 1998, 2004, 2011, 2012, 2014 The NetBSD Foundation, Inc.
@@ -40,7 +40,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: motg.c,v 1.25 2019/02/17 04:17:52 rin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: motg.c,v 1.26 2020/02/12 16:01:00 riastradh Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_usb.h"
@@ -144,6 +144,7 @@ static void		motg_softintr(void *);
 static struct usbd_xfer *
 			motg_allocx(struct usbd_bus *, unsigned int);
 static void		motg_freex(struct usbd_bus *, struct usbd_xfer *);
+static bool		motg_dying(struct usbd_bus *);
 static void		motg_get_lock(struct usbd_bus *, kmutex_t **);
 static int		motg_roothub_ctrl(struct usbd_bus *, usb_device_request_t *,
 			    void *, int);
@@ -174,7 +175,7 @@ static void		motg_device_data_read(struc
 static void		motg_device_data_write(struct usbd_xfer *);
 
 static void		motg_device_clear_toggle(struct usbd_pipe *);
-static void		motg_device_xfer_abort(struct usbd_xfer *);
+static void		motg_abortx(struct usbd_xfer *);
 
 #define UBARR(sc) bus_space_barrier((sc)->sc_iot, (sc)->sc_ioh, 0, (sc)->sc_size, \
 			BUS_SPACE_BARRIER_READ|BUS_SPACE_BARRIER_WRITE)
@@ -233,6 +234,8 @@ const struct usbd_bus_methods motg_bus_m
 	.ubm_dopoll =	motg_poll,
 	.ubm_allocx =	motg_allocx,
 	.ubm_freex =	motg_freex,
+	.ubm_abortx =	motg_abortx,
+	.ubm_dying =	motg_dying,
 	.ubm_getlock =	motg_get_lock,
 	.ubm_rhctrl =	motg_roothub_ctrl,
 };
@@ -770,6 +773,14 @@ motg_freex(struct usbd_bus *bus, struct 
 	pool_cache_put(sc->sc_xferpool, xfer);
 }
 
+static bool
+motg_dying(struct usbd_bus *bus)
+{
+	struct motg_softc *sc = MOTG_BUS2SC(bus);
+
+	return sc->sc_dying;
+}
+
 static void
 motg_get_lock(struct usbd_bus *bus, kmutex_t **lock)
 {
@@ -1387,6 +1398,13 @@ motg_device_ctrl_intr_rx(struct motg_sof
 
 	KASSERT(mutex_owned(&sc->sc_lock));
 
+	/*
+	 * Try to claim this xfer for completion.  If it has already
+	 * completed or aborted, drop it on the floor.
+	 */
+	if (!usbd_xfer_trycomplete(xfer))
+		return;
+
 	KASSERT(ep->phase == DATA_IN || ep->phase == STATUS_IN);
 	/* select endpoint 0 */
 	UWRITE1(sc, MUSB2_REG_EPINDEX, 0);
@@ -1501,6 +1519,14 @@ motg_device_ctrl_intr_tx(struct motg_sof
 	MOTGHIST_FUNC(); MOTGHIST_CALLED();
 
 	KASSERT(mutex_owned(&sc->sc_lock));
+
+	/*
+	 * Try to claim this xfer for completion.  If it has already
+	 * completed or aborted, drop it on the floor.
+	 */
+	if (!usbd_xfer_trycomplete(xfer))
+		return;
+
 	if (ep->phase == DATA_IN || ep->phase == STATUS_IN) {
 		motg_device_ctrl_intr_rx(sc);
 		return;
@@ -1633,7 +1659,7 @@ motg_device_ctrl_abort(struct usbd_xfer 
 {
 	MOTGHIST_FUNC(); MOTGHIST_CALLED();
 
-	motg_device_xfer_abort(xfer);
+	usbd_xfer_abort(xfer);
 }
 
 /* Close a device control pipe */
@@ -2019,6 +2045,14 @@ motg_device_intr_tx(struct motg_softc *s
 	MOTGHIST_FUNC(); MOTGHIST_CALLED();
 
 	KASSERT(mutex_owned(&sc->sc_lock));
+
+	/*
+	 * Try to claim this xfer for completion.  If it has already
+	 * completed or aborted, drop it on the floor.
+	 */
+	if (!usbd_xfer_trycomplete(xfer))
+		return;
+
 	KASSERT(ep->ep_number == epnumber);
 
 	DPRINTFN(MD_BULK, " on ep %jd", epnumber, 0, 0, 0);
@@ -2102,7 +2136,7 @@ motg_device_data_abort(struct usbd_xfer 
 
 	MOTGHIST_FUNC(); MOTGHIST_CALLED();
 
-	motg_device_xfer_abort(xfer);
+	usbd_xfer_abort(xfer);
 }
 
 /* Close a device control pipe */
@@ -2151,7 +2185,7 @@ motg_device_clear_toggle(struct usbd_pip
 
 /* Abort a device control request. */
 static void
-motg_device_xfer_abort(struct usbd_xfer *xfer)
+motg_abortx(struct usbd_xfer *xfer)
 {
 	MOTGHIST_FUNC(); MOTGHIST_CALLED();
 	uint8_t csr;
@@ -2162,30 +2196,6 @@ motg_device_xfer_abort(struct usbd_xfer 
 	ASSERT_SLEEPABLE();
 
 	/*
-	 * We are synchronously aborting.  Try to stop the
-	 * callout and task, but if we can't, wait for them to
-	 * complete.
-	 */
-	callout_halt(&xfer->ux_callout, &sc->sc_lock);
-	usb_rem_task_wait(xfer->ux_pipe->up_dev, &xfer->ux_aborttask,
-	    USB_TASKQ_HC, &sc->sc_lock);
-
-	/*
-	 * The xfer cannot have been cancelled already.  It is the
-	 * responsibility of the caller of usbd_abort_pipe not to try
-	 * to abort a pipe multiple times, whether concurrently or
-	 * sequentially.
-	 */
-	KASSERT(xfer->ux_status != USBD_CANCELLED);
-
-	/* If anyone else beat us, we're done.  */
-	if (xfer->ux_status != USBD_IN_PROGRESS)
-		return;
-
-	/* We beat everyone else.  Claim the status.  */
-	xfer->ux_status = USBD_CANCELLED;
-
-	/*
 	 * If we're dying, skip the hardware action and just notify the
 	 * software that we're done.
 	 */

Index: src/sys/dev/usb/ohci.c
diff -u src/sys/dev/usb/ohci.c:1.292 src/sys/dev/usb/ohci.c:1.293
--- src/sys/dev/usb/ohci.c:1.292	Fri Nov 29 14:15:41 2019
+++ src/sys/dev/usb/ohci.c	Wed Feb 12 16:01:00 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: ohci.c,v 1.292 2019/11/29 14:15:41 gson Exp $	*/
+/*	$NetBSD: ohci.c,v 1.293 2020/02/12 16:01:00 riastradh Exp $	*/
 
 /*
  * Copyright (c) 1998, 2004, 2005, 2012 The NetBSD Foundation, Inc.
@@ -41,7 +41,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: ohci.c,v 1.292 2019/11/29 14:15:41 gson Exp $");
+__KERNEL_RCSID(0, "$NetBSD: ohci.c,v 1.293 2020/02/12 16:01:00 riastradh Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_usb.h"
@@ -169,6 +169,7 @@ Static void		ohci_device_isoc_enter(stru
 Static struct usbd_xfer *
 			ohci_allocx(struct usbd_bus *, unsigned int);
 Static void		ohci_freex(struct usbd_bus *, struct usbd_xfer *);
+Static bool		ohci_dying(struct usbd_bus *);
 Static void		ohci_get_lock(struct usbd_bus *, kmutex_t **);
 Static int		ohci_roothub_ctrl(struct usbd_bus *,
 			    usb_device_request_t *, void *, int);
@@ -213,12 +214,10 @@ Static void		ohci_device_isoc_done(struc
 Static usbd_status	ohci_device_setintr(ohci_softc_t *,
 			    struct ohci_pipe *, int);
 
-Static void		ohci_timeout(void *);
-Static void		ohci_timeout_task(void *);
 Static void		ohci_rhsc_enable(void *);
 
 Static void		ohci_close_pipe(struct usbd_pipe *, ohci_soft_ed_t *);
-Static void		ohci_abort_xfer(struct usbd_xfer *, usbd_status);
+Static void		ohci_abortx(struct usbd_xfer *);
 
 Static void		ohci_device_clear_toggle(struct usbd_pipe *);
 Static void		ohci_noop(struct usbd_pipe *);
@@ -287,6 +286,8 @@ Static const struct usbd_bus_methods ohc
 	.ubm_dopoll =	ohci_poll,
 	.ubm_allocx =	ohci_allocx,
 	.ubm_freex =	ohci_freex,
+	.ubm_abortx =	ohci_abortx,
+	.ubm_dying =	ohci_dying,
 	.ubm_getlock =	ohci_get_lock,
 	.ubm_rhctrl =	ohci_roothub_ctrl,
 };
@@ -1068,9 +1069,6 @@ ohci_allocx(struct usbd_bus *bus, unsign
 	if (xfer != NULL) {
 		memset(xfer, 0, sizeof(struct ohci_xfer));
 
-		/* Initialise this always so we can call remove on it. */
-		usb_init_task(&xfer->ux_aborttask, ohci_timeout_task, xfer,
-		    USB_TASKQ_MPSAFE);
 #ifdef DIAGNOSTIC
 		xfer->ux_state = XFER_BUSY;
 #endif
@@ -1092,6 +1090,14 @@ ohci_freex(struct usbd_bus *bus, struct 
 	pool_cache_put(sc->sc_xferpool, xfer);
 }
 
+Static bool
+ohci_dying(struct usbd_bus *bus)
+{
+	ohci_softc_t *sc = OHCI_BUS2SC(bus);
+
+	return sc->sc_dying;
+}
+
 Static void
 ohci_get_lock(struct usbd_bus *bus, kmutex_t **lock)
 {
@@ -1463,23 +1469,11 @@ ohci_softintr(void *v)
 		}
 
 		/*
-		 * If software has completed it, either by cancellation
-		 * or timeout, drop it on the floor.
+		 * Try to claim this xfer for completion.  If it has
+		 * already completed or aborted, drop it on the floor.
 		 */
-		if (xfer->ux_status != USBD_IN_PROGRESS) {
-			KASSERT(xfer->ux_status == USBD_CANCELLED ||
-			    xfer->ux_status == USBD_TIMEOUT);
+		if (!usbd_xfer_trycomplete(xfer))
 			continue;
-		}
-
-		/*
-		 * Cancel the timeout and the task, which have not yet
-		 * run.  If they have already fired, at worst they are
-		 * waiting for the lock.  They will see that the xfer
-		 * is no longer in progress and give up.
-		 */
-		callout_stop(&xfer->ux_callout);
-		usb_rem_task(xfer->ux_pipe->up_dev, &xfer->ux_aborttask);
 
 		len = std->len;
 		if (std->td.td_cbp != 0)
@@ -1553,23 +1547,11 @@ ohci_softintr(void *v)
 			continue;
 
 		/*
-		 * If software has completed it, either by cancellation
-		 * or timeout, drop it on the floor.
+		 * Try to claim this xfer for completion.  If it has
+		 * already completed or aborted, drop it on the floor.
 		 */
-		if (xfer->ux_status != USBD_IN_PROGRESS) {
-			KASSERT(xfer->ux_status == USBD_CANCELLED ||
-			    xfer->ux_status == USBD_TIMEOUT);
+		if (!usbd_xfer_trycomplete(xfer))
 			continue;
-		}
-
-		/*
-		 * Cancel the timeout and the task, which have not yet
-		 * run.  If they have already fired, at worst they are
-		 * waiting for the lock.  They will see that the xfer
-		 * is no longer in progress and give up.
-		 */
-		callout_stop(&xfer->ux_callout);
-		usb_rem_task(xfer->ux_pipe->up_dev, &xfer->ux_aborttask);
 
 		KASSERT(!sitd->isdone);
 #ifdef DIAGNOSTIC
@@ -1909,37 +1891,6 @@ ohci_hash_find_itd(ohci_softc_t *sc, ohc
 	return NULL;
 }
 
-void
-ohci_timeout(void *addr)
-{
-	OHCIHIST_FUNC(); OHCIHIST_CALLED();
-	struct usbd_xfer *xfer = addr;
-	ohci_softc_t *sc = OHCI_XFER2SC(xfer);
-	struct usbd_device *dev = xfer->ux_pipe->up_dev;
-
-	DPRINTF("xfer=%#jx", (uintptr_t)xfer, 0, 0, 0);
-
-	mutex_enter(&sc->sc_lock);
-	if (!sc->sc_dying && xfer->ux_status == USBD_IN_PROGRESS)
-		usb_add_task(dev, &xfer->ux_aborttask, USB_TASKQ_HC);
-	mutex_exit(&sc->sc_lock);
-}
-
-void
-ohci_timeout_task(void *addr)
-{
-	struct usbd_xfer *xfer = addr;
-	ohci_softc_t *sc = OHCI_XFER2SC(xfer);
-
-	OHCIHIST_FUNC(); OHCIHIST_CALLED();
-
-	DPRINTF("xfer=%#jx", (uintptr_t)xfer, 0, 0, 0);
-
-	mutex_enter(&sc->sc_lock);
-	ohci_abort_xfer(xfer, USBD_TIMEOUT);
-	mutex_exit(&sc->sc_lock);
-}
-
 #ifdef OHCI_DEBUG
 void
 ohci_dump_tds(ohci_softc_t *sc, ohci_soft_td_t *std)
@@ -2212,19 +2163,8 @@ ohci_close_pipe(struct usbd_pipe *pipe, 
 }
 
 /*
- * Cancel or timeout a device request.  We have two cases to deal with
- *
- * 1) A driver wants to stop scheduled or inflight transfers
- * 2) A transfer has timed out
- *
- * It's impossible to guarantee that the requested transfer will not
- * have (partially) happened since the hardware runs concurrently.
- *
- * Transfer state is protected by the bus lock and we set the transfer status
- * as soon as either of the above happens (with bus lock held).
- *
- * Then we arrange for the hardware to tells us that it is not still
- * processing the TDs by setting the sKip bit and requesting a SOF interrupt
+ * Arrange for the hardware to tell us that it is not still processing
+ * the TDs by setting the sKip bit and requesting a SOF interrupt
  *
  * Once we see the SOF interrupt we can check the transfer TDs/iTDs to see if
  * they've been processed and either
@@ -2233,7 +2173,7 @@ ohci_close_pipe(struct usbd_pipe *pipe, 
  *         used.  The softint handler will free the old ones.
  */
 void
-ohci_abort_xfer(struct usbd_xfer *xfer, usbd_status status)
+ohci_abortx(struct usbd_xfer *xfer)
 {
 	OHCIHIST_FUNC(); OHCIHIST_CALLED();
 	struct ohci_pipe *opipe = OHCI_PIPE2OPIPE(xfer->ux_pipe);
@@ -2243,46 +2183,15 @@ ohci_abort_xfer(struct usbd_xfer *xfer, 
 	ohci_physaddr_t headp;
 	int hit;
 
-	KASSERTMSG((status == USBD_CANCELLED || status == USBD_TIMEOUT),
-	    "invalid status for abort: %d", (int)status);
-
 	DPRINTF("xfer=%#jx pipe=%#jx sed=%#jx", (uintptr_t)xfer,
 	    (uintptr_t)opipe, (uintptr_t)sed, 0);
 
 	KASSERT(mutex_owned(&sc->sc_lock));
 	ASSERT_SLEEPABLE();
 
-	if (status == USBD_CANCELLED) {
-		/*
-		 * We are synchronously aborting.  Try to stop the
-		 * callout and task, but if we can't, wait for them to
-		 * complete.
-		 */
-		callout_halt(&xfer->ux_callout, &sc->sc_lock);
-		usb_rem_task_wait(xfer->ux_pipe->up_dev, &xfer->ux_aborttask,
-		    USB_TASKQ_HC, &sc->sc_lock);
-	} else {
-		/* Otherwise, we are timing out.  */
-		KASSERT(status == USBD_TIMEOUT);
-	}
-
-	/*
-	 * The xfer cannot have been cancelled already.  It is the
-	 * responsibility of the caller of usbd_abort_pipe not to try
-	 * to abort a pipe multiple times, whether concurrently or
-	 * sequentially.
-	 */
-	KASSERT(xfer->ux_status != USBD_CANCELLED);
-
-	/* Only the timeout, which runs only once, can time it out.  */
-	KASSERT(xfer->ux_status != USBD_TIMEOUT);
-
-	/* If anyone else beat us, we're done.  */
-	if (xfer->ux_status != USBD_IN_PROGRESS)
-		return;
-
-	/* We beat everyone else.  Claim the status.  */
-	xfer->ux_status = status;
+	KASSERTMSG((xfer->ux_status == USBD_CANCELLED ||
+		xfer->ux_status == USBD_TIMEOUT),
+	    "bad abort status: %d", xfer->ux_status);
 
 	/*
 	 * If we're dying, skip the hardware action and just notify the
@@ -2875,10 +2784,7 @@ ohci_device_ctrl_start(struct usbd_xfer 
 	    sizeof(sed->ed.ed_tailp),
 	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
 	OWRITE4(sc, OHCI_COMMAND_STATUS, OHCI_CLF);
-	if (xfer->ux_timeout && !polling) {
-		callout_reset(&xfer->ux_callout, mstohz(xfer->ux_timeout),
-			    ohci_timeout, xfer);
-	}
+	usbd_xfer_schedule_timeout(xfer);
 
 	DPRINTF("done", 0, 0, 0, 0);
 
@@ -2899,7 +2805,7 @@ ohci_device_ctrl_abort(struct usbd_xfer 
 
 	OHCIHIST_FUNC(); OHCIHIST_CALLED();
 	DPRINTF("xfer=%#jx", (uintptr_t)xfer, 0, 0, 0);
-	ohci_abort_xfer(xfer, USBD_CANCELLED);
+	usbd_xfer_abort(xfer);
 }
 
 /* Close a device control pipe. */
@@ -3090,11 +2996,7 @@ ohci_device_bulk_start(struct usbd_xfer 
 	usb_syncmem(&sed->dma, sed->offs, sizeof(sed->ed),
 	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
 	OWRITE4(sc, OHCI_COMMAND_STATUS, OHCI_BLF);
-	if (xfer->ux_timeout && !sc->sc_bus.ub_usepolling) {
-		callout_reset(&xfer->ux_callout, mstohz(xfer->ux_timeout),
-			    ohci_timeout, xfer);
-	}
-
+	usbd_xfer_schedule_timeout(xfer);
 	xfer->ux_status = USBD_IN_PROGRESS;
 	if (!polling)
 		mutex_exit(&sc->sc_lock);
@@ -3112,7 +3014,7 @@ ohci_device_bulk_abort(struct usbd_xfer 
 	KASSERT(mutex_owned(&sc->sc_lock));
 
 	DPRINTF("xfer=%#jx", (uintptr_t)xfer, 0, 0, 0);
-	ohci_abort_xfer(xfer, USBD_CANCELLED);
+	usbd_xfer_abort(xfer);
 }
 
 /*
@@ -3300,7 +3202,7 @@ ohci_device_intr_abort(struct usbd_xfer 
 	KASSERT(mutex_owned(&sc->sc_lock));
 	KASSERT(xfer->ux_pipe->up_intrxfer == xfer);
 
-	ohci_abort_xfer(xfer, USBD_CANCELLED);
+	usbd_xfer_abort(xfer);
 }
 
 /* Close a device interrupt pipe. */

Index: src/sys/dev/usb/uhci.c
diff -u src/sys/dev/usb/uhci.c:1.288 src/sys/dev/usb/uhci.c:1.289
--- src/sys/dev/usb/uhci.c:1.288	Sun Feb 17 04:17:52 2019
+++ src/sys/dev/usb/uhci.c	Wed Feb 12 16:01:00 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: uhci.c,v 1.288 2019/02/17 04:17:52 rin Exp $	*/
+/*	$NetBSD: uhci.c,v 1.289 2020/02/12 16:01:00 riastradh Exp $	*/
 
 /*
  * Copyright (c) 1998, 2004, 2011, 2012 The NetBSD Foundation, Inc.
@@ -42,7 +42,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uhci.c,v 1.288 2019/02/17 04:17:52 rin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uhci.c,v 1.289 2020/02/12 16:01:00 riastradh Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_usb.h"
@@ -194,10 +194,8 @@ Static void		uhci_check_intr(uhci_softc_
 			    ux_completeq_t *);
 Static void		uhci_idone(struct uhci_xfer *, ux_completeq_t *);
 
-Static void		uhci_abort_xfer(struct usbd_xfer *, usbd_status);
+Static void		uhci_abortx(struct usbd_xfer *);
 
-Static void		uhci_timeout(void *);
-Static void		uhci_timeout_task(void *);
 Static void		uhci_add_ls_ctrl(uhci_softc_t *, uhci_soft_qh_t *);
 Static void		uhci_add_hs_ctrl(uhci_softc_t *, uhci_soft_qh_t *);
 Static void		uhci_add_bulk(uhci_softc_t *, uhci_soft_qh_t *);
@@ -212,6 +210,7 @@ Static usbd_status	uhci_setup_isoc(struc
 Static struct usbd_xfer *
 			uhci_allocx(struct usbd_bus *, unsigned int);
 Static void		uhci_freex(struct usbd_bus *, struct usbd_xfer *);
+Static bool		uhci_dying(struct usbd_bus *);
 Static void		uhci_get_lock(struct usbd_bus *, kmutex_t **);
 Static int		uhci_roothub_ctrl(struct usbd_bus *,
 			    usb_device_request_t *, void *, int);
@@ -330,6 +329,8 @@ const struct usbd_bus_methods uhci_bus_m
 	.ubm_dopoll =	uhci_poll,
 	.ubm_allocx =	uhci_allocx,
 	.ubm_freex =	uhci_freex,
+	.ubm_abortx =	uhci_abortx,
+	.ubm_dying =	uhci_dying,
 	.ubm_getlock =	uhci_get_lock,
 	.ubm_rhctrl =	uhci_roothub_ctrl,
 };
@@ -657,9 +658,6 @@ uhci_allocx(struct usbd_bus *bus, unsign
 	if (xfer != NULL) {
 		memset(xfer, 0, sizeof(struct uhci_xfer));
 
-		/* Initialise this always so we can call remove on it. */
-		usb_init_task(&xfer->ux_aborttask, uhci_timeout_task, xfer,
-		    USB_TASKQ_MPSAFE);
 #ifdef DIAGNOSTIC
 		struct uhci_xfer *uxfer = UHCI_XFER2UXFER(xfer);
 		uxfer->ux_isdone = true;
@@ -686,6 +684,14 @@ uhci_freex(struct usbd_bus *bus, struct 
 	pool_cache_put(sc->sc_xferpool, xfer);
 }
 
+Static bool
+uhci_dying(struct usbd_bus *bus)
+{
+	struct uhci_softc *sc = UHCI_BUS2SC(bus);
+
+	return sc->sc_dying;
+}
+
 Static void
 uhci_get_lock(struct usbd_bus *bus, kmutex_t **lock)
 {
@@ -1565,24 +1571,11 @@ uhci_idone(struct uhci_xfer *ux, ux_comp
 	DPRINTFN(12, "ux=%#jx", (uintptr_t)ux, 0, 0, 0);
 
 	/*
-	 * If software has completed it, either by cancellation
-	 * or timeout, drop it on the floor.
+	 * Try to claim this xfer for completion.  If it has already
+	 * completed or aborted, drop it on the floor.
 	 */
-	if (xfer->ux_status != USBD_IN_PROGRESS) {
-		KASSERT(xfer->ux_status == USBD_CANCELLED ||
-		    xfer->ux_status == USBD_TIMEOUT);
-		DPRINTF("aborted xfer=%#jx", (uintptr_t)xfer, 0, 0, 0);
+	if (!usbd_xfer_trycomplete(xfer))
 		return;
-	}
-
-	/*
-	 * Cancel the timeout and the task, which have not yet
-	 * run.  If they have already fired, at worst they are
-	 * waiting for the lock.  They will see that the xfer
-	 * is no longer in progress and give up.
-	 */
-	callout_stop(&xfer->ux_callout);
-	usb_rem_task(xfer->ux_pipe->up_dev, &xfer->ux_aborttask);
 
 #ifdef DIAGNOSTIC
 #ifdef UHCI_DEBUG
@@ -1712,40 +1705,6 @@ uhci_idone(struct uhci_xfer *ux, ux_comp
 	DPRINTFN(12, "ux=%#jx done", (uintptr_t)ux, 0, 0, 0);
 }
 
-/*
- * Called when a request does not complete.
- */
-void
-uhci_timeout(void *addr)
-{
-	UHCIHIST_FUNC(); UHCIHIST_CALLED();
-	struct usbd_xfer *xfer = addr;
-	uhci_softc_t *sc = UHCI_XFER2SC(xfer);
-	struct usbd_device *dev = xfer->ux_pipe->up_dev;
-
-	DPRINTF("xfer %#jx", (uintptr_t)xfer, 0, 0, 0);
-
-	mutex_enter(&sc->sc_lock);
-	if (!sc->sc_dying && xfer->ux_status == USBD_IN_PROGRESS)
-		usb_add_task(dev, &xfer->ux_aborttask, USB_TASKQ_HC);
-	mutex_exit(&sc->sc_lock);
-}
-
-void
-uhci_timeout_task(void *addr)
-{
-	struct usbd_xfer *xfer = addr;
-	uhci_softc_t *sc = UHCI_XFER2SC(xfer);
-
-	UHCIHIST_FUNC(); UHCIHIST_CALLED();
-
-	DPRINTF("xfer=%#jx", (uintptr_t)xfer, 0, 0, 0);
-
-	mutex_enter(&sc->sc_lock);
-	uhci_abort_xfer(xfer, USBD_TIMEOUT);
-	mutex_exit(&sc->sc_lock);
-}
-
 void
 uhci_poll(struct usbd_bus *bus)
 {
@@ -2314,11 +2273,7 @@ uhci_device_bulk_start(struct usbd_xfer 
 
 	uhci_add_bulk(sc, sqh);
 	uhci_add_intr_list(sc, ux);
-
-	if (xfer->ux_timeout && !polling) {
-		callout_reset(&xfer->ux_callout, mstohz(xfer->ux_timeout),
-			    uhci_timeout, xfer);
-	}
+	usbd_xfer_schedule_timeout(xfer);
 	xfer->ux_status = USBD_IN_PROGRESS;
 	if (!polling)
 		mutex_exit(&sc->sc_lock);
@@ -2336,25 +2291,14 @@ uhci_device_bulk_abort(struct usbd_xfer 
 
 	UHCIHIST_FUNC(); UHCIHIST_CALLED();
 
-	uhci_abort_xfer(xfer, USBD_CANCELLED);
+	usbd_xfer_abort(xfer);
 }
 
 /*
- * Cancel or timeout a device request.  We have two cases to deal with
- *
- * 1) A driver wants to stop scheduled or inflight transfers
- * 2) A transfer has timed out
- *
- * It's impossible to guarantee that the requested transfer will not
- * have (partially) happened since the hardware runs concurrently.
- *
- * Transfer state is protected by the bus lock and we set the transfer status
- * as soon as either of the above happens (with bus lock held).
- *
  * To allow the hardware time to notice we simply wait.
  */
-void
-uhci_abort_xfer(struct usbd_xfer *xfer, usbd_status status)
+Static void
+uhci_abortx(struct usbd_xfer *xfer)
 {
 	UHCIHIST_FUNC(); UHCIHIST_CALLED();
 	struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer);
@@ -2362,45 +2306,14 @@ uhci_abort_xfer(struct usbd_xfer *xfer, 
 	uhci_softc_t *sc = UHCI_XFER2SC(xfer);
 	uhci_soft_td_t *std;
 
-	KASSERTMSG((status == USBD_CANCELLED || status == USBD_TIMEOUT),
-	    "invalid status for abort: %d", (int)status);
-
-	DPRINTFN(1,"xfer=%#jx, status=%jd", (uintptr_t)xfer, status, 0, 0);
+	DPRINTFN(1,"xfer=%#jx", (uintptr_t)xfer, 0, 0, 0);
 
 	KASSERT(mutex_owned(&sc->sc_lock));
 	ASSERT_SLEEPABLE();
 
-	if (status == USBD_CANCELLED) {
-		/*
-		 * We are synchronously aborting.  Try to stop the
-		 * callout and task, but if we can't, wait for them to
-		 * complete.
-		 */
-		callout_halt(&xfer->ux_callout, &sc->sc_lock);
-		usb_rem_task_wait(xfer->ux_pipe->up_dev, &xfer->ux_aborttask,
-		    USB_TASKQ_HC, &sc->sc_lock);
-	} else {
-		/* Otherwise, we are timing out.  */
-		KASSERT(status == USBD_TIMEOUT);
-	}
-
-	/*
-	 * The xfer cannot have been cancelled already.  It is the
-	 * responsibility of the caller of usbd_abort_pipe not to try
-	 * to abort a pipe multiple times, whether concurrently or
-	 * sequentially.
-	 */
-	KASSERT(xfer->ux_status != USBD_CANCELLED);
-
-	/* Only the timeout, which runs only once, can time it out.  */
-	KASSERT(xfer->ux_status != USBD_TIMEOUT);
-
-	/* If anyone else beat us, we're done.  */
-	if (xfer->ux_status != USBD_IN_PROGRESS)
-		return;
-
-	/* We beat everyone else.  Claim the status.  */
-	xfer->ux_status = status;
+	KASSERTMSG((xfer->ux_status == USBD_CANCELLED ||
+		xfer->ux_status == USBD_TIMEOUT),
+	    "bad abort status: %d", xfer->ux_status);
 
 	/*
 	 * If we're dying, skip the hardware action and just notify the
@@ -2672,10 +2585,7 @@ uhci_device_ctrl_start(struct usbd_xfer 
 		DPRINTF("--- dump end ---", 0, 0, 0, 0);
 	}
 #endif
-	if (xfer->ux_timeout && !polling) {
-		callout_reset(&xfer->ux_callout, mstohz(xfer->ux_timeout),
-			    uhci_timeout, xfer);
-	}
+	usbd_xfer_schedule_timeout(xfer);
 	xfer->ux_status = USBD_IN_PROGRESS;
 	if (!polling)
 		mutex_exit(&sc->sc_lock);
@@ -2834,7 +2744,7 @@ uhci_device_ctrl_abort(struct usbd_xfer 
 	KASSERT(mutex_owned(&sc->sc_lock));
 
 	UHCIHIST_FUNC(); UHCIHIST_CALLED();
-	uhci_abort_xfer(xfer, USBD_CANCELLED);
+	usbd_xfer_abort(xfer);
 }
 
 /* Close a device control pipe. */
@@ -2862,7 +2772,7 @@ uhci_device_intr_abort(struct usbd_xfer 
 	UHCIHIST_FUNC(); UHCIHIST_CALLED();
 	DPRINTF("xfer=%#jx", (uintptr_t)xfer, 0, 0, 0);
 
-	uhci_abort_xfer(xfer, USBD_CANCELLED);
+	usbd_xfer_abort(xfer);
 }
 
 /* Close a device interrupt pipe. */

Index: src/sys/dev/usb/usbdi.c
diff -u src/sys/dev/usb/usbdi.c:1.191 src/sys/dev/usb/usbdi.c:1.192
--- src/sys/dev/usb/usbdi.c:1.191	Wed Feb 12 16:00:17 2020
+++ src/sys/dev/usb/usbdi.c	Wed Feb 12 16:01:00 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: usbdi.c,v 1.191 2020/02/12 16:00:17 riastradh Exp $	*/
+/*	$NetBSD: usbdi.c,v 1.192 2020/02/12 16:01:00 riastradh Exp $	*/
 
 /*
  * Copyright (c) 1998, 2012, 2015 The NetBSD Foundation, Inc.
@@ -32,7 +32,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: usbdi.c,v 1.191 2020/02/12 16:00:17 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: usbdi.c,v 1.192 2020/02/12 16:01:00 riastradh Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_usb.h"
@@ -71,6 +71,10 @@ static void usbd_free_buffer(struct usbd
 static struct usbd_xfer *usbd_alloc_xfer(struct usbd_device *, unsigned int);
 static usbd_status usbd_free_xfer(struct usbd_xfer *);
 static void usbd_request_async_cb(struct usbd_xfer *, void *, usbd_status);
+static void usbd_xfer_timeout(void *);
+static void usbd_xfer_timeout_task(void *);
+static bool usbd_xfer_probe_timeout(struct usbd_xfer *);
+static void usbd_xfer_cancel_timeout_async(struct usbd_xfer *);
 
 #if defined(USB_DEBUG)
 void
@@ -474,7 +478,10 @@ usbd_alloc_xfer(struct usbd_device *dev,
 		goto out;
 	xfer->ux_bus = dev->ud_bus;
 	callout_init(&xfer->ux_callout, CALLOUT_MPSAFE);
+	callout_setfunc(&xfer->ux_callout, usbd_xfer_timeout, xfer);
 	cv_init(&xfer->ux_cv, "usbxfer");
+	usb_init_task(&xfer->ux_aborttask, usbd_xfer_timeout_task, xfer,
+	    USB_TASKQ_MPSAFE);
 
 out:
 	USBHIST_CALLARGS(usbdebug, "returns %#jx", (uintptr_t)xfer, 0, 0, 0);
@@ -1402,3 +1409,381 @@ usbd_get_string0(struct usbd_device *dev
 #endif
 	return USBD_NORMAL_COMPLETION;
 }
+
+/*
+ * usbd_xfer_trycomplete(xfer)
+ *
+ *	Try to claim xfer for completion.  Return true if successful,
+ *	false if the xfer has been synchronously aborted or has timed
+ *	out.
+ *
+ *	If this returns true, caller is responsible for setting
+ *	xfer->ux_status and calling usb_transfer_complete.  To be used
+ *	in a host controller interrupt handler.
+ *
+ *	Caller must either hold the bus lock or have the bus in polling
+ *	mode.
+ */
+bool
+usbd_xfer_trycomplete(struct usbd_xfer *xfer)
+{
+	struct usbd_bus *bus __diagused = xfer->ux_bus;
+
+	KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock));
+
+	/*
+	 * If software has completed it, either by synchronous abort or
+	 * by timeout, too late.
+	 */
+	if (xfer->ux_status != USBD_IN_PROGRESS)
+		return false;
+
+	/*
+	 * We are completing the xfer.  Cancel the timeout if we can,
+	 * but only asynchronously.  See usbd_xfer_cancel_timeout_async
+	 * for why we need not wait for the callout or task here.
+	 */
+	usbd_xfer_cancel_timeout_async(xfer);
+
+	/* Success!  Note: Caller must set xfer->ux_status afterward.  */
+	return true;
+}
+
+/*
+ * usbd_xfer_abort(xfer)
+ *
+ *	Try to claim xfer to abort.  If successful, mark it completed
+ *	with USBD_CANCELLED and call the bus-specific method to abort
+ *	at the hardware level.
+ *
+ *	To be called in thread context from struct
+ *	usbd_pipe_methods::upm_abort.
+ *
+ *	Caller must hold the bus lock.
+ */
+void
+usbd_xfer_abort(struct usbd_xfer *xfer)
+{
+	struct usbd_bus *bus = xfer->ux_bus;
+
+	KASSERT(mutex_owned(bus->ub_lock));
+
+	/*
+	 * If host controller interrupt or timer interrupt has
+	 * completed it, too late.  But the xfer cannot be
+	 * cancelled already -- only one caller can synchronously
+	 * abort.
+	 */
+	KASSERT(xfer->ux_status != USBD_CANCELLED);
+	if (xfer->ux_status != USBD_IN_PROGRESS)
+		return;
+
+	/*
+	 * Cancel the timeout if we can, but only asynchronously; see
+	 * usbd_xfer_cancel_timeout_async for why we need not wait for
+	 * the callout or task here.
+	 */
+	usbd_xfer_cancel_timeout_async(xfer);
+
+	/*
+	 * We beat everyone else.  Claim the status as cancelled and do
+	 * the bus-specific dance to abort the hardware.
+	 */
+	xfer->ux_status = USBD_CANCELLED;
+	bus->ub_methods->ubm_abortx(xfer);
+}
+
+/*
+ * usbd_xfer_timeout(xfer)
+ *
+ *	Called at IPL_SOFTCLOCK when too much time has elapsed waiting
+ *	for xfer to complete.  Since we can't abort the xfer at
+ *	IPL_SOFTCLOCK, defer to a usb_task to run it in thread context,
+ *	unless the xfer has completed or aborted concurrently -- and if
+ *	the xfer has also been resubmitted, take care of rescheduling
+ *	the callout.
+ */
+static void
+usbd_xfer_timeout(void *cookie)
+{
+	struct usbd_xfer *xfer = cookie;
+	struct usbd_bus *bus = xfer->ux_bus;
+	struct usbd_device *dev = xfer->ux_pipe->up_dev;
+
+	/* Acquire the lock so we can transition the timeout state.  */
+	mutex_enter(bus->ub_lock);
+
+	/*
+	 * Use usbd_xfer_probe_timeout to check whether the timeout is
+	 * still valid, or to reschedule the callout if necessary.  If
+	 * it is still valid, schedule the task.
+	 */
+	if (usbd_xfer_probe_timeout(xfer))
+		usb_add_task(dev, &xfer->ux_aborttask, USB_TASKQ_HC);
+
+	/*
+	 * Notify usbd_xfer_cancel_timeout_async that we may have
+	 * scheduled the task.  This causes callout_invoking to return
+	 * false in usbd_xfer_cancel_timeout_async so that it can tell
+	 * which stage in the callout->task->abort process we're at.
+	 */
+	callout_ack(&xfer->ux_callout);
+
+	/* All done -- release the lock.  */
+	mutex_exit(bus->ub_lock);
+}
+
+/*
+ * usbd_xfer_timeout_task(xfer)
+ *
+ *	Called in thread context when too much time has elapsed waiting
+ *	for xfer to complete.  Abort the xfer with USBD_TIMEOUT, unless
+ *	it has completed or aborted concurrently -- and if the xfer has
+ *	also been resubmitted, take care of rescheduling the callout.
+ */
+static void
+usbd_xfer_timeout_task(void *cookie)
+{
+	struct usbd_xfer *xfer = cookie;
+	struct usbd_bus *bus = xfer->ux_bus;
+
+	/* Acquire the lock so we can transition the timeout state.  */
+	mutex_enter(bus->ub_lock);
+
+	/*
+	 * Use usbd_xfer_probe_timeout to check whether the timeout is
+	 * still valid, or to reschedule the callout if necessary.  If
+	 * it is not valid -- the timeout has been asynchronously
+	 * cancelled, or the xfer has already been resubmitted -- then
+	 * we're done here.
+	 */
+	if (!usbd_xfer_probe_timeout(xfer))
+		goto out;
+
+	/*
+	 * May have completed or been aborted, but we're the only one
+	 * who can time it out.  If it has completed or been aborted,
+	 * no need to timeout.
+	 */
+	KASSERT(xfer->ux_status != USBD_TIMEOUT);
+	if (xfer->ux_status != USBD_IN_PROGRESS)
+		goto out;
+
+	/*
+	 * We beat everyone else.  Claim the status as timed out and do
+	 * the bus-specific dance to abort the hardware.
+	 */
+	xfer->ux_status = USBD_TIMEOUT;
+	bus->ub_methods->ubm_abortx(xfer);
+
+out:	/* All done -- release the lock.  */
+	mutex_exit(bus->ub_lock);
+}
+
+/*
+ * usbd_xfer_probe_timeout(xfer)
+ *
+ *	Probe the status of xfer's timeout.  Acknowledge and process a
+ *	request to reschedule.  Return true if the timeout is still
+ *	valid and the caller should take further action (queueing a
+ *	task or aborting the xfer), false if it must stop here.
+ */
+static bool
+usbd_xfer_probe_timeout(struct usbd_xfer *xfer)
+{
+	struct usbd_bus *bus = xfer->ux_bus;
+	bool valid;
+
+	KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock));
+
+	/* The timeout must be set.  */
+	KASSERT(xfer->ux_timeout_set);
+
+	/*
+	 * Neither callout nor task may be pending; they execute
+	 * alternately in lock step.
+	 */
+	KASSERT(!callout_pending(&xfer->ux_callout));
+	KASSERT(!usb_task_pending(xfer->ux_pipe->up_dev, &xfer->ux_aborttask));
+
+	/* There are a few cases... */
+	if (bus->ub_methods->ubm_dying(bus)) {
+		/* Host controller dying.  Drop it all on the floor.  */
+		xfer->ux_timeout_set = false;
+		xfer->ux_timeout_reset = false;
+		valid = false;
+	} else if (xfer->ux_timeout_reset) {
+		/*
+		 * The xfer completed _and_ got resubmitted while we
+		 * waited for the lock.  Acknowledge the request to
+		 * reschedule, and reschedule it if there is a timeout
+		 * and the bus is not polling.
+		 */
+		xfer->ux_timeout_reset = false;
+		if (xfer->ux_timeout && !bus->ub_usepolling) {
+			KASSERT(xfer->ux_timeout_set);
+			callout_schedule(&xfer->ux_callout,
+			    mstohz(xfer->ux_timeout));
+		} else {
+			/* No more callout or task scheduled.  */
+			xfer->ux_timeout_set = false;
+		}
+		valid = false;
+	} else if (xfer->ux_status != USBD_IN_PROGRESS) {
+		/*
+		 * The xfer has completed by hardware completion or by
+		 * software abort, and has not been resubmitted, so the
+		 * timeout must be unset, and is no longer valid for
+		 * the caller.
+		 */
+		xfer->ux_timeout_set = false;
+		valid = false;
+	} else {
+		/*
+		 * The xfer has not yet completed, so the timeout is
+		 * valid.
+		 */
+		valid = true;
+	}
+
+	/* Any reset must have been processed.  */
+	KASSERT(!xfer->ux_timeout_reset);
+
+	/*
+	 * Either we claim the timeout is set, or the callout is idle.
+	 * If the timeout is still set, we may be handing off to the
+	 * task instead, so this is an if but not an iff.
+	 */
+	KASSERT(xfer->ux_timeout_set || !callout_pending(&xfer->ux_callout));
+
+	/*
+	 * The task must be idle now.
+	 *
+	 * - If the caller is the callout, _and_ the timeout is still
+	 *   valid, the caller will schedule it, but it hasn't been
+	 *   scheduled yet.  (If the timeout is not valid, the task
+	 *   should not be scheduled.)
+	 *
+	 * - If the caller is the task, it cannot be scheduled again
+	 *   until the callout runs again, which won't happen until we
+	 *   next release the lock.
+	 */
+	KASSERT(!usb_task_pending(xfer->ux_pipe->up_dev, &xfer->ux_aborttask));
+
+	KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock));
+
+	return valid;
+}
+
+/*
+ * usbd_xfer_schedule_timeout(xfer)
+ *
+ *	Ensure that xfer has a timeout.  If the callout is already
+ *	queued or the task is already running, request that they
+ *	reschedule the callout.  If not, and if we're not polling,
+ *	schedule the callout anew.
+ *
+ *	To be called in thread context from struct
+ *	usbd_pipe_methods::upm_start.
+ */
+void
+usbd_xfer_schedule_timeout(struct usbd_xfer *xfer)
+{
+	struct usbd_bus *bus = xfer->ux_bus;
+
+	KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock));
+
+	if (xfer->ux_timeout_set) {
+		/*
+		 * Callout or task has fired from a prior completed
+		 * xfer but has not yet noticed that the xfer is done.
+		 * Ask it to reschedule itself to ux_timeout.
+		 */
+		xfer->ux_timeout_reset = true;
+	} else if (xfer->ux_timeout && !bus->ub_usepolling) {
+		/* Callout is not scheduled.  Schedule it.  */
+		KASSERT(!callout_pending(&xfer->ux_callout));
+		callout_schedule(&xfer->ux_callout, mstohz(xfer->ux_timeout));
+		xfer->ux_timeout_set = true;
+	}
+
+	KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock));
+}
+
+/*
+ * usbd_xfer_cancel_timeout_async(xfer)
+ *
+ *	Cancel the callout and the task of xfer, which have not yet run
+ *	to completion, but don't wait for the callout or task to finish
+ *	running.
+ *
+ *	If they have already fired, at worst they are waiting for the
+ *	bus lock.  They will see that the xfer is no longer in progress
+ *	and give up, or they will see that the xfer has been
+ *	resubmitted with a new timeout and reschedule the callout.
+ *
+ *	If a resubmitted request completed so fast that the callout
+ *	didn't have time to process a timer reset, just cancel the
+ *	timer reset.
+ */
+static void
+usbd_xfer_cancel_timeout_async(struct usbd_xfer *xfer)
+{
+	struct usbd_bus *bus __diagused = xfer->ux_bus;
+
+	KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock));
+
+	/*
+	 * If the timer wasn't running anyway, forget about it.  This
+	 * can happen if we are completing an isochronous transfer
+	 * which doesn't use the same timeout logic.
+	 */
+	if (!xfer->ux_timeout_set)
+		return;
+
+	xfer->ux_timeout_reset = false;
+	if (!callout_stop(&xfer->ux_callout)) {
+		/*
+		 * We stopped the callout before it ran.  The timeout
+		 * is no longer set.
+		 */
+		xfer->ux_timeout_set = false;
+	} else if (callout_invoking(&xfer->ux_callout)) {
+		/*
+		 * The callout has begun to run but it has not yet
+		 * acquired the lock and called callout_ack.  The task
+		 * cannot be queued yet, and the callout cannot have
+		 * been rescheduled yet.
+		 *
+		 * By the time the callout acquires the lock, we will
+		 * have transitioned from USBD_IN_PROGRESS to a
+		 * completed status, and possibly also resubmitted the
+		 * xfer and set xfer->ux_timeout_reset = true.  In both
+		 * cases, the callout will DTRT, so no further action
+		 * is needed here.
+		 */
+	} else if (usb_rem_task(xfer->ux_pipe->up_dev, &xfer->ux_aborttask)) {
+		/*
+		 * The callout had fired and scheduled the task, but we
+		 * stopped the task before it could run.  The timeout
+		 * is therefore no longer set -- the next resubmission
+		 * of the xfer must schedule a new timeout.
+		 *
+	 * The callout should not be pending at this point:
+		 * it is scheduled only under the lock, and only when
+		 * xfer->ux_timeout_set is false, or by the callout or
+		 * task itself when xfer->ux_timeout_reset is true.
+		 */
+		xfer->ux_timeout_set = false;
+	}
+
+	/*
+	 * The callout cannot be scheduled and the task cannot be
+	 * queued at this point.  Either we cancelled them, or they are
+	 * already running and waiting for the bus lock.
+	 */
+	KASSERT(!callout_pending(&xfer->ux_callout));
+	KASSERT(!usb_task_pending(xfer->ux_pipe->up_dev, &xfer->ux_aborttask));
+
+	KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock));
+}

Index: src/sys/dev/usb/usbdi.h
diff -u src/sys/dev/usb/usbdi.h:1.100 src/sys/dev/usb/usbdi.h:1.101
--- src/sys/dev/usb/usbdi.h:1.100	Wed Feb 12 15:59:44 2020
+++ src/sys/dev/usb/usbdi.h	Wed Feb 12 16:01:00 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: usbdi.h,v 1.100 2020/02/12 15:59:44 riastradh Exp $	*/
+/*	$NetBSD: usbdi.h,v 1.101 2020/02/12 16:01:00 riastradh Exp $	*/
 /*	$FreeBSD: src/sys/dev/usb/usbdi.h,v 1.18 1999/11/17 22:33:49 n_hibma Exp $	*/
 
 /*
@@ -188,6 +188,11 @@ int usbd_ratecheck(struct timeval *);
 usbd_status usbd_get_string(struct usbd_device *, int, char *);
 usbd_status usbd_get_string0(struct usbd_device *, int, char *, int);
 
+/* For use by HCI drivers, not USB device drivers */
+void usbd_xfer_schedule_timeout(struct usbd_xfer *);
+bool usbd_xfer_trycomplete(struct usbd_xfer *);
+void usbd_xfer_abort(struct usbd_xfer *);
+
 /* An iterator for descriptors. */
 typedef struct {
 	const uByte *cur;

Index: src/sys/dev/usb/usbdivar.h
diff -u src/sys/dev/usb/usbdivar.h:1.121 src/sys/dev/usb/usbdivar.h:1.122
--- src/sys/dev/usb/usbdivar.h:1.121	Wed Feb 12 16:00:17 2020
+++ src/sys/dev/usb/usbdivar.h	Wed Feb 12 16:01:00 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: usbdivar.h,v 1.121 2020/02/12 16:00:17 riastradh Exp $	*/
+/*	$NetBSD: usbdivar.h,v 1.122 2020/02/12 16:01:00 riastradh Exp $	*/
 
 /*
  * Copyright (c) 1998, 2012 The NetBSD Foundation, Inc.
@@ -96,6 +96,8 @@ struct usbd_bus_methods {
 	void		      (*ubm_dopoll)(struct usbd_bus *);
 	struct usbd_xfer     *(*ubm_allocx)(struct usbd_bus *, unsigned int);
 	void		      (*ubm_freex)(struct usbd_bus *, struct usbd_xfer *);
+	void		      (*ubm_abortx)(struct usbd_xfer *);
+	bool		      (*ubm_dying)(struct usbd_bus *);
 	void		      (*ubm_getlock)(struct usbd_bus *, kmutex_t **);
 	usbd_status	      (*ubm_newdev)(device_t, struct usbd_bus *, int,
 					    int, int, struct usbd_port *);

Index: src/sys/dev/usb/xhci.c
diff -u src/sys/dev/usb/xhci.c:1.115 src/sys/dev/usb/xhci.c:1.116
--- src/sys/dev/usb/xhci.c:1.115	Sun Dec 29 09:17:51 2019
+++ src/sys/dev/usb/xhci.c	Wed Feb 12 16:01:00 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: xhci.c,v 1.115 2019/12/29 09:17:51 skrll Exp $	*/
+/*	$NetBSD: xhci.c,v 1.116 2020/02/12 16:01:00 riastradh Exp $	*/
 
 /*
  * Copyright (c) 2013 Jonathan A. Kollasch
@@ -34,7 +34,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xhci.c,v 1.115 2019/12/29 09:17:51 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xhci.c,v 1.116 2020/02/12 16:01:00 riastradh Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_usb.h"
@@ -143,6 +143,8 @@ static void xhci_softintr(void *);
 static void xhci_poll(struct usbd_bus *);
 static struct usbd_xfer *xhci_allocx(struct usbd_bus *, unsigned int);
 static void xhci_freex(struct usbd_bus *, struct usbd_xfer *);
+static void xhci_abortx(struct usbd_xfer *);
+static bool xhci_dying(struct usbd_bus *);
 static void xhci_get_lock(struct usbd_bus *, kmutex_t **);
 static usbd_status xhci_new_device(device_t, struct usbd_bus *, int, int, int,
     struct usbd_port *);
@@ -208,15 +210,14 @@ static void xhci_device_bulk_abort(struc
 static void xhci_device_bulk_close(struct usbd_pipe *);
 static void xhci_device_bulk_done(struct usbd_xfer *);
 
-static void xhci_timeout(void *);
-static void xhci_timeout_task(void *);
-
 static const struct usbd_bus_methods xhci_bus_methods = {
 	.ubm_open = xhci_open,
 	.ubm_softint = xhci_softintr,
 	.ubm_dopoll = xhci_poll,
 	.ubm_allocx = xhci_allocx,
 	.ubm_freex = xhci_freex,
+	.ubm_abortx = xhci_abortx,
+	.ubm_dying = xhci_dying,
 	.ubm_getlock = xhci_get_lock,
 	.ubm_newdev = xhci_new_device,
 	.ubm_rhctrl = xhci_roothub_ctrl,
@@ -1720,53 +1721,22 @@ xhci_close_pipe(struct usbd_pipe *pipe)
  * Should be called with sc_lock held.
  */
 static void
-xhci_abort_xfer(struct usbd_xfer *xfer, usbd_status status)
+xhci_abortx(struct usbd_xfer *xfer)
 {
 	XHCIHIST_FUNC();
 	struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
 	struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
 	const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
 
-	KASSERTMSG((status == USBD_CANCELLED || status == USBD_TIMEOUT),
-	    "invalid status for abort: %d", (int)status);
-
-	XHCIHIST_CALLARGS("xfer %#jx pipe %#jx status %jd",
-	    (uintptr_t)xfer, (uintptr_t)xfer->ux_pipe, status, 0);
+	XHCIHIST_CALLARGS("xfer %#jx pipe %#jx",
+	    (uintptr_t)xfer, (uintptr_t)xfer->ux_pipe, 0, 0);
 
 	KASSERT(mutex_owned(&sc->sc_lock));
 	ASSERT_SLEEPABLE();
 
-	if (status == USBD_CANCELLED) {
-		/*
-		 * We are synchronously aborting.  Try to stop the
-		 * callout and task, but if we can't, wait for them to
-		 * complete.
-		 */
-		callout_halt(&xfer->ux_callout, &sc->sc_lock);
-		usb_rem_task_wait(xfer->ux_pipe->up_dev, &xfer->ux_aborttask,
-		    USB_TASKQ_HC, &sc->sc_lock);
-	} else {
-		/* Otherwise, we are timing out.  */
-		KASSERT(status == USBD_TIMEOUT);
-	}
-
-	/*
-	 * The xfer cannot have been cancelled already.  It is the
-	 * responsibility of the caller of usbd_abort_pipe not to try
-	 * to abort a pipe multiple times, whether concurrently or
-	 * sequentially.
-	 */
-	KASSERT(xfer->ux_status != USBD_CANCELLED);
-
-	/* Only the timeout, which runs only once, can time it out.  */
-	KASSERT(xfer->ux_status != USBD_TIMEOUT);
-
-	/* If anyone else beat us, we're done.  */
-	if (xfer->ux_status != USBD_IN_PROGRESS)
-		return;
-
-	/* We beat everyone else.  Claim the status.  */
-	xfer->ux_status = status;
+	KASSERTMSG((xfer->ux_status == USBD_CANCELLED ||
+		xfer->ux_status == USBD_TIMEOUT),
+	    "bad abort status: %d", xfer->ux_status);
 
 	/*
 	 * If we're dying, skip the hardware action and just notify the
@@ -1998,6 +1968,13 @@ xhci_event_transfer(struct xhci_softc * 
 		return;
 	}
 
+	/*
+	 * Try to claim this xfer for completion.  If it has already
+	 * completed or aborted, drop it on the floor.
+	 */
+	if (!usbd_xfer_trycomplete(xfer))
+		return;
+
 	/* 4.11.5.2 Event Data TRB */
 	if ((trb_3 & XHCI_TRB_3_ED_BIT) != 0) {
 		DPRINTFN(14, "transfer Event Data: 0x%016jx 0x%08jx"
@@ -2046,17 +2023,7 @@ xhci_event_transfer(struct xhci_softc * 
 	case XHCI_TRB_ERROR_STOPPED:
 	case XHCI_TRB_ERROR_LENGTH:
 	case XHCI_TRB_ERROR_STOPPED_SHORT:
-		/*
-		 * don't complete the transfer being aborted
-		 * as abort_xfer does instead.
-		 */
-		if (xfer->ux_status == USBD_CANCELLED ||
-		    xfer->ux_status == USBD_TIMEOUT) {
-			DPRINTFN(14, "ignore aborting xfer %#jx",
-			    (uintptr_t)xfer, 0, 0, 0);
-			return;
-		}
-		err = USBD_CANCELLED;
+		err = USBD_IOERROR;
 		break;
 	case XHCI_TRB_ERROR_STALL:
 	case XHCI_TRB_ERROR_BABBLE:
@@ -2079,15 +2046,6 @@ xhci_event_transfer(struct xhci_softc * 
 		/* Override the status.  */
 		xfer->ux_status = USBD_STALLED;
 
-		/*
-		 * Cancel the timeout and the task, which have not yet
-		 * run.  If they have already fired, at worst they are
-		 * waiting for the lock.  They will see that the xfer
-		 * is no longer in progress and give up.
-		 */
-		callout_stop(&xfer->ux_callout);
-		usb_rem_task(xfer->ux_pipe->up_dev, &xfer->ux_aborttask);
-
 		xhci_clear_endpoint_stall_async(xfer);
 		return;
 	default:
@@ -2096,29 +2054,9 @@ xhci_event_transfer(struct xhci_softc * 
 		break;
 	}
 
-	/*
-	 * If software has completed it, either by cancellation
-	 * or timeout, drop it on the floor.
-	 */
-	if (xfer->ux_status != USBD_IN_PROGRESS) {
-		KASSERTMSG((xfer->ux_status == USBD_CANCELLED ||
-		            xfer->ux_status == USBD_TIMEOUT),
-			   "xfer %p status %x", xfer, xfer->ux_status);
-		return;
-	}
-
-	/* Otherwise, set the status.  */
+	/* Set the status.  */
 	xfer->ux_status = err;
 
-	/*
-	 * Cancel the timeout and the task, which have not yet
-	 * run.  If they have already fired, at worst they are
-	 * waiting for the lock.  They will see that the xfer
-	 * is no longer in progress and give up.
-	 */
-	callout_stop(&xfer->ux_callout);
-	usb_rem_task(xfer->ux_pipe->up_dev, &xfer->ux_aborttask);
-
 	if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0 ||
 	    (trb_0 & 0x3) == 0x0) {
 		usb_transfer_complete(xfer);
@@ -2285,8 +2223,6 @@ xhci_allocx(struct usbd_bus *bus, unsign
 	xfer = pool_cache_get(sc->sc_xferpool, PR_WAITOK);
 	if (xfer != NULL) {
 		memset(xfer, 0, sizeof(struct xhci_xfer));
-		usb_init_task(&xfer->ux_aborttask, xhci_timeout_task, xfer,
-		    USB_TASKQ_MPSAFE);
 #ifdef DIAGNOSTIC
 		xfer->ux_state = XFER_BUSY;
 #endif
@@ -2313,6 +2249,14 @@ xhci_freex(struct usbd_bus *bus, struct 
 	pool_cache_put(sc->sc_xferpool, xfer);
 }
 
+static bool
+xhci_dying(struct usbd_bus *bus)
+{
+	struct xhci_softc * const sc = XHCI_BUS2SC(bus);
+
+	return sc->sc_dying;
+}
+
 static void
 xhci_get_lock(struct usbd_bus *bus, kmutex_t **lock)
 {
@@ -3908,11 +3852,7 @@ xhci_device_ctrl_start(struct usbd_xfer 
 		mutex_enter(&sc->sc_lock);
 	xfer->ux_status = USBD_IN_PROGRESS;
 	xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
-
-	if (xfer->ux_timeout && !xhci_polling_p(sc)) {
-		callout_reset(&xfer->ux_callout, mstohz(xfer->ux_timeout),
-		    xhci_timeout, xfer);
-	}
+	usbd_xfer_schedule_timeout(xfer);
 	if (!polling)
 		mutex_exit(&sc->sc_lock);
 
@@ -3937,7 +3877,7 @@ xhci_device_ctrl_abort(struct usbd_xfer 
 {
 	XHCIHIST_FUNC(); XHCIHIST_CALLED();
 
-	xhci_abort_xfer(xfer, USBD_CANCELLED);
+	usbd_xfer_abort(xfer);
 }
 
 static void
@@ -4032,11 +3972,7 @@ xhci_device_bulk_start(struct usbd_xfer 
 		mutex_enter(&sc->sc_lock);
 	xfer->ux_status = USBD_IN_PROGRESS;
 	xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
-
-	if (xfer->ux_timeout && !xhci_polling_p(sc)) {
-		callout_reset(&xfer->ux_callout, mstohz(xfer->ux_timeout),
-		    xhci_timeout, xfer);
-	}
+	usbd_xfer_schedule_timeout(xfer);
 	if (!polling)
 		mutex_exit(&sc->sc_lock);
 
@@ -4065,7 +4001,7 @@ xhci_device_bulk_abort(struct usbd_xfer 
 {
 	XHCIHIST_FUNC(); XHCIHIST_CALLED();
 
-	xhci_abort_xfer(xfer, USBD_CANCELLED);
+	usbd_xfer_abort(xfer);
 }
 
 static void
@@ -4146,11 +4082,7 @@ xhci_device_intr_start(struct usbd_xfer 
 		mutex_enter(&sc->sc_lock);
 	xfer->ux_status = USBD_IN_PROGRESS;
 	xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
-
-	if (xfer->ux_timeout && !polling) {
-		callout_reset(&xfer->ux_callout, mstohz(xfer->ux_timeout),
-		    xhci_timeout, xfer);
-	}
+	usbd_xfer_schedule_timeout(xfer);
 	if (!polling)
 		mutex_exit(&sc->sc_lock);
 
@@ -4187,7 +4119,7 @@ xhci_device_intr_abort(struct usbd_xfer 
 
 	KASSERT(mutex_owned(&sc->sc_lock));
 	KASSERT(xfer->ux_pipe->up_intrxfer == xfer);
-	xhci_abort_xfer(xfer, USBD_CANCELLED);
+	usbd_xfer_abort(xfer);
 }
 
 static void
@@ -4200,32 +4132,3 @@ xhci_device_intr_close(struct usbd_pipe 
 
 	xhci_close_pipe(pipe);
 }
-
-/* ------------ */
-
-static void
-xhci_timeout(void *addr)
-{
-	XHCIHIST_FUNC(); XHCIHIST_CALLED();
-	struct xhci_xfer * const xx = addr;
-	struct usbd_xfer * const xfer = &xx->xx_xfer;
-	struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
-	struct usbd_device *dev = xfer->ux_pipe->up_dev;
-
-	mutex_enter(&sc->sc_lock);
-	if (!sc->sc_dying && xfer->ux_status == USBD_IN_PROGRESS)
-		usb_add_task(dev, &xfer->ux_aborttask, USB_TASKQ_HC);
-	mutex_exit(&sc->sc_lock);
-}
-
-static void
-xhci_timeout_task(void *addr)
-{
-	XHCIHIST_FUNC(); XHCIHIST_CALLED();
-	struct usbd_xfer * const xfer = addr;
-	struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
-
-	mutex_enter(&sc->sc_lock);
-	xhci_abort_xfer(xfer, USBD_TIMEOUT);
-	mutex_exit(&sc->sc_lock);
-}

Index: src/sys/external/bsd/dwc2/dwc2.c
diff -u src/sys/external/bsd/dwc2/dwc2.c:1.66 src/sys/external/bsd/dwc2/dwc2.c:1.67
--- src/sys/external/bsd/dwc2/dwc2.c:1.66	Wed Dec  4 06:28:35 2019
+++ src/sys/external/bsd/dwc2/dwc2.c	Wed Feb 12 16:01:00 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: dwc2.c,v 1.66 2019/12/04 06:28:35 skrll Exp $	*/
+/*	$NetBSD: dwc2.c,v 1.67 2020/02/12 16:01:00 riastradh Exp $	*/
 
 /*-
  * Copyright (c) 2013 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: dwc2.c,v 1.66 2019/12/04 06:28:35 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: dwc2.c,v 1.67 2020/02/12 16:01:00 riastradh Exp $");
 
 #include "opt_usb.h"
 
@@ -116,6 +116,7 @@ Static struct usbd_xfer *
 			dwc2_allocx(struct usbd_bus *, unsigned int);
 Static void		dwc2_freex(struct usbd_bus *, struct usbd_xfer *);
 Static void		dwc2_get_lock(struct usbd_bus *, kmutex_t **);
+Static bool		dwc2_dying(struct usbd_bus *);
 Static int		dwc2_roothub_ctrl(struct usbd_bus *, usb_device_request_t *,
 			    void *, int);
 
@@ -150,7 +151,7 @@ Static void		dwc2_device_isoc_done(struc
 Static usbd_status	dwc2_device_start(struct usbd_xfer *);
 
 Static void		dwc2_close_pipe(struct usbd_pipe *);
-Static void		dwc2_abort_xfer(struct usbd_xfer *, usbd_status);
+Static void		dwc2_abortx(struct usbd_xfer *);
 
 Static void		dwc2_device_clear_toggle(struct usbd_pipe *);
 Static void		dwc2_noop(struct usbd_pipe *pipe);
@@ -158,9 +159,6 @@ Static void		dwc2_noop(struct usbd_pipe 
 Static int		dwc2_interrupt(struct dwc2_softc *);
 Static void		dwc2_rhc(void *);
 
-Static void		dwc2_timeout(void *);
-Static void		dwc2_timeout_task(void *);
-
 
 static inline void
 dwc2_allocate_bus_bandwidth(struct dwc2_hsotg *hsotg, u16 bw,
@@ -180,6 +178,8 @@ Static const struct usbd_bus_methods dwc
 	.ubm_dopoll =	dwc2_poll,
 	.ubm_allocx =	dwc2_allocx,
 	.ubm_freex =	dwc2_freex,
+	.ubm_abortx =	dwc2_abortx,
+	.ubm_dying =	dwc2_dying,
 	.ubm_getlock =	dwc2_get_lock,
 	.ubm_rhctrl =	dwc2_roothub_ctrl,
 };
@@ -232,22 +232,15 @@ dwc2_allocx(struct usbd_bus *bus, unsign
 {
 	struct dwc2_softc *sc = DWC2_BUS2SC(bus);
 	struct dwc2_xfer *dxfer;
-	struct usbd_xfer *xfer;
 
 	DPRINTFN(10, "\n");
 
 	DWC2_EVCNT_INCR(sc->sc_ev_xferpoolget);
 	dxfer = pool_cache_get(sc->sc_xferpool, PR_WAITOK);
-	xfer = (struct usbd_xfer *)dxfer;
 	if (dxfer != NULL) {
 		memset(dxfer, 0, sizeof(*dxfer));
-
 		dxfer->urb = dwc2_hcd_urb_alloc(sc->sc_hsotg,
 		    nframes, GFP_KERNEL);
-
-		/* Initialise this always so we can call remove on it. */
-		usb_init_task(&xfer->ux_aborttask, dwc2_timeout_task, xfer,
-		    USB_TASKQ_MPSAFE);
 #ifdef DIAGNOSTIC
 		dxfer->xfer.ux_state = XFER_BUSY;
 #endif
@@ -275,6 +268,14 @@ dwc2_freex(struct usbd_bus *bus, struct 
 	pool_cache_put(sc->sc_xferpool, xfer);
 }
 
+Static bool
+dwc2_dying(struct usbd_bus *bus)
+{
+	struct dwc2_softc *sc = DWC2_BUS2SC(bus);
+
+	return sc->sc_dying;
+}
+
 Static void
 dwc2_get_lock(struct usbd_bus *bus, kmutex_t **lock)
 {
@@ -318,61 +319,49 @@ dwc2_softintr(void *v)
 	struct usbd_bus *bus = v;
 	struct dwc2_softc *sc = DWC2_BUS2SC(bus);
 	struct dwc2_hsotg *hsotg = sc->sc_hsotg;
-	struct dwc2_xfer *dxfer;
+	struct dwc2_xfer *dxfer, *next;
+	TAILQ_HEAD(, dwc2_xfer) claimed = TAILQ_HEAD_INITIALIZER(claimed);
 
 	KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
 
+	/*
+	 * Grab all the xfers that have not been aborted or timed out.
+	 * Do so under a single lock -- without dropping it to run
+	 * usb_transfer_complete as we go -- so that dwc2_abortx won't
+	 * remove next out from under us during iteration when we've
+	 * dropped the lock.
+	 */
 	mutex_spin_enter(&hsotg->lock);
-	while ((dxfer = TAILQ_FIRST(&sc->sc_complete)) != NULL) {
-
-		KASSERTMSG(!callout_pending(&dxfer->xfer.ux_callout),
-		    "xfer %p pipe %p\n", dxfer, dxfer->xfer.ux_pipe);
-
-		/*
-		 * dwc2_abort_xfer will remove this transfer from the
-		 * sc_complete queue
-		 */
-		/*XXXNH not tested */
-		if (dxfer->xfer.ux_status == USBD_CANCELLED ||
-		    dxfer->xfer.ux_status == USBD_TIMEOUT) {
+	TAILQ_FOREACH_SAFE(dxfer, &sc->sc_complete, xnext, next) {
+		if (!usbd_xfer_trycomplete(&dxfer->xfer))
+			/*
+			 * The hard interrupt handler decided to
+			 * complete the xfer, and put it on sc_complete
+			 * to pass it to us in the soft interrupt
+			 * handler, but in the time between hard
+			 * interrupt and soft interrupt, the xfer was
+			 * aborted or timed out and we lost the race.
+			 */
 			continue;
-		}
-
+		KASSERT(dxfer->xfer.ux_status == USBD_IN_PROGRESS);
+		KASSERT(dxfer->intr_status != USBD_CANCELLED);
+		KASSERT(dxfer->intr_status != USBD_TIMEOUT);
 		TAILQ_REMOVE(&sc->sc_complete, dxfer, xnext);
-
-		mutex_spin_exit(&hsotg->lock);
-		usb_transfer_complete(&dxfer->xfer);
-		mutex_spin_enter(&hsotg->lock);
+		TAILQ_INSERT_TAIL(&claimed, dxfer, xnext);
 	}
 	mutex_spin_exit(&hsotg->lock);
-}
-
-Static void
-dwc2_timeout(void *addr)
-{
-	struct usbd_xfer *xfer = addr;
- 	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
-	struct usbd_device *dev = xfer->ux_pipe->up_dev;
-
-	DPRINTF("xfer=%p\n", xfer);
-
-	mutex_enter(&sc->sc_lock);
-	if (!sc->sc_dying && xfer->ux_status == USBD_IN_PROGRESS)
-		usb_add_task(dev, &xfer->ux_aborttask, USB_TASKQ_HC);
-	mutex_exit(&sc->sc_lock);
-}
-
-Static void
-dwc2_timeout_task(void *addr)
-{
-	struct usbd_xfer *xfer = addr;
- 	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
 
-	DPRINTF("xfer=%p\n", xfer);
+	/* Now complete them.  */
+	while (!TAILQ_EMPTY(&claimed)) {
+		dxfer = TAILQ_FIRST(&claimed);
+		KASSERT(dxfer->xfer.ux_status == USBD_IN_PROGRESS);
+		KASSERT(dxfer->intr_status != USBD_CANCELLED);
+		KASSERT(dxfer->intr_status != USBD_TIMEOUT);
+		TAILQ_REMOVE(&claimed, dxfer, xnext);
 
-	mutex_enter(&sc->sc_lock);
-	dwc2_abort_xfer(xfer, USBD_TIMEOUT);
-	mutex_exit(&sc->sc_lock);
+		dxfer->xfer.ux_status = dxfer->intr_status;
+		usb_transfer_complete(&dxfer->xfer);
+	}
 }
 
 usbd_status
@@ -469,53 +458,53 @@ dwc2_close_pipe(struct usbd_pipe *pipe)
  * Abort a device request.
  */
 Static void
-dwc2_abort_xfer(struct usbd_xfer *xfer, usbd_status status)
+dwc2_abortx(struct usbd_xfer *xfer)
 {
 	struct dwc2_xfer *dxfer = DWC2_XFER2DXFER(xfer);
 	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
 	struct dwc2_hsotg *hsotg = sc->sc_hsotg;
-	struct dwc2_xfer *d, *tmp;
+	struct dwc2_xfer *d;
 	int err;
 
-	KASSERTMSG((status == USBD_CANCELLED || status == USBD_TIMEOUT),
-	    "invalid status for abort: %d", (int)status);
-
-	DPRINTF("xfer %p pipe %p status 0x%08x", xfer, xfer->ux_pipe, status);
+	DPRINTF("xfer %p pipe %p status 0x%08x", xfer, xfer->ux_pipe,
+	    xfer->ux_status);
 
 	KASSERT(mutex_owned(&sc->sc_lock));
 	ASSERT_SLEEPABLE();
 
-	if (status == USBD_CANCELLED) {
-		/*
-		 * We are synchronously aborting.  Try to stop the
-		 * callout and task, but if we can't, wait for them to
-		 * complete.
-		 */
-		callout_halt(&xfer->ux_callout, &sc->sc_lock);
-		usb_rem_task_wait(xfer->ux_pipe->up_dev, &xfer->ux_aborttask,
-		    USB_TASKQ_HC, &sc->sc_lock);
-	} else {
-		/* Otherwise, we are timing out.  */
-		KASSERT(status == USBD_TIMEOUT);
-	}
+	KASSERTMSG((xfer->ux_status == USBD_CANCELLED ||
+		xfer->ux_status == USBD_TIMEOUT),
+	    "bad abort status: %d", xfer->ux_status);
+
+	mutex_spin_enter(&hsotg->lock);
 
 	/*
-	 * The xfer cannot have been cancelled already.  It is the
-	 * responsibility of the caller of usbd_abort_pipe not to try
-	 * to abort a pipe multiple times, whether concurrently or
-	 * sequentially.
+	 * Check whether we aborted or timed out after the hardware
+	 * completion interrupt determined that it's done but before
+	 * the soft interrupt could actually complete it.  If so, it's
+	 * too late for the soft interrupt -- at this point we've
+	 * already committed to abort it or time it out, so we need to
+	 * take it off the softint's list of work in case the caller,
+	 * say, frees the xfer before the softint runs.
+	 *
+	 * This logic is unusual among host controller drivers, and
+	 * happens because dwc2 decides to complete xfers in the hard
+	 * interrupt handler rather than in the soft interrupt handler,
+	 * but usb_transfer_complete must be deferred to softint -- and
+	 * we happened to swoop in between the hard interrupt and the
+	 * soft interrupt.  Other host controller drivers do almost all
+	 * processing in the softint so there's no intermediate stage.
+	 *
+	 * Fortunately, this linear search to discern the intermediate
+	 * stage is not likely to be a serious performance impact
+	 * because it happens only on abort or timeout.
 	 */
-	KASSERT(xfer->ux_status != USBD_CANCELLED);
-
-	/* Only the timeout, which runs only once, can time it out.  */
-	KASSERT(xfer->ux_status != USBD_TIMEOUT);
-
-	/* If anyone else beat us, we're done.  */
-	if (xfer->ux_status != USBD_IN_PROGRESS)
-		return;
-
-	/* We beat everyone else.  Claim the status.  */
-	xfer->ux_status = status;
+	TAILQ_FOREACH(d, &sc->sc_complete, xnext) {
+		if (d == dxfer) {
+			TAILQ_REMOVE(&sc->sc_complete, dxfer, xnext);
+			break;
+		}
+	}
 
 	/*
 	 * If we're dying, skip the hardware action and just notify the
@@ -529,26 +518,17 @@ dwc2_abort_xfer(struct usbd_xfer *xfer, 
 	/*
 	 * HC Step 1: Handle the hardware.
 	 */
-	mutex_spin_enter(&hsotg->lock);
-	/* XXXNH suboptimal */
-	TAILQ_FOREACH_SAFE(d, &sc->sc_complete, xnext, tmp) {
-		if (d == dxfer) {
-			TAILQ_REMOVE(&sc->sc_complete, dxfer, xnext);
-			break;
-		}
-	}
-
 	err = dwc2_hcd_urb_dequeue(hsotg, dxfer->urb);
 	if (err) {
 		DPRINTF("dwc2_hcd_urb_dequeue failed\n");
 	}
 
+dying:
 	mutex_spin_exit(&hsotg->lock);
 
 	/*
 	 * Final Step: Notify completion to waiting xfers.
 	 */
-dying:
 	usb_transfer_complete(xfer);
 	KASSERT(mutex_owned(&sc->sc_lock));
 }
@@ -761,7 +741,7 @@ dwc2_device_ctrl_abort(struct usbd_xfer 
 	KASSERT(mutex_owned(&sc->sc_lock));
 
 	DPRINTF("xfer=%p\n", xfer);
-	dwc2_abort_xfer(xfer, USBD_CANCELLED);
+	usbd_xfer_abort(xfer);
 }
 
 Static void
@@ -811,7 +791,7 @@ dwc2_device_bulk_abort(struct usbd_xfer 
 	KASSERT(mutex_owned(&sc->sc_lock));
 
 	DPRINTF("xfer=%p\n", xfer);
-	dwc2_abort_xfer(xfer, USBD_CANCELLED);
+	usbd_xfer_abort(xfer);
 }
 
 Static void
@@ -885,8 +865,7 @@ dwc2_device_intr_abort(struct usbd_xfer 
 	KASSERT(xfer->ux_pipe->up_intrxfer == xfer);
 
 	DPRINTF("xfer=%p\n", xfer);
-
-	dwc2_abort_xfer(xfer, USBD_CANCELLED);
+	usbd_xfer_abort(xfer);
 }
 
 Static void
@@ -935,7 +914,7 @@ dwc2_device_isoc_abort(struct usbd_xfer 
 	KASSERT(mutex_owned(&sc->sc_lock));
 
 	DPRINTF("xfer=%p\n", xfer);
-	dwc2_abort_xfer(xfer, USBD_CANCELLED);
+	usbd_xfer_abort(xfer);
 }
 
 void
@@ -1150,14 +1129,11 @@ dwc2_device_start(struct usbd_xfer *xfer
 
 	/* might need to check cpu_intr_p */
 	mutex_spin_enter(&hsotg->lock);
-
-	if (xfer->ux_timeout && !sc->sc_bus.ub_usepolling) {
-		callout_reset(&xfer->ux_callout, mstohz(xfer->ux_timeout),
-		    dwc2_timeout, xfer);
-	}
 	retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, qh, qtd);
 	if (retval)
 		goto fail2;
+	usbd_xfer_schedule_timeout(xfer);
+	xfer->ux_status = USBD_IN_PROGRESS;
 
 	if (alloc_bandwidth) {
 		dwc2_allocate_bus_bandwidth(hsotg,
@@ -1171,7 +1147,6 @@ dwc2_device_start(struct usbd_xfer *xfer
 	return USBD_IN_PROGRESS;
 
 fail2:
-	callout_halt(&xfer->ux_callout, &hsotg->lock);
 	dwc2_urb->priv = NULL;
 	mutex_spin_exit(&hsotg->lock);
 	pool_cache_put(sc->sc_qtdpool, qtd);
@@ -1461,6 +1436,8 @@ void dwc2_host_complete(struct dwc2_hsot
 	usb_endpoint_descriptor_t *ed;
 	uint8_t xfertype;
 
+	KASSERT(mutex_owned(&hsotg->lock));
+
 	if (!qtd) {
 		dev_dbg(hsotg->dev, "## %s: qtd is NULL ##\n", __func__);
 		return;
@@ -1477,25 +1454,6 @@ void dwc2_host_complete(struct dwc2_hsot
 		return;
 	}
 
-	/*
-	 * If software has completed it, either by cancellation
-	 * or timeout, drop it on the floor.
-	 */
-	if (xfer->ux_status != USBD_IN_PROGRESS) {
-		KASSERT(xfer->ux_status == USBD_CANCELLED ||
-		    xfer->ux_status == USBD_TIMEOUT);
-		return;
-	}
-
-	/*
-	 * Cancel the timeout and the task, which have not yet
-	 * run.  If they have already fired, at worst they are
-	 * waiting for the lock.  They will see that the xfer
-	 * is no longer in progress and give up.
-	 */
-	callout_stop(&xfer->ux_callout);
-	usb_rem_task(xfer->ux_pipe->up_dev, &xfer->ux_aborttask);
-
 	dxfer = DWC2_XFER2DXFER(xfer);
 	sc = DWC2_XFER2SC(xfer);
 	ed = xfer->ux_pipe->up_endpoint->ue_edesc;
@@ -1533,29 +1491,26 @@ void dwc2_host_complete(struct dwc2_hsot
 
 	switch (status) {
 	case 0:
-		xfer->ux_status = USBD_NORMAL_COMPLETION;
+		dxfer->intr_status = USBD_NORMAL_COMPLETION;
 		break;
 	case -EPIPE:
-		xfer->ux_status = USBD_STALLED;
-		break;
-	case -ETIMEDOUT:
-		xfer->ux_status = USBD_TIMEOUT;
+		dxfer->intr_status = USBD_STALLED;
 		break;
 	case -EPROTO:
-		xfer->ux_status = USBD_INVAL;
+		dxfer->intr_status = USBD_INVAL;
 		break;
 	case -EIO:
-		xfer->ux_status = USBD_IOERROR;
+		dxfer->intr_status = USBD_IOERROR;
 		break;
 	case -EOVERFLOW:
-		xfer->ux_status = USBD_IOERROR;
+		dxfer->intr_status = USBD_IOERROR;
 		break;
 	default:
-		xfer->ux_status = USBD_IOERROR;
+		dxfer->intr_status = USBD_IOERROR;
 		printf("%s: unknown error status %d\n", __func__, status);
 	}
 
-	if (xfer->ux_status == USBD_NORMAL_COMPLETION) {
+	if (dxfer->intr_status == USBD_NORMAL_COMPLETION) {
 		/*
 		 * control transfers with no data phase don't touch dmabuf, but
 		 * everything else does.

Index: src/sys/external/bsd/dwc2/dwc2var.h
diff -u src/sys/external/bsd/dwc2/dwc2var.h:1.6 src/sys/external/bsd/dwc2/dwc2var.h:1.7
--- src/sys/external/bsd/dwc2/dwc2var.h:1.6	Sun Nov 18 11:48:57 2018
+++ src/sys/external/bsd/dwc2/dwc2var.h	Wed Feb 12 16:01:00 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: dwc2var.h,v 1.6 2018/11/18 11:48:57 skrll Exp $	*/
+/*	$NetBSD: dwc2var.h,v 1.7 2020/02/12 16:01:00 riastradh Exp $	*/
 
 /*-
  * Copyright (c) 2013 The NetBSD Foundation, Inc.
@@ -43,6 +43,7 @@ struct dwc2_xfer {
 	struct dwc2_hcd_urb *urb;
 
 	TAILQ_ENTRY(dwc2_xfer) xnext;		/* list of complete xfers */
+	usbd_status intr_status;
 };
 
 struct dwc2_pipe {

Reply via email to