This is a "might crash, be careful" patch that updates the scheduling of interrupt transfers.
* The interrupt schedule is now a sparse tree of QH, much
like OHCI does with EDs. This lets "ehci-hcd" support
many more hubs, mice, keyboards, etc ... by lifting the
long-standing one-transfer-per-frame restriction.

* TT scheduling is explicit, sub-microframe, and for now
pessimistic (max two transfers per TT per frame). It's
set up to play (someday) with split ISO, and should
handle TT-per-port style hubs properly too.

* Uses "first fit" for new scheduling decisions. OHCI
used "best fit"; but even usb 1.1 hasn't usually needed
load balancing.

* Should support "high bandwidth" transfer rates (untested)
as well as normal transfer-per-microframe rates. (Didn't
implement 2 or 4 microframe periods.)

* Fixes a buglet that limited high speed interrupt transfers
to 1023 bytes (should be 1024).

The "might crash" is a scenario that happened a while back that I've not had a chance to look at ... unlinking nodes on the interior of the tree oopsed. (With a likely workaround being to unplug in reverse order of plug-in.)
I've tested it lightly, and I'm passing it around now since people have finally been noticing those driver restrictions.
It'd be good to lift those restrictions before 2.6.0-final ships, and that'll depend on testing (and likely fixes) from other folk...
- Dave
--- 1.23/drivers/usb/host/ehci-dbg.c Wed Aug 6 04:52:20 2003
+++ edited/drivers/usb/host/ehci-dbg.c Thu Oct 2 19:23:09 2003
@@ -465,7 +465,7 @@
spin_lock_irqsave (&ehci->lock, flags);
for (i = 0; i < ehci->periodic_size; i++) {
p = ehci->pshadow [i];
- if (!p.ptr)
+ if (likely (!p.ptr))
continue;
tag = Q_NEXT_TYPE (ehci->periodic [i]);
@@ -495,7 +495,7 @@
break;
}
/* show more info the first time around */
- if (temp == seen_count) {
+ if (temp == seen_count && p.ptr) {
u32 scratch = cpu_to_le32p (
&p.qh->hw_info1);
struct ehci_qtd *qtd;
@@ -528,8 +528,10 @@
seen [seen_count++].qh = p.qh;
} else
temp = 0;
- tag = Q_NEXT_TYPE (p.qh->hw_next);
- p = p.qh->qh_next;
+ if (p.qh) {
+ tag = Q_NEXT_TYPE (p.qh->hw_next);
+ p = p.qh->qh_next;
+ }
break;
case Q_TYPE_FSTN:
temp = snprintf (next, size,
--- 1.60/drivers/usb/host/ehci-hcd.c Fri Aug 29 11:21:44 2003
+++ edited/drivers/usb/host/ehci-hcd.c Thu Oct 2 09:13:42 2003
@@ -834,12 +834,18 @@
qh = (struct ehci_qh *) urb->hcpriv;
if (!qh)
break;
- if (qh->qh_state == QH_STATE_LINKED) {
- /* messy, can spin or block a microframe ... */
- intr_deschedule (ehci, qh, 1);
- /* qh_state == IDLE */
+ switch (qh->qh_state) {
+ case QH_STATE_LINKED:
+ intr_deschedule (ehci, qh);
+ /* FALL THROUGH */
+ case QH_STATE_IDLE:
+ qh_completions (ehci, qh, NULL);
+ break;
+ default:
+ ehci_dbg (ehci, "bogus qh %p state %d\n",
+ qh, qh->qh_state);
+ goto done;
}
- qh_completions (ehci, qh, NULL);
/* reschedule QH iff another request is queued */
if (!list_empty (&qh->qtd_list)
@@ -867,6 +873,7 @@
urb->transfer_flags |= EHCI_STATE_UNLINK;
break;
}
+done:
spin_unlock_irqrestore (&ehci->lock, flags);
return 0;
}
--- 1.15/drivers/usb/host/ehci-mem.c Wed Aug 6 04:52:20 2003
+++ edited/drivers/usb/host/ehci-mem.c Wed Oct 1 18:44:10 2003
@@ -131,6 +131,7 @@
}
if (qh->dummy)
ehci_qtd_free (ehci, qh->dummy);
+ usb_put_dev (qh->dev);
pci_pool_free (ehci->qh_pool, qh, qh->qh_dma);
}
--- 1.52/drivers/usb/host/ehci-q.c Wed Aug 13 02:58:53 2003
+++ edited/drivers/usb/host/ehci-q.c Thu Oct 2 09:34:21 2003
@@ -567,7 +567,7 @@
// high bandwidth multiplier, as encoded in highspeed endpoint descriptors
#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
// ... and packet size, for any kind of endpoint descriptor
-#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x03ff)
+#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
/*
* Each QH holds a qtd list; a QH is used for everything except iso.
@@ -618,9 +618,12 @@
qh->c_usecs = 0;
qh->gap_uf = 0;
- /* FIXME handle HS periods of less than 1 frame. */
qh->period = urb->interval >> 3;
- if (qh->period < 1) {
+ if (qh->period == 0 && urb->interval != 1) {
+ /* NOTE interval 2 or 4 uframes could work.
+ * But interval 1 scheduling is simpler, and
+ * includes high bandwidth.
+ */
dbg ("intr period %d uframes, NYET!",
urb->interval);
goto done;
@@ -641,6 +644,9 @@
qh->period = urb->interval;
}
+
+ /* support for tt scheduling */
+ qh->dev = usb_get_dev (urb->dev);
}
/* using TT? */
--- 1.27/drivers/usb/host/ehci-sched.c Wed Jul 30 03:05:59 2003
+++ edited/drivers/usb/host/ehci-sched.c Thu Oct 2 12:02:26 2003
@@ -69,30 +69,25 @@
union ehci_shadow *prev_p = &ehci->pshadow [frame];
u32 *hw_p = &ehci->periodic [frame];
union ehci_shadow here = *prev_p;
- union ehci_shadow *next_p;
/* find predecessor of "ptr"; hw and shadow lists are in sync */
while (here.ptr && here.ptr != ptr) {
prev_p = periodic_next_shadow (prev_p, Q_NEXT_TYPE (*hw_p));
- hw_p = &here.qh->hw_next;
+ hw_p = prev_p->hw_next;
here = *prev_p;
}
/* an interrupt entry (at list end) could have been shared */
- if (!here.ptr) {
- dbg ("entry %p no longer on frame [%d]", ptr, frame);
+ if (!here.ptr)
return 0;
- }
- // vdbg ("periodic unlink %p from frame %d", ptr, frame);
- /* update hardware list ... HC may still know the old structure, so
- * don't change hw_next until it'll have purged its cache
+ /* update hardware then software lists ... the old "next" pointers
+ * may still be in use, the caller updates them.
*/
- next_p = periodic_next_shadow (&here, Q_NEXT_TYPE (*hw_p));
- *hw_p = here.qh->hw_next;
+ *prev_p = *periodic_next_shadow (&here, Q_NEXT_TYPE (*hw_p));
+ *hw_p = *here.hw_next;
+ wmb ();
- /* unlink from shadow list; HCD won't see old structure again */
- *prev_p = *next_p;
- next_p->ptr = 0;
+ //
return 1;
}
@@ -114,6 +109,7 @@
/* ... or C-mask? */
if (q->qh->hw_info2 & cpu_to_le32 (1 << (8 + uframe)))
usecs += q->qh->c_usecs;
+ hw_p = &q->qh->hw_next;
q = &q->qh->qh_next;
break;
case Q_TYPE_FSTN:
@@ -123,12 +119,14 @@
if (q->fstn->hw_prev != EHCI_LIST_END) {
dbg ("not counting FSTN bandwidth yet ...");
}
+ hw_p = &q->fstn->hw_next;
q = &q->fstn->fstn_next;
break;
case Q_TYPE_ITD:
/* NOTE the "one uframe per itd" policy */
if (q->itd->hw_transaction [uframe] != 0)
usecs += q->itd->usecs;
+ hw_p = &q->itd->hw_next;
q = &q->itd->itd_next;
break;
#ifdef have_split_iso
@@ -154,6 +152,7 @@
else
usecs += HS_USECS (188);
}
+ hw_p = &q->sitd->hw_next;
q = &q->sitd->sitd_next;
break;
#endif /* have_split_iso */
@@ -171,6 +170,75 @@
/*-------------------------------------------------------------------------*/
+static inline int same_tt (struct usb_device *dev1, struct usb_device *dev2)
+{
+ if (!dev1->tt || !dev2->tt)
+ return 0;
+ if (dev1->tt != dev2->tt)
+ return 0;
+ if (dev1->tt->multi)
+ return dev1->ttport == dev2->ttport;
+ else
+ return 1;
+}
+
+static int check_tt_collision (
+ struct ehci_hcd *ehci,
+ unsigned frame,
+ unsigned uframe,
+ unsigned gap,
+ unsigned period,
+ struct usb_device *dev
+) {
+ if (period == 0) /* error */
+ return 0;
+
+ for (; frame < ehci->periodic_size; frame += period) {
+ union ehci_shadow here;
+ u32 type;
+
+ here = ehci->pshadow [frame];
+ type = Q_NEXT_TYPE (ehci->periodic [frame]);
+ while (here.ptr) {
+ switch (type) {
+ case Q_TYPE_ITD:
+ type = Q_NEXT_TYPE (here.itd->hw_next);
+ here = here.itd->itd_next;
+ continue;
+ case Q_TYPE_QH:
+ if (same_tt (dev, here.qh->dev)) {
+ u32 mask;
+
+ /* FIXME this expects a specific
+ * qh-only scheduling policy...
+ */
+ mask = le32_to_cpu (here.qh->hw_info2);
+ mask |= mask >> 8;
+ if (mask & (0x07 << uframe))
+ break;
+ }
+ type = Q_NEXT_TYPE (here.qh->hw_next);
+ here = here.qh->qh_next;
+ continue;
+ // case Q_TYPE_SITD:
+ // case Q_TYPE_FSTN:
+ default:
+ ehci_dbg (ehci,
+ "periodic frame %d bogus type %d\n",
+ frame, type);
+ }
+
+ /* collision or error */
+ return 0;
+ }
+ }
+
+ /* no collision */
+ return 1;
+}
+
+/*-------------------------------------------------------------------------*/
+
static int enable_periodic (struct ehci_hcd *ehci)
{
u32 cmd;
@@ -220,64 +288,147 @@
/*-------------------------------------------------------------------------*/
-// FIXME microframe periods not yet handled
+/* periodic schedule slots have iso tds (normal or split) first, then a
+ * sparse tree for active interrupt transfers.
+ *
+ * this just links in a qh; caller guarantees uframe masks are set right.
+ * no FSTN support (yet; ehci 0.96+)
+ */
+static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
+{
+ unsigned i;
+ unsigned period = qh->period;
-static void intr_deschedule (
- struct ehci_hcd *ehci,
- struct ehci_qh *qh,
- int wait
-) {
- int status;
- unsigned frame = qh->start;
+ dev_dbg (&qh->dev->dev,
+ "link qh%d-%04x/%p start %d [%d/%d us]\n",
+ period, le32_to_cpup (&qh->hw_info2) & 0xffff,
+ qh, qh->start, qh->usecs, qh->c_usecs);
+
+ /* high bandwidth, or otherwise every microframe */
+ if (period == 0)
+ period = 1;
+
+ for (i = qh->start; i < ehci->periodic_size; i += period) {
+ union ehci_shadow *prev = &ehci->pshadow [i];
+ u32 *hw_p = &ehci->periodic [i];
+ union ehci_shadow here = *prev;
+ u32 type = 0;
+
+ /* skip the iso nodes at list head */
+ while (here.ptr) {
+ type = Q_NEXT_TYPE (*hw_p);
+ if (type == Q_TYPE_QH)
+ break;
+ prev = periodic_next_shadow (prev, type);
+ hw_p = &here.qh->hw_next;
+ here = *prev;
+ }
- do {
- periodic_unlink (ehci, frame, qh);
- qh_put (ehci, qh);
- frame += qh->period;
- } while (frame < ehci->periodic_size);
+ /* sorting each branch by period (slow-->fast)
+ * enables sharing interior tree nodes
+ */
+ while (here.ptr && qh != here.qh) {
+ if (qh->period > here.qh->period)
+ break;
+ prev = periodic_next_shadow (prev, type);
+ hw_p = &here.qh->hw_next;
+ here = *prev;
+ }
+ /* link in this qh, unless some earlier pass did that */
+ if (qh != here.qh) {
+ qh->qh_next = here;
+ if (here.qh)
+ qh->hw_next = *hw_p;
+ wmb ();
+ prev->qh = qh;
+ *hw_p = QH_NEXT (qh->qh_dma);
+ }
+ }
+ qh->qh_state = QH_STATE_LINKED;
+ qh_get (qh);
+
+ /* update per-qh bandwidth for usbfs */
+ hcd_to_bus (&ehci->hcd)->bandwidth_allocated += qh->period
+ ? ((qh->usecs + qh->c_usecs) / qh->period)
+ : (qh->usecs * 8);
+
+ /* maybe enable periodic schedule processing */
+ if (!ehci->periodic_sched++)
+ return enable_periodic (ehci);
+ return 0;
+}
+
+static void qh_unlink_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
+{
+ unsigned i;
+ unsigned period;
+
+ // IF this isn't high speed
+ // and this qh is active in the current uframe
+ // (and overlay token SplitXstate is false?)
+ // THEN
+ // qh->hw_info1 |= __constant_cpu_to_le32 (1 << 7 /* "ignore" */);
+
+ /* high bandwidth, or otherwise part of every microframe */
+ if ((period = qh->period) == 0)
+ period = 1;
+
+ for (i = qh->start; i < ehci->periodic_size; i += period)
+ periodic_unlink (ehci, i, qh);
+
+ /* update per-qh bandwidth for usbfs */
+ hcd_to_bus (&ehci->hcd)->bandwidth_allocated -= qh->period
+ ? ((qh->usecs + qh->c_usecs) / qh->period)
+ : (qh->usecs * 8);
+
+ dev_dbg (&qh->dev->dev,
+ "unlink qh%d-%04x/%p start %d [%d/%d us]\n",
+ qh->period, le32_to_cpup (&qh->hw_info2) & 0xffff,
+ qh, qh->start, qh->usecs, qh->c_usecs);
+
+ /* qh->qh_next still "live" to HC */
qh->qh_state = QH_STATE_UNLINK;
qh->qh_next.ptr = 0;
- ehci->periodic_sched--;
+ qh_put (ehci, qh);
/* maybe turn off periodic schedule */
+ ehci->periodic_sched--;
if (!ehci->periodic_sched)
- status = disable_periodic (ehci);
- else {
- status = 0;
- vdbg ("periodic schedule still enabled");
- }
+ (void) disable_periodic (ehci);
+}
- /*
- * If the hc may be looking at this qh, then delay a uframe
- * (yeech!) to be sure it's done.
- * No other threads may be mucking with this qh.
- */
- if (((ehci_get_frame (&ehci->hcd) - frame) % qh->period) == 0) {
- if (wait) {
- udelay (125);
- qh->hw_next = EHCI_LIST_END;
- } else {
- /* we may not be IDLE yet, but if the qh is empty
- * the race is very short. then if qh also isn't
- * rescheduled soon, it won't matter. otherwise...
- */
- vdbg ("intr_deschedule...");
- }
- } else
- qh->hw_next = EHCI_LIST_END;
+static void intr_deschedule (
+ struct ehci_hcd *ehci,
+ struct ehci_qh *qh
+) {
+ unsigned frame = qh->start;
+ qh_unlink_periodic (ehci, qh);
+ if (qh->period == 0 || ((ehci_get_frame (&ehci->hcd) - frame)
+ % qh->period) == 0) {
+ unsigned wait;
+
+ /* hc might have been using this qh. delay enough to
+ * finish any transaction, then use qh->hw_next.
+ * (hmm, what about pending CSPLITs?)
+ */
+ if (list_empty (&qh->qtd_list) /* just read hw_next */
+ /* nonzero c-mask == split */
+ || (__constant_cpu_to_le32 (0x0ff << 8)
+ & qh->hw_info2) != 0)
+ wait = 2;
+ else /* finish 3*1024 transfer */
+ wait = 55;
+ udelay (wait);
+ }
qh->qh_state = QH_STATE_IDLE;
-
- /* update per-qh bandwidth utilization (for usbfs) */
- hcd_to_bus (&ehci->hcd)->bandwidth_allocated -=
- (qh->usecs + qh->c_usecs) / qh->period;
-
- dbg ("descheduled qh %p, period = %d frame = %d count = %d, urbs = %d",
- qh, qh->period, frame,
- atomic_read (&qh->refcount), ehci->periodic_sched);
+ qh->hw_next = EHCI_LIST_END;
+ wmb ();
}
+/*-------------------------------------------------------------------------*/
+
static int check_period (
struct ehci_hcd *ehci,
unsigned frame,
@@ -285,6 +436,8 @@
unsigned period,
unsigned usecs
) {
+ int claimed;
+
/* complete split running into next frame?
* given FSTN support, we could sometimes check...
*/
@@ -297,22 +450,34 @@
*/
usecs = 100 - usecs;
- do {
- int claimed;
-
-// FIXME delete when intr_submit handles non-empty queues
-// this gives us a one intr/frame limit (vs N/uframe)
-// ... and also lets us avoid tracking split transactions
-// that might collide at a given TT/hub.
- if (ehci->pshadow [frame].ptr)
- return 0;
+#if 1
+ /* this limits us to one periodic qh per frame, pending
+ * removal of an unlink-from-mid-tree problem.
+ */
+ if (ehci->pshadow [frame].ptr)
+ return 0;
+#endif
- claimed = periodic_usecs (ehci, frame, uframe);
- if (claimed > usecs)
- return 0;
+ /* we "know" 2 and 4 uframe intervals were rejected; so
+ * for period 0, check _every_ microframe in the schedule.
+ */
+ if (unlikely (period == 0)) {
+ do {
+ for (uframe = 0; uframe < 7; uframe++) {
+ claimed = periodic_usecs (ehci, frame, uframe);
+ if (claimed > usecs)
+ return 0;
+ }
+ } while ((frame += 1) < ehci->periodic_size);
-// FIXME update to handle sub-frame periods
- } while ((frame += period) < ehci->periodic_size);
+ /* just check the specified uframe, at that period */
+ } else {
+ do {
+ claimed = periodic_usecs (ehci, frame, uframe);
+ if (claimed > usecs)
+ return 0;
+ } while ((frame += period) < ehci->periodic_size);
+ }
// success!
return 1;
@@ -328,6 +493,9 @@
{
int retval = -ENOSPC;
+ if (qh->c_usecs && uframe >= 6) /* FSTN territory? */
+ goto done;
+
if (!check_period (ehci, frame, uframe, qh->period, qh->usecs))
goto done;
if (!qh->c_usecs) {
@@ -336,20 +504,16 @@
goto done;
}
- /* This is a split transaction; check the bandwidth available for
- * the completion too. Check both worst and best case gaps: worst
- * case is SPLIT near uframe end, and CSPLIT near start ... best is
- * vice versa. Difference can be almost two uframe times, but we
- * reserve unnecessary bandwidth (waste it) this way. (Actually
- * even better cases exist, like immediate device NAK.)
- *
- * FIXME don't even bother unless we know this TT is idle in that
- * range of uframes ... for now, check_period() allows only one
- * interrupt transfer per frame, so needn't check "TT busy" status
- * when scheduling a split (QH, SITD, or FSTN).
- *
- * FIXME ehci 0.96 and above can use FSTNs
+ /* NOTE: splits could be done in just one smart pass; and the
+ * typical full-speed case should probably be a single completion
+ * in uframe+1 (taking two adjacent uframes, not three).
*/
+
+ /* make sure this tt's buffer is available */
+ if (!check_tt_collision (ehci, frame, uframe, qh->gap_uf + 1,
+ qh->period, qh->dev))
+ goto done;
+
if (!check_period (ehci, frame, uframe + qh->gap_uf + 1,
qh->period, qh->c_usecs))
goto done;
@@ -363,6 +527,9 @@
return retval;
}
+/* "first fit" scheduling policy used the first time through, or
+ * when the previous schedule can't be re-used. no balancing.
+ */
static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
int status;
@@ -388,55 +555,39 @@
* uframes have enough periodic bandwidth available.
*/
if (status) {
- frame = qh->period - 1;
- do {
- for (uframe = 0; uframe < 8; uframe++) {
- status = check_intr_schedule (ehci,
- frame, uframe, qh,
- &c_mask);
- if (status == 0)
- break;
- }
- } while (status && frame--);
+ /* "normal" case, uframing flexible except with splits */
+ if (qh->period) {
+ frame = qh->period - 1;
+ do {
+ for (uframe = 0; uframe < 8; uframe++) {
+ status = check_intr_schedule (ehci,
+ frame, uframe, qh,
+ &c_mask);
+ if (status == 0)
+ break;
+ }
+ } while (status && frame--);
+
+ /* qh->period == 0 means every uframe */
+ } else {
+ frame = 0;
+ status = check_intr_schedule (ehci, 0, 0, qh, &c_mask);
+ }
if (status)
goto done;
qh->start = frame;
/* reset S-frame and (maybe) C-frame masks */
- qh->hw_info2 &= ~0xffff;
- qh->hw_info2 |= cpu_to_le32 (1 << uframe) | c_mask;
+ qh->hw_info2 &= __constant_cpu_to_le32 (~0xffff);
+ qh->hw_info2 |= qh->period
+ ? cpu_to_le32 (1 << uframe)
+ : __constant_cpu_to_le32 (0xff);
+ qh->hw_info2 |= c_mask;
} else
- dbg ("reused previous qh %p schedule", qh);
+ ehci_dbg (ehci, "reused qh %p schedule\n", qh);
/* stuff into the periodic schedule */
- qh->qh_state = QH_STATE_LINKED;
- dbg ("scheduled qh %p usecs %d/%d period %d.0 starting %d.%d (gap %d)",
- qh, qh->usecs, qh->c_usecs,
- qh->period, frame, uframe, qh->gap_uf);
- do {
- if (unlikely (ehci->pshadow [frame].ptr != 0)) {
-
-// FIXME -- just link toward the end, before any qh with a shorter period,
-// AND accommodate it already having been linked here (after some other qh)
-// AS WELL AS updating the schedule checking logic
-
- BUG ();
- } else {
- ehci->pshadow [frame].qh = qh_get (qh);
- ehci->periodic [frame] =
- QH_NEXT (qh->qh_dma);
- }
- wmb ();
- frame += qh->period;
- } while (frame < ehci->periodic_size);
-
- /* update per-qh bandwidth for usbfs */
- hcd_to_bus (&ehci->hcd)->bandwidth_allocated +=
- (qh->usecs + qh->c_usecs) / qh->period;
-
- /* maybe enable periodic schedule processing */
- if (!ehci->periodic_sched++)
- status = enable_periodic (ehci);
+ status = qh_link_periodic (ehci, qh);
done:
return status;
}
@@ -491,33 +642,6 @@
return status;
}
-static unsigned
-intr_complete (
- struct ehci_hcd *ehci,
- unsigned frame,
- struct ehci_qh *qh,
- struct pt_regs *regs
-) {
- unsigned count;
-
- /* nothing to report? */
- if (likely ((qh->hw_token & __constant_cpu_to_le32 (QTD_STS_ACTIVE))
- != 0))
- return 0;
- if (unlikely (list_empty (&qh->qtd_list))) {
- dbg ("intr qh %p no TDs?", qh);
- return 0;
- }
-
- /* handle any completions */
- count = qh_completions (ehci, qh, regs);
-
- if (unlikely (list_empty (&qh->qtd_list)))
- intr_deschedule (ehci, qh, 0);
-
- return count;
-}
-
/*-------------------------------------------------------------------------*/
static void
@@ -1000,6 +1124,7 @@
union ehci_shadow q, *q_p;
u32 type, *hw_p;
unsigned uframes;
+ int limit = 20;
restart:
/* scan schedule to _before_ current frame index */
@@ -1015,22 +1140,25 @@
/* scan each element in frame's queue for completions */
while (q.ptr != 0) {
- int last;
+ int rescan = 0;
unsigned uf;
union ehci_shadow temp;
switch (type) {
case Q_TYPE_QH:
- last = (q.qh->hw_next == EHCI_LIST_END);
- temp = q.qh->qh_next;
- type = Q_NEXT_TYPE (q.qh->hw_next);
- count += intr_complete (ehci, frame,
- qh_get (q.qh), regs);
- qh_put (ehci, q.qh);
- q = temp;
+ /* handle any completions */
+ temp.qh = qh_get (q.qh);
+ rescan = qh_completions (ehci, q.qh, regs);
+ if (unlikely (list_empty (&q.qh->qtd_list))) {
+ intr_deschedule (ehci, q.qh);
+ rescan = 1;
+ } else {
+ type = Q_NEXT_TYPE (q.qh->hw_next);
+ q = q.qh->qh_next;
+ }
+ qh_put (ehci, temp.qh);
break;
case Q_TYPE_FSTN:
- last = (q.fstn->hw_next == EHCI_LIST_END);
/* for "save place" FSTNs, look at QH entries
* in the previous frame for completions.
*/
@@ -1041,8 +1169,6 @@
q = q.fstn->fstn_next;
break;
case Q_TYPE_ITD:
- last = (q.itd->hw_next == EHCI_LIST_END);
-
/* Unlink each (S)ITD we see, since the ISO
* URB model forces constant rescheduling.
* That complicates sharing uframes in ITDs,
@@ -1057,7 +1183,7 @@
type = Q_NEXT_TYPE (*hw_p);
/* might free q.itd ... */
- count += itd_complete (ehci,
+ rescan = itd_complete (ehci,
temp.itd, uf, regs);
break;
}
@@ -1073,7 +1199,6 @@
break;
#ifdef have_split_iso
case Q_TYPE_SITD:
- last = (q.sitd->hw_next == EHCI_LIST_END);
sitd_complete (ehci, q.sitd);
type = Q_NEXT_TYPE (q.sitd->hw_next);
@@ -1085,13 +1210,23 @@
dbg ("corrupt type %d frame %d shadow %p",
type, frame, q.ptr);
// BUG ();
- last = 1;
q.ptr = 0;
}
- /* did completion remove an interior q entry? */
- if (unlikely (q.ptr == 0 && !last))
+ limit--;
+ if (limit == 0) {
+ count--;
+ ehci_dbg (ehci, "periodic loop %d, c%d\n",
+ frame, clock);
+ break;
+ }
+
+ /* maybe this schedule branch changed */
+ if (unlikely (rescan)) {
+ ehci_vdbg (ehci, "periodic restart %d.%d\n",
+ frame, uframes);
goto restart;
+ }
}
/* stop when we catch up to the HC */
--- 1.21/drivers/usb/host/ehci.h Mon Jun 9 12:20:12 2003
+++ edited/drivers/usb/host/ehci.h Thu Oct 2 09:13:25 2003
@@ -325,6 +325,7 @@
struct ehci_itd *itd; /* Q_TYPE_ITD */
struct ehci_sitd *sitd; /* Q_TYPE_SITD */
struct ehci_fstn *fstn; /* Q_TYPE_FSTN */
+ u32 *hw_next; /* (all types) */
void *ptr;
};
@@ -377,7 +378,7 @@
unsigned short period; /* polling interval */
unsigned short start; /* where polling starts */
#define NO_FRAME ((unsigned short)~0) /* pick new start */
-
+ struct usb_device *dev; /* access to TT */
} __attribute__ ((aligned (32)));
/*-------------------------------------------------------------------------*/
@@ -469,26 +470,11 @@
/*-------------------------------------------------------------------------*/
-#include <linux/version.h>
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,32)
-
-#define SUBMIT_URB(urb,mem_flags) usb_submit_urb(urb)
-#define STUB_DEBUG_FILES
-
-static inline int hcd_register_root (struct usb_hcd *hcd)
-{
- return usb_new_device (hcd_to_bus (hcd)->root_hub);
-}
-
-#else /* LINUX_VERSION_CODE */
-
#define SUBMIT_URB(urb,mem_flags) usb_submit_urb(urb,mem_flags)
#ifndef DEBUG
#define STUB_DEBUG_FILES
#endif /* DEBUG */
-
-#endif /* LINUX_VERSION_CODE */
/*-------------------------------------------------------------------------*/
