Hi all. I finally got time to work on this. The patch is rather large, but it (almost) all needs to happen at once, so I'll list what the changes are, not necessarily in order. I have tested control and interrupt queueing, but not bulk (no bulk devices on hand...), so if people could test bulk (and isochronous, although that's not really changed) it would be great. Note that any drivers that previously expected the HCD to automatically resubmit interrupt URBs will not work anymore!
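Since the HCD no longer resubmits interrupt URBs on its own, a driver has to requeue the URB from its own completion handler. Here is a rough sketch of what that looks like - this is not part of the patch, the sample_* names are made up, and it assumes the current 2.5 usb_submit_urb() that takes a mem_flags argument:

#include <linux/usb.h>		/* struct urb, usb_submit_urb() */

/* sample_dev and sample_handle_data() are made-up driver pieces, only here
 * to give the handler something to work with. */
struct sample_dev {
	struct urb *int_urb;
};

static void sample_handle_data(struct sample_dev *dev, void *data, int len)
{
	/* consume the interrupt data here */
}

static void sample_int_complete(struct urb *urb)
{
	struct sample_dev *dev = urb->context;
	int ret;

	/* don't resubmit if the URB was unlinked/killed */
	if (urb->status == -ENOENT || urb->status == -ECONNRESET)
		return;

	if (!urb->status)
		sample_handle_data(dev, urb->transfer_buffer, urb->actual_length);

	/* the HCD no longer requeues interrupt URBs, so the driver must */
	ret = usb_submit_urb(urb, GFP_ATOMIC);
	if (ret)
		printk(KERN_ERR "sample: interrupt URB resubmit failed (%d)\n", ret);
}

On to the changes: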
- All int_td's changed to int_qh's. The number decreased from 9 to 8 (only up to 128 ms instead of 256 ms). This affected the uhci-hcd.h file and the uhci initialization code, as well as the interrupt submission/result code (a sketch of the resulting 1-128 ms mapping follows this list).
- uhci-debug.c changed to match the int_td to int_qh change. Also added code to display the contents of the uhci lists (urb_list, urb_remove_list, and complete_list). Doubled the buffer size.
- Changed QH handling when removing an active URB that has a queue: before, the QH was unlinked; now the previous endpoint's qh->link (including any queued QHs) is changed to point to the next queued QH.
- For places where a td->link is written, added a check to make sure the HC hadn't already written that td->link back into the td's qh->element.
- Moved uhci_find_urb_ep, no code change.
- Changed low-speed control TDs from depth-first to breadth-first.
- Created a common submit method for bulk and interrupt, with only minor differences. Note that interrupt now has maxerr in the TD set to 3; it was 0 before.
- Removed uhci_reset_interrupt.
- Moved bandwidth reservation into submit_interrupt and submit_isochronous. For iso, bandwidth is reserved at the start of submit_isochronous and released on error. For interrupt (not bulk), it is reserved at the end of submit_common, and only if the URB is the first on the endpoint (i.e. not queued); it is only released if the URB is the last (i.e. no more queued).
- For all of control, interrupt, and bulk: if a URB is queued, it is not placed directly onto a skeleton QH - it is placed onto the active URB's queue. This is fine for control and bulk, but for interrupt a side effect is that the interval is only checked for the first URB - if later URBs on that same endpoint have a different interval, it is ignored and the new URB stays on the original interval. This should be OK, since it doesn't really make sense to queue URBs with different intervals on the same endpoint/pipe.
- Fixed the error case of uhci_urb_enqueue to remove the urbp from urb_list.
- Removed interrupt resubmission.
- Fixed the bug of using different names when creating and removing the procfs entry.
- Updated comments where appropriate.
- Added a FIXME to uhci_delete_queued_urb to indicate that killing an active URB may confuse the device, especially for control transfers, if some data has already been transferred. I'm pretty sure this assumption is correct...?
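For reference, this is roughly how an interval now maps onto the 8 interrupt skeleton QHs. It is only a sketch: the real __interval_to_skel() lives in uhci-hcd.h and its body is not touched by this patch (only the stale NOTE about int256 is dropped), so I'm assuming it keeps the existing binary-search shape; the skelqh[] indices match the new defines below.

/* Sketch (assumption, not from the patch) of interval -> skelqh[] index */
static inline int interval_to_skelqh_sketch(int interval)
{
	if (interval < 16) {
		if (interval < 4) {
			if (interval < 2)
				return 0;	/* skel_int1_qh for 0-1 ms */
			return 1;		/* skel_int2_qh for 2-3 ms */
		}
		if (interval < 8)
			return 2;		/* skel_int4_qh for 4-7 ms */
		return 3;			/* skel_int8_qh for 8-15 ms */
	}
	if (interval < 64) {
		if (interval < 32)
			return 4;		/* skel_int16_qh for 16-31 ms */
		return 5;			/* skel_int32_qh for 32-63 ms */
	}
	if (interval < 128)
		return 6;			/* skel_int64_qh for 64-127 ms */
	return 7;				/* skel_int128_qh for 128 ms and up */
}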
diff -ur usb-2.5/drivers/usb/host/uhci-debug.c linux/drivers/usb/host/uhci-debug.c --- usb-2.5/drivers/usb/host/uhci-debug.c Fri Oct 11 16:52:49 2002 +++ linux/drivers/usb/host/uhci-debug.c Fri Oct 11 15:10:34 2002 @@ -285,13 +285,13 @@ return out - buf; } -static const char *td_names[] = {"skel_int1_td", "skel_int2_td", - "skel_int4_td", "skel_int8_td", - "skel_int16_td", "skel_int32_td", - "skel_int64_td", "skel_int128_td", - "skel_int256_td", "skel_term_td" }; -static const char *qh_names[] = { "skel_ls_control_qh", "skel_hs_control_qh", - "skel_bulk_qh", "skel_term_qh" }; +static const char *qh_names[] = { + "skel_int128_qh", "skel_int64_qh", + "skel_int32_qh", "skel_int16_qh", + "skel_int8_qh", "skel_int4_qh", + "skel_int2_qh", "skel_int1_qh", + "skel_ls_control_qh", "skel_hs_control_qh", + "skel_bulk_qh", "skel_term_qh" }; #define show_frame_num() \ if (!shown) { \ @@ -299,25 +299,139 @@ out += sprintf(out, "- Frame %d\n", i); \ } -#define show_td_name() \ - if (!shown) { \ - shown = 1; \ - out += sprintf(out, "- %s\n", td_names[i]); \ - } - #define show_qh_name() \ if (!shown) { \ shown = 1; \ out += sprintf(out, "- %s\n", qh_names[i]); \ } +static int uhci_show_urbp(struct uhci_hcd *uhci, struct urb_priv *urbp, char *buf, +int len) +{ + struct list_head *tmp; + char *out = buf; + int count = 0; + + if (len < 200) + return 0; + + out += sprintf(out, "urb_priv [%p] ", urbp); + out += sprintf(out, "urb [%p] ", urbp->urb); + out += sprintf(out, "qh [%p] ", urbp->qh); + out += sprintf(out, "Dev=%d ", usb_pipedevice(urbp->urb->pipe)); + out += sprintf(out, "EP=%x(%s) ", usb_pipeendpoint(urbp->urb->pipe), +(usb_pipein(urbp->urb->pipe) ? "IN" : "OUT")); + switch (usb_pipetype(urbp->urb->pipe)) { + case PIPE_ISOCHRONOUS: out += sprintf(out, "ISO "); break; + case PIPE_INTERRUPT: out += sprintf(out, "INT "); break; + case PIPE_BULK: out += sprintf(out, "BLK "); break; + case PIPE_CONTROL: out += sprintf(out, "CTL "); break; + } + out += sprintf(out, "%s", (urbp->fsbr ? "FSBR " : "")); + out += sprintf(out, "%s", (urbp->fsbr_timeout ? 
"FSBR_TO " : "")); + if (-EINPROGRESS != urbp->status) + out += sprintf(out, "Status=%d ", urbp->status); + //out += sprintf(out, "Inserttime=%lx ",urbp->inserttime); + //out += sprintf(out, "FSBRtime=%lx ",urbp->fsbrtime); + + spin_lock(&urbp->urb->lock); + count = 0; + list_for_each(tmp, &urbp->td_list) count++; + spin_unlock(&urbp->urb->lock); + out += sprintf(out, "TDs=%d ",count); + + if (urbp->queued) { + out += sprintf(out, "Is Q'd\n"); + } else { + spin_lock(&uhci->frame_list_lock); + count = 0; + list_for_each(tmp, &urbp->queue_list) count++; + spin_unlock(&uhci->frame_list_lock); + out += sprintf(out, "Q'd URBs=%d\n", count); + } + + return out - buf; +} + +static int uhci_show_lists(struct uhci_hcd *uhci, char *buf, int len) +{ + char *out = buf; + unsigned long flags; + struct list_head *head, *tmp; + int count; + + out += sprintf(out, "Main list URBs:"); + spin_lock_irqsave(&uhci->urb_list_lock, flags); + if (list_empty(&uhci->urb_list)) { + out += sprintf(out, " Empty\n"); + } else { + out += sprintf(out, "\n"); + count = 0; + head = &uhci->urb_list; + tmp = head->next; + while (tmp != head) { + struct urb_priv *urbp = list_entry(tmp, struct urb_priv, +urb_list); + out += sprintf(out, " %d: ", ++count); + out += uhci_show_urbp(uhci, urbp, out, len - (out - buf)); + tmp = tmp->next; + } + } + spin_unlock_irqrestore(&uhci->urb_list_lock, flags); + + out += sprintf(out, "Remove list URBs:"); + spin_lock_irqsave(&uhci->urb_remove_list_lock, flags); + if (list_empty(&uhci->urb_remove_list)) { + out += sprintf(out, " Empty\n"); + } else { + out += sprintf(out, "\n"); + count = 0; + head = &uhci->urb_remove_list; + tmp = head->next; + while (tmp != head) { + struct urb_priv *urbp = list_entry(tmp, struct urb_priv, +urb_list); + out += sprintf(out, " %d: ", ++count); + out += uhci_show_urbp(uhci, urbp, out, len - (out - buf)); + tmp = tmp->next; + } + } + spin_unlock_irqrestore(&uhci->urb_remove_list_lock, flags); + + out += sprintf(out, "Complete list URBs:"); + spin_lock_irqsave(&uhci->complete_list, flags); + if (list_empty(&uhci->complete_list)) { + out += sprintf(out, " Empty\n"); + } else { + out += sprintf(out, "\n"); + count = 0; + head = &uhci->complete_list; + tmp = head->next; + while (tmp != head) { + struct urb_priv *urbp = list_entry(tmp, struct urb_priv, +complete_list); + out += sprintf(out, " %d: ", ++count); + out += uhci_show_urbp(uhci, urbp, out, len - (out - buf)); + tmp = tmp->next; + } + } + spin_unlock_irqrestore(&uhci->complete_list, flags); + + return out - buf; +} + static int uhci_sprint_schedule(struct uhci_hcd *uhci, char *buf, int len) { + unsigned long flags; char *out = buf; int i; struct uhci_qh *qh; struct uhci_td *td; struct list_head *tmp, *head; + struct uhci_qh *qhs[] = { + uhci->skel_int128_qh, uhci->skel_int64_qh, + uhci->skel_int32_qh, uhci->skel_int16_qh, + uhci->skel_int8_qh, uhci->skel_int4_qh, + uhci->skel_int2_qh, uhci->skel_int1_qh, + uhci->skel_ls_control_qh, uhci->skel_hs_control_qh, + uhci->skel_bulk_qh, uhci->skel_term_qh }; + + spin_lock_irqsave(&uhci->frame_list_lock, flags); out += sprintf(out, "HC status\n"); out += uhci_show_status(uhci, out, len - (out - buf)); @@ -346,96 +460,42 @@ } while (tmp != head); } - out += sprintf(out, "Skeleton TD's\n"); - for (i = UHCI_NUM_SKELTD - 1; i >= 0; i--) { - int shown = 0; - - td = uhci->skeltd[i]; - - if (debug > 1) { - show_td_name(); - out += uhci_show_td(td, out, len - (out - buf), 4); - } - - if (list_empty(&td->fl_list)) { - /* TD 0 is the int1 TD and links to control_ls_qh */ - if 
(!i) { - if (td->link != - (cpu_to_le32(uhci->skel_ls_control_qh->dma_handle) | UHCI_PTR_QH)) { - show_td_name(); - out += sprintf(out, " skeleton TD not linked to ls_control QH!\n"); - } - } else if (i < 9) { - if (td->link != cpu_to_le32(uhci->skeltd[i - 1]->dma_handle)) { - show_td_name(); - out += sprintf(out, " skeleton TD not linked to next skeleton TD!\n"); - } - } else { - show_td_name(); - - if (td->link != cpu_to_le32(td->dma_handle)) - out += sprintf(out, " skel_term_td does not link to self\n"); - - /* Don't show it twice */ - if (debug <= 1) - out += uhci_show_td(td, out, len - (out - buf), 4); - } - - continue; - } - - show_td_name(); - - head = &td->fl_list; - tmp = head->next; - - while (tmp != head) { - td = list_entry(tmp, struct uhci_td, fl_list); - - tmp = tmp->next; - - out += uhci_show_td(td, out, len - (out - buf), 4); - } - - if (!i) { - if (td->link != - (cpu_to_le32(uhci->skel_ls_control_qh->dma_handle) | UHCI_PTR_QH)) - out += sprintf(out, " last TD not linked to ls_control QH!\n"); - } else if (i < 9) { - if (td->link != cpu_to_le32(uhci->skeltd[i - 1]->dma_handle)) - out += sprintf(out, " last TD not linked to next skeleton!\n"); - } - } - out += sprintf(out, "Skeleton QH's\n"); - for (i = 0; i < UHCI_NUM_SKELQH; ++i) { int shown = 0; - qh = uhci->skelqh[i]; + qh = qhs[i]; if (debug > 1) { show_qh_name(); out += uhci_show_qh(qh, out, len - (out - buf), 4); } - /* QH 3 is the Terminating QH, it's different */ - if (i == 3) { + /* last QH is the Terminating QH, it's different */ + if (i == UHCI_NUM_SKELQH-1) { + show_qh_name(); if (qh->link != UHCI_PTR_TERM) { - show_qh_name(); - out += sprintf(out, " bandwidth reclamation on!\n"); + if (qh->link != +(cpu_to_le32(uhci->skel_hs_control_qh->dma_handle) | UHCI_PTR_QH)) + out += sprintf(out, " bandwidth reclamation +link pointing to unknown destination!\n"); + else + out += sprintf(out, " bandwidth reclamation +on.\n"); } if (qh->element != cpu_to_le32(uhci->skel_term_td->dma_handle)) { show_qh_name(); - out += sprintf(out, " skel_term_qh element is not set to skel_term_td\n"); + out += sprintf(out, " skel_term_qh element is not +set to skel_term_td!\n"); + td = uhci->skel_term_td; + if (td->link != cpu_to_le32(td->dma_handle)) + out += sprintf(out, " skel_term_td does not +link to self\n"); } + + continue; } if (list_empty(&qh->list)) { - if (i < 3) { + if (i < UHCI_NUM_SKELQH-1) { if (qh->link != - (cpu_to_le32(uhci->skelqh[i + 1]->dma_handle) | UHCI_PTR_QH)) { + (cpu_to_le32(qhs[i + 1]->dma_handle) | +UHCI_PTR_QH)) { show_qh_name(); out += sprintf(out, " skeleton QH not linked to next skeleton QH!\n"); } @@ -457,18 +517,23 @@ out += uhci_show_qh(qh, out, len - (out - buf), 4); } - if (i < 3) { + if (i < UHCI_NUM_SKELQH-1) { if (qh->link != - (cpu_to_le32(uhci->skelqh[i + 1]->dma_handle) | UHCI_PTR_QH)) + (cpu_to_le32(qhs[i + 1]->dma_handle) | UHCI_PTR_QH)) out += sprintf(out, " last QH not linked to next skeleton!\n"); } } + spin_unlock_irqrestore(&uhci->frame_list_lock, flags); + + if (debug > 2) + out += uhci_show_lists(uhci, out, len - (out - buf)); + return out - buf; } #ifdef CONFIG_PROC_FS -#define MAX_OUTPUT (PAGE_SIZE * 8) +#define MAX_OUTPUT (PAGE_SIZE * 16) static struct proc_dir_entry *uhci_proc_root = NULL; @@ -483,7 +548,6 @@ const struct proc_dir_entry *dp = PDE(inode); struct uhci_hcd *uhci = dp->data; struct uhci_proc *up; - unsigned long flags; int ret = -ENOMEM; lock_kernel(); @@ -497,9 +561,7 @@ goto out; } - spin_lock_irqsave(&uhci->frame_list_lock, flags); up->size = uhci_sprint_schedule(uhci, 
up->data, MAX_OUTPUT); - spin_unlock_irqrestore(&uhci->frame_list_lock, flags); file->private_data = up; diff -ur usb-2.5/drivers/usb/host/uhci-hcd.c linux/drivers/usb/host/uhci-hcd.c --- usb-2.5/drivers/usb/host/uhci-hcd.c Fri Oct 11 16:52:49 2002 +++ linux/drivers/usb/host/uhci-hcd.c Fri Oct 11 16:59:53 2002 @@ -174,30 +174,11 @@ td->buffer = cpu_to_le32(buffer); } -static void uhci_insert_td(struct uhci_hcd *uhci, struct uhci_td *skeltd, struct uhci_td *td) -{ - unsigned long flags; - struct uhci_td *ltd; - - spin_lock_irqsave(&uhci->frame_list_lock, flags); - - ltd = list_entry(skeltd->fl_list.prev, struct uhci_td, fl_list); - - td->link = ltd->link; - mb(); - ltd->link = cpu_to_le32(td->dma_handle); - - list_add_tail(&td->fl_list, &skeltd->fl_list); - - spin_unlock_irqrestore(&uhci->frame_list_lock, flags); -} - /* * We insert Isochronous transfers directly into the frame list at the * beginning * The layout looks as follows: - * frame list pointer -> iso td's (if any) -> - * periodic interrupt td (if frame 0) -> irq td's -> control qh -> bulk qh + * frame list pointer -> iso td's (if any) -> irq qh's -> ls control qh -> hs control +qh -> bulk qh */ static void uhci_insert_td_frame_list(struct uhci_hcd *uhci, struct uhci_td *td, unsigned framenum) { @@ -437,7 +418,11 @@ qh->urbp = NULL; + * Queued QHs are removed in uhci_delete_queued_urb, + * since (for queued URBs) the pqh is pointed to the next + * QH in the queue, not the next endpoint's QH. + */ spin_lock_irqsave(&uhci->frame_list_lock, flags); if (!list_empty(&qh->list)) { pqh = list_entry(qh->list.prev, struct uhci_qh, list); @@ -459,7 +444,7 @@ pqh->link = qh->link; mb(); - qh->element = qh->link = UHCI_PTR_TERM; + qh->element = UHCI_PTR_TERM; /* leave qh->link in case HC is on qh +now, it will continue the frame */ list_del_init(&qh->list); } @@ -502,8 +487,7 @@ } /* This function will append one URB's QH to another URB's QH. 
This is for */ -/* queuing bulk transfers and soon implicitily for */ -/* control transfers */ +/* queuing interrupt, control, or bulk transfers */ static void uhci_append_queued_urb(struct uhci_hcd *uhci, struct urb *eurb, struct urb *urb) { struct urb_priv *eurbp, *urbp, *furbp, *lurbp; @@ -545,9 +529,16 @@ urbp->qh->link = eurbp->qh->link; mb(); /* Make sure we flush everything */ + lltd->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH; + /* If the HC wrote lltd->link (which used to point to TERM) back into +lurb->qh->element + * before we wrote lltd->link, fix lurb->qh->element + */ + mb(); + if (lurbp->qh->element == UHCI_PTR_TERM) + lurbp->qh->element = lltd->link; + list_add_tail(&urbp->queue_list, &furbp->queue_list); urbp->queued = 1; @@ -560,6 +551,7 @@ struct urb_priv *urbp, *nurbp; struct list_head *head, *tmp; struct urb_priv *purbp; + struct uhci_qh *pqh; struct uhci_td *pltd; unsigned int toggle; unsigned long flags; @@ -573,9 +565,10 @@ nurbp = list_entry(urbp->queue_list.next, struct urb_priv, queue_list); +//FIXME - can't abort (at least) control urb in the middle of execution, device will +be confused /* Fix up the toggle for the next URB's */ if (!urbp->queued) - /* We set the toggle when we unlink */ + /* We just set the toggle in uhci_unlink_generic */ toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe)); else { /* If we're in the middle of the queue, grab the toggle */ @@ -609,7 +602,31 @@ if (!urbp->queued) { nurbp->queued = 0; - _uhci_insert_qh(uhci, uhci->skel_bulk_qh, nurbp->urb); + /* Point the previous endpoint's QH (and its queue) at the next QH in +our queue. + * This removes the QH from the frame here instead of in +uhci_remove_qh. + */ + pqh = list_entry(urbp->qh->list.prev, struct uhci_qh, list); + + if (pqh->urbp) { + struct list_head *head, *tmp; + + head = &pqh->urbp->queue_list; + tmp = head->next; + while (head != tmp) { + struct urb_priv *turbp = + list_entry(tmp, struct urb_priv, queue_list); + + tmp = tmp->next; + + turbp->qh->link = cpu_to_le32(nurbp->qh->dma_handle) | +UHCI_PTR_QH; + } + } + + pqh->link = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH; + + /* replace the current QH with the next queued QH in the QH list */ + list_add_tail(&nurbp->qh->list, &urbp->qh->list); + list_del_init(&urbp->qh->list); } else { /* We're somewhere in the middle (or end). 
A bit trickier */ /* than the head scenario */ @@ -623,6 +640,15 @@ /* The next URB happens to be the beginning, so */ /* we're the last, end the chain */ pltd->link = UHCI_PTR_TERM; + + pqh = purbp->qh; /* set to previous QH in our endpoint's queue */ + + /* If the HC wrote pltd->link (which used to point to our QH) back +into pqh->element + * before we wrote pltd->link, fix pqh->element + */ + mb(); + if (pqh->element == (cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH)) + pqh->element = pltd->link; } list_del_init(&urbp->queue_list); @@ -784,13 +810,50 @@ } /* + * MUST be called with uhci->urb_list_lock acquired + */ +static struct urb *uhci_find_urb_ep(struct uhci_hcd *uhci, struct urb *urb) +{ + struct list_head *tmp, *head; + + /* We don't match Isoc transfers since they are special */ + if (usb_pipeisoc(urb->pipe)) + return NULL; + + head = &uhci->urb_list; + tmp = head->next; + while (tmp != head) { + struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list); + struct urb *u = up->urb; + + tmp = tmp->next; + + /* if we reach the specified urb, there is no previously submitted urb +that matches */ + if (u == urb) + return NULL; + + if (u->dev == urb->dev && u->status == -EINPROGRESS) { + /* For control, ignore the direction */ + if (usb_pipecontrol(urb->pipe) && + (u->pipe & ~USB_DIR_IN) == (urb->pipe & ~USB_DIR_IN)) + return u; + else if (u->pipe == urb->pipe) + return u; + } + } + + return NULL; +} + +/* * Control transfers */ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb) { struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv; + struct urb *eurb; struct uhci_td *td; - struct uhci_qh *qh; + struct uhci_qh *qh, *skelqh; unsigned long destination, status; int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)); int len = urb->transfer_buffer_length; @@ -880,19 +943,31 @@ urbp->qh = qh; qh->urbp = urbp; + uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH); + /* Low speed transfers get a different queue, and won't hog the bus */ if (urb->dev->speed == USB_SPEED_LOW) { - uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_DEPTH); - uhci_insert_qh(uhci, uhci->skel_ls_control_qh, urb); + skelqh = uhci->skel_ls_control_qh; } else { - uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH); - uhci_insert_qh(uhci, uhci->skel_hs_control_qh, urb); + skelqh = uhci->skel_hs_control_qh; uhci_inc_fsbr(uhci, urb); } + if ((eurb = uhci_find_urb_ep(uhci, urb))) + uhci_append_queued_urb(uhci, eurb, urb); + else + uhci_insert_qh(uhci, skelqh, urb); + return -EINPROGRESS; } +/* + * If control was short, then end status packet wasn't sent, so this reorganizes so +it's sent + * to finish the transfer. The original QH is removed from the skel and discarded; +all TDs + * except the last (status) are deleted; the last (status) TD is put on a new QH +which is reinserted + * into the skel. Since the last TD and urb_priv are reused, the TD->link and +urb_priv maintain any + * queued QHs. 
+ */ static int usb_control_retrigger_status(struct uhci_hcd *uhci, struct urb *urb) { struct list_head *tmp, *head; @@ -1047,41 +1123,115 @@ } /* - * Interrupt transfers + * Common submit for bulk and interrupt */ -static int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb) +static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, struct uhci_qh +*skelqh) { + struct urb *eurb; struct uhci_td *td; + struct uhci_qh *qh; unsigned long destination, status; + int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)); + int len = urb->transfer_buffer_length; + struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv; + dma_addr_t data = urb->transfer_dma; - if (urb->transfer_buffer_length > usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))) + if (len < 0) return -EINVAL; /* The "pipe" thing contains the destination in bits 8--18 */ destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe); - status = TD_CTRL_ACTIVE | TD_CTRL_IOC; + status = TD_CTRL_ACTIVE; + status |= uhci_maxerr(3); /* 3 errors */ if (urb->dev->speed == USB_SPEED_LOW) status |= TD_CTRL_LS; + if (!(urb->transfer_flags & URB_SHORT_NOT_OK)) + status |= TD_CTRL_SPD; - td = uhci_alloc_td(uhci, urb->dev); - if (!td) + /* + * Build the DATA TD's + */ + do { /* Allow zero length packets */ + int pktsze = len; + + if (pktsze > maxsze) + pktsze = maxsze; + + td = uhci_alloc_td(uhci, urb->dev); + if (!td) + return -ENOMEM; + + uhci_add_td_to_urb(urb, td); + uhci_fill_td(td, status, destination | uhci_explen(pktsze - 1) | + (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), + usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT), + data); + + data += pktsze; + len -= maxsze; + + usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe), + usb_pipeout(urb->pipe)); + } while (len > 0); + + /* + * USB_ZERO_PACKET means adding a 0-length packet, if + * direction is OUT and the transfer_length was an + * exact multiple of maxsze, hence + * (len = transfer_length - N * maxsze) == 0 + * however, if transfer_length == 0, the zero packet + * was already prepared above. 
+ */ + if (usb_pipeout(urb->pipe) && (urb->transfer_flags & USB_ZERO_PACKET) && + !len && urb->transfer_buffer_length) { + td = uhci_alloc_td(uhci, urb->dev); + if (!td) + return -ENOMEM; + + uhci_add_td_to_urb(urb, td); + uhci_fill_td(td, status, destination | +uhci_explen(UHCI_NULL_DATA_SIZE) | + (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), + usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT), + data); + + usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe), + usb_pipeout(urb->pipe)); + } + + /* Set the flag on the last packet */ + td->status |= cpu_to_le32(TD_CTRL_IOC); + + qh = uhci_alloc_qh(uhci, urb->dev); + if (!qh) return -ENOMEM; - destination |= (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT); - destination |= uhci_explen(urb->transfer_buffer_length - 1); + urbp->qh = qh; + qh->urbp = urbp; - usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe)); + /* Always breadth first */ + uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH); - uhci_add_td_to_urb(urb, td); - uhci_fill_td(td, status, destination, urb->transfer_dma); + if ((eurb = uhci_find_urb_ep(uhci, urb))) { + uhci_append_queued_urb(uhci, eurb, urb); + } else { + if (usb_pipeint(urb->pipe) && !urb->bandwidth) { /* Interrupt URB that +needs to reserve BW */ + int bustime = usb_check_bandwidth(urb->dev, urb); + if (0 > bustime) + return bustime; - uhci_insert_td(uhci, uhci->skeltd[__interval_to_skel(urb->interval)], td); + usb_claim_bandwidth(urb->dev, urb, bustime, 0); + } + uhci_insert_qh(uhci, skelqh, urb); + } return -EINPROGRESS; } -static int uhci_result_interrupt(struct uhci_hcd *uhci, struct urb *urb) +/* + * Common result for bulk and interrupt + */ +static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb) { struct list_head *tmp, *head; struct urb_priv *urbp = urb->hcpriv; @@ -1128,15 +1278,12 @@ err: if ((debug == 1 && ret != -EPIPE) || debug > 1) { /* Some debugging code */ - dbg("uhci_result_interrupt/bulk() failed with status %x", + dbg("uhci_result_common() failed with status %x", status); if (errbuf) { /* Print the chain for debugging purposes */ - if (urbp->qh) - uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0); - else - uhci_show_td(td, errbuf, ERRBUF_LEN, 0); + uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0); lprintk(errbuf); } @@ -1145,129 +1292,42 @@ return ret; } -static void uhci_reset_interrupt(struct uhci_hcd *uhci, struct urb *urb) -{ - struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv; - struct uhci_td *td; - unsigned long flags; - - spin_lock_irqsave(&urb->lock, flags); - - td = list_entry(urbp->td_list.next, struct uhci_td, list); - - td->status = (td->status & cpu_to_le32(0x2F000000)) | cpu_to_le32(TD_CTRL_ACTIVE | TD_CTRL_IOC); - td->token &= ~cpu_to_le32(TD_TOKEN_TOGGLE); - td->token |= cpu_to_le32(usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT); - usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe)); - - urb->status = -EINPROGRESS; - - spin_unlock_irqrestore(&urb->lock, flags); -} - /* - * Bulk transfers + * Submit for bulk */ -static int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb) +static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb) { - struct uhci_td *td; - struct uhci_qh *qh; - unsigned long destination, status; - int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)); - int len = urb->transfer_buffer_length; - struct urb_priv *urbp = (struct urb_priv 
*)urb->hcpriv; - dma_addr_t data = urb->transfer_dma; - - if (len < 0) - return -EINVAL; + int ret; /* Can't have low speed bulk transfers */ if (urb->dev->speed == USB_SPEED_LOW) return -EINVAL; - /* The "pipe" thing contains the destination in bits 8--18 */ - destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe); - - /* 3 errors */ - status = TD_CTRL_ACTIVE | uhci_maxerr(3); - if (!(urb->transfer_flags & URB_SHORT_NOT_OK)) - status |= TD_CTRL_SPD; - - /* - * Build the DATA TD's - */ - do { /* Allow zero length packets */ - int pktsze = len; - - if (pktsze > maxsze) - pktsze = maxsze; - - td = uhci_alloc_td(uhci, urb->dev); - if (!td) - return -ENOMEM; - - uhci_add_td_to_urb(urb, td); - uhci_fill_td(td, status, destination | uhci_explen(pktsze - 1) | - (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), - usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT), - data); - - data += pktsze; - len -= maxsze; - - usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe), - usb_pipeout(urb->pipe)); - } while (len > 0); - - /* - * USB_ZERO_PACKET means adding a 0-length packet, if - * direction is OUT and the transfer_length was an - * exact multiple of maxsze, hence - * (len = transfer_length - N * maxsze) == 0 - * however, if transfer_length == 0, the zero packet - * was already prepared above. - */ - if (usb_pipeout(urb->pipe) && (urb->transfer_flags & USB_ZERO_PACKET) && - !len && urb->transfer_buffer_length) { - td = uhci_alloc_td(uhci, urb->dev); - if (!td) - return -ENOMEM; - - uhci_add_td_to_urb(urb, td); - uhci_fill_td(td, status, destination | uhci_explen(UHCI_NULL_DATA_SIZE) | - (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), - usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT), - data); - - usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe), - usb_pipeout(urb->pipe)); - } + ret = uhci_submit_common(uhci, urb, uhci->skel_bulk_qh); - /* Set the flag on the last packet */ - td->status |= cpu_to_le32(TD_CTRL_IOC); - - qh = uhci_alloc_qh(uhci, urb->dev); - if (!qh) - return -ENOMEM; - - urbp->qh = qh; - qh->urbp = urbp; - - /* Always breadth first */ - uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH); + if (-EINPROGRESS == ret) + uhci_inc_fsbr(uhci, urb); - if (eurb) - uhci_append_queued_urb(uhci, eurb, urb); - else - uhci_insert_qh(uhci, uhci->skel_bulk_qh, urb); + return ret; +} - uhci_inc_fsbr(uhci, urb); +/* + * Submit for interrupt + */ +static inline int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb) +{ + /* Interrupt-IN can't be more than 1 packet */ + if (usb_pipein(urb->pipe) && urb->transfer_buffer_length > +usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))) + return -EINVAL; - return -EINPROGRESS; + return uhci_submit_common(uhci, urb, +uhci->skelqh[__interval_to_skel(urb->interval)]); } -/* We can use the result interrupt since they're identical */ -#define uhci_result_bulk uhci_result_interrupt +/* + * Bulk and interrupt use common result + */ +#define uhci_result_bulk uhci_result_common +#define uhci_result_interrupt uhci_result_common /* * Isochronous transfers @@ -1336,15 +1396,23 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb) { struct uhci_td *td; - int i, ret, frame; + int i, ret, frame, bustime; int status, destination; + if (!urb->bandwidth) { /* not yet checked/allocated */ + bustime = usb_check_bandwidth(urb->dev, urb); + if (bustime < 0) { + ret = bustime; + goto iso_err; + } + usb_claim_bandwidth(urb->dev, urb, bustime, 1); + } + status = TD_CTRL_ACTIVE | TD_CTRL_IOS; destination = (urb->pipe & 
PIPE_DEVEP_MASK) | usb_packetid(urb->pipe); - ret = isochronous_find_start(uhci, urb); - if (ret) - return ret; + if ((ret = isochronous_find_start(uhci, urb))) + goto iso_err; frame = urb->start_frame; for (i = 0; i < urb->number_of_packets; i++, frame += urb->interval) { @@ -1352,8 +1420,10 @@ continue; td = uhci_alloc_td(uhci, urb->dev); - if (!td) - return -ENOMEM; + if (!td) { + ret = -ENOMEM; + goto iso_err; + } uhci_add_td_to_urb(urb, td); uhci_fill_td(td, status, destination | uhci_explen(urb->iso_frame_desc[i].length - 1), @@ -1366,6 +1436,12 @@ } return -EINPROGRESS; + +iso_err: + if (urb->bandwidth) + usb_release_bandwidth(urb->dev, urb, 1); + + return ret; } static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb) @@ -1406,50 +1482,14 @@ return ret; } -/* - * MUST be called with uhci->urb_list_lock acquired - */ -static struct urb *uhci_find_urb_ep(struct uhci_hcd *uhci, struct urb *urb) -{ - struct list_head *tmp, *head; - - /* We don't match Isoc transfers since they are special */ - if (usb_pipeisoc(urb->pipe)) - return NULL; - - head = &uhci->urb_list; - tmp = head->next; - while (tmp != head) { - struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list); - struct urb *u = up->urb; - - tmp = tmp->next; - - if (u->dev == urb->dev && u->status == -EINPROGRESS) { - /* For control, ignore the direction */ - if (usb_pipecontrol(urb->pipe) && - (u->pipe & ~USB_DIR_IN) == (urb->pipe & ~USB_DIR_IN)) - return u; - else if (u->pipe == urb->pipe) - return u; - } - } - - return NULL; -} - static int uhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, int mem_flags) { int ret = -EINVAL; struct uhci_hcd *uhci = hcd_to_uhci(hcd); unsigned long flags; - struct urb *eurb; - int bustime; spin_lock_irqsave(&uhci->urb_list_lock, flags); - eurb = uhci_find_urb_ep(uhci, urb); - if (!uhci_alloc_urb_priv(uhci, urb)) { spin_unlock_irqrestore(&uhci->urb_list_lock, flags); return -ENOMEM; @@ -1457,51 +1497,28 @@ switch (usb_pipetype(urb->pipe)) { case PIPE_CONTROL: - if (eurb) - ret = -ENXIO; /* no control queueing yet */ - else - ret = uhci_submit_control(uhci, urb); - break; - case PIPE_INTERRUPT: - if (eurb) - ret = -ENXIO; /* no interrupt queueing yet */ - else if (urb->bandwidth == 0) { /* not yet checked/allocated */ - bustime = usb_check_bandwidth(urb->dev, urb); - if (bustime < 0) - ret = bustime; - else { - ret = uhci_submit_interrupt(uhci, urb); - if (ret == -EINPROGRESS) - usb_claim_bandwidth(urb->dev, urb, bustime, 0); - } - } else /* bandwidth is already set */ - ret = uhci_submit_interrupt(uhci, urb); + ret = uhci_submit_control(uhci, urb); break; case PIPE_BULK: - ret = uhci_submit_bulk(uhci, urb, eurb); + ret = uhci_submit_bulk(uhci, urb); break; - case PIPE_ISOCHRONOUS: - if (urb->bandwidth == 0) { /* not yet checked/allocated */ - bustime = usb_check_bandwidth(urb->dev, urb); - if (bustime < 0) { - ret = bustime; - break; - } - - ret = uhci_submit_isochronous(uhci, urb); - if (ret == -EINPROGRESS) - usb_claim_bandwidth(urb->dev, urb, bustime, 1); - } else /* bandwidth is already set */ - ret = uhci_submit_isochronous(uhci, urb); + case PIPE_ISOCHRONOUS: /* BW reserved in uhci_submit_isochronous */ + ret = uhci_submit_isochronous(uhci, urb); + break; + case PIPE_INTERRUPT: /* BW reserved in uhci_submit_interrupt */ + ret = uhci_submit_interrupt(uhci, urb); break; } - spin_unlock_irqrestore(&uhci->urb_list_lock, flags); - if (ret != -EINPROGRESS) { + struct urb_priv *urbp = urb->hcpriv; + list_del_init(&urbp->urb_list); + 
spin_unlock_irqrestore(&uhci->urb_list_lock, flags); uhci_destroy_urb_priv (uhci, urb); return ret; } + + spin_unlock_irqrestore(&uhci->urb_list_lock, flags); return 0; } @@ -1512,7 +1529,7 @@ */ static void uhci_transfer_result(struct uhci_hcd *uhci, struct urb *urb) { - int ret = -EINVAL; + int ret = -EINVAL, isoc = 1, release = 1; unsigned long flags; struct urb_priv *urbp; @@ -1546,24 +1563,22 @@ goto out; switch (usb_pipetype(urb->pipe)) { - case PIPE_CONTROL: - case PIPE_BULK: - case PIPE_ISOCHRONOUS: - /* Release bandwidth for Interrupt or Isoc. transfers */ - /* Spinlock needed ? */ - if (urb->bandwidth) - usb_release_bandwidth(urb->dev, urb, 1); - uhci_unlink_generic(uhci, urb); - break; case PIPE_INTERRUPT: - /* Interrupts are an exception */ - if (urb->interval) - goto out_complete; - + isoc = 0; + spin_lock(&uhci->frame_list_lock); + if (!list_empty(&urbp->queue_list)) /* don't release BW if there are +more queued */ + release = 0; + spin_unlock(&uhci->frame_list_lock); + /* fall through */ + case PIPE_ISOCHRONOUS: /* Release bandwidth for Interrupt or Isoc. transfers */ /* Spinlock needed ? */ - if (urb->bandwidth) - usb_release_bandwidth(urb->dev, urb, 0); + if (release && urb->bandwidth) + usb_release_bandwidth(urb->dev, urb, isoc); + /* fall through */ + case PIPE_CONTROL: + /* fall through */ + case PIPE_BULK: uhci_unlink_generic(uhci, urb); break; default: @@ -1574,7 +1589,6 @@ /* Remove it from uhci->urb_list */ list_del_init(&urbp->urb_list); -out_complete: uhci_add_complete(uhci, urb); out: @@ -1803,40 +1817,23 @@ static void uhci_finish_urb(struct usb_hcd *hcd, struct urb *urb) { struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv; - struct usb_device *dev = urb->dev; struct uhci_hcd *uhci = hcd_to_uhci(hcd); - int killed, resubmit_interrupt, status; + int killed; unsigned long flags; spin_lock_irqsave(&urb->lock, flags); killed = (urb->status == -ENOENT || urb->status == -ECONNRESET); - resubmit_interrupt = (usb_pipetype(urb->pipe) == PIPE_INTERRUPT && - urb->interval); - - status = urbp->status; - if (!resubmit_interrupt || killed) - /* We don't need urb_priv anymore */ - uhci_destroy_urb_priv(uhci, urb); if (!killed) - urb->status = status; - spin_unlock_irqrestore(&urb->lock, flags); + urb->status = urbp->status; - if (resubmit_interrupt) - urb->complete(urb); - else - usb_hcd_giveback_urb(hcd, urb); + /* We don't need urb_priv anymore */ + uhci_destroy_urb_priv(uhci, urb); - if (resubmit_interrupt) - /* Recheck the status. 
The completion handler may have */ - /* unlinked the resubmitting interrupt URB */ - killed = (urb->status == -ENOENT || urb->status == -ECONNRESET); - - if (resubmit_interrupt && !killed) { - urb->dev = dev; - uhci_reset_interrupt(uhci, urb); - } + spin_unlock_irqrestore(&urb->lock, flags); + + usb_hcd_giveback_urb(hcd, urb); } static void uhci_finish_completion(struct usb_hcd *hcd) @@ -2193,56 +2190,37 @@ goto err_alloc_root_hub; } - uhci->skeltd[0] = uhci_alloc_td(uhci, udev); - if (!uhci->skeltd[0]) { - err("unable to allocate TD 0"); - goto err_alloc_skeltd; - } - - /* - * 9 Interrupt queues; link int2 to int1, int4 to int2, etc - * then link int1 to control and control to bulk - */ - for (i = 1; i < 9; i++) { - struct uhci_td *td; - - td = uhci->skeltd[i] = uhci_alloc_td(uhci, udev); - if (!td) { + for (i = 0; i < UHCI_NUM_SKELTD; i++) { + uhci->skeltd[i] = uhci_alloc_td(uhci, udev); + if (!uhci->skeltd[i]) { err("unable to allocate TD %d", i); - goto err_alloc_skeltd; + goto err_alloc_skel; } - - uhci_fill_td(td, 0, uhci_explen(UHCI_NULL_DATA_SIZE) | - (0x7f << TD_TOKEN_DEVADDR_SHIFT) | USB_PID_IN, 0); - td->link = cpu_to_le32(uhci->skeltd[i - 1]->dma_handle); - } - - uhci->skel_term_td = uhci_alloc_td(uhci, udev); - if (!uhci->skel_term_td) { - err("unable to allocate skel TD term"); - goto err_alloc_skeltd; } for (i = 0; i < UHCI_NUM_SKELQH; i++) { uhci->skelqh[i] = uhci_alloc_qh(uhci, udev); if (!uhci->skelqh[i]) { err("unable to allocate QH %d", i); - goto err_alloc_skelqh; + goto err_alloc_skel; } } - uhci_fill_td(uhci->skel_int1_td, 0, (UHCI_NULL_DATA_SIZE << 21) | - (0x7f << TD_TOKEN_DEVADDR_SHIFT) | USB_PID_IN, 0); - uhci->skel_int1_td->link = cpu_to_le32(uhci->skel_ls_control_qh->dma_handle) | UHCI_PTR_QH; - + /* + * 8 Interrupt queues; link int2 to int1, int4 to int2, etc + * then link int1 to control and control to bulk + */ + uhci->skel_int128_qh->link = cpu_to_le32(uhci->skel_int64_qh->dma_handle) | +UHCI_PTR_QH; + uhci->skel_int64_qh->link = cpu_to_le32(uhci->skel_int32_qh->dma_handle) | +UHCI_PTR_QH; + uhci->skel_int32_qh->link = cpu_to_le32(uhci->skel_int16_qh->dma_handle) | +UHCI_PTR_QH; + uhci->skel_int16_qh->link = cpu_to_le32(uhci->skel_int8_qh->dma_handle) | +UHCI_PTR_QH; + uhci->skel_int8_qh->link = cpu_to_le32(uhci->skel_int4_qh->dma_handle) | +UHCI_PTR_QH; + uhci->skel_int4_qh->link = cpu_to_le32(uhci->skel_int2_qh->dma_handle) | +UHCI_PTR_QH; + uhci->skel_int2_qh->link = cpu_to_le32(uhci->skel_int1_qh->dma_handle) | +UHCI_PTR_QH; + uhci->skel_int1_qh->link = cpu_to_le32(uhci->skel_ls_control_qh->dma_handle) | +UHCI_PTR_QH; uhci->skel_ls_control_qh->link = cpu_to_le32(uhci->skel_hs_control_qh->dma_handle) | UHCI_PTR_QH; - uhci->skel_ls_control_qh->element = UHCI_PTR_TERM; - uhci->skel_hs_control_qh->link = cpu_to_le32(uhci->skel_bulk_qh->dma_handle) | UHCI_PTR_QH; - uhci->skel_hs_control_qh->element = UHCI_PTR_TERM; - uhci->skel_bulk_qh->link = cpu_to_le32(uhci->skel_term_qh->dma_handle) | UHCI_PTR_QH; - uhci->skel_bulk_qh->element = UHCI_PTR_TERM; /* This dummy TD is to work around a bug in Intel PIIX controllers */ uhci_fill_td(uhci->skel_term_td, 0, (UHCI_NULL_DATA_SIZE << 21) | @@ -2285,7 +2263,7 @@ } - uhci->fl->frame[i] = cpu_to_le32(uhci->skeltd[irq]->dma_handle); + uhci->fl->frame[i] = cpu_to_le32(uhci->skelqh[irq]->dma_handle); } start_hc(uhci); @@ -2314,20 +2292,19 @@ del_timer_sync(&uhci->stall_timer); +err_alloc_skel: for (i = 0; i < UHCI_NUM_SKELQH; i++) if (uhci->skelqh[i]) { uhci_free_qh(uhci, uhci->skelqh[i]); uhci->skelqh[i] = NULL; } 
-err_alloc_skelqh: for (i = 0; i < UHCI_NUM_SKELTD; i++) if (uhci->skeltd[i]) { uhci_free_td(uhci, uhci->skeltd[i]); uhci->skeltd[i] = NULL; } -err_alloc_skeltd: usb_free_dev(udev); hcd->self.root_hub = NULL; @@ -2491,7 +2468,8 @@ } #ifdef CONFIG_PROC_FS - uhci_proc_root = create_proc_entry("driver/uhci", S_IFDIR, 0); +#define UHCI_PROC_ENTRY_NAME "driver/uhci" + uhci_proc_root = create_proc_entry(UHCI_PROC_ENTRY_NAME, S_IFDIR, 0); if (!uhci_proc_root) goto proc_failed; #endif @@ -2514,7 +2492,7 @@ up_failed: #ifdef CONFIG_PROC_FS - remove_proc_entry("uhci", 0); + remove_proc_entry(UHCI_PROC_ENTRY_NAME, 0); proc_failed: #endif @@ -2534,7 +2512,7 @@ printk(KERN_INFO "uhci: not all urb_priv's were freed\n"); #ifdef CONFIG_PROC_FS - remove_proc_entry("uhci", 0); + remove_proc_entry(UHCI_PROC_ENTRY_NAME, 0); #endif if (errbuf) diff -ur usb-2.5/drivers/usb/host/uhci-hcd.h linux/drivers/usb/host/uhci-hcd.h --- usb-2.5/drivers/usb/host/uhci-hcd.h Fri Oct 11 16:52:49 2002 +++ linux/drivers/usb/host/uhci-hcd.h Wed Oct 9 12:24:07 2002 @@ -252,23 +252,22 @@ * is stored in those TDs, along with all other endpoint state. */ -#define UHCI_NUM_SKELTD 10 -#define skel_int1_td skeltd[0] -#define skel_int2_td skeltd[1] -#define skel_int4_td skeltd[2] -#define skel_int8_td skeltd[3] -#define skel_int16_td skeltd[4] -#define skel_int32_td skeltd[5] -#define skel_int64_td skeltd[6] -#define skel_int128_td skeltd[7] -#define skel_int256_td skeltd[8] -#define skel_term_td skeltd[9] /* To work around PIIX UHCI bug */ +#define UHCI_NUM_SKELTD 1 +#define skel_term_td skeltd[0] /* To work around PIIX UHCI bug */ -#define UHCI_NUM_SKELQH 4 -#define skel_ls_control_qh skelqh[0] -#define skel_hs_control_qh skelqh[1] -#define skel_bulk_qh skelqh[2] -#define skel_term_qh skelqh[3] +#define UHCI_NUM_SKELQH 12 +#define skel_int1_qh skelqh[0] +#define skel_int2_qh skelqh[1] +#define skel_int4_qh skelqh[2] +#define skel_int8_qh skelqh[3] +#define skel_int16_qh skelqh[4] +#define skel_int32_qh skelqh[5] +#define skel_int64_qh skelqh[6] +#define skel_int128_qh skelqh[7] +#define skel_ls_control_qh skelqh[8] +#define skel_hs_control_qh skelqh[9] +#define skel_bulk_qh skelqh[10] +#define skel_term_qh skelqh[11] /* * Search tree for determining where <interval> fits in the @@ -280,11 +279,6 @@ * * For a given <interval>, this function returns the appropriate/matching * skelqh[] index value. - * - * NOTE: For UHCI, we don't really need int256_qh since the maximum interval - * is 255 ms. However, we do need an int1_qh since 1 is a valid interval - * and we should meet that frequency when requested to do so. - * This will require some change(s) to the UHCI skeleton. */ static inline int __interval_to_skel(int interval) {