/*
 * ADM5120 HCD (Host Controller Driver) for USB
 *
 * Copyright (C) 2007,2008 Gabor Juhos <juhosg at openwrt.org>
 *
 * This file was derived from: drivers/usb/host/ohci-q.c
 *   (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
 *   (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <linux/irq.h>

/*-------------------------------------------------------------------------*/

/*
 * URB goes back to driver, and isn't reissued.
 * It's completely gone from HC data structures.
 * PRECONDITION: ahcd lock held, irqs blocked.
 */
static void
finish_urb(struct admhcd *ahcd, struct urb *urb)
__releases(ahcd->lock)
__acquires(ahcd->lock)
{
	urb_priv_free(ahcd, urb->hcpriv);
	urb->hcpriv = NULL;

	spin_lock(&urb->lock);
	if (likely(urb->status == -EINPROGRESS))
		urb->status = 0;

	/* report short control reads right even though the data TD always
	 * has TD_R set. (much simpler, but creates the 1-td limit.)
	 */
	if (unlikely(urb->transfer_flags & URB_SHORT_NOT_OK)
			&& unlikely(usb_pipecontrol(urb->pipe))
			&& urb->actual_length < urb->transfer_buffer_length
			&& usb_pipein(urb->pipe)
			&& urb->status == 0) {
		urb->status = -EREMOTEIO;
#ifdef ADMHC_VERBOSE_DEBUG
		urb_print(ahcd, urb, "SHORT", usb_pipeout(urb->pipe));
#endif
	}
	spin_unlock(&urb->lock);

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_ISOCHRONOUS:
		admhcd_to_hcd(ahcd)->self.bandwidth_isoc_reqs--;
		break;
	case PIPE_INTERRUPT:
		admhcd_to_hcd(ahcd)->self.bandwidth_int_reqs--;
		break;
	}

#ifdef ADMHC_VERBOSE_DEBUG
	urb_print(ahcd, urb, "RET", usb_pipeout(urb->pipe));
#endif

	/* urb->complete() can reenter this HCD */
	spin_unlock(&ahcd->lock);
	usb_hcd_giveback_urb(admhcd_to_hcd(ahcd), urb);
	spin_lock(&ahcd->lock);
}
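
/* NOTE: usb_hcd_giveback_urb() runs urb->complete() with the ahcd lock
 * dropped (see above), so a completion handler that resubmits can
 * re-enter this HCD's enqueue path before finish_urb() returns.
 */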

/*-------------------------------------------------------------------------*
 * ED handling functions
 *-------------------------------------------------------------------------*/

/* search for the right schedule branch to use for a periodic ed.
 * does some load balancing; returns the branch, or negative errno.
 */
static int balance(struct admhcd *ahcd, int interval, int load)
{
	int i, branch = -ENOSPC;

	/* iso periods can be huge; iso tds specify frame numbers */
	if (interval > NUM_INTS)
		interval = NUM_INTS;

	/* search for the least loaded schedule branch of that period
	 * that has enough bandwidth left unreserved.
	 */
	for (i = 0; i < interval; i++) {
		if (branch < 0 || ahcd->load[branch] > ahcd->load[i]) {
			int j;

			/* usb 1.1 says 90% of one frame */
			for (j = i; j < NUM_INTS; j += interval) {
				if ((ahcd->load[j] + load) > 900)
					break;
			}
			if (j < NUM_INTS)
				continue;
			branch = i;
		}
	}
	return branch;
}
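
/* Worked example (assuming NUM_INTS == 32, as in the ohci-q.c code this
 * file was derived from): an interrupt ED with interval 8 and load 100us
 * gets the least-loaded of branches 0..7, and only if every slot
 * j = branch, branch+8, branch+16, branch+24 stays at or below 900us,
 * i.e. the USB 1.1 rule that periodic transfers may reserve at most 90%
 * of each 1ms frame.
 */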

/*-------------------------------------------------------------------------*/

/* both iso and interrupt requests have periods; this routine puts them
 * into the schedule tree in the appropriate place. most iso devices use
 * 1msec periods, but that's not required.
 */
static void periodic_link(struct admhcd *ahcd, struct ed *ed)
{
	unsigned i;

	admhc_vdbg(ahcd, "link %sed %p branch %d [%dus.], interval %d\n",
		(ed->hwINFO & cpu_to_hc32(ahcd, ED_ISO)) ? "iso " : "",
		ed, ed->branch, ed->load, ed->interval);

	for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
		struct ed **prev = &ahcd->periodic[i];
		__hc32 *prev_p = &ahcd->hcca->int_table[i];
		struct ed *here = *prev;

		/* sorting each branch by period (slow before fast)
		 * lets us share the faster parts of the tree.
		 * (plus maybe: put interrupt eds before iso)
		 */
		while (here && ed != here) {
			if (ed->interval > here->interval)
				break;
			prev = &here->ed_next;
			prev_p = &here->hwNextED;
			here = *prev;
		}
		if (ed != here) {
			ed->ed_next = here;
			if (here)
				ed->hwNextED = *prev_p;
			wmb();
			*prev = ed;
			*prev_p = cpu_to_hc32(ahcd, ed->dma);
			wmb();
		}
		ahcd->load[i] += ed->load;
	}
	admhcd_to_hcd(ahcd)->self.bandwidth_allocated += ed->load / ed->interval;
}
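
/* Example of the sharing mentioned above: an interval-4 ED occupies
 * every 4th slot of the table, while an interval-1 ED occupies all of
 * them; in the slots they share, the "slow before fast" ordering puts
 * the interval-4 ED first, with its hwNextED falling through into the
 * same interval-1 ED everywhere, so the faster tail of the tree is
 * stored only once.
 */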

/* link an ed into the HC chain */
static int ed_schedule(struct admhcd *ahcd, struct ed *ed)
{
	struct ed *old_tail;

	if (admhcd_to_hcd(ahcd)->state == HC_STATE_QUIESCING)
		return -EAGAIN;

	ed->state = ED_OPER;

	old_tail = ahcd->ed_tails[ed->type];

	ed->ed_next = old_tail->ed_next;
	if (ed->ed_next) {
		ed->ed_next->ed_prev = ed;
		ed->hwNextED = cpu_to_hc32(ahcd, ed->ed_next->dma);
	}
	ed->ed_prev = old_tail;

	old_tail->ed_next = ed;
	old_tail->hwNextED = cpu_to_hc32(ahcd, ed->dma);

	ahcd->ed_tails[ed->type] = ed;

	admhc_dma_enable(ahcd);

	return 0;
}

/*-------------------------------------------------------------------------*/

/* scan the periodic table to find and unlink this ED */
static void periodic_unlink(struct admhcd *ahcd, struct ed *ed)
{
	int i;

	for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
		struct ed *temp;
		struct ed **prev = &ahcd->periodic[i];
		__hc32 *prev_p = &ahcd->hcca->int_table[i];

		while (*prev && (temp = *prev) != ed) {
			prev_p = &temp->hwNextED;
			prev = &temp->ed_next;
		}
		if (*prev) {
			*prev_p = ed->hwNextED;
			*prev = ed->ed_next;
		}
		ahcd->load[i] -= ed->load;
	}
	admhcd_to_hcd(ahcd)->self.bandwidth_allocated -= ed->load / ed->interval;

	admhc_vdbg(ahcd, "unlink %sed %p branch %d [%dus.], interval %d\n",
		(ed->hwINFO & cpu_to_hc32(ahcd, ED_ISO)) ? "iso " : "",
		ed, ed->branch, ed->load, ed->interval);
}

/* unlink an ed from the HC chain.
 * just the link to the ed is unlinked.
 * the link from the ed still points to another operational ed or 0
 * so the HC can eventually finish the processing of the unlinked ed
 * (assuming it already started that, which needn't be true).
 *
 * ED_UNLINK is a transient state: the HC may still see this ED, but soon
 * it won't. ED_SKIP means the HC will finish its current transaction,
 * but won't start anything new. The TD queue may still grow; device
 * drivers don't know about this HCD-internal state.
 *
 * When the HC can't see the ED, something changes ED_UNLINK to one of:
 *
 *  - ED_OPER: when there's any request queued, the ED gets rescheduled
 *    immediately. HC should be working on them.
 *
 *  - ED_IDLE: when there's no TD queue. there's no reason for the HC
 *    to care about this ED; safe to disable the endpoint.
 *
 * When finish_unlinks() runs later, after SOF interrupt, it will often
 * complete one or more URB unlinks before making that state change.
 */
static void ed_deschedule(struct admhcd *ahcd, struct ed *ed)
{
#ifdef ADMHC_VERBOSE_DEBUG
	admhc_dump_ed(ahcd, "ED-DESCHED", ed, 1);
#endif

	ed->hwINFO |= cpu_to_hc32(ahcd, ED_SKIP);
	wmb();
	ed->state = ED_UNLINK;

	/* remove this ED from the HC list */
	ed->ed_prev->hwNextED = ed->hwNextED;

	/* and remove it from our list also */
	ed->ed_prev->ed_next = ed->ed_next;

	if (ed->ed_next)
		ed->ed_next->ed_prev = ed->ed_prev;

	if (ahcd->ed_tails[ed->type] == ed)
		ahcd->ed_tails[ed->type] = ed->ed_prev;
}
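
/* After ed_deschedule() the ED is off both the software list and the
 * hwNextED chain, but the HC may still hold a cached pointer to it;
 * that's why ED_UNLINK is treated as transient (see the comment above)
 * and callers wait for the next SOF before recycling the ED.
 */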

/*-------------------------------------------------------------------------*/

static struct ed *ed_create(struct admhcd *ahcd, unsigned int type, u32 info)
{
	struct ed *ed;
	struct td *td;

	ed = ed_alloc(ahcd, GFP_ATOMIC);
	if (!ed)
		goto err;

	/* dummy td; end of td list for this ed */
	td = td_alloc(ahcd, GFP_ATOMIC);
	if (!td)
		goto err_free_ed;

	switch (type) {
	case PIPE_ISOCHRONOUS:
		info |= ED_ISO;
		break;
	default:
		break;
	}

	ed->dummy = td;
	ed->state = ED_IDLE;
	ed->type = type;

	ed->hwINFO = cpu_to_hc32(ahcd, info);
	ed->hwTailP = cpu_to_hc32(ahcd, td->td_dma);
	ed->hwHeadP = ed->hwTailP;	/* ED_C, ED_H zeroed */

	return ed;

err_free_ed:
	ed_free(ahcd, ed);
err:
	return NULL;
}

/* get and maybe (re)init an endpoint. init _should_ be done only as part
 * of enumeration, usb_set_configuration() or usb_set_interface().
 */
static struct ed *ed_get(struct admhcd *ahcd, struct usb_host_endpoint *ep,
	struct usb_device *udev, unsigned int pipe, int interval)
{
	struct ed *ed;
	unsigned long flags;

	spin_lock_irqsave(&ahcd->lock, flags);

	ed = ep->hcpriv;
	if (!ed) {
		u32 info;

		/* FIXME: usbcore changes dev->devnum before SET_ADDRESS
		 * succeeds ... otherwise we wouldn't need "pipe".
		 */
		info = usb_pipedevice(pipe);
		info |= (ep->desc.bEndpointAddress & ~USB_DIR_IN) << ED_EN_SHIFT;
		info |= le16_to_cpu(ep->desc.wMaxPacketSize) << ED_MPS_SHIFT;
		if (udev->speed == USB_SPEED_FULL)
			info |= ED_SPEED_FULL;

		ed = ed_create(ahcd, usb_pipetype(pipe), info);
		if (ed)
			ep->hcpriv = ed;
	}

	spin_unlock_irqrestore(&ahcd->lock, flags);

	return ed;
}

/*-------------------------------------------------------------------------*/

/* request unlinking of an endpoint from an operational HC.
 * put the ep on the rm_list
 * real work is done at the next start frame (SOFI) hardware interrupt
 * caller guarantees HCD is running, so hardware access is safe,
 * and that ed->state is ED_OPER
 */
static void start_ed_unlink(struct admhcd *ahcd, struct ed *ed)
{
#ifdef ADMHC_VERBOSE_DEBUG
	admhc_dump_ed(ahcd, "ED-UNLINK", ed, 1);
#endif

	ed->hwINFO |= cpu_to_hc32(ahcd, ED_DEQUEUE);
	ed_deschedule(ahcd, ed);

	/* add this ED into the remove list */
	ed->ed_rm_next = ahcd->ed_rm_list;
	ahcd->ed_rm_list = ed;

	/* enable SOF interrupt */
	admhc_intr_ack(ahcd, ADMHC_INTR_SOFI);
	admhc_intr_enable(ahcd, ADMHC_INTR_SOFI);
	/* flush those writes */
	admhc_writel_flush(ahcd);

	/* SOF interrupt might get delayed; record the frame counter value that
	 * indicates when the HC isn't looking at it, so concurrent unlinks
	 * behave. frame_no wraps every 2^16 msec, and changes right before
	 * SOF is triggered.
	 */
	ed->tick = admhc_frame_no(ahcd) + 1;
}
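
/* tick_before() is assumed to compare these values wrap-safely, e.g.
 * as (s16)(tick - ed->tick) < 0, so an ed->tick recorded near 0xffff
 * still counts as "in the future" for a frame counter that has just
 * wrapped to 0.
 */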

/*-------------------------------------------------------------------------*
 * TD handling functions
 *-------------------------------------------------------------------------*/

/* enqueue next TD for this URB (OHCI spec 5.2.8.2) */
static void
td_fill(struct admhcd *ahcd, u32 info, dma_addr_t data, int len,
	struct urb *urb, int index)
{
	struct td *td, *td_pt;
	struct urb_priv *urb_priv = urb->hcpriv;
	int hash;
	u32 cbl = 0;

#if 1
	if (index == (urb_priv->td_cnt - 1) &&
			((urb->transfer_flags & URB_NO_INTERRUPT) == 0))
		cbl |= TD_IE;
#else
	if (index == (urb_priv->td_cnt - 1))
		cbl |= TD_IE;
#endif

	/* use this td as the next dummy */
	td_pt = urb_priv->td[index];

	/* fill the old dummy TD */
	td = urb_priv->td[index] = urb_priv->ed->dummy;
	urb_priv->ed->dummy = td_pt;

	td->ed = urb_priv->ed;
	td->next_dl_td = NULL;
	td->index = index;
	td->urb = urb;
	td->data_dma = data;
	if (!len)
		data = 0;

	if (data)
		cbl |= (len & TD_BL_MASK);

	info |= TD_OWN;

	/* setup hardware specific fields */
	td->hwINFO = cpu_to_hc32(ahcd, info);
	td->hwDBP = cpu_to_hc32(ahcd, data);
	td->hwCBL = cpu_to_hc32(ahcd, cbl);
	td->hwNextTD = cpu_to_hc32(ahcd, td_pt->td_dma);

	/* append to queue */
	list_add_tail(&td->td_list, &td->ed->td_list);

	/* hash it for later reverse mapping */
	hash = TD_HASH_FUNC(td->td_dma);
	td->td_hash = ahcd->td_hash[hash];
	ahcd->td_hash[hash] = td;

	/* HC might read the TD (or cachelines) right away ... */
	wmb();
	td->ed->hwTailP = td->hwNextTD;
}
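
/* The dummy-TD scheme above is inherited from OHCI: the ED's hwTailP
 * always names one unused TD, so td_fill() writes the transfer into the
 * old dummy, donates urb_priv->td[index] as the new dummy, and only
 * then advances hwTailP, which is the single store that makes the new
 * TD visible to the HC.
 */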

/*-------------------------------------------------------------------------*/

/* Prepare all TDs of a transfer, and queue them onto the ED.
 * Caller guarantees HC is active.
 * Usually the ED is already on the schedule, so TDs might be
 * processed as soon as they're queued.
 */
static void td_submit_urb(struct admhcd *ahcd, struct urb *urb)
{
	struct urb_priv *urb_priv = urb->hcpriv;
	dma_addr_t data;
	int data_len = urb->transfer_buffer_length;
	int cnt = 0;
	u32 info = 0;
	int is_out = usb_pipeout(urb->pipe);
	u32 toggle = 0;

	/* OHCI handles the bulk/interrupt data toggles itself. We just
	 * use the device toggle bits for resetting, and rely on the fact
	 * that resetting toggle is meaningless if the endpoint is active.
	 */
	if (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), is_out)) {
		toggle = TD_T_CARRY;
	} else {
		toggle = TD_T_DATA0;
		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			is_out, 1);
	}

	urb_priv->td_idx = 0;
	list_add(&urb_priv->pending, &ahcd->pending);

	if (data_len)
		data = urb->transfer_dma;
	else
		data = 0;

	/* NOTE: TD_CC is set so we can tell which TDs the HC processed by
	 * using TD_CC_GET, as well as by seeing them on the done list.
	 * (CC = NotAccessed ... 0x0F, or 0x0E in PSWs for ISO.)
	 */
	switch (urb_priv->ed->type) {
	case PIPE_INTERRUPT:
		info = is_out
			? TD_T_CARRY | TD_SCC_NOTACCESSED | TD_DP_OUT
			: TD_T_CARRY | TD_SCC_NOTACCESSED | TD_DP_IN;

		/* setup service interval and starting frame number */
		info |= (urb->start_frame & TD_FN_MASK);
		info |= (urb->interval & TD_ISI_MASK) << TD_ISI_SHIFT;

		td_fill(ahcd, info, data, data_len, urb, cnt);
		cnt++;

		admhcd_to_hcd(ahcd)->self.bandwidth_int_reqs++;
		break;

	case PIPE_BULK:
		info = is_out
			? TD_SCC_NOTACCESSED | TD_DP_OUT
			: TD_SCC_NOTACCESSED | TD_DP_IN;

		/* TDs _could_ transfer up to 8K each */
		while (data_len > TD_DATALEN_MAX) {
			td_fill(ahcd, info | ((cnt) ? TD_T_CARRY : toggle),
				data, TD_DATALEN_MAX, urb, cnt);
			data += TD_DATALEN_MAX;
			data_len -= TD_DATALEN_MAX;
			cnt++;
		}

		td_fill(ahcd, info | ((cnt) ? TD_T_CARRY : toggle), data,
			data_len, urb, cnt);
		cnt++;

		if ((urb->transfer_flags & URB_ZERO_PACKET)
				&& (cnt < urb_priv->td_cnt)) {
			td_fill(ahcd, info | ((cnt) ? TD_T_CARRY : toggle),
				0, 0, urb, cnt);
			cnt++;
		}
		break;

	/* control manages DATA0/DATA1 toggle per-request; SETUP resets it,
	 * any DATA phase works normally, and the STATUS ack is special.
	 */
	case PIPE_CONTROL:
		/* fill a TD for the setup */
		info = TD_SCC_NOTACCESSED | TD_DP_SETUP | TD_T_DATA0;
		td_fill(ahcd, info, urb->setup_dma, 8, urb, cnt++);

		if (data_len > 0) {
			/* fill a TD for the data */
			info = TD_SCC_NOTACCESSED | TD_T_DATA1;
			info |= is_out ? TD_DP_OUT : TD_DP_IN;
			/* NOTE: mishandles transfers >8K, some >4K */
			td_fill(ahcd, info, data, data_len, urb, cnt++);
		}

		/* fill a TD for the ACK */
		info = (is_out || data_len == 0)
			? TD_SCC_NOTACCESSED | TD_DP_IN | TD_T_DATA1
			: TD_SCC_NOTACCESSED | TD_DP_OUT | TD_T_DATA1;
		td_fill(ahcd, info, data, 0, urb, cnt++);
		break;

	/* ISO has no retransmit, so no toggle;
	 * Each TD could handle multiple consecutive frames (interval 1);
	 * we could often reduce the number of TDs here.
	 */
	case PIPE_ISOCHRONOUS:
		info = is_out
			? TD_T_CARRY | TD_SCC_NOTACCESSED | TD_DP_OUT
			: TD_T_CARRY | TD_SCC_NOTACCESSED | TD_DP_IN;

		for (cnt = 0; cnt < urb->number_of_packets; cnt++) {
			int frame = urb->start_frame;

			frame += cnt * urb->interval;
			frame &= TD_FN_MASK;
			td_fill(ahcd, info | frame,
				data + urb->iso_frame_desc[cnt].offset,
				urb->iso_frame_desc[cnt].length, urb, cnt);
		}
		admhcd_to_hcd(ahcd)->self.bandwidth_isoc_reqs++;
		break;
	}

	if (urb_priv->td_cnt != cnt)
		admhc_err(ahcd, "bad number of tds created for urb %p\n", urb);
}
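
/* TD accounting example for the switch above: a control transfer with a
 * data stage takes three TDs (SETUP, DATA, STATUS) and two without one;
 * a 20000-byte bulk URB takes three (8192 + 8192 + 3616, given the 8K
 * TD_DATALEN_MAX), plus one more when URB_ZERO_PACKET requires a
 * terminating zero-length packet. urb_priv->td_cnt is assumed to have
 * been sized to match by the enqueue path, hence the consistency check
 * above.
 */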

/*-------------------------------------------------------------------------*
 * Done List handling functions
 *-------------------------------------------------------------------------*/

/* calculate transfer length/status and update the urb
 * PRECONDITION: irqsafe (only for urb->status locking)
 */
static int td_done(struct admhcd *ahcd, struct urb *urb, struct td *td)
{
	struct urb_priv *urb_priv = urb->hcpriv;
	u32 info;
	u32 bl;
	u32 tdDBP;
	int type = usb_pipetype(urb->pipe);
	int cc;

	info = hc32_to_cpup(ahcd, &td->hwINFO);
	tdDBP = hc32_to_cpup(ahcd, &td->hwDBP);
	bl = TD_BL_GET(hc32_to_cpup(ahcd, &td->hwCBL));
	cc = TD_CC_GET(info);

	/* ISO ... drivers see per-TD length/status */
	if (type == PIPE_ISOCHRONOUS) {
		int dlen = 0;

		/* NOTE: assumes FC in tdINFO == 0, and that
		 * only the first of 0..MAXPSW psws is used.
		 */
		if (info & TD_CC)	/* hc didn't touch? */
			return cc;

		if (usb_pipeout(urb->pipe))
			dlen = urb->iso_frame_desc[td->index].length;
		else {
			/* short reads are always OK for ISO */
			if (cc == TD_CC_DATAUNDERRUN)
				cc = TD_CC_NOERROR;
			dlen = tdDBP - td->data_dma + bl;
		}

		urb->actual_length += dlen;
		urb->iso_frame_desc[td->index].actual_length = dlen;
		urb->iso_frame_desc[td->index].status = cc_to_error[cc];

		if (cc != TD_CC_NOERROR)
			admhc_vdbg(ahcd,
				"urb %p iso td %p (%d) len %d cc %d\n",
				urb, td, 1 + td->index, dlen, cc);

	/* BULK, INT, CONTROL ... drivers see aggregate length/status,
	 * except that "setup" bytes aren't counted and "short" transfers
	 * might not be reported as errors.
	 */
	} else {
		/* update packet status if needed (short is normally ok) */
		if (cc == TD_CC_DATAUNDERRUN
				&& !(urb->transfer_flags & URB_SHORT_NOT_OK))
			cc = TD_CC_NOERROR;

		if (cc != TD_CC_NOERROR && cc < TD_CC_HCD0) {
			spin_lock(&urb->lock);
			if (urb->status == -EINPROGRESS)
				urb->status = cc_to_error[cc];
			spin_unlock(&urb->lock);
		}

		/* count all non-empty packets except control SETUP packet */
		if ((type != PIPE_CONTROL || td->index != 0) && tdDBP != 0) {
			urb->actual_length += tdDBP - td->data_dma + bl;
		}

		if (cc != TD_CC_NOERROR && cc < TD_CC_HCD0)
			admhc_vdbg(ahcd,
				"urb %p td %p (%d) cc %d, len=%d/%d\n",
				urb, td, td->index, cc,
				urb->actual_length,
				urb->transfer_buffer_length);
	}

	list_del(&td->td_list);
	urb_priv->td_idx++;

	return cc;
}
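
/* td_done() hands its condition code back to the caller (see ed_update())
 * so fault handling can be decided there; advancing urb_priv->td_idx here
 * is what lets callers detect a fully retired URB via td_idx == td_cnt
 * and then call finish_urb().
 */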

/*-------------------------------------------------------------------------*/

static inline struct td *
ed_halted(struct admhcd *ahcd, struct td *td, int cc, struct td *rev)
{
	struct urb *urb = td->urb;
	struct ed *ed = td->ed;
	struct list_head *tmp = td->td_list.next;
	__hc32 toggle = ed->hwHeadP & cpu_to_hc32(ahcd, ED_C);

	admhc_dump_ed(ahcd, "ed halted", td->ed, 1);

	/* clear ed halt; this is the td that caused it, but keep it inactive
	 * until its urb->complete() has a chance to clean up.
	 */
	ed->hwINFO |= cpu_to_hc32(ahcd, ED_SKIP);
	wmb();
	ed->hwHeadP &= ~cpu_to_hc32(ahcd, ED_H);

	/* put any later tds from this urb onto the donelist, after 'td',
	 * order won't matter here: no errors, and nothing was transferred.
	 * also patch the ed so it looks as if those tds completed normally.
	 */
	while (tmp != &ed->td_list) {
		struct td *next;
		__hc32 info;

		next = list_entry(tmp, struct td, td_list);
		tmp = next->td_list.next;

		if (next->urb != urb)
			break;

		/* NOTE: if multi-td control DATA segments get supported,
		 * this urb had one of them, this td wasn't the last td
		 * in that segment (TD_R clear), this ed halted because
		 * of a short read, _and_ URB_SHORT_NOT_OK is clear ...
		 * then we need to leave the control STATUS packet queued
		 * (unlinked).
		 */
		info = next->hwINFO;
		info |= cpu_to_hc32(ahcd, TD_DONE);
		info &= ~cpu_to_hc32(ahcd, TD_CC);
		next->hwINFO = info;

		next->next_dl_td = rev;
		rev = next;

		ed->hwHeadP = next->hwNextTD | toggle;
	}

	/* help for troubleshooting: report anything that
	 * looks odd ... that doesn't include protocol stalls
	 * (or maybe some other things)
	 */
	switch (cc) {
	case TD_CC_DATAUNDERRUN:
		if ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0)
			break;
		/* fallthrough */
	case TD_CC_STALL:
		if (usb_pipecontrol(urb->pipe))
			break;
		/* fallthrough */
	default:
		admhc_dbg(ahcd,
			"urb %p path %s ep%d%s %08x cc %d --> status %d\n",
			urb, urb->dev->devpath,
			usb_pipeendpoint(urb->pipe),
			usb_pipein(urb->pipe) ? "in" : "out",
			hc32_to_cpu(ahcd, td->hwINFO),
			cc, cc_to_error[cc]);
	}

	return rev;
}
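
/* The list built through 'rev' is deliberately in reverse order, matching
 * the LIFO shape of the hardware done queue these TDs are spliced into;
 * see the "(re-reversed) donelist" note below.
 */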

/*-------------------------------------------------------------------------*/

/* there are some urbs/eds to unlink; called in_irq(), with HCD locked */
static void
finish_unlinks(struct admhcd *ahcd, u16 tick)
{
	struct ed *ed, **last;

rescan_all:
	for (last = &ahcd->ed_rm_list, ed = *last; ed != NULL; ed = *last) {
		struct list_head *entry, *tmp;
		int completed, modified;
		__hc32 *prev;

		/* only take off EDs that the HC isn't using, accounting for
		 * frame counter wraps and EDs with partially retired TDs
		 */
		if (likely(HC_IS_RUNNING(admhcd_to_hcd(ahcd)->state))) {
			if (tick_before(tick, ed->tick)) {
skip_ed:
				last = &ed->ed_rm_next;
				continue;
			}

			if (!list_empty(&ed->td_list)) {
				struct td *td;
				u32 head;

				td = list_entry(ed->td_list.next, struct td,
						td_list);
				head = hc32_to_cpu(ahcd, ed->hwHeadP) &
						TD_MASK;

				/* INTR_WDH may need to clean up first */
				if (td->td_dma != head)
					goto skip_ed;
			}
		}

		/* reentrancy: if we drop the schedule lock, someone might
		 * have modified this list. normally it's just prepending
		 * entries (which we'd ignore), but paranoia won't hurt.
		 */
		*last = ed->ed_rm_next;
		ed->ed_rm_next = NULL;
		modified = 0;

		/* unlink urbs as requested, but rescan the list after
		 * we call a completion since it might have unlinked
		 * another (earlier) urb
		 *
		 * When we get here, the HC doesn't see this ed. But it
		 * must not be rescheduled until all completed URBs have
		 * been given back to the driver.
		 */
rescan_this:
		completed = 0;
		prev = &ed->hwHeadP;
		list_for_each_safe(entry, tmp, &ed->td_list) {
			struct td *td;
			struct urb *urb;
			struct urb_priv *urb_priv;
			__hc32 savebits;

			td = list_entry(entry, struct td, td_list);
			urb = td->urb;
			urb_priv = td->urb->hcpriv;

			if (urb->status == -EINPROGRESS) {
				prev = &td->hwNextTD;
				continue;
			}

			if (urb_priv == NULL)
				continue;

			/* patch pointer hc uses */
			savebits = *prev & ~cpu_to_hc32(ahcd, TD_MASK);
			*prev = td->hwNextTD | savebits;

			/* HC may have partly processed this TD */
#ifdef ADMHC_VERBOSE_DEBUG
			urb_print(ahcd, urb, "PARTIAL", 0);
#endif
			td_done(ahcd, urb, td);

			/* if URB is done, clean up */
			if (urb_priv->td_idx == urb_priv->td_cnt) {
				modified = completed = 1;
				finish_urb(ahcd, urb);
			}
		}
		if (completed && !list_empty(&ed->td_list))
			goto rescan_this;

		/* ED's now officially unlinked, hc doesn't see */
		ed->state = ED_IDLE;
		ed->hwHeadP &= ~cpu_to_hc32(ahcd, ED_H);
		ed->hwNextED = 0;
		wmb();
		ed->hwINFO &= ~cpu_to_hc32(ahcd, ED_SKIP | ED_DEQUEUE);

		/* but if there's work queued, reschedule */
		if (!list_empty(&ed->td_list)) {
			if (HC_IS_RUNNING(admhcd_to_hcd(ahcd)->state))
				ed_schedule(ahcd, ed);
		}

		if (modified)
			goto rescan_all;
	}
}
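
/* Two levels of rescanning above: 'completed' restarts the TD walk of a
 * single ED because finish_urb() may have unlinked other TDs of that ED,
 * while 'modified' restarts the whole ed_rm_list walk because a
 * completion handler may have queued or unlinked another (earlier) ED.
 */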

/*-------------------------------------------------------------------------*/

/*
 * Process normal completions (error or success) and clean the schedules.
 *
 * This is the main path for handing urbs back to drivers. The only other
 * path is finish_unlinks(), which unlinks URBs using ed_rm_list, instead of
 * scanning the (re-reversed) donelist as this does.
 */
static void ed_unhalt(struct admhcd *ahcd, struct ed *ed, struct urb *urb)
{
	struct list_head *entry, *tmp;
	__hc32 toggle = ed->hwHeadP & cpu_to_hc32(ahcd, ED_C);

#ifdef ADMHC_VERBOSE_DEBUG
	admhc_dump_ed(ahcd, "UNHALT", ed, 0);
#endif

	/* clear ed halt; this is the td that caused it, but keep it inactive
	 * until its urb->complete() has a chance to clean up.
	 */
	ed->hwINFO |= cpu_to_hc32(ahcd, ED_SKIP);
	wmb();
	ed->hwHeadP &= ~cpu_to_hc32(ahcd, ED_H);

	list_for_each_safe(entry, tmp, &ed->td_list) {
		struct td *td = list_entry(entry, struct td, td_list);
		__hc32 info;

		if (td->urb != urb)
			break;

		info = td->hwINFO;
		info &= ~cpu_to_hc32(ahcd, TD_CC | TD_OWN);
		td->hwINFO = info;

		ed->hwHeadP = td->hwNextTD | toggle;
		wmb();
	}
}

static void ed_intr_refill(struct admhcd *ahcd, struct ed *ed)
{
	__hc32 toggle = ed->hwHeadP & cpu_to_hc32(ahcd, ED_C);

	ed->hwHeadP = ed->hwTailP | toggle;
}

static inline int is_ed_halted(struct admhcd *ahcd, struct ed *ed)
{
	return ((hc32_to_cpup(ahcd, &ed->hwHeadP) & ED_H) == ED_H);
}

static inline int is_td_halted(struct admhcd *ahcd, struct ed *ed,
		struct td *td)
{
	return ((hc32_to_cpup(ahcd, &ed->hwHeadP) & TD_MASK) ==
		(hc32_to_cpup(ahcd, &td->hwNextTD) & TD_MASK));
}
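
/* These two helpers work together: is_ed_halted() tests the ED_H halt
 * bit in hwHeadP, and is_td_halted() assumes the HC leaves hwHeadP
 * pointing just past the offending TD, so comparing it with
 * td->hwNextTD identifies the TD that caused the halt.
 */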

static void ed_update(struct admhcd *ahcd, struct ed *ed)
{
	struct list_head *entry, *tmp;

#ifdef ADMHC_VERBOSE_DEBUG
	admhc_dump_ed(ahcd, "UPDATE", ed, 1);
#endif

	list_for_each_safe(entry, tmp, &ed->td_list) {
		struct td *td = list_entry(entry, struct td, td_list);
		struct urb *urb = td->urb;
		struct urb_priv *urb_priv = urb->hcpriv;
		int cc;

		if (hc32_to_cpup(ahcd, &td->hwINFO) & TD_OWN)
			break;

		/* update URB's length and status from TD */
		cc = td_done(ahcd, urb, td);
		if (is_ed_halted(ahcd, ed) && is_td_halted(ahcd, ed, td))
			ed_unhalt(ahcd, ed, urb);

		if (ed->type == PIPE_INTERRUPT)
			ed_intr_refill(ahcd, ed);

		/* If all this urb's TDs are done, call complete() */
		if (urb_priv->td_idx == urb_priv->td_cnt)
			finish_urb(ahcd, urb);

		/* clean schedule: unlink EDs that are no longer busy */
		if (list_empty(&ed->td_list)) {
			if (ed->state == ED_OPER)
				start_ed_unlink(ahcd, ed);

		/* ... reenabling halted EDs only after fault cleanup */
		} else if ((ed->hwINFO & cpu_to_hc32(ahcd,
					ED_SKIP | ED_DEQUEUE))
				== cpu_to_hc32(ahcd, ED_SKIP)) {
			td = list_entry(ed->td_list.next, struct td, td_list);

			if (!(td->hwINFO & cpu_to_hc32(ahcd, TD_DONE))) {
				ed->hwINFO &= ~cpu_to_hc32(ahcd, ED_SKIP);
				/* ... hc may need waking-up */
				switch (ed->type) {
				case PIPE_CONTROL:
					admhc_writel(ahcd, OHCI_CLF,
						&ahcd->regs->cmdstatus);
					break;
				case PIPE_BULK:
					admhc_writel(ahcd, OHCI_BLF,
						&ahcd->regs->cmdstatus);
					break;
				}
			}

			if ((td->hwINFO & cpu_to_hc32(ahcd, TD_OWN)))
				ed->hwINFO &= ~cpu_to_hc32(ahcd, ED_SKIP);
		}
	}
}

/*-------------------------------------------------------------------------*/

/* there are some tds completed; called in_irq(), with HCD locked */
static void admhc_td_complete(struct admhcd *ahcd)
{
	struct ed *ed;

	for (ed = ahcd->ed_head; ed; ed = ed->ed_next) {
		if (ed->state != ED_OPER)
			continue;

		ed_update(ahcd, ed);
	}
}