[adm5120] USB driver: fix a bug in isochronous transfers
target/linux/adm5120/files/drivers/usb/host/adm5120-q.c
/*
 * ADM5120 HCD (Host Controller Driver) for USB
 *
 * Copyright (C) 2007,2008 Gabor Juhos <juhosg at openwrt.org>
 *
 * This file was derived from: drivers/usb/host/ohci-q.c
 *   (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
 *   (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License version 2 as published
 *  by the Free Software Foundation.
 *
 */

#include <linux/irq.h>

/*-------------------------------------------------------------------------*/

/*
 * URB goes back to driver, and isn't reissued.
 * It's completely gone from HC data structures.
 * PRECONDITION:  ahcd lock held, irqs blocked.
 */
static void
finish_urb(struct admhcd *ahcd, struct urb *urb)
__releases(ahcd->lock)
__acquires(ahcd->lock)
{
        urb_priv_free(ahcd, urb->hcpriv);
        urb->hcpriv = NULL;

        spin_lock(&urb->lock);
        if (likely(urb->status == -EINPROGRESS))
                urb->status = 0;

        /* report short control reads right even though the data TD always
         * has TD_R set.  (much simpler, but creates the 1-td limit.)
         */
        if (unlikely(urb->transfer_flags & URB_SHORT_NOT_OK)
                        && unlikely(usb_pipecontrol(urb->pipe))
                        && urb->actual_length < urb->transfer_buffer_length
                        && usb_pipein(urb->pipe)
                        && urb->status == 0) {
                urb->status = -EREMOTEIO;
#ifdef ADMHC_VERBOSE_DEBUG
                urb_print(ahcd, urb, "SHORT", usb_pipeout(urb->pipe));
#endif
        }
        spin_unlock(&urb->lock);

        switch (usb_pipetype(urb->pipe)) {
        case PIPE_ISOCHRONOUS:
                admhcd_to_hcd(ahcd)->self.bandwidth_isoc_reqs--;
                break;
        case PIPE_INTERRUPT:
                admhcd_to_hcd(ahcd)->self.bandwidth_int_reqs--;
                break;
        }

#ifdef ADMHC_VERBOSE_DEBUG
        urb_print(ahcd, urb, "RET", usb_pipeout(urb->pipe));
#endif

        /* urb->complete() can reenter this HCD */
        spin_unlock(&ahcd->lock);
        usb_hcd_giveback_urb(admhcd_to_hcd(ahcd), urb);
        spin_lock(&ahcd->lock);
}


/*-------------------------------------------------------------------------*
 * ED handling functions
 *-------------------------------------------------------------------------*/

#if 0   /* FIXME */
/* search for the right schedule branch to use for a periodic ed.
 * does some load balancing; returns the branch, or negative errno.
 */
static int balance(struct admhcd *ahcd, int interval, int load)
{
        int     i, branch = -ENOSPC;

        /* iso periods can be huge; iso tds specify frame numbers */
        if (interval > NUM_INTS)
                interval = NUM_INTS;

        /* search for the least loaded schedule branch of that period
         * that has enough bandwidth left unreserved.
         */
        for (i = 0; i < interval; i++) {
                if (branch < 0 || ahcd->load[branch] > ahcd->load[i]) {
                        int     j;

                        /* usb 1.1 says 90% of one frame */
                        for (j = i; j < NUM_INTS; j += interval) {
                                if ((ahcd->load[j] + load) > 900)
                                        break;
                        }
                        if (j < NUM_INTS)
                                continue;
                        branch = i;
                }
        }
        return branch;
}
#endif

/*-------------------------------------------------------------------------*/

#if 0   /* FIXME */
/* both iso and interrupt requests have periods; this routine puts them
 * into the schedule tree in the appropriate place.  most iso devices use
 * 1msec periods, but that's not required.
 */
static void periodic_link(struct admhcd *ahcd, struct ed *ed)
{
        unsigned        i;

        admhc_vdbg(ahcd, "link %sed %p branch %d [%dus.], interval %d\n",
                (ed->hwINFO & cpu_to_hc32(ahcd, ED_ISO)) ? "iso " : "",
                ed, ed->branch, ed->load, ed->interval);

        for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
                struct ed       **prev = &ahcd->periodic[i];
                __hc32          *prev_p = &ahcd->hcca->int_table[i];
                struct ed       *here = *prev;

                /* sorting each branch by period (slow before fast)
                 * lets us share the faster parts of the tree.
                 * (plus maybe: put interrupt eds before iso)
                 */
                while (here && ed != here) {
                        if (ed->interval > here->interval)
                                break;
                        prev = &here->ed_next;
                        prev_p = &here->hwNextED;
                        here = *prev;
                }
                if (ed != here) {
                        ed->ed_next = here;
                        if (here)
                                ed->hwNextED = *prev_p;
                        wmb();
                        *prev = ed;
                        *prev_p = cpu_to_hc32(ahcd, ed->dma);
                        wmb();
                }
                ahcd->load[i] += ed->load;
        }
        admhcd_to_hcd(ahcd)->self.bandwidth_allocated += ed->load / ed->interval;
}
#endif

/* link an ed into the HC chain */

static int ed_schedule(struct admhcd *ahcd, struct ed *ed)
{
        struct ed *old_tail;

        if (admhcd_to_hcd(ahcd)->state == HC_STATE_QUIESCING)
                return -EAGAIN;

        ed->state = ED_OPER;

        old_tail = ahcd->ed_tails[ed->type];

        ed->ed_next = old_tail->ed_next;
        if (ed->ed_next) {
                ed->ed_next->ed_prev = ed;
                ed->hwNextED = cpu_to_hc32(ahcd, ed->ed_next->dma);
        }
        ed->ed_prev = old_tail;

        old_tail->ed_next = ed;
        old_tail->hwNextED = cpu_to_hc32(ahcd, ed->dma);

        ahcd->ed_tails[ed->type] = ed;

        admhc_dma_enable(ahcd);

        return 0;
}

/*-------------------------------------------------------------------------*/

#if 0   /* FIXME */
/* scan the periodic table to find and unlink this ED */
static void periodic_unlink(struct admhcd *ahcd, struct ed *ed)
{
        int     i;

        for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
                struct ed       *temp;
                struct ed       **prev = &ahcd->periodic[i];
                __hc32          *prev_p = &ahcd->hcca->int_table[i];

                while (*prev && (temp = *prev) != ed) {
                        prev_p = &temp->hwNextED;
                        prev = &temp->ed_next;
                }
                if (*prev) {
                        *prev_p = ed->hwNextED;
                        *prev = ed->ed_next;
                }
                ahcd->load[i] -= ed->load;
        }

        admhcd_to_hcd(ahcd)->self.bandwidth_allocated -= ed->load / ed->interval;
        admhc_vdbg(ahcd, "unlink %sed %p branch %d [%dus.], interval %d\n",
                (ed->hwINFO & cpu_to_hc32(ahcd, ED_ISO)) ? "iso " : "",
                ed, ed->branch, ed->load, ed->interval);
}
#endif

/* unlink an ed from the HC chain.
 * just the link to the ed is unlinked.
 * the link from the ed still points to another operational ed or 0
 * so the HC can eventually finish the processing of the unlinked ed
 * (assuming it already started that, which needn't be true).
 *
 * ED_UNLINK is a transient state: the HC may still see this ED, but soon
 * it won't.  ED_SKIP means the HC will finish its current transaction,
 * but won't start anything new.  The TD queue may still grow; device
 * drivers don't know about this HCD-internal state.
 *
 * When the HC can't see the ED, something changes ED_UNLINK to one of:
 *
 *  - ED_OPER: when there's any request queued, the ED gets rescheduled
 *    immediately.  HC should be working on them.
 *
 *  - ED_IDLE: when there's no TD queue. there's no reason for the HC
 *    to care about this ED; safe to disable the endpoint.
 *
 * When finish_unlinks() runs later, after SOF interrupt, it will often
 * complete one or more URB unlinks before making that state change.
 */
static void ed_deschedule(struct admhcd *ahcd, struct ed *ed)
{

#ifdef ADMHC_VERBOSE_DEBUG
        admhc_dump_ed(ahcd, "ED-DESCHED", ed, 1);
#endif

        ed->hwINFO |= cpu_to_hc32(ahcd, ED_SKIP);
        wmb();
        ed->state = ED_UNLINK;

        /* remove this ED from the HC list */
        ed->ed_prev->hwNextED = ed->hwNextED;

        /* and remove it from our list also */
        ed->ed_prev->ed_next = ed->ed_next;

        if (ed->ed_next)
                ed->ed_next->ed_prev = ed->ed_prev;

        if (ahcd->ed_tails[ed->type] == ed)
                ahcd->ed_tails[ed->type] = ed->ed_prev;
}

/*-------------------------------------------------------------------------*/

static struct ed *ed_create(struct admhcd *ahcd, unsigned int type, u32 info)
{
        struct ed *ed;
        struct td *td;

        ed = ed_alloc(ahcd, GFP_ATOMIC);
        if (!ed)
                goto err;

        /* dummy td; end of td list for this ed */
        td = td_alloc(ahcd, GFP_ATOMIC);
        if (!td)
                goto err_free_ed;

        switch (type) {
        case PIPE_INTERRUPT:
                info |= ED_INT;
                break;
        case PIPE_ISOCHRONOUS:
                info |= ED_ISO;
                break;
        }

        ed->dummy = td;
        ed->state = ED_IDLE;
        ed->type = type;

        ed->hwINFO = cpu_to_hc32(ahcd, info);
        ed->hwTailP = cpu_to_hc32(ahcd, td->td_dma);
        ed->hwHeadP = ed->hwTailP;      /* ED_C, ED_H zeroed */

        return ed;

err_free_ed:
        ed_free(ahcd, ed);
err:
        return NULL;
}

/* get and maybe (re)init an endpoint. init _should_ be done only as part
 * of enumeration, usb_set_configuration() or usb_set_interface().
 */
static struct ed *ed_get(struct admhcd *ahcd, struct usb_host_endpoint *ep,
        struct usb_device *udev, unsigned int pipe, int interval)
{
        struct ed               *ed;
        unsigned long           flags;

        spin_lock_irqsave(&ahcd->lock, flags);

        ed = ep->hcpriv;
        if (!ed) {
                u32             info;

                /* FIXME: usbcore changes dev->devnum before SET_ADDRESS
                 * succeeds ... otherwise we wouldn't need "pipe".
                 */
                info = usb_pipedevice(pipe);
                info |= (ep->desc.bEndpointAddress & ~USB_DIR_IN) << ED_EN_SHIFT;
                info |= le16_to_cpu(ep->desc.wMaxPacketSize) << ED_MPS_SHIFT;
                if (udev->speed == USB_SPEED_FULL)
                        info |= ED_SPEED_FULL;

                ed = ed_create(ahcd, usb_pipetype(pipe), info);
                if (ed)
                        ep->hcpriv = ed;
        }

        spin_unlock_irqrestore(&ahcd->lock, flags);

        return ed;
}

/*-------------------------------------------------------------------------*/

/* request unlinking of an endpoint from an operational HC.
 * put the ep on the rm_list
 * real work is done at the next start frame (SOFI) hardware interrupt
 * caller guarantees HCD is running, so hardware access is safe,
 * and that ed->state is ED_OPER
 */
static void start_ed_unlink(struct admhcd *ahcd, struct ed *ed)
{

#ifdef ADMHC_VERBOSE_DEBUG
        admhc_dump_ed(ahcd, "ED-UNLINK", ed, 1);
#endif

        ed->hwINFO |= cpu_to_hc32(ahcd, ED_DEQUEUE);
        ed_deschedule(ahcd, ed);

        /* add this ED into the remove list */
        ed->ed_rm_next = ahcd->ed_rm_list;
        ahcd->ed_rm_list = ed;

        /* enable SOF interrupt */
        admhc_intr_ack(ahcd, ADMHC_INTR_SOFI);
        admhc_intr_enable(ahcd, ADMHC_INTR_SOFI);
        /* flush those writes */
        admhc_writel_flush(ahcd);

        /* SOF interrupt might get delayed; record the frame counter value that
         * indicates when the HC isn't looking at it, so concurrent unlinks
         * behave.  frame_no wraps every 2^16 msec, and changes right before
         * SOF is triggered.
         */
        ed->tick = admhc_frame_no(ahcd) + 1;
}
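
/* Illustrative sketch, not part of the original file: finish_unlinks()
 * below compares ticks with tick_before(), which has to tolerate the
 * 2^16 msec wrap of the frame counter noted above.  A plausible
 * wrap-safe comparison (the driver's real helper is defined elsewhere):
 */
#if 0   /* sketch only */
static inline int tick_before(u16 tick, u16 tick2)
{
        /* signed 16-bit difference handles counter wrap-around */
        return (s16)(tick - tick2) < 0;
}
#endif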

/*-------------------------------------------------------------------------*
 * TD handling functions
 *-------------------------------------------------------------------------*/

/* enqueue next TD for this URB (OHCI spec 5.2.8.2) */

static void
td_fill(struct admhcd *ahcd, u32 info, dma_addr_t data, int len,
        struct urb *urb, int index)
{
        struct td               *td, *td_pt;
        struct urb_priv         *urb_priv = urb->hcpriv;
        int                     hash;
        u32                     cbl = 0;

#if 1
        if (index == (urb_priv->td_cnt - 1) &&
                        ((urb->transfer_flags & URB_NO_INTERRUPT) == 0))
                cbl |= TD_IE;
#else
        if (index == (urb_priv->td_cnt - 1))
                cbl |= TD_IE;
#endif

        /* use this td as the next dummy */
        td_pt = urb_priv->td[index];

        /* fill the old dummy TD */
        td = urb_priv->td[index] = urb_priv->ed->dummy;
        urb_priv->ed->dummy = td_pt;

        td->ed = urb_priv->ed;
        td->next_dl_td = NULL;
        td->index = index;
        td->urb = urb;
        td->data_dma = data;
        if (!len)
                data = 0;

        if (data)
                cbl |= (len & TD_BL_MASK);

        info |= TD_OWN;

        /* setup hardware specific fields */
        td->hwINFO = cpu_to_hc32(ahcd, info);
        td->hwDBP = cpu_to_hc32(ahcd, data);
        td->hwCBL = cpu_to_hc32(ahcd, cbl);
        td->hwNextTD = cpu_to_hc32(ahcd, td_pt->td_dma);

        /* append to queue */
        list_add_tail(&td->td_list, &td->ed->td_list);

        /* hash it for later reverse mapping */
        hash = TD_HASH_FUNC(td->td_dma);
        td->td_hash = ahcd->td_hash[hash];
        ahcd->td_hash[hash] = td;

        /* HC might read the TD (or cachelines) right away ... */
        wmb();
        td->ed->hwTailP = td->hwNextTD;
}
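
/* Illustrative sketch, not part of the original file: the "reverse
 * mapping" hash filled in td_fill() above lets done-list processing
 * turn a TD's dma address back into its struct td.  Assuming that use,
 * a minimal lookup would walk the per-bucket chain:
 */
#if 0   /* sketch only */
static struct td *dma_to_td(struct admhcd *ahcd, dma_addr_t td_dma)
{
        struct td *td;

        td = ahcd->td_hash[TD_HASH_FUNC(td_dma)];
        while (td && td->td_dma != td_dma)
                td = td->td_hash;       /* next TD in this hash bucket */
        return td;
}
#endif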

/*-------------------------------------------------------------------------*/

/* Prepare all TDs of a transfer, and queue them onto the ED.
 * Caller guarantees HC is active.
 * Usually the ED is already on the schedule, so TDs might be
 * processed as soon as they're queued.
 */
static void td_submit_urb(struct admhcd *ahcd, struct urb *urb)
{
        struct urb_priv *urb_priv = urb->hcpriv;
        dma_addr_t      data;
        int             data_len = urb->transfer_buffer_length;
        int             cnt = 0;
        u32             info = 0;
        int             is_out = usb_pipeout(urb->pipe);
        u32             toggle = 0;

        /* OHCI handles the bulk/interrupt data toggles itself.  We just
         * use the device toggle bits for resetting, and rely on the fact
         * that resetting toggle is meaningless if the endpoint is active.
         */

        if (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), is_out)) {
                toggle = TD_T_CARRY;
        } else {
                toggle = TD_T_DATA0;
                usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
                        is_out, 1);
        }

        urb_priv->td_idx = 0;
        list_add(&urb_priv->pending, &ahcd->pending);

        if (data_len)
                data = urb->transfer_dma;
        else
                data = 0;

        /* NOTE:  TD_CC is set so we can tell which TDs the HC processed by
         * using TD_CC_GET, as well as by seeing them on the done list.
         * (CC = NotAccessed ... 0x0F, or 0x0E in PSWs for ISO.)
         */
        switch (urb_priv->ed->type) {
        case PIPE_INTERRUPT:
                info = is_out
                        ? TD_T_CARRY | TD_SCC_NOTACCESSED | TD_DP_OUT
                        : TD_T_CARRY | TD_SCC_NOTACCESSED | TD_DP_IN;

                /* setup service interval and starting frame number */
                info |= (urb->start_frame & TD_FN_MASK);
                info |= (urb->interval & TD_ISI_MASK) << TD_ISI_SHIFT;

                td_fill(ahcd, info, data, data_len, urb, cnt);
                cnt++;

                admhcd_to_hcd(ahcd)->self.bandwidth_int_reqs++;
                break;

        case PIPE_BULK:
                info = is_out
                        ? TD_SCC_NOTACCESSED | TD_DP_OUT
                        : TD_SCC_NOTACCESSED | TD_DP_IN;

                /* TDs _could_ transfer up to 8K each */
                while (data_len > TD_DATALEN_MAX) {
                        td_fill(ahcd, info | ((cnt) ? TD_T_CARRY : toggle),
                                data, TD_DATALEN_MAX, urb, cnt);
                        data += TD_DATALEN_MAX;
                        data_len -= TD_DATALEN_MAX;
                        cnt++;
                }

                td_fill(ahcd, info | ((cnt) ? TD_T_CARRY : toggle), data,
                        data_len, urb, cnt);
                cnt++;

                if ((urb->transfer_flags & URB_ZERO_PACKET)
                                && (cnt < urb_priv->td_cnt)) {
                        td_fill(ahcd, info | ((cnt) ? TD_T_CARRY : toggle),
                                0, 0, urb, cnt);
                        cnt++;
                }
                break;

        /* control manages DATA0/DATA1 toggle per-request; SETUP resets it,
         * any DATA phase works normally, and the STATUS ack is special.
         */
        case PIPE_CONTROL:
                /* fill a TD for the setup */
                info = TD_SCC_NOTACCESSED | TD_DP_SETUP | TD_T_DATA0;
                td_fill(ahcd, info, urb->setup_dma, 8, urb, cnt++);

                if (data_len > 0) {
                        /* fill a TD for the data */
                        info = TD_SCC_NOTACCESSED | TD_T_DATA1;
                        info |= is_out ? TD_DP_OUT : TD_DP_IN;
                        /* NOTE:  mishandles transfers >8K, some >4K */
                        td_fill(ahcd, info, data, data_len, urb, cnt++);
                }

                /* fill a TD for the ACK */
                info = (is_out || data_len == 0)
                        ? TD_SCC_NOTACCESSED | TD_DP_IN | TD_T_DATA1
                        : TD_SCC_NOTACCESSED | TD_DP_OUT | TD_T_DATA1;
                td_fill(ahcd, info, data, 0, urb, cnt++);

                break;

        /* ISO has no retransmit, so no toggle;
         * Each TD could handle multiple consecutive frames (interval 1);
         * we could often reduce the number of TDs here.
         */
        case PIPE_ISOCHRONOUS:
                info = is_out
                        ? TD_T_CARRY | TD_SCC_NOTACCESSED | TD_DP_OUT
                        : TD_T_CARRY | TD_SCC_NOTACCESSED | TD_DP_IN;

                for (cnt = 0; cnt < urb->number_of_packets; cnt++) {
                        int frame = urb->start_frame;

                        frame += cnt * urb->interval;
                        frame &= TD_FN_MASK;
                        td_fill(ahcd, info | frame,
                                data + urb->iso_frame_desc[cnt].offset,
                                urb->iso_frame_desc[cnt].length, urb, cnt);
                }
                admhcd_to_hcd(ahcd)->self.bandwidth_isoc_reqs++;
                break;
        }

        if (urb_priv->td_cnt != cnt)
                admhc_err(ahcd, "bad number of tds created for urb %p\n", urb);
}
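
/* Illustrative sketch, not part of the original file: the consistency
 * check above assumes the enqueue path sized urb_priv->td_cnt to match
 * what td_submit_urb() creates.  For a bulk URB that plausibly means one
 * TD per full TD_DATALEN_MAX chunk, one remainder TD (always queued, even
 * zero-length), plus an extra TD when URB_ZERO_PACKET applies.  The
 * maxpacket parameter is an assumption for illustration:
 */
#if 0   /* sketch only */
static int bulk_td_count(struct urb *urb, int maxpacket)
{
        int len = urb->transfer_buffer_length;
        int cnt = 1;    /* final (possibly zero-length) remainder TD */

        while (len > TD_DATALEN_MAX) {  /* mirrors the loop above */
                len -= TD_DATALEN_MAX;
                cnt++;
        }
        if ((urb->transfer_flags & URB_ZERO_PACKET) &&
                        urb->transfer_buffer_length % maxpacket == 0)
                cnt++;  /* trailing zero-length packet */
        return cnt;
}
#endif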

/*-------------------------------------------------------------------------*
 * Done List handling functions
 *-------------------------------------------------------------------------*/

/* calculate transfer length/status and update the urb
 * PRECONDITION:  irqsafe (only for urb->status locking)
 */
static int td_done(struct admhcd *ahcd, struct urb *urb, struct td *td)
{
        struct urb_priv *urb_priv = urb->hcpriv;
        u32     info;
        u32     bl;
        u32     tdDBP;
        int     type = usb_pipetype(urb->pipe);
        int     cc;

        info = hc32_to_cpup(ahcd, &td->hwINFO);
        tdDBP = hc32_to_cpup(ahcd, &td->hwDBP);
        bl = TD_BL_GET(hc32_to_cpup(ahcd, &td->hwCBL));
        cc = TD_CC_GET(info);

        /* ISO ... drivers see per-TD length/status */
        if (type == PIPE_ISOCHRONOUS) {
                /* TODO */
                int     dlen = 0;

                /* NOTE:  assumes FC in tdINFO == 0, and that
                 * only the first of 0..MAXPSW psws is used.
                 */
#if 0
                if (tdINFO & TD_CC)     /* hc didn't touch? */
                        return;
#endif
                if (usb_pipeout(urb->pipe))
                        dlen = urb->iso_frame_desc[td->index].length;
                else {
                        /* short reads are always OK for ISO */
                        if (cc == TD_CC_DATAUNDERRUN)
                                cc = TD_CC_NOERROR;
                        dlen = tdDBP - td->data_dma + bl;
                }

                urb->actual_length += dlen;
                urb->iso_frame_desc[td->index].actual_length = dlen;
                urb->iso_frame_desc[td->index].status = cc_to_error[cc];

                if (cc != TD_CC_NOERROR)
                        admhc_vdbg(ahcd,
                                "urb %p iso td %p (%d) len %d cc %d\n",
                                urb, td, 1 + td->index, dlen, cc);

        /* BULK, INT, CONTROL ... drivers see aggregate length/status,
         * except that "setup" bytes aren't counted and "short" transfers
         * might not be reported as errors.
         */
        } else {
                /* update packet status if needed (short is normally ok) */
                if (cc == TD_CC_DATAUNDERRUN
                                && !(urb->transfer_flags & URB_SHORT_NOT_OK))
                        cc = TD_CC_NOERROR;

                if (cc != TD_CC_NOERROR && cc < TD_CC_HCD0) {
                        spin_lock(&urb->lock);
                        if (urb->status == -EINPROGRESS)
                                urb->status = cc_to_error[cc];
                        spin_unlock(&urb->lock);
                }

                /* count all non-empty packets except control SETUP packet */
                if ((type != PIPE_CONTROL || td->index != 0) && tdDBP != 0) {
                        urb->actual_length += tdDBP - td->data_dma + bl;
                }

                if (cc != TD_CC_NOERROR && cc < TD_CC_HCD0)
                        admhc_vdbg(ahcd,
                                "urb %p td %p (%d) cc %d, len=%d/%d\n",
                                urb, td, td->index, cc,
                                urb->actual_length,
                                urb->transfer_buffer_length);
        }

        list_del(&td->td_list);
        urb_priv->td_idx++;

        return cc;
}
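
/* Illustrative sketch, not part of the original file: td_done() above
 * maps condition codes to URB status values through cc_to_error[],
 * defined in the driver's shared header.  Assuming OHCI-style condition
 * codes, a few representative entries would look like this:
 */
#if 0   /* sketch only */
static const int cc_to_error_example[16] = {
        [TD_CC_NOERROR]      = 0,               /* transfer completed */
        [TD_CC_STALL]        = -EPIPE,          /* endpoint stalled */
        [TD_CC_DATAUNDERRUN] = -EREMOTEIO,      /* short transfer */
        /* ... remaining codes map to other -E* values ... */
};
#endif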

/*-------------------------------------------------------------------------*/

static inline struct td *
ed_halted(struct admhcd *ahcd, struct td *td, int cc, struct td *rev)
{
        struct urb              *urb = td->urb;
        struct ed               *ed = td->ed;
        struct list_head        *tmp = td->td_list.next;
        __hc32                  toggle = ed->hwHeadP & cpu_to_hc32(ahcd, ED_C);

        admhc_dump_ed(ahcd, "ed halted", td->ed, 1);
        /* clear ed halt; this is the td that caused it, but keep it inactive
         * until its urb->complete() has a chance to clean up.
         */
        ed->hwINFO |= cpu_to_hc32(ahcd, ED_SKIP);
        wmb();
        ed->hwHeadP &= ~cpu_to_hc32(ahcd, ED_H);

        /* put any later tds from this urb onto the donelist, after 'td',
         * order won't matter here: no errors, and nothing was transferred.
         * also patch the ed so it looks as if those tds completed normally.
         */
        while (tmp != &ed->td_list) {
                struct td       *next;
                __hc32          info;

                next = list_entry(tmp, struct td, td_list);
                tmp = next->td_list.next;

                if (next->urb != urb)
                        break;

                /* NOTE: if multi-td control DATA segments get supported,
                 * this urb had one of them, this td wasn't the last td
                 * in that segment (TD_R clear), this ed halted because
                 * of a short read, _and_ URB_SHORT_NOT_OK is clear ...
                 * then we need to leave the control STATUS packet queued
                 * and clear ED_SKIP.
                 */
                info = next->hwINFO;
#if 0           /* FIXME */
                info |= cpu_to_hc32(ahcd, TD_DONE);
#endif
                info &= ~cpu_to_hc32(ahcd, TD_CC);
                next->hwINFO = info;

                next->next_dl_td = rev;
                rev = next;

                ed->hwHeadP = next->hwNextTD | toggle;
        }

        /* help for troubleshooting:  report anything that
         * looks odd ... that doesn't include protocol stalls
         * (or maybe some other things)
         */
        switch (cc) {
        case TD_CC_DATAUNDERRUN:
                if ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0)
                        break;
                /* fallthrough */
        case TD_CC_STALL:
                if (usb_pipecontrol(urb->pipe))
                        break;
                /* fallthrough */
        default:
                admhc_dbg(ahcd,
                        "urb %p path %s ep%d%s %08x cc %d --> status %d\n",
                        urb, urb->dev->devpath,
                        usb_pipeendpoint(urb->pipe),
                        usb_pipein(urb->pipe) ? "in" : "out",
                        hc32_to_cpu(ahcd, td->hwINFO),
                        cc, cc_to_error[cc]);
        }

        return rev;
}

/*-------------------------------------------------------------------------*/

/* there are some urbs/eds to unlink; called in_irq(), with HCD locked */
static void
finish_unlinks(struct admhcd *ahcd, u16 tick)
{
        struct ed       *ed, **last;

rescan_all:
        for (last = &ahcd->ed_rm_list, ed = *last; ed != NULL; ed = *last) {
                struct list_head        *entry, *tmp;
                int                     completed, modified;
                __hc32                  *prev;

                /* only take off EDs that the HC isn't using, accounting for
                 * frame counter wraps and EDs with partially retired TDs
                 */
                if (likely(HC_IS_RUNNING(admhcd_to_hcd(ahcd)->state))) {
                        if (tick_before(tick, ed->tick)) {
skip_ed:
                                last = &ed->ed_rm_next;
                                continue;
                        }
#if 0
                        if (!list_empty(&ed->td_list)) {
                                struct td       *td;
                                u32             head;

                                td = list_entry(ed->td_list.next, struct td,
                                                        td_list);
                                head = hc32_to_cpu(ahcd, ed->hwHeadP) &
                                                                TD_MASK;

                                /* INTR_WDH may need to clean up first */
                                if (td->td_dma != head)
                                        goto skip_ed;
                        }
#endif
                }

                /* reentrancy:  if we drop the schedule lock, someone might
                 * have modified this list.  normally it's just prepending
                 * entries (which we'd ignore), but paranoia won't hurt.
                 */
                *last = ed->ed_rm_next;
                ed->ed_rm_next = NULL;
                modified = 0;

                /* unlink urbs as requested, but rescan the list after
                 * we call a completion since it might have unlinked
                 * another (earlier) urb
                 *
                 * When we get here, the HC doesn't see this ed.  But it
                 * must not be rescheduled until all completed URBs have
                 * been given back to the driver.
                 */
rescan_this:
                completed = 0;
                prev = &ed->hwHeadP;
                list_for_each_safe(entry, tmp, &ed->td_list) {
                        struct td       *td;
                        struct urb      *urb;
                        struct urb_priv *urb_priv;
                        __hc32          savebits;

                        td = list_entry(entry, struct td, td_list);
                        urb = td->urb;
                        urb_priv = td->urb->hcpriv;

                        if (urb->status == -EINPROGRESS) {
                                prev = &td->hwNextTD;
                                continue;
                        }

                        if (urb_priv == NULL)
                                continue;

                        /* patch pointer hc uses */
                        savebits = *prev & ~cpu_to_hc32(ahcd, TD_MASK);
                        *prev = td->hwNextTD | savebits;

                        /* HC may have partly processed this TD */
#ifdef ADMHC_VERBOSE_DEBUG
                        urb_print(ahcd, urb, "PARTIAL", 0);
#endif
                        td_done(ahcd, urb, td);

                        /* if URB is done, clean up */
                        if (urb_priv->td_idx == urb_priv->td_cnt) {
                                modified = completed = 1;
                                finish_urb(ahcd, urb);
                        }
                }
                if (completed && !list_empty(&ed->td_list))
                        goto rescan_this;

                /* ED's now officially unlinked, hc doesn't see */
                ed->state = ED_IDLE;
                ed->hwHeadP &= ~cpu_to_hc32(ahcd, ED_H);
                ed->hwNextED = 0;
                wmb();
                ed->hwINFO &= ~cpu_to_hc32(ahcd, ED_SKIP | ED_DEQUEUE);

                /* but if there's work queued, reschedule */
                if (!list_empty(&ed->td_list)) {
                        if (HC_IS_RUNNING(admhcd_to_hcd(ahcd)->state))
                                ed_schedule(ahcd, ed);
                }

                if (modified)
                        goto rescan_all;
        }
}

/*-------------------------------------------------------------------------*/

/*
 * Process normal completions (error or success) and clean the schedules.
 *
 * This is the main path for handing urbs back to drivers.  The only other
 * path is finish_unlinks(), which unlinks URBs using ed_rm_list, instead of
 * scanning the (re-reversed) donelist as this does.
 */

static void ed_unhalt(struct admhcd *ahcd, struct ed *ed, struct urb *urb)
{
        struct list_head *entry, *tmp;
        __hc32 toggle = ed->hwHeadP & cpu_to_hc32(ahcd, ED_C);

#ifdef ADMHC_VERBOSE_DEBUG
        admhc_dump_ed(ahcd, "UNHALT", ed, 0);
#endif
        /* clear ed halt; this is the td that caused it, but keep it inactive
         * until its urb->complete() has a chance to clean up.
         */
        ed->hwINFO |= cpu_to_hc32(ahcd, ED_SKIP);
        wmb();
        ed->hwHeadP &= ~cpu_to_hc32(ahcd, ED_H);

        list_for_each_safe(entry, tmp, &ed->td_list) {
                struct td *td = list_entry(entry, struct td, td_list);
                __hc32 info;

                if (td->urb != urb)
                        break;

                info = td->hwINFO;
                info &= ~cpu_to_hc32(ahcd, TD_CC | TD_OWN);
                td->hwINFO = info;

                ed->hwHeadP = td->hwNextTD | toggle;
                wmb();
        }
}

static void ed_intr_refill(struct admhcd *ahcd, struct ed *ed)
{
        __hc32 toggle = ed->hwHeadP & cpu_to_hc32(ahcd, ED_C);

        ed->hwHeadP = ed->hwTailP | toggle;
}


static inline int is_ed_halted(struct admhcd *ahcd, struct ed *ed)
{
        return ((hc32_to_cpup(ahcd, &ed->hwHeadP) & ED_H) == ED_H);
}

static inline int is_td_halted(struct admhcd *ahcd, struct ed *ed,
                struct td *td)
{
        return ((hc32_to_cpup(ahcd, &ed->hwHeadP) & TD_MASK) ==
                (hc32_to_cpup(ahcd, &td->hwNextTD) & TD_MASK));
}

static void ed_update(struct admhcd *ahcd, struct ed *ed)
{
        struct list_head *entry, *tmp;

#ifdef ADMHC_VERBOSE_DEBUG
        admhc_dump_ed(ahcd, "UPDATE", ed, 1);
#endif

        list_for_each_safe(entry, tmp, &ed->td_list) {
                struct td *td = list_entry(entry, struct td, td_list);
                struct urb *urb = td->urb;
                struct urb_priv *urb_priv = urb->hcpriv;
                int cc;

                if (hc32_to_cpup(ahcd, &td->hwINFO) & TD_OWN)
                        break;

                /* update URB's length and status from TD */
                cc = td_done(ahcd, urb, td);
                if (is_ed_halted(ahcd, ed) && is_td_halted(ahcd, ed, td))
                        ed_unhalt(ahcd, ed, urb);

                if (ed->type == PIPE_INTERRUPT)
                        ed_intr_refill(ahcd, ed);

                /* If all this urb's TDs are done, call complete() */
                if (urb_priv->td_idx == urb_priv->td_cnt)
                        finish_urb(ahcd, urb);

                /* clean schedule:  unlink EDs that are no longer busy */
                if (list_empty(&ed->td_list)) {
                        if (ed->state == ED_OPER)
                                start_ed_unlink(ahcd, ed);

                /* ... reenabling halted EDs only after fault cleanup */
                } else if ((ed->hwINFO & cpu_to_hc32(ahcd,
                                                ED_SKIP | ED_DEQUEUE))
                                        == cpu_to_hc32(ahcd, ED_SKIP)) {
                        td = list_entry(ed->td_list.next, struct td, td_list);
#if 0
                        if (!(td->hwINFO & cpu_to_hc32(ahcd, TD_DONE))) {
                                ed->hwINFO &= ~cpu_to_hc32(ahcd, ED_SKIP);
                                /* ... hc may need waking-up */
                                switch (ed->type) {
                                case PIPE_CONTROL:
                                        admhc_writel(ahcd, OHCI_CLF,
                                                &ahcd->regs->cmdstatus);
                                        break;
                                case PIPE_BULK:
                                        admhc_writel(ahcd, OHCI_BLF,
                                                &ahcd->regs->cmdstatus);
                                        break;
                                }
                        }
#else
                        if ((td->hwINFO & cpu_to_hc32(ahcd, TD_OWN)))
                                ed->hwINFO &= ~cpu_to_hc32(ahcd, ED_SKIP);
#endif
                }

        }
}

/* there are some tds completed; called in_irq(), with HCD locked */
static void admhc_td_complete(struct admhcd *ahcd)
{
        struct ed       *ed;

        for (ed = ahcd->ed_head; ed; ed = ed->ed_next) {
                if (ed->state != ED_OPER)
                        continue;

                ed_update(ahcd, ed);
        }
}