[adm5120] add copyright header to the USB driver's files
target/linux/adm5120/files/drivers/usb/host/adm5120-q.c
/*
 * ADM5120 HCD (Host Controller Driver) for USB
 *
 * Copyright (C) 2007 Gabor Juhos <juhosg at openwrt.org>
 *
 * This file was derived from: drivers/usb/host/ohci-q.c
 *   (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
 *   (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
 *
 * This file is licensed under the GPL.
 */

#include <linux/irq.h>

/*-------------------------------------------------------------------------*/

/*
 * URB goes back to driver, and isn't reissued.
 * It's completely gone from HC data structures.
 * PRECONDITION:  ahcd lock held, irqs blocked.
 */
static void
finish_urb(struct admhcd *ahcd, struct urb *urb)
__releases(ahcd->lock)
__acquires(ahcd->lock)
{
	urb_priv_free(ahcd, urb->hcpriv);
	urb->hcpriv = NULL;

	spin_lock(&urb->lock);
	if (likely(urb->status == -EINPROGRESS))
		urb->status = 0;

	/* report short control reads right even though the data TD always
	 * has TD_R set.  (much simpler, but creates the 1-td limit.)
	 */
	if (unlikely(urb->transfer_flags & URB_SHORT_NOT_OK)
			&& unlikely(usb_pipecontrol(urb->pipe))
			&& urb->actual_length < urb->transfer_buffer_length
			&& usb_pipein(urb->pipe)
			&& urb->status == 0) {
		urb->status = -EREMOTEIO;
#ifdef ADMHC_VERBOSE_DEBUG
		urb_print(ahcd, urb, "SHORT", usb_pipeout(urb->pipe));
#endif
	}
	spin_unlock(&urb->lock);

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_ISOCHRONOUS:
		admhcd_to_hcd(ahcd)->self.bandwidth_isoc_reqs--;
		break;
	case PIPE_INTERRUPT:
		admhcd_to_hcd(ahcd)->self.bandwidth_int_reqs--;
		break;
	}

#ifdef ADMHC_VERBOSE_DEBUG
	urb_print(ahcd, urb, "RET", usb_pipeout(urb->pipe));
#endif

	/* urb->complete() can reenter this HCD */
	spin_unlock(&ahcd->lock);
	usb_hcd_giveback_urb(admhcd_to_hcd(ahcd), urb);
	spin_lock(&ahcd->lock);
}


/*-------------------------------------------------------------------------*
 * ED handling functions
 *-------------------------------------------------------------------------*/

#if 0   /* FIXME */
/* search for the right schedule branch to use for a periodic ed.
 * does some load balancing; returns the branch, or negative errno.
 */
static int balance(struct admhcd *ahcd, int interval, int load)
{
	int	i, branch = -ENOSPC;

	/* iso periods can be huge; iso tds specify frame numbers */
	if (interval > NUM_INTS)
		interval = NUM_INTS;

	/* search for the least loaded schedule branch of that period
	 * that has enough bandwidth left unreserved.
	 */
	for (i = 0; i < interval; i++) {
		if (branch < 0 || ahcd->load[branch] > ahcd->load[i]) {
			int	j;

			/* usb 1.1 says 90% of one frame */
			for (j = i; j < NUM_INTS; j += interval) {
				if ((ahcd->load[j] + load) > 900)
					break;
			}
			if (j < NUM_INTS)
				continue;
			branch = i;
		}
	}
	return branch;
}
#endif

/*-------------------------------------------------------------------------*/

#if 0   /* FIXME */
/* both iso and interrupt requests have periods; this routine puts them
 * into the schedule tree in the appropriate place.  most iso devices use
 * 1msec periods, but that's not required.
 */
static void periodic_link(struct admhcd *ahcd, struct ed *ed)
{
	unsigned	i;

	admhc_vdbg(ahcd, "link %sed %p branch %d [%dus.], interval %d\n",
		(ed->hwINFO & cpu_to_hc32(ahcd, ED_ISO)) ? "iso " : "",
		ed, ed->branch, ed->load, ed->interval);

	for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
		struct ed	**prev = &ahcd->periodic[i];
		__hc32		*prev_p = &ahcd->hcca->int_table[i];
		struct ed	*here = *prev;

		/* sorting each branch by period (slow before fast)
		 * lets us share the faster parts of the tree.
		 * (plus maybe: put interrupt eds before iso)
		 */
		while (here && ed != here) {
			if (ed->interval > here->interval)
				break;
			prev = &here->ed_next;
			prev_p = &here->hwNextED;
			here = *prev;
		}
		if (ed != here) {
			ed->ed_next = here;
			if (here)
				ed->hwNextED = *prev_p;
			wmb();
			*prev = ed;
			*prev_p = cpu_to_hc32(ahcd, ed->dma);
			wmb();
		}
		ahcd->load[i] += ed->load;
	}
	admhcd_to_hcd(ahcd)->self.bandwidth_allocated += ed->load / ed->interval;
}
#endif

/* link an ed into the HC chain */
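/* the new ED is appended after the current tail for its transfer type;
 * its own links are set up before the old tail's hwNextED is patched,
 * so the HC never follows a half-initialized entry.  DMA is (re)enabled
 * afterwards in case the controller had nothing left to do.
 */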
static int ed_schedule(struct admhcd *ahcd, struct ed *ed)
{
	struct ed *old_tail;

	if (admhcd_to_hcd(ahcd)->state == HC_STATE_QUIESCING)
		return -EAGAIN;

	ed->state = ED_OPER;

	old_tail = ahcd->ed_tails[ed->type];

	ed->ed_next = old_tail->ed_next;
	if (ed->ed_next) {
		ed->ed_next->ed_prev = ed;
		ed->hwNextED = cpu_to_hc32(ahcd, ed->ed_next->dma);
	}
	ed->ed_prev = old_tail;

	old_tail->ed_next = ed;
	old_tail->hwNextED = cpu_to_hc32(ahcd, ed->dma);

	ahcd->ed_tails[ed->type] = ed;

	admhc_dma_enable(ahcd);

	return 0;
}
/*-------------------------------------------------------------------------*/

#if 0   /* FIXME */
/* scan the periodic table to find and unlink this ED */
static void periodic_unlink(struct admhcd *ahcd, struct ed *ed)
{
	int	i;

	for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
		struct ed	*temp;
		struct ed	**prev = &ahcd->periodic[i];
		__hc32		*prev_p = &ahcd->hcca->int_table[i];

		while (*prev && (temp = *prev) != ed) {
			prev_p = &temp->hwNextED;
			prev = &temp->ed_next;
		}
		if (*prev) {
			*prev_p = ed->hwNextED;
			*prev = ed->ed_next;
		}
		ahcd->load[i] -= ed->load;
	}

	admhcd_to_hcd(ahcd)->self.bandwidth_allocated -= ed->load / ed->interval;
	admhc_vdbg(ahcd, "unlink %sed %p branch %d [%dus.], interval %d\n",
		(ed->hwINFO & cpu_to_hc32(ahcd, ED_ISO)) ? "iso " : "",
		ed, ed->branch, ed->load, ed->interval);
}
#endif

/* unlink an ed from the HC chain.
 * just the link to the ed is unlinked.
 * the link from the ed still points to another operational ed or 0
 * so the HC can eventually finish the processing of the unlinked ed
 * (assuming it already started that, which needn't be true).
 *
 * ED_UNLINK is a transient state: the HC may still see this ED, but soon
 * it won't.  ED_SKIP means the HC will finish its current transaction,
 * but won't start anything new.  The TD queue may still grow; device
 * drivers don't know about this HCD-internal state.
 *
 * When the HC can't see the ED, something changes ED_UNLINK to one of:
 *
 *  - ED_OPER: when there's any request queued, the ED gets rescheduled
 *    immediately.  HC should be working on them.
 *
 *  - ED_IDLE: when there's no TD queue, there's no reason for the HC
 *    to care about this ED; safe to disable the endpoint.
 *
 * When finish_unlinks() runs later, after SOF interrupt, it will often
 * complete one or more URB unlinks before making that state change.
 */
static void ed_deschedule(struct admhcd *ahcd, struct ed *ed)
{
#ifdef ADMHC_VERBOSE_DEBUG
	admhc_dump_ed(ahcd, "ED-DESCHED", ed, 1);
#endif

	ed->hwINFO |= cpu_to_hc32(ahcd, ED_SKIP);
	wmb();
	ed->state = ED_UNLINK;

	/* remove this ED from the HC list */
	ed->ed_prev->hwNextED = ed->hwNextED;

	/* and remove it from our list also */
	ed->ed_prev->ed_next = ed->ed_next;

	if (ed->ed_next)
		ed->ed_next->ed_prev = ed->ed_prev;

	if (ahcd->ed_tails[ed->type] == ed)
		ahcd->ed_tails[ed->type] = ed->ed_prev;
}

/*-------------------------------------------------------------------------*/

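/* create an ED together with its dummy TD; the dummy keeps hwHeadP equal
 * to hwTailP, so the HC sees an empty queue until td_fill() turns the
 * dummy into a real TD and links a fresh dummy behind it
 */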
static struct ed *ed_create(struct admhcd *ahcd, unsigned int type, u32 info)
{
	struct ed *ed;
	struct td *td;

	ed = ed_alloc(ahcd, GFP_ATOMIC);
	if (!ed)
		goto err;

	/* dummy td; end of td list for this ed */
	td = td_alloc(ahcd, GFP_ATOMIC);
	if (!td)
		goto err_free_ed;

	switch (type) {
	case PIPE_INTERRUPT:
		info |= ED_INT;
		break;
	case PIPE_ISOCHRONOUS:
		info |= ED_ISO;
		break;
	}

	ed->dummy = td;
	ed->state = ED_IDLE;
	ed->type = type;

	ed->hwINFO = cpu_to_hc32(ahcd, info);
	ed->hwTailP = cpu_to_hc32(ahcd, td->td_dma);
	ed->hwHeadP = ed->hwTailP;	/* ED_C, ED_H zeroed */

	return ed;

err_free_ed:
	ed_free(ahcd, ed);
err:
	return NULL;
}

/* get and maybe (re)init an endpoint. init _should_ be done only as part
 * of enumeration, usb_set_configuration() or usb_set_interface().
 */
static struct ed *ed_get(struct admhcd *ahcd, struct usb_host_endpoint *ep,
	struct usb_device *udev, unsigned int pipe, int interval)
{
	struct ed		*ed;
	unsigned long		flags;

	spin_lock_irqsave(&ahcd->lock, flags);

	ed = ep->hcpriv;
	if (!ed) {
		u32		info;

		/* FIXME: usbcore changes dev->devnum before SET_ADDRESS
		 * succeeds ... otherwise we wouldn't need "pipe".
		 */
		info = usb_pipedevice(pipe);
		info |= (ep->desc.bEndpointAddress & ~USB_DIR_IN) << ED_EN_SHIFT;
		info |= le16_to_cpu(ep->desc.wMaxPacketSize) << ED_MPS_SHIFT;
		if (udev->speed == USB_SPEED_FULL)
			info |= ED_SPEED_FULL;

		ed = ed_create(ahcd, usb_pipetype(pipe), info);
		if (ed)
			ep->hcpriv = ed;
	}

	spin_unlock_irqrestore(&ahcd->lock, flags);

	return ed;
}
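
/* NOTE: a typical caller (the HCD's urb_enqueue path, for example) would
 * look up or create the ED roughly like this -- a sketch, not code taken
 * from this driver:
 *
 *	ed = ed_get(ahcd, ep, urb->dev, urb->pipe, urb->interval);
 *	if (!ed)
 *		return -ENOMEM;
 *
 * and only then allocate urb_priv and queue TDs via td_submit_urb().
 */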

/*-------------------------------------------------------------------------*/

/* request unlinking of an endpoint from an operational HC.
 * put the ep on the rm_list; real work is done at the next
 * start of frame (SOFI) hardware interrupt.
 * caller guarantees HCD is running, so hardware access is safe,
 * and that ed->state is ED_OPER
 */
static void start_ed_unlink(struct admhcd *ahcd, struct ed *ed)
{
#ifdef ADMHC_VERBOSE_DEBUG
	admhc_dump_ed(ahcd, "ED-UNLINK", ed, 1);
#endif

	ed->hwINFO |= cpu_to_hc32(ahcd, ED_DEQUEUE);
	ed_deschedule(ahcd, ed);

	/* add this ED into the remove list */
	ed->ed_rm_next = ahcd->ed_rm_list;
	ahcd->ed_rm_list = ed;

	/* enable SOF interrupt */
	admhc_intr_ack(ahcd, ADMHC_INTR_SOFI);
	admhc_intr_enable(ahcd, ADMHC_INTR_SOFI);
	/* flush those writes */
	admhc_writel_flush(ahcd);

	/* SOF interrupt might get delayed; record the frame counter value that
	 * indicates when the HC isn't looking at it, so concurrent unlinks
	 * behave.  frame_no wraps every 2^16 msec, and changes right before
	 * SOF is triggered.
	 */
	ed->tick = admhc_frame_no(ahcd) + 1;
}

/*-------------------------------------------------------------------------*
 * TD handling functions
 *-------------------------------------------------------------------------*/

/* enqueue next TD for this URB (OHCI spec 5.2.8.2) */
static void
td_fill(struct admhcd *ahcd, u32 info, dma_addr_t data, int len,
	struct urb *urb, int index)
{
	struct td		*td, *td_pt;
	struct urb_priv		*urb_priv = urb->hcpriv;
	int			hash;
	u32			cbl = 0;

#if 1
	if (index == (urb_priv->td_cnt - 1) &&
			((urb->transfer_flags & URB_NO_INTERRUPT) == 0))
		cbl |= TD_IE;
#else
	if (index == (urb_priv->td_cnt - 1))
		cbl |= TD_IE;
#endif

	/* use this td as the next dummy */
	td_pt = urb_priv->td[index];

	/* fill the old dummy TD */
	td = urb_priv->td[index] = urb_priv->ed->dummy;
	urb_priv->ed->dummy = td_pt;

	td->ed = urb_priv->ed;
	td->next_dl_td = NULL;
	td->index = index;
	td->urb = urb;
	td->data_dma = data;
	if (!len)
		data = 0;

	if (data)
		cbl |= (len & TD_BL_MASK);

	info |= TD_OWN;

	/* setup hardware specific fields */
	td->hwINFO = cpu_to_hc32(ahcd, info);
	td->hwDBP = cpu_to_hc32(ahcd, data);
	td->hwCBL = cpu_to_hc32(ahcd, cbl);
	td->hwNextTD = cpu_to_hc32(ahcd, td_pt->td_dma);

	/* append to queue */
	list_add_tail(&td->td_list, &td->ed->td_list);

	/* hash it for later reverse mapping */
	hash = TD_HASH_FUNC(td->td_dma);
	td->td_hash = ahcd->td_hash[hash];
	ahcd->td_hash[hash] = td;

	/* HC might read the TD (or cachelines) right away ... */
	wmb();
	td->ed->hwTailP = td->hwNextTD;
}

/*-------------------------------------------------------------------------*/

/* Prepare all TDs of a transfer, and queue them onto the ED.
 * Caller guarantees HC is active.
 * Usually the ED is already on the schedule, so TDs might be
 * processed as soon as they're queued.
 */
static void td_submit_urb(struct admhcd *ahcd, struct urb *urb)
{
	struct urb_priv	*urb_priv = urb->hcpriv;
	dma_addr_t	data;
	int		data_len = urb->transfer_buffer_length;
	int		cnt = 0;
	u32		info = 0;
	int		is_out = usb_pipeout(urb->pipe);
	u32		toggle = 0;

	/* OHCI handles the bulk/interrupt data toggles itself.  We just
	 * use the device toggle bits for resetting, and rely on the fact
	 * that resetting toggle is meaningless if the endpoint is active.
	 */
	if (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), is_out)) {
		toggle = TD_T_CARRY;
	} else {
		toggle = TD_T_DATA0;
		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			is_out, 1);
	}

	urb_priv->td_idx = 0;
	list_add(&urb_priv->pending, &ahcd->pending);

	if (data_len)
		data = urb->transfer_dma;
	else
		data = 0;

	/* NOTE:  TD_CC is set so we can tell which TDs the HC processed by
	 * using TD_CC_GET, as well as by seeing them on the done list.
	 * (CC = NotAccessed ... 0x0F, or 0x0E in PSWs for ISO.)
	 */
	switch (urb_priv->ed->type) {
	case PIPE_INTERRUPT:
		info = is_out
			? TD_T_CARRY | TD_SCC_NOTACCESSED | TD_DP_OUT
			: TD_T_CARRY | TD_SCC_NOTACCESSED | TD_DP_IN;

		/* setup service interval and starting frame number */
		info |= (urb->start_frame & TD_FN_MASK);
		info |= (urb->interval & TD_ISI_MASK) << TD_ISI_SHIFT;

		td_fill(ahcd, info, data, data_len, urb, cnt);
		cnt++;

		admhcd_to_hcd(ahcd)->self.bandwidth_int_reqs++;
		break;

	case PIPE_BULK:
		info = is_out
			? TD_SCC_NOTACCESSED | TD_DP_OUT
			: TD_SCC_NOTACCESSED | TD_DP_IN;

		/* TDs _could_ transfer up to 8K each */
		while (data_len > TD_DATALEN_MAX) {
			td_fill(ahcd, info | (cnt ? TD_T_CARRY : toggle),
				data, TD_DATALEN_MAX, urb, cnt);
			data += TD_DATALEN_MAX;
			data_len -= TD_DATALEN_MAX;
			cnt++;
		}

		td_fill(ahcd, info | (cnt ? TD_T_CARRY : toggle), data,
			data_len, urb, cnt);
		cnt++;

		if ((urb->transfer_flags & URB_ZERO_PACKET)
				&& (cnt < urb_priv->td_cnt)) {
			td_fill(ahcd, info | (cnt ? TD_T_CARRY : toggle),
				0, 0, urb, cnt);
			cnt++;
		}
		break;

	/* control manages DATA0/DATA1 toggle per-request; SETUP resets it,
	 * any DATA phase works normally, and the STATUS ack is special.
	 */
	case PIPE_CONTROL:
		/* fill a TD for the setup */
		info = TD_SCC_NOTACCESSED | TD_DP_SETUP | TD_T_DATA0;
		td_fill(ahcd, info, urb->setup_dma, 8, urb, cnt++);

		if (data_len > 0) {
			/* fill a TD for the data */
			info = TD_SCC_NOTACCESSED | TD_T_DATA1;
			info |= is_out ? TD_DP_OUT : TD_DP_IN;
			/* NOTE:  mishandles transfers >8K, some >4K */
			td_fill(ahcd, info, data, data_len, urb, cnt++);
		}

		/* fill a TD for the ACK */
		info = (is_out || data_len == 0)
			? TD_SCC_NOTACCESSED | TD_DP_IN | TD_T_DATA1
			: TD_SCC_NOTACCESSED | TD_DP_OUT | TD_T_DATA1;
		td_fill(ahcd, info, data, 0, urb, cnt++);

		break;

	/* ISO has no retransmit, so no toggle;
	 * Each TD could handle multiple consecutive frames (interval 1);
	 * we could often reduce the number of TDs here.
	 */
	case PIPE_ISOCHRONOUS:
		info = TD_SCC_NOTACCESSED;
		for (cnt = 0; cnt < urb->number_of_packets; cnt++) {
			int frame = urb->start_frame;

			frame += cnt * urb->interval;
			frame &= TD_FN_MASK;
			td_fill(ahcd, info | frame,
				data + urb->iso_frame_desc[cnt].offset,
				urb->iso_frame_desc[cnt].length, urb, cnt);
		}
		admhcd_to_hcd(ahcd)->self.bandwidth_isoc_reqs++;
		break;
	}

	if (urb_priv->td_cnt != cnt)
		admhc_err(ahcd, "bad number of tds created for urb %p\n", urb);
}

/*-------------------------------------------------------------------------*
 * Done List handling functions
 *-------------------------------------------------------------------------*/

/* calculate transfer length/status and update the urb
 * PRECONDITION:  irqsafe (only for urb->status locking)
 */
static int td_done(struct admhcd *ahcd, struct urb *urb, struct td *td)
{
	struct urb_priv *urb_priv = urb->hcpriv;
	u32	info = hc32_to_cpup(ahcd, &td->hwINFO);
	int	type = usb_pipetype(urb->pipe);
	int	cc;

	cc = TD_CC_GET(info);

	/* ISO ... drivers see per-TD length/status */
	if (type == PIPE_ISOCHRONOUS) {
#if 0
		/* TODO */
		int	dlen = 0;

		/* NOTE:  assumes FC in tdINFO == 0, and that
		 * only the first of 0..MAXPSW psws is used.
		 */
		cc = TD_CC_GET(td);
		if (tdINFO & TD_CC)	/* hc didn't touch? */
			return;

		if (usb_pipeout(urb->pipe))
			dlen = urb->iso_frame_desc[td->index].length;
		else {
			/* short reads are always OK for ISO */
			if (cc == TD_DATAUNDERRUN)
				cc = TD_CC_NOERROR;
			dlen = tdPSW & 0x3ff;
		}
		urb->actual_length += dlen;
		urb->iso_frame_desc[td->index].actual_length = dlen;
		urb->iso_frame_desc[td->index].status = cc_to_error[cc];

		if (cc != TD_CC_NOERROR)
			admhc_vdbg(ahcd,
				"urb %p iso td %p (%d) len %d cc %d\n",
				urb, td, 1 + td->index, dlen, cc);
#endif
	/* BULK, INT, CONTROL ... drivers see aggregate length/status,
	 * except that "setup" bytes aren't counted and "short" transfers
	 * might not be reported as errors.
	 */
	} else {
		u32	bl = TD_BL_GET(hc32_to_cpup(ahcd, &td->hwCBL));
		u32	tdDBP = hc32_to_cpup(ahcd, &td->hwDBP);

		/* update packet status if needed (short is normally ok) */
		if (cc == TD_CC_DATAUNDERRUN
				&& !(urb->transfer_flags & URB_SHORT_NOT_OK))
			cc = TD_CC_NOERROR;

		if (cc != TD_CC_NOERROR && cc < TD_CC_HCD0) {
			spin_lock(&urb->lock);
			if (urb->status == -EINPROGRESS)
				urb->status = cc_to_error[cc];
			spin_unlock(&urb->lock);
		}

		/* count all non-empty packets except control SETUP packet */
		if ((type != PIPE_CONTROL || td->index != 0) && tdDBP != 0)
			urb->actual_length += tdDBP - td->data_dma + bl;

		if (cc != TD_CC_NOERROR && cc < TD_CC_HCD0)
			admhc_vdbg(ahcd,
				"urb %p td %p (%d) cc %d, len=%d/%d\n",
				urb, td, td->index, cc,
				urb->actual_length,
				urb->transfer_buffer_length);
	}

	list_del(&td->td_list);
	urb_priv->td_idx++;

	return cc;
}

/*-------------------------------------------------------------------------*/

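/* an ED halted because one of its TDs failed: queue the rest of that
 * URB's TDs onto the (reversed) done list as if the HC had retired them,
 * and patch the ED so it can resume with the next URB after cleanup
 */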
static inline struct td *
ed_halted(struct admhcd *ahcd, struct td *td, int cc, struct td *rev)
{
	struct urb		*urb = td->urb;
	struct ed		*ed = td->ed;
	struct list_head	*tmp = td->td_list.next;
	__hc32			toggle = ed->hwHeadP & cpu_to_hc32(ahcd, ED_C);

	admhc_dump_ed(ahcd, "ed halted", td->ed, 1);
	/* clear ed halt; this is the td that caused it, but keep it inactive
	 * until its urb->complete() has a chance to clean up.
	 */
	ed->hwINFO |= cpu_to_hc32(ahcd, ED_SKIP);
	wmb();
	ed->hwHeadP &= ~cpu_to_hc32(ahcd, ED_H);

	/* put any later tds from this urb onto the donelist, after 'td',
	 * order won't matter here: no errors, and nothing was transferred.
	 * also patch the ed so it looks as if those tds completed normally.
	 */
	while (tmp != &ed->td_list) {
		struct td	*next;
		__hc32		info;

		next = list_entry(tmp, struct td, td_list);
		tmp = next->td_list.next;

		if (next->urb != urb)
			break;

		/* NOTE: if multi-td control DATA segments get supported,
		 * this urb had one of them, this td wasn't the last td
		 * in that segment (TD_R clear), this ed halted because
		 * of a short read, _and_ URB_SHORT_NOT_OK is clear ...
		 * then we need to leave the control STATUS packet queued
		 * and clear ED_SKIP.
		 */
		info = next->hwINFO;
#if 0		/* FIXME */
		info |= cpu_to_hc32(ahcd, TD_DONE);
#endif
		info &= ~cpu_to_hc32(ahcd, TD_CC);
		next->hwINFO = info;

		next->next_dl_td = rev;
		rev = next;

		ed->hwHeadP = next->hwNextTD | toggle;
	}

	/* help for troubleshooting:  report anything that
	 * looks odd ... that doesn't include protocol stalls
	 * (or maybe some other things)
	 */
	switch (cc) {
	case TD_CC_DATAUNDERRUN:
		if ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0)
			break;
		/* fallthrough */
	case TD_CC_STALL:
		if (usb_pipecontrol(urb->pipe))
			break;
		/* fallthrough */
	default:
		admhc_dbg(ahcd,
			"urb %p path %s ep%d%s %08x cc %d --> status %d\n",
			urb, urb->dev->devpath,
			usb_pipeendpoint(urb->pipe),
			usb_pipein(urb->pipe) ? "in" : "out",
			hc32_to_cpu(ahcd, td->hwINFO),
			cc, cc_to_error[cc]);
	}

	return rev;
}

/*-------------------------------------------------------------------------*/

/* there are some urbs/eds to unlink; called in_irq(), with HCD locked */
static void
finish_unlinks(struct admhcd *ahcd, u16 tick)
{
	struct ed	*ed, **last;

rescan_all:
	for (last = &ahcd->ed_rm_list, ed = *last; ed != NULL; ed = *last) {
		struct list_head	*entry, *tmp;
		int			completed, modified;
		__hc32			*prev;

		/* only take off EDs that the HC isn't using, accounting for
		 * frame counter wraps and EDs with partially retired TDs
		 */
		if (likely(HC_IS_RUNNING(admhcd_to_hcd(ahcd)->state))) {
			if (tick_before(tick, ed->tick)) {
skip_ed:
				last = &ed->ed_rm_next;
				continue;
			}
#if 0
			if (!list_empty(&ed->td_list)) {
				struct td	*td;
				u32		head;

				td = list_entry(ed->td_list.next, struct td,
							td_list);
				head = hc32_to_cpu(ahcd, ed->hwHeadP) &
								TD_MASK;

				/* INTR_WDH may need to clean up first */
				if (td->td_dma != head)
					goto skip_ed;
			}
#endif
		}

		/* reentrancy:  if we drop the schedule lock, someone might
		 * have modified this list.  normally it's just prepending
		 * entries (which we'd ignore), but paranoia won't hurt.
		 */
		*last = ed->ed_rm_next;
		ed->ed_rm_next = NULL;
		modified = 0;

		/* unlink urbs as requested, but rescan the list after
		 * we call a completion since it might have unlinked
		 * another (earlier) urb
		 *
		 * When we get here, the HC doesn't see this ed.  But it
		 * must not be rescheduled until all completed URBs have
		 * been given back to the driver.
		 */
rescan_this:
		completed = 0;
		prev = &ed->hwHeadP;
		list_for_each_safe(entry, tmp, &ed->td_list) {
			struct td	*td;
			struct urb	*urb;
			struct urb_priv	*urb_priv;
			__hc32		savebits;

			td = list_entry(entry, struct td, td_list);
			urb = td->urb;
			urb_priv = td->urb->hcpriv;

			if (urb->status == -EINPROGRESS) {
				prev = &td->hwNextTD;
				continue;
			}

			if (urb_priv == NULL)
				continue;

			/* patch pointer hc uses */
			savebits = *prev & ~cpu_to_hc32(ahcd, TD_MASK);
			*prev = td->hwNextTD | savebits;

			/* HC may have partly processed this TD */
#ifdef ADMHC_VERBOSE_DEBUG
			urb_print(ahcd, urb, "PARTIAL", 0);
#endif
			td_done(ahcd, urb, td);

			/* if URB is done, clean up */
			if (urb_priv->td_idx == urb_priv->td_cnt) {
				modified = completed = 1;
				finish_urb(ahcd, urb);
			}
		}
		if (completed && !list_empty(&ed->td_list))
			goto rescan_this;

		/* ED's now officially unlinked, hc doesn't see */
		ed->state = ED_IDLE;
		ed->hwHeadP &= ~cpu_to_hc32(ahcd, ED_H);
		ed->hwNextED = 0;
		wmb();
		ed->hwINFO &= ~cpu_to_hc32(ahcd, ED_SKIP | ED_DEQUEUE);

		/* but if there's work queued, reschedule */
		if (!list_empty(&ed->td_list)) {
			if (HC_IS_RUNNING(admhcd_to_hcd(ahcd)->state))
				ed_schedule(ahcd, ed);
		}

		if (modified)
			goto rescan_all;
	}
}

/*-------------------------------------------------------------------------*/

/*
 * Process normal completions (error or success) and clean the schedules.
 *
 * This is the main path for handing urbs back to drivers.  The only other
 * path is finish_unlinks(), which unlinks URBs using ed_rm_list, instead of
 * scanning the (re-reversed) donelist as this does.
 */

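/* unhalt an ED: skip past the remaining TDs of the URB that faulted,
 * patching the HW head pointer (and preserving the data toggle carry)
 * so the endpoint can continue with the next queued URB
 */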
static void ed_unhalt(struct admhcd *ahcd, struct ed *ed, struct urb *urb)
{
	struct list_head *entry, *tmp;
	__hc32 toggle = ed->hwHeadP & cpu_to_hc32(ahcd, ED_C);

#ifdef ADMHC_VERBOSE_DEBUG
	admhc_dump_ed(ahcd, "UNHALT", ed, 0);
#endif
	/* clear ed halt; this is the td that caused it, but keep it inactive
	 * until its urb->complete() has a chance to clean up.
	 */
	ed->hwINFO |= cpu_to_hc32(ahcd, ED_SKIP);
	wmb();
	ed->hwHeadP &= ~cpu_to_hc32(ahcd, ED_H);

	list_for_each_safe(entry, tmp, &ed->td_list) {
		struct td *td = list_entry(entry, struct td, td_list);
		__hc32 info;

		if (td->urb != urb)
			break;

		info = td->hwINFO;
		info &= ~cpu_to_hc32(ahcd, TD_CC | TD_OWN);
		td->hwINFO = info;

		ed->hwHeadP = td->hwNextTD | toggle;
		wmb();
	}
}

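/* refill an interrupt ED after its TDs are retired: point the HW head
 * back at the dummy (tail) TD, keeping the toggle carry, so the queue
 * looks empty to the HC until new TDs are filled in
 */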
static void ed_intr_refill(struct admhcd *ahcd, struct ed *ed)
{
	__hc32 toggle = ed->hwHeadP & cpu_to_hc32(ahcd, ED_C);

	ed->hwHeadP = ed->hwTailP | toggle;
}

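/* the HC sets ED_H in hwHeadP when an error halts an endpoint; the
 * faulting TD is the one whose hwNextTD the head pointer now matches
 */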
static inline int is_ed_halted(struct admhcd *ahcd, struct ed *ed)
{
	return ((hc32_to_cpup(ahcd, &ed->hwHeadP) & ED_H) == ED_H);
}

static inline int is_td_halted(struct admhcd *ahcd, struct ed *ed,
		struct td *td)
{
	return ((hc32_to_cpup(ahcd, &ed->hwHeadP) & TD_MASK) ==
		(hc32_to_cpup(ahcd, &td->hwNextTD) & TD_MASK));
}

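/* scan an ED's TD list: retire every TD the HC has released (TD_OWN
 * clear), give back finished URBs, and unlink or unhalt the ED as needed
 */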
static void ed_update(struct admhcd *ahcd, struct ed *ed)
{
	struct list_head *entry, *tmp;

#ifdef ADMHC_VERBOSE_DEBUG
	admhc_dump_ed(ahcd, "UPDATE", ed, 1);
#endif

	list_for_each_safe(entry, tmp, &ed->td_list) {
		struct td *td = list_entry(entry, struct td, td_list);
		struct urb *urb = td->urb;
		struct urb_priv *urb_priv = urb->hcpriv;
		int cc;

		if (hc32_to_cpup(ahcd, &td->hwINFO) & TD_OWN)
			break;

		/* update URB's length and status from TD */
		cc = td_done(ahcd, urb, td);
		if (is_ed_halted(ahcd, ed) && is_td_halted(ahcd, ed, td))
			ed_unhalt(ahcd, ed, urb);

		if (ed->type == PIPE_INTERRUPT)
			ed_intr_refill(ahcd, ed);

		/* If all this urb's TDs are done, call complete() */
		if (urb_priv->td_idx == urb_priv->td_cnt)
			finish_urb(ahcd, urb);

		/* clean schedule:  unlink EDs that are no longer busy */
		if (list_empty(&ed->td_list)) {
			if (ed->state == ED_OPER)
				start_ed_unlink(ahcd, ed);

		/* ... reenabling halted EDs only after fault cleanup */
		} else if ((ed->hwINFO & cpu_to_hc32(ahcd,
						ED_SKIP | ED_DEQUEUE))
					== cpu_to_hc32(ahcd, ED_SKIP)) {
			td = list_entry(ed->td_list.next, struct td, td_list);
#if 0
			if (!(td->hwINFO & cpu_to_hc32(ahcd, TD_DONE))) {
				ed->hwINFO &= ~cpu_to_hc32(ahcd, ED_SKIP);
				/* ... hc may need waking-up */
				switch (ed->type) {
				case PIPE_CONTROL:
					admhc_writel(ahcd, OHCI_CLF,
						&ahcd->regs->cmdstatus);
					break;
				case PIPE_BULK:
					admhc_writel(ahcd, OHCI_BLF,
						&ahcd->regs->cmdstatus);
					break;
				}
			}
#else
			if ((td->hwINFO & cpu_to_hc32(ahcd, TD_OWN)))
				ed->hwINFO &= ~cpu_to_hc32(ahcd, ED_SKIP);
#endif
		}
	}
}

/* there are some tds completed; called in_irq(), with HCD locked */
static void admhc_td_complete(struct admhcd *ahcd)
{
	struct ed	*ed;

	for (ed = ahcd->ed_head; ed; ed = ed->ed_next) {
		if (ed->state != ED_OPER)
			continue;

		ed_update(ahcd, ed);
	}
}