cns3xxx: update to linux 3.14
[openwrt.git] / target / linux / cns3xxx / files / drivers / usb / dwc / otg_hcd_intr.c
1 /* ==========================================================================
2  * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_hcd_intr.c $
3  * $Revision: #70 $
4  * $Date: 2008/10/16 $
5  * $Change: 1117667 $
6  *
7  * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
8  * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
9  * otherwise expressly agreed to in writing between Synopsys and you.
10  *
11  * The Software IS NOT an item of Licensed Software or Licensed Product under
12  * any End User Software License Agreement or Agreement for Licensed Product
13  * with Synopsys or any supplement thereto. You are permitted to use and
14  * redistribute this Software in source and binary forms, with or without
15  * modification, provided that redistributions of source code must retain this
16  * notice. You may not view, use, disclose, copy or distribute this file or
17  * any information contained herein except pursuant to this license grant from
18  * Synopsys. If you do not agree with this notice, including the disclaimer
19  * below, then you are not authorized to use the Software.
20  *
21  * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
22  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
25  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
31  * DAMAGE.
32  * ========================================================================== */
33 #ifndef DWC_DEVICE_ONLY
34
35 #include <linux/version.h>
36
37 #include "otg_driver.h"
38 #include "otg_hcd.h"
39 #include "otg_regs.h"
40
41 /** @file
42  * This file contains the implementation of the HCD Interrupt handlers.
43  */
44
45 /** This function handles interrupts for the HCD. */
46 int32_t dwc_otg_hcd_handle_intr(dwc_otg_hcd_t *dwc_otg_hcd)
47 {
48         int retval = 0;
49
50         dwc_otg_core_if_t *core_if = dwc_otg_hcd->core_if;
51         gintsts_data_t gintsts;
52 #ifdef DEBUG
53         dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
54 #endif
55
56         /* Check if HOST Mode */
57         if (dwc_otg_is_host_mode(core_if)) {
58                 gintsts.d32 = dwc_otg_read_core_intr(core_if);
59                 if (!gintsts.d32) {
60                         return 0;
61                 }
62
63 #ifdef DEBUG
64                 /* Don't print debug message in the interrupt handler on SOF */
65 # ifndef DEBUG_SOF
66                 if (gintsts.d32 != DWC_SOF_INTR_MASK)
67 # endif
68                         DWC_DEBUGPL(DBG_HCD_FLOOD, "\n");
69 #endif
70
71 #ifdef DEBUG
72 # ifndef DEBUG_SOF
73                 if (gintsts.d32 != DWC_SOF_INTR_MASK)
74 # endif
75                         DWC_DEBUGPL(DBG_HCD_FLOOD, "DWC OTG HCD Interrupt Detected gintsts&gintmsk=0x%08x\n", gintsts.d32);
76 #endif
77                 if (gintsts.b.usbreset) {
78                         DWC_PRINT("Usb Reset In Host Mode\n");
79                 }
80                 if (gintsts.b.sofintr) {
81                         retval |= dwc_otg_hcd_handle_sof_intr(dwc_otg_hcd);
82                 }
83                 if (gintsts.b.rxstsqlvl) {
84                         retval |= dwc_otg_hcd_handle_rx_status_q_level_intr(dwc_otg_hcd);
85                 }
86                 if (gintsts.b.nptxfempty) {
87                         retval |= dwc_otg_hcd_handle_np_tx_fifo_empty_intr(dwc_otg_hcd);
88                 }
89                 if (gintsts.b.i2cintr) {
90                         /** @todo Implement i2cintr handler. */
91                 }
92                 if (gintsts.b.portintr) {
93                         retval |= dwc_otg_hcd_handle_port_intr(dwc_otg_hcd);
94                 }
95                 if (gintsts.b.hcintr) {
96                         retval |= dwc_otg_hcd_handle_hc_intr(dwc_otg_hcd);
97                 }
98                 if (gintsts.b.ptxfempty) {
99                         retval |= dwc_otg_hcd_handle_perio_tx_fifo_empty_intr(dwc_otg_hcd);
100                 }
101 #ifdef DEBUG
102 # ifndef DEBUG_SOF
103                 if (gintsts.d32 != DWC_SOF_INTR_MASK)
104 # endif
105                 {
106                         DWC_DEBUGPL(DBG_HCD_FLOOD, "DWC OTG HCD Finished Servicing Interrupts\n");
107                         DWC_DEBUGPL(DBG_HCD_FLOOD, "DWC OTG HCD gintsts=0x%08x\n",
108                                     dwc_read_reg32(&global_regs->gintsts));
109                         DWC_DEBUGPL(DBG_HCD_FLOOD, "DWC OTG HCD gintmsk=0x%08x\n",
110                                     dwc_read_reg32(&global_regs->gintmsk));
111                 }
112 #endif
113
114 #ifdef DEBUG
115 # ifndef DEBUG_SOF
116         if (gintsts.d32 != DWC_SOF_INTR_MASK)
117 # endif
118                 DWC_DEBUGPL(DBG_HCD_FLOOD, "\n");
119 #endif
120
121         }
122         S3C2410X_CLEAR_EINTPEND();
123
124         return retval;
125 }
126
127 #ifdef DWC_TRACK_MISSED_SOFS
128 #warning Compiling code to track missed SOFs
129 #define FRAME_NUM_ARRAY_SIZE 1000
130 /**
131  * This function is for debug only.
132  */
133 static inline void track_missed_sofs(uint16_t curr_frame_number)
134 {
135         static uint16_t         frame_num_array[FRAME_NUM_ARRAY_SIZE];
136         static uint16_t         last_frame_num_array[FRAME_NUM_ARRAY_SIZE];
137         static int              frame_num_idx = 0;
138         static uint16_t         last_frame_num = DWC_HFNUM_MAX_FRNUM;
139         static int              dumped_frame_num_array = 0;
140
141         if (frame_num_idx < FRAME_NUM_ARRAY_SIZE) {
142                 if (((last_frame_num + 1) & DWC_HFNUM_MAX_FRNUM) != curr_frame_number) {
143                         frame_num_array[frame_num_idx] = curr_frame_number;
144                         last_frame_num_array[frame_num_idx++] = last_frame_num;
145                 }
146         } else if (!dumped_frame_num_array) {
147                 int i;
148                 printk(KERN_EMERG USB_DWC "Frame     Last Frame\n");
149                 printk(KERN_EMERG USB_DWC "-----     ----------\n");
150                 for (i = 0; i < FRAME_NUM_ARRAY_SIZE; i++) {
151                         printk(KERN_EMERG USB_DWC "0x%04x    0x%04x\n",
152                                frame_num_array[i], last_frame_num_array[i]);
153                 }
154                 dumped_frame_num_array = 1;
155         }
156         last_frame_num = curr_frame_number;
157 }
158 #endif
159
160 /**
161  * Handles the start-of-frame interrupt in host mode. Non-periodic
162  * transactions may be queued to the DWC_otg controller for the current
163  * (micro)frame. Periodic transactions may be queued to the controller for the
164  * next (micro)frame.
165  */
166 int32_t dwc_otg_hcd_handle_sof_intr(dwc_otg_hcd_t *hcd)
167 {
168         hfnum_data_t            hfnum;
169         struct list_head        *qh_entry;
170         dwc_otg_qh_t            *qh;
171         dwc_otg_transaction_type_e tr_type;
172         gintsts_data_t gintsts = {.d32 = 0};
173
174         hfnum.d32 = dwc_read_reg32(&hcd->core_if->host_if->host_global_regs->hfnum);
175
176 #ifdef DEBUG_SOF
177         DWC_DEBUGPL(DBG_HCD, "--Start of Frame Interrupt--\n");
178 #endif
179         hcd->frame_number = hfnum.b.frnum;
180
181 #ifdef DEBUG
182         hcd->frrem_accum += hfnum.b.frrem;
183         hcd->frrem_samples++;
184 #endif
185
186 #ifdef DWC_TRACK_MISSED_SOFS
187         track_missed_sofs(hcd->frame_number);
188 #endif
189
190         /* Determine whether any periodic QHs should be executed. */
191         qh_entry = hcd->periodic_sched_inactive.next;
192         while (qh_entry != &hcd->periodic_sched_inactive) {
193                 qh = list_entry(qh_entry, dwc_otg_qh_t, qh_list_entry);
194                 qh_entry = qh_entry->next;
195                 if (dwc_frame_num_le(qh->sched_frame, hcd->frame_number)) {
196                         /*
197                          * Move QH to the ready list to be executed next
198                          * (micro)frame.
199                          */
200                         list_move(&qh->qh_list_entry, &hcd->periodic_sched_ready);
201                 }
202         }
203
204         tr_type = dwc_otg_hcd_select_transactions(hcd);
205         if (tr_type != DWC_OTG_TRANSACTION_NONE) {
206                 dwc_otg_hcd_queue_transactions(hcd, tr_type);
207         }
208
209         /* Clear interrupt */
210         gintsts.b.sofintr = 1;
211         dwc_write_reg32(&hcd->core_if->core_global_regs->gintsts, gintsts.d32);
212
213         return 1;
214 }
215
216 /** Handles the Rx Status Queue Level Interrupt, which indicates that there is at
217  * least one packet in the Rx FIFO.  The packets are moved from the FIFO to
218  * memory if the DWC_otg controller is operating in Slave mode. */
219 int32_t dwc_otg_hcd_handle_rx_status_q_level_intr(dwc_otg_hcd_t *dwc_otg_hcd)
220 {
221         host_grxsts_data_t grxsts;
222         dwc_hc_t *hc = NULL;
223
224         DWC_DEBUGPL(DBG_HCD, "--RxStsQ Level Interrupt--\n");
225
226         grxsts.d32 = dwc_read_reg32(&dwc_otg_hcd->core_if->core_global_regs->grxstsp);
227
228         hc = dwc_otg_hcd->hc_ptr_array[grxsts.b.chnum];
229
230         /* Packet Status */
231         DWC_DEBUGPL(DBG_HCDV, "    Ch num = %d\n", grxsts.b.chnum);
232         DWC_DEBUGPL(DBG_HCDV, "    Count = %d\n", grxsts.b.bcnt);
233         DWC_DEBUGPL(DBG_HCDV, "    DPID = %d, hc.dpid = %d\n", grxsts.b.dpid, hc->data_pid_start);
234         DWC_DEBUGPL(DBG_HCDV, "    PStatus = %d\n", grxsts.b.pktsts);
235
236         switch (grxsts.b.pktsts) {
237         case DWC_GRXSTS_PKTSTS_IN:
238                 /* Read the data into the host buffer. */
239                 if (grxsts.b.bcnt > 0) {
240                         dwc_otg_read_packet(dwc_otg_hcd->core_if,
241                                             hc->xfer_buff,
242                                             grxsts.b.bcnt);
243
244                         /* Update the HC fields for the next packet received. */
245                         hc->xfer_count += grxsts.b.bcnt;
246                         hc->xfer_buff += grxsts.b.bcnt;
247                 }
248
249         case DWC_GRXSTS_PKTSTS_IN_XFER_COMP:
250         case DWC_GRXSTS_PKTSTS_DATA_TOGGLE_ERR:
251         case DWC_GRXSTS_PKTSTS_CH_HALTED:
252                 /* Handled in interrupt, just ignore data */
253                 break;
254         default:
255                 DWC_ERROR("RX_STS_Q Interrupt: Unknown status %d\n", grxsts.b.pktsts);
256                 break;
257         }
258
259         return 1;
260 }
261
262 /** This interrupt occurs when the non-periodic Tx FIFO is half-empty. More
263  * data packets may be written to the FIFO for OUT transfers. More requests
264  * may be written to the non-periodic request queue for IN transfers. This
265  * interrupt is enabled only in Slave mode. */
266 int32_t dwc_otg_hcd_handle_np_tx_fifo_empty_intr(dwc_otg_hcd_t *dwc_otg_hcd)
267 {
268         DWC_DEBUGPL(DBG_HCD, "--Non-Periodic TxFIFO Empty Interrupt--\n");
269         dwc_otg_hcd_queue_transactions(dwc_otg_hcd,
270                                        DWC_OTG_TRANSACTION_NON_PERIODIC);
271         return 1;
272 }
273
274 /** This interrupt occurs when the periodic Tx FIFO is half-empty. More data
275  * packets may be written to the FIFO for OUT transfers. More requests may be
276  * written to the periodic request queue for IN transfers. This interrupt is
277  * enabled only in Slave mode. */
278 int32_t dwc_otg_hcd_handle_perio_tx_fifo_empty_intr(dwc_otg_hcd_t *dwc_otg_hcd)
279 {
280         DWC_DEBUGPL(DBG_HCD, "--Periodic TxFIFO Empty Interrupt--\n");
281         dwc_otg_hcd_queue_transactions(dwc_otg_hcd,
282                                        DWC_OTG_TRANSACTION_PERIODIC);
283         return 1;
284 }
285
286 /** There are multiple conditions that can cause a port interrupt. This function
287  * determines which interrupt conditions have occurred and handles them
288  * appropriately. */
289 int32_t dwc_otg_hcd_handle_port_intr(dwc_otg_hcd_t *dwc_otg_hcd)
290 {
291         int retval = 0;
292         hprt0_data_t hprt0;
293         hprt0_data_t hprt0_modify;
294
295         hprt0.d32 = dwc_read_reg32(dwc_otg_hcd->core_if->host_if->hprt0);
296         hprt0_modify.d32 = dwc_read_reg32(dwc_otg_hcd->core_if->host_if->hprt0);
297
298         /* Clear appropriate bits in HPRT0 to clear the interrupt bit in
299          * GINTSTS */
300
301         hprt0_modify.b.prtena = 0;
302         hprt0_modify.b.prtconndet = 0;
303         hprt0_modify.b.prtenchng = 0;
304         hprt0_modify.b.prtovrcurrchng = 0;
305
306         /* Port Connect Detected
307          * Set flag and clear if detected */
308         if (hprt0.b.prtconndet) {
309                 DWC_DEBUGPL(DBG_HCD, "--Port Interrupt HPRT0=0x%08x "
310                             "Port Connect Detected--\n", hprt0.d32);
311                 dwc_otg_hcd->flags.b.port_connect_status_change = 1;
312                 dwc_otg_hcd->flags.b.port_connect_status = 1;
313                 hprt0_modify.b.prtconndet = 1;
314
315                 /* B-Device has connected, Delete the connection timer. */
316                 del_timer( &dwc_otg_hcd->conn_timer );
317
318                 /* The Hub driver asserts a reset when it sees port connect
319                  * status change flag */
320                 retval |= 1;
321         }
322
323         /* Port Enable Changed
324          * Clear if detected - Set internal flag if disabled */
325         if (hprt0.b.prtenchng) {
326                 DWC_DEBUGPL(DBG_HCD, "  --Port Interrupt HPRT0=0x%08x "
327                             "Port Enable Changed--\n", hprt0.d32);
328                 hprt0_modify.b.prtenchng = 1;
329                 if (hprt0.b.prtena == 1) {
330                         int do_reset = 0;
331                         dwc_otg_core_params_t *params = dwc_otg_hcd->core_if->core_params;
332                         dwc_otg_core_global_regs_t *global_regs = dwc_otg_hcd->core_if->core_global_regs;
333                         dwc_otg_host_if_t *host_if = dwc_otg_hcd->core_if->host_if;
334
335                         /* Check if we need to adjust the PHY clock speed for
336                          * low power and adjust it */
337                         if (params->host_support_fs_ls_low_power) {
338                                 gusbcfg_data_t usbcfg;
339
340                                 usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
341
342                                 if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED ||
343                                     hprt0.b.prtspd == DWC_HPRT0_PRTSPD_FULL_SPEED) {
344                                         /*
345                                          * Low power
346                                          */
347                                         hcfg_data_t hcfg;
348                                         if (usbcfg.b.phylpwrclksel == 0) {
349                                                 /* Set PHY low power clock select for FS/LS devices */
350                                                 usbcfg.b.phylpwrclksel = 1;
351                                                 dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32);
352                                                 do_reset = 1;
353                                         }
354
355                                         hcfg.d32 = dwc_read_reg32(&host_if->host_global_regs->hcfg);
356
357                                         if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED &&
358                                             params->host_ls_low_power_phy_clk ==
359                                              DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ) {
360                                                 /* 6 MHZ */
361                                                 DWC_DEBUGPL(DBG_CIL, "FS_PHY programming HCFG to 6 MHz (Low Power)\n");
362                                                 if (hcfg.b.fslspclksel != DWC_HCFG_6_MHZ) {
363                                                         hcfg.b.fslspclksel = DWC_HCFG_6_MHZ;
364                                                         dwc_write_reg32(&host_if->host_global_regs->hcfg,
365                                                                         hcfg.d32);
366                                                         do_reset = 1;
367                                                 }
368                                         } else {
369                                                 /* 48 MHZ */
370                                                 DWC_DEBUGPL(DBG_CIL, "FS_PHY programming HCFG to 48 MHz ()\n");
371                                                 if (hcfg.b.fslspclksel != DWC_HCFG_48_MHZ) {
372                                                         hcfg.b.fslspclksel = DWC_HCFG_48_MHZ;
373                                                         dwc_write_reg32(&host_if->host_global_regs->hcfg,
374                                                                         hcfg.d32);
375                                                         do_reset = 1;
376                                                 }
377                                         }
378                                 } else {
379                                         /*
380                                          * Not low power
381                                          */
382                                         if (usbcfg.b.phylpwrclksel == 1) {
383                                                 usbcfg.b.phylpwrclksel = 0;
384                                                 dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32);
385                                                 do_reset = 1;
386                                         }
387                                 }
388
389                                 if (do_reset) {
390                                         tasklet_schedule(dwc_otg_hcd->reset_tasklet);
391                                 }
392                         }
393
394                         if (!do_reset) {
395                                 /* Port has been enabled set the reset change flag */
396                                 dwc_otg_hcd->flags.b.port_reset_change = 1;
397                         }
398                 } else {
399                         dwc_otg_hcd->flags.b.port_enable_change = 1;
400                 }
401                 retval |= 1;
402         }
403
404         /** Overcurrent Change Interrupt */
405         if (hprt0.b.prtovrcurrchng) {
406                 DWC_DEBUGPL(DBG_HCD, "  --Port Interrupt HPRT0=0x%08x "
407                             "Port Overcurrent Changed--\n", hprt0.d32);
408                 dwc_otg_hcd->flags.b.port_over_current_change = 1;
409                 hprt0_modify.b.prtovrcurrchng = 1;
410                 retval |= 1;
411         }
412
413         /* Clear Port Interrupts */
414         dwc_write_reg32(dwc_otg_hcd->core_if->host_if->hprt0, hprt0_modify.d32);
415
416         return retval;
417 }
418
419 /** This interrupt indicates that one or more host channels has a pending
420  * interrupt. There are multiple conditions that can cause each host channel
421  * interrupt. This function determines which conditions have occurred for each
422  * host channel interrupt and handles them appropriately. */
423 int32_t dwc_otg_hcd_handle_hc_intr(dwc_otg_hcd_t *dwc_otg_hcd)
424 {
425         int i;
426         int retval = 0;
427         haint_data_t haint;
428
429         /* Clear appropriate bits in HCINTn to clear the interrupt bit in
430          * GINTSTS */
431
432         haint.d32 = dwc_otg_read_host_all_channels_intr(dwc_otg_hcd->core_if);
433
434         for (i = 0; i < dwc_otg_hcd->core_if->core_params->host_channels; i++) {
435                 if (haint.b2.chint & (1 << i)) {
436                         retval |= dwc_otg_hcd_handle_hc_n_intr(dwc_otg_hcd, i);
437                 }
438         }
439
440         return retval;
441 }
442
443 /* Macro used to clear one channel interrupt */
444 #define clear_hc_int(_hc_regs_, _intr_) \
445 do { \
446         hcint_data_t hcint_clear = {.d32 = 0}; \
447         hcint_clear.b._intr_ = 1; \
448         dwc_write_reg32(&(_hc_regs_)->hcint, hcint_clear.d32); \
449 } while (0)
450
451 /*
452  * Macro used to disable one channel interrupt. Channel interrupts are
453  * disabled when the channel is halted or released by the interrupt handler.
454  * There is no need to handle further interrupts of that type until the
455  * channel is re-assigned. In fact, subsequent handling may cause crashes
456  * because the channel structures are cleaned up when the channel is released.
457  */
458 #define disable_hc_int(_hc_regs_, _intr_) \
459 do { \
460         hcintmsk_data_t hcintmsk = {.d32 = 0}; \
461         hcintmsk.b._intr_ = 1; \
462         dwc_modify_reg32(&(_hc_regs_)->hcintmsk, hcintmsk.d32, 0); \
463 } while (0)
464
465 /**
466  * Gets the actual length of a transfer after the transfer halts. _halt_status
467  * holds the reason for the halt.
468  *
469  * For IN transfers where halt_status is DWC_OTG_HC_XFER_COMPLETE,
470  * *short_read is set to 1 upon return if less than the requested
471  * number of bytes were transferred. Otherwise, *short_read is set to 0 upon
472  * return. short_read may also be NULL on entry, in which case it remains
473  * unchanged.
474  */
475 static uint32_t get_actual_xfer_length(dwc_hc_t *hc,
476                                        dwc_otg_hc_regs_t *hc_regs,
477                                        dwc_otg_qtd_t *qtd,
478                                        dwc_otg_halt_status_e halt_status,
479                                        int *short_read)
480 {
481         hctsiz_data_t   hctsiz;
482         uint32_t        length;
483
484         if (short_read != NULL) {
485                 *short_read = 0;
486         }
487         hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz);
488
489         if (halt_status == DWC_OTG_HC_XFER_COMPLETE) {
490                 if (hc->ep_is_in) {
491                         length = hc->xfer_len - hctsiz.b.xfersize;
492                         if (short_read != NULL) {
493                                 *short_read = (hctsiz.b.xfersize != 0);
494                         }
495                 } else if (hc->qh->do_split) {
496                         length = qtd->ssplit_out_xfer_count;
497                 } else {
498                         length = hc->xfer_len;
499                 }
500         } else {
501                 /*
502                  * Must use the hctsiz.pktcnt field to determine how much data
503                  * has been transferred. This field reflects the number of
504                  * packets that have been transferred via the USB. This is
505                  * always an integral number of packets if the transfer was
506                  * halted before its normal completion. (Can't use the
507                  * hctsiz.xfersize field because that reflects the number of
508                  * bytes transferred via the AHB, not the USB).
509                  */
510                 length = (hc->start_pkt_count - hctsiz.b.pktcnt) * hc->max_packet;
511         }
512
513         return length;
514 }
515
516 /**
517  * Updates the state of the URB after a Transfer Complete interrupt on the
518  * host channel. Updates the actual_length field of the URB based on the
519  * number of bytes transferred via the host channel. Sets the URB status
520  * if the data transfer is finished.
521  *
522  * @return 1 if the data transfer specified by the URB is completely finished,
523  * 0 otherwise.
524  */
525 static int update_urb_state_xfer_comp(dwc_hc_t *hc,
526                                       dwc_otg_hc_regs_t *hc_regs,
527                                       struct urb *urb,
528                                       dwc_otg_qtd_t *qtd)
529 {
530         int             xfer_done = 0;
531         int             short_read = 0;
532
533         urb->actual_length += get_actual_xfer_length(hc, hc_regs, qtd,
534                                                      DWC_OTG_HC_XFER_COMPLETE,
535                                                      &short_read);
536
537         if (short_read || urb->actual_length >= urb->transfer_buffer_length) {
538                 xfer_done = 1;
539                 if (short_read && (urb->transfer_flags & URB_SHORT_NOT_OK)) {
540                         urb->status = -EREMOTEIO;
541                 } else {
542                         urb->status = 0;
543                 }
544         }
545
546 #ifdef DEBUG
547         {
548                 hctsiz_data_t   hctsiz;
549                 hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz);
550                 DWC_DEBUGPL(DBG_HCDV, "DWC_otg: %s: %s, channel %d\n",
551                             __func__, (hc->ep_is_in ? "IN" : "OUT"), hc->hc_num);
552                 DWC_DEBUGPL(DBG_HCDV, "  hc->xfer_len %d\n", hc->xfer_len);
553                 DWC_DEBUGPL(DBG_HCDV, "  hctsiz.xfersize %d\n", hctsiz.b.xfersize);
554                 DWC_DEBUGPL(DBG_HCDV, "  urb %p\n", urb);
555                 DWC_DEBUGPL(DBG_HCDV, "  urb->transfer_buffer_length %d\n",
556                             urb->transfer_buffer_length);
557                 DWC_DEBUGPL(DBG_HCDV, "  urb->actual_length %d\n", urb->actual_length);
558                 DWC_DEBUGPL(DBG_HCDV, "  short_read %d, xfer_done %d\n",
559                             short_read, xfer_done);
560         }
561 #endif
562
563         return xfer_done;
564 }
565
566 /*
567  * Save the starting data toggle for the next transfer. The data toggle is
568  * saved in the QH for non-control transfers and it's saved in the QTD for
569  * control transfers.
570  */
571 static void save_data_toggle(dwc_hc_t *hc,
572                              dwc_otg_hc_regs_t *hc_regs,
573                              dwc_otg_qtd_t *qtd)
574 {
575         hctsiz_data_t hctsiz;
576         hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz);
577
578         if (hc->ep_type != DWC_OTG_EP_TYPE_CONTROL) {
579                 dwc_otg_qh_t *qh = hc->qh;
580                 if (hctsiz.b.pid == DWC_HCTSIZ_DATA0) {
581                         qh->data_toggle = DWC_OTG_HC_PID_DATA0;
582                 } else {
583                         qh->data_toggle = DWC_OTG_HC_PID_DATA1;
584                 }
585         } else {
586                 if (hctsiz.b.pid == DWC_HCTSIZ_DATA0) {
587                         qtd->data_toggle = DWC_OTG_HC_PID_DATA0;
588                 } else {
589                         qtd->data_toggle = DWC_OTG_HC_PID_DATA1;
590                 }
591         }
592 }
593
594 /**
595  * Frees the first QTD in the QH's list if free_qtd is 1. For non-periodic
596  * QHs, removes the QH from the active non-periodic schedule. If any QTDs are
597  * still linked to the QH, the QH is added to the end of the inactive
598  * non-periodic schedule. For periodic QHs, removes the QH from the periodic
599  * schedule if no more QTDs are linked to the QH.
600  */
601 static void deactivate_qh(dwc_otg_hcd_t *hcd,
602                           dwc_otg_qh_t *qh,
603                           int free_qtd)
604 {
605         int continue_split = 0;
606         dwc_otg_qtd_t *qtd;
607         unsigned long flags;
608
609         DWC_DEBUGPL(DBG_HCDV, "  %s(%p,%p,%d)\n", __func__, hcd, qh, free_qtd);
610
611         SPIN_LOCK_IRQSAVE(&hcd->lock, flags);
612
613         qtd = list_entry(qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry);
614
615         if (qtd->complete_split) {
616                 continue_split = 1;
617         } else if (qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_MID ||
618                    qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_END) {
619                 continue_split = 1;
620         }
621
622         if (free_qtd) {
623                 dwc_otg_hcd_qtd_remove_and_free(hcd, qtd);
624                 continue_split = 0;
625         }
626
627         qh->channel = NULL;
628         qh->qtd_in_process = NULL;
629
630         dwc_otg_hcd_qh_deactivate(hcd, qh, continue_split);
631         SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags);
632 }
633
634 /**
635  * Updates the state of an Isochronous URB when the transfer is stopped for
636  * any reason. The fields of the current entry in the frame descriptor array
637  * are set based on the transfer state and the input _halt_status. Completes
638  * the Isochronous URB if all the URB frames have been completed.
639  *
640  * @return DWC_OTG_HC_XFER_COMPLETE if there are more frames remaining to be
641  * transferred in the URB. Otherwise return DWC_OTG_HC_XFER_URB_COMPLETE.
642  */
643 static dwc_otg_halt_status_e
644 update_isoc_urb_state(dwc_otg_hcd_t *hcd,
645                       dwc_hc_t *hc,
646                       dwc_otg_hc_regs_t *hc_regs,
647                       dwc_otg_qtd_t *qtd,
648                       dwc_otg_halt_status_e halt_status)
649 {
650         struct urb *urb = qtd->urb;
651         dwc_otg_halt_status_e ret_val = halt_status;
652         struct usb_iso_packet_descriptor *frame_desc;
653
654         frame_desc = &urb->iso_frame_desc[qtd->isoc_frame_index];
655         switch (halt_status) {
656         case DWC_OTG_HC_XFER_COMPLETE:
657                 frame_desc->status = 0;
658                 frame_desc->actual_length =
659                         get_actual_xfer_length(hc, hc_regs, qtd,
660                                                halt_status, NULL);
661                 break;
662         case DWC_OTG_HC_XFER_FRAME_OVERRUN:
663                 urb->error_count++;
664                 if (hc->ep_is_in) {
665                         frame_desc->status = -ENOSR;
666                 } else {
667                         frame_desc->status = -ECOMM;
668                 }
669                 frame_desc->actual_length = 0;
670                 break;
671         case DWC_OTG_HC_XFER_BABBLE_ERR:
672                 urb->error_count++;
673                 frame_desc->status = -EOVERFLOW;
674                 /* Don't need to update actual_length in this case. */
675                 break;
676         case DWC_OTG_HC_XFER_XACT_ERR:
677                 urb->error_count++;
678                 frame_desc->status = -EPROTO;
679                 frame_desc->actual_length =
680                         get_actual_xfer_length(hc, hc_regs, qtd,
681                                                halt_status, NULL);
682         default:
683                 DWC_ERROR("%s: Unhandled _halt_status (%d)\n", __func__,
684                           halt_status);
685                 BUG();
686                 break;
687         }
688
689         if (++qtd->isoc_frame_index == urb->number_of_packets) {
690                 /*
691                  * urb->status is not used for isoc transfers.
692                  * The individual frame_desc statuses are used instead.
693                  */
694                 dwc_otg_hcd_complete_urb(hcd, urb, 0);
695                 ret_val = DWC_OTG_HC_XFER_URB_COMPLETE;
696         } else {
697                 ret_val = DWC_OTG_HC_XFER_COMPLETE;
698         }
699
700         return ret_val;
701 }
702
703 /**
704  * Releases a host channel for use by other transfers. Attempts to select and
705  * queue more transactions since at least one host channel is available.
706  *
707  * @param hcd The HCD state structure.
708  * @param hc The host channel to release.
709  * @param qtd The QTD associated with the host channel. This QTD may be freed
710  * if the transfer is complete or an error has occurred.
711  * @param halt_status Reason the channel is being released. This status
712  * determines the actions taken by this function.
713  */
714 static void release_channel(dwc_otg_hcd_t *hcd,
715                             dwc_hc_t *hc,
716                             dwc_otg_qtd_t *qtd,
717                             dwc_otg_halt_status_e halt_status)
718 {
719         dwc_otg_transaction_type_e tr_type;
720         int free_qtd;
721
722         DWC_DEBUGPL(DBG_HCDV, "  %s: channel %d, halt_status %d\n",
723                     __func__, hc->hc_num, halt_status);
724
725         switch (halt_status) {
726         case DWC_OTG_HC_XFER_URB_COMPLETE:
727                 free_qtd = 1;
728                 break;
729         case DWC_OTG_HC_XFER_AHB_ERR:
730         case DWC_OTG_HC_XFER_STALL:
731         case DWC_OTG_HC_XFER_BABBLE_ERR:
732                 free_qtd = 1;
733                 break;
734         case DWC_OTG_HC_XFER_XACT_ERR:
735                 if (qtd->error_count >= 3) {
736                         DWC_DEBUGPL(DBG_HCDV, "  Complete URB with transaction error\n");
737                         free_qtd = 1;
738                         qtd->urb->status = -EPROTO;
739                         dwc_otg_hcd_complete_urb(hcd, qtd->urb, -EPROTO);
740                 } else {
741                         free_qtd = 0;
742                 }
743                 break;
744         case DWC_OTG_HC_XFER_URB_DEQUEUE:
745                 /*
746                  * The QTD has already been removed and the QH has been
747                  * deactivated. Don't want to do anything except release the
748                  * host channel and try to queue more transfers.
749                  */
750                 goto cleanup;
751         case DWC_OTG_HC_XFER_NO_HALT_STATUS:
752                 DWC_ERROR("%s: No halt_status, channel %d\n", __func__, hc->hc_num);
753                 free_qtd = 0;
754                 break;
755         default:
756                 free_qtd = 0;
757                 break;
758         }
759
760         deactivate_qh(hcd, hc->qh, free_qtd);
761
762  cleanup:
763         /*
764          * Release the host channel for use by other transfers. The cleanup
765          * function clears the channel interrupt enables and conditions, so
766          * there's no need to clear the Channel Halted interrupt separately.
767          */
768         dwc_otg_hc_cleanup(hcd->core_if, hc);
769         list_add_tail(&hc->hc_list_entry, &hcd->free_hc_list);
770
771         if (!hc->halt_on_queue && !hc->halt_pending && hc->qh->nak_frame != 0xffff)
772                 hcd->nakking_channels--;
773
774         switch (hc->ep_type) {
775         case DWC_OTG_EP_TYPE_CONTROL:
776         case DWC_OTG_EP_TYPE_BULK:
777                 hcd->non_periodic_channels--;
778
779                 /* This condition has once been observed, but the cause
780                  * was never determined. Check for it here, to collect
781                  * debug data if it occurs again. */
782                 WARN_ON_ONCE(hcd->non_periodic_channels < 0);
783                 break;
784
785         default:
786                 /*
787                  * Don't release reservations for periodic channels here.
788                  * That's done when a periodic transfer is descheduled (i.e.
789                  * when the QH is removed from the periodic schedule).
790                  */
791                 break;
792         }
793
794         if (halt_status != DWC_OTG_HC_XFER_NAK)
795                 hc->qh->nak_frame = 0xffff;
796
797         /* Try to queue more transfers now that there's a free channel. */
798         tr_type = dwc_otg_hcd_select_transactions(hcd);
799         if (tr_type != DWC_OTG_TRANSACTION_NONE) {
800                 dwc_otg_hcd_queue_transactions(hcd, tr_type);
801         }
802 }
803
804 /**
805  * Halts a host channel. If the channel cannot be halted immediately because
806  * the request queue is full, this function ensures that the FIFO empty
807  * interrupt for the appropriate queue is enabled so that the halt request can
808  * be queued when there is space in the request queue.
809  *
810  * This function may also be called in DMA mode. In that case, the channel is
811  * simply released since the core always halts the channel automatically in
812  * DMA mode.
813  */
814 static void halt_channel(dwc_otg_hcd_t *hcd,
815                          dwc_hc_t *hc,
816                          dwc_otg_qtd_t *qtd,
817                          dwc_otg_halt_status_e halt_status)
818 {
819         if (hcd->core_if->dma_enable) {
820                 release_channel(hcd, hc, qtd, halt_status);
821                 return;
822         }
823
824         /* Slave mode processing... */
825         dwc_otg_hc_halt(hcd, hc, halt_status);
826
827         if (hc->halt_on_queue) {
828                 gintmsk_data_t gintmsk = {.d32 = 0};
829                 dwc_otg_core_global_regs_t *global_regs;
830                 global_regs = hcd->core_if->core_global_regs;
831
832                 if (hc->ep_type == DWC_OTG_EP_TYPE_CONTROL ||
833                     hc->ep_type == DWC_OTG_EP_TYPE_BULK) {
834                         /*
835                          * Make sure the Non-periodic Tx FIFO empty interrupt
836                          * is enabled so that the non-periodic schedule will
837                          * be processed.
838                          */
839                         gintmsk.b.nptxfempty = 1;
840                         dwc_modify_reg32(&global_regs->gintmsk, 0, gintmsk.d32);
841                 } else {
842                         /*
843                          * Move the QH from the periodic queued schedule to
844                          * the periodic assigned schedule. This allows the
845                          * halt to be queued when the periodic schedule is
846                          * processed.
847                          */
848                         list_move(&hc->qh->qh_list_entry,
849                                   &hcd->periodic_sched_assigned);
850
851                         /*
852                          * Make sure the Periodic Tx FIFO Empty interrupt is
853                          * enabled so that the periodic schedule will be
854                          * processed.
855                          */
856                         gintmsk.b.ptxfempty = 1;
857                         dwc_modify_reg32(&global_regs->gintmsk, 0, gintmsk.d32);
858                 }
859         }
860 }
861
862 /**
863  * Performs common cleanup for non-periodic transfers after a Transfer
864  * Complete interrupt. This function should be called after any endpoint type
865  * specific handling is finished to release the host channel.
866  */
867 static void complete_non_periodic_xfer(dwc_otg_hcd_t *hcd,
868                                        dwc_hc_t *hc,
869                                        dwc_otg_hc_regs_t *hc_regs,
870                                        dwc_otg_qtd_t *qtd,
871                                        dwc_otg_halt_status_e halt_status)
872 {
873         hcint_data_t hcint;
874
875         qtd->error_count = 0;
876
877         hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
878         if (hcint.b.nyet) {
879                 /*
880                  * Got a NYET on the last transaction of the transfer. This
881                  * means that the endpoint should be in the PING state at the
882                  * beginning of the next transfer.
883                  */
884                 hc->qh->ping_state = 1;
885                 clear_hc_int(hc_regs, nyet);
886         }
887
888         /*
889          * Always halt and release the host channel to make it available for
890          * more transfers. There may still be more phases for a control
891          * transfer or more data packets for a bulk transfer at this point,
892          * but the host channel is still halted. A channel will be reassigned
893          * to the transfer when the non-periodic schedule is processed after
894          * the channel is released. This allows transactions to be queued
895          * properly via dwc_otg_hcd_queue_transactions, which also enables the
896          * Tx FIFO Empty interrupt if necessary.
897          */
898         if (hc->ep_is_in) {
899                 /*
900                  * IN transfers in Slave mode require an explicit disable to
901                  * halt the channel. (In DMA mode, this call simply releases
902                  * the channel.)
903                  */
904                 halt_channel(hcd, hc, qtd, halt_status);
905         } else {
906                 /*
907                  * The channel is automatically disabled by the core for OUT
908                  * transfers in Slave mode.
909                  */
910                 release_channel(hcd, hc, qtd, halt_status);
911         }
912 }
913
914 /**
915  * Performs common cleanup for periodic transfers after a Transfer Complete
916  * interrupt. This function should be called after any endpoint type specific
917  * handling is finished to release the host channel.
918  */
919 static void complete_periodic_xfer(dwc_otg_hcd_t *hcd,
920                                    dwc_hc_t *hc,
921                                    dwc_otg_hc_regs_t *hc_regs,
922                                    dwc_otg_qtd_t *qtd,
923                                    dwc_otg_halt_status_e halt_status)
924 {
925         hctsiz_data_t hctsiz;
926         qtd->error_count = 0;
927
928         hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz);
929         if (!hc->ep_is_in || hctsiz.b.pktcnt == 0) {
930                 /* Core halts channel in these cases. */
931                 release_channel(hcd, hc, qtd, halt_status);
932         } else {
933                 /* Flush any outstanding requests from the Tx queue. */
934                 halt_channel(hcd, hc, qtd, halt_status);
935         }
936 }
937
938 /**
939  * Handles a host channel Transfer Complete interrupt. This handler may be
940  * called in either DMA mode or Slave mode.
941  */
942 static int32_t handle_hc_xfercomp_intr(dwc_otg_hcd_t *hcd,
943                                        dwc_hc_t *hc,
944                                        dwc_otg_hc_regs_t *hc_regs,
945                                        dwc_otg_qtd_t *qtd)
946 {
947         int                     urb_xfer_done;
948         dwc_otg_halt_status_e   halt_status = DWC_OTG_HC_XFER_COMPLETE;
949         struct urb              *urb = qtd->urb;
950         int                     pipe_type = usb_pipetype(urb->pipe);
951
952         DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
953                     "Transfer Complete--\n", hc->hc_num);
954
955         /*
956          * Handle xfer complete on CSPLIT.
957          */
958         if (hc->qh->do_split) {
959                 qtd->complete_split = 0;
960         }
961
962         /* Update the QTD and URB states. */
963         switch (pipe_type) {
964         case PIPE_CONTROL:
965                 switch (qtd->control_phase) {
966                 case DWC_OTG_CONTROL_SETUP:
967                         if (urb->transfer_buffer_length > 0) {
968                                 qtd->control_phase = DWC_OTG_CONTROL_DATA;
969                         } else {
970                                 qtd->control_phase = DWC_OTG_CONTROL_STATUS;
971                         }
972                         DWC_DEBUGPL(DBG_HCDV, "  Control setup transaction done\n");
973                         halt_status = DWC_OTG_HC_XFER_COMPLETE;
974                         break;
975                 case DWC_OTG_CONTROL_DATA: {
976                         urb_xfer_done = update_urb_state_xfer_comp(hc, hc_regs, urb, qtd);
977                         if (urb_xfer_done) {
978                                 qtd->control_phase = DWC_OTG_CONTROL_STATUS;
979                                 DWC_DEBUGPL(DBG_HCDV, "  Control data transfer done\n");
980                         } else {
981                                 save_data_toggle(hc, hc_regs, qtd);
982                         }
983                         halt_status = DWC_OTG_HC_XFER_COMPLETE;
984                         break;
985                 }
986                 case DWC_OTG_CONTROL_STATUS:
987                         DWC_DEBUGPL(DBG_HCDV, "  Control transfer complete\n");
988                         if (urb->status == -EINPROGRESS) {
989                                 urb->status = 0;
990                         }
991                         dwc_otg_hcd_complete_urb(hcd, urb, urb->status);
992                         halt_status = DWC_OTG_HC_XFER_URB_COMPLETE;
993                         break;
994                 }
995
996                 complete_non_periodic_xfer(hcd, hc, hc_regs, qtd, halt_status);
997                 break;
998         case PIPE_BULK:
999                 DWC_DEBUGPL(DBG_HCDV, "  Bulk transfer complete\n");
1000                 urb_xfer_done = update_urb_state_xfer_comp(hc, hc_regs, urb, qtd);
1001                 if (urb_xfer_done) {
1002                         dwc_otg_hcd_complete_urb(hcd, urb, urb->status);
1003                         halt_status = DWC_OTG_HC_XFER_URB_COMPLETE;
1004                 } else {
1005                         halt_status = DWC_OTG_HC_XFER_COMPLETE;
1006                 }
1007
1008                 save_data_toggle(hc, hc_regs, qtd);
1009                 complete_non_periodic_xfer(hcd, hc, hc_regs, qtd, halt_status);
1010                 break;
1011         case PIPE_INTERRUPT:
1012                 DWC_DEBUGPL(DBG_HCDV, "  Interrupt transfer complete\n");
1013                 update_urb_state_xfer_comp(hc, hc_regs, urb, qtd);
1014
1015                 /*
1016                  * Interrupt URB is done on the first transfer complete
1017                  * interrupt.
1018                  */
1019                 dwc_otg_hcd_complete_urb(hcd, urb, urb->status);
1020                 save_data_toggle(hc, hc_regs, qtd);
1021                 complete_periodic_xfer(hcd, hc, hc_regs, qtd,
1022                                        DWC_OTG_HC_XFER_URB_COMPLETE);
1023                 break;
1024         case PIPE_ISOCHRONOUS:
1025                 DWC_DEBUGPL(DBG_HCDV,  "  Isochronous transfer complete\n");
1026                 if (qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_ALL) {
1027                         halt_status = update_isoc_urb_state(hcd, hc, hc_regs, qtd,
1028                                                             DWC_OTG_HC_XFER_COMPLETE);
1029                 }
1030                 complete_periodic_xfer(hcd, hc, hc_regs, qtd, halt_status);
1031                 break;
1032         }
1033
1034         disable_hc_int(hc_regs, xfercompl);
1035
1036         return 1;
1037 }
1038
1039 /**
1040  * Handles a host channel STALL interrupt. This handler may be called in
1041  * either DMA mode or Slave mode.
1042  */
1043 static int32_t handle_hc_stall_intr(dwc_otg_hcd_t *hcd,
1044                                     dwc_hc_t *hc,
1045                                     dwc_otg_hc_regs_t *hc_regs,
1046                                     dwc_otg_qtd_t *qtd)
1047 {
1048         struct urb *urb = qtd->urb;
1049         int pipe_type = usb_pipetype(urb->pipe);
1050
1051         DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
1052                     "STALL Received--\n", hc->hc_num);
1053
1054         if (pipe_type == PIPE_CONTROL) {
1055                 dwc_otg_hcd_complete_urb(hcd, urb, -EPIPE);
1056         }
1057
1058         if (pipe_type == PIPE_BULK || pipe_type == PIPE_INTERRUPT) {
1059                 dwc_otg_hcd_complete_urb(hcd, urb, -EPIPE);
1060                 /*
1061                  * USB protocol requires resetting the data toggle for bulk
1062                  * and interrupt endpoints when a CLEAR_FEATURE(ENDPOINT_HALT)
1063                  * setup command is issued to the endpoint. Anticipate the
1064                  * CLEAR_FEATURE command since a STALL has occurred and reset
1065                  * the data toggle now.
1066                  */
1067                 hc->qh->data_toggle = 0;
1068         }
1069
1070         halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_STALL);
1071
1072         disable_hc_int(hc_regs, stall);
1073
1074         return 1;
1075 }
1076
1077 /*
1078  * Updates the state of the URB when a transfer has been stopped due to an
1079  * abnormal condition before the transfer completes. Modifies the
1080  * actual_length field of the URB to reflect the number of bytes that have
1081  * actually been transferred via the host channel.
1082  */
1083 static void update_urb_state_xfer_intr(dwc_hc_t *hc,
1084                                        dwc_otg_hc_regs_t *hc_regs,
1085                                        struct urb *urb,
1086                                        dwc_otg_qtd_t *qtd,
1087                                        dwc_otg_halt_status_e halt_status)
1088 {
1089         uint32_t bytes_transferred = get_actual_xfer_length(hc, hc_regs, qtd,
1090                                                             halt_status, NULL);
1091         urb->actual_length += bytes_transferred;
1092
1093 #ifdef DEBUG
1094         {
1095                 hctsiz_data_t   hctsiz;
1096                 hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz);
1097                 DWC_DEBUGPL(DBG_HCDV, "DWC_otg: %s: %s, channel %d\n",
1098                             __func__, (hc->ep_is_in ? "IN" : "OUT"), hc->hc_num);
1099                 DWC_DEBUGPL(DBG_HCDV, "  hc->start_pkt_count %d\n", hc->start_pkt_count);
1100                 DWC_DEBUGPL(DBG_HCDV, "  hctsiz.pktcnt %d\n", hctsiz.b.pktcnt);
1101                 DWC_DEBUGPL(DBG_HCDV, "  hc->max_packet %d\n", hc->max_packet);
1102                 DWC_DEBUGPL(DBG_HCDV, "  bytes_transferred %d\n", bytes_transferred);
1103                 DWC_DEBUGPL(DBG_HCDV, "  urb %p\n", urb);
1104                 DWC_DEBUGPL(DBG_HCDV, "  urb->actual_length %d\n", urb->actual_length);
1105                 DWC_DEBUGPL(DBG_HCDV, "  urb->transfer_buffer_length %d\n",
1106                             urb->transfer_buffer_length);
1107         }
1108 #endif
1109 }
1110
1111 /**
1112  * Handles a host channel NAK interrupt. This handler may be called in either
1113  * DMA mode or Slave mode.
1114  */
1115 static int32_t handle_hc_nak_intr(dwc_otg_hcd_t *hcd,
1116                                   dwc_hc_t *hc,
1117                                   dwc_otg_hc_regs_t *hc_regs,
1118                                   dwc_otg_qtd_t *qtd)
1119 {
1120         DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
1121                     "NAK Received--\n", hc->hc_num);
1122         /*
1123         * When we get bulk NAKs then remember this so we holdoff on this qh until
1124         * the beginning of the next frame
1125         */
1126         switch (usb_pipetype(qtd->urb->pipe)) {
1127                 case PIPE_BULK:
1128                         /* xfer_started can be 0 when a halted interrupt
1129                          * occurs with the nak flag set, then first the
1130                          * halted handler runs and then this nak
1131                          * handler. In this case, also don't update
1132                          * nak_frame, since the qh might already be
1133                          * assigned to another host channel. */
1134                         if (!hc->halt_on_queue && !hc->halt_pending && hc->xfer_started && hc->qh->nak_frame == 0xffff)
1135                                 hcd->nakking_channels++;
1136                         if (hc->xfer_started)
1137                                 hc->qh->nak_frame = dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd(hcd));
1138         }
1139
1140         /*
1141          * Handle NAK for IN/OUT SSPLIT/CSPLIT transfers, bulk, control, and
1142          * interrupt.  Re-start the SSPLIT transfer.
1143          */
1144         if (hc->do_split) {
1145                 if (hc->complete_split) {
1146                         qtd->error_count = 0;
1147                 }
1148                 qtd->complete_split = 0;
1149                 halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NAK);
1150                 goto handle_nak_done;
1151         }
1152
1153         switch (usb_pipetype(qtd->urb->pipe)) {
1154         case PIPE_CONTROL:
1155         case PIPE_BULK:
1156                 if (hcd->core_if->dma_enable && hc->ep_is_in) {
1157                         /*
1158                          * NAK interrupts are enabled on bulk/control IN
1159                          * transfers in DMA mode for the sole purpose of
1160                          * resetting the error count after a transaction error
1161                          * occurs. The core will continue transferring data.
1162                          */
1163                         qtd->error_count = 0;
1164                         goto handle_nak_done;
1165                 }
1166
1167                 /*
1168                  * NAK interrupts normally occur during OUT transfers in DMA
1169                  * or Slave mode. For IN transfers, more requests will be
1170                  * queued as request queue space is available.
1171                  */
1172                 qtd->error_count = 0;
1173
1174                 if (!hc->qh->ping_state) {
1175                         update_urb_state_xfer_intr(hc, hc_regs, qtd->urb,
1176                                                    qtd, DWC_OTG_HC_XFER_NAK);
1177                         save_data_toggle(hc, hc_regs, qtd);
1178                         if (qtd->urb->dev->speed == USB_SPEED_HIGH) {
1179                                 hc->qh->ping_state = 1;
1180                         }
1181                 }
1182
1183                 /*
1184                  * Halt the channel so the transfer can be re-started from
1185                  * the appropriate point or the PING protocol will
1186                  * start/continue.
1187                  */
1188                 halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NAK);
1189                 break;
1190         case PIPE_INTERRUPT:
1191                 qtd->error_count = 0;
1192                 halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NAK);
1193                 break;
1194         case PIPE_ISOCHRONOUS:
1195                 /* Should never get called for isochronous transfers. */
1196                 BUG();
1197                 break;
1198         }
1199
1200  handle_nak_done:
1201         disable_hc_int(hc_regs, nak);
1202
1203         return 1;
1204 }
1205
1206 /**
1207  * Handles a host channel ACK interrupt. This interrupt is enabled when
1208  * performing the PING protocol in Slave mode, when errors occur during
1209  * either Slave mode or DMA mode, and during Start Split transactions.
1210  */
1211 static int32_t handle_hc_ack_intr(dwc_otg_hcd_t *hcd,
1212                                   dwc_hc_t *hc,
1213                                   dwc_otg_hc_regs_t *hc_regs,
1214                                   dwc_otg_qtd_t *qtd)
1215 {
1216         DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
1217                     "ACK Received--\n", hc->hc_num);
1218
1219         if (hc->do_split) {
1220                 /*
1221                  * Handle ACK on SSPLIT.
1222                  * ACK should not occur in CSPLIT.
1223                  */
1224                 if (!hc->ep_is_in && hc->data_pid_start != DWC_OTG_HC_PID_SETUP) {
1225                         qtd->ssplit_out_xfer_count = hc->xfer_len;
1226                 }
1227                 if (!(hc->ep_type == DWC_OTG_EP_TYPE_ISOC && !hc->ep_is_in)) {
1228                         /* Don't need complete for isochronous out transfers. */
1229                         qtd->complete_split = 1;
1230                 }
1231
1232                 /* ISOC OUT */
1233                 if (hc->ep_type == DWC_OTG_EP_TYPE_ISOC && !hc->ep_is_in) {
1234                         switch (hc->xact_pos) {
1235                         case DWC_HCSPLIT_XACTPOS_ALL:
1236                                 break;
1237                         case DWC_HCSPLIT_XACTPOS_END:
1238                                 qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_ALL;
1239                                 qtd->isoc_split_offset = 0;
1240                                 break;
1241                         case DWC_HCSPLIT_XACTPOS_BEGIN:
1242                         case DWC_HCSPLIT_XACTPOS_MID:
1243                                 /*
1244                                  * For BEGIN or MID, calculate the length for
1245                                  * the next microframe to determine the correct
1246                                  * SSPLIT token, either MID or END.
1247                                  */
1248                                 {
1249                                         struct usb_iso_packet_descriptor *frame_desc;
1250
1251                                         frame_desc = &qtd->urb->iso_frame_desc[qtd->isoc_frame_index];
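                                        /*
                                         * Each SSPLIT of a split isochronous OUT carries at
                                         * most 188 bytes, the most a full-/low-speed link can
                                         * move in one microframe, so the offset advances by
                                         * 188 per ACKed start split.
                                         */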
1252                                         qtd->isoc_split_offset += 188;
1253
1254                                         if ((frame_desc->length - qtd->isoc_split_offset) <= 188) {
1255                                                 qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_END;
1256                                         } else {
1257                                                 qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_MID;
1258                                         }
1259
1260                                 }
1261                                 break;
1262                         }
1263                 } else {
1264                         halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_ACK);
1265                 }
1266         } else {
1267                 qtd->error_count = 0;
1268
1269                 if (hc->qh->ping_state) {
1270                         hc->qh->ping_state = 0;
1271                         /*
1272                          * Halt the channel so the transfer can be re-started
1273                          * from the appropriate point. This only happens in
1274                          * Slave mode. In DMA mode, the ping_state is cleared
1275                          * when the transfer is started because the core
1276                          * automatically executes the PING, then the transfer.
1277                          */
1278                         halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_ACK);
1279                 }
1280         }
1281
1282         /*
1283          * If the ACK occurred when _not_ in the PING state, let the channel
1284          * continue transferring data after clearing the error count.
1285          */
1286
1287         disable_hc_int(hc_regs, ack);
1288
1289         return 1;
1290 }
1291
1292 /**
1293  * Handles a host channel NYET interrupt. This interrupt should only occur on
1294  * Bulk and Control OUT endpoints and for complete split transactions. If a
1295  * NYET occurs at the same time as a Transfer Complete interrupt, it is
1296  * handled in the xfercomp interrupt handler, not here. This handler may be
1297  * called in either DMA mode or Slave mode.
1298  */
1299 static int32_t handle_hc_nyet_intr(dwc_otg_hcd_t *hcd,
1300                                    dwc_hc_t *hc,
1301                                    dwc_otg_hc_regs_t *hc_regs,
1302                                    dwc_otg_qtd_t *qtd)
1303 {
1304         DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
1305                     "NYET Received--\n", hc->hc_num);
1306
1307         /*
1308          * NYET on CSPLIT
1309          * re-do the CSPLIT immediately on non-periodic
1310          */
1311         if (hc->do_split && hc->complete_split) {
1312                 if (hc->ep_type == DWC_OTG_EP_TYPE_INTR ||
1313                     hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
1314                         int frnum = dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd(hcd));
1315
1316                         if (dwc_full_frame_num(frnum) !=
1317                             dwc_full_frame_num(hc->qh->sched_frame)) {
1318                                 /*
1319                                  * No longer in the same full speed frame.
1320                                  * Treat this as a transaction error.
1321                                  */
1322 #if 0
1323                                 /** @todo Fix system performance so this can
1324                                  * be treated as an error. Right now complete
1325                                  * splits cannot be scheduled precisely enough
1326                                  * due to other system activity, so this error
1327                                  * occurs regularly in Slave mode.
1328                                  */
1329                                 qtd->error_count++;
1330 #endif
1331                                 qtd->complete_split = 0;
1332                                 halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_XACT_ERR);
1333                                 /** @todo add support for isoc release */
1334                                 goto handle_nyet_done;
1335                         }
1336                 }
1337
1338                 halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NYET);
1339                 goto handle_nyet_done;
1340         }
1341
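        /*
         * NYET on a non-split OUT: the data was accepted but the endpoint
         * has no room for more, so switch to the PING protocol before the
         * transfer is re-started.
         */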
1342         hc->qh->ping_state = 1;
1343         qtd->error_count = 0;
1344
1345         update_urb_state_xfer_intr(hc, hc_regs, qtd->urb, qtd,
1346                                    DWC_OTG_HC_XFER_NYET);
1347         save_data_toggle(hc, hc_regs, qtd);
1348
1349         /*
1350          * Halt the channel and re-start the transfer so the PING
1351          * protocol will start.
1352          */
1353         halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NYET);
1354
1355 handle_nyet_done:
1356         disable_hc_int(hc_regs, nyet);
1357         return 1;
1358 }
1359
1360 /**
1361  * Handles a host channel babble interrupt. This handler may be called in
1362  * either DMA mode or Slave mode.
1363  */
1364 static int32_t handle_hc_babble_intr(dwc_otg_hcd_t *hcd,
1365                                      dwc_hc_t *hc,
1366                                      dwc_otg_hc_regs_t *hc_regs,
1367                                      dwc_otg_qtd_t *qtd)
1368 {
1369         DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
1370                     "Babble Error--\n", hc->hc_num);
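        /*
         * Babble is fatal for non-isochronous transfers, so the URB is
         * completed with -EOVERFLOW. For isochronous transfers the error is
         * recorded per frame by update_isoc_urb_state() rather than failing
         * the whole URB.
         */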
1371         if (hc->ep_type != DWC_OTG_EP_TYPE_ISOC) {
1372                 dwc_otg_hcd_complete_urb(hcd, qtd->urb, -EOVERFLOW);
1373                 halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_BABBLE_ERR);
1374         } else {
1375                 dwc_otg_halt_status_e halt_status;
1376                 halt_status = update_isoc_urb_state(hcd, hc, hc_regs, qtd,
1377                                                     DWC_OTG_HC_XFER_BABBLE_ERR);
1378                 halt_channel(hcd, hc, qtd, halt_status);
1379         }
1380         disable_hc_int(hc_regs, bblerr);
1381         return 1;
1382 }
1383
1384 /**
1385  * Handles a host channel AHB error interrupt. This handler is only called in
1386  * DMA mode.
1387  */
1388 static int32_t handle_hc_ahberr_intr(dwc_otg_hcd_t *hcd,
1389                                      dwc_hc_t *hc,
1390                                      dwc_otg_hc_regs_t *hc_regs,
1391                                      dwc_otg_qtd_t *qtd)
1392 {
1393         hcchar_data_t   hcchar;
1394         hcsplt_data_t   hcsplt;
1395         hctsiz_data_t   hctsiz;
1396         uint32_t        hcdma;
1397         struct urb      *urb = qtd->urb;
1398
1399         DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
1400                     "AHB Error--\n", hc->hc_num);
1401
1402         hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1403         hcsplt.d32 = dwc_read_reg32(&hc_regs->hcsplt);
1404         hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz);
1405         hcdma = dwc_read_reg32(&hc_regs->hcdma);
1406
1407         DWC_ERROR("AHB ERROR, Channel %d\n", hc->hc_num);
1408         DWC_ERROR("  hcchar 0x%08x, hcsplt 0x%08x\n", hcchar.d32, hcsplt.d32);
1409         DWC_ERROR("  hctsiz 0x%08x, hcdma 0x%08x\n", hctsiz.d32, hcdma);
1411         DWC_ERROR("  Device address: %d\n", usb_pipedevice(urb->pipe));
1412         DWC_ERROR("  Endpoint: %d, %s\n", usb_pipeendpoint(urb->pipe),
1413                   (usb_pipein(urb->pipe) ? "IN" : "OUT"));
1414         DWC_ERROR("  Endpoint type: %s\n",
1415                   ({char *pipetype;
1416                     switch (usb_pipetype(urb->pipe)) {
1417                     case PIPE_CONTROL: pipetype = "CONTROL"; break;
1418                     case PIPE_BULK: pipetype = "BULK"; break;
1419                     case PIPE_INTERRUPT: pipetype = "INTERRUPT"; break;
1420                     case PIPE_ISOCHRONOUS: pipetype = "ISOCHRONOUS"; break;
1421                     default: pipetype = "UNKNOWN"; break;
1422                    }; pipetype;}));
1423         DWC_ERROR("  Speed: %s\n",
1424                   ({char *speed;
1425                     switch (urb->dev->speed) {
1426                     case USB_SPEED_HIGH: speed = "HIGH"; break;
1427                     case USB_SPEED_FULL: speed = "FULL"; break;
1428                     case USB_SPEED_LOW: speed = "LOW"; break;
1429                     default: speed = "UNKNOWN"; break;
1430                    }; speed;}));
1431         DWC_ERROR("  Max packet size: %d\n",
1432                   usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)));
1433         DWC_ERROR("  Data buffer length: %d\n", urb->transfer_buffer_length);
1434         DWC_ERROR("  Transfer buffer: %p, Transfer DMA: %p\n",
1435                   urb->transfer_buffer, (void *)urb->transfer_dma);
1436         DWC_ERROR("  Setup buffer: %p, Setup DMA: %p\n",
1437                   urb->setup_packet, (void *)urb->setup_dma);
1438         DWC_ERROR("  Interval: %d\n", urb->interval);
1439
1440         dwc_otg_hcd_complete_urb(hcd, urb, -EIO);
1441
1442         /*
1443          * Force a channel halt. Don't call halt_channel because that won't
1444          * write to the HCCHARn register in DMA mode to force the halt.
1445          */
1446         dwc_otg_hc_halt(hcd, hc, DWC_OTG_HC_XFER_AHB_ERR);
1447
1448         disable_hc_int(hc_regs, ahberr);
1449         return 1;
1450 }
1451
1452 /**
1453  * Handles a host channel transaction error interrupt. This handler may be
1454  * called in either DMA mode or Slave mode.
1455  */
1456 static int32_t handle_hc_xacterr_intr(dwc_otg_hcd_t *hcd,
1457                                       dwc_hc_t *hc,
1458                                       dwc_otg_hc_regs_t *hc_regs,
1459                                       dwc_otg_qtd_t *qtd)
1460 {
1461         DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
1462                     "Transaction Error--\n", hc->hc_num);
1463
1464         switch (usb_pipetype(qtd->urb->pipe)) {
1465         case PIPE_CONTROL:
1466         case PIPE_BULK:
1467                 qtd->error_count++;
1468                 if (!hc->qh->ping_state) {
1469                         update_urb_state_xfer_intr(hc, hc_regs, qtd->urb,
1470                                                    qtd, DWC_OTG_HC_XFER_XACT_ERR);
1471                         save_data_toggle(hc, hc_regs, qtd);
1472                         if (!hc->ep_is_in && qtd->urb->dev->speed == USB_SPEED_HIGH) {
1473                                 hc->qh->ping_state = 1;
1474                         }
1475                 }
1476
1477                 /*
1478                  * Halt the channel so the transfer can be re-started from
1479                  * the appropriate point or the PING protocol will start.
1480                  */
1481                 halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_XACT_ERR);
1482                 break;
1483         case PIPE_INTERRUPT:
1484                 qtd->error_count++;
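                /*
                 * A transaction error on a complete split forces the whole
                 * split transaction to be retried from the start split.
                 */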
1485                 if (hc->do_split && hc->complete_split) {
1486                         qtd->complete_split = 0;
1487                 }
1488                 halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_XACT_ERR);
1489                 break;
1490         case PIPE_ISOCHRONOUS:
1491                 {
1492                         dwc_otg_halt_status_e halt_status;
1493                         halt_status = update_isoc_urb_state(hcd, hc, hc_regs, qtd,
1494                                                             DWC_OTG_HC_XFER_XACT_ERR);
1495
1496                         halt_channel(hcd, hc, qtd, halt_status);
1497                 }
1498                 break;
1499         }
1500
1501         disable_hc_int(hc_regs, xacterr);
1502
1503         return 1;
1504 }
1505
1506 /**
1507  * Handles a host channel frame overrun interrupt. This handler may be called
1508  * in either DMA mode or Slave mode.
1509  */
1510 static int32_t handle_hc_frmovrun_intr(dwc_otg_hcd_t *hcd,
1511                                        dwc_hc_t *hc,
1512                                        dwc_otg_hc_regs_t *hc_regs,
1513                                        dwc_otg_qtd_t *qtd)
1514 {
1515         DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
1516                     "Frame Overrun--\n", hc->hc_num);
1517
1518         switch (usb_pipetype(qtd->urb->pipe)) {
1519         case PIPE_CONTROL:
1520         case PIPE_BULK:
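                /* Frame overruns only matter for periodic transfers;
                 * nothing to do for control/bulk. */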
1521                 break;
1522         case PIPE_INTERRUPT:
1523                 halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_FRAME_OVERRUN);
1524                 break;
1525         case PIPE_ISOCHRONOUS:
1526                 {
1527                         dwc_otg_halt_status_e halt_status;
1528                         halt_status = update_isoc_urb_state(hcd, hc, hc_regs, qtd,
1529                                                             DWC_OTG_HC_XFER_FRAME_OVERRUN);
1530
1531                         halt_channel(hcd, hc, qtd, halt_status);
1532                 }
1533                 break;
1534         }
1535
1536         disable_hc_int(hc_regs, frmovrun);
1537
1538         return 1;
1539 }
1540
1541 /**
1542  * Handles a host channel data toggle error interrupt. This handler may be
1543  * called in either DMA mode or Slave mode.
1544  */
1545 static int32_t handle_hc_datatglerr_intr(dwc_otg_hcd_t *hcd,
1546                                          dwc_hc_t *hc,
1547                                          dwc_otg_hc_regs_t *hc_regs,
1548                                          dwc_otg_qtd_t *qtd)
1549 {
1550         DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
1551                 "Data Toggle Error on %s transfer--\n",
1552                 hc->hc_num, (hc->ep_is_in ? "IN" : "OUT"));
1553
1554         /* Data toggles on split transactions cause the hc to halt;
1555          * restart the transfer. */
1556         if (hc->qh->do_split) {
1557                 qtd->error_count++;
1558                 save_data_toggle(hc, hc_regs, qtd);
1559                 update_urb_state_xfer_intr(hc, hc_regs,
1560                         qtd->urb, qtd, DWC_OTG_HC_XFER_XACT_ERR);
1561                 halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_XACT_ERR);
1562         } else if (hc->ep_is_in) {
1563                 /*
1564                  * A data toggle error on a non-split DMA IN transaction is
1565                  * unmasked solely so the error count can be reset. Disable
1566                  * the other interrupts that were unmasked for the same reason.
1567                  */
1568                 if (hcd->core_if->dma_enable) {
1569                         disable_hc_int(hc_regs, ack);
1570                         disable_hc_int(hc_regs, nak);
1571                 }
1572                 qtd->error_count = 0;
1573         }
1574
1575         disable_hc_int(hc_regs, datatglerr);
1576
1577         return 1;
1578 }
1579
1580 #ifdef DEBUG
1581 /**
1582  * This function is for debug only. It checks that a valid halt status is set
1583  * and that HCCHARn.chdis is clear. If there's a problem, corrective action is
1584  * taken and a warning is issued.
1585  * @return 1 if halt status is ok, 0 otherwise.
1586  */
1587 static inline int halt_status_ok(dwc_otg_hcd_t *hcd,
1588                                  dwc_hc_t *hc,
1589                                  dwc_otg_hc_regs_t *hc_regs,
1590                                  dwc_otg_qtd_t *qtd)
1591 {
1592         hcchar_data_t hcchar;
1593         hctsiz_data_t hctsiz;
1594         hcint_data_t hcint;
1595         hcintmsk_data_t hcintmsk;
1596         hcsplt_data_t hcsplt;
1597
1598         if (hc->halt_status == DWC_OTG_HC_XFER_NO_HALT_STATUS) {
1599                 /*
1600                  * This code is here only as a check. This condition should
1601                  * never happen. Ignore the halt if it does occur.
1602                  */
1603                 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1604                 hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz);
1605                 hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
1606                 hcintmsk.d32 = dwc_read_reg32(&hc_regs->hcintmsk);
1607                 hcsplt.d32 = dwc_read_reg32(&hc_regs->hcsplt);
1608                 DWC_WARN("%s: hc->halt_status == DWC_OTG_HC_XFER_NO_HALT_STATUS, "
1609                          "channel %d, hcchar 0x%08x, hctsiz 0x%08x, "
1610                          "hcint 0x%08x, hcintmsk 0x%08x, "
1611                          "hcsplt 0x%08x, qtd->complete_split %d\n",
1612                          __func__, hc->hc_num, hcchar.d32, hctsiz.d32,
1613                          hcint.d32, hcintmsk.d32,
1614                          hcsplt.d32, qtd->complete_split);
1615
1616                 DWC_WARN("%s: no halt status, channel %d, ignoring interrupt\n",
1617                          __func__, hc->hc_num);
1618                 DWC_WARN("\n");
1619                 clear_hc_int(hc_regs, chhltd);
1620                 return 0;
1621         }
1622
1623         /*
1624          * This code is here only as a check. hcchar.chdis should
1625          * never be set when the halt interrupt occurs. Halt the
1626          * channel again if it does occur.
1627          */
1628         hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1629         if (hcchar.b.chdis) {
1630                 DWC_WARN("%s: hcchar.chdis set unexpectedly, "
1631                          "hcchar 0x%08x, trying to halt again\n",
1632                          __func__, hcchar.d32);
1633                 clear_hc_int(hc_regs, chhltd);
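                /*
                 * This nakking_channels accounting is specific to this
                 * driver's NAK-holdoff logic (see check_nakking());
                 * presumably the count is restored here because
                 * halt_channel() below will request the halt again.
                 */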
1634                 if (hc->halt_pending && !hc->halt_on_queue && hc->qh->nak_frame != 0xffff)
1635                         hcd->nakking_channels++;
1636                 hc->halt_pending = 0;
1637                 halt_channel(hcd, hc, qtd, hc->halt_status);
1638                 return 0;
1639         }
1640
1641         return 1;
1642 }
1643 #endif
1644
1645 /**
1646  * Handles a host Channel Halted interrupt in DMA mode. This handler
1647  * determines the reason the channel halted and proceeds accordingly.
1648  */
1649 static void handle_hc_chhltd_intr_dma(dwc_otg_hcd_t *hcd,
1650                                       dwc_hc_t *hc,
1651                                       dwc_otg_hc_regs_t *hc_regs,
1652                                       dwc_otg_qtd_t *qtd)
1653 {
1654         hcint_data_t hcint;
1655         hcintmsk_data_t hcintmsk;
1656         int out_nak_enh = 0;
1657
1658         /* For cores with the OUT NAK enhancement, the flow for high-
1659          * speed CONTROL/BULK OUT is handled a little differently.
1660          */
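        /* 0x4F54271A is the GSNPSID value for core release 2.71a, the
         * first release assumed to include the OUT NAK enhancement. */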
1661         if (hcd->core_if->snpsid >= 0x4F54271A) {
1662                 if (hc->speed == DWC_OTG_EP_SPEED_HIGH && !hc->ep_is_in &&
1663                     (hc->ep_type == DWC_OTG_EP_TYPE_CONTROL ||
1664                      hc->ep_type == DWC_OTG_EP_TYPE_BULK)) {
1665                         DWC_DEBUGPL(DBG_HCD_FLOOD, "OUT NAK enhancement enabled\n");
1666                         out_nak_enh = 1;
1667                 } else {
1668                         DWC_DEBUGPL(DBG_HCD_FLOOD, "OUT NAK enhancement disabled, not HS Ctrl/Bulk OUT EP\n");
1669                 }
1670         } else {
1671                 DWC_DEBUGPL(DBG_HCD_FLOOD, "OUT NAK enhancement disabled, no core support\n");
1672         }
1673
1674         if (hc->halt_status == DWC_OTG_HC_XFER_NAK) {
1675                 /* The channel was nakking and halted to free up the
1676                  * channel for another transfer. If this channel has
1677                  * already received data, we need to skip that amount on
1678                  * the next try.
1679                  */
1680                 update_urb_state_xfer_intr(hc, hc_regs, qtd->urb,
1681                                                    qtd, DWC_OTG_HC_XFER_NAK);
1682
1683                 save_data_toggle(hc, hc_regs, qtd);
1684
1685                 /* It turns out that sometimes a channel is halted just
1686                  * as it receives its last packet. This causes the core
1687                  * to trigger a channel halted interrupt without a
1688                  * transfer complete flag, even though the transfer is
1689                  * actually complete. If we don't handle that here, the
1690                  * qtd will be resubmitted and, since bulk IN can't have
1691                  * empty packets, this will cause one full packet of
1692                  * "extra" data to be transferred. So we check here to
1693                  * see if the transfer is complete and handle that
1694                  * accordingly.
1695                  */
1696                 if (usb_pipebulk(qtd->urb->pipe) &&
1697                     usb_pipein(qtd->urb->pipe) &&
1698                     qtd->urb->actual_length == qtd->urb->transfer_buffer_length) {
1699                         dwc_otg_hcd_complete_urb(hcd, qtd->urb, 0);
1700                         complete_non_periodic_xfer(hcd, hc, hc_regs, qtd, DWC_OTG_HC_XFER_URB_COMPLETE);
1701                 } else {
1702                         release_channel(hcd, hc, qtd, hc->halt_status);
1703                 }
1704                 return;
1705         }
1706
1707         if (hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE ||
1708             hc->halt_status == DWC_OTG_HC_XFER_AHB_ERR) {
1709                 /*
1710                  * Just release the channel. A dequeue can happen on a
1711                  * transfer timeout. In the case of an AHB Error, the channel
1712                  * was forced to halt because there's no way to gracefully
1713                  * recover.
1714                  */
1715                 release_channel(hcd, hc, qtd, hc->halt_status);
1716                 return;
1717         }
1718
1719         /* Read the HCINTn register to determine the cause for the halt. */
1720         hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
1721         hcintmsk.d32 = dwc_read_reg32(&hc_regs->hcintmsk);
1722
1723         if (hcint.b.xfercomp) {
1724                 /** @todo This is here because of a possible hardware bug.  Spec
1725                  * says that on SPLIT-ISOC OUT transfers in DMA mode that a HALT
1726                  * interrupt w/ACK bit set should occur, but I only see the
1727                  * XFERCOMP bit, even with it masked out.  This is a workaround
1728                  * for that behavior.  Should fix this when hardware is fixed.
1729                  */
1730                 if (hc->ep_type == DWC_OTG_EP_TYPE_ISOC && !hc->ep_is_in) {
1731                         handle_hc_ack_intr(hcd, hc, hc_regs, qtd);
1732                 }
1733                 handle_hc_xfercomp_intr(hcd, hc, hc_regs, qtd);
1734         } else if (hcint.b.stall) {
1735                 handle_hc_stall_intr(hcd, hc, hc_regs, qtd);
1736         } else if (hcint.b.xacterr) {
1737                 if (out_nak_enh) {
1738                         if (hcint.b.nyet || hcint.b.nak || hcint.b.ack) {
1739                                 printk(KERN_DEBUG "XactErr with NYET/NAK/ACK\n");
1740                                 qtd->error_count = 0;
1741                         } else {
1742                                 printk(KERN_DEBUG "XactErr without NYET/NAK/ACK\n");
1743                         }
1744                 }
1745
1746                 /*
1747                  * Must handle xacterr before nak or ack. Could get a xacterr
1748                  * at the same time as either of these on a BULK/CONTROL OUT
1749                  * that started with a PING. The xacterr takes precedence.
1750                  */
1751                 handle_hc_xacterr_intr(hcd, hc, hc_regs, qtd);
1752         } else if (hcint.b.datatglerr) {
1753                 handle_hc_datatglerr_intr(hcd, hc, hc_regs, qtd);
1754         } else if (!out_nak_enh) {
1755                 if (hcint.b.nyet) {
1756                         /*
1757                          * Must handle nyet before nak or ack. Could get a nyet at the
1758                          * same time as either of those on a BULK/CONTROL OUT that
1759                          * started with a PING. The nyet takes precedence.
1760                          */
1761                         handle_hc_nyet_intr(hcd, hc, hc_regs, qtd);
1762                 } else if (hcint.b.bblerr) {
1763                         handle_hc_babble_intr(hcd, hc, hc_regs, qtd);
1764                 } else if (hcint.b.frmovrun) {
1765                         handle_hc_frmovrun_intr(hcd, hc, hc_regs, qtd);
1766                 } else if (hcint.b.nak && !hcintmsk.b.nak) {
1767                         /*
1768                          * If nak is not masked, it's because a non-split IN transfer
1769                          * is in an error state. In that case, the nak is handled by
1770                          * the nak interrupt handler, not here. Handle nak here for
1771                          * BULK/CONTROL OUT transfers, which halt on a NAK to allow
1772                          * rewinding the buffer pointer.
1773                          */
1774                         handle_hc_nak_intr(hcd, hc, hc_regs, qtd);
1775                 } else if (hcint.b.ack && !hcintmsk.b.ack) {
1776                         /*
1777                          * If ack is not masked, it's because a non-split IN transfer
1778                          * is in an error state. In that case, the ack is handled by
1779                          * the ack interrupt handler, not here. Handle ack here for
1780                          * split transfers. Start splits halt on ACK.
1781                          */
1782                         handle_hc_ack_intr(hcd, hc, hc_regs, qtd);
1783                 } else {
1784                         if (hc->ep_type == DWC_OTG_EP_TYPE_INTR ||
1785                             hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
1786                                 /*
1787                                  * A periodic transfer halted with no other channel
1788                                  * interrupts set. Assume it was halted by the core
1789                                  * because it could not be completed in its scheduled
1790                                  * (micro)frame.
1791                                  */
1792 #ifdef DEBUG
1793                                 DWC_PRINT("%s: Halt channel %d (assume incomplete periodic transfer)\n",
1794                                           __func__, hc->hc_num);
1795 #endif
1796                                 halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_PERIODIC_INCOMPLETE);
1797                         } else {
1798                                 DWC_ERROR("%s: Channel %d, DMA Mode -- ChHltd set, but reason "
1799                                           "for halting is unknown, hcint 0x%08x, intsts 0x%08x\n",
1800                                           __func__, hc->hc_num, hcint.d32,
1801                                           dwc_read_reg32(&hcd->core_if->core_global_regs->gintsts));
1802                         }
1803                 }
1804         } else {
1805                 printk(KERN_DEBUG "NYET/NAK/ACK/other in non-error case, 0x%08x\n", hcint.d32);
1806         }
1807 }
1808
1809 /**
1810  * Handles a host channel Channel Halted interrupt.
1811  *
1812  * In slave mode, this handler is called only when the driver specifically
1813  * requests a halt. This occurs during handling other host channel interrupts
1814  * (e.g. nak, xacterr, stall, nyet, etc.).
1815  *
1816  * In DMA mode, this is the interrupt that occurs when the core has finished
1817  * processing a transfer on a channel. Other host channel interrupts (except
1818  * ahberr) are disabled in DMA mode.
1819  */
1820 static int32_t handle_hc_chhltd_intr(dwc_otg_hcd_t *hcd,
1821                                      dwc_hc_t *hc,
1822                                      dwc_otg_hc_regs_t *hc_regs,
1823                                      dwc_otg_qtd_t *qtd)
1824 {
1825         DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
1826                     "Channel Halted--\n", hc->hc_num);
1827
1828         if (hcd->core_if->dma_enable) {
1829                 handle_hc_chhltd_intr_dma(hcd, hc, hc_regs, qtd);
1830         } else {
1831 #ifdef DEBUG
1832                 if (!halt_status_ok(hcd, hc, hc_regs, qtd)) {
1833                         return 1;
1834                 }
1835 #endif
1836                 release_channel(hcd, hc, qtd, hc->halt_status);
1837         }
1838
1839         return 1;
1840 }
1841
1842 /** Handles interrupt for a specific Host Channel */
1843 int32_t dwc_otg_hcd_handle_hc_n_intr(dwc_otg_hcd_t *dwc_otg_hcd, uint32_t num)
1844 {
1845         int retval = 0;
1846         hcint_data_t hcint;
1847         hcintmsk_data_t hcintmsk;
1848         dwc_hc_t *hc;
1849         dwc_otg_hc_regs_t *hc_regs;
1850         dwc_otg_qtd_t *qtd;
1851
1852         DWC_DEBUGPL(DBG_HCDV, "--Host Channel Interrupt--, Channel %d\n", num);
1853
1854         hc = dwc_otg_hcd->hc_ptr_array[num];
1855
1856         check_nakking(dwc_otg_hcd, __FUNCTION__,  "start");
1857
1858
1859         hc_regs = dwc_otg_hcd->core_if->host_if->hc_regs[num];
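        /* The qtd at the head of the qh's qtd list is the transfer
         * currently assigned to this channel. */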
1860         qtd = list_entry(hc->qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry);
1861
1862         hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
1863         hcintmsk.d32 = dwc_read_reg32(&hc_regs->hcintmsk);
1864         DWC_DEBUGPL(DBG_HCDV, "  hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
1865                     hcint.d32, hcintmsk.d32, (hcint.d32 & hcintmsk.d32));
1866
1867         hcint.d32 = hcint.d32 & hcintmsk.d32;
1868
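        /*
         * In Slave mode, only act on chhltd when it is the sole pending
         * interrupt (hcint == 0x2, just the ChHltd bit); otherwise let the
         * handlers for the other interrupt sources run first.
         */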
1869         if (!dwc_otg_hcd->core_if->dma_enable) {
1870                 if (hcint.b.chhltd && hcint.d32 != 0x2) {
1871                         hcint.b.chhltd = 0;
1872                 }
1873         }
1874
1875         if (hcint.b.xfercomp) {
1876                 retval |= handle_hc_xfercomp_intr(dwc_otg_hcd, hc, hc_regs, qtd);
1877                 /*
1878                  * If NYET occurred at same time as Xfer Complete, the NYET is
1879                  * handled by the Xfer Complete interrupt handler. Don't want
1880                  * to call the NYET interrupt handler in this case.
1881                  */
1882                 hcint.b.nyet = 0;
1883         }
1884         if (hcint.b.chhltd) {
1885                 retval |= handle_hc_chhltd_intr(dwc_otg_hcd, hc, hc_regs, qtd);
1886         }
1887         if (hcint.b.ahberr) {
1888                 retval |= handle_hc_ahberr_intr(dwc_otg_hcd, hc, hc_regs, qtd);
1889         }
1890         if (hcint.b.stall) {
1891                 retval |= handle_hc_stall_intr(dwc_otg_hcd, hc, hc_regs, qtd);
1892         }
1893         if (hcint.b.nak) {
1894                 retval |= handle_hc_nak_intr(dwc_otg_hcd, hc, hc_regs, qtd);
1895         }
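        /*
         * If the channel also halted, any ACK is dealt with as part of the
         * channel-halted handling above, so skip the separate ACK handler.
         */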
1896         if (hcint.b.ack && !hcint.b.chhltd) {
1897                 retval |= handle_hc_ack_intr(dwc_otg_hcd, hc, hc_regs, qtd);
1898         }
1899         if (hcint.b.nyet) {
1900                 retval |= handle_hc_nyet_intr(dwc_otg_hcd, hc, hc_regs, qtd);
1901         }
1902         if (hcint.b.xacterr) {
1903                 retval |= handle_hc_xacterr_intr(dwc_otg_hcd, hc, hc_regs, qtd);
1904         }
1905         if (hcint.b.bblerr) {
1906                 retval |= handle_hc_babble_intr(dwc_otg_hcd, hc, hc_regs, qtd);
1907         }
1908         if (hcint.b.frmovrun) {
1909                 retval |= handle_hc_frmovrun_intr(dwc_otg_hcd, hc, hc_regs, qtd);
1910         }
1911         if (hcint.b.datatglerr) {
1912                 retval |= handle_hc_datatglerr_intr(dwc_otg_hcd, hc, hc_regs, qtd);
1913         }
1914         if (check_nakking(dwc_otg_hcd, __FUNCTION__,  "end")) {
1915                 DWC_WARN("--Host Channel Interrupt--, Channel %d\n", num);
1916                 DWC_WARN("  hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
1917                     hcint.d32, hcintmsk.d32, (hcint.d32 & hcintmsk.d32));
1918         }
1919
1920         return retval;
1921 }
1922
1923 #endif /* DWC_DEVICE_ONLY */