1 /* ==========================================================================
2 * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_hcd.c $
7 * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
8 * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
9 * otherwise expressly agreed to in writing between Synopsys and you.
11 * The Software IS NOT an item of Licensed Software or Licensed Product under
12 * any End User Software License Agreement or Agreement for Licensed Product
13 * with Synopsys or any supplement thereto. You are permitted to use and
14 * redistribute this Software in source and binary forms, with or without
15 * modification, provided that redistributions of source code must retain this
16 * notice. You may not view, use, disclose, copy or distribute this file or
17 * any information contained herein except pursuant to this license grant from
18 * Synopsys. If you do not agree with this notice, including the disclaimer
19 * below, then you are not authorized to use the Software.
21 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
25 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
32 * ========================================================================== */
33 #ifndef DWC_DEVICE_ONLY
38 * This file contains the implementation of the HCD. In Linux, the HCD
39 * implements the hc_driver API.
41 #include <linux/kernel.h>
42 #include <linux/module.h>
43 #include <linux/moduleparam.h>
44 #include <linux/init.h>
45 #include <linux/device.h>
46 #include <linux/platform_device.h>
47 #include <linux/errno.h>
48 #include <linux/list.h>
49 #include <linux/interrupt.h>
50 #include <linux/string.h>
51 #include <linux/dma-mapping.h>
52 #include <linux/version.h>
54 #include <mach/irqs.h>
56 #include "otg_driver.h"
/* Driver name string registered with the Linux USB HCD framework. */
60 static const char dwc_otg_hcd_name[] = "dwc_otg_hcd";
/*
 * hc_driver operations table handed to the Linux USB core.  Maps the
 * generic host-controller entry points onto the DWC OTG implementations
 * defined in this file.  HCD_MEMORY | HCD_USB2 marks a memory-mapped
 * USB 2.0 controller.
 * NOTE(review): the closing "};" of this initializer is not visible in
 * this excerpt (original line numbering is non-contiguous).
 */
62 static const struct hc_driver dwc_otg_hc_driver = {
64 .description = dwc_otg_hcd_name,
65 .product_desc = "DWC OTG Controller",
/* Per-HCD private area sized for the DWC OTG state (dwc_otg_hcd_t). */
66 .hcd_priv_size = sizeof(dwc_otg_hcd_t),
67 .irq = dwc_otg_hcd_irq,
68 .flags = HCD_MEMORY | HCD_USB2,
69 .start = dwc_otg_hcd_start,
70 .stop = dwc_otg_hcd_stop,
71 .urb_enqueue = dwc_otg_hcd_urb_enqueue,
72 .urb_dequeue = dwc_otg_hcd_urb_dequeue,
73 .endpoint_disable = dwc_otg_hcd_endpoint_disable,
74 .get_frame_number = dwc_otg_hcd_get_frame_number,
75 .hub_status_data = dwc_otg_hcd_hub_status_data,
76 .hub_control = dwc_otg_hcd_hub_control,
80 * Work queue function for starting the HCD when A-Cable is connected.
81 * The dwc_otg_hcd_start() must be called in a process context.
83 static void hcd_start_func(struct work_struct *_work)
/* Recover the dwc_otg_hcd that embeds this delayed_work, then the
 * enclosing usb_hcd via its hcd_priv member. */
85 struct delayed_work *dw = container_of(_work, struct delayed_work, work);
86 struct dwc_otg_hcd *otg_hcd = container_of(dw, struct dwc_otg_hcd, start_work);
87 struct usb_hcd *usb_hcd = container_of((void *)otg_hcd, struct usb_hcd, hcd_priv);
88 DWC_DEBUGPL(DBG_HCDV, "%s() %p\n", __func__, usb_hcd);
/* Safe to call here: work queues run in process context. */
90 dwc_otg_hcd_start(usb_hcd);
95 * HCD Callback function for starting the HCD when A-Cable is
98 * @param p void pointer to the <code>struct usb_hcd</code>
100 static int32_t dwc_otg_hcd_start_cb(void *p)
102 dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(p);
103 dwc_otg_core_if_t *core_if = dwc_otg_hcd->core_if;
/* When the core acts as a B-host (HNP role switch), the port reset has
 * tight timing requirements, so issue it directly here. */
106 if (core_if->op_state == B_HOST) {
108 * Reset the port. During a HNP mode switch the reset
109 * needs to occur within 1ms and have a duration of at
112 hprt0.d32 = dwc_otg_read_hprt0(core_if);
114 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
115 ((struct usb_hcd *)p)->self.is_b_host = 1;
117 ((struct usb_hcd *)p)->self.is_b_host = 0;
120 /* Need to start the HCD in a non-interrupt context. */
121 // INIT_WORK(&dwc_otg_hcd->start_work, hcd_start_func);
122 INIT_DELAYED_WORK(&dwc_otg_hcd->start_work, hcd_start_func);
123 // schedule_work(&dwc_otg_hcd->start_work);
/* Defer hcd_start_func() by 50 ms on the OTG work queue. */
124 queue_delayed_work(core_if->wq_otg, &dwc_otg_hcd->start_work, 50 * HZ / 1000);
130 * HCD Callback function for stopping the HCD.
132 * @param p void pointer to the <code>struct usb_hcd</code>
134 static int32_t dwc_otg_hcd_stop_cb(void *p)
136 struct usb_hcd *usb_hcd = (struct usb_hcd *)p;
137 DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, p);
/* Delegate the actual shutdown to the hc_driver stop routine. */
138 dwc_otg_hcd_stop(usb_hcd);
/* Delete the per-host-channel transfer timeout timers. */
142 static void del_xfer_timers(dwc_otg_hcd_t *hcd)
146 int num_channels = hcd->core_if->core_params->host_channels;
147 for (i = 0; i < num_channels; i++) {
148 del_timer(&hcd->core_if->hc_xfer_timer[i]);
/* Delete all HCD timers: the per-channel transfer timers and the
 * connection timeout timer. */
153 static void del_timers(dwc_otg_hcd_t *hcd)
155 del_xfer_timers(hcd);
156 del_timer(&hcd->conn_timer);
160 * Processes all the URBs in a single list of QHs. Completes them with
161 * -ETIMEDOUT and frees the QTD.
163 static void kill_urbs_in_qh_list(dwc_otg_hcd_t *hcd, struct list_head *qh_list)
165 struct list_head *qh_item;
167 struct list_head *qtd_item;
171 SPIN_LOCK_IRQSAVE(&hcd->lock, flags);
172 list_for_each(qh_item, qh_list) {
173 qh = list_entry(qh_item, dwc_otg_qh_t, qh_list_entry);
/* Walk the QH's QTD list by re-reading .next each iteration, since the
 * current QTD is removed and freed inside the loop. */
174 for (qtd_item = qh->qtd_list.next;
175 qtd_item != &qh->qtd_list;
176 qtd_item = qh->qtd_list.next) {
177 qtd = list_entry(qtd_item, dwc_otg_qtd_t, qtd_list_entry);
178 if (qtd->urb != NULL) {
/* The HCD lock is dropped around the URB completion — presumably
 * because completion may call back into the HCD / URB giveback
 * path; confirm against dwc_otg_hcd_complete_urb(). */
179 SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags);
180 dwc_otg_hcd_complete_urb(hcd, qtd->urb,
182 SPIN_LOCK_IRQSAVE(&hcd->lock, flags);
184 dwc_otg_hcd_qtd_remove_and_free(hcd, qtd);
187 SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags);
191 * Responds with an error status of ETIMEDOUT to all URBs in the non-periodic
192 * and periodic schedules. The QTD associated with each URB is removed from
193 * the schedule and freed. This function may be called when a disconnect is
194 * detected or when the HCD is being stopped.
196 static void kill_all_urbs(dwc_otg_hcd_t *hcd)
/* Sweep every schedule list maintained by the HCD. */
198 kill_urbs_in_qh_list(hcd, &hcd->non_periodic_sched_inactive);
199 kill_urbs_in_qh_list(hcd, &hcd->non_periodic_sched_active);
200 kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_inactive);
201 kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_ready);
202 kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_assigned);
203 kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_queued);
207 * HCD Callback function for disconnect of the HCD.
209 * @param p void pointer to the <code>struct usb_hcd</code>
211 static int32_t dwc_otg_hcd_disconnect_cb(void *p)
214 dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(p);
216 //DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, p);
219 * Set status flags for the hub driver.
221 dwc_otg_hcd->flags.b.port_connect_status_change = 1;
222 dwc_otg_hcd->flags.b.port_connect_status = 0;
225 * Shutdown any transfers in process by clearing the Tx FIFO Empty
226 * interrupt mask and status bits and disabling subsequent host
227 * channel interrupts.
230 intr.b.nptxfempty = 1;
231 intr.b.ptxfempty = 1;
/* Clear the bits in both the interrupt mask and the status register. */
233 dwc_modify_reg32(&dwc_otg_hcd->core_if->core_global_regs->gintmsk, intr.d32, 0);
234 dwc_modify_reg32(&dwc_otg_hcd->core_if->core_global_regs->gintsts, intr.d32, 0);
236 del_timers(dwc_otg_hcd);
239 * Turn off the vbus power only if the core has transitioned to device
240 * mode. If still in host mode, need to keep power on to detect a
243 if (dwc_otg_is_device_mode(dwc_otg_hcd->core_if)) {
244 if (dwc_otg_hcd->core_if->op_state != A_SUSPEND) {
245 hprt0_data_t hprt0 = { .d32=0 };
246 DWC_PRINT("Disconnect: PortPower off\n");
/* hprt0 written with port power bit cleared (d32 initialized to 0). */
248 dwc_write_reg32(dwc_otg_hcd->core_if->host_if->hprt0, hprt0.d32);
251 dwc_otg_disable_host_interrupts(dwc_otg_hcd->core_if);
254 /* Respond with an error status to all URBs in the schedule. */
255 kill_all_urbs(dwc_otg_hcd);
257 if (dwc_otg_is_host_mode(dwc_otg_hcd->core_if)) {
258 /* Clean up any host channels that were in use. */
262 dwc_otg_hc_regs_t *hc_regs;
263 hcchar_data_t hcchar;
265 num_channels = dwc_otg_hcd->core_if->core_params->host_channels;
267 if (!dwc_otg_hcd->core_if->dma_enable) {
268 /* Flush out any channel requests in slave mode. */
269 for (i = 0; i < num_channels; i++) {
270 channel = dwc_otg_hcd->hc_ptr_array[i];
/* list_empty() on hc_list_entry itself is true when the entry is
 * self-linked, i.e. the channel is NOT on the free list (in use) —
 * confirm against how hc_list_entry is maintained elsewhere. */
271 if (list_empty(&channel->hc_list_entry)) {
272 hc_regs = dwc_otg_hcd->core_if->host_if->hc_regs[i];
273 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
278 dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
/* Second pass: halt every in-use channel and return it to the
 * free list after cleanup. */
284 for (i = 0; i < num_channels; i++) {
285 channel = dwc_otg_hcd->hc_ptr_array[i];
286 if (list_empty(&channel->hc_list_entry)) {
287 hc_regs = dwc_otg_hcd->core_if->host_if->hc_regs[i];
288 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
290 /* Halt the channel. */
292 dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
295 dwc_otg_hc_cleanup(dwc_otg_hcd->core_if, channel);
296 list_add_tail(&channel->hc_list_entry,
297 &dwc_otg_hcd->free_hc_list);
302 /* A disconnect will end the session so the B-Device is no
303 * longer a B-host. */
304 ((struct usb_hcd *)p)->self.is_b_host = 0;
309 * Connection timeout function. An OTG host is required to display a
310 * message if the device does not connect within 10 seconds.
312 void dwc_otg_hcd_connect_timeout(unsigned long ptr)
314 DWC_DEBUGPL(DBG_HCDV, "%s(%x)\n", __func__, (int)ptr);
315 DWC_PRINT("Connect Timeout\n");
/* Only logs the required message; no recovery action is taken here. */
316 DWC_ERROR("Device Not Connected/Responding\n");
320 * Start the connection timer. An OTG host is required to display a
321 * message if the device does not connect within 10 seconds. The
322 * timer is deleted if a port connect interrupt occurs before the
325 static void dwc_otg_hcd_start_connect_timer(dwc_otg_hcd_t *hcd)
327 init_timer(&hcd->conn_timer);
328 hcd->conn_timer.function = dwc_otg_hcd_connect_timeout;
329 hcd->conn_timer.data = 0;
/* Fire dwc_otg_hcd_connect_timeout() 10 seconds from now. */
330 hcd->conn_timer.expires = jiffies + (HZ * 10);
331 add_timer(&hcd->conn_timer);
335 * HCD Callback function for session start of the HCD.  Arms the 10 s
335 * connection timeout timer.
337 * @param p void pointer to the <code>struct usb_hcd</code>
339 static int32_t dwc_otg_hcd_session_start_cb(void *p)
341 dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(p);
342 DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, p);
343 dwc_otg_hcd_start_connect_timer(dwc_otg_hcd);
348 * HCD Callback structure for handling mode switching.
/* Registered with the CIL (core interface layer) in dwc_otg_hcd_init()
 * via dwc_otg_cil_register_hcd_callbacks(). */
350 static dwc_otg_cil_callbacks_t hcd_cil_callbacks = {
351 .start = dwc_otg_hcd_start_cb,
352 .stop = dwc_otg_hcd_stop_cb,
353 .disconnect = dwc_otg_hcd_disconnect_cb,
354 .session_start = dwc_otg_hcd_session_start_cb,
359 * Reset tasklet function
361 static void reset_tasklet_func(unsigned long data)
363 dwc_otg_hcd_t *dwc_otg_hcd = (dwc_otg_hcd_t *)data;
364 dwc_otg_core_if_t *core_if = dwc_otg_hcd->core_if;
367 DWC_DEBUGPL(DBG_HCDV, "USB RESET tasklet called\n");
369 hprt0.d32 = dwc_otg_read_hprt0(core_if);
/* Two HPRT0 writes — presumably asserting then de-asserting the port
 * reset bit, with the intervening lines not visible in this excerpt;
 * confirm against the full source. */
371 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
375 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
/* Tell the hub driver that a port reset change occurred. */
376 dwc_otg_hcd->flags.b.port_reset_change = 1;
/* Statically initialized tasklet; its .data is filled in with the
 * dwc_otg_hcd pointer during dwc_otg_hcd_init(). */
379 static struct tasklet_struct reset_tasklet = {
382 .count = ATOMIC_INIT(0),
383 .func = reset_tasklet_func,
388 * Initializes the HCD. This function allocates memory for and initializes the
389 * static parts of the usb_hcd and dwc_otg_hcd structures. It also registers the
390 * USB bus with the core and calls the hc_driver->start() function. It returns
391 * a negative error on failure.
393 int dwc_otg_hcd_init(struct platform_device *pdev)
395 struct usb_hcd *hcd = NULL;
396 dwc_otg_hcd_t *dwc_otg_hcd = NULL;
397 dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev);
405 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD INIT\n");
407 /* Set device flags indicating whether the HCD supports DMA. */
408 if (otg_dev->core_if->dma_enable) {
409 DWC_PRINT("Using DMA mode\n");
411 if (otg_dev->core_if->dma_desc_enable) {
412 DWC_PRINT("Device using Descriptor DMA mode\n");
414 DWC_PRINT("Device using Buffer DMA mode\n");
418 * Allocate memory for the base HCD plus the DWC OTG HCD.
419 * Initialize the base HCD.
422 hcd = usb_create_hcd(&dwc_otg_hc_driver, &pdev->dev, "gadget");
428 hcd->regs = otg_dev->base;
429 hcd->self.otg_port = 1;
431 /* Integrate TT in root hub, by default this is disbled. */
434 /* Initialize the DWC OTG HCD. */
435 dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
436 dwc_otg_hcd->core_if = otg_dev->core_if;
437 otg_dev->hcd = dwc_otg_hcd;
438 init_hcd_usecs(dwc_otg_hcd);
441 spin_lock_init(&dwc_otg_hcd->lock);
443 /* Register the HCD CIL Callbacks */
444 dwc_otg_cil_register_hcd_callbacks(otg_dev->core_if,
445 &hcd_cil_callbacks, hcd);
447 /* Initialize the non-periodic schedule. */
448 INIT_LIST_HEAD(&dwc_otg_hcd->non_periodic_sched_inactive);
449 INIT_LIST_HEAD(&dwc_otg_hcd->non_periodic_sched_active);
451 /* Initialize the periodic schedule. */
452 INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_inactive);
453 INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_ready);
454 INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_assigned);
455 INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_queued);
458 * Create a host channel descriptor for each host channel implemented
459 * in the controller. Initialize the channel descriptor array.
461 INIT_LIST_HEAD(&dwc_otg_hcd->free_hc_list);
462 num_channels = dwc_otg_hcd->core_if->core_params->host_channels;
463 memset(dwc_otg_hcd->hc_ptr_array, 0, sizeof(dwc_otg_hcd->hc_ptr_array));
464 for (i = 0; i < num_channels; i++) {
465 channel = kmalloc(sizeof(dwc_hc_t), GFP_KERNEL);
466 if (channel == NULL) {
468 DWC_ERROR("%s: host channel allocation failed\n", __func__);
471 memset(channel, 0, sizeof(dwc_hc_t));
473 dwc_otg_hcd->hc_ptr_array[i] = channel;
/* Per-channel transfer timeout timer, deleted in del_xfer_timers(). */
475 init_timer(&dwc_otg_hcd->core_if->hc_xfer_timer[i]);
477 DWC_DEBUGPL(DBG_HCDV, "HCD Added channel #%d, hc=%p\n", i, channel);
480 /* Initialize the Connection timeout timer. */
481 init_timer(&dwc_otg_hcd->conn_timer);
483 /* Initialize reset tasklet. */
484 reset_tasklet.data = (unsigned long) dwc_otg_hcd;
485 dwc_otg_hcd->reset_tasklet = &reset_tasklet;
488 * Finish generic HCD initialization and start the HCD. This function
489 * allocates the DMA buffer pool, registers the USB bus, requests the
490 * IRQ line, and calls dwc_otg_hcd_start method.
492 retval = usb_add_hcd(hcd, otg_dev->irq, IRQF_SHARED);
498 * Allocate space for storing data on status transactions. Normally no
499 * data is sent, but this space acts as a bit bucket. This must be
500 * done after usb_add_hcd since that function allocates the DMA buffer
/* DMA-capable controllers get a coherent buffer; slave-mode controllers
 * use a plain kmalloc buffer. */
503 if (otg_dev->core_if->dma_enable) {
504 dwc_otg_hcd->status_buf =
505 dma_alloc_coherent(&pdev->dev,
506 DWC_OTG_HCD_STATUS_BUF_SIZE,
507 &dwc_otg_hcd->status_buf_dma,
508 GFP_KERNEL | GFP_DMA);
510 dwc_otg_hcd->status_buf = kmalloc(DWC_OTG_HCD_STATUS_BUF_SIZE,
513 if (!dwc_otg_hcd->status_buf) {
515 DWC_ERROR("%s: status_buf allocation failed\n", __func__);
519 dwc_otg_hcd->otg_dev = otg_dev;
521 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Initialized HCD, usbbus=%d\n",
525 /* Error conditions */
/* Error path: release everything allocated so far. */
529 dwc_otg_hcd_free(hcd);
537 * Frees memory and resources associated with the HCD and deregisters the bus.
539 void dwc_otg_hcd_remove(struct platform_device *pdev)
541 dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev);
542 dwc_otg_hcd_t *dwc_otg_hcd;
545 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD REMOVE\n");
/* Defensive NULL checks: bail out quietly if the device was never
 * fully initialized. */
548 DWC_DEBUGPL(DBG_ANY, "%s: otg_dev NULL!\n", __func__);
552 dwc_otg_hcd = otg_dev->hcd;
555 DWC_DEBUGPL(DBG_ANY, "%s: otg_dev->hcd NULL!\n", __func__);
559 hcd = dwc_otg_hcd_to_hcd(dwc_otg_hcd);
562 DWC_DEBUGPL(DBG_ANY, "%s: dwc_otg_hcd_to_hcd(dwc_otg_hcd) NULL!\n", __func__);
566 /* Turn off all interrupts */
567 dwc_write_reg32(&dwc_otg_hcd->core_if->core_global_regs->gintmsk, 0);
/* Clear bit 0 of GAHBCFG (global interrupt enable). */
568 dwc_modify_reg32(&dwc_otg_hcd->core_if->core_global_regs->gahbcfg, 1, 0);
571 dwc_otg_hcd_free(hcd);
575 /* =========================================================================
576 * Linux HC Driver Functions
577 * ========================================================================= */
580 * Initializes dynamic portions of the DWC_otg HCD state.
582 static void hcd_reinit(dwc_otg_hcd_t *hcd)
584 struct list_head *item;
/* Reset scheduling state: round-robin pointer and channel counters. */
591 hcd->non_periodic_qh_ptr = &hcd->non_periodic_sched_active;
592 hcd->non_periodic_channels = 0;
593 hcd->periodic_channels = 0;
594 hcd->nakking_channels = 0;
597 * Put all channels in the free channel list and clean up channel
/* Drain the free list first (re-reading .next because entries are
 * unlinked inside the loop), then rebuild it from hc_ptr_array. */
600 item = hcd->free_hc_list.next;
601 while (item != &hcd->free_hc_list) {
603 item = hcd->free_hc_list.next;
605 num_channels = hcd->core_if->core_params->host_channels;
606 for (i = 0; i < num_channels; i++) {
607 channel = hcd->hc_ptr_array[i];
608 list_add_tail(&channel->hc_list_entry, &hcd->free_hc_list);
609 dwc_otg_hc_cleanup(hcd->core_if, channel);
612 /* Initialize the DWC core for host mode operation. */
613 dwc_otg_core_host_init(hcd->core_if);
616 /** Initializes the DWC_otg controller and its root hub and prepares it for host
617 * mode operation. Activates the root port. Returns 0 on success and a negative
618 * error code on failure. */
619 int dwc_otg_hcd_start(struct usb_hcd *hcd)
621 dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
622 dwc_otg_core_if_t *core_if = dwc_otg_hcd->core_if;
626 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD START\n");
628 bus = hcd_to_bus(hcd);
630 /* Initialize the bus state. If the core is in Device Mode
631 * HALT the USB bus and return. */
632 if (dwc_otg_is_device_mode(core_if)) {
633 hcd->state = HC_STATE_RUNNING;
636 hcd->state = HC_STATE_RUNNING;
638 /* Initialize and connect root hub if one is not already attached */
640 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Has Root Hub\n");
641 /* Inform the HUB driver to resume. */
642 usb_hcd_resume_root_hub(hcd);
645 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Does Not Have Root Hub\n");
/* Re-initialize the dynamic HCD state and the core for host mode. */
648 hcd_reinit(dwc_otg_hcd);
/* Kills any remaining URBs on the given QH list, then removes and frees
 * every QH on it.  Safe to call on a never-initialized list. */
653 static void qh_list_free(dwc_otg_hcd_t *hcd, struct list_head *qh_list)
655 struct list_head *item;
659 if (!qh_list->next) {
660 /* The list hasn't been initialized yet. */
664 /* Ensure there are no QTDs or URBs left. */
665 kill_urbs_in_qh_list(hcd, qh_list);
667 SPIN_LOCK_IRQSAVE(&hcd->lock, flags);
/* Re-read .next each iteration because the current QH is freed. */
668 for (item = qh_list->next; item != qh_list; item = qh_list->next) {
669 qh = list_entry(item, dwc_otg_qh_t, qh_list_entry);
670 dwc_otg_hcd_qh_remove_and_free(hcd, qh);
672 SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags);
676 * Halts the DWC_otg host mode operations in a clean manner. USB transfers are
679 void dwc_otg_hcd_stop(struct usb_hcd *hcd)
681 dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd)
682 hprt0_data_t hprt0 = { .d32=0 };
684 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD STOP\n");
686 /* Turn off all host-specific interrupts. */
687 dwc_otg_disable_host_interrupts(dwc_otg_hcd->core_if);
690 * The root hub should be disconnected before this function is called.
691 * The disconnect will clear the QTD lists (via ..._hcd_urb_dequeue)
692 * and the QH lists (via ..._hcd_endpoint_disable).
695 /* Turn off the vbus power */
696 DWC_PRINT("PortPower off\n");
/* hprt0.d32 is 0, so this write clears the port power bit. */
698 dwc_write_reg32(dwc_otg_hcd->core_if->host_if->hprt0, hprt0.d32);
701 /** Returns the current frame number. */
702 int dwc_otg_hcd_get_frame_number(struct usb_hcd *hcd)
704 dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
/* Read the frame number field from the HFNUM host register. */
707 hfnum.d32 = dwc_read_reg32(&dwc_otg_hcd->core_if->
708 host_if->host_global_regs->hfnum);
711 DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD GET FRAME NUMBER %d\n", hfnum.b.frnum);
713 return hfnum.b.frnum;
717 * Frees secondary storage associated with the dwc_otg_hcd structure contained
718 * in the struct usb_hcd field.
720 void dwc_otg_hcd_free(struct usb_hcd *hcd)
722 dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
725 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD FREE\n");
/* Stop all pending timers before tearing down the data structures. */
727 del_timers(dwc_otg_hcd);
729 /* Free memory for QH/QTD lists */
730 qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->non_periodic_sched_inactive);
731 qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->non_periodic_sched_active);
732 qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_inactive);
733 qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_ready);
734 qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_assigned);
735 qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_queued);
737 /* Free memory for the host channels. */
738 for (i = 0; i < MAX_EPS_CHANNELS; i++) {
739 dwc_hc_t *hc = dwc_otg_hcd->hc_ptr_array[i];
741 DWC_DEBUGPL(DBG_HCDV, "HCD Free channel #%i, hc=%p\n", i, hc);
/* Release the status buffer with the allocator that created it:
 * coherent DMA buffer in DMA mode, kmalloc buffer otherwise. */
746 if (dwc_otg_hcd->core_if->dma_enable) {
747 if (dwc_otg_hcd->status_buf_dma) {
748 dma_free_coherent(hcd->self.controller,
749 DWC_OTG_HCD_STATUS_BUF_SIZE,
750 dwc_otg_hcd->status_buf,
751 dwc_otg_hcd->status_buf_dma);
753 } else if (dwc_otg_hcd->status_buf != NULL) {
754 kfree(dwc_otg_hcd->status_buf);
/* Debug helper: prints all interesting fields of an URB (addressing,
 * pipe type, speed, buffers, and ISO descriptors if applicable). */
759 static void dump_urb_info(struct urb *urb, char* fn_name)
761 DWC_PRINT("%s, urb %p\n", fn_name, urb);
762 DWC_PRINT(" Device address: %d\n", usb_pipedevice(urb->pipe));
763 DWC_PRINT(" Endpoint: %d, %s\n", usb_pipeendpoint(urb->pipe),
764 (usb_pipein(urb->pipe) ? "IN" : "OUT"));
765 DWC_PRINT(" Endpoint type: %s\n",
/* Map the pipe type to a printable name. */
767 switch (usb_pipetype(urb->pipe)) {
768 case PIPE_CONTROL: pipetype = "CONTROL"; break;
769 case PIPE_BULK: pipetype = "BULK"; break;
770 case PIPE_INTERRUPT: pipetype = "INTERRUPT"; break;
771 case PIPE_ISOCHRONOUS: pipetype = "ISOCHRONOUS"; break;
772 default: pipetype = "UNKNOWN"; break;
774 DWC_PRINT(" Speed: %s\n",
776 switch (urb->dev->speed) {
777 case USB_SPEED_HIGH: speed = "HIGH"; break;
778 case USB_SPEED_FULL: speed = "FULL"; break;
779 case USB_SPEED_LOW: speed = "LOW"; break;
780 default: speed = "UNKNOWN"; break;
782 DWC_PRINT(" Max packet size: %d\n",
783 usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)));
784 DWC_PRINT(" Data buffer length: %d\n", urb->transfer_buffer_length);
785 DWC_PRINT(" Transfer buffer: %p, Transfer DMA: %p\n",
786 urb->transfer_buffer, (void *)urb->transfer_dma);
787 DWC_PRINT(" Setup buffer: %p, Setup DMA: %p\n",
788 urb->setup_packet, (void *)urb->setup_dma);
789 DWC_PRINT(" Interval: %d\n", urb->interval);
/* For isochronous URBs also dump every frame descriptor. */
790 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
792 for (i = 0; i < urb->number_of_packets; i++) {
793 DWC_PRINT(" ISO Desc %d:\n", i);
794 DWC_PRINT(" offset: %d, length %d\n",
795 urb->iso_frame_desc[i].offset,
796 urb->iso_frame_desc[i].length);
/* Debug helper: if the QH currently owns a host channel, dumps that
 * channel's registers and software state, plus the non-periodic
 * schedules and the full channel pointer table. */
801 static void dump_channel_info(dwc_otg_hcd_t *hcd,
804 if (qh->channel != NULL) {
805 dwc_hc_t *hc = qh->channel;
806 struct list_head *item;
807 dwc_otg_qh_t *qh_item;
808 int num_channels = hcd->core_if->core_params->host_channels;
811 dwc_otg_hc_regs_t *hc_regs;
812 hcchar_data_t hcchar;
813 hcsplt_data_t hcsplt;
814 hctsiz_data_t hctsiz;
/* Snapshot the channel's hardware registers. */
817 hc_regs = hcd->core_if->host_if->hc_regs[hc->hc_num];
818 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
819 hcsplt.d32 = dwc_read_reg32(&hc_regs->hcsplt);
820 hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz);
821 hcdma = dwc_read_reg32(&hc_regs->hcdma);
823 DWC_PRINT(" Assigned to channel %p:\n", hc);
824 DWC_PRINT(" hcchar 0x%08x, hcsplt 0x%08x\n", hcchar.d32, hcsplt.d32);
825 DWC_PRINT(" hctsiz 0x%08x, hcdma 0x%08x\n", hctsiz.d32, hcdma);
826 DWC_PRINT(" dev_addr: %d, ep_num: %d, ep_is_in: %d\n",
827 hc->dev_addr, hc->ep_num, hc->ep_is_in);
828 DWC_PRINT(" ep_type: %d\n", hc->ep_type);
829 DWC_PRINT(" max_packet: %d\n", hc->max_packet);
830 DWC_PRINT(" data_pid_start: %d\n", hc->data_pid_start);
831 DWC_PRINT(" xfer_started: %d\n", hc->xfer_started);
832 DWC_PRINT(" halt_status: %d\n", hc->halt_status);
833 DWC_PRINT(" xfer_buff: %p\n", hc->xfer_buff);
834 DWC_PRINT(" xfer_len: %d\n", hc->xfer_len);
835 DWC_PRINT(" qh: %p\n", hc->qh);
836 DWC_PRINT(" NP inactive sched:\n");
837 list_for_each(item, &hcd->non_periodic_sched_inactive) {
838 qh_item = list_entry(item, dwc_otg_qh_t, qh_list_entry);
839 DWC_PRINT(" %p\n", qh_item);
841 DWC_PRINT(" NP active sched:\n");
842 list_for_each(item, &hcd->non_periodic_sched_active) {
843 qh_item = list_entry(item, dwc_otg_qh_t, qh_list_entry);
844 DWC_PRINT(" %p\n", qh_item);
846 DWC_PRINT(" Channels: \n");
847 for (i = 0; i < num_channels; i++) {
/* Inner 'hc' intentionally shadows the outer channel pointer here. */
848 dwc_hc_t *hc = hcd->hc_ptr_array[i];
849 DWC_PRINT(" %2d: %p\n", i, hc);
856 //OTG host require the DMA addr is DWORD-aligned,
857 //patch it if the buffer is not DWORD-aligned
/* If the URB's transfer buffer is not 4-byte aligned, substitutes a
 * driver-owned aligned bounce buffer (urb->aligned_transfer_buffer) and
 * points urb->transfer_dma at it, copying OUT data into the bounce
 * buffer first.  Returns nonzero only on allocation failure (the exact
 * return statements fall on lines not visible in this excerpt). */
859 int hcd_check_and_patch_dma_addr(struct urb *urb){
/* Nothing to do without a CPU buffer or with an unset/invalid DMA addr. */
861 if((!urb->transfer_buffer)||!urb->transfer_dma||urb->transfer_dma==0xffffffff)
864 if(((u32)urb->transfer_buffer)& 0x3){
868 "transfer_buffer=%.8x, "
869 "transfer_dma=%.8x, "
870 "transfer_buffer_length=%d, "
871 "actual_length=%d(%x), "
873 ((urb->transfer_flags & URB_DIR_MASK)==URB_DIR_OUT)?"OUT":"IN",
875 urb->transfer_buffer,
877 urb->transfer_buffer_length,
878 urb->actual_length,urb->actual_length
/* (Re)allocate the bounce buffer if absent or too small. */
881 if(!urb->aligned_transfer_buffer||urb->aligned_transfer_buffer_length<urb->transfer_buffer_length){
882 urb->aligned_transfer_buffer_length=urb->transfer_buffer_length;
883 if(urb->aligned_transfer_buffer) {
884 kfree(urb->aligned_transfer_buffer);
/* NOTE(review): GFP_KERNEL|GFP_DMA|GFP_ATOMIC mixes sleeping and
 * atomic allocation flags — verify which context this runs in. */
886 urb->aligned_transfer_buffer=kmalloc(urb->aligned_transfer_buffer_length,GFP_KERNEL|GFP_DMA|GFP_ATOMIC);
887 if(!urb->aligned_transfer_buffer){
888 DWC_ERROR("Cannot alloc required buffer!!\n");
/* NOTE(review): dma_map_single(NULL,...) relies on a NULL device, and
 * DMA_FROM_DEVICE is used even for OUT transfers — confirm this is
 * intentional for this platform. */
892 urb->aligned_transfer_dma=dma_map_single(NULL,(void *)(urb->aligned_transfer_buffer),(urb->aligned_transfer_buffer_length),DMA_FROM_DEVICE);
893 //printk(" new allocated aligned_buf=%.8x aligned_buf_len=%d\n", (u32)urb->aligned_transfer_buffer, urb->aligned_transfer_buffer_length);
/* Redirect the URB's DMA address to the aligned bounce buffer. */
895 urb->transfer_dma=urb->aligned_transfer_dma;
896 if((urb->transfer_flags & URB_DIR_MASK)==URB_DIR_OUT) {
/* OUT transfer: stage the caller's data into the bounce buffer and
 * flush it to the device. */
897 memcpy(urb->aligned_transfer_buffer,urb->transfer_buffer,urb->transfer_buffer_length);
898 dma_sync_single_for_device(NULL,urb->transfer_dma,urb->transfer_buffer_length,DMA_TO_DEVICE);
906 /** Starts processing a USB transfer request specified by a USB Request Block
907 * (URB). mem_flags indicates the type of memory allocation to use while
908 * processing this URB. */
909 int dwc_otg_hcd_urb_enqueue(struct usb_hcd *hcd,
910 // struct usb_host_endpoint *ep,
916 dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
920 SPIN_LOCK_IRQSAVE(&dwc_otg_hcd->lock, flags);
/* A non-NULL hcpriv means this URB is already queued — reject it. */
922 if (urb->hcpriv != NULL) {
923 SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags);
928 if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
929 dump_urb_info(urb, "dwc_otg_hcd_urb_enqueue");
932 if (!dwc_otg_hcd->flags.b.port_connect_status) {
933 /* No longer connected. */
934 SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags);
/* Bounce-buffer the transfer if its DMA address is not DWORD-aligned. */
938 if (hcd_check_and_patch_dma_addr(urb)) {
939 DWC_ERROR("Unable to check and patch dma addr\n");
940 SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags);
/* Create a QTD for the URB and add it to the appropriate schedule. */
943 qtd = dwc_otg_hcd_qtd_create(urb);
945 DWC_ERROR("DWC OTG HCD URB Enqueue failed creating QTD\n");
946 SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags);
950 retval = dwc_otg_hcd_qtd_add(qtd, dwc_otg_hcd);
952 DWC_ERROR("DWC OTG HCD URB Enqueue failed adding QTD. "
953 "Error status %d\n", retval);
954 dwc_otg_hcd_qtd_free(qtd);
956 SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags);
960 /** Aborts/cancels a USB transfer request. Always returns 0 to indicate
962 int dwc_otg_hcd_urb_dequeue(struct usb_hcd *hcd,
963 struct urb *urb, int status)
966 dwc_otg_hcd_t *dwc_otg_hcd;
967 dwc_otg_qtd_t *urb_qtd;
969 struct usb_host_endpoint *ep = dwc_urb_to_endpoint(urb);
972 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD URB Dequeue\n");
974 dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
976 SPIN_LOCK_IRQSAVE(&dwc_otg_hcd->lock, flags);
/* The QTD was stashed in urb->hcpriv at enqueue time; the QH lives in
 * the endpoint's hcpriv. */
978 urb_qtd = (dwc_otg_qtd_t *)urb->hcpriv;
979 qh = (dwc_otg_qh_t *)ep->hcpriv;
982 if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
983 dump_urb_info(urb, "dwc_otg_hcd_urb_dequeue");
984 if (urb_qtd == qh->qtd_in_process) {
985 dump_channel_info(dwc_otg_hcd, qh);
990 if (qh && urb_qtd == qh->qtd_in_process) {
991 /* The QTD is in process (it has been assigned to a channel). */
993 if (dwc_otg_hcd->flags.b.port_connect_status) {
995 * If still connected (i.e. in host mode), halt the
996 * channel so it can be used for other transfers. If
997 * no longer connected, the host registers can't be
998 * written to halt the channel since the core is in
1001 dwc_otg_hc_halt(dwc_otg_hcd, qh->channel,
1002 DWC_OTG_HC_XFER_URB_DEQUEUE);
1007 * Free the QTD and clean up the associated QH. Leave the QH in the
1008 * schedule if it has any remaining QTDs.
1010 dwc_otg_hcd_qtd_remove_and_free(dwc_otg_hcd, urb_qtd);
1011 if (qh && urb_qtd == qh->qtd_in_process) {
1012 dwc_otg_hcd_qh_deactivate(dwc_otg_hcd, qh, 0);
1014 qh->qtd_in_process = NULL;
1016 if (qh && list_empty(&qh->qtd_list)) {
1017 dwc_otg_hcd_qh_remove(dwc_otg_hcd, qh);
/* Standard Linux unlink protocol: verify the unlink request, detach the
 * URB from the endpoint, then give it back outside the spinlock. */
1022 rc = usb_hcd_check_unlink_urb(hcd, urb, status);
1025 usb_hcd_unlink_urb_from_ep(hcd, urb);
1028 SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags);
1031 usb_hcd_giveback_urb(hcd, urb, status);
1033 if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
1034 DWC_PRINT("Called usb_hcd_giveback_urb()\n");
1035 DWC_PRINT(" urb->status = %d\n", urb->status);
1041 /** Frees resources in the DWC_otg controller related to a given endpoint. Also
1042 * clears state in the HCD related to the endpoint. Any URBs for the endpoint
1043 * must already be dequeued. */
1044 void dwc_otg_hcd_endpoint_disable(struct usb_hcd *hcd,
1045 struct usb_host_endpoint *ep)
1047 dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
1050 unsigned long flags;
1053 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD EP DISABLE: _bEndpointAddress=0x%02x, "
1054 "endpoint=%d\n", ep->desc.bEndpointAddress,
1055 dwc_ep_addr_to_endpoint(ep->desc.bEndpointAddress));
1058 SPIN_LOCK_IRQSAVE(&dwc_otg_hcd->lock, flags);
1059 qh = (dwc_otg_qh_t *)(ep->hcpriv);
1063 /** Check that the QTD list is really empty */
/* If QTDs remain, drop the lock and retry (up to 250 times), sleeping
 * one tick each round to let pending completions drain. */
1064 if (!list_empty(&qh->qtd_list)) {
1065 if (retry++ < 250) {
1066 SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags);
1067 schedule_timeout_uninterruptible(1);
1071 DWC_WARN("DWC OTG HCD EP DISABLE:"
1072 " QTD List for this endpoint is not empty\n");
1075 dwc_otg_hcd_qh_remove_and_free(dwc_otg_hcd, qh);
1078 SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags);
1081 /** Handles host mode interrupts for the DWC_otg controller. Returns IRQ_NONE if
1082 * there was no interrupt to handle. Returns IRQ_HANDLED if there was a valid
1085 * This function is called by the USB core when an interrupt occurs */
1086 irqreturn_t dwc_otg_hcd_irq(struct usb_hcd *hcd)
1089 dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
1090 retVal = dwc_otg_hcd_handle_intr(dwc_otg_hcd);
/* On a connect-status change, poll root-hub status immediately so the
 * hub driver notices without waiting for its next poll. */
1091 if (dwc_otg_hcd->flags.b.port_connect_status_change == 1)
1092 usb_hcd_poll_rh_status(hcd);
1093 return IRQ_RETVAL(retVal);
1096 /** Creates Status Change bitmap for the root hub and root port. The bitmap is
1097 * returned in buf. Bit 0 is the status change indicator for the root hub. Bit 1
1098 * is the status change indicator for the single root port. Returns 1 if either
1099 * change indicator is 1, otherwise returns 0. */
1100 int dwc_otg_hcd_hub_status_data(struct usb_hcd *hcd, char *buf)
1102 dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
/* Bit 1 = OR of all root-port change flags. */
1105 buf[0] |= (dwc_otg_hcd->flags.b.port_connect_status_change ||
1106 dwc_otg_hcd->flags.b.port_reset_change ||
1107 dwc_otg_hcd->flags.b.port_enable_change ||
1108 dwc_otg_hcd->flags.b.port_suspend_change ||
1109 dwc_otg_hcd->flags.b.port_over_current_change) << 1;
1113 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB STATUS DATA:"
1114 " Root port status changed\n");
1115 DWC_DEBUGPL(DBG_HCDV, " port_connect_status_change: %d\n",
1116 dwc_otg_hcd->flags.b.port_connect_status_change);
1117 DWC_DEBUGPL(DBG_HCDV, " port_reset_change: %d\n",
1118 dwc_otg_hcd->flags.b.port_reset_change);
1119 DWC_DEBUGPL(DBG_HCDV, " port_enable_change: %d\n",
1120 dwc_otg_hcd->flags.b.port_enable_change);
1121 DWC_DEBUGPL(DBG_HCDV, " port_suspend_change: %d\n",
1122 dwc_otg_hcd->flags.b.port_suspend_change);
1123 DWC_DEBUGPL(DBG_HCDV, " port_over_current_change: %d\n",
1124 dwc_otg_hcd->flags.b.port_over_current_change);
/* Nonzero buf[0] means something changed. */
1127 return (buf[0] != 0);
1130 #ifdef DWC_HS_ELECT_TST
1132 * Quick and dirty hack to implement the HS Electrical Test
1133 * SINGLE_STEP_GET_DEVICE_DESCRIPTOR feature.
1135 * This code was copied from our userspace app "hset". It sends a
1136 * Get Device Descriptor control sequence in two parts, first the
1137 * Setup packet by itself, followed some time later by the In and
1138 * Ack packets. Rather than trying to figure out how to add this
1139 * functionality to the normal driver code, we just hijack the
1140 * hardware, using these two function to drive the hardware
/* File-scope register pointers shared by do_setup()/do_in_ack() below.
 * They are initialized in the USB_PORT_FEAT_TEST handler of
 * dwc_otg_hcd_hub_control() before either function is called.
 * NOTE(review): these are non-static globals used only for the electrical
 * test hack -- consider making them static to avoid polluting the kernel
 * namespace (confirm no other translation unit references them). */
1144 dwc_otg_core_global_regs_t *global_regs;
1145 dwc_otg_host_global_regs_t *hc_global_regs;
1146 dwc_otg_hc_regs_t *hc_regs;
1147 uint32_t *data_fifo;
/* Drives a bare SETUP transaction (first half of Get Device Descriptor) for
 * the HS electrical test, by programming host channel 0 registers directly
 * and busy-waiting on GINTSTS -- normal interrupt delivery is masked off by
 * the caller. NOTE(review): several original lines (braces, channel-disable
 * writes, delay loops) are not visible in this extraction; the structure
 * below is partial. */
1149 static void do_setup(void)
1151 gintsts_data_t gintsts;
1152 hctsiz_data_t hctsiz;
1153 hcchar_data_t hcchar;
/* Enable HAINT for channel 0 and a fixed set of HCINT sources (0x04a3). */
1158 dwc_write_reg32(&hc_global_regs->haintmsk, 0x0001);
1161 dwc_write_reg32(&hc_regs->hcintmsk, 0x04a3);
/* Read and then write-back-to-clear all pending interrupt status
 * (GINTSTS / HAINT / HCINT are write-1-to-clear). */
1164 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1165 //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
1168 haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
1169 //fprintf(stderr, "HAINT: %08x\n", haint.d32);
1172 hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
1173 //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
1176 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1177 //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
1180 dwc_write_reg32(&hc_regs->hcint, hcint.d32);
1183 dwc_write_reg32(&hc_global_regs->haint, haint.d32);
1186 dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
1189 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1190 //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
1193 * Send Setup packet (Get Device Descriptor)
1196 /* Make sure channel is disabled */
1197 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
/* If the channel is still enabled, force a disable via HCCHAR.
 * NOTE(review): the chdis-bit assignment is among the missing lines. */
1198 if (hcchar.b.chen) {
1199 //fprintf(stderr, "Channel already enabled 1, HCCHAR = %08x\n", hcchar.d32);
1201 // hcchar.b.chen = 1;
1202 dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
1207 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1208 //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
1211 haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
1212 //fprintf(stderr, "HAINT: %08x\n", haint.d32);
1215 hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
1216 //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
1219 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1220 //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
1223 dwc_write_reg32(&hc_regs->hcint, hcint.d32);
1226 dwc_write_reg32(&hc_global_regs->haint, haint.d32);
1229 dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
1231 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1232 //if (hcchar.b.chen) {
1233 // fprintf(stderr, "** Channel _still_ enabled 1, HCCHAR = %08x **\n", hcchar.d32);
/* Program the transfer: one 8-byte packet with SETUP PID. */
1239 hctsiz.b.xfersize = 8;
1240 hctsiz.b.pktcnt = 1;
1241 hctsiz.b.pid = DWC_OTG_HC_PID_SETUP;
1242 dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32);
/* Configure the channel as a control endpoint and start it.
 * NOTE(review): chen/epdir/mps assignments are among the missing lines. */
1245 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1246 hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL;
1251 dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
1253 /* Fill FIFO with Setup data for Get Device Descriptor */
/* 0x01000680 / 0x00080000 is the little-endian image of the standard
 * 8-byte GET_DESCRIPTOR(Device, len=8) setup packet. */
1254 data_fifo = (uint32_t *)((char *)global_regs + 0x1000);
1255 dwc_write_reg32(data_fifo++, 0x01000680);
1256 dwc_write_reg32(data_fifo++, 0x00080000);
1258 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1259 //fprintf(stderr, "Waiting for HCINTR intr 1, GINTSTS = %08x\n", gintsts.d32);
1261 /* Wait for host channel interrupt */
/* Busy-wait poll; safe only because the caller masked GINTMSK first. */
1263 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1264 } while (gintsts.b.hcintr == 0);
1266 //fprintf(stderr, "Got HCINTR intr 1, GINTSTS = %08x\n", gintsts.d32);
1268 /* Disable HCINTs */
1269 dwc_write_reg32(&hc_regs->hcintmsk, 0x0000);
1271 /* Disable HAINTs */
1272 dwc_write_reg32(&hc_global_regs->haintmsk, 0x0000);
/* Final read-then-write-to-clear pass over all interrupt status regs so the
 * normal driver resumes with a clean slate. */
1275 haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
1276 //fprintf(stderr, "HAINT: %08x\n", haint.d32);
1279 hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
1280 //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
1283 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1284 //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
1287 dwc_write_reg32(&hc_regs->hcint, hcint.d32);
1290 dwc_write_reg32(&hc_global_regs->haint, haint.d32);
1293 dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
1296 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1297 //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
/* Second half of the electrical-test Get Device Descriptor: receives the
 * Control IN data packet, then sends the zero-length OUT status (handshake)
 * stage, all by direct register access with busy-wait polling. Must be
 * called after do_setup() with interrupts still masked.
 * NOTE(review): as with do_setup(), many original lines (braces, delay
 * loops, channel-disable writes) are missing from this extraction. */
1300 static void do_in_ack(void)
1302 gintsts_data_t gintsts;
1303 hctsiz_data_t hctsiz;
1304 hcchar_data_t hcchar;
1307 host_grxsts_data_t grxsts;
/* Enable channel-0 HAINT and the fixed HCINT source set (0x04a3). */
1310 dwc_write_reg32(&hc_global_regs->haintmsk, 0x0001);
1313 dwc_write_reg32(&hc_regs->hcintmsk, 0x04a3);
/* Clear all stale interrupt status (write-1-to-clear). */
1316 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1317 //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
1320 haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
1321 //fprintf(stderr, "HAINT: %08x\n", haint.d32);
1324 hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
1325 //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
1328 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1329 //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
1332 dwc_write_reg32(&hc_regs->hcint, hcint.d32);
1335 dwc_write_reg32(&hc_global_regs->haint, haint.d32);
1338 dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
1341 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1342 //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
1345 * Receive Control In packet
1348 /* Make sure channel is disabled */
1349 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1350 if (hcchar.b.chen) {
1351 //fprintf(stderr, "Channel already enabled 2, HCCHAR = %08x\n", hcchar.d32);
1354 dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
1359 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1360 //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
1363 haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
1364 //fprintf(stderr, "HAINT: %08x\n", haint.d32);
1367 hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
1368 //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
1371 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1372 //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
1375 dwc_write_reg32(&hc_regs->hcint, hcint.d32);
1378 dwc_write_reg32(&hc_global_regs->haint, haint.d32);
1381 dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
1383 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1384 //if (hcchar.b.chen) {
1385 // fprintf(stderr, "** Channel _still_ enabled 2, HCCHAR = %08x **\n", hcchar.d32);
/* IN data stage: one 8-byte packet, DATA1 PID (first data packet after
 * SETUP is always DATA1 per the USB control-transfer protocol). */
1391 hctsiz.b.xfersize = 8;
1392 hctsiz.b.pktcnt = 1;
1393 hctsiz.b.pid = DWC_OTG_HC_PID_DATA1;
1394 dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32);
1397 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1398 hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL;
1403 dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
1405 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1406 //fprintf(stderr, "Waiting for RXSTSQLVL intr 1, GINTSTS = %08x\n", gintsts.d32);
1408 /* Wait for receive status queue interrupt */
1410 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1411 } while (gintsts.b.rxstsqlvl == 0);
1413 //fprintf(stderr, "Got RXSTSQLVL intr 1, GINTSTS = %08x\n", gintsts.d32);
/* Pop the receive status queue (GRXSTSP read has pop side effect). */
1416 grxsts.d32 = dwc_read_reg32(&global_regs->grxstsp);
1417 //fprintf(stderr, "GRXSTS: %08x\n", grxsts.d32);
1419 /* Clear RXSTSQLVL in GINTSTS */
1421 gintsts.b.rxstsqlvl = 1;
1422 dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
1424 switch (grxsts.b.pktsts) {
1425 case DWC_GRXSTS_PKTSTS_IN:
1426 /* Read the data into the host buffer */
1427 if (grxsts.b.bcnt > 0) {
/* Round the byte count up to whole 32-bit FIFO words; the received
 * descriptor bytes are drained and discarded (the test only needs the
 * bus traffic, not the data). */
1429 int word_count = (grxsts.b.bcnt + 3) / 4;
1431 data_fifo = (uint32_t *)((char *)global_regs + 0x1000);
1433 for (i = 0; i < word_count; i++) {
1434 (void)dwc_read_reg32(data_fifo++);
1438 //fprintf(stderr, "Received %u bytes\n", (unsigned)grxsts.b.bcnt);
1442 //fprintf(stderr, "** Unexpected GRXSTS packet status 1 **\n");
1446 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1447 //fprintf(stderr, "Waiting for RXSTSQLVL intr 2, GINTSTS = %08x\n", gintsts.d32);
1449 /* Wait for receive status queue interrupt */
1451 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1452 } while (gintsts.b.rxstsqlvl == 0);
1454 //fprintf(stderr, "Got RXSTSQLVL intr 2, GINTSTS = %08x\n", gintsts.d32);
/* Second pop: expect the IN transfer-complete status entry. */
1457 grxsts.d32 = dwc_read_reg32(&global_regs->grxstsp);
1458 //fprintf(stderr, "GRXSTS: %08x\n", grxsts.d32);
1460 /* Clear RXSTSQLVL in GINTSTS */
1462 gintsts.b.rxstsqlvl = 1;
1463 dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
1465 switch (grxsts.b.pktsts) {
1466 case DWC_GRXSTS_PKTSTS_IN_XFER_COMP:
1470 //fprintf(stderr, "** Unexpected GRXSTS packet status 2 **\n");
1474 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1475 //fprintf(stderr, "Waiting for HCINTR intr 2, GINTSTS = %08x\n", gintsts.d32);
1477 /* Wait for host channel interrupt */
1479 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1480 } while (gintsts.b.hcintr == 0);
1482 //fprintf(stderr, "Got HCINTR intr 2, GINTSTS = %08x\n", gintsts.d32);
/* Clear interrupt status between the IN stage and the status stage. */
1485 haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
1486 //fprintf(stderr, "HAINT: %08x\n", haint.d32);
1489 hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
1490 //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
1493 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1494 //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
1497 dwc_write_reg32(&hc_regs->hcint, hcint.d32);
1500 dwc_write_reg32(&hc_global_regs->haint, haint.d32);
1503 dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
1506 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1507 //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
1514 * Send handshake packet
1518 haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
1519 //fprintf(stderr, "HAINT: %08x\n", haint.d32);
1522 hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
1523 //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
1526 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1527 //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
1530 dwc_write_reg32(&hc_regs->hcint, hcint.d32);
1533 dwc_write_reg32(&hc_global_regs->haint, haint.d32);
1536 dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
1539 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1540 //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
1542 /* Make sure channel is disabled */
1543 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1544 if (hcchar.b.chen) {
1545 //fprintf(stderr, "Channel already enabled 3, HCCHAR = %08x\n", hcchar.d32);
1548 dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
1553 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1554 //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
1557 haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
1558 //fprintf(stderr, "HAINT: %08x\n", haint.d32);
1561 hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
1562 //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
1565 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1566 //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
1569 dwc_write_reg32(&hc_regs->hcint, hcint.d32);
1572 dwc_write_reg32(&hc_global_regs->haint, haint.d32);
1575 dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
1577 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1578 //if (hcchar.b.chen) {
1579 // fprintf(stderr, "** Channel _still_ enabled 3, HCCHAR = %08x **\n", hcchar.d32);
/* Status stage: a zero-length packet with DATA1 PID (control status stage
 * always uses DATA1). */
1585 hctsiz.b.xfersize = 0;
1586 hctsiz.b.pktcnt = 1;
1587 hctsiz.b.pid = DWC_OTG_HC_PID_DATA1;
1588 dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32);
1591 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1592 hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL;
1597 dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
1599 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1600 //fprintf(stderr, "Waiting for HCINTR intr 3, GINTSTS = %08x\n", gintsts.d32);
1602 /* Wait for host channel interrupt */
1604 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1605 } while (gintsts.b.hcintr == 0);
1607 //fprintf(stderr, "Got HCINTR intr 3, GINTSTS = %08x\n", gintsts.d32);
1609 /* Disable HCINTs */
1610 dwc_write_reg32(&hc_regs->hcintmsk, 0x0000);
1612 /* Disable HAINTs */
1613 dwc_write_reg32(&hc_global_regs->haintmsk, 0x0000);
/* Final cleanup: clear every interrupt-status register before returning
 * control to the normal driver path. */
1616 haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
1617 //fprintf(stderr, "HAINT: %08x\n", haint.d32);
1620 hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
1621 //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
1624 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1625 //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
1628 dwc_write_reg32(&hc_regs->hcint, hcint.d32);
1631 dwc_write_reg32(&hc_global_regs->haint, haint.d32);
1634 dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
1637 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1638 //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
1640 #endif /* DWC_HS_ELECT_TST */
1642 /** Handles hub class-specific requests. */
/* Implements the hc_driver .hub_control callback for the emulated root hub:
 * a big switch on typeReq covering ClearHubFeature, ClearPortFeature,
 * GetHubDescriptor, GetPortStatus, SetPortFeature (and the electrical-test
 * hook). The controller has exactly one root port, so wIndex must be 1.
 * NOTE(review): switch headers, break statements and the retval/return
 * plumbing are among the lines missing from this extraction. */
1643 int dwc_otg_hcd_hub_control(struct usb_hcd *hcd,
1652 dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
1653 dwc_otg_core_if_t *core_if = hcd_to_dwc_otg_hcd(hcd)->core_if;
1654 struct usb_hub_descriptor *desc;
1655 hprt0_data_t hprt0 = {.d32 = 0};
1657 uint32_t port_status;
1660 case ClearHubFeature:
1661 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1662 "ClearHubFeature 0x%x\n", wValue);
1664 case C_HUB_LOCAL_POWER:
1665 case C_HUB_OVER_CURRENT:
1666 /* Nothing required here */
1670 DWC_ERROR("DWC OTG HCD - "
1671 "ClearHubFeature request %xh unknown\n", wValue);
1674 case ClearPortFeature:
/* Only port 1 exists on this root hub. */
1675 if (!wIndex || wIndex > 1)
1679 case USB_PORT_FEAT_ENABLE:
1680 DWC_DEBUGPL(DBG_ANY, "DWC OTG HCD HUB CONTROL - "
1681 "ClearPortFeature USB_PORT_FEAT_ENABLE\n");
1682 hprt0.d32 = dwc_otg_read_hprt0(core_if);
1684 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
1686 case USB_PORT_FEAT_SUSPEND:
/* Clearing SUSPEND means driving resume signaling on the port;
 * the resume bit is cleared again after the (not-visible) delay. */
1687 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1688 "ClearPortFeature USB_PORT_FEAT_SUSPEND\n");
1689 hprt0.d32 = dwc_otg_read_hprt0(core_if);
1691 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
1692 /* Clear Resume bit */
1695 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
1697 case USB_PORT_FEAT_POWER:
1698 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1699 "ClearPortFeature USB_PORT_FEAT_POWER\n");
1700 hprt0.d32 = dwc_otg_read_hprt0(core_if);
1702 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
1704 case USB_PORT_FEAT_INDICATOR:
1705 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1706 "ClearPortFeature USB_PORT_FEAT_INDICATOR\n");
1707 /* Port inidicator not supported */
/* The C_* features below only clear driver-internal change flags that
 * dwc_otg_hcd_hub_status_data() reports; no hardware access needed. */
1709 case USB_PORT_FEAT_C_CONNECTION:
1710 /* Clears drivers internal connect status change
1712 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1713 "ClearPortFeature USB_PORT_FEAT_C_CONNECTION\n");
1714 dwc_otg_hcd->flags.b.port_connect_status_change = 0;
1716 case USB_PORT_FEAT_C_RESET:
1717 /* Clears the driver's internal Port Reset Change
1719 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1720 "ClearPortFeature USB_PORT_FEAT_C_RESET\n");
1721 dwc_otg_hcd->flags.b.port_reset_change = 0;
1723 case USB_PORT_FEAT_C_ENABLE:
1724 /* Clears the driver's internal Port
1725 * Enable/Disable Change flag */
1726 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1727 "ClearPortFeature USB_PORT_FEAT_C_ENABLE\n");
1728 dwc_otg_hcd->flags.b.port_enable_change = 0;
1730 case USB_PORT_FEAT_C_SUSPEND:
1731 /* Clears the driver's internal Port Suspend
1732 * Change flag, which is set when resume signaling on
1733 * the host port is complete */
1734 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1735 "ClearPortFeature USB_PORT_FEAT_C_SUSPEND\n");
1736 dwc_otg_hcd->flags.b.port_suspend_change = 0;
1738 case USB_PORT_FEAT_C_OVER_CURRENT:
1739 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1740 "ClearPortFeature USB_PORT_FEAT_C_OVER_CURRENT\n");
1741 dwc_otg_hcd->flags.b.port_over_current_change = 0;
1745 DWC_ERROR("DWC OTG HCD - "
1746 "ClearPortFeature request %xh "
1747 "unknown or unsupported\n", wValue);
1750 case GetHubDescriptor:
/* Hand-built hub descriptor: 1 port, 0x29 = hub descriptor type,
 * characteristics 0x08, DeviceRemovable marks the single port. */
1751 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1752 "GetHubDescriptor\n");
1753 desc = (struct usb_hub_descriptor *)buf;
1754 desc->bDescLength = 9;
1755 desc->bDescriptorType = 0x29;
1756 desc->bNbrPorts = 1;
1757 desc->wHubCharacteristics = 0x08;
1758 desc->bPwrOn2PwrGood = 1;
1759 desc->bHubContrCurrent = 0;
1760 desc->u.hs.DeviceRemovable[0] = 0;
1761 desc->u.hs.DeviceRemovable[1] = 0xff;
1764 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1769 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1772 if (!wIndex || wIndex > 1)
/* GetPortStatus: upper 16 bits are the change bits, taken from the
 * driver-internal flags set by the interrupt handler... */
1777 if (dwc_otg_hcd->flags.b.port_connect_status_change)
1778 port_status |= (1 << USB_PORT_FEAT_C_CONNECTION);
1780 if (dwc_otg_hcd->flags.b.port_enable_change)
1781 port_status |= (1 << USB_PORT_FEAT_C_ENABLE);
1783 if (dwc_otg_hcd->flags.b.port_suspend_change)
1784 port_status |= (1 << USB_PORT_FEAT_C_SUSPEND);
1786 if (dwc_otg_hcd->flags.b.port_reset_change)
1787 port_status |= (1 << USB_PORT_FEAT_C_RESET);
1789 if (dwc_otg_hcd->flags.b.port_over_current_change) {
1790 DWC_ERROR("Device Not Supported\n");
1791 port_status |= (1 << USB_PORT_FEAT_C_OVER_CURRENT);
1794 if (!dwc_otg_hcd->flags.b.port_connect_status) {
1796 * The port is disconnected, which means the core is
1797 * either in device mode or it soon will be. Just
1798 * return 0's for the remainder of the port status
1799 * since the port register can't be read if the core
1800 * is in device mode.
1802 *((__le32 *) buf) = cpu_to_le32(port_status);
/* ...while the lower 16 status bits are read live from HPRT0. */
1806 hprt0.d32 = dwc_read_reg32(core_if->host_if->hprt0);
1807 DWC_DEBUGPL(DBG_HCDV, " HPRT0: 0x%08x\n", hprt0.d32);
1809 if (hprt0.b.prtconnsts)
1810 port_status |= (1 << USB_PORT_FEAT_CONNECTION);
1813 port_status |= (1 << USB_PORT_FEAT_ENABLE);
1815 if (hprt0.b.prtsusp)
1816 port_status |= (1 << USB_PORT_FEAT_SUSPEND);
1818 if (hprt0.b.prtovrcurract)
1819 port_status |= (1 << USB_PORT_FEAT_OVER_CURRENT);
1822 port_status |= (1 << USB_PORT_FEAT_RESET);
1825 port_status |= (1 << USB_PORT_FEAT_POWER);
1827 if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_HIGH_SPEED)
1828 port_status |= (USB_PORT_STAT_HIGH_SPEED);
1829 else if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED)
1830 port_status |= (USB_PORT_STAT_LOW_SPEED);
1832 if (hprt0.b.prttstctl)
1833 port_status |= (1 << USB_PORT_FEAT_TEST);
1835 /* USB_PORT_FEAT_INDICATOR unsupported always 0 */
1837 *((__le32 *) buf) = cpu_to_le32(port_status);
1841 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1843 /* No HUB features supported */
1845 case SetPortFeature:
/* FEAT_TEST is exempt from the port-number check because the test
 * selector travels in the high byte of wIndex, not the port number. */
1846 if (wValue != USB_PORT_FEAT_TEST && (!wIndex || wIndex > 1))
1849 if (!dwc_otg_hcd->flags.b.port_connect_status) {
1851 * The port is disconnected, which means the core is
1852 * either in device mode or it soon will be. Just
1853 * return without doing anything since the port
1854 * register can't be written if the core is in device
1861 case USB_PORT_FEAT_SUSPEND:
1862 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1863 "SetPortFeature - USB_PORT_FEAT_SUSPEND\n");
/* If HNP is enabled on the OTG port, arm HstSetHNPEn before
 * suspending so the B-device may take over the host role. */
1864 if (hcd->self.otg_port == wIndex &&
1865 hcd->self.b_hnp_enable) {
1866 gotgctl_data_t gotgctl = {.d32=0};
1867 gotgctl.b.hstsethnpen = 1;
1868 dwc_modify_reg32(&core_if->core_global_regs->gotgctl,
1870 core_if->op_state = A_SUSPEND;
1872 hprt0.d32 = dwc_otg_read_hprt0(core_if);
1873 hprt0.b.prtsusp = 1;
1874 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
1875 //DWC_PRINT("SUSPEND: HPRT0=%0x\n", hprt0.d32);
1876 /* Suspend the Phy Clock */
1878 pcgcctl_data_t pcgcctl = {.d32=0};
1879 pcgcctl.b.stoppclk = 1;
1880 dwc_write_reg32(core_if->pcgcctl, pcgcctl.d32);
1883 /* For HNP the bus must be suspended for at least 200ms. */
1884 if (hcd->self.b_hnp_enable) {
1886 //DWC_PRINT("SUSPEND: wait complete! (%d)\n", _hcd->state);
1889 case USB_PORT_FEAT_POWER:
1890 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1891 "SetPortFeature - USB_PORT_FEAT_POWER\n");
1892 hprt0.d32 = dwc_otg_read_hprt0(core_if);
1894 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
1896 case USB_PORT_FEAT_RESET:
1897 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1898 "SetPortFeature - USB_PORT_FEAT_RESET\n");
1899 hprt0.d32 = dwc_otg_read_hprt0(core_if);
1900 /* When B-Host the Port reset bit is set in
1901 * the Start HCD Callback function, so that
1902 * the reset is started within 1ms of the HNP
1903 * success interrupt. */
1904 if (!hcd->self.is_b_host) {
1906 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
1908 /* Clear reset bit in 10ms (FS/LS) or 50ms (HS) */
1911 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
1914 #ifdef DWC_HS_ELECT_TST
1915 case USB_PORT_FEAT_TEST:
1918 gintmsk_data_t gintmsk;
/* Test selector is carried in the MSB of wIndex per USB 2.0. */
1920 t = (wIndex >> 8); /* MSB wIndex USB */
1921 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1922 "SetPortFeature - USB_PORT_FEAT_TEST %d\n", t);
1923 warn("USB_PORT_FEAT_TEST %d\n", t);
/* Selectors 1-5 map directly onto HPRT0.PrtTstCtl; 6-8 are
 * vendor-defined sequences implemented below. */
1925 hprt0.d32 = dwc_otg_read_hprt0(core_if);
1926 hprt0.b.prttstctl = t;
1927 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
1929 /* Setup global vars with reg addresses (quick and
1930 * dirty hack, should be cleaned up)
1932 global_regs = core_if->core_global_regs;
1933 hc_global_regs = core_if->host_if->host_global_regs;
1934 hc_regs = (dwc_otg_hc_regs_t *)((char *)global_regs + 0x500);
1935 data_fifo = (uint32_t *)((char *)global_regs + 0x1000);
1937 if (t == 6) { /* HS_HOST_PORT_SUSPEND_RESUME */
1938 /* Save current interrupt mask */
1939 gintmsk.d32 = dwc_read_reg32(&global_regs->gintmsk);
1941 /* Disable all interrupts while we muck with
1942 * the hardware directly
1944 dwc_write_reg32(&global_regs->gintmsk, 0);
1946 /* 15 second delay per the test spec */
1949 /* Drive suspend on the root port */
1950 hprt0.d32 = dwc_otg_read_hprt0(core_if);
1951 hprt0.b.prtsusp = 1;
1953 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
1955 /* 15 second delay per the test spec */
1958 /* Drive resume on the root port */
1959 hprt0.d32 = dwc_otg_read_hprt0(core_if);
1960 hprt0.b.prtsusp = 0;
1962 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
1965 /* Clear the resume bit */
1967 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
1969 /* Restore interrupts */
1970 dwc_write_reg32(&global_regs->gintmsk, gintmsk.d32);
1971 } else if (t == 7) { /* SINGLE_STEP_GET_DEVICE_DESCRIPTOR setup */
1972 /* Save current interrupt mask */
1973 gintmsk.d32 = dwc_read_reg32(&global_regs->gintmsk);
1975 /* Disable all interrupts while we muck with
1976 * the hardware directly
1978 dwc_write_reg32(&global_regs->gintmsk, 0);
1980 /* 15 second delay per the test spec */
1983 /* Send the Setup packet */
1986 /* 15 second delay so nothing else happens for awhile */
1989 /* Restore interrupts */
1990 dwc_write_reg32(&global_regs->gintmsk, gintmsk.d32);
1991 } else if (t == 8) { /* SINGLE_STEP_GET_DEVICE_DESCRIPTOR execute */
1992 /* Save current interrupt mask */
1993 gintmsk.d32 = dwc_read_reg32(&global_regs->gintmsk);
1995 /* Disable all interrupts while we muck with
1996 * the hardware directly
1998 dwc_write_reg32(&global_regs->gintmsk, 0);
2000 /* Send the Setup packet */
2003 /* 15 second delay so nothing else happens for awhile */
2006 /* Send the In and Ack packets */
2009 /* 15 second delay so nothing else happens for awhile */
2012 /* Restore interrupts */
2013 dwc_write_reg32(&global_regs->gintmsk, gintmsk.d32);
2018 #endif /* DWC_HS_ELECT_TST */
2020 case USB_PORT_FEAT_INDICATOR:
2021 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
2022 "SetPortFeature - USB_PORT_FEAT_INDICATOR\n");
2027 DWC_ERROR("DWC OTG HCD - "
2028 "SetPortFeature request %xh "
2029 "unknown or unsupported\n", wValue);
2036 DWC_WARN("DWC OTG HCD - "
2037 "Unknown hub control request type or invalid typeReq: %xh wIndex: %xh wValue: %xh\n",
2038 typeReq, wIndex, wValue);
2046 * Assigns transactions from a QTD to a free host channel and initializes the
2047 * host channel to perform the transactions. The host channel is removed from
2050 * @param hcd The HCD state structure.
2051 * @param qh Transactions from the first QTD for this QH are selected and
2052 * assigned to a free host channel.
2054 static void assign_and_init_hc(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh)
2060 DWC_DEBUGPL(DBG_HCD_FLOOD, "%s(%p,%p)\n", __func__, hcd, qh);
/* Caller guarantees free_hc_list and qh->qtd_list are non-empty (checked
 * in dwc_otg_hcd_select_transactions before calling here). */
2061 hc = list_entry(hcd->free_hc_list.next, dwc_hc_t, hc_list_entry);
2063 qtd = list_entry(qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry);
2070 /* Remove the host channel from the free list. */
2071 list_del_init(&hc->hc_list_entry);
2074 qh->qtd_in_process = qtd;
2077 * Use usb_pipedevice to determine device address. This address is
2078 * 0 before the SET_ADDRESS command and the correct address afterward.
2080 hc->dev_addr = usb_pipedevice(urb->pipe);
2081 hc->ep_num = usb_pipeendpoint(urb->pipe);
/* Map the USB core's speed enum onto the DWC_otg channel speed field;
 * anything that is not LOW or FULL is treated as HIGH speed. */
2083 if (urb->dev->speed == USB_SPEED_LOW) {
2084 hc->speed = DWC_OTG_EP_SPEED_LOW;
2085 } else if (urb->dev->speed == USB_SPEED_FULL) {
2086 hc->speed = DWC_OTG_EP_SPEED_FULL;
2088 hc->speed = DWC_OTG_EP_SPEED_HIGH;
2091 hc->max_packet = dwc_max_packet(qh->maxp);
/* Reset per-transfer channel state before (re)use. */
2093 hc->xfer_started = 0;
2094 hc->halt_status = DWC_OTG_HC_XFER_NO_HALT_STATUS;
2095 hc->error_state = (qtd->error_count > 0);
2096 hc->halt_on_queue = 0;
2097 hc->halt_pending = 0;
2101 * The following values may be modified in the transfer type section
2102 * below. The xfer_len value may be reduced when the transfer is
2103 * started to accommodate the max widths of the XferSize and PktCnt
2104 * fields in the HCTSIZn register.
2106 hc->do_ping = qh->ping_state;
2107 hc->ep_is_in = (usb_pipein(urb->pipe) != 0);
2108 hc->data_pid_start = qh->data_toggle;
2109 hc->multi_count = 1;
/* In DMA mode the hardware needs the bus address; in slave mode the
 * driver copies via the CPU-visible buffer. actual_length gives the
 * resume offset for partially-completed transfers. */
2111 if (hcd->core_if->dma_enable) {
2112 hc->xfer_buff = (uint8_t *)urb->transfer_dma + urb->actual_length;
2114 hc->xfer_buff = (uint8_t *)urb->transfer_buffer + urb->actual_length;
2116 hc->xfer_len = urb->transfer_buffer_length - urb->actual_length;
2120 * Set the split attributes
2125 hc->xact_pos = qtd->isoc_split_pos;
2126 hc->complete_split = qtd->complete_split;
/* For split transactions, address the transaction translator in the
 * upstream high-speed hub. */
2127 hc->hub_addr = urb->dev->tt->hub->devnum;
2128 hc->port_addr = urb->dev->ttport;
/* Per-transfer-type setup: control transfers additionally depend on the
 * current control phase tracked in the QTD. */
2131 switch (usb_pipetype(urb->pipe)) {
2133 hc->ep_type = DWC_OTG_EP_TYPE_CONTROL;
2134 switch (qtd->control_phase) {
2135 case DWC_OTG_CONTROL_SETUP:
2136 DWC_DEBUGPL(DBG_HCDV, " Control setup transaction\n");
2139 hc->data_pid_start = DWC_OTG_HC_PID_SETUP;
2140 if (hcd->core_if->dma_enable) {
2141 hc->xfer_buff = (uint8_t *)urb->setup_dma;
2143 hc->xfer_buff = (uint8_t *)urb->setup_packet;
2147 case DWC_OTG_CONTROL_DATA:
2148 DWC_DEBUGPL(DBG_HCDV, " Control data transaction\n");
2149 hc->data_pid_start = qtd->data_toggle;
2151 case DWC_OTG_CONTROL_STATUS:
2153 * Direction is opposite of data direction or IN if no
2156 DWC_DEBUGPL(DBG_HCDV, " Control status transaction\n");
2157 if (urb->transfer_buffer_length == 0) {
2160 hc->ep_is_in = (usb_pipein(urb->pipe) != USB_DIR_IN);
/* Status stage is always a DATA1, zero-length packet into the
 * HCD's dedicated status buffer. */
2165 hc->data_pid_start = DWC_OTG_HC_PID_DATA1;
2167 if (hcd->core_if->dma_enable) {
2168 hc->xfer_buff = (uint8_t *)hcd->status_buf_dma;
2170 hc->xfer_buff = (uint8_t *)hcd->status_buf;
2176 hc->ep_type = DWC_OTG_EP_TYPE_BULK;
2178 case PIPE_INTERRUPT:
2179 hc->ep_type = DWC_OTG_EP_TYPE_INTR;
2181 case PIPE_ISOCHRONOUS:
2183 struct usb_iso_packet_descriptor *frame_desc;
2184 frame_desc = &urb->iso_frame_desc[qtd->isoc_frame_index];
2185 hc->ep_type = DWC_OTG_EP_TYPE_ISOC;
2186 if (hcd->core_if->dma_enable) {
2187 hc->xfer_buff = (uint8_t *)urb->transfer_dma;
2189 hc->xfer_buff = (uint8_t *)urb->transfer_buffer;
/* Offset into the current iso frame; isoc_split_offset is non-zero
 * when resuming a partially-sent split isochronous OUT. */
2191 hc->xfer_buff += frame_desc->offset + qtd->isoc_split_offset;
2192 hc->xfer_len = frame_desc->length - qtd->isoc_split_offset;
/* Split iso OUT payloads of at most 188 bytes fit in one "ALL"
 * transaction; larger ones start with a BEGIN transaction. */
2194 if (hc->xact_pos == DWC_HCSPLIT_XACTPOS_ALL) {
2195 if (hc->xfer_len <= 188) {
2196 hc->xact_pos = DWC_HCSPLIT_XACTPOS_ALL;
2199 hc->xact_pos = DWC_HCSPLIT_XACTPOS_BEGIN;
/* High-bandwidth periodic endpoints can use up to 3 transactions per
 * microframe, encoded in the mult bits of maxp. */
2206 if (hc->ep_type == DWC_OTG_EP_TYPE_INTR ||
2207 hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
2209 * This value may be modified when the transfer is started to
2210 * reflect the actual transfer length.
2212 hc->multi_count = dwc_hb_mult(qh->maxp);
2215 dwc_otg_hc_init(hcd->core_if, hc);
2220 * This function selects transactions from the HCD transfer schedule and
2221 * assigns them to available host channels. It is called from HCD interrupt
2222 * handler functions.
2224 * @param hcd The HCD state structure.
2226 * @return The types of new transactions that were assigned to host channels.
2228 dwc_otg_transaction_type_e dwc_otg_hcd_select_transactions(dwc_otg_hcd_t *hcd)
2230 struct list_head *qh_ptr;
2231 dwc_otg_qh_t *qh = NULL;
2233 dwc_otg_transaction_type_e ret_val = DWC_OTG_TRANSACTION_NONE;
2234 uint16_t cur_frame = dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd(hcd));
2235 unsigned long flags;
2236 int include_nakd, channels_full;
2237 /* This condition has once been observed, but the cause was
2238 * never determined. Check for it here, to collect debug data if
2239 * it occurs again. */
2240 WARN_ON_ONCE(hcd->non_periodic_channels < 0);
2241 check_nakking(hcd, __FUNCTION__, "start");
2244 DWC_DEBUGPL(DBG_HCD, " Select Transactions\n");
2247 SPIN_LOCK_IRQSAVE(&hcd->lock, flags);
2248 /* Process entries in the periodic ready list. */
/* Periodic QHs get first pick of free channels. */
2249 qh_ptr = hcd->periodic_sched_ready.next;
2250 while (qh_ptr != &hcd->periodic_sched_ready &&
2251 !list_empty(&hcd->free_hc_list)) {
2253 qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry);
2254 assign_and_init_hc(hcd, qh);
2257 * Move the QH from the periodic ready schedule to the
2258 * periodic assigned schedule.
/* Advance the cursor BEFORE list_move invalidates qh's links. */
2260 qh_ptr = qh_ptr->next;
2261 list_move(&qh->qh_list_entry, &hcd->periodic_sched_assigned);
2263 ret_val = DWC_OTG_TRANSACTION_PERIODIC;
2267 * Process entries in the inactive portion of the non-periodic
2268 * schedule. Some free host channels may not be used if they are
2269 * reserved for periodic transfers.
2271 num_channels = hcd->core_if->core_params->host_channels;
2273 /* Go over the queue twice: Once while not including nak'd
2274 * entries, one while including them. This is so a retransmit of
2275 * an entry that has received a nak is scheduled only after all
2279 for (include_nakd = 0; include_nakd < 2 && !channels_full; ++include_nakd) {
2280 qh_ptr = hcd->non_periodic_sched_inactive.next;
2281 while (qh_ptr != &hcd->non_periodic_sched_inactive) {
2282 qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry);
2283 qh_ptr = qh_ptr->next;
2285 /* If a nak'd frame is in the queue for 100ms, forget
2286 * about its nak status, to prevent the situation where
2287 * a nak'd frame never gets resubmitted because there
2288 * are continously non-nakking tranfsfers available.
/* 800 (micro)frame numbers corresponds to the 100ms window;
 * 0xffff is the sentinel for "not NAK'd". */
2290 if (qh->nak_frame != 0xffff &&
2291 dwc_frame_num_gt(cur_frame, qh->nak_frame + 800))
2292 qh->nak_frame = 0xffff;
2294 /* In the first pass, ignore NAK'd retransmit
2295 * alltogether, to give them lower priority. */
2296 if (!include_nakd && qh->nak_frame != 0xffff)
2300 * Check to see if this is a NAK'd retransmit, in which case ignore for retransmission
2301 * we hold off on bulk retransmissions to reduce NAK interrupt overhead for
2302 * cheeky devices that just hold off using NAKs
2304 if (dwc_full_frame_num(qh->nak_frame) == dwc_full_frame_num(dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd(hcd))))
2307 /* Ok, we found a candidate for scheduling. Is there a
/* Non-periodic transfers may not consume channels reserved for
 * periodic traffic. */
2309 if (hcd->non_periodic_channels >=
2310 num_channels - hcd->periodic_channels ||
2311 list_empty(&hcd->free_hc_list)) {
2316 /* When retrying a NAK'd transfer, we give it a fair
2317 * chance of completing again. */
2318 qh->nak_frame = 0xffff;
2319 assign_and_init_hc(hcd, qh);
2322 * Move the QH from the non-periodic inactive schedule to the
2323 * non-periodic active schedule.
2325 list_move(&qh->qh_list_entry, &hcd->non_periodic_sched_active);
2327 if (ret_val == DWC_OTG_TRANSACTION_NONE) {
2328 ret_val = DWC_OTG_TRANSACTION_NON_PERIODIC;
2330 ret_val = DWC_OTG_TRANSACTION_ALL;
2333 hcd->non_periodic_channels++;
/* Starvation avoidance: in DMA mode the core retries NAK'd transfers
 * autonomously, so a channel may never free up on its own. If every
 * channel is periodic or NAKing, forcibly halt one NAKing channel. */
2335 if (hcd->core_if->dma_enable && channels_full &&
2336 hcd->periodic_channels + hcd->nakking_channels >= num_channels) {
2337 /* There are items queued, but all channels are either
2338 * reserved for periodic or have received NAKs. This
2339 * means that it could take an indefinite amount of time
2340 * before a channel is actually freed (since in DMA
2341 * mode, the hardware takes care of retries), so we take
2342 * action here by forcing a nakking channel to halt to
2343 * give other transfers a chance to run. */
2344 dwc_otg_qtd_t *qtd = list_entry(qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry);
2345 struct urb *urb = qtd->urb;
2346 dwc_hc_t *hc = dwc_otg_halt_nakking_channel(hcd);
2349 DWC_DEBUGPL(DBG_HCD "Out of Host Channels for non-periodic transfer - Halting channel %d (dev %d ep%d%s) to service qh %p (dev %d ep%d%s)\n", hc->hc_num, hc->dev_addr, hc->ep_num, (hc->ep_is_in ? "in" : "out"), qh, usb_pipedevice(urb->pipe), usb_pipeendpoint(urb->pipe), (usb_pipein(urb->pipe) != 0) ? "in" : "out");
2354 SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags);
2360 * Halt a bulk channel that is blocking on NAKs to free up space.
2362 * This will decrement hcd->nakking_channels immediately, but
2363 * hcd->non_periodic_channels is not decremented until the channel is
2366 * Returns the halted channel.
dwc_hc_t *dwc_otg_halt_nakking_channel(dwc_otg_hcd_t *hcd) {
2369 int num_channels, i;
/* Sample the current (micro)frame number now, so the victim QH can be
 * marked low-priority starting from this point in time (see the
 * nak_frame assignment below). */
2372 cur_frame = dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd(hcd));
2373 num_channels = hcd->core_if->core_params->host_channels;
/* Scan every host channel, starting just after the last channel we
 * halted, so victims rotate fairly instead of always hitting the
 * lowest-numbered channel. */
2375 for (i = 0; i < num_channels; i++) {
2376 int channel = (hcd->last_channel_halted + 1 + i) % num_channels;
2377 dwc_hc_t *hc = hcd->hc_ptr_array[channel];
/* Eligible victim: a transfer that has started, with no halt already
 * queued or pending, whose QH is currently flagged as NAK-blocked
 * (nak_frame == 0xffff means "not nakking"). */
2378 if (hc->xfer_started
2379 && !hc->halt_on_queue
2380 && !hc->halt_pending
2381 && hc->qh->nak_frame != 0xffff) {
2382 dwc_otg_hc_halt(hcd, hc, DWC_OTG_HC_XFER_NAK);
2383 /* Store the last channel halted to
2384 * fairly rotate the channel to halt.
2385 * This prevents the scenario where there
2386 * are three blocking endpoints and only
2387 * two free host channels, where the
2388 * blocking endpoint that gets hc 3 will
2389 * never be halted, while the other two
2390 * endpoints will be fighting over the
2391 * other host channel. */
2392 hcd->last_channel_halted = channel;
2393 /* Update nak_frame, so this frame is
2394 * kept at low priority for a period of
2395 * time starting now. */
2396 hc->qh->nak_frame = cur_frame;
/* No NAK-blocked channel could be halted: dump the full HCD state to
 * aid debugging of the stuck-channel condition. */
2400 dwc_otg_hcd_dump_state(hcd);
2405 * Attempts to queue a single transaction request for a host channel
2406 * associated with either a periodic or non-periodic transfer. This function
2407 * assumes that there is space available in the appropriate request queue. For
2408 * an OUT transfer or SETUP transaction in Slave mode, it checks whether space
2409 * is available in the appropriate Tx FIFO.
2411 * @param hcd The HCD state structure.
2412 * @param hc Host channel descriptor associated with either a periodic or
2413 * non-periodic transfer.
2414 * @param fifo_dwords_avail Number of DWORDs available in the periodic Tx
2415 * FIFO for periodic transfers or the non-periodic Tx FIFO for non-periodic
2418 * @return 1 if a request is queued and more requests may be needed to
2419 * complete the transfer, 0 if no more requests are required for this
2420 * transfer, -1 if there is insufficient space in the Tx FIFO.
2422 static int queue_transaction(dwc_otg_hcd_t *hcd,
2424 uint16_t fifo_dwords_avail)
/* DMA mode: the controller fetches data itself, so the transfer only
 * needs to be started once; no FIFO space check is required here. */
2428 if (hcd->core_if->dma_enable) {
2429 if (!hc->xfer_started) {
2430 dwc_otg_hc_start_transfer(hcd->core_if, hc);
/* Starting a fresh transfer clears any PING state on the QH. */
2431 hc->qh->ping_state = 0;
2434 } else if (hc->halt_pending) {
2435 /* Don't queue a request if the channel has been halted. */
/* A halt was deferred until queue time; issue it now instead of
 * queuing another request. */
2437 } else if (hc->halt_on_queue) {
2438 dwc_otg_hc_halt(hcd, hc, hc->halt_status);
/* Slave mode, PING protocol required before the data stage. */
2440 } else if (hc->do_ping) {
2441 if (!hc->xfer_started) {
2442 dwc_otg_hc_start_transfer(hcd->core_if, hc);
/* Slave mode OUT or SETUP: the CPU must write payload into the Tx
 * FIFO, so require room for at least one max-packet
 * (fifo_dwords_avail is in DWORDs, max_packet in bytes). */
2445 } else if (!hc->ep_is_in ||
2446 hc->data_pid_start == DWC_OTG_HC_PID_SETUP) {
2447 if ((fifo_dwords_avail * 4) >= hc->max_packet) {
2448 if (!hc->xfer_started) {
2449 dwc_otg_hc_start_transfer(hcd->core_if, hc);
2452 retval = dwc_otg_hc_continue_transfer(hcd->core_if, hc);
/* Slave mode IN: no FIFO write is needed to issue the request. */
2458 if (!hc->xfer_started) {
2459 dwc_otg_hc_start_transfer(hcd->core_if, hc);
2462 retval = dwc_otg_hc_continue_transfer(hcd->core_if, hc);
2470 * Processes active non-periodic channels and queues transactions for these
2471 * channels to the DWC_otg controller. After queueing transactions, the NP Tx
2472 * FIFO Empty interrupt is enabled if there are more transactions to queue as
2473 * NP Tx FIFO or request queue space becomes available. Otherwise, the NP Tx
2474 * FIFO Empty interrupt is disabled.
2476 static void process_non_periodic_channels(dwc_otg_hcd_t *hcd)
2478 gnptxsts_data_t tx_status;
2479 struct list_head *orig_qh_ptr;
2482 int no_queue_space = 0;
2483 int no_fifo_space = 0;
2486 dwc_otg_core_global_regs_t *global_regs = hcd->core_if->core_global_regs;
2488 DWC_DEBUGPL(DBG_HCDV, "Queue non-periodic transactions\n");
/* Snapshot of request-queue / FIFO space before queuing, for debug. */
2490 tx_status.d32 = dwc_read_reg32(&global_regs->gnptxsts);
2491 DWC_DEBUGPL(DBG_HCDV, " NP Tx Req Queue Space Avail (before queue): %d\n",
2492 tx_status.b.nptxqspcavail);
2493 DWC_DEBUGPL(DBG_HCDV, " NP Tx FIFO Space Avail (before queue): %d\n",
2494 tx_status.b.nptxfspcavail);
2497 * Keep track of the starting point. Skip over the start-of-list
/* non_periodic_qh_ptr is a persistent round-robin cursor into the
 * active list; never let it rest on the list head sentinel. */
2500 if (hcd->non_periodic_qh_ptr == &hcd->non_periodic_sched_active) {
2501 hcd->non_periodic_qh_ptr = hcd->non_periodic_qh_ptr->next;
/* Remember where this pass started so we stop after one full lap. */
2503 orig_qh_ptr = hcd->non_periodic_qh_ptr;
2506 * Process once through the active list or until no more space is
2507 * available in the request queue or the Tx FIFO.
/* Re-read status each iteration: queuing consumes queue/FIFO space. */
2510 tx_status.d32 = dwc_read_reg32(&global_regs->gnptxsts);
/* Request-queue space only matters in Slave mode. */
2511 if (!hcd->core_if->dma_enable && tx_status.b.nptxqspcavail == 0) {
2516 qh = list_entry(hcd->non_periodic_qh_ptr, dwc_otg_qh_t, qh_list_entry);
/* queue_transaction: 1 = more requests needed, 0 = done, -1 = no
 * FIFO space (see its header comment). */
2517 status = queue_transaction(hcd, qh->channel, tx_status.b.nptxfspcavail);
2521 } else if (status < 0) {
2526 /* Advance to next QH, skipping start-of-list entry. */
2527 hcd->non_periodic_qh_ptr = hcd->non_periodic_qh_ptr->next;
2528 if (hcd->non_periodic_qh_ptr == &hcd->non_periodic_sched_active) {
2529 hcd->non_periodic_qh_ptr = hcd->non_periodic_qh_ptr->next;
2532 } while (hcd->non_periodic_qh_ptr != orig_qh_ptr);
/* Slave mode only: manage the NP Tx FIFO empty interrupt so we get
 * called back when more queue/FIFO space frees up. */
2534 if (!hcd->core_if->dma_enable) {
2535 gintmsk_data_t intr_mask = {.d32 = 0};
2536 intr_mask.b.nptxfempty = 1;
2539 tx_status.d32 = dwc_read_reg32(&global_regs->gnptxsts);
2540 DWC_DEBUGPL(DBG_HCDV, " NP Tx Req Queue Space Avail (after queue): %d\n",
2541 tx_status.b.nptxqspcavail);
2542 DWC_DEBUGPL(DBG_HCDV, " NP Tx FIFO Space Avail (after queue): %d\n",
2543 tx_status.b.nptxfspcavail);
2545 if (more_to_do || no_queue_space || no_fifo_space) {
2547 * May need to queue more transactions as the request
2548 * queue or Tx FIFO empties. Enable the non-periodic
2549 * Tx FIFO empty interrupt. (Always use the half-empty
2550 * level to ensure that new requests are loaded as
2551 * soon as possible.)
2553 dwc_modify_reg32(&global_regs->gintmsk, 0, intr_mask.d32);
2556 * Disable the Tx FIFO empty interrupt since there are
2557 * no more transactions that need to be queued right
2558 * now. This function is called from interrupt
2559 * handlers to queue more transactions as transfer
2562 dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, 0);
2568 * Processes periodic channels for the next frame and queues transactions for
2569 * these channels to the DWC_otg controller. After queueing transactions, the
2570 * Periodic Tx FIFO Empty interrupt is enabled if there are more transactions
2571 * to queue as Periodic Tx FIFO or request queue space becomes available.
2572 * Otherwise, the Periodic Tx FIFO Empty interrupt is disabled.
2574 static void process_periodic_channels(dwc_otg_hcd_t *hcd)
2576 hptxsts_data_t tx_status;
2577 struct list_head *qh_ptr;
2580 int no_queue_space = 0;
2581 int no_fifo_space = 0;
2583 dwc_otg_host_global_regs_t *host_regs;
2584 host_regs = hcd->core_if->host_if->host_global_regs;
2586 DWC_DEBUGPL(DBG_HCD_FLOOD, "Queue periodic transactions\n");
/* Snapshot of periodic request-queue / FIFO space before queuing. */
2588 tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts);
2589 DWC_DEBUGPL(DBG_HCD_FLOOD, " P Tx Req Queue Space Avail (before queue): %d\n",
2590 tx_status.b.ptxqspcavail);
2591 DWC_DEBUGPL(DBG_HCD_FLOOD, " P Tx FIFO Space Avail (before queue): %d\n",
2592 tx_status.b.ptxfspcavail);
/* Walk every QH that has a channel assigned for this frame. */
2595 qh_ptr = hcd->periodic_sched_assigned.next;
2596 while (qh_ptr != &hcd->periodic_sched_assigned) {
/* Re-read status each iteration: queuing consumes space. */
2597 tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts);
2598 if (tx_status.b.ptxqspcavail == 0) {
2603 qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry);
2606 * Set a flag if we're queuing high-bandwidth in slave mode.
2607 * The flag prevents any halts to get into the request queue in
2608 * the middle of multiple high-bandwidth packets getting queued.
2610 if (!hcd->core_if->dma_enable &&
2611 qh->channel->multi_count > 1)
2613 hcd->core_if->queuing_high_bandwidth = 1;
2616 status = queue_transaction(hcd, qh->channel, tx_status.b.ptxfspcavail);
2623 * In Slave mode, stay on the current transfer until there is
2624 * nothing more to do or the high-bandwidth request count is
2625 * reached. In DMA mode, only need to queue one request. The
2626 * controller automatically handles multiple packets for
2627 * high-bandwidth transfers.
2629 if (hcd->core_if->dma_enable || status == 0 ||
2630 qh->channel->requests == qh->channel->multi_count) {
2631 qh_ptr = qh_ptr->next;
2633 * Move the QH from the periodic assigned schedule to
2634 * the periodic queued schedule.
2636 list_move(&qh->qh_list_entry, &hcd->periodic_sched_queued);
2638 /* done queuing high bandwidth */
2639 hcd->core_if->queuing_high_bandwidth = 0;
/* Slave mode only: manage the periodic Tx FIFO empty interrupt so we
 * get called back when more queue/FIFO space frees up. */
2643 if (!hcd->core_if->dma_enable) {
2644 dwc_otg_core_global_regs_t *global_regs;
2645 gintmsk_data_t intr_mask = {.d32 = 0};
2647 global_regs = hcd->core_if->core_global_regs;
2648 intr_mask.b.ptxfempty = 1;
2650 tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts);
2651 DWC_DEBUGPL(DBG_HCDV, " P Tx Req Queue Space Avail (after queue): %d\n",
2652 tx_status.b.ptxqspcavail);
2653 DWC_DEBUGPL(DBG_HCDV, " P Tx FIFO Space Avail (after queue): %d\n",
2654 tx_status.b.ptxfspcavail);
2656 if (!list_empty(&hcd->periodic_sched_assigned) ||
2657 no_queue_space || no_fifo_space) {
2659 * May need to queue more transactions as the request
2660 * queue or Tx FIFO empties. Enable the periodic Tx
2661 * FIFO empty interrupt. (Always use the half-empty
2662 * level to ensure that new requests are loaded as
2663 * soon as possible.)
2665 dwc_modify_reg32(&global_regs->gintmsk, 0, intr_mask.d32);
2668 * Disable the Tx FIFO empty interrupt since there are
2669 * no more transactions that need to be queued right
2670 * now. This function is called from interrupt
2671 * handlers to queue more transactions as transfer
2674 dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, 0);
2680 * This function processes the currently active host channels and queues
2681 * transactions for these channels to the DWC_otg controller. It is called
2682 * from HCD interrupt handler functions.
2684 * @param hcd The HCD state structure.
2685 * @param tr_type The type(s) of transactions to queue (non-periodic,
2686 * periodic, or both).
2688 void dwc_otg_hcd_queue_transactions(dwc_otg_hcd_t *hcd,
2689 dwc_otg_transaction_type_e tr_type)
2692 DWC_DEBUGPL(DBG_HCD, "Queue Transactions\n");
2694 /* Process host channels associated with periodic transfers. */
2695 if ((tr_type == DWC_OTG_TRANSACTION_PERIODIC ||
2696 tr_type == DWC_OTG_TRANSACTION_ALL) &&
2697 !list_empty(&hcd->periodic_sched_assigned)) {
2699 process_periodic_channels(hcd);
2702 /* Process host channels associated with non-periodic transfers. */
2703 if (tr_type == DWC_OTG_TRANSACTION_NON_PERIODIC ||
2704 tr_type == DWC_OTG_TRANSACTION_ALL) {
2705 if (!list_empty(&hcd->non_periodic_sched_active)) {
2706 process_non_periodic_channels(hcd);
2709 * Ensure NP Tx FIFO empty interrupt is disabled when
2710 * there are no non-periodic transfers to process.
/* Nothing active: mask nptxfempty so the handler stops firing. */
2712 gintmsk_data_t gintmsk = {.d32 = 0};
2713 gintmsk.b.nptxfempty = 1;
2714 dwc_modify_reg32(&hcd->core_if->core_global_regs->gintmsk,
2721 * Sets the final status of an URB and returns it to the device driver. Any
2722 * required cleanup of the URB is performed.
2724 void dwc_otg_hcd_complete_urb(dwc_otg_hcd_t *hcd, struct urb *urb, int status)
2726 unsigned long flags;
2728 SPIN_LOCK_IRQSAVE(&hcd->lock, flags);
2732 if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
2733 DWC_PRINT("%s: urb %p, device %d, ep %d %s, status=%d\n",
2734 __func__, urb, usb_pipedevice(urb->pipe),
2735 usb_pipeendpoint(urb->pipe),
2736 usb_pipein(urb->pipe) ? "IN" : "OUT", status);
/* For isochronous URBs, also report the per-packet status codes. */
2737 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
2739 for (i = 0; i < urb->number_of_packets; i++) {
2740 DWC_PRINT(" ISO Desc %d status: %d\n",
2741 i, urb->iso_frame_desc[i].status);
2747 //if we use the aligned buffer instead of the original unaligned buffer,
2748 //for IN data, we have to move the data to the original buffer
/* transfer_dma == aligned_transfer_dma means the aligned bounce buffer
 * was used for this transfer, so IN data must be copied back.
 * NOTE(review): dma_sync_single_for_device before the CPU *reads* the
 * buffer looks inverted — dma_sync_single_for_cpu is normally required
 * before CPU access to DMA_FROM_DEVICE data; confirm against the
 * mapping done at submit time. */
2749 if((urb->transfer_dma==urb->aligned_transfer_dma)&&((urb->transfer_flags & URB_DIR_MASK)==URB_DIR_IN)){
2750 dma_sync_single_for_device(NULL,urb->transfer_dma,urb->actual_length,DMA_FROM_DEVICE);
2751 memcpy(urb->transfer_buffer,urb->aligned_transfer_buffer,urb->actual_length);
2754 usb_hcd_unlink_urb_from_ep(dwc_otg_hcd_to_hcd(hcd), urb);
2755 urb->status = status;
/* Drop the HCD lock before giveback: the completion callback may
 * resubmit the URB and re-enter the HCD. */
2757 SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags);
2758 usb_hcd_giveback_urb(dwc_otg_hcd_to_hcd(hcd), urb, status);
2763 * Returns the Queue Head for an URB.
2765 dwc_otg_qh_t *dwc_urb_to_qh(struct urb *urb)
2767 struct usb_host_endpoint *ep = dwc_urb_to_endpoint(urb);
2768 return (dwc_otg_qh_t *)ep->hcpriv;
2772 void dwc_print_setup_data(uint8_t *setup)
2775 if (CHK_DEBUG_LEVEL(DBG_HCD)){
2776 DWC_PRINT("Setup Data = MSB ");
2777 for (i = 7; i >= 0; i--) DWC_PRINT("%02x ", setup[i]);
2779 DWC_PRINT(" bmRequestType Tranfer = %s\n", (setup[0] & 0x80) ? "Device-to-Host" : "Host-to-Device");
2780 DWC_PRINT(" bmRequestType Type = ");
2781 switch ((setup[0] & 0x60) >> 5) {
2782 case 0: DWC_PRINT("Standard\n"); break;
2783 case 1: DWC_PRINT("Class\n"); break;
2784 case 2: DWC_PRINT("Vendor\n"); break;
2785 case 3: DWC_PRINT("Reserved\n"); break;
2787 DWC_PRINT(" bmRequestType Recipient = ");
2788 switch (setup[0] & 0x1f) {
2789 case 0: DWC_PRINT("Device\n"); break;
2790 case 1: DWC_PRINT("Interface\n"); break;
2791 case 2: DWC_PRINT("Endpoint\n"); break;
2792 case 3: DWC_PRINT("Other\n"); break;
2793 default: DWC_PRINT("Reserved\n"); break;
2795 DWC_PRINT(" bRequest = 0x%0x\n", setup[1]);
2796 DWC_PRINT(" wValue = 0x%0x\n", *((uint16_t *)&setup[2]));
2797 DWC_PRINT(" wIndex = 0x%0x\n", *((uint16_t *)&setup[4]));
2798 DWC_PRINT(" wLength = 0x%0x\n\n", *((uint16_t *)&setup[6]));
2803 void dwc_otg_hcd_dump_frrem(dwc_otg_hcd_t *hcd) {
/**
 * Dumps the full HCD state to the log for debugging: per-channel software
 * state, live channel registers for started transfers, the in-process URB
 * (if any), scheduler counters, and Tx queue/FIFO availability.
 */
2806 void dwc_otg_hcd_dump_state(dwc_otg_hcd_t *hcd)
2811 gnptxsts_data_t np_tx_status;
2812 hptxsts_data_t p_tx_status;
2814 num_channels = hcd->core_if->core_params->host_channels;
2816 DWC_PRINT("************************************************************\n");
2817 DWC_PRINT("HCD State:\n");
2818 DWC_PRINT(" Num channels: %d\n", num_channels);
/* Per-channel software state. */
2819 for (i = 0; i < num_channels; i++) {
2820 dwc_hc_t *hc = hcd->hc_ptr_array[i];
2821 DWC_PRINT(" Channel %d: %p\n", i, hc);
2822 DWC_PRINT(" dev_addr: %d, ep_num: %d, ep_is_in: %d\n",
2823 hc->dev_addr, hc->ep_num, hc->ep_is_in);
2824 DWC_PRINT(" speed: %d\n", hc->speed);
2825 DWC_PRINT(" ep_type: %d\n", hc->ep_type);
2826 DWC_PRINT(" max_packet: %d\n", hc->max_packet);
2827 DWC_PRINT(" data_pid_start: %d\n", hc->data_pid_start);
2828 DWC_PRINT(" multi_count: %d\n", hc->multi_count);
2829 DWC_PRINT(" xfer_started: %d\n", hc->xfer_started);
2830 DWC_PRINT(" xfer_buff: %p\n", hc->xfer_buff);
2831 DWC_PRINT(" xfer_len: %d\n", hc->xfer_len);
2832 DWC_PRINT(" xfer_count: %d\n", hc->xfer_count);
2833 DWC_PRINT(" halt_on_queue: %d\n", hc->halt_on_queue);
2834 DWC_PRINT(" halt_pending: %d\n", hc->halt_pending);
2835 DWC_PRINT(" halt_status: %d\n", hc->halt_status);
2836 DWC_PRINT(" do_split: %d\n", hc->do_split);
2837 DWC_PRINT(" complete_split: %d\n", hc->complete_split);
2838 DWC_PRINT(" hub_addr: %d\n", hc->hub_addr);
2839 DWC_PRINT(" port_addr: %d\n", hc->port_addr);
2840 DWC_PRINT(" xact_pos: %d\n", hc->xact_pos);
2841 DWC_PRINT(" requests: %d\n", hc->requests);
2842 DWC_PRINT(" qh: %p\n", hc->qh);
/* NOTE(review): hc->qh is dereferenced here without a visible NULL
 * check, although it is printed as a raw pointer just above — confirm
 * every channel always has a QH when this dump runs. */
2844 DWC_PRINT(" nak_frame: %x\n", hc->qh->nak_frame);
/* For channels with a transfer in flight, also dump the live
 * hardware registers. */
2845 if (hc->xfer_started) {
2847 hcchar_data_t hcchar;
2848 hctsiz_data_t hctsiz;
2850 hcintmsk_data_t hcintmsk;
2851 hfnum.d32 = dwc_read_reg32(&hcd->core_if->host_if->host_global_regs->hfnum);
2852 hcchar.d32 = dwc_read_reg32(&hcd->core_if->host_if->hc_regs[i]->hcchar);
2853 hctsiz.d32 = dwc_read_reg32(&hcd->core_if->host_if->hc_regs[i]->hctsiz);
2854 hcint.d32 = dwc_read_reg32(&hcd->core_if->host_if->hc_regs[i]->hcint);
2855 hcintmsk.d32 = dwc_read_reg32(&hcd->core_if->host_if->hc_regs[i]->hcintmsk);
2856 DWC_PRINT(" hfnum: 0x%08x\n", hfnum.d32);
2857 DWC_PRINT(" hcchar: 0x%08x\n", hcchar.d32);
2858 DWC_PRINT(" hctsiz: 0x%08x\n", hctsiz.d32);
2859 DWC_PRINT(" hcint: 0x%08x\n", hcint.d32);
2860 DWC_PRINT(" hcintmsk: 0x%08x\n", hcintmsk.d32);
/* If a qtd is in process on this channel, dump its URB details. */
2862 if (hc->xfer_started && hc->qh && hc->qh->qtd_in_process) {
2865 qtd = hc->qh->qtd_in_process;
2867 DWC_PRINT(" URB Info:\n");
2868 DWC_PRINT(" qtd: %p, urb: %p\n", qtd, urb);
2870 DWC_PRINT(" Dev: %d, EP: %d %s\n",
2871 usb_pipedevice(urb->pipe), usb_pipeendpoint(urb->pipe),
2872 usb_pipein(urb->pipe) ? "IN" : "OUT");
2873 DWC_PRINT(" Max packet size: %d\n",
2874 usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)));
2875 DWC_PRINT(" transfer_buffer: %p\n", urb->transfer_buffer);
2876 DWC_PRINT(" transfer_dma: %p\n", (void *)urb->transfer_dma);
2877 DWC_PRINT(" transfer_buffer_length: %d\n", urb->transfer_buffer_length);
2878 DWC_PRINT(" actual_length: %d\n", urb->actual_length);
/* Global scheduler counters. */
2882 DWC_PRINT(" non_periodic_channels: %d\n", hcd->non_periodic_channels);
2883 DWC_PRINT(" periodic_channels: %d\n", hcd->periodic_channels);
2884 DWC_PRINT(" nakking_channels: %d\n", hcd->nakking_channels);
2885 DWC_PRINT(" last_channel_halted: %d\n", hcd->last_channel_halted);
2886 DWC_PRINT(" periodic_usecs: %d\n", hcd->periodic_usecs);
2887 np_tx_status.d32 = dwc_read_reg32(&hcd->core_if->core_global_regs->gnptxsts);
2888 DWC_PRINT(" NP Tx Req Queue Space Avail: %d\n", np_tx_status.b.nptxqspcavail);
2889 DWC_PRINT(" NP Tx FIFO Space Avail: %d\n", np_tx_status.b.nptxfspcavail);
2890 p_tx_status.d32 = dwc_read_reg32(&hcd->core_if->host_if->host_global_regs->hptxsts);
2891 DWC_PRINT(" P Tx Req Queue Space Avail: %d\n", p_tx_status.b.ptxqspcavail);
2892 DWC_PRINT(" P Tx FIFO Space Avail: %d\n", p_tx_status.b.ptxfspcavail);
2893 dwc_otg_hcd_dump_frrem(hcd);
2894 dwc_otg_dump_global_registers(hcd->core_if);
2895 dwc_otg_dump_host_registers(hcd->core_if);
2896 DWC_PRINT("************************************************************\n");
2900 #endif /* DWC_DEVICE_ONLY */