2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
16 //-----------------------------------------------------------------------
19 * Driver for Infineon Amazon DMA
21 //-----------------------------------------------------------------------
22 /* Author: Wu Qi Ming[Qi-Ming.Wu@infineon.com]
23 * Created: 7-April-2004
25 //-----------------------------------------------------------------------
27 * Last changed on: 4-May-2004
28 * Last changed by: <peng.liu@infineon.com>
31 //-----------------------------------------------------------------------
32 /* Last changed on: 03-Dec-2004
33 * Last changed by: peng.liu@infineon.com
34 * Reason: recover from TPE bug
37 //000004:fchang 2005/6/2 Modified by Linpeng as described below
38 //-----------------------------------------------------------------------
39 /* Last changed on: 28-Jan-2004
40 * Last changed by: peng.liu@infineon.com
42 * - handle "out of memory" bug
44 //000003:tc.chen 2005/06/16 fix memory leak when Tx buffer full (heaving traffic).
45 //507261:tc.chen 2005/07/26 re-organize code address map to improve performance.
47 #if defined(CONFIG_MODVERSIONS) && !defined(MODVERSIONS)
51 #if defined(MODVERSIONS) && !defined(__GENKSYMS__)
52 #include <linux/modversions.h>
56 #define EXPORT_SYMTAB /* need this one 'cause we export symbols */
61 /* no TX interrupt handling */
63 /* need for DMA workaround */
64 #undef AMAZON_DMA_TPE_AAL5_RECOVERY
66 #ifdef AMAZON_DMA_TPE_AAL5_RECOVERY
67 #define MAX_SYNC_FAILS 1000000 // 000004:fchang
68 unsigned int dma_sync_fails = 0;
69 unsigned int total_dma_tpe_reset = 0;
70 int (*tpe_reset) (void);
71 int (*tpe_start) (void);
72 int (*tpe_inject) (void);
73 #endif // AMAZON_DMA_TPE_AAL5_RECOVERY
76 #include <linux/version.h>
77 #include <linux/module.h>
78 #include <linux/init.h>
79 #include <linux/sched.h>
80 #include <linux/kernel.h>
81 #include <linux/slab.h>
82 #include <linux/string.h>
83 #include <linux/timer.h>
85 #include <linux/errno.h>
86 #include <linux/proc_fs.h>
87 #include <linux/stat.h>
89 #include <linux/tty.h>
90 #include <linux/selection.h>
91 #include <linux/kmod.h>
92 #include <linux/vmalloc.h>
93 #include <linux/interrupt.h>
94 #include <linux/delay.h>
95 #include <asm/uaccess.h>
96 #include <linux/errno.h>
99 #include <asm/amazon/amazon.h>
100 #include <asm/amazon/irq.h>
101 #include <asm/amazon/amazon_dma.h>
102 #include "dma-core.h"
104 #define AMAZON_DMA_EMSG(fmt, args...) printk( KERN_ERR "%s: " fmt,__FUNCTION__, ## args)
106 static irqreturn_t dma_interrupt(int irq, void *dev_id);
107 extern void mask_and_ack_amazon_irq(unsigned int irq_nr);
109 /***************************************** global data *******************************************/
111 dev_list *g_current_dev = NULL;
112 dev_list *g_head_dev = NULL;
113 dev_list *g_tail_dev = NULL;
114 channel_info g_log_chan[CHAN_TOTAL_NUM + 1];
115 struct proc_dir_entry *g_amazon_dma_dir;
116 static u8 rx_chan_list_len = 0;
117 static u8 tx_chan_list_len = 0;
118 static int rx_chan_list[RX_CHAN_NUM + 1];
119 static int tx_chan_list[TX_CHAN_NUM + 1];
120 static u32 comb_isr_mask[CHAN_TOTAL_NUM];
122 static inline int is_rx_chan(int chan_no)
123 /*judge if this is an rx channel*/
126 if (chan_no < RX_CHAN_NUM)
/* Ugly, Channel ON register is badly mapped to channel no. */
/* Index: logical channel number; value: bit position of that channel in
 * the AMAZON_DMA_CH_ON register. Used by open_channel()/close_channel()
 * and is_channel_open(). */
static u8 ch_on_mapping[CHAN_TOTAL_NUM] =
    { 0, 1, 2, 3, 6, 7, 10, 4, 5, 8, 9, 11 };
135 /* Brief: check wether the chan_no is legal
136 * Parameter: chan_no: logical channel number
137 * Return: 0 if is not valid
140 static inline int is_valid_dma_ch(int chan_no)
142 return ((chan_no >= 0) && (chan_no < CHAN_TOTAL_NUM));
145 /* Brief: check whether a channel is open through Channel ON register
146 * Parameter: chan_no: logical channel number
147 * Return: 1 channel is open
149 * EINVAL: invalid parameter
151 static inline int is_channel_open(int chan_no)
153 return (AMAZON_DMA_REG32(AMAZON_DMA_CH_ON) &
154 (1 << ch_on_mapping[chan_no]));
/* Brief: add a list entry
 * Parameter: list: array terminated/padded with negative values
 *            size_of_list: number of usable slots in list
 *            entry: value to add
 * Return: 0  : entry added
 *         <0 : not added (duplicate, or list full)
 * Description:
 * Always add to the tail and no redundancy allowed (entries are unique).
 * Free slots are marked by negative values.
 */
static inline int _add_list_entry(int *list, int size_of_list, int entry)
{
	int i;

	for (i = 0; i < size_of_list; i++) {
		if (list[i] == entry)
			return -1;	/* already present: keep entries unique */
		if (list[i] < 0) {	/* first free slot == current tail */
			list[i] = entry;
			return 0;
		}
	}
	return -1;		/* list full */
}
/* Brief: delete a list entry
 * Parameter: list: array terminated/padded with negative values; the array
 *                  is allocated with one extra sentinel slot past
 *                  size_of_list (see rx_chan_list/tx_chan_list)
 *            size_of_list: number of usable slots in list
 *            entry: value to remove
 * Return: 0  : entry deleted
 *         <0 : not deleted (not found)
 * Description:
 * Find the entry and remove it; shift all entries behind it one step
 * forward so the used part of the list stays contiguous.
 */
static inline int _delete_list_entry(int *list, int size_of_list,
				     int entry)
{
	int i, j;

	for (i = 0; i < size_of_list; i++) {
		if (list[i] == entry) {
			for (j = i; j < size_of_list; j++) {
				list[j] = list[j + 1];
				/* stop once the free-slot marker moved down */
				if (list[j + 1] < 0) {
					break;
				}
			}
			return 0;
		}
	}
	return -1;
}
202 /* Brief: enable a channel through Channel ON register
203 * Parameter: chan_no: logical channel number
205 * Please don't open a channel without a valid descriptor (hardware pitfall)
207 static inline void open_channel(int chan_no)
209 AMAZON_DMA_REG32(AMAZON_DMA_CH_ON) |= (1 << ch_on_mapping[chan_no]);
210 if (is_rx_chan(chan_no)) {
211 if (_add_list_entry(rx_chan_list, RX_CHAN_NUM, chan_no) == 0) {
214 AMAZON_DMA_DMSG("cannot add chan %d to open list\n", chan_no);
217 if (_add_list_entry(tx_chan_list, TX_CHAN_NUM, chan_no) == 0) {
220 AMAZON_DMA_DMSG("cannot add chan %d to open list\n", chan_no);
225 /* Brief: disable a channel through Channel ON register
226 * Parameter: chan_no: logical channel number
229 static inline void close_channel(int chan_no)
231 AMAZON_DMA_REG32(AMAZON_DMA_CH_ON) &= ~(1 << ch_on_mapping[chan_no]);
232 if (is_rx_chan(chan_no)) {
233 if (_delete_list_entry(rx_chan_list, RX_CHAN_NUM, chan_no) == 0) {
236 AMAZON_DMA_DMSG("cannot remove chan %d from open list \n",
240 if (_delete_list_entry(tx_chan_list, TX_CHAN_NUM, chan_no) == 0) {
243 AMAZON_DMA_DMSG("cannot remove chan %d from open list \n",
/* Brief: clear RX interrupt
 * Parameter: chan_no: logical channel number
 * Description:
 * Acknowledge the per-channel interrupt status by writing the currently
 * set completion/end-of-packet bits back to the channel's ISR register.
 * NOTE(review): this extraction is missing lines; the two preprocessor
 * branches appear to differ only in which ISR bits are acknowledged
 * (the DMA_NO_POLLING variant also covers DMA_ISR_CMDCPT/...) — confirm
 * the exact masks against the full source.
 */
inline void rx_chan_clear_isr(int chan_no)
#ifdef DMA_NO_POLLING
AMAZON_DMA_REG32(AMAZON_DMA_CH0_ISR + chan_no * AMAZON_DMA_CH_STEP) =
(AMAZON_DMA_CH0_ISR +
AMAZON_DMA_CH_STEP) & (DMA_ISR_CPT | DMA_ISR_EOP | DMA_ISR_CMDCPT
AMAZON_DMA_REG32(AMAZON_DMA_CH0_ISR + chan_no * AMAZON_DMA_CH_STEP) =
(AMAZON_DMA_CH0_ISR +
AMAZON_DMA_CH_STEP) & (DMA_ISR_CPT | DMA_ISR_EOP |
270 #ifdef AMAZON_DMA_TPE_AAL5_RECOVERY
/* Brief: hacking function, this will reset all descriptors back to DMA
 * ownership (part of the AAL5/TPE recovery workaround).
 * Parameter: chan_no: logical RX channel number
 * Description:
 * Walks every RX descriptor of the channel, clears the SOP/EOP/complete
 * flags and hands the descriptor back to the DMA with the configured
 * packet size, so reception can restart from a clean state.
 */
static void dma_reset_all_descriptors(int chan_no)
volatile struct rx_desc *rx_desc_p = NULL;
/* base of this channel's descriptor ring inside the global list */
(struct rx_desc *) g_desc_list +
g_log_chan[chan_no].offset_from_base;
for (i = 0; i < g_log_chan[chan_no].desc_len; i++) {
rx_desc_p->status.word &=
(~(DMA_DESC_SOP_SET | DMA_DESC_EOP_SET | DMA_DESC_CPT_SET));
rx_desc_p->status.word |=
(DMA_DESC_OWN_DMA | g_log_chan[chan_no].packet_size);
/* Brief: Reset DMA descriptors and the TPE engine (AAL5 recovery workaround)
 * Parameter: chan_no: logical RX channel number that lost sync
 * Description:
 * Heavy-handed recovery path, only compiled with
 * AMAZON_DMA_TPE_AAL5_RECOVERY:
 *  1. wait until the TPE TX channels (logical 9 and 10) have drained,
 *  2. reset the TPE engine via the registered tpe_reset() hook,
 *  3. reset all RX descriptors and clear the channel ISR,
 *  4. inject a cell via tpe_inject() and drain the resulting completion,
 *  5. turn the channel off and on again through the Channel ON register.
 * panic() is used deliberately: without a working TPE the system cannot
 * continue. NOTE(review): lines are missing from this extraction, so the
 * exact control flow between the visible statements is not verifiable here.
 */
static void amazon_dma_reset_tpe_rx(int chan_no)
struct tx_desc *tx_desc_p = NULL;
// wait until all TX channels stop transmitting
for (j = 9; j <= 10; j++) {
(struct tx_desc *) g_desc_list +
g_log_chan[j].offset_from_base;
for (i = 0; i < g_log_chan[j].desc_len; i++) {
while ((tx_desc_p->status.field.OWN != CPU_OWN)) {
AMAZON_DMA_DMSG("DMA TX in progress\n"); // 000004:fchang
total_dma_tpe_reset++;
("\n===============resetting TPE========================== \n");
if ((*tpe_reset) ()) {
panic("cannot reset TPE engien\n"); // 000004:fchang
panic("no tpe_reset function\n"); // 000004:fchang
dma_reset_all_descriptors(chan_no);
rx_chan_clear_isr(chan_no);
if ((*tpe_inject) ()) {
panic("cannot inject a cell\n"); // 000004:fchang
AMAZON_DMA_EMSG("no tpe_inject function\n");
(AMAZON_DMA_CH0_ISR +
chan_no * AMAZON_DMA_CH_STEP) & (DMA_ISR_CPT)) {
rx_chan_clear_isr(chan_no);
dma_reset_all_descriptors(chan_no);
/* advance (with wrap) the channel's current descriptor index */
if (g_log_chan[chan_no].current_desc ==
(g_log_chan[chan_no].desc_len - 1)) {
g_log_chan[chan_no].current_desc = 0;
g_log_chan[chan_no].current_desc++;
/* turn the channel off and wait for the ON bit to clear */
AMAZON_DMA_REG32(AMAZON_DMA_CH_ON) &= ~(1 << ch_on_mapping[chan_no]);
while (AMAZON_DMA_REG32(AMAZON_DMA_CH_ON) &
(1 << ch_on_mapping[chan_no])) {
printk("TPE channel still on\n");
// AMAZON_DMA_REG32(AMAZON_DMA_CH_RST) = (1<<chan_no);
AMAZON_DMA_REG32(AMAZON_DMA_CH0_MSK + chan_no * AMAZON_DMA_CH_STEP) =
rx_chan_clear_isr(chan_no);
dma_reset_all_descriptors(chan_no);
AMAZON_DMA_REG32(AMAZON_DMA_CH_ON) |= (1 << ch_on_mapping[chan_no]);
// g_log_chan[chan_no].current_desc=0;
AMAZON_DMA_EMSG("cannot restart TPE engien\n");
/* Brief: RX channel interrupt handler
 * Parameter: chan_no: logical RX channel number
 * Description: the interrupt handler for each RX channel
 * 1. check the current descriptor; if no complete incoming packet is
 *    present, just clear the ISR (workaround: the complete bit can be set
 *    before OWN/EoP are valid, so all three are cross-checked),
 * 2. inform the upper layer to receive the packet (which updates the
 *    descriptors via dma_device_read),
 * 3. only clear the interrupt status if the *next* descriptor does not
 *    already hold another completed packet.
 * With AMAZON_DMA_TPE_AAL5_RECOVERY, repeated sync failures on the TPE RX
 * channels (4 and 5) eventually trigger a full TPE reset.
 */
inline void rx_chan_intr_handler(int chan_no)
volatile struct rx_desc *rx_desc_p = NULL;
/* fetch the current descriptor */
(struct rx_desc *) g_desc_list +
g_log_chan[chan_no].offset_from_base +
g_log_chan[chan_no].current_desc;
/* remember which of the device's RX channels fired, for dma_device_read */
g_log_chan[chan_no].dma_dev->current_rx_chan =
chan_no - g_log_chan[chan_no].dma_dev->logic_rx_chan_base;
// workaround for DMA pitfall: complete bit set happends before the
// other two bits (own,eop) are ready
if ((rx_desc_p->status.field.EoP != 1)
|| (rx_desc_p->status.field.OWN != CPU_OWN)
|| (rx_desc_p->status.field.data_length ==
g_log_chan[chan_no].packet_size)) {
#ifdef AMAZON_DMA_TPE_AAL5_RECOVERY
if (chan_no == 4 || chan_no == 5) {
if (dma_sync_fails > MAX_SYNC_FAILS) {
(struct rx_desc *) g_desc_list +
g_log_chan[chan_no].offset_from_base;
(struct rx_desc *) g_desc_list +
g_log_chan[chan_no].offset_from_base + 1;
/* both first descriptors stuck CPU-owned without EoP => lost sync */
if ((rx_desc_p0->status.field.OWN == CPU_OWN
&& rx_desc_p0->status.field.EoP != 1)
&& (rx_desc_p1->status.field.OWN == CPU_OWN
&& rx_desc_p1->status.field.EoP != 1)) {
amazon_dma_reset_tpe_rx(chan_no);
AMAZON_DMA_DMSG("too many times ch:%d\n", chan_no); // 000004:fchang
udelay(10); // 000004:fchang
#endif // //AMAZON_DMA_TPE_AAL5_RECOVERY
/* inform the upper layer to receive the packet */
g_log_chan[chan_no].intr_handler(g_log_chan[chan_no].dma_dev, RCV_INT);
/* check the next descriptor, if still contains the incoming packet,
then do not clear the interrupt status */
(struct rx_desc *) g_desc_list +
g_log_chan[chan_no].offset_from_base +
g_log_chan[chan_no].current_desc;
((rx_desc_p->status.field.OWN == CPU_OWN)
&& (rx_desc_p->status.field.C == 1))) {
rx_chan_clear_isr(chan_no);
/* Brief: TX channel interrupt handler
 * Parameter: chan_no: logical TX channel number
 * Description: the interrupt handler for each TX channel
 * 1. check all the descriptors; if any of them has transmitted a packet,
 *    free its buffer — because we cannot guarantee which one has already
 *    been transmitted, we have to go through all the descriptors here,
 * 2. clear the interrupt status bit afterwards.
 */
inline void tx_chan_intr_handler(int chan_no)
struct tx_desc *tx_desc_p = NULL;
/* base of this channel's descriptor ring */
(struct tx_desc *) g_desc_list +
g_log_chan[chan_no].offset_from_base;
for (i = 0; i < g_log_chan[chan_no].desc_len; i++) {
/* CPU-owned + complete => the DMA is done with this buffer */
if ((tx_desc_p->status.field.OWN == CPU_OWN)
&& (tx_desc_p->status.field.C == 1)) {
/* if already transmitted, then free the buffer */
buffer_free((u8 *) __va(tx_desc_p->Data_Pointer),
g_log_chan[chan_no].opt[i]);
tx_desc_p->status.field.C = 0;
/* inform the upper layer about the completion of the
transmitted packet, the upper layer may want to free the
g_log_chan[chan_no].intr_handler(g_log_chan[chan_no].dma_dev,
/* after all these operations, clear the interrupt status bit */
AMAZON_DMA_REG32(AMAZON_DMA_CH0_ISR + chan_no * AMAZON_DMA_CH_STEP) =
(AMAZON_DMA_CH0_ISR +
AMAZON_DMA_CH_STEP) & (DMA_ISR_CPT | DMA_ISR_EOP |
/* Brief: DMA interrupt handler
 * Parameters: irq: interrupt number; dev_id: unused cookie
 * Return: IRQ handling status
 * Description:
 * Reads the combined ISR once, then:
 *  - RX: runs a weighted-fair-queueing pick — among all pending RX
 *    channels with remaining weight, services the one with the largest
 *    weight; when none has weight left, all weights are reset to their
 *    defaults (one channel serviced per interrupt),
 *  - TX: services every open TX channel whose completion/EOP bits (or
 *    combined ISR bit) are set.
 * NOTE(review): lines are missing from this extraction (e.g. the isr
 * variable declaration and some closing braces).
 */
static irqreturn_t dma_interrupt(int irq, void *dev_id)
#ifdef NO_TX_INT // 000004:fchang
static int cnt = 0; // 000004:fchang
#endif // 000004:fchang
AMAZON_DMA_REG32(AMAZON_DMA_COMB_ISR)) & (COMB_ISR_RX_MASK |
if (isr & COMB_ISR_RX_MASK) {
// RX Channels: start WFQ algorithm
chan_no = CHAN_TOTAL_NUM;
for (i = 0; i < RX_CHAN_NUM; i++) {
if ((isr & (comb_isr_mask[i]))
&& (g_log_chan[i].weight > 0)) {
if (g_log_chan[chan_no].weight < g_log_chan[i].weight) {
if (chan_no < CHAN_TOTAL_NUM) {
rx_chan_intr_handler(chan_no);
/* no eligible channel: recharge all RX weights */
for (i = 0; i < RX_CHAN_NUM; i++) {
g_log_chan[i].weight = g_log_chan[i].default_weight;
for (i = 0; i < tx_chan_list_len; i++) {
(AMAZON_DMA_CH0_ISR +
AMAZON_DMA_CH_STEP) & (DMA_ISR_CPT | DMA_ISR_EOP)) {
tx_chan_intr_handler(tx_chan_list[i]);
if (isr & COMB_ISR_TX_MASK) {
for (i = 0; i < tx_chan_list_len; i++) {
if (isr & (comb_isr_mask[tx_chan_list[i]])) {
tx_chan_intr_handler(tx_chan_list[i]);
/* Brief: read a packet from a DMA RX channel
 * Parameter: dma_dev: the device whose current_rx_chan holds the packet
 *            dataptr: out, receives the (virtual) buffer address
 *            opt: out, receives the per-buffer private data
 * Return: packet length (0 when replacement buffer allocation fails)
 * Description:
 * Called back in the context of the DMA interrupt:
 * 1. hand the filled buffer to the caller and allocate a fresh buffer
 *    for the descriptor (ownership of the old buffer transfers to the
 *    caller),
 * 2. give the descriptor back to the DMA,
 * 3. reduce the channel's WFQ weight by the packet length.
 */
//507261:tc.chen int dma_device_read(struct dma_device_info* dma_dev, u8** dataptr, void** opt)
int asmlinkage dma_device_read(struct dma_device_info *dma_dev,
u8 ** dataptr, void **opt)
struct rx_desc *rx_desc_p;
chan_no = dma_dev->logic_rx_chan_base + dma_dev->current_rx_chan;
current_desc = g_log_chan[chan_no].current_desc;
(struct rx_desc *) (g_desc_list +
g_log_chan[chan_no].offset_from_base +
buf = (u8 *) __va(rx_desc_p->Data_Pointer); /* extract the virtual
len = rx_desc_p->status.field.data_length; /* extract the data length */
#ifndef CONFIG_MIPS_UNCACHED
/* buffer was written by the DMA engine: invalidate stale cache lines */
dma_cache_inv((unsigned long) buf, len);
#endif // CONFIG_MIPS_UNCACHED
*(u32 *) dataptr = (u32) buf;
*(int *) opt = (int) g_log_chan[chan_no].opt[current_desc]; /* read
(u8 *) g_log_chan[chan_no].buffer_alloc(g_log_chan[chan_no].
packet_size, &byte_offset,
// should check null!!!!
if (buf == NULL || p == NULL) {
*(u32 *) dataptr = 0;
g_log_chan[chan_no].opt[current_desc] = p;
/* reduce the weight for WFQ algorithm */
g_log_chan[chan_no].weight -= len;
rx_desc_p->Data_Pointer = (u32) CPHYSADDR((u32) buf);
/* advance the ring index, wrapping at the end of the descriptor list */
if (current_desc == g_log_chan[chan_no].desc_len - 1) {
g_log_chan[chan_no].current_desc = current_desc;
/* hand the descriptor (with the fresh buffer) back to the DMA */
rx_desc_p->status.word = DMA_DESC_OWN_DMA
| (byte_offset << DMA_DESC_BYTEOFF_SHIFT)
| g_log_chan[chan_no].packet_size;
/* Brief: write a packet through a DMA TX channel to the peripheral
 * Parameter: dma_dev: the device; its current_tx_chan selects the channel
 *            dataptr: packet data (virtual address)
 *            len: packet length in bytes
 *            opt: per-buffer private data, stored for the completion handler
 * Return: packet length, or 0 when no TX descriptor is available (the
 *         caller is notified via TX_BUF_FULL_INT and must retry/free)
 * Description:
 * Runs with local IRQs disabled: fills the current TX descriptor, opens
 * the channel if necessary, advances the ring index, and signals
 * TX_BUF_FULL_INT when the *next* descriptor is still busy (000003:tc.chen
 * also treats C==1 as busy to fix a leak under heavy traffic).
 */
//507261:tc.chen int dma_device_write(struct dma_device_info* dma_dev, u8* dataptr, int len,void* opt)
int asmlinkage dma_device_write(struct dma_device_info *dma_dev,
u8 * dataptr, int len, void *opt)
struct tx_desc *tx_desc_p;
static int cnt = 0; // 000004:fchang
local_irq_save(flag);
chan_no = dma_dev->logic_tx_chan_base + dma_dev->current_tx_chan;
current_desc = g_log_chan[chan_no].current_desc;
(struct tx_desc *) (g_desc_list +
g_log_chan[chan_no].offset_from_base +
// 000003:tc.chen if(tx_desc_p->status.field.OWN==DMA_OWN){
if (tx_desc_p->status.field.OWN == DMA_OWN || tx_desc_p->status.field.C == 1) { // 000003:tc.chen
AMAZON_DMA_DMSG("no TX desc for CPU, drop packet\n");
g_log_chan[chan_no].intr_handler(dma_dev, TX_BUF_FULL_INT);
local_irq_restore(flag);
g_log_chan[chan_no].opt[current_desc] = opt;
/* byte offset----to adjust the starting address of the data buffer,
should be multiple of the burst length. */
((u32) CPHYSADDR((u32) dataptr)) % (g_log_chan[chan_no].burst_len *
#ifndef CONFIG_MIPS_UNCACHED
/* flush the packet out of the data cache before the DMA reads it */
dma_cache_wback((unsigned long) dataptr, len);
#endif // CONFIG_MIPS_UNCACHED
tx_desc_p->Data_Pointer = (u32) CPHYSADDR((u32) dataptr) - byte_offset;
tx_desc_p->status.word = DMA_DESC_OWN_DMA
| DMA_DESC_EOP_SET | (byte_offset << DMA_DESC_BYTEOFF_SHIFT)
if (is_channel_open(chan_no) == 0) {
// turn on if necessary
open_channel(chan_no);
#ifdef DMA_NO_POLLING
if ((AMAZON_DMA_REG32
(AMAZON_DMA_CH0_ISR +
chan_no * AMAZON_DMA_CH_STEP) & (DMA_ISR_DURR | DMA_ISR_CPT)) ==
// clear DURR if (CPT is AND set and DURR is set)
AMAZON_DMA_REG32(AMAZON_DMA_CH0_ISR +
chan_no * AMAZON_DMA_CH_STEP) = DMA_ISR_DURR;
/* advance the ring index, wrapping at the end of the descriptor list */
if (current_desc == (g_log_chan[chan_no].desc_len - 1)) {
g_log_chan[chan_no].current_desc = current_desc;
(struct tx_desc *) (g_desc_list +
g_log_chan[chan_no].offset_from_base +
// 000003:tc.chen if(tx_desc_p->status.field.OWN==DMA_OWN){
if (tx_desc_p->status.field.OWN == DMA_OWN || tx_desc_p->status.field.C == 1) { // 000003:tc.chen
g_log_chan[chan_no].intr_handler(dma_dev, TX_BUF_FULL_INT);
//000004:fchang Start
tx_chan_intr_handler(chan_no);
local_irq_restore(flag); // 000004:fchang
/* /proc read handler: dump the first 120 descriptors (two 32-bit words
 * each: status word then data pointer) for debugging.
 * NOTE(review): caps at 120 entries; a full descriptor dump could overrun
 * the single proc page — confirm buffer sizing in the full source. */
int desc_list_proc_read(char *buf, char **start, off_t offset,
int count, int *eof, void *data)
u32 *p = (u32 *) g_desc_list;
len += sprintf(buf + len, "descriptor list:\n");
for (i = 0; i < 120; i++) {
len += sprintf(buf + len, "%d\n", i);
len += sprintf(buf + len, "%08x\n", *(p + i * 2 + 1));
len += sprintf(buf + len, "%08x\n", *(p + i * 2));
/* /proc read handler: print the default and current WFQ weight of each
 * DMA channel. Channels 6-11 (TPE/DPlus) are commented out below. */
int channel_weight_proc_read(char *buf, char **start, off_t offset,
int count, int *eof, void *data)
len += sprintf(buf + len, "Qos dma channel weight list\n");
"channel_num default_weight current_weight device Tx/Rx\n");
" 0 %08x %08x Switch Rx0\n",
g_log_chan[0].default_weight, g_log_chan[0].weight);
" 1 %08x %08x Switch Rx1\n",
g_log_chan[1].default_weight, g_log_chan[1].weight);
" 2 %08x %08x Switch Rx2\n",
g_log_chan[2].default_weight, g_log_chan[2].weight);
" 3 %08x %08x Switch Rx3\n",
g_log_chan[3].default_weight, g_log_chan[3].weight);
" 4 %08x %08x Switch Tx0\n",
g_log_chan[4].default_weight, g_log_chan[4].weight);
" 5 %08x %08x Switch Tx1\n",
g_log_chan[5].default_weight, g_log_chan[5].weight);
len+=sprintf(buf+len," 6 %08x %08x TPE
Rx0\n",g_log_chan[6].default_weight, g_log_chan[6].weight);
len+=sprintf(buf+len," 7 %08x %08x TPE
Rx0\n",g_log_chan[7].default_weight, g_log_chan[7].weight);
len+=sprintf(buf+len," 8 %08x %08x TPE
Tx0\n",g_log_chan[8].default_weight, g_log_chan[8].weight);
len+=sprintf(buf+len," 9 %08x %08x TPE
Rx0\n",g_log_chan[9].default_weight, g_log_chan[9].weight);
len+=sprintf(buf+len," 10 %08x %08x DPLUS
Rx0\n",g_log_chan[10].default_weight, g_log_chan[10].weight);
len+=sprintf(buf+len," 11 %08x %08x DPLUS
Rx0\n",g_log_chan[11].default_weight, g_log_chan[11].weight); */
/* /proc read handler: print driver banner, the registered device names,
 * and a raw dump of all DMA registers (per-channel ISR/MSK/DES_LEN/
 * DES_OFST plus the global burst/polling/weight registers).
 * NOTE(review): this extraction dropped the "len +=" heads of many of the
 * sprintf lines below; each sprintf accumulates into buf at offset len. */
int dma_register_proc_read(char *buf, char **start, off_t offset,
int count, int *eof, void *data)
len += sprintf(buf + len, "amazon dma driver\n");
len += sprintf(buf + len, "version 1.0\n");
len += sprintf(buf + len, "devices registered:\n");
for (temp_dev = g_head_dev; temp_dev; temp_dev = temp_dev->next) {
len += sprintf(buf + len, "%s ", temp_dev->dev->device_name);
len += sprintf(buf + len, "\n");
len += sprintf(buf + len, "CH_ON=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH_ON));
len += sprintf(buf + len, "CH_RST=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH_RST));
len += sprintf(buf + len, "CH0_ISR=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH0_ISR));
len += sprintf(buf + len, "CH1_ISR=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH1_ISR));
len += sprintf(buf + len, "CH2_ISR=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH2_ISR));
len += sprintf(buf + len, "CH3_ISR=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH3_ISR));
len += sprintf(buf + len, "CH4_ISR=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH4_ISR));
len += sprintf(buf + len, "CH5_ISR=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH5_ISR));
len += sprintf(buf + len, "CH6_ISR=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH6_ISR));
len += sprintf(buf + len, "CH7_ISR=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH7_ISR));
len += sprintf(buf + len, "CH8_ISR=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_CH8_ISR));
sprintf(buf + len, "CH9_ISR=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_CH9_ISR));
sprintf(buf + len, "CH10_ISR=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_CH10_ISR));
sprintf(buf + len, "CH11_ISR=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_CH11_ISR));
sprintf(buf + len, "LCH0_MSK=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_CH0_MSK));
sprintf(buf + len, "LCH1_MSK=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_CH1_MSK));
sprintf(buf + len, "LCH2_MSK=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_CH2_MSK));
sprintf(buf + len, "LCH3_MSK=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_CH3_MSK));
sprintf(buf + len, "LCH4_MSK=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_CH4_MSK));
sprintf(buf + len, "LCH5_MSK=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_CH5_MSK));
sprintf(buf + len, "LCH6_MSK=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_CH6_MSK));
sprintf(buf + len, "LCH7_MSK=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_CH7_MSK));
sprintf(buf + len, "LCH8_MSK=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_CH8_MSK));
sprintf(buf + len, "LCH9_MSK=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_CH9_MSK));
sprintf(buf + len, "LCH10_MSK=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_CH10_MSK));
sprintf(buf + len, "LCH11_MSK=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_CH11_MSK));
sprintf(buf + len, "Desc_BA=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_Desc_BA));
sprintf(buf + len, "LCH0_DES_LEN=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_CH0_DES_LEN));
sprintf(buf + len, "LCH1_DES_LEN=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_CH1_DES_LEN));
sprintf(buf + len, "LCH2_DES_LEN=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_CH2_DES_LEN));
sprintf(buf + len, "LCH3_DES_LEN=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_CH3_DES_LEN));
sprintf(buf + len, "LCH4_DES_LEN=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_CH4_DES_LEN));
sprintf(buf + len, "LCH5_DES_LEN=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_CH5_DES_LEN));
sprintf(buf + len, "LCH6_DES_LEN=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_CH6_DES_LEN));
sprintf(buf + len, "LCH7_DES_LEN=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_CH7_DES_LEN));
sprintf(buf + len, "LCH8_DES_LEN=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_CH8_DES_LEN));
sprintf(buf + len, "LCH9_DES_LEN=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_CH9_DES_LEN));
sprintf(buf + len, "LCH10_DES_LEN=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_CH10_DES_LEN));
sprintf(buf + len, "LCH11_DES_LEN=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_CH11_DES_LEN));
sprintf(buf + len, "LCH1_DES_OFST=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_CH1_DES_OFST));
sprintf(buf + len, "LCH2_DES_OFST=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_CH2_DES_OFST));
sprintf(buf + len, "LCH3_DES_OFST=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_CH3_DES_OFST));
sprintf(buf + len, "LCH4_DES_OFST=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_CH4_DES_OFST));
sprintf(buf + len, "LCH5_DES_OFST=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_CH5_DES_OFST));
sprintf(buf + len, "LCH6_DES_OFST=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_CH6_DES_OFST));
sprintf(buf + len, "LCH7_DES_OFST=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_CH7_DES_OFST));
sprintf(buf + len, "LCH8_DES_OFST=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_CH8_DES_OFST));
sprintf(buf + len, "LCH9_DES_OFST=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_CH9_DES_OFST));
sprintf(buf + len, "LCH10_DES_OFST=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_CH10_DES_OFST));
sprintf(buf + len, "LCH11_DES_OFST=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_CH11_DES_OFST));
sprintf(buf + len, "AMAZON_DMA_SW_BL=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_SW_BL));
sprintf(buf + len, "AMAZON_DMA_TPE_BL=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_TPE_BL));
sprintf(buf + len, "DPlus2FPI_BL=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_DPlus2FPI_BL));
sprintf(buf + len, "GRX_BUF_LEN=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_GRX_BUF_LEN));
sprintf(buf + len, "DMA_ECON_REG=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_DMA_ECON_REG));
sprintf(buf + len, "POLLING_REG=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_POLLING_REG));
sprintf(buf + len, "CH_WGT=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_CH_WGT));
sprintf(buf + len, "TX_WGT=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_TX_WGT));
sprintf(buf + len, "DPlus2FPI_CLASS=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_DPLus2FPI_CLASS));
sprintf(buf + len, "COMB_ISR=%08x\n",
AMAZON_DMA_REG32(AMAZON_DMA_COMB_ISR));
#ifdef AMAZON_DMA_TPE_AAL5_RECOVERY
len += sprintf(buf + len, "TPE fails:%u\n", total_dma_tpe_reset); // 000004:fchang
973 /* Brief: initialize DMA registers
976 static void dma_chip_init(void)
979 for (i = 0; i < CHAN_TOTAL_NUM; i++) {
980 AMAZON_DMA_REG32(AMAZON_DMA_CH1_DES_OFST +
981 i * AMAZON_DMA_CH_STEP) = DEFAULT_OFFSET;
983 #ifdef DMA_NO_POLLING
984 AMAZON_DMA_REG32(AMAZON_DMA_POLLING_REG) = 0;
986 // enable poll mode and set polling counter
987 AMAZON_DMA_REG32(AMAZON_DMA_POLLING_REG) = DMA_POLLING_CNT | DMA_POLLING_ENABLE;
989 // to enable DMA drop
990 AMAZON_DMA_REG32(AMAZON_DMA_GRX_BUF_LEN) = 0x10000;
/* Insert a device node into the global device list, kept sorted by
 * descending weight. An empty list gets the node as head/tail; otherwise
 * the node is placed before the first device of lower weight, or appended
 * at the tail when no such device exists.
 * NOTE(review): lines are missing from this extraction (head/tail
 * assignments and the function tail); confirm against the full source. */
int insert_dev_list(dev_list * dev)
if (g_head_dev == NULL) {
for (temp_dev = g_head_dev; temp_dev; temp_dev = temp_dev->next) {
if (temp_dev->weight < dev->weight) {
temp_dev->prev->next = dev;
dev->prev = temp_dev->prev;
dev->next = temp_dev;
temp_dev->prev = dev;
if (temp_dev == g_head_dev)
g_tail_dev->next = dev;
dev->prev = g_tail_dev;
1028 u8 *common_buffer_alloc(int len, int *byte_offset, void **opt)
1030 u8 *buffer = (u8 *) kmalloc(len * sizeof(u8), GFP_KERNEL);
1036 int common_buffer_free(u8 * dataptr, void *opt)
/* Brief: set up hardware channels and bookkeeping for a known DMA device
 * Parameter: dma_dev: device descriptor ("switch1"/"switch2"/"TPE"/"DPlus")
 * Description:
 * 1. reset the device's channels and program endianness/burst registers,
 * 2. for every RX channel: fill g_log_chan[] (weights, descriptor ring
 *    geometry, buffer alloc/free and interrupt hooks), program DES_LEN
 *    and the interrupt mask, pre-allocate one buffer per descriptor and
 *    hand all descriptors to the DMA, then open/close per chan control,
 * 3. for every TX channel: same bookkeeping, but descriptors start
 *    CPU-owned and channels stay closed until there is data to send
 *    (hardware pitfall: never open a channel without valid descriptors).
 * NOTE(review): many lines are missing from this extraction (returns,
 * closing braces, some register writes); do not infer exact control flow
 * from the visible fragment alone.
 */
int register_dev(struct dma_device_info *dma_dev)
int byte_offset = 0;
struct rx_desc *rx_desc_p;
struct tx_desc *tx_desc_p;
if (strcmp(dma_dev->device_name, "switch1") == 0) {
AMAZON_DMA_REG32(AMAZON_DMA_CH_RST) = SWITCH1_RST_MASK; // resest
AMAZON_DMA_REG32(AMAZON_DMA_DMA_ECON_REG) |= 0x3; // endian
burst_reg = AMAZON_DMA_SW_BL;
dma_dev->logic_rx_chan_base = switch_rx_chan_base;
dma_dev->logic_tx_chan_base = switch_tx_chan_base;
else if (strcmp(dma_dev->device_name, "switch2") == 0) {
AMAZON_DMA_REG32(AMAZON_DMA_CH_RST) = SWITCH2_RST_MASK; // resest
AMAZON_DMA_REG32(AMAZON_DMA_DMA_ECON_REG) |= 0x3; // endian
burst_reg = AMAZON_DMA_SW_BL;
dma_dev->logic_rx_chan_base = switch2_rx_chan_base;
dma_dev->logic_tx_chan_base = switch2_tx_chan_base;
} else if (strcmp(dma_dev->device_name, "TPE") == 0) {
AMAZON_DMA_REG32(AMAZON_DMA_CH_RST) = TPE_RST_MASK; // resest
burst_reg = AMAZON_DMA_TPE_BL;
dma_dev->logic_rx_chan_base = TPE_rx_chan_base;
dma_dev->logic_tx_chan_base = TPE_tx_chan_base;
else if (strcmp(dma_dev->device_name, "DPlus") == 0) {
AMAZON_DMA_REG32(AMAZON_DMA_CH_RST) = DPlus2FPI_RST_MASK; // resest
dma_dev->logic_rx_chan_base = DPLus2FPI_rx_chan_base;
dma_dev->logic_tx_chan_base = DPLus2FPI_tx_chan_base;
/* encode TX burst length (log2) into the high bit of the burst register */
for (temp = dma_dev->tx_burst_len; temp > 2; temp /= 2) {
AMAZON_DMA_REG32(burst_reg) = i << 1;
/* encode RX burst length into the low bit */
for (temp = dma_dev->rx_burst_len; temp > 2; temp /= 2) {
AMAZON_DMA_REG32(burst_reg) += i;
for (i = 0; i < dma_dev->num_rx_chan; i++) {
temp = dma_dev->logic_rx_chan_base + i;
g_log_chan[temp].dma_dev = dma_dev;
g_log_chan[temp].weight = dma_dev->rx_chan[i].weight;
g_log_chan[temp].default_weight = dma_dev->rx_chan[i].weight;
g_log_chan[temp].current_desc = 0;
g_log_chan[temp].desc_ofst = DEFAULT_OFFSET;
g_log_chan[temp].desc_len = dma_dev->rx_chan[i].desc_num;
g_log_chan[temp].offset_from_base = temp * DEFAULT_OFFSET;
g_log_chan[temp].packet_size = dma_dev->rx_chan[i].packet_size;
AMAZON_DMA_REG32(AMAZON_DMA_CH0_DES_LEN + temp * AMAZON_DMA_CH_STEP) = dma_dev->rx_chan[i].desc_num;
// enable interrupt mask
if (temp == 4 || temp == 5) {
AMAZON_DMA_REG32(AMAZON_DMA_CH0_MSK + temp * AMAZON_DMA_CH_STEP) = 0x32;
AMAZON_DMA_REG32(AMAZON_DMA_CH0_MSK + temp * AMAZON_DMA_CH_STEP) = 0x36;
strcpy(g_log_chan[temp].device_name, dma_dev->device_name);
g_log_chan[temp].burst_len = dma_dev->rx_burst_len;
g_log_chan[temp].control = dma_dev->rx_chan[i].control;
/* specify the buffer allocation and free method */
if (dma_dev->buffer_alloc)
g_log_chan[temp].buffer_alloc = dma_dev->buffer_alloc;
g_log_chan[temp].buffer_alloc = common_buffer_alloc;
if (dma_dev->buffer_free)
g_log_chan[temp].buffer_free = dma_dev->buffer_free;
g_log_chan[temp].buffer_free = common_buffer_free;
if (dma_dev->intr_handler)
g_log_chan[temp].intr_handler = dma_dev->intr_handler;
g_log_chan[temp].intr_handler = NULL;
/* pre-fill every RX descriptor with a freshly allocated buffer and
   hand it to the DMA */
for (j = 0; j < g_log_chan[temp].desc_len; j++) {
rx_desc_p = (struct rx_desc *) (g_desc_list + g_log_chan[temp].offset_from_base + j);
rx_desc_p->status.word = 0;
rx_desc_p->status.field.data_length = g_log_chan[temp].packet_size;
buffer = (u8 *) g_log_chan[temp].buffer_alloc(g_log_chan[temp].packet_size, &byte_offset, &p);
rx_desc_p->Data_Pointer = (u32) CPHYSADDR((u32) buffer);
rx_desc_p->status.field.byte_offset = byte_offset;
/* fix me, should check if the addresss comply with the burst
lenght requirment */
g_log_chan[temp].opt[j] = p;
rx_desc_p->status.field.OWN = DMA_OWN;
/* open or close the channel */
if (g_log_chan[temp].control)
close_channel(temp);
for (i = 0; i < dma_dev->num_tx_chan; i++) {
temp = dma_dev->logic_tx_chan_base + i;
g_log_chan[temp].dma_dev = dma_dev;
g_log_chan[temp].weight = dma_dev->tx_chan[i].weight;
g_log_chan[temp].default_weight = dma_dev->tx_chan[i].weight;
g_log_chan[temp].current_desc = 0;
g_log_chan[temp].desc_ofst = DEFAULT_OFFSET;
g_log_chan[temp].desc_len = dma_dev->tx_chan[i].desc_num;
g_log_chan[temp].offset_from_base = temp * DEFAULT_OFFSET;
g_log_chan[temp].packet_size = dma_dev->tx_chan[i].packet_size;
AMAZON_DMA_REG32(AMAZON_DMA_CH0_DES_LEN + temp * AMAZON_DMA_CH_STEP) = dma_dev->tx_chan[i].desc_num;
// enable interrupt mask
AMAZON_DMA_REG32(AMAZON_DMA_CH0_MSK + temp * AMAZON_DMA_CH_STEP) = 0x3e;
AMAZON_DMA_REG32(AMAZON_DMA_CH0_MSK + temp * AMAZON_DMA_CH_STEP) = 0x36;
strcpy(g_log_chan[temp].device_name, dma_dev->device_name);
g_log_chan[temp].burst_len = dma_dev->tx_burst_len;
g_log_chan[temp].control = dma_dev->tx_chan[i].control;
if (dma_dev->buffer_alloc)
g_log_chan[temp].buffer_alloc = dma_dev->buffer_alloc;
g_log_chan[temp].buffer_alloc = common_buffer_alloc;
if (dma_dev->buffer_free)
g_log_chan[temp].buffer_free = dma_dev->buffer_free;
g_log_chan[temp].buffer_free = common_buffer_free;
if (dma_dev->intr_handler)
g_log_chan[temp].intr_handler = dma_dev->intr_handler;
g_log_chan[temp].intr_handler = NULL;
/* TX descriptors start empty and CPU-owned */
for (j = 0; j < g_log_chan[temp].desc_len; j++) {
(struct tx_desc *) (g_desc_list +
g_log_chan[temp].offset_from_base + j);
tx_desc_p->status.word = 0;
tx_desc_p->status.field.data_length =
g_log_chan[temp].packet_size;
tx_desc_p->status.field.OWN = CPU_OWN;
/* workaround DMA pitfall, we never turn on channel if we don't
have proper descriptors */
if (!g_log_chan[temp].control) {
close_channel(temp);
1228 int dma_device_register(struct dma_device_info *dma_dev)
1231 temp_dev = (dev_list *) kmalloc(sizeof(dev_list), GFP_KERNEL);
1232 temp_dev->dev = dma_dev;
1233 temp_dev->weight = dma_dev->weight;
1234 insert_dev_list(temp_dev);
1235 /* check whether this is a known device */
1236 if ((strcmp(dma_dev->device_name, "switch1") == 0)
1237 || (strcmp(dma_dev->device_name, "TPE") == 0)
1238 || (strcmp(dma_dev->device_name, "switch2") == 0)
1239 || (strcmp(dma_dev->device_name, "DPlus") == 0)) {
1240 register_dev(dma_dev);
1247 int unregister_dev(struct dma_device_info *dma_dev)
1251 struct rx_desc *rx_desc_p;
1253 for (i = 0; i < dma_dev->num_rx_chan; i++) {
1254 temp = dma_dev->logic_rx_chan_base + i;
1255 close_channel(temp);
1256 for (j = 0; j < g_log_chan[temp].desc_len; j++) {
1258 (struct rx_desc *) (g_desc_list +
1259 g_log_chan[temp].offset_from_base + j);
1260 buffer = (u8 *) __va(rx_desc_p->Data_Pointer);
1261 g_log_chan[temp].buffer_free(buffer, g_log_chan[temp].opt[j]);
1264 for (i = 0; i < dma_dev->num_tx_chan; i++) {
1265 temp = dma_dev->logic_tx_chan_base + i;
1266 close_channel(temp);
1271 int dma_device_unregister(struct dma_device_info *dev)
1274 for (temp_dev = g_head_dev; temp_dev; temp_dev = temp_dev->next) {
1275 if (strcmp(dev->device_name, temp_dev->dev->device_name) == 0) {
1276 if ((strcmp(dev->device_name, "switch1") == 0)
1277 || (strcmp(dev->device_name, "TPE") == 0)
1278 || (strcmp(dev->device_name, "switch2") == 0)
1279 || (strcmp(dev->device_name, "DPlus") == 0))
1280 unregister_dev(dev);
1281 if (temp_dev == g_head_dev) {
1282 g_head_dev = temp_dev->next;
1285 if (temp_dev == g_tail_dev)
1286 g_tail_dev = temp_dev->prev;
1288 temp_dev->prev->next = temp_dev->next;
1290 temp_dev->next->prev = temp_dev->prev;
1300 void dma_device_update_rx(struct dma_device_info *dma_dev)
1303 for (i = 0; i < dma_dev->num_rx_chan; i++) {
1304 temp = dma_dev->logic_rx_chan_base + i;
1305 g_log_chan[temp].control = dma_dev->rx_chan[i].control;
1307 if (g_log_chan[temp].control)
1310 close_channel(temp);
1315 void dma_device_update_tx(struct dma_device_info *dma_dev)
1318 for (i = 0; i < dma_dev->num_tx_chan; i++) {
1319 temp = dma_dev->logic_tx_chan_base + i;
1320 g_log_chan[temp].control = dma_dev->tx_chan[i].control;
1321 if (g_log_chan[temp].control) {
1322 /* we turn on channel when send out the very first packet */
1323 // open_channel(temp);
1325 close_channel(temp);
1329 int dma_device_update(struct dma_device_info *dma_dev)
1331 dma_device_update_rx(dma_dev);
1332 dma_device_update_tx(dma_dev);
1336 static int dma_open(struct inode *inode, struct file *file)
1341 static int dma_release(struct inode *inode, struct file *file)
1343 /* release the resources */
1347 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
1348 static long dma_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1350 static int dma_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
1357 case 0: /* get register value */
1359 case 1: /* return channel weight */
1360 chan_no = *((int *) arg);
1361 *((int *) arg + 1) = g_log_chan[chan_no].default_weight;
1363 case 2: /* set channel weight */
1364 chan_no = *((int *) arg);
1365 value = *((int *) arg + 1);
1366 printk("new weight=%08x\n", value);
1367 g_log_chan[chan_no].default_weight = value;
1376 static struct file_operations dma_fops = {
1379 release:dma_release,
1380 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
1381 unlocked_ioctl:dma_ioctl,
1387 static int dma_init(void)
1391 printk("initialising dma core\n");
1392 result = register_chrdev(DMA_MAJOR, "dma-core", &dma_fops);
1394 AMAZON_DMA_EMSG("cannot register device dma-core!\n");
1397 result = request_irq(AMAZON_DMA_INT, dma_interrupt, IRQF_DISABLED, "dma-core", (void *) &dma_interrupt);
1399 AMAZON_DMA_EMSG("error, cannot get dma_irq!\n");
1400 free_irq(AMAZON_DMA_INT, (void *) &dma_interrupt);
1404 g_desc_list = (u64 *) KSEG1ADDR(__get_free_page(GFP_DMA));
1406 if (g_desc_list == NULL) {
1407 AMAZON_DMA_EMSG("no memory for desriptor\n");
1410 memset(g_desc_list, 0, PAGE_SIZE);
1411 AMAZON_DMA_REG32(AMAZON_DMA_Desc_BA) = (u32) CPHYSADDR((u32) g_desc_list);
1412 g_amazon_dma_dir = proc_mkdir("amazon_dma", NULL);
1413 create_proc_read_entry("dma_register", 0, g_amazon_dma_dir, dma_register_proc_read, NULL);
1414 create_proc_read_entry("g_desc_list", 0, g_amazon_dma_dir, desc_list_proc_read, NULL);
1415 create_proc_read_entry("channel_weight", 0, g_amazon_dma_dir, channel_weight_proc_read, NULL);
1418 for (i = 0; i < (RX_CHAN_NUM + 1); i++) {
1419 rx_chan_list[i] = -1;
1421 for (i = 0; i < (TX_CHAN_NUM + 1); i++) {
1422 tx_chan_list[i] = -1;
1425 for (i = 0; i < CHAN_TOTAL_NUM; i++) {
1426 comb_isr_mask[i] = 0x80000000 >> (i);
1429 g_log_chan[CHAN_TOTAL_NUM].weight = 0;
1430 printk("initialising dma core ... done\n");
1435 arch_initcall(dma_init);
1438 void dma_cleanup(void)
1442 unregister_chrdev(DMA_MAJOR, "dma-core");
1443 for (temp_dev = g_head_dev; temp_dev; temp_dev = temp_dev->next) {
1446 free_page(KSEG0ADDR((unsigned long) g_desc_list));
1447 remove_proc_entry("channel_weight", g_amazon_dma_dir);
1448 remove_proc_entry("dma_list", g_amazon_dma_dir);
1449 remove_proc_entry("dma_register", g_amazon_dma_dir);
1450 remove_proc_entry("amazon_dma", NULL);
1451 /* release the resources */
1452 free_irq(AMAZON_DMA_INT, (void *) &dma_interrupt);
1455 EXPORT_SYMBOL(dma_device_register);
1456 EXPORT_SYMBOL(dma_device_unregister);
1457 EXPORT_SYMBOL(dma_device_read);
1458 EXPORT_SYMBOL(dma_device_write);
1459 EXPORT_SYMBOL(dma_device_update);
1460 EXPORT_SYMBOL(dma_device_update_rx);
1462 MODULE_LICENSE("GPL");