15.05/openwrt.git: target/linux/amazon-2.6/files/arch/mips/amazon/dma-core.c
1 /*
2  *   This program is free software; you can redistribute it and/or modify
3  *   it under the terms of the GNU General Public License as published by
4  *   the Free Software Foundation; either version 2 of the License, or
5  *   (at your option) any later version.
6  *
7  *   This program is distributed in the hope that it will be useful,
8  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
9  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10  *   GNU General Public License for more details.
11  *
12  *   You should have received a copy of the GNU General Public License
13  *   along with this program; if not, write to the Free Software
14  *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
15  */
16 //-----------------------------------------------------------------------
17 /*
18  * Description:
19  *      Driver for Infineon Amazon DMA
20  */
21 //-----------------------------------------------------------------------
22 /* Author:      Wu Qi Ming[Qi-Ming.Wu@infineon.com]
23  * Created:     7-April-2004
24  */
25 //-----------------------------------------------------------------------
26 /* History
27  * Last changed on: 4-May-2004
28  * Last changed by: <peng.liu@infineon.com>
29  * Reason: debug
30  */
31 //----------------------------------------------------------------------- 
32 /* Last changed on: 03-Dec-2004
33  * Last changed by: peng.liu@infineon.com
34  * Reason: recover from TPE bug 
35  */
36
37 //000004:fchang 2005/6/2 Modified by Linpeng as described below
38 //----------------------------------------------------------------------- 
39 /* Last changed on: 28-Jan-2004
40  * Last changed by: peng.liu@infineon.com
41  * Reason: 
42  * - handle "out of memory" bug
43  */
44 //000003:tc.chen 2005/06/16 fix memory leak when Tx buffer full (heavy traffic).
45 //507261:tc.chen 2005/07/26 re-organize code address map to improve performance.
46
47 #if defined(CONFIG_MODVERSIONS) && !defined(MODVERSIONS)
48 #define MODVERSIONS
49 #endif
50
51 #if defined(MODVERSIONS) && !defined(__GENKSYMS__)
52 #include <linux/modversions.h>
53 #endif
54
55 #ifndef EXPORT_SYMTAB
56 #define EXPORT_SYMTAB                   /* need this one 'cause we export symbols */
57 #endif
58
59 #undef DMA_NO_POLLING
60
61 /* no TX interrupt handling */
62 #define NO_TX_INT
63 /* need for DMA workaround */
64 #undef AMAZON_DMA_TPE_AAL5_RECOVERY
65
66 #ifdef AMAZON_DMA_TPE_AAL5_RECOVERY
67 #define MAX_SYNC_FAILS 1000000  // 000004:fchang
68 unsigned int dma_sync_fails = 0;
69 unsigned int total_dma_tpe_reset = 0;
70 int (*tpe_reset) (void);
71 int (*tpe_start) (void);
72 int (*tpe_inject) (void);
73 #endif                                                  // AMAZON_DMA_TPE_AAL5_RECOVERY
74
75
76 #include <linux/module.h>
77 #include <linux/init.h>
78 #include <linux/sched.h>
79 #include <linux/kernel.h>
80 #include <linux/slab.h>
81 #include <linux/string.h>
82 #include <linux/timer.h>
83 #include <linux/fs.h>
84 #include <linux/errno.h>
85 #include <linux/proc_fs.h>
86 #include <linux/stat.h>
87 #include <linux/mm.h>
88 #include <linux/tty.h>
89 #include <linux/selection.h>
90 #include <linux/kmod.h>
91 #include <linux/vmalloc.h>
92 #include <linux/interrupt.h>
93 #include <linux/delay.h>
94 #include <asm/uaccess.h>
95 #include <linux/errno.h>
96 #include <asm/io.h>
97
98 #include <asm/amazon/amazon.h>
99 #include <asm/amazon/irq.h>
100 #include <asm/amazon/amazon_dma.h>
101 #include "dma-core.h"
102
103 #define AMAZON_DMA_EMSG(fmt, args...) printk( KERN_ERR  "%s: " fmt,__FUNCTION__, ## args)
104
105 static irqreturn_t dma_interrupt(int irq, void *dev_id);
106 extern void mask_and_ack_amazon_irq(unsigned int irq_nr);
107
108 /***************************************** global data *******************************************/
109 u64 *g_desc_list;
110 dev_list *g_current_dev = NULL;
111 dev_list *g_head_dev = NULL;
112 dev_list *g_tail_dev = NULL;
113 channel_info g_log_chan[CHAN_TOTAL_NUM + 1];
114 struct proc_dir_entry *g_amazon_dma_dir;
115 static u8 rx_chan_list_len = 0;
116 static u8 tx_chan_list_len = 0;
117 static int rx_chan_list[RX_CHAN_NUM + 1];
118 static int tx_chan_list[TX_CHAN_NUM + 1];
119 static u32 comb_isr_mask[CHAN_TOTAL_NUM];
120
121 static inline int is_rx_chan(int chan_no)
122 /* judge whether this is an RX channel */
123 {
124         int result = 0;
125         if (chan_no < RX_CHAN_NUM)
126                 result = 1;
127         return result;
128 }
129
130 /* Ugly: the Channel ON register bits are not in logical-channel order, so map logical channel numbers to bit positions. */
131 static u8 ch_on_mapping[CHAN_TOTAL_NUM] =
132         { 0, 1, 2, 3, 6, 7, 10, 4, 5, 8, 9, 11 };
133
134 /* Brief:       check whether the chan_no is legal
135  * Parameter:   chan_no: logical channel number
136  * Return:      0 if is not valid
137  *              1 if is valid
138  */
139 static inline int is_valid_dma_ch(int chan_no)
140 {
141         return ((chan_no >= 0) && (chan_no < CHAN_TOTAL_NUM));
142 }
143
144 /* Brief:       check whether a channel is open through Channel ON register
145  * Parameter:  chan_no: logical channel number
146  * Return:      1 channel is open
147  *              0 not yet
148  *              EINVAL: invalid parameter
149  */
150 static inline int is_channel_open(int chan_no)
151 {
152         return (AMAZON_DMA_REG32(AMAZON_DMA_CH_ON) &
153                         (1 << ch_on_mapping[chan_no]));
154 }
155
156 /* Brief: add a list entry
157  * Description: 
158  *      always add to the tail; duplicates are not allowed (entries are unique)
159  *      0       : entry added
160  *      <0      : not added (already present or list full)
161  */
162 static inline int _add_list_entry(int *list, int size_of_list, int entry)
163 {
164         int i;
165         for (i = 0; i < size_of_list; i++) {
166                 if (list[i] == entry)
167                         break;
168                 if (list[i] < 0) {
169                         list[i] = entry;
170                         return 0;
171                 }
172         }
173         return -1;
174 }
175
176 /* Brief: delete a list entry
177  * Description:
178  *      find the entry and remove it, shifting the following entries one step forward if necessary
179  * Return:
180  *      0       : entry deleted
181  *      <0      : not deleted (due to not found?)
182  */
183 static inline int _delete_list_entry(int *list, int size_of_list,
184                                                                          int entry)
185 {
186         int i, j;
187         for (i = 0; i < size_of_list; i++) {
188                 if (list[i] == entry) {
189                         for (j = i; j < size_of_list; j++) {
190                                 list[j] = list[j + 1];
191                                 if (list[j + 1] < 0) {
192                                         break;
193                                 }
194                         }
195                         return 0;
196                 }
197         }
198         return -1;
199 }
200
201 /* Brief:       enable a channel through Channel ON register
202  * Parameter:  chan_no: logical channel number
203  * Description: 
204  *      Please don't open a channel without a valid descriptor (hardware pitfall)
205  */
206 static inline void open_channel(int chan_no)
207 {
208         AMAZON_DMA_REG32(AMAZON_DMA_CH_ON) |= (1 << ch_on_mapping[chan_no]);
209         if (is_rx_chan(chan_no)) {
210                 if (_add_list_entry(rx_chan_list, RX_CHAN_NUM, chan_no) == 0) {
211                         rx_chan_list_len++;
212                 } else {
213                         AMAZON_DMA_DMSG("cannot add chan %d to open list\n", chan_no);
214                 }
215         } else {
216                 if (_add_list_entry(tx_chan_list, TX_CHAN_NUM, chan_no) == 0) {
217                         tx_chan_list_len++;
218                 } else {
219                         AMAZON_DMA_DMSG("cannot add chan %d to open list\n", chan_no);
220                 }
221         }
222 }
223
224 /* Brief:       disable a channel through Channel ON register
225  * Parameter:  chan_no: logical channel number
226  */
227
228 static inline void close_channel(int chan_no)
229 {
230         AMAZON_DMA_REG32(AMAZON_DMA_CH_ON) &= ~(1 << ch_on_mapping[chan_no]);
231         if (is_rx_chan(chan_no)) {
232                 if (_delete_list_entry(rx_chan_list, RX_CHAN_NUM, chan_no) == 0) {
233                         rx_chan_list_len--;
234                 } else {
235                         AMAZON_DMA_DMSG("cannot remove chan %d from open list \n",
236                                                         chan_no);
237                 }
238         } else {
239                 if (_delete_list_entry(tx_chan_list, TX_CHAN_NUM, chan_no) == 0) {
240                         tx_chan_list_len--;
241                 } else {
242                         AMAZON_DMA_DMSG("cannot remove chan %d from open list \n",
243                                                         chan_no);
244                 }
245         }
246 }
247
248 /* Brief: clear RX interrupt
249  */
250 inline void rx_chan_clear_isr(int chan_no)
251 {
252 #ifdef DMA_NO_POLLING
253         AMAZON_DMA_REG32(AMAZON_DMA_CH0_ISR + chan_no * AMAZON_DMA_CH_STEP) =
254                 (AMAZON_DMA_REG32
255                  (AMAZON_DMA_CH0_ISR +
256                   chan_no *
257                   AMAZON_DMA_CH_STEP) & (DMA_ISR_CPT | DMA_ISR_EOP | DMA_ISR_CMDCPT
258                                                                  | DMA_ISR_DURR));
259 #else
260         AMAZON_DMA_REG32(AMAZON_DMA_CH0_ISR + chan_no * AMAZON_DMA_CH_STEP) =
261                 (AMAZON_DMA_REG32
262                  (AMAZON_DMA_CH0_ISR +
263                   chan_no *
264                   AMAZON_DMA_CH_STEP) & (DMA_ISR_CPT | DMA_ISR_EOP |
265                                                                  DMA_ISR_CMDCPT));
266 #endif
267 }
268
269
270 /* Brief:       workaround hack: reset all descriptors on this channel and hand them back to the DMA
271  */
272 static void dma_reset_all_descriptors(int chan_no)
273 {
274         volatile struct rx_desc *rx_desc_p = NULL;
275         int i;
276         rx_desc_p =
277                 (struct rx_desc *) g_desc_list +
278                 g_log_chan[chan_no].offset_from_base;
279         for (i = 0; i < g_log_chan[chan_no].desc_len; i++) {
280                 rx_desc_p->status.word &=
281                         (~(DMA_DESC_SOP_SET | DMA_DESC_EOP_SET | DMA_DESC_CPT_SET));
282                 rx_desc_p->status.word |=
283                         (DMA_DESC_OWN_DMA | g_log_chan[chan_no].packet_size);
284                 rx_desc_p++;
285         }
286 }
287
288 #ifdef AMAZON_DMA_TPE_AAL5_RECOVERY
289 /* Brief:       Reset DMA descriptors 
290  */
291 static void amazon_dma_reset_tpe_rx(int chan_no)
292 {
293         struct tx_desc *tx_desc_p = NULL;
294         int j, i = 0;
295
296         // wait until all TX channels stop transmitting
297         for (j = 9; j <= 10; j++) {
298                 tx_desc_p =
299                         (struct tx_desc *) g_desc_list +
300                         g_log_chan[j].offset_from_base;
301                 for (i = 0; i < g_log_chan[j].desc_len; i++) {
302                         while ((tx_desc_p->status.field.OWN != CPU_OWN)) {
303                                 AMAZON_DMA_DMSG("DMA TX in progress\n");        // 000004:fchang
304                                 udelay(100);
305                         }
306                         tx_desc_p++;
307                 }
308         }
309
310         if (tpe_reset) {
311                 total_dma_tpe_reset++;
312                 AMAZON_DMA_DMSG
313                         ("\n===============resetting TPE========================== \n");
314                 if ((*tpe_reset) ()) {
315                         panic("cannot reset TPE engine\n");     // 000004:fchang
316                 }
317         } else {
318                 panic("no tpe_reset function\n");       // 000004:fchang
319                 return;
320         }
321         dma_reset_all_descriptors(chan_no);
322         rx_chan_clear_isr(chan_no);
323         mb();
324
325         // send EoP
326         if (tpe_inject) {
327                 if ((*tpe_inject) ()) {
328                         panic("cannot inject a cell\n");        // 000004:fchang
329                 }
330         } else {
331                 AMAZON_DMA_EMSG("no tpe_inject function\n");
332                 return;
333         }
334         mb();
335         while (1) {
336                 if (AMAZON_DMA_REG32
337                         (AMAZON_DMA_CH0_ISR +
338                          chan_no * AMAZON_DMA_CH_STEP) & (DMA_ISR_CPT)) {
339                         rx_chan_clear_isr(chan_no);
340                         mb();
341                         dma_reset_all_descriptors(chan_no);
342                         if (g_log_chan[chan_no].current_desc ==
343                                 (g_log_chan[chan_no].desc_len - 1)) {
344                                 g_log_chan[chan_no].current_desc = 0;
345                         } else {
346                                 g_log_chan[chan_no].current_desc++;
347                         }
348                         break;
349                 }
350                 mdelay(1);
351         }
352         mb();
353 #if 0
354         AMAZON_DMA_REG32(AMAZON_DMA_CH_ON) &= ~(1 << ch_on_mapping[chan_no]);
355         while (AMAZON_DMA_REG32(AMAZON_DMA_CH_ON) &
356                    (1 << ch_on_mapping[chan_no])) {
357                 printk("TPE channel still on\n");
358                 mdelay(1);
359         }
360
361         // AMAZON_DMA_REG32(AMAZON_DMA_CH_RST) = (1<<chan_no);
362         mb();
363         AMAZON_DMA_REG32(AMAZON_DMA_CH0_MSK + chan_no * AMAZON_DMA_CH_STEP) =
364                 0x32;
365         mb();
366         rx_chan_clear_isr(chan_no);
367         dma_reset_all_descriptors(chan_no);
368         mb();
369         AMAZON_DMA_REG32(AMAZON_DMA_CH_ON) |= (1 << ch_on_mapping[chan_no]);
370         // g_log_chan[chan_no].current_desc=0;
371         mb();
372         mdelay(1);
373 #endif
374         if (tpe_start) {
375                 (*tpe_start) ();
376         } else {
377                 AMAZON_DMA_EMSG("cannot restart TPE engine\n");
378         }
379 }
380 #endif                                                  // AMAZON_DMA_TPE_AAL5_RECOVERY
381
382
383 /* Brief:       RX channel interrupt handler 
384  * Parameter:   RX channel no
385  * Description: the interrupt handler for each RX channel
386  *              1. check descriptor, clear ISR if no incoming packet
387  *              2. inform upper layer to receive packet (and update descriptors)
388  */
389 inline void rx_chan_intr_handler(int chan_no)
390 {
391         volatile struct rx_desc *rx_desc_p = NULL;
392
393         /* fetch the current descriptor */
394         rx_desc_p =
395                 (struct rx_desc *) g_desc_list +
396                 g_log_chan[chan_no].offset_from_base +
397                 g_log_chan[chan_no].current_desc;
398
399         g_log_chan[chan_no].dma_dev->current_rx_chan =
400                 chan_no - g_log_chan[chan_no].dma_dev->logic_rx_chan_base;
401
402         // workaround for DMA pitfall: the complete bit is set before the
403         // other two bits (OWN, EoP) are ready
404         if ((rx_desc_p->status.field.EoP != 1)
405                 || (rx_desc_p->status.field.OWN != CPU_OWN)
406                 || (rx_desc_p->status.field.data_length ==
407                         g_log_chan[chan_no].packet_size)) {
408 #ifdef AMAZON_DMA_TPE_AAL5_RECOVERY
409                 if (chan_no == 4 || chan_no == 5) {
410                         dma_sync_fails++;
411                         if (dma_sync_fails > MAX_SYNC_FAILS) {
412                                 /* too many consecutive sync failures: inspect the first two descriptors to detect the hang */
                                volatile struct rx_desc *rx_desc_p0, *rx_desc_p1;
413                                 rx_desc_p0 =
414                                         (struct rx_desc *) g_desc_list +
415                                         g_log_chan[chan_no].offset_from_base;
416                                 rx_desc_p1 =
417                                         (struct rx_desc *) g_desc_list +
418                                         g_log_chan[chan_no].offset_from_base + 1;
419                                 if ((rx_desc_p0->status.field.OWN == CPU_OWN
420                                          && rx_desc_p0->status.field.EoP != 1)
421                                         && (rx_desc_p1->status.field.OWN == CPU_OWN
422                                                 && rx_desc_p1->status.field.EoP != 1)) {
423                                         amazon_dma_reset_tpe_rx(chan_no);
424                                         dma_sync_fails = 0;
425                                         return;
426                                 }
427                                 dma_sync_fails = 0;
428                                 AMAZON_DMA_DMSG("too many times ch:%d\n", chan_no);     // 000004:fchang
429                                 return;
430                         }
431                         udelay(10);                     // 000004:fchang
432                 }
433 #endif                                                  // //AMAZON_DMA_TPE_AAL5_RECOVERY
434                 return;
435         }
436
437         /* inform the upper layer to receive the packet */
438         g_log_chan[chan_no].intr_handler(g_log_chan[chan_no].dma_dev, RCV_INT);
439         /* check the next descriptor: if it still contains an incoming packet,
440            then do not clear the interrupt status */
441         rx_desc_p =
442                 (struct rx_desc *) g_desc_list +
443                 g_log_chan[chan_no].offset_from_base +
444                 g_log_chan[chan_no].current_desc;
445         if (!
446                 ((rx_desc_p->status.field.OWN == CPU_OWN)
447                  && (rx_desc_p->status.field.C == 1))) {
448                 rx_chan_clear_isr(chan_no);
449         }
450 }
451
452
453 /* Brief:       TX channel interrupt handler 
454  * Parameter:   TX channel no
455  * Description: the interrupt handler for each TX channel
456  * 1. check all the descriptors; if any of them has transmitted a packet, free its buffer
457  *    (we cannot tell which one has just completed, so we walk all descriptors here)
458  * 2. clear the interrupt status bit
459  */
460 inline void tx_chan_intr_handler(int chan_no)
461 {
462         struct tx_desc *tx_desc_p = NULL;
463         int i = 0;
464
465         tx_desc_p =
466                 (struct tx_desc *) g_desc_list +
467                 g_log_chan[chan_no].offset_from_base;
468
469         for (i = 0; i < g_log_chan[chan_no].desc_len; i++) {
470                 if ((tx_desc_p->status.field.OWN == CPU_OWN)
471                         && (tx_desc_p->status.field.C == 1)) {
472                         /* if already transmitted, then free the buffer */
473                         g_log_chan[chan_no].
474                                 buffer_free((u8 *) __va(tx_desc_p->Data_Pointer),
475                                                         g_log_chan[chan_no].opt[i]);
476                         tx_desc_p->status.field.C = 0;
477                         /* inform the upper layer about the completion of the
478                            transmitted packet, the upper layer may want to free the
479                            packet */
480                         g_log_chan[chan_no].intr_handler(g_log_chan[chan_no].dma_dev,
481                                                                                          TRANSMIT_CPT_INT);
482                 }
483                 tx_desc_p++;
484         }
485
486         /* after all these operations, clear the interrupt status bit */
487         AMAZON_DMA_REG32(AMAZON_DMA_CH0_ISR + chan_no * AMAZON_DMA_CH_STEP) =
488                 (AMAZON_DMA_REG32
489                  (AMAZON_DMA_CH0_ISR +
490                   chan_no *
491                   AMAZON_DMA_CH_STEP) & (DMA_ISR_CPT | DMA_ISR_EOP |
492                                                                  DMA_ISR_CMDCPT));
493 }
494
495 /*      Brief:  DMA interrupt handler
496  */
497 static irqreturn_t dma_interrupt(int irq, void *dev_id)
498 {
499         int i = 0;
500         int chan_no;
501         u32 isr = 0;
502 #ifdef NO_TX_INT                                // 000004:fchang
503         static int cnt = 0;                     // 000004:fchang
504 #endif                                                  // 000004:fchang
505         while ((isr =
506                         AMAZON_DMA_REG32(AMAZON_DMA_COMB_ISR)) & (COMB_ISR_RX_MASK |
507                                                                                                           COMB_ISR_TX_MASK)) {
508                 if (isr & COMB_ISR_RX_MASK) {
509                         // RX Channels: start WFQ algorithm
510                         chan_no = CHAN_TOTAL_NUM;
511                         for (i = 0; i < RX_CHAN_NUM; i++) {
512                                 if ((isr & (comb_isr_mask[i]))
513                                         && (g_log_chan[i].weight > 0)) {
514                                         if (g_log_chan[chan_no].weight < g_log_chan[i].weight) {
515                                                 chan_no = i;
516                                         }
517                                 }
518                         }
519                         if (chan_no < CHAN_TOTAL_NUM) {
520                                 rx_chan_intr_handler(chan_no);
521                         } else {
522                                 for (i = 0; i < RX_CHAN_NUM; i++) {
523                                         g_log_chan[i].weight = g_log_chan[i].default_weight;
524                                 }
525                         }
526                 }
527 #ifdef NO_TX_INT
528                 cnt++;
529                 if (cnt == 10) {
530                         cnt = 0;
531                         for (i = 0; i < tx_chan_list_len; i++) {
532                                 if (AMAZON_DMA_REG32
533                                         (AMAZON_DMA_CH0_ISR +
534                                          tx_chan_list[i] *
535                                          AMAZON_DMA_CH_STEP) & (DMA_ISR_CPT | DMA_ISR_EOP)) {
536                                         tx_chan_intr_handler(tx_chan_list[i]);
537                                 }
538                         }
539                 }
540 #else
541                 if (isr & COMB_ISR_TX_MASK) {
542                         // TX channels: RR
543                         for (i = 0; i < tx_chan_list_len; i++) {
544                                 if (isr & (comb_isr_mask[tx_chan_list[i]])) {
545                                         tx_chan_intr_handler(tx_chan_list[i]);
546                                 }
547                         }
548                 }
549 #endif
550         }                                                       // while 
551         return IRQ_HANDLED;
552 }
553
554
555 /*      Brief:  read a packet from DMA RX channel
556  *      Parameter:
557  *      Return: packet length
558  *      Description:
559  *              This is called back in the context of the DMA interrupt
560  *              1. prepare new descriptor
561  *              2. read data
562  *              3. update WFQ weight
563  */
564 //507261:tc.chen int dma_device_read(struct dma_device_info* dma_dev, u8** dataptr, void** opt)
565 int asmlinkage dma_device_read(struct dma_device_info *dma_dev,
566                                                            u8 ** dataptr, void **opt)
567 {
568         u8 *buf;
569         int len;
570         int chan_no = 0;
571         int byte_offset = 0;
572
573         struct rx_desc *rx_desc_p;
574         void *p = NULL;
575         int current_desc;
576
577         chan_no = dma_dev->logic_rx_chan_base + dma_dev->current_rx_chan;
578         current_desc = g_log_chan[chan_no].current_desc;
579         rx_desc_p =
580                 (struct rx_desc *) (g_desc_list +
581                                                         g_log_chan[chan_no].offset_from_base +
582                                                         current_desc);
583         buf = (u8 *) __va(rx_desc_p->Data_Pointer);     /* extract the virtual address of the data pointer */
586         len = rx_desc_p->status.field.data_length;      /* extract the data length */
587 #ifndef CONFIG_MIPS_UNCACHED
588         dma_cache_inv((unsigned long) buf, len);
589 #endif                                                  // CONFIG_MIPS_UNCACHED
590         *(u32 *) dataptr = (u32) buf;
591         if (opt) {
592                 *(int *) opt = (int) g_log_chan[chan_no].opt[current_desc];     /* read out the opt information */
597         }
598
599         buf =
600                 (u8 *) g_log_chan[chan_no].buffer_alloc(g_log_chan[chan_no].
601                                                                                                 packet_size, &byte_offset,
602                                                                                                 &p);
603         /* check for allocation failure: if so, drop this packet and give the old buffer back to the DMA */
604         if (buf == NULL || p == NULL) {
605                 *(u32 *) dataptr = 0;
606                 *(int *) opt = 0;
607                 len = 0;
608         } else {
609                 g_log_chan[chan_no].opt[current_desc] = p;
610                 /* reduce the weight for WFQ algorithm */
611                 g_log_chan[chan_no].weight -= len;
612                 rx_desc_p->Data_Pointer = (u32) CPHYSADDR((u32) buf);
613         }
614         if (current_desc == g_log_chan[chan_no].desc_len - 1) {
615                 current_desc = 0;
616         } else {
617                 current_desc++;
618         }
619         g_log_chan[chan_no].current_desc = current_desc;
620
621         rx_desc_p->status.word = DMA_DESC_OWN_DMA
622                 | (byte_offset << DMA_DESC_BYTEOFF_SHIFT)
623                 | g_log_chan[chan_no].packet_size;
624         return len;
625 }
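/*
 * Illustrative sketch (not part of the driver): how a client's intr_handler
 * callback might consume a packet with dma_device_read().  The callback name
 * and what is done with the payload are hypothetical; the dma_device_read()
 * call and the RCV_INT status value are as used in this file.
 */
#if 0
static int example_client_intr_handler(struct dma_device_info *dma_dev, int status)
{
        u8 *data = NULL;
        void *opt = NULL;
        int len;

        if (status == RCV_INT) {
                /* fetch the payload; the driver re-arms the descriptor itself */
                len = dma_device_read(dma_dev, &data, &opt);
                if (len > 0 && data != NULL) {
                        /* hand data/len (and the opt cookie) to the protocol layer here */
                }
        }
        return 0;
}
#endif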
626
627 /*      Brief:  write a packet through a DMA TX channel to the peripheral
628  *      Parameter:
629  *      Return: packet length
630  *      Description:
631  *
632  */
633 u64 dma_tx_drop = 0;
634 //507261:tc.chen int dma_device_write(struct dma_device_info* dma_dev, u8* dataptr, int len,void* opt)
635 int asmlinkage dma_device_write(struct dma_device_info *dma_dev,
636                                                                 u8 * dataptr, int len, void *opt)
637 {
638         int chan_no = 0;
639         struct tx_desc *tx_desc_p;
640
641         int byte_offset = 0;
642         int current_desc;
643         static int cnt = 0;                     // 000004:fchang
644
645         unsigned long flag;
646         local_irq_save(flag);
647
648         chan_no = dma_dev->logic_tx_chan_base + dma_dev->current_tx_chan;
649         current_desc = g_log_chan[chan_no].current_desc;
650         tx_desc_p =
651                 (struct tx_desc *) (g_desc_list +
652                                                         g_log_chan[chan_no].offset_from_base +
653                                                         current_desc);
654         // 000003:tc.chen if(tx_desc_p->status.field.OWN==DMA_OWN){
655         if (tx_desc_p->status.field.OWN == DMA_OWN || tx_desc_p->status.field.C == 1) { // 000003:tc.chen
656                 AMAZON_DMA_DMSG("no TX desc for CPU, drop packet\n");
657                 dma_tx_drop++;
658                 g_log_chan[chan_no].intr_handler(dma_dev, TX_BUF_FULL_INT);
659                 local_irq_restore(flag);
660                 return 0;
661         }
662         g_log_chan[chan_no].opt[current_desc] = opt;
663
664         /* byte offset: distance of the data pointer from the previous burst-aligned
665            address; the DMA start address must be a multiple of the burst length (in bytes). */
666         byte_offset =
667                 ((u32) CPHYSADDR((u32) dataptr)) % (g_log_chan[chan_no].burst_len *
668                                                                                         4);
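        /*
         * Worked example (values are illustrative, not taken from this file):
         * with burst_len = 4 words the alignment unit is 16 bytes.  If
         * CPHYSADDR(dataptr) ends in 0x1a, then byte_offset = 0x1a % 16 = 10,
         * and Data_Pointer below is set to the 16-byte aligned address 10
         * bytes before the payload; the DMA skips byte_offset bytes when it
         * starts the transfer.
         */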
669 #ifndef CONFIG_MIPS_UNCACHED
670         dma_cache_wback((unsigned long) dataptr, len);
671         wmb();
672 #endif                                                  // CONFIG_MIPS_UNCACHED
673
674         tx_desc_p->Data_Pointer = (u32) CPHYSADDR((u32) dataptr) - byte_offset;
675         wmb();
676         tx_desc_p->status.word = DMA_DESC_OWN_DMA
677                 | DMA_DESC_SOP_SET
678                 | DMA_DESC_EOP_SET | (byte_offset << DMA_DESC_BYTEOFF_SHIFT)
679                 | len;
680         wmb();
681         if (is_channel_open(chan_no) == 0) {
682                 // turn on if necessary
683                 open_channel(chan_no);
684         }
685 #ifdef DMA_NO_POLLING
686         if ((AMAZON_DMA_REG32
687                  (AMAZON_DMA_CH0_ISR +
688                   chan_no * AMAZON_DMA_CH_STEP) & (DMA_ISR_DURR | DMA_ISR_CPT)) ==
689                 (DMA_ISR_DURR)) {
690                 // clear DURR only if DURR is set and CPT is not set
691                 AMAZON_DMA_REG32(AMAZON_DMA_CH0_ISR +
692                                                  chan_no * AMAZON_DMA_CH_STEP) = DMA_ISR_DURR;
693         }
694 #endif
695
696         if (current_desc == (g_log_chan[chan_no].desc_len - 1)) {
697                 current_desc = 0;
698         } else {
699                 current_desc++;
700         }
701
702
703         g_log_chan[chan_no].current_desc = current_desc;
704         tx_desc_p =
705                 (struct tx_desc *) (g_desc_list +
706                                                         g_log_chan[chan_no].offset_from_base +
707                                                         current_desc);
708         // 000003:tc.chen if(tx_desc_p->status.field.OWN==DMA_OWN){
709         if (tx_desc_p->status.field.OWN == DMA_OWN || tx_desc_p->status.field.C == 1) { // 000003:tc.chen
710                 g_log_chan[chan_no].intr_handler(dma_dev, TX_BUF_FULL_INT);
711         }
712 #ifdef NO_TX_INT
713 //000004:fchang Start
714         cnt++;
715         if (cnt == 5) {
716                 cnt = 0;
717                 tx_chan_intr_handler(chan_no);
718         }
719 //000004:fchang End
720 #endif
721         local_irq_restore(flag);        // 000004:fchang
722         return len;
723 }
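/*
 * Illustrative sketch (not part of the driver): a hypothetical client TX path.
 * dma_device_write() returns 0 when no free descriptor is available (the
 * packet is not queued and TX_BUF_FULL_INT is signalled), otherwise the
 * queued length; the opt cookie is handed back to buffer_free() once the
 * packet has been sent.
 */
#if 0
static int example_client_send(struct dma_device_info *dma_dev, u8 *buf, int len, void *opt)
{
        if (dma_device_write(dma_dev, buf, len, opt) == 0) {
                /* queue full: the caller still owns buf and must retry or drop it */
                return -1;
        }
        return 0;
}
#endif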
724
725
726
727 int desc_list_proc_read(char *buf, char **start, off_t offset,
728                                                 int count, int *eof, void *data)
729 {
730         int i;
731         u32 *p = (u32 *) g_desc_list;
732         int len = 0;
733         len += sprintf(buf + len, "descriptor list:\n");
734         for (i = 0; i < 120; i++) {
735                 len += sprintf(buf + len, "%d\n", i);
736                 len += sprintf(buf + len, "%08x\n", *(p + i * 2 + 1));
737                 len += sprintf(buf + len, "%08x\n", *(p + i * 2));
738
739         }
740
741         return len;
742
743 }
744
745 int channel_weight_proc_read(char *buf, char **start, off_t offset,
746                                                          int count, int *eof, void *data)
747 {
748
749         // int i=0;
750         int len = 0;
751         len += sprintf(buf + len, "Qos dma channel weight list\n");
752         len +=
753                 sprintf(buf + len,
754                                 "channel_num default_weight current_weight device Tx/Rx\n");
755         len +=
756                 sprintf(buf + len,
757                                 "     0      %08x        %08x      Switch   Rx0\n",
758                                 g_log_chan[0].default_weight, g_log_chan[0].weight);
759         len +=
760                 sprintf(buf + len,
761                                 "     1      %08x        %08x      Switch   Rx1\n",
762                                 g_log_chan[1].default_weight, g_log_chan[1].weight);
763         len +=
764                 sprintf(buf + len,
765                                 "     2      %08x        %08x      Switch   Rx2\n",
766                                 g_log_chan[2].default_weight, g_log_chan[2].weight);
767         len +=
768                 sprintf(buf + len,
769                                 "     3      %08x        %08x      Switch   Rx3\n",
770                                 g_log_chan[3].default_weight, g_log_chan[3].weight);
771         len +=
772                 sprintf(buf + len,
773                                 "     4      %08x        %08x      Switch   Tx0\n",
774                                 g_log_chan[4].default_weight, g_log_chan[4].weight);
775         len +=
776                 sprintf(buf + len,
777                                 "     5      %08x        %08x      Switch   Tx1\n",
778                                 g_log_chan[5].default_weight, g_log_chan[5].weight);
779         /* 
780            len+=sprintf(buf+len," 6 %08x %08x TPE
781            Rx0\n",g_log_chan[6].default_weight, g_log_chan[6].weight);
782            len+=sprintf(buf+len," 7 %08x %08x TPE
783            Rx0\n",g_log_chan[7].default_weight, g_log_chan[7].weight);
784            len+=sprintf(buf+len," 8 %08x %08x TPE
785            Tx0\n",g_log_chan[8].default_weight, g_log_chan[8].weight);
786            len+=sprintf(buf+len," 9 %08x %08x TPE
787            Rx0\n",g_log_chan[9].default_weight, g_log_chan[9].weight);
788            len+=sprintf(buf+len," 10 %08x %08x DPLUS
789            Rx0\n",g_log_chan[10].default_weight, g_log_chan[10].weight);
790            len+=sprintf(buf+len," 11 %08x %08x DPLUS
791            Rx0\n",g_log_chan[11].default_weight, g_log_chan[11].weight); */
792         return len;
793 }
794
795 int dma_register_proc_read(char *buf, char **start, off_t offset,
796                                                    int count, int *eof, void *data)
797 {
798         dev_list *temp_dev;
799         int len = 0;
800
801         len += sprintf(buf + len, "amazon dma driver\n");
802         len += sprintf(buf + len, "version 1.0\n");
803         len += sprintf(buf + len, "devices registered:\n");
804         for (temp_dev = g_head_dev; temp_dev; temp_dev = temp_dev->next) {
805                 len += sprintf(buf + len, "%s ", temp_dev->dev->device_name);
806         }
807         len += sprintf(buf + len, "\n");
808         len += sprintf(buf + len, "CH_ON=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH_ON));
809         len += sprintf(buf + len, "CH_RST=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH_RST));
810         len += sprintf(buf + len, "CH0_ISR=%08x\n",     AMAZON_DMA_REG32(AMAZON_DMA_CH0_ISR));
811         len += sprintf(buf + len, "CH1_ISR=%08x\n",     AMAZON_DMA_REG32(AMAZON_DMA_CH1_ISR));
812         len += sprintf(buf + len, "CH2_ISR=%08x\n",     AMAZON_DMA_REG32(AMAZON_DMA_CH2_ISR));
813         len += sprintf(buf + len, "CH3_ISR=%08x\n",     AMAZON_DMA_REG32(AMAZON_DMA_CH3_ISR));
814         len += sprintf(buf + len, "CH4_ISR=%08x\n",     AMAZON_DMA_REG32(AMAZON_DMA_CH4_ISR));
815         len += sprintf(buf + len, "CH5_ISR=%08x\n",     AMAZON_DMA_REG32(AMAZON_DMA_CH5_ISR));
816         len += sprintf(buf + len, "CH6_ISR=%08x\n",     AMAZON_DMA_REG32(AMAZON_DMA_CH6_ISR));
817         len += sprintf(buf + len, "CH7_ISR=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH7_ISR));
818         len += sprintf(buf + len, "CH8_ISR=%08x\n",
819                                 AMAZON_DMA_REG32(AMAZON_DMA_CH8_ISR));
820         len +=
821                 sprintf(buf + len, "CH9_ISR=%08x\n",
822                                 AMAZON_DMA_REG32(AMAZON_DMA_CH9_ISR));
823         len +=
824                 sprintf(buf + len, "CH10_ISR=%08x\n",
825                                 AMAZON_DMA_REG32(AMAZON_DMA_CH10_ISR));
826         len +=
827                 sprintf(buf + len, "CH11_ISR=%08x\n",
828                                 AMAZON_DMA_REG32(AMAZON_DMA_CH11_ISR));
829         len +=
830                 sprintf(buf + len, "LCH0_MSK=%08x\n",
831                                 AMAZON_DMA_REG32(AMAZON_DMA_CH0_MSK));
832         len +=
833                 sprintf(buf + len, "LCH1_MSK=%08x\n",
834                                 AMAZON_DMA_REG32(AMAZON_DMA_CH1_MSK));
835         len +=
836                 sprintf(buf + len, "LCH2_MSK=%08x\n",
837                                 AMAZON_DMA_REG32(AMAZON_DMA_CH2_MSK));
838         len +=
839                 sprintf(buf + len, "LCH3_MSK=%08x\n",
840                                 AMAZON_DMA_REG32(AMAZON_DMA_CH3_MSK));
841         len +=
842                 sprintf(buf + len, "LCH4_MSK=%08x\n",
843                                 AMAZON_DMA_REG32(AMAZON_DMA_CH4_MSK));
844         len +=
845                 sprintf(buf + len, "LCH5_MSK=%08x\n",
846                                 AMAZON_DMA_REG32(AMAZON_DMA_CH5_MSK));
847         len +=
848                 sprintf(buf + len, "LCH6_MSK=%08x\n",
849                                 AMAZON_DMA_REG32(AMAZON_DMA_CH6_MSK));
850         len +=
851                 sprintf(buf + len, "LCH7_MSK=%08x\n",
852                                 AMAZON_DMA_REG32(AMAZON_DMA_CH7_MSK));
853         len +=
854                 sprintf(buf + len, "LCH8_MSK=%08x\n",
855                                 AMAZON_DMA_REG32(AMAZON_DMA_CH8_MSK));
856         len +=
857                 sprintf(buf + len, "LCH9_MSK=%08x\n",
858                                 AMAZON_DMA_REG32(AMAZON_DMA_CH9_MSK));
859         len +=
860                 sprintf(buf + len, "LCH10_MSK=%08x\n",
861                                 AMAZON_DMA_REG32(AMAZON_DMA_CH10_MSK));
862         len +=
863                 sprintf(buf + len, "LCH11_MSK=%08x\n",
864                                 AMAZON_DMA_REG32(AMAZON_DMA_CH11_MSK));
865         len +=
866                 sprintf(buf + len, "Desc_BA=%08x\n",
867                                 AMAZON_DMA_REG32(AMAZON_DMA_Desc_BA));
868         len +=
869                 sprintf(buf + len, "LCH0_DES_LEN=%08x\n",
870                                 AMAZON_DMA_REG32(AMAZON_DMA_CH0_DES_LEN));
871         len +=
872                 sprintf(buf + len, "LCH1_DES_LEN=%08x\n",
873                                 AMAZON_DMA_REG32(AMAZON_DMA_CH1_DES_LEN));
874         len +=
875                 sprintf(buf + len, "LCH2_DES_LEN=%08x\n",
876                                 AMAZON_DMA_REG32(AMAZON_DMA_CH2_DES_LEN));
877         len +=
878                 sprintf(buf + len, "LCH3_DES_LEN=%08x\n",
879                                 AMAZON_DMA_REG32(AMAZON_DMA_CH3_DES_LEN));
880         len +=
881                 sprintf(buf + len, "LCH4_DES_LEN=%08x\n",
882                                 AMAZON_DMA_REG32(AMAZON_DMA_CH4_DES_LEN));
883         len +=
884                 sprintf(buf + len, "LCH5_DES_LEN=%08x\n",
885                                 AMAZON_DMA_REG32(AMAZON_DMA_CH5_DES_LEN));
886         len +=
887                 sprintf(buf + len, "LCH6_DES_LEN=%08x\n",
888                                 AMAZON_DMA_REG32(AMAZON_DMA_CH6_DES_LEN));
889         len +=
890                 sprintf(buf + len, "LCH7_DES_LEN=%08x\n",
891                                 AMAZON_DMA_REG32(AMAZON_DMA_CH7_DES_LEN));
892         len +=
893                 sprintf(buf + len, "LCH8_DES_LEN=%08x\n",
894                                 AMAZON_DMA_REG32(AMAZON_DMA_CH8_DES_LEN));
895         len +=
896                 sprintf(buf + len, "LCH9_DES_LEN=%08x\n",
897                                 AMAZON_DMA_REG32(AMAZON_DMA_CH9_DES_LEN));
898         len +=
899                 sprintf(buf + len, "LCH10_DES_LEN=%08x\n",
900                                 AMAZON_DMA_REG32(AMAZON_DMA_CH10_DES_LEN));
901         len +=
902                 sprintf(buf + len, "LCH11_DES_LEN=%08x\n",
903                                 AMAZON_DMA_REG32(AMAZON_DMA_CH11_DES_LEN));
904         len +=
905                 sprintf(buf + len, "LCH1_DES_OFST=%08x\n",
906                                 AMAZON_DMA_REG32(AMAZON_DMA_CH1_DES_OFST));
907         len +=
908                 sprintf(buf + len, "LCH2_DES_OFST=%08x\n",
909                                 AMAZON_DMA_REG32(AMAZON_DMA_CH2_DES_OFST));
910         len +=
911                 sprintf(buf + len, "LCH3_DES_OFST=%08x\n",
912                                 AMAZON_DMA_REG32(AMAZON_DMA_CH3_DES_OFST));
913         len +=
914                 sprintf(buf + len, "LCH4_DES_OFST=%08x\n",
915                                 AMAZON_DMA_REG32(AMAZON_DMA_CH4_DES_OFST));
916         len +=
917                 sprintf(buf + len, "LCH5_DES_OFST=%08x\n",
918                                 AMAZON_DMA_REG32(AMAZON_DMA_CH5_DES_OFST));
919         len +=
920                 sprintf(buf + len, "LCH6_DES_OFST=%08x\n",
921                                 AMAZON_DMA_REG32(AMAZON_DMA_CH6_DES_OFST));
922         len +=
923                 sprintf(buf + len, "LCH7_DES_OFST=%08x\n",
924                                 AMAZON_DMA_REG32(AMAZON_DMA_CH7_DES_OFST));
925         len +=
926                 sprintf(buf + len, "LCH8_DES_OFST=%08x\n",
927                                 AMAZON_DMA_REG32(AMAZON_DMA_CH8_DES_OFST));
928         len +=
929                 sprintf(buf + len, "LCH9_DES_OFST=%08x\n",
930                                 AMAZON_DMA_REG32(AMAZON_DMA_CH9_DES_OFST));
931         len +=
932                 sprintf(buf + len, "LCH10_DES_OFST=%08x\n",
933                                 AMAZON_DMA_REG32(AMAZON_DMA_CH10_DES_OFST));
934         len +=
935                 sprintf(buf + len, "LCH11_DES_OFST=%08x\n",
936                                 AMAZON_DMA_REG32(AMAZON_DMA_CH11_DES_OFST));
937         len +=
938                 sprintf(buf + len, "AMAZON_DMA_SW_BL=%08x\n",
939                                 AMAZON_DMA_REG32(AMAZON_DMA_SW_BL));
940         len +=
941                 sprintf(buf + len, "AMAZON_DMA_TPE_BL=%08x\n",
942                                 AMAZON_DMA_REG32(AMAZON_DMA_TPE_BL));
943         len +=
944                 sprintf(buf + len, "DPlus2FPI_BL=%08x\n",
945                                 AMAZON_DMA_REG32(AMAZON_DMA_DPlus2FPI_BL));
946         len +=
947                 sprintf(buf + len, "GRX_BUF_LEN=%08x\n",
948                                 AMAZON_DMA_REG32(AMAZON_DMA_GRX_BUF_LEN));
949         len +=
950                 sprintf(buf + len, "DMA_ECON_REG=%08x\n",
951                                 AMAZON_DMA_REG32(AMAZON_DMA_DMA_ECON_REG));
952         len +=
953                 sprintf(buf + len, "POLLING_REG=%08x\n",
954                                 AMAZON_DMA_REG32(AMAZON_DMA_POLLING_REG));
955         len +=
956                 sprintf(buf + len, "CH_WGT=%08x\n",
957                                 AMAZON_DMA_REG32(AMAZON_DMA_CH_WGT));
958         len +=
959                 sprintf(buf + len, "TX_WGT=%08x\n",
960                                 AMAZON_DMA_REG32(AMAZON_DMA_TX_WGT));
961         len +=
962                 sprintf(buf + len, "DPlus2FPI_CLASS=%08x\n",
963                                 AMAZON_DMA_REG32(AMAZON_DMA_DPLus2FPI_CLASS));
964         len +=
965                 sprintf(buf + len, "COMB_ISR=%08x\n",
966                                 AMAZON_DMA_REG32(AMAZON_DMA_COMB_ISR));
967 #ifdef AMAZON_DMA_TPE_AAL5_RECOVERY
968         len += sprintf(buf + len, "TPE fails:%u\n", total_dma_tpe_reset);       // 000004:fchang
969 #endif
970         return len;
971 }
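/*
 * Illustrative sketch (not part of this listing): how the three read_proc
 * callbacks above would typically be hooked up in the module init path.
 * The proc entry names used here are assumptions; the real registration
 * happens elsewhere in this driver.
 */
#if 0
static void example_register_proc_entries(void)
{
        g_amazon_dma_dir = proc_mkdir("amazon_dma", NULL);
        create_proc_read_entry("dma_register", 0, g_amazon_dma_dir,
                               dma_register_proc_read, NULL);
        create_proc_read_entry("g_desc_list", 0, g_amazon_dma_dir,
                               desc_list_proc_read, NULL);
        create_proc_read_entry("channel_weight", 0, g_amazon_dma_dir,
                               channel_weight_proc_read, NULL);
}
#endif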
972
973 /*      Brief:  initialize DMA registers
974  *      Description:
975  */
976 static void dma_chip_init(void)
977 {
978         int i;
979         for (i = 0; i < CHAN_TOTAL_NUM; i++) {
980                 AMAZON_DMA_REG32(AMAZON_DMA_CH1_DES_OFST +
981                                                  i * AMAZON_DMA_CH_STEP) = DEFAULT_OFFSET;
982         }
983 #ifdef DMA_NO_POLLING
984         AMAZON_DMA_REG32(AMAZON_DMA_POLLING_REG) = 0;
985 #else
986         // enable poll mode and set polling counter
987         AMAZON_DMA_REG32(AMAZON_DMA_POLLING_REG) = DMA_POLLING_CNT | DMA_POLLING_ENABLE;
988 #endif
989         // to enable DMA drop
990         AMAZON_DMA_REG32(AMAZON_DMA_GRX_BUF_LEN) = 0x10000;
991 }
992
993 int insert_dev_list(dev_list * dev)
994 {
995         dev_list *temp_dev;
996         if (g_head_dev == NULL) {
997                 g_head_dev = dev;
998                 g_tail_dev = dev;
999                 dev->prev = NULL;
1000                 dev->next = NULL;
1001         } else {
1002                 for (temp_dev = g_head_dev; temp_dev; temp_dev = temp_dev->next) {
1003                         if (temp_dev->weight < dev->weight) {
1004                                 if (temp_dev->prev)
1005                                         temp_dev->prev->next = dev;
1006
1007                                 dev->prev = temp_dev->prev;
1008                                 dev->next = temp_dev;
1009                                 temp_dev->prev = dev;
1010                                 if (temp_dev == g_head_dev)
1011                                         g_head_dev = dev;
1012                                 break;
1013                         }
1014                 }
1015
1016                 if (!temp_dev) {
1017                         g_tail_dev->next = dev;
1018                         dev->prev = g_tail_dev;
1019                         dev->next = NULL;
1020                         g_tail_dev = dev;
1021                 }
1022
1023         }
1024
1025         return 1;
1026 }
1027
1028 u8 *common_buffer_alloc(int len, int *byte_offset, void **opt)
1029 {
1030         u8 *buffer = (u8 *) kmalloc(len * sizeof(u8), GFP_KERNEL);
1031         *byte_offset = 0;
1032         return buffer;
1033
1034 }
1035
1036 int common_buffer_free(u8 * dataptr, void *opt)
1037 {
1038         if (dataptr)
1039                 kfree(dataptr);
1040         return 0;
1041 }
1042
1043
1044 int register_dev(struct dma_device_info *dma_dev)
1045 {
1046         int i, j, temp;
1047         int burst_reg = 0;
1048         u8 *buffer;
1049         void *p = NULL;
1050         int byte_offset = 0;
1051
1052         struct rx_desc *rx_desc_p;
1053         struct tx_desc *tx_desc_p;
1054         if (strcmp(dma_dev->device_name, "switch1") == 0) {
1055                 AMAZON_DMA_REG32(AMAZON_DMA_CH_RST) = SWITCH1_RST_MASK; /* reset the channel first */
1058                 AMAZON_DMA_REG32(AMAZON_DMA_DMA_ECON_REG) |= 0x3;       /* endian conversion for the Switch */
1061                 burst_reg = AMAZON_DMA_SW_BL;
1062                 dma_dev->logic_rx_chan_base = switch_rx_chan_base;
1063                 dma_dev->logic_tx_chan_base = switch_tx_chan_base;
1064         }
1065
1066         else if (strcmp(dma_dev->device_name, "switch2") == 0) {
1067                 AMAZON_DMA_REG32(AMAZON_DMA_CH_RST) = SWITCH2_RST_MASK; /* reset the channel first */
1070                 AMAZON_DMA_REG32(AMAZON_DMA_DMA_ECON_REG) |= 0x3;       /* endian conversion for the Switch */
1073                 burst_reg = AMAZON_DMA_SW_BL;
1074                 dma_dev->logic_rx_chan_base = switch2_rx_chan_base;
1075                 dma_dev->logic_tx_chan_base = switch2_tx_chan_base;
1076
1077         } else if (strcmp(dma_dev->device_name, "TPE") == 0) {
1078                 AMAZON_DMA_REG32(AMAZON_DMA_CH_RST) = TPE_RST_MASK;     /* reset the channel first */
1081                 burst_reg = AMAZON_DMA_TPE_BL;
1082                 dma_dev->logic_rx_chan_base = TPE_rx_chan_base;
1083                 dma_dev->logic_tx_chan_base = TPE_tx_chan_base;
1084         }
1085
1086         else if (strcmp(dma_dev->device_name, "DPlus") == 0) {
1087                 AMAZON_DMA_REG32(AMAZON_DMA_CH_RST) = DPlus2FPI_RST_MASK;       /* reset the channel first */
1090                 dma_dev->logic_rx_chan_base = DPLus2FPI_rx_chan_base;
1091                 dma_dev->logic_tx_chan_base = DPLus2FPI_tx_chan_base;
1092
1093         }
1094
1095         i = 0;
1096         for (temp = dma_dev->tx_burst_len; temp > 2; temp /= 2) {
1097                 i += 1;
1098         }
1099
1100
1101         AMAZON_DMA_REG32(burst_reg) = i << 1;
1102         i = 0;
1103         for (temp = dma_dev->rx_burst_len; temp > 2; temp /= 2) {
1104                 i += 1;
1105         }
1106         AMAZON_DMA_REG32(burst_reg) += i;
1107
1108         for (i = 0; i < dma_dev->num_rx_chan; i++) {
1109
1110                 temp = dma_dev->logic_rx_chan_base + i;
1111                 g_log_chan[temp].dma_dev = dma_dev;
1112                 g_log_chan[temp].weight = dma_dev->rx_chan[i].weight;
1113                 g_log_chan[temp].default_weight = dma_dev->rx_chan[i].weight;
1114                 g_log_chan[temp].current_desc = 0;
1115                 g_log_chan[temp].desc_ofst = DEFAULT_OFFSET;
1116                 g_log_chan[temp].desc_len = dma_dev->rx_chan[i].desc_num;
1117                 g_log_chan[temp].offset_from_base = temp * DEFAULT_OFFSET;
1118                 g_log_chan[temp].packet_size = dma_dev->rx_chan[i].packet_size;
1119
1120                 AMAZON_DMA_REG32(AMAZON_DMA_CH0_DES_LEN + temp * AMAZON_DMA_CH_STEP) = dma_dev->rx_chan[i].desc_num;
1121                 // enable interrupt mask
1122                 if (temp == 4 || temp == 5) {
1123                         AMAZON_DMA_REG32(AMAZON_DMA_CH0_MSK + temp * AMAZON_DMA_CH_STEP) = 0x32;
1124                 } else {
1125                         AMAZON_DMA_REG32(AMAZON_DMA_CH0_MSK + temp * AMAZON_DMA_CH_STEP) = 0x36;
1126                 }
1127                 strcpy(g_log_chan[temp].device_name, dma_dev->device_name);
1128                 g_log_chan[temp].burst_len = dma_dev->rx_burst_len;
1129                 g_log_chan[temp].control = dma_dev->rx_chan[i].control;
1130
1131
1132                 /* specify the buffer allocation and free method */
1133                 if (dma_dev->buffer_alloc)
1134                         g_log_chan[temp].buffer_alloc = dma_dev->buffer_alloc;
1135                 else
1136                         g_log_chan[temp].buffer_alloc = common_buffer_alloc;
1137
1138                 if (dma_dev->buffer_free)
1139                         g_log_chan[temp].buffer_free = dma_dev->buffer_free;
1140                 else
1141                         g_log_chan[temp].buffer_free = common_buffer_free;
1142
1143                 if (dma_dev->intr_handler)
1144                         g_log_chan[temp].intr_handler = dma_dev->intr_handler;
1145                 else
1146                         g_log_chan[temp].intr_handler = NULL;
1147
1148                 for (j = 0; j < g_log_chan[temp].desc_len; j++) {
1149                         rx_desc_p = (struct rx_desc *) (g_desc_list + g_log_chan[temp].offset_from_base + j);
1150                         rx_desc_p->status.word = 0;
1151                         rx_desc_p->status.field.data_length = g_log_chan[temp].packet_size;
1152                         buffer = (u8 *) g_log_chan[temp].buffer_alloc(g_log_chan[temp].packet_size, &byte_offset, &p);
1153                         rx_desc_p->Data_Pointer = (u32) CPHYSADDR((u32) buffer);
1154                         rx_desc_p->status.field.byte_offset = byte_offset;
1155                         /* FIXME: should check that the address complies with the
1156                            burst length requirement */
1157                         g_log_chan[temp].opt[j] = p;
1158                         rx_desc_p->status.field.OWN = DMA_OWN;
1159
1160                 }
1161                 /* open or close the channel */
1162                 if (g_log_chan[temp].control)
1163                         open_channel(temp);
1164                 else
1165                         close_channel(temp);
1166         }
1167
1168         for (i = 0; i < dma_dev->num_tx_chan; i++) {
1169                 temp = dma_dev->logic_tx_chan_base + i;
1170                 g_log_chan[temp].dma_dev = dma_dev;
1171                 g_log_chan[temp].weight = dma_dev->tx_chan[i].weight;
1172                 g_log_chan[temp].default_weight = dma_dev->tx_chan[i].weight;
1173                 g_log_chan[temp].current_desc = 0;
1174                 g_log_chan[temp].desc_ofst = DEFAULT_OFFSET;
1175                 g_log_chan[temp].desc_len = dma_dev->tx_chan[i].desc_num;
1176                 g_log_chan[temp].offset_from_base = temp * DEFAULT_OFFSET;
1177                 g_log_chan[temp].packet_size = dma_dev->tx_chan[i].packet_size;
1178
1179                 AMAZON_DMA_REG32(AMAZON_DMA_CH0_DES_LEN + temp * AMAZON_DMA_CH_STEP) = dma_dev->tx_chan[i].desc_num;
1180                 // enable interrupt mask
1181 #ifdef NO_TX_INT
1182                 AMAZON_DMA_REG32(AMAZON_DMA_CH0_MSK + temp * AMAZON_DMA_CH_STEP) = 0x3e;
1183 #else
1184                 AMAZON_DMA_REG32(AMAZON_DMA_CH0_MSK + temp * AMAZON_DMA_CH_STEP) = 0x36;
1185 #endif
1186
1187                 strcpy(g_log_chan[temp].device_name, dma_dev->device_name);
1188                 g_log_chan[temp].burst_len = dma_dev->tx_burst_len;
1189                 g_log_chan[temp].control = dma_dev->tx_chan[i].control;
1190
1191                 if (dma_dev->buffer_alloc)
1192                         g_log_chan[temp].buffer_alloc = dma_dev->buffer_alloc;
1193                 else
1194                         g_log_chan[temp].buffer_alloc = common_buffer_alloc;
1195
1196                 if (dma_dev->buffer_free)
1197                         g_log_chan[temp].buffer_free = dma_dev->buffer_free;
1198                 else
1199                         g_log_chan[temp].buffer_free = common_buffer_free;
1200
1201                 if (dma_dev->intr_handler)
1202                         g_log_chan[temp].intr_handler = dma_dev->intr_handler;
1203                 else
1204                         g_log_chan[temp].intr_handler = NULL;
1205
1206                 for (j = 0; j < g_log_chan[temp].desc_len; j++) {
1207
1208                         tx_desc_p =
1209                                 (struct tx_desc *) (g_desc_list +
1210                                                                         g_log_chan[temp].offset_from_base + j);
1211                         tx_desc_p->status.word = 0;
1212                         tx_desc_p->status.field.data_length =
1213                                 g_log_chan[temp].packet_size;
1214                         tx_desc_p->status.field.OWN = CPU_OWN;
1215
1216                 }
                /* work around a DMA pitfall: never turn a channel on unless it
                   has proper descriptors */
                if (!g_log_chan[temp].control) {
                        close_channel(temp);
                }

        }

        return 0;
}

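/*
 * dma_device_register: attach a client driver to the DMA core.  The device
 * is put on the global device list; devices with a known name ("switch1",
 * "switch2", "TPE", "DPlus") are also bound to their logical channels via
 * register_dev().
 *
 * Rough usage sketch (hypothetical client; field names are taken from this
 * file, the exact set-up a real client needs may differ):
 *
 *	strcpy(dev->device_name, "switch1");
 *	dev->weight = 1;
 *	dev->buffer_alloc = my_buffer_alloc;	// optional, else common_buffer_alloc
 *	dev->buffer_free = my_buffer_free;	// optional, else common_buffer_free
 *	dev->intr_handler = my_intr_handler;	// optional
 *	dma_device_register(dev);
 *	...
 *	dma_device_unregister(dev);
 */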
int dma_device_register(struct dma_device_info *dma_dev)
{
        dev_list *temp_dev;
        temp_dev = (dev_list *) kmalloc(sizeof(dev_list), GFP_KERNEL);
        if (temp_dev == NULL)
                return -ENOMEM;
        temp_dev->dev = dma_dev;
        temp_dev->weight = dma_dev->weight;
        insert_dev_list(temp_dev);
        /* check whether this is a known device */
        if ((strcmp(dma_dev->device_name, "switch1") == 0)
                || (strcmp(dma_dev->device_name, "TPE") == 0)
                || (strcmp(dma_dev->device_name, "switch2") == 0)
                || (strcmp(dma_dev->device_name, "DPlus") == 0)) {
                register_dev(dma_dev);
        }

        return 0;
}

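/*
 * unregister_dev: close all RX/TX channels owned by the device and hand the
 * RX buffers back through the client's buffer_free() callback.
 */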
int unregister_dev(struct dma_device_info *dma_dev)
{
        int i, j, temp;
        u8 *buffer;
        struct rx_desc *rx_desc_p;

        for (i = 0; i < dma_dev->num_rx_chan; i++) {
                temp = dma_dev->logic_rx_chan_base + i;
                close_channel(temp);
                for (j = 0; j < g_log_chan[temp].desc_len; j++) {
                        rx_desc_p = (struct rx_desc *) (g_desc_list +
                                        g_log_chan[temp].offset_from_base + j);
                        buffer = (u8 *) __va(rx_desc_p->Data_Pointer);
                        g_log_chan[temp].buffer_free(buffer, g_log_chan[temp].opt[j]);
                }
        }
        for (i = 0; i < dma_dev->num_tx_chan; i++) {
                temp = dma_dev->logic_tx_chan_base + i;
                close_channel(temp);
        }
        return 0;
}

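/*
 * dma_device_unregister: reverse of dma_device_register().  Known devices are
 * detached from their channels first, then the matching entry is unlinked
 * from the global device list and freed.
 */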
int dma_device_unregister(struct dma_device_info *dev)
{
        dev_list *temp_dev;
        for (temp_dev = g_head_dev; temp_dev; temp_dev = temp_dev->next) {
                if (strcmp(dev->device_name, temp_dev->dev->device_name) == 0) {
                        if ((strcmp(dev->device_name, "switch1") == 0)
                                || (strcmp(dev->device_name, "TPE") == 0)
                                || (strcmp(dev->device_name, "switch2") == 0)
                                || (strcmp(dev->device_name, "DPlus") == 0))
                                unregister_dev(dev);
                        if (temp_dev == g_head_dev) {
                                g_head_dev = temp_dev->next;
                                kfree(temp_dev);
                        } else {
                                if (temp_dev == g_tail_dev)
                                        g_tail_dev = temp_dev->prev;
                                if (temp_dev->prev)
                                        temp_dev->prev->next = temp_dev->next;
                                if (temp_dev->next)
                                        temp_dev->next->prev = temp_dev->prev;
                                kfree(temp_dev);
                        }
                        break;
                }

        }
        return 0;
}

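/*
 * dma_device_update_rx: apply the client's per-channel control flags to the
 * hardware, opening or closing each RX channel accordingly.
 */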
void dma_device_update_rx(struct dma_device_info *dma_dev)
{
        int i, temp;
        for (i = 0; i < dma_dev->num_rx_chan; i++) {
                temp = dma_dev->logic_rx_chan_base + i;
                g_log_chan[temp].control = dma_dev->rx_chan[i].control;

                if (g_log_chan[temp].control)
                        open_channel(temp);
                else
                        close_channel(temp);
        }

}

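/*
 * dma_device_update_tx: TX counterpart of the RX update.  Disabled channels
 * are closed here; an enabled channel is only opened lazily when the first
 * packet is sent (cf. the pitfall workaround during channel setup).
 */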
void dma_device_update_tx(struct dma_device_info *dma_dev)
{
        int i, temp;
        for (i = 0; i < dma_dev->num_tx_chan; i++) {
                temp = dma_dev->logic_tx_chan_base + i;
                g_log_chan[temp].control = dma_dev->tx_chan[i].control;
                if (g_log_chan[temp].control) {
                        /* the channel is turned on when the very first packet is sent */
                        // open_channel(temp);
                } else
                        close_channel(temp);
        }
}

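/*
 * dma_device_update: convenience wrapper that refreshes both the RX and the
 * TX channel state of a client device.
 */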
int dma_device_update(struct dma_device_info *dma_dev)
{
        dma_device_update_rx(dma_dev);
        dma_device_update_tx(dma_dev);
        return 0;
}

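/* character-device hooks; nothing needs to be set up or torn down per open */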
static int dma_open(struct inode *inode, struct file *file)
{
        return 0;
}

static int dma_release(struct inode *inode, struct file *file)
{
        /* release the resources */
        return 0;
}

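/*
 * dma_ioctl: minimal control interface.
 *   cmd 1 - read back the default weight of a channel
 *   cmd 2 - set a new default weight for a channel
 * arg points to two ints in user space: { channel number, weight }.
 */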
static int dma_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
        int value = 0;
        int result = 0;
        int chan_no = 0;

        switch (cmd) {
        case 0:                 /* get register value */
                break;
        case 1:                 /* return channel weight */
                if (get_user(chan_no, (int *) arg))
                        return -EFAULT;
                if (chan_no < 0 || chan_no >= CHAN_TOTAL_NUM)
                        return -EINVAL;
                if (put_user(g_log_chan[chan_no].default_weight, (int *) arg + 1))
                        return -EFAULT;
                break;
        case 2:                 /* set channel weight */
                if (get_user(chan_no, (int *) arg))
                        return -EFAULT;
                if (get_user(value, (int *) arg + 1))
                        return -EFAULT;
                if (chan_no < 0 || chan_no >= CHAN_TOTAL_NUM)
                        return -EINVAL;
                printk("new weight=%08x\n", value);
                g_log_chan[chan_no].default_weight = value;
                break;
        default:
                break;
        }
        return result;
}

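/* file operations exported through the DMA_MAJOR character device */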
static struct file_operations dma_fops = {
        .owner = THIS_MODULE,
        .open = dma_open,
        .release = dma_release,
        .ioctl = dma_ioctl,
};

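/*
 * dma_init: core bring-up, run at arch_initcall time.  Registers the
 * character device and the DMA interrupt, allocates one DMA-capable page for
 * the shared descriptor list, exports a few proc entries and resets the
 * controller via dma_chip_init().
 */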
static int dma_init(void)
{
        int result = 0;
        int i;
        unsigned long page;

        printk("initialising dma core\n");
        result = register_chrdev(DMA_MAJOR, "dma-core", &dma_fops);
        if (result) {
                AMAZON_DMA_EMSG("cannot register device dma-core!\n");
                return result;
        }
        result = request_irq(AMAZON_DMA_INT, dma_interrupt, SA_INTERRUPT, "dma-core", (void *) &dma_interrupt);
        if (result) {
                AMAZON_DMA_EMSG("error, cannot get dma_irq!\n");
                unregister_chrdev(DMA_MAJOR, "dma-core");
                return -EFAULT;
        }

        /* the shared descriptor list lives in one uncached (KSEG1) page */
        page = __get_free_page(GFP_DMA);
        if (page == 0) {
                AMAZON_DMA_EMSG("no memory for descriptor\n");
                free_irq(AMAZON_DMA_INT, (void *) &dma_interrupt);
                unregister_chrdev(DMA_MAJOR, "dma-core");
                return -ENOMEM;
        }
        g_desc_list = (u64 *) KSEG1ADDR(page);
        memset(g_desc_list, 0, PAGE_SIZE);
        AMAZON_DMA_REG32(AMAZON_DMA_Desc_BA) = (u32) CPHYSADDR((u32) g_desc_list);
        g_amazon_dma_dir = proc_mkdir("amazon_dma", NULL);
        create_proc_read_entry("dma_register", 0, g_amazon_dma_dir, dma_register_proc_read, NULL);
        create_proc_read_entry("g_desc_list", 0, g_amazon_dma_dir, desc_list_proc_read, NULL);
        create_proc_read_entry("channel_weight", 0, g_amazon_dma_dir, channel_weight_proc_read, NULL);

        dma_chip_init();
        for (i = 0; i < (RX_CHAN_NUM + 1); i++) {
                rx_chan_list[i] = -1;
        }
        for (i = 0; i < (TX_CHAN_NUM + 1); i++) {
                tx_chan_list[i] = -1;
        }

        for (i = 0; i < CHAN_TOTAL_NUM; i++) {
                comb_isr_mask[i] = 0x80000000 >> (i);
        }

        g_log_chan[CHAN_TOTAL_NUM].weight = 0;
        printk("initialising dma core ... done\n");

        return 0;
}

arch_initcall(dma_init);


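/*
 * dma_cleanup: undo dma_init(): drop the character device, free the device
 * list, the descriptor page, the proc entries and the DMA interrupt.
 */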
void dma_cleanup(void)
{
        dev_list *temp_dev, *next_dev;

        unregister_chrdev(DMA_MAJOR, "dma-core");
        /* fetch the next pointer before freeing the current entry */
        for (temp_dev = g_head_dev; temp_dev; temp_dev = next_dev) {
                next_dev = temp_dev->next;
                kfree(temp_dev);
        }
        free_page(KSEG0ADDR((unsigned long) g_desc_list));
        remove_proc_entry("channel_weight", g_amazon_dma_dir);
        remove_proc_entry("g_desc_list", g_amazon_dma_dir);
        remove_proc_entry("dma_register", g_amazon_dma_dir);
        remove_proc_entry("amazon_dma", NULL);
        /* release the resources */
        free_irq(AMAZON_DMA_INT, (void *) &dma_interrupt);
}

EXPORT_SYMBOL(dma_device_register);
EXPORT_SYMBOL(dma_device_unregister);
EXPORT_SYMBOL(dma_device_read);
EXPORT_SYMBOL(dma_device_write);
EXPORT_SYMBOL(dma_device_update);
EXPORT_SYMBOL(dma_device_update_rx);

MODULE_LICENSE("GPL");