1 #include <linux/atmdev.h>
/*
 * mailbox_signal() - raise an IGU3 mailbox interrupt towards the PPE
 * firmware for the given DMA channel.
 *
 * TX channels map to ISR bits 16..31 (channel + 16), RX channels to
 * bits 0..15.  Before setting a bit, busy-wait until any previously
 * raised signal on the same bit has been consumed by the firmware.
 *
 * NOTE(review): the braces and the if/else selecting on @is_tx are on
 * lines not visible in this chunk — presumably is_tx != 0 chooses the
 * "channel + 16" (TX) branch; confirm against the full file.
 */
6 void mailbox_signal(unsigned int channel, int is_tx)
/* TX path: wait for bit (channel + 16) to clear, then set it. */
10 while(MBOX_IGU3_ISR_ISR(channel + 16));
11 *MBOX_IGU3_ISRS = MBOX_IGU3_ISRS_SET(channel + 16);
/* RX path: same clear-then-set handshake on the raw channel bit. */
13 while(MBOX_IGU3_ISR_ISR(channel));
14 *MBOX_IGU3_ISRS = MBOX_IGU3_ISRS_SET(channel);
/*
 * mailbox_rx_irq_handler() - service one completed RX descriptor on a
 * DMA channel: hand the received AAL5 PDU / raw cell (or OAM cell) up
 * to the ATM stack and recycle the descriptor with a fresh buffer.
 *
 * @channel: RX DMA channel index.
 * @len:     optional out parameter; on the successful AAL5 path it
 *           receives the payload length (reg_desc.datalen) so the
 *           caller can do per-cell weight accounting.  May be NULL.
 *
 * Returns 0 when a descriptor was consumed (the non-zero returns are
 * on lines not visible in this chunk).
 *
 * NOTE(review): this chunk is missing many lines — braces, the
 * declarations of skb/skb_base/conn/vcc, the copy of *desc into
 * reg_desc, the vcc->push() delivery call, and several else branches.
 * Comments below state only what the visible lines establish.
 */
18 static int mailbox_rx_irq_handler(unsigned int channel, unsigned int *len)
22 register struct rx_descriptor reg_desc;
23 struct rx_descriptor *desc;
26 struct rx_inband_trailer *trailer;
28 /* get sk_buff pointer and descriptor */
/* Flat index of the channel's current read slot in the shared ring. */
29 skb_base = ppe_dev.dma.rx_descriptor_number * channel + ppe_dev.dma.rx_desc_read_pos[channel];
30 desc = &ppe_dev.dma.rx_descriptor_base[skb_base];
/* Descriptor still firmware-owned or not complete: nothing to do. */
32 if ( reg_desc.own || !reg_desc.c )
/* Advance the read position, wrapping at the ring size. */
35 if ( ++ppe_dev.dma.rx_desc_read_pos[channel] == ppe_dev.dma.rx_descriptor_number )
36 ppe_dev.dma.rx_desc_read_pos[channel] = 0;
/* The sk_buff pointer is stashed in the 4 bytes immediately before the
 * DMA data buffer; dataptr is a word address (hence << 2), accessed
 * through the MIPS KSEG0 segment. */
38 skb = *(struct sk_buff **)((((u32)reg_desc.dataptr << 2) | KSEG0) - 4);
/* Sanity check: a valid kernel pointer must lie above the KSEG0 base. */
39 if ( (u32)skb <= 0x80000000 )
42 printk("skb problem: skb = %08X, system is panic!\n", (u32)skb);
/* OAM queue: resolve the real connection from the cell header. */
48 if ( conn == ppe_dev.oam_rx_queue )
51 struct uni_cell_header *header = (struct uni_cell_header *)skb->data;
/* F5 OAM cells are identified by PTI; F4 OAM cells by VCI 3/4. */
53 if ( header->pti == ATM_PTI_SEGF5 || header->pti == ATM_PTI_E2EF5 )
54 conn = find_vpivci(header->vpi, header->vci);
55 else if ( header->vci == 0x03 || header->vci == 0x04 )
56 conn = find_vpi(header->vpi);
/* Deliver the OAM cell to the VCC's push_oam hook, if registered. */
60 if ( conn >= 0 && ppe_dev.connection[conn].vcc != NULL )
62 vcc = ppe_dev.connection[conn].vcc;
63 ppe_dev.connection[conn].access_time = xtime;
64 if ( vcc->push_oam != NULL )
65 vcc->push_oam(vcc, skb->data);
68 /* don't need resize */
/* Data path: normal traffic for an open connection. */
75 if ( ppe_dev.connection[conn].vcc != NULL )
77 vcc = ppe_dev.connection[conn].vcc;
80 if ( vcc->qos.aal == ATM_AAL5 )
/* Trim the skb to the received AAL5 PDU: size for offset + payload,
 * skip the DMA byte offset, then expose datalen bytes. */
83 resize_skb_rx(skb, reg_desc.datalen + reg_desc.byteoff, 0);
84 skb_reserve(skb, reg_desc.byteoff);
85 skb_put(skb, reg_desc.datalen);
/* Same pointer sanity check for the skb's ATM control block. */
87 if ( (u32)ATM_SKB(skb) <= 0x80000000 )
90 printk("ATM_SKB(skb) problem: ATM_SKB(skb) = %08X, system is panic!\n", (u32)ATM_SKB(skb));
93 ATM_SKB(skb)->vcc = vcc;
94 ppe_dev.connection[conn].access_time = xtime;
/* atm_charge() accounts the buffer against the VCC's RX quota;
 * non-zero return means we may pass the skb up the stack. */
95 if ( atm_charge(vcc, skb->truesize) )
97 struct sk_buff *new_skb;
/* Allocate a replacement buffer so the descriptor can be recycled
 * while the received skb travels up the stack. */
99 new_skb = alloc_skb_rx();
103 UPDATE_VCC_STAT(conn, rx_pdu, 1);
105 ppe_dev.mib.wrx_pdu++;
107 atomic_inc(&vcc->stats->rx);
/* Debug block: snapshot the VCC stats, then report which counter
 * changed — presumably around the (not visible) vcc->push() call. */
110 struct k_atm_aal_stats stats = *vcc->stats;
114 if ( vcc->stats->rx.counter != stats.rx.counter )
116 printk("vcc->stats->rx (diff) = %d", vcc->stats->rx.counter - stats.rx.counter);
119 if ( vcc->stats->rx_err.counter != stats.rx_err.counter )
121 printk("vcc->stats->rx_err (diff) = %d", vcc->stats->rx_err.counter - stats.rx_err.counter);
124 if ( vcc->stats->rx_drop.counter != stats.rx_drop.counter )
126 printk("vcc->stats->rx_drop (diff) = %d", vcc->stats->rx_drop.counter - stats.rx_drop.counter);
129 if ( vcc->stats->tx.counter != stats.tx.counter )
131 printk("vcc->stats->tx (diff) = %d", vcc->stats->tx.counter - stats.tx.counter);
134 if ( vcc->stats->tx_err.counter != stats.tx_err.counter )
136 printk("vcc->stats->tx_err (diff) = %d", vcc->stats->tx_err.counter - stats.tx_err.counter);
140 printk("vcc->stats not changed");
/* Give the fresh buffer to the descriptor (stored as word address). */
142 reg_desc.dataptr = (u32)new_skb->data >> 2;
/* Report the PDU length to the caller for weight bookkeeping. */
145 *len = reg_desc.datalen;
/* alloc_skb_rx() failed: drop the PDU, keep recycling the old skb. */
150 UPDATE_VCC_STAT(conn, rx_sw_drop_pdu, 1);
152 ppe_dev.mib.wrx_drop_pdu++;
154 atomic_inc(&vcc->stats->rx_drop);
156 resize_skb_rx(skb, ppe_dev.aal5.rx_buffer_size, 0);
161 /* not enough space — atm_charge() refused the buffer: drop the PDU */
162 UPDATE_VCC_STAT(conn, rx_sw_drop_pdu, 1);
164 ppe_dev.mib.wrx_drop_pdu++;
166 atomic_inc(&vcc->stats->rx_drop);
168 resize_skb_rx(skb, ppe_dev.aal5.rx_buffer_size, 0);
/* Non-AAL5 (raw cell) path: deliver exactly one ATM cell. */
174 resize_skb_rx(skb, CELL_SIZE, 1);
175 skb_put(skb, CELL_SIZE);
177 ATM_SKB(skb)->vcc = vcc;
178 ppe_dev.connection[conn].access_time = xtime;
179 if ( atm_charge(vcc, skb->truesize) )
181 struct sk_buff *new_skb;
183 new_skb = alloc_skb_rx();
187 atomic_inc(&vcc->stats->rx);
189 reg_desc.dataptr = (u32)new_skb->data >> 2;
/* Cell-path drop branches (allocation failure / charge refused). */
197 atomic_inc(&vcc->stats->rx_drop);
198 resize_skb_rx(skb, ppe_dev.aal5.rx_buffer_size, 0);
204 atomic_inc(&vcc->stats->rx_drop);
205 resize_skb_rx(skb, ppe_dev.aal5.rx_buffer_size, 0);
/* Firmware flagged this descriptor as erroneous. */
210 printk("reg_desc.err\n");
212 /* drop packet/cell */
213 if ( vcc->qos.aal == ATM_AAL5 )
215 UPDATE_VCC_STAT(conn, rx_err_pdu, 1);
/* The in-band status trailer follows the payload, rounded up to the
 * next DMA_ALIGNMENT boundary. */
217 trailer = (struct rx_inband_trailer *)((u32)skb->data + ((reg_desc.byteoff + reg_desc.datalen + DMA_ALIGNMENT - 1) & ~ (DMA_ALIGNMENT - 1)));
218 if ( trailer->stw_crc )
219 ppe_dev.connection[conn].aal5_vcc_crc_err++;
220 if ( trailer->stw_ovz )
221 ppe_dev.connection[conn].aal5_vcc_oversize_sdu++;
224 atomic_inc(&vcc->stats->rx_err);
225 /* don't need resize */
/* No VCC open for this connection: count and drop. */
230 printk("ppe_dev.connection[%d].vcc == NULL\n", conn);
232 ppe_dev.mib.wrx_drop_pdu++;
234 /* don't need resize */
/* Reset descriptor fields before handing it back to the firmware. */
238 reg_desc.byteoff = 0;
239 reg_desc.datalen = ppe_dev.aal5.rx_buffer_size;
243 /* write descriptor back to memory */
246 printk("leave mailbox_rx_irq_handler");
/*
 * mailbox_tx_irq_handler() - reclaim completed TX descriptors for one
 * connection and free the transmitted sk_buffs.
 *
 * @conn: connection index; TX rings are indexed relative to
 *        QSB_QUEUE_NUMBER_BASE.
 *
 * NOTE(review): braces, the declarations of skb/desc_base/release_pos,
 * and the loop-exit lines are not visible in this chunk.
 */
251 static inline void mailbox_tx_irq_handler(unsigned int conn)
/* Only reclaim while the allocator has outstanding descriptors. */
253 if ( ppe_dev.dma.tx_desc_alloc_flag[conn] )
259 release_pos = &ppe_dev.dma.tx_desc_release_pos[conn];
/* Flat index of the next descriptor to release in this ring. */
260 desc_base = ppe_dev.dma.tx_descriptor_number * (conn - QSB_QUEUE_NUMBER_BASE) + *release_pos;
/* own == 0 means the PP32 firmware has finished transmitting it. */
261 while ( !ppe_dev.dma.tx_descriptor_base[desc_base].own )
263 skb = ppe_dev.dma.tx_skb_pointers[desc_base];
265 ppe_dev.dma.tx_descriptor_base[desc_base].own = 1; // pretend PP32 hold owner bit, so that won't be released more than once, so allocation process don't check this bit
/* Advance the release position, wrapping at the ring size. */
267 if ( ++*release_pos == ppe_dev.dma.tx_descriptor_number )
/* Caught up with the allocation position: ring fully reclaimed. */
270 if ( *release_pos == ppe_dev.dma.tx_desc_alloc_pos[conn] )
272 ppe_dev.dma.tx_desc_alloc_flag[conn] = 0;
/* NOTE(review): presumably this frees the last skb and leaves the
 * loop once the ring is empty — surrounding lines are missing. */
274 atm_free_tx_skb_vcc(skb);
/* Position wrapped to 0: recompute the flat base index. */
278 if ( *release_pos == 0 )
279 desc_base = ppe_dev.dma.tx_descriptor_number * (conn - QSB_QUEUE_NUMBER_BASE);
/* Free the transmitted buffer and continue with the next descriptor. */
283 atm_free_tx_skb_vcc(skb);
288 #if defined(ENABLE_RX_QOS) && ENABLE_RX_QOS
/*
 * check_desc_valid() - non-destructively test whether the next RX
 * descriptor on @channel is ready for the CPU.
 *
 * Returns 1 when the descriptor at the channel's current read position
 * is CPU-owned (own == 0) and marked complete (c set), else 0.  Used
 * by the RX-QoS arbitration in mailbox_irq_handler() to peek at
 * competing channels without consuming anything.
 *
 * NOTE(review): the declaration of skb_base is on a line not visible
 * in this chunk.
 */
289 static inline int check_desc_valid(unsigned int channel)
292 struct rx_descriptor *desc;
/* Same flat ring-index computation as mailbox_rx_irq_handler(). */
294 skb_base = ppe_dev.dma.rx_descriptor_number * channel + ppe_dev.dma.rx_desc_read_pos[channel];
295 desc = &ppe_dev.dma.rx_descriptor_base[skb_base];
296 return !desc->own && desc->c ? 1 : 0;
/*
 * mailbox_irq_handler() - top-level mailbox (IGU1) interrupt service
 * routine for the PPE.
 *
 * Flow as visible in this chunk:
 *   1. Return immediately if no IGU1 interrupt bit is pending.
 *   2. Scan RX channels: acknowledge each pending bit, wait for the
 *      IGU3 mailbox to clear, and shadow the firmware's count of valid
 *      descriptors (vlddes) into rx_irq_number[].
 *   3. Scan TX channels (bits 16 + QSB_QUEUE_NUMBER_BASE upward):
 *      acknowledge and call mailbox_tx_irq_handler().
 *   4. Drain the shadowed RX work.  With ENABLE_RX_QOS a weighted
 *      arbitration across CBR / VBR_RT / VBR_NRT / AVR / UBR channels
 *      decides service order; otherwise a plain round-robin loop runs.
 *
 * NOTE(review): many lines are missing from this chunk — braces, the
 * switch(channel) statement and several case labels, the RX-scan
 * initialisation of channel/channel_mask, and the loop increments.
 */
300 irqreturn_t mailbox_irq_handler(int irq, void *dev_id)
302 int channel_mask; /* DMA channel accordant IRQ bit mask */
/* Per-channel count of descriptors to service, snapshot at IRQ time. */
304 unsigned int rx_irq_number[MAX_RX_DMA_CHANNEL_NUMBER] = {0};
305 unsigned int total_rx_irq_number = 0;
307 printk("mailbox_irq_handler");
/* Spurious interrupt: nothing pending in IGU1. */
309 if ( !*MBOX_IGU1_ISR )
310 return IRQ_RETVAL(1);
/* --- Pass 1: scan RX channels and shadow their pending work. --- */
314 while ( channel < ppe_dev.dma.rx_total_channel_used )
316 if ( (*MBOX_IGU1_ISR & channel_mask) )
/* Acknowledge (clear) this channel's IGU1 interrupt bit. */
320 *MBOX_IGU1_ISRC = channel_mask;
321 printk(" RX: *MBOX_IGU1_ISR = 0x%08X\n", *MBOX_IGU1_ISR);
322 /* wait for mailbox cleared */
323 while ( (*MBOX_IGU3_ISR & channel_mask) );
325 /* shadow the number of valid descriptor */
326 rx_irq_number[channel] = WRX_DMA_CHANNEL_CONFIG(channel)->vlddes;
328 total_rx_irq_number += rx_irq_number[channel];
330 printk("total_rx_irq_number = %d", total_rx_irq_number);
331 printk("vlddes = %d, rx_irq_number[%d] = %d, total_rx_irq_number = %d\n", WRX_DMA_CHANNEL_CONFIG(channel)->vlddes, channel, rx_irq_number[channel], total_rx_irq_number);
/* --- Pass 2: scan TX channels; their bits start at 16 + base. --- */
338 channel_mask = 1 << (16 + QSB_QUEUE_NUMBER_BASE);
339 channel = QSB_QUEUE_NUMBER_BASE;
340 while ( channel - QSB_QUEUE_NUMBER_BASE < ppe_dev.dma.tx_total_channel_used )
342 if ( (*MBOX_IGU1_ISR & channel_mask) )
344 // if ( channel != 1 )
346 printk("TX irq error\n");
/* Acknowledge and reclaim completed TX descriptors. */
353 *MBOX_IGU1_ISRC = channel_mask;
354 printk(" TX: *MBOX_IGU1_ISR = 0x%08X\n", *MBOX_IGU1_ISR);
355 mailbox_tx_irq_handler(channel);
/* --- Pass 3 (QoS build): weighted arbitration over RX channels. --- */
362 #if defined(ENABLE_RX_QOS) && ENABLE_RX_QOS
364 while ( total_rx_irq_number )
/* CBR channel: highest priority, serviced as soon as possible. */
370 /* handle it as soon as possible */
371 while ( rx_irq_number[channel] != 0 && mailbox_rx_irq_handler(channel, NULL) == 0 )
373 rx_irq_number[channel]--;
374 total_rx_irq_number--;
375 printk("RX_DMA_CH_CBR, total_rx_irq_number = %d", total_rx_irq_number);
376 printk("RX_DMA_CH_CBR, total_rx_irq_number = %d, rx_irq_number[%d] = %d\n", total_rx_irq_number, channel, rx_irq_number[channel]);
377 /* signal firmware that descriptor is updated */
378 mailbox_signal(channel, 0);
380 // if ( rx_irq_number[channel] != 0 )
381 printk("RX_DMA_CH_CBR, rx_irq_number[channel] = %d", rx_irq_number[channel]);
/* VBR_RT: serve only if no competing channel with valid work has a
 * greater-or-equal remaining weight. */
383 case RX_DMA_CH_VBR_RT:
385 if ( rx_irq_number[RX_DMA_CH_VBR_RT] != 0
386 && (rx_irq_number[RX_DMA_CH_VBR_NRT] == 0 || !check_desc_valid(RX_DMA_CH_VBR_NRT) || ppe_dev.dma.rx_weight[RX_DMA_CH_VBR_NRT] < ppe_dev.dma.rx_weight[RX_DMA_CH_VBR_RT])
387 && (rx_irq_number[RX_DMA_CH_AVR] == 0 || !check_desc_valid(RX_DMA_CH_AVR) || ppe_dev.dma.rx_weight[RX_DMA_CH_AVR] < ppe_dev.dma.rx_weight[RX_DMA_CH_VBR_RT])
392 if ( mailbox_rx_irq_handler(RX_DMA_CH_VBR_RT, &len) == 0 )
394 rx_irq_number[RX_DMA_CH_VBR_RT]--;
395 total_rx_irq_number--;
396 printk("RX_DMA_CH_VBR_RT, total_rx_irq_number = %d", total_rx_irq_number);
397 printk("RX_DMA_CH_VBR_RT, total_rx_irq_number = %d, rx_irq_number[%d] = %d\n", total_rx_irq_number, channel, rx_irq_number[channel]);
398 /* signal firmware that descriptor is updated */
399 mailbox_signal(channel, 0);
/* Charge this channel's weight by the cell count of the PDU;
 * on underflow the default weight is re-added (deficit round robin). */
401 len = (len + CELL_SIZE - 1) / CELL_SIZE;
402 if ( ppe_dev.dma.rx_weight[RX_DMA_CH_VBR_RT] <= len )
403 ppe_dev.dma.rx_weight[RX_DMA_CH_VBR_RT] = ppe_dev.dma.rx_default_weight[RX_DMA_CH_VBR_RT] + ppe_dev.dma.rx_weight[RX_DMA_CH_VBR_RT] - len;
406 // if ( rx_irq_number[channel] != 0 )
408 printk("RX_DMA_CH_VBR_RT, rx_irq_number[channel] = %d, total_rx_irq_number = %d", rx_irq_number[channel], total_rx_irq_number);
409 // rx_irq_number[channel] = 0;
410 // total_rx_irq_number = 0;
/* VBR_NRT: same weighted arbitration against VBR_RT and AVR. */
413 case RX_DMA_CH_VBR_NRT:
415 if ( rx_irq_number[RX_DMA_CH_VBR_NRT] != 0
416 && (rx_irq_number[RX_DMA_CH_VBR_RT] == 0 || !check_desc_valid(RX_DMA_CH_VBR_RT) || ppe_dev.dma.rx_weight[RX_DMA_CH_VBR_RT] < ppe_dev.dma.rx_weight[RX_DMA_CH_VBR_NRT])
417 && (rx_irq_number[RX_DMA_CH_AVR] == 0 || !check_desc_valid(RX_DMA_CH_AVR) || ppe_dev.dma.rx_weight[RX_DMA_CH_AVR] < ppe_dev.dma.rx_weight[RX_DMA_CH_VBR_NRT])
422 if ( mailbox_rx_irq_handler(RX_DMA_CH_VBR_NRT, &len) == 0 )
424 rx_irq_number[RX_DMA_CH_VBR_NRT]--;
425 total_rx_irq_number--;
426 printk("RX_DMA_CH_VBR_NRT, total_rx_irq_number = %d", total_rx_irq_number);
427 printk("RX_DMA_CH_VBR_NRT, total_rx_irq_number = %d, rx_irq_number[%d] = %d\n", total_rx_irq_number, channel, rx_irq_number[channel]);
428 /* signal firmware that descriptor is updated */
429 mailbox_signal(channel, 0);
431 len = (len + CELL_SIZE - 1) / CELL_SIZE;
432 if ( ppe_dev.dma.rx_weight[RX_DMA_CH_VBR_NRT] <= len )
433 ppe_dev.dma.rx_weight[RX_DMA_CH_VBR_NRT] = ppe_dev.dma.rx_default_weight[RX_DMA_CH_VBR_NRT] + ppe_dev.dma.rx_weight[RX_DMA_CH_VBR_NRT] - len;
436 // if ( rx_irq_number[channel] != 0 )
437 printk("RX_DMA_CH_VBR_NRT, rx_irq_number[channel] = %d", rx_irq_number[channel]);
/* AVR: arbitration against both VBR classes (case label not visible). */
441 if ( rx_irq_number[RX_DMA_CH_AVR] != 0
442 && (rx_irq_number[RX_DMA_CH_VBR_RT] == 0 || !check_desc_valid(RX_DMA_CH_VBR_RT) || ppe_dev.dma.rx_weight[RX_DMA_CH_VBR_RT] < ppe_dev.dma.rx_weight[RX_DMA_CH_AVR])
443 && (rx_irq_number[RX_DMA_CH_VBR_NRT] == 0 || !check_desc_valid(RX_DMA_CH_VBR_NRT) || ppe_dev.dma.rx_weight[RX_DMA_CH_VBR_NRT] < ppe_dev.dma.rx_weight[RX_DMA_CH_AVR])
448 if ( mailbox_rx_irq_handler(RX_DMA_CH_AVR, &len) == 0 )
450 rx_irq_number[RX_DMA_CH_AVR]--;
451 total_rx_irq_number--;
452 printk("RX_DMA_CH_AVR, total_rx_irq_number = %d", total_rx_irq_number);
453 printk("RX_DMA_CH_AVR, total_rx_irq_number = %d, rx_irq_number[%d] = %d\n", total_rx_irq_number, channel, rx_irq_number[channel]);
454 /* signal firmware that descriptor is updated */
455 mailbox_signal(channel, 0);
457 len = (len + CELL_SIZE - 1) / CELL_SIZE;
458 if ( ppe_dev.dma.rx_weight[RX_DMA_CH_AVR] <= len )
459 ppe_dev.dma.rx_weight[RX_DMA_CH_AVR] = ppe_dev.dma.rx_default_weight[RX_DMA_CH_AVR] + ppe_dev.dma.rx_weight[RX_DMA_CH_AVR] - len;
462 // if ( rx_irq_number[channel] != 0 )
463 printk("RX_DMA_CH_AVR, rx_irq_number[channel] = %d", rx_irq_number[channel]);
/* UBR / default: lowest priority — serviced only when nothing else
 * has a valid descriptor pending. */
467 /* Handle it when all others are handled or others are not available to handle. */
468 if ( rx_irq_number[channel] != 0
469 && (rx_irq_number[RX_DMA_CH_VBR_RT] == 0 || !check_desc_valid(RX_DMA_CH_VBR_RT))
470 && (rx_irq_number[RX_DMA_CH_VBR_NRT] == 0 || !check_desc_valid(RX_DMA_CH_VBR_NRT))
471 && (rx_irq_number[RX_DMA_CH_AVR] == 0 || !check_desc_valid(RX_DMA_CH_AVR)) )
472 if ( mailbox_rx_irq_handler(channel, NULL) == 0 )
474 rx_irq_number[channel]--;
475 total_rx_irq_number--;
476 printk("RX_DMA_CH_UBR, total_rx_irq_number = %d, rx_irq_number[%d] = %d", total_rx_irq_number, channel, rx_irq_number[channel]);
477 printk("RX_DMA_CH_UBR, total_rx_irq_number = %d, rx_irq_number[%d] = %d\n", total_rx_irq_number, channel, rx_irq_number[channel]);
478 /* signal firmware that descriptor is updated */
479 mailbox_signal(channel, 0);
481 printk("RX_DMA_CH_UBR, rx_irq_number[channel] = %d", rx_irq_number[channel]);
/* Move to the next channel, wrapping to restart the arbitration. */
484 if ( ++channel == ppe_dev.dma.rx_total_channel_used )
/* --- Pass 3 (non-QoS build): plain round-robin RX drain. --- */
489 while ( total_rx_irq_number )
491 while ( rx_irq_number[channel] != 0 && mailbox_rx_irq_handler(channel, NULL) == 0 )
493 rx_irq_number[channel]--;
494 total_rx_irq_number--;
495 /* signal firmware that descriptor is updated */
496 mailbox_signal(channel, 0);
499 if ( ++channel == ppe_dev.dma.rx_total_channel_used )
502 #endif // defined(ENABLE_RX_QOS) && ENABLE_RX_QOS
/* IRQ handled. */
503 return IRQ_RETVAL(1);