/******************************************************************************
**
** FILE NAME    : ifxmips_ptm_vdsl.c
** DESCRIPTION  : PTM driver common source file (core functions for VR9)
** COPYRIGHT    : Copyright (c) 2006
**                Infineon Technologies AG
**                Am Campeon 1-12, 85579 Neubiberg, Germany
**
**    This program is free software; you can redistribute it and/or modify
**    it under the terms of the GNU General Public License as published by
**    the Free Software Foundation; either version 2 of the License, or
**    (at your option) any later version.
**
** $Date        $Author         $Comment
** 07 JUL 2009  Xu Liang        Init Version
*******************************************************************************/
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>

#include "ifxmips_ptm_vdsl.h"
#include <lantiq_soc.h>
#define MODULE_PARM_ARRAY(a, b) module_param_array(a, int, NULL, 0)
#define MODULE_PARM(a, b)       module_param(a, int, 0)

static int wanqos_en = 0;
static int queue_gamma_map[4] = {0xFE, 0x01, 0x00, 0x00};

MODULE_PARM(wanqos_en, "i");
MODULE_PARM_DESC(wanqos_en, "WAN QoS support, 1 - enabled, 0 - disabled.");

MODULE_PARM_ARRAY(queue_gamma_map, "4-4i");
MODULE_PARM_DESC(queue_gamma_map, "TX QoS queues mapping to 4 TX Gamma interfaces.");
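
/*
 * Each queue_gamma_map entry is a bitmask selecting which of the (up to 8)
 * TX QoS queues feed the corresponding TX Gamma interface; with the default
 * {0xFE, 0x01, 0x00, 0x00}, queues 1-7 go to Gamma 0 and queue 0 to Gamma 1.
 * Overlapping bits are stripped in init_priv_data() so that every queue ends
 * up owned by exactly one interface.
 */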
extern int (*ifx_mei_atm_showtime_enter)(struct port_cell_info *, void *);
extern int (*ifx_mei_atm_showtime_exit)(void);
extern int ifx_mei_atm_showtime_check(int *is_showtime, struct port_cell_info *port_cell, void **xdata_addr);

static int g_showtime = 0;
static void *g_xdata_addr = NULL;
#define ENABLE_TMP_DBG 0

unsigned long cgu_get_pp32_clock(void)
{
    struct clk *c = clk_get_ppe();
    unsigned long rate = clk_get_rate(c);

    return rate;
}
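
/*
 * The PP32 is the PPE's embedded packet processor; its clock rate is used
 * in init_tables() below to derive the TX QoS time tick.
 */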
static void ptm_setup(struct net_device *);
static struct net_device_stats *ptm_get_stats(struct net_device *);
static int ptm_open(struct net_device *);
static int ptm_stop(struct net_device *);
static unsigned int ptm_poll(int, unsigned int);
static int ptm_napi_poll(struct napi_struct *, int);
static int ptm_hard_start_xmit(struct sk_buff *, struct net_device *);
static int ptm_ioctl(struct net_device *, struct ifreq *, int);
static void ptm_tx_timeout(struct net_device *);

static inline struct sk_buff* alloc_skb_rx(void);
static inline struct sk_buff* alloc_skb_tx(unsigned int);
static inline struct sk_buff *get_skb_pointer(unsigned int);
static inline int get_tx_desc(unsigned int, unsigned int *);
/*
 *  Mailbox handler and signal function
 */
static irqreturn_t mailbox_irq_handler(int, void *);

/*
 *  Tasklet to handle swap descriptors
 */
static void do_swap_desc_tasklet(unsigned long);
/*
 *  Init & clean-up functions
 */
static inline int init_priv_data(void);
static inline void clear_priv_data(void);
static inline int init_tables(void);
static inline void clear_tables(void);
static int g_wanqos_en = 0;

static int g_queue_gamma_map[4];

static struct ptm_priv_data g_ptm_priv_data;

static struct net_device_ops g_ptm_netdev_ops = {
    .ndo_get_stats       = ptm_get_stats,
    .ndo_open            = ptm_open,
    .ndo_stop            = ptm_stop,
    .ndo_start_xmit      = ptm_hard_start_xmit,
    .ndo_validate_addr   = eth_validate_addr,
    .ndo_set_mac_address = eth_mac_addr,
    .ndo_change_mtu      = eth_change_mtu,
    .ndo_do_ioctl        = ptm_ioctl,
    .ndo_tx_timeout      = ptm_tx_timeout,
};

static struct net_device *g_net_dev[1] = {0};
static char *g_net_dev_name[1] = {"ptm0"};

static int g_ptm_prio_queue_map[8];

static DECLARE_TASKLET(g_swap_desc_tasklet, do_swap_desc_tasklet, 0);

unsigned int ifx_ptm_dbg_enable = DBG_ENABLE_MASK_ERR;
/*
 * ####################################
 *           Local Functions
 * ####################################
 */
static void ptm_setup(struct net_device *dev)
{
    int ndev = 0;   /* only one PTM device exists (see g_net_dev) */

    dev->netdev_ops = &g_ptm_netdev_ops;
    netif_napi_add(dev, &g_ptm_priv_data.itf[ndev].napi, ptm_napi_poll, 16);
    dev->watchdog_timeo = ETH_WATCHDOG_TIMEOUT;

    dev->dev_addr[0] = 0x00;
    dev->dev_addr[1] = 0x20;
    dev->dev_addr[2] = 0xda;
    dev->dev_addr[3] = 0x86;
    dev->dev_addr[4] = 0x23;
    dev->dev_addr[5] = 0x75 + ndev;
}
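
/*
 * Note: the hard-coded 00:20:da:86:23:75 address set above is only a
 * default; userspace can override it via .ndo_set_mac_address (eth_mac_addr).
 */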
static struct net_device_stats *ptm_get_stats(struct net_device *dev)
{
    struct net_device_stats *s;

    if ( dev != g_net_dev[0] )
        return NULL;
    s = &g_ptm_priv_data.itf[0].stats;

    return s;
}
static int ptm_open(struct net_device *dev)
{
    ASSERT(dev == g_net_dev[0], "incorrect device");

    napi_enable(&g_ptm_priv_data.itf[0].napi);

    /* enable RX interrupt */
    IFX_REG_W32_MASK(0, 1, MBOX_IGU1_IER);

    netif_start_queue(dev);

    return 0;
}
static int ptm_stop(struct net_device *dev)
{
    ASSERT(dev == g_net_dev[0], "incorrect device");

    /* disable RX and TX interrupts */
    IFX_REG_W32_MASK(1 | (1 << 17), 0, MBOX_IGU1_IER);

    napi_disable(&g_ptm_priv_data.itf[0].napi);

    netif_stop_queue(dev);

    return 0;
}
static unsigned int ptm_poll(int ndev, unsigned int work_to_do)
{
    unsigned int work_done = 0;
    volatile struct rx_descriptor *desc;
    struct rx_descriptor reg_desc;
    struct sk_buff *skb, *new_skb;

    ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);

    while ( work_done < work_to_do ) {
        desc = &WAN_RX_DESC_BASE[g_ptm_priv_data.itf[0].rx_desc_pos];
        if ( desc->own /* || !desc->c */ )  // if PP32 holds descriptor or descriptor not completed
            break;
        if ( ++g_ptm_priv_data.itf[0].rx_desc_pos == WAN_RX_DESC_NUM )
            g_ptm_priv_data.itf[0].rx_desc_pos = 0;

        /* take a local copy before the descriptor is recycled */
        reg_desc = *desc;
        skb = get_skb_pointer(reg_desc.dataptr);
        ASSERT(skb != NULL, "invalid pointer skb == NULL");

        new_skb = alloc_skb_rx();
        if ( new_skb != NULL ) {
            skb_reserve(skb, reg_desc.byteoff);
            skb_put(skb, reg_desc.datalen);

            // parse protocol header
            skb->dev = g_net_dev[0];
            skb->protocol = eth_type_trans(skb, skb->dev);

            g_net_dev[0]->last_rx = jiffies;

            netif_receive_skb(skb);

            g_ptm_priv_data.itf[0].stats.rx_packets++;
            g_ptm_priv_data.itf[0].stats.rx_bytes += reg_desc.datalen;

            /* hand the fresh buffer to the firmware */
            reg_desc.dataptr = (unsigned int)new_skb->data & 0x0FFFFFFF;
            reg_desc.byteoff = RX_HEAD_MAC_ADDR_ALIGNMENT;
        }

        reg_desc.datalen = RX_MAX_BUFFER_SIZE - RX_HEAD_MAC_ADDR_ALIGNMENT;
        reg_desc.own = 1;
        reg_desc.c   = 0;

        /* write descriptor to memory: data word first, then the word
           carrying the own bit */
        *((volatile unsigned int *)desc + 1) = *((unsigned int *)&reg_desc + 1);
        wmb();
        *(volatile unsigned int *)desc = *(unsigned int *)&reg_desc;

        work_done++;
    }

    return work_done;
}
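
/*
 * A note on the two-step descriptor write above (the same convention is used
 * by the TX path): each DMA descriptor is two 32-bit words, and the word
 * written last holds the "own" bit that hands the buffer back to the PP32
 * firmware. The other word must be globally visible first, which is what the
 * wmb() between the two stores enforces.
 */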
static int ptm_napi_poll(struct napi_struct *napi, int budget)
{
    int ndev = 0;
    unsigned int work_done;

    work_done = ptm_poll(ndev, budget);

    /* interface down */
    if ( !netif_running(napi->dev) ) {
        napi_complete(napi);
        return work_done;
    }

    /* clear RX interrupt */
    IFX_REG_W32_MASK(0, 1, MBOX_IGU1_ISRC);

    /* no more traffic: leave polling mode and re-enable the RX interrupt */
    if (work_done < budget) {
        napi_complete(napi);
        IFX_REG_W32_MASK(0, 1, MBOX_IGU1_IER);
    }

    return work_done;
}
static int ptm_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    unsigned int f_full;
    int desc_base;
    volatile struct tx_descriptor *desc;
    struct tx_descriptor reg_desc = {0};
    struct sk_buff *skb_to_free;
    unsigned int byteoff;

    ASSERT(dev == g_net_dev[0], "incorrect device");

    if ( !g_showtime ) {
        err("not in showtime");
        goto PTM_HARD_START_XMIT_FAIL;
    }

    /* allocate descriptor */
    desc_base = get_tx_desc(0, &f_full);
    if ( f_full ) {
        /* ring is full: stop the queue and arm the TX interrupt so the
           mailbox handler can wake it again */
        dev->trans_start = jiffies;
        netif_stop_queue(dev);

        IFX_REG_W32_MASK(0, 1 << 17, MBOX_IGU1_ISRC);
        IFX_REG_W32_MASK(0, 1 << 17, MBOX_IGU1_IER);
    }
    if ( desc_base < 0 )
        goto PTM_HARD_START_XMIT_FAIL;
    desc = &CPU_TO_WAN_TX_DESC_BASE[desc_base];

    byteoff = (unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1);
    if ( skb_headroom(skb) < sizeof(struct sk_buff *) + byteoff || skb_cloned(skb) ) {
        struct sk_buff *new_skb;

        ASSERT(skb_headroom(skb) >= sizeof(struct sk_buff *) + byteoff, "skb_headroom(skb) < sizeof(struct sk_buff *) + byteoff");
        ASSERT(!skb_cloned(skb), "skb is cloned");

        new_skb = alloc_skb_tx(skb->len);
        if ( new_skb == NULL ) {
            goto ALLOC_SKB_TX_FAIL;
        }
        skb_put(new_skb, skb->len);
        memcpy(new_skb->data, skb->data, skb->len);
        dev_kfree_skb_any(skb);
        skb = new_skb;
        byteoff = (unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1);
        /* write back to physical memory */
        dma_cache_wback((unsigned long)skb->data, skb->len);
    }

    /* stash the skb pointer right below the data buffer so it can be
       recovered from the descriptor later (see get_skb_pointer) */
    *(struct sk_buff **)((unsigned int)skb->data - byteoff - sizeof(struct sk_buff *)) = skb;
    /* write back to physical memory */
    dma_cache_wback((unsigned long)skb->data - byteoff - sizeof(struct sk_buff *), skb->len + byteoff + sizeof(struct sk_buff *));

    /* free previous skb */
    skb_to_free = get_skb_pointer(desc->dataptr);
    if ( skb_to_free != NULL )
        dev_kfree_skb_any(skb_to_free);

    /* update descriptor */
    reg_desc.dataptr = (unsigned int)skb->data & (0x0FFFFFFF ^ (DATA_BUFFER_ALIGNMENT - 1));
    reg_desc.datalen = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
    reg_desc.qid     = g_ptm_prio_queue_map[skb->priority > 7 ? 7 : skb->priority];
    reg_desc.byteoff = byteoff;
    reg_desc.own     = 1;
    reg_desc.c       = 0;
    reg_desc.sop = reg_desc.eop = 1;

    g_ptm_priv_data.itf[0].stats.tx_packets++;
    g_ptm_priv_data.itf[0].stats.tx_bytes += reg_desc.datalen;

    /* write descriptor to memory: data word first, then ownership */
    *((volatile unsigned int *)desc + 1) = *((unsigned int *)&reg_desc + 1);
    wmb();
    *(volatile unsigned int *)desc = *(unsigned int *)&reg_desc;

    dev->trans_start = jiffies;

    return 0;

ALLOC_SKB_TX_FAIL:
PTM_HARD_START_XMIT_FAIL:
    dev_kfree_skb_any(skb);
    g_ptm_priv_data.itf[0].stats.tx_dropped++;
    return 0;
}
static int ptm_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
    ASSERT(dev == g_net_dev[0], "incorrect device");

    switch ( cmd ) {
    case IFX_PTM_MIB_CW_GET:
        ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxNoIdleCodewords   = IFX_REG_R32(DREG_AR_CELL0) + IFX_REG_R32(DREG_AR_CELL1);
        ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxIdleCodewords     = IFX_REG_R32(DREG_AR_IDLE_CNT0) + IFX_REG_R32(DREG_AR_IDLE_CNT1);
        ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxCodingViolation   = IFX_REG_R32(DREG_AR_CVN_CNT0) + IFX_REG_R32(DREG_AR_CVN_CNT1) + IFX_REG_R32(DREG_AR_CVNP_CNT0) + IFX_REG_R32(DREG_AR_CVNP_CNT1);
        ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifTxNoIdleCodewords   = IFX_REG_R32(DREG_AT_CELL0) + IFX_REG_R32(DREG_AT_CELL1);
        ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifTxIdleCodewords     = IFX_REG_R32(DREG_AT_IDLE_CNT0) + IFX_REG_R32(DREG_AT_IDLE_CNT1);
        break;
    case IFX_PTM_MIB_FRAME_GET:
        {
            PTM_FRAME_MIB_T data = {0};
            int i;

            data.RxCorrect = IFX_REG_R32(DREG_AR_HEC_CNT0) + IFX_REG_R32(DREG_AR_HEC_CNT1) + IFX_REG_R32(DREG_AR_AIIDLE_CNT0) + IFX_REG_R32(DREG_AR_AIIDLE_CNT1);
            for ( i = 0; i < 4; i++ )
                data.RxDropped += WAN_RX_MIB_TABLE(i)->wrx_dropdes_pdu;
            for ( i = 0; i < 8; i++ )
                data.TxSend += WAN_TX_MIB_TABLE(i)->wtx_total_pdu;

            *((PTM_FRAME_MIB_T *)ifr->ifr_data) = data;
        }
        break;
    case IFX_PTM_CFG_GET:
        // use bearer channel 0 / preemption gamma interface settings
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcPresent = 1;
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcCheck   = RX_GAMMA_ITF_CFG(0)->rx_eth_fcs_ver_dis == 0 ? 1 : 0;
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcCheck    = RX_GAMMA_ITF_CFG(0)->rx_tc_crc_ver_dis == 0 ? 1 : 0;
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen      = RX_GAMMA_ITF_CFG(0)->rx_tc_crc_size == 0 ? 0 : (RX_GAMMA_ITF_CFG(0)->rx_tc_crc_size * 16);
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxEthCrcGen     = TX_GAMMA_ITF_CFG(0)->tx_eth_fcs_gen_dis == 0 ? 1 : 0;
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcGen      = TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size == 0 ? 0 : 1;
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen      = TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size == 0 ? 0 : (TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size * 16);
        break;
    case IFX_PTM_CFG_SET:
        {
            int i;

            for ( i = 0; i < 4; i++ ) {
                RX_GAMMA_ITF_CFG(i)->rx_eth_fcs_ver_dis = ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcCheck ? 0 : 1;
            }

            RX_GAMMA_ITF_CFG(0)->rx_tc_crc_ver_dis = ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcCheck ? 0 : 1;

            switch ( ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen ) {
            case 16: RX_GAMMA_ITF_CFG(0)->rx_tc_crc_size = 1; break;
            case 32: RX_GAMMA_ITF_CFG(0)->rx_tc_crc_size = 2; break;
            default: RX_GAMMA_ITF_CFG(0)->rx_tc_crc_size = 0;
            }

            TX_GAMMA_ITF_CFG(0)->tx_eth_fcs_gen_dis = ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxEthCrcGen ? 0 : 1;

            if ( ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcGen ) {
                switch ( ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen ) {
                case 16: TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size = 1; break;
                case 32: TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size = 2; break;
                default: TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size = 0;
                }
            }
            else
                TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size = 0;
        }
        break;
    case IFX_PTM_MAP_PKT_PRIO_TO_Q:
        {
            struct ppe_prio_q_map cmd;

            if ( copy_from_user(&cmd, ifr->ifr_data, sizeof(cmd)) )
                return -EFAULT;

            if ( cmd.pkt_prio < 0 || cmd.pkt_prio >= ARRAY_SIZE(g_ptm_prio_queue_map) )
                return -EINVAL;

            if ( cmd.qid < 0 || cmd.qid >= g_wanqos_en )
                return -EINVAL;

            g_ptm_prio_queue_map[cmd.pkt_prio] = cmd.qid;
        }
        break;
    default:
        return -EOPNOTSUPP;
    }

    return 0;
}
static void ptm_tx_timeout(struct net_device *dev)
{
    ASSERT(dev == g_net_dev[0], "incorrect device");

    /* disable TX irq, release skb when sending new packet */
    IFX_REG_W32_MASK(1 << 17, 0, MBOX_IGU1_IER);

    /* wake up TX queue */
    netif_wake_queue(dev);
}
static inline struct sk_buff* alloc_skb_rx(void)
{
    struct sk_buff *skb;

    /* allocate memory including trailer and padding */
    skb = dev_alloc_skb(RX_MAX_BUFFER_SIZE + DATA_BUFFER_ALIGNMENT);
    if ( skb != NULL ) {
        /* must be burst length aligned; reserve two more bytes for MAC address alignment */
        if ( ((unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1)) != 0 )
            skb_reserve(skb, ~((unsigned int)skb->data + (DATA_BUFFER_ALIGNMENT - 1)) & (DATA_BUFFER_ALIGNMENT - 1));
        /* put the skb pointer in the reserved area "skb->data - 4" */
        *((struct sk_buff **)skb->data - 1) = skb;
        /* write back and invalidate cache */
        dma_cache_wback_inv((unsigned long)skb->data - sizeof(skb), sizeof(skb));
        /* invalidate cache */
        dma_cache_inv((unsigned long)skb->data, (unsigned int)skb->end - (unsigned int)skb->data);
    }

    return skb;
}
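
/*
 * Buffer layout produced by alloc_skb_rx()/alloc_skb_tx(), for reference:
 *
 *     ... headroom ... | struct sk_buff * | data (DATA_BUFFER_ALIGNMENT aligned)
 *                        ^ skb->data - 4    ^ skb->data = dataptr in descriptor
 *
 * Only the 28-bit buffer address travels through the PPE descriptors; the
 * owning skb is recovered by reading the pointer stored immediately below
 * the buffer (see get_skb_pointer()).
 */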
static inline struct sk_buff* alloc_skb_tx(unsigned int size)
{
    struct sk_buff *skb;

    /* allocate memory including padding; the requested size is overridden
       so every buffer is full-size and interchangeable with RX buffers */
    size = RX_MAX_BUFFER_SIZE;
    size = (size + DATA_BUFFER_ALIGNMENT - 1) & ~(DATA_BUFFER_ALIGNMENT - 1);
    skb = dev_alloc_skb(size + DATA_BUFFER_ALIGNMENT);
    /* must be burst length aligned */
    if ( skb != NULL )
        skb_reserve(skb, ~((unsigned int)skb->data + (DATA_BUFFER_ALIGNMENT - 1)) & (DATA_BUFFER_ALIGNMENT - 1));

    return skb;
}
static inline struct sk_buff *get_skb_pointer(unsigned int dataptr)
{
    unsigned int skb_dataptr;
    struct sk_buff *skb;

    // usually, CPE memory is less than 256M bytes
    // so NULL means invalid pointer
    if ( dataptr == 0 ) {
        dbg("dataptr is 0, it's supposed to be invalid pointer");
        return NULL;
    }

    skb_dataptr = (dataptr - 4) | KSEG1;
    skb = *(struct sk_buff **)skb_dataptr;

    ASSERT((unsigned int)skb >= KSEG0, "invalid skb - skb = %#08x, dataptr = %#08x", (unsigned int)skb, dataptr);
    ASSERT((((unsigned int)skb->data & (0x0FFFFFFF ^ (DATA_BUFFER_ALIGNMENT - 1))) | KSEG1) == (dataptr | KSEG1), "invalid skb - skb = %#08x, skb->data = %#08x, dataptr = %#08x", (unsigned int)skb, (unsigned int)skb->data, dataptr);

    return skb;
}
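
/*
 * Note: descriptors carry 28-bit physical addresses (hence the 0x0FFFFFFF
 * masks elsewhere in this file). On MIPS32, OR-ing such an address with
 * KSEG1 (0xA0000000) yields the uncached virtual alias used by the
 * dereference above, so no cache maintenance is needed to read the
 * stashed pointer.
 */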
static inline int get_tx_desc(unsigned int itf, unsigned int *f_full)
{
    int desc_base = -1;
    struct ptm_itf *p_itf = &g_ptm_priv_data.itf[0];

    // assume TX is serial operation
    // no protection provided

    *f_full = 1;
    if ( CPU_TO_WAN_TX_DESC_BASE[p_itf->tx_desc_pos].own == 0 ) {
        desc_base = p_itf->tx_desc_pos;
        if ( ++(p_itf->tx_desc_pos) == CPU_TO_WAN_TX_DESC_NUM )
            p_itf->tx_desc_pos = 0;
        /* ring counts as full if the next descriptor is still held by the PP32 */
        if ( CPU_TO_WAN_TX_DESC_BASE[p_itf->tx_desc_pos].own == 0 )
            *f_full = 0;
    }

    return desc_base;
}
static irqreturn_t mailbox_irq_handler(int irq, void *dev_id)
{
    unsigned int isr;

    isr = IFX_REG_R32(MBOX_IGU1_ISR);
    IFX_REG_W32(isr, MBOX_IGU1_ISRC);
    isr &= IFX_REG_R32(MBOX_IGU1_IER);

    if ( isr & 1 ) {            /* RX */
        IFX_REG_W32_MASK(1, 0, MBOX_IGU1_IER);
        napi_schedule(&g_ptm_priv_data.itf[0].napi);
#if defined(ENABLE_TMP_DBG) && ENABLE_TMP_DBG
        {
            volatile struct rx_descriptor *desc = &WAN_RX_DESC_BASE[g_ptm_priv_data.itf[0].rx_desc_pos];

            if ( desc->own ) {  // PP32 hold
                err("invalid interrupt");
            }
        }
#endif
    }
    if ( isr & (1 << 16) ) {    /* swap descriptors */
        IFX_REG_W32_MASK(1 << 16, 0, MBOX_IGU1_IER);
        tasklet_hi_schedule(&g_swap_desc_tasklet);
    }
    if ( isr & (1 << 17) ) {    /* TX ring drained */
        IFX_REG_W32_MASK(1 << 17, 0, MBOX_IGU1_IER);
        netif_wake_queue(g_net_dev[0]);
    }

    return IRQ_HANDLED;
}
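
/*
 * MBOX_IGU1 interrupt sources as used by this driver: bit 0 is RX traffic
 * (deferred to NAPI), bit 16 is the swap-descriptor ring (deferred to the
 * tasklet), bit 17 signals that the CPU->WAN TX ring has drained (used to
 * wake the queue after ptm_hard_start_xmit stopped it). Each source is
 * masked in IER here and re-armed by its deferred handler.
 */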
static void do_swap_desc_tasklet(unsigned long arg)
{
    int budget = 32;    /* descriptors replenished per run; the original bound was lost, this value is assumed */
    volatile struct tx_descriptor *desc;
    struct sk_buff *skb;
    unsigned int byteoff;

    while ( budget-- > 0 ) {
        if ( WAN_SWAP_DESC_BASE[g_ptm_priv_data.itf[0].tx_swap_desc_pos].own )  // if PP32 hold descriptor
            break;

        desc = &WAN_SWAP_DESC_BASE[g_ptm_priv_data.itf[0].tx_swap_desc_pos];
        if ( ++g_ptm_priv_data.itf[0].tx_swap_desc_pos == WAN_SWAP_DESC_NUM )
            g_ptm_priv_data.itf[0].tx_swap_desc_pos = 0;

        /* free the buffer handed back by the firmware */
        skb = get_skb_pointer(desc->dataptr);
        if ( skb != NULL )
            dev_kfree_skb_any(skb);

        /* replace it with a fresh one */
        skb = alloc_skb_tx(RX_MAX_BUFFER_SIZE);
        if ( skb == NULL )
            panic("can't allocate swap buffer for PPE firmware use\n");
        byteoff = (unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1);
        *(struct sk_buff **)((unsigned int)skb->data - byteoff - sizeof(struct sk_buff *)) = skb;

        desc->dataptr = (unsigned int)skb->data & 0x0FFFFFFF;
        desc->own     = 1;
    }

    // clear interrupt (bit 16 is the swap-descriptor source)
    IFX_REG_W32_MASK(0, 1 << 16, MBOX_IGU1_ISRC);
    // no more skb to be replaced
    if ( WAN_SWAP_DESC_BASE[g_ptm_priv_data.itf[0].tx_swap_desc_pos].own ) {    // if PP32 hold descriptor
        IFX_REG_W32_MASK(0, 1 << 16, MBOX_IGU1_IER);
        return;
    }

    tasklet_hi_schedule(&g_swap_desc_tasklet);
}
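
/*
 * The swap ring is how the PP32 firmware hands used CPU-allocated buffers
 * back to the driver: the tasklet above frees each returned skb, restocks
 * the descriptor with a fresh full-size buffer, returns ownership to the
 * firmware, and reschedules itself until the firmware owns the next
 * descriptor again.
 */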
static inline int ifx_ptm_version(char *buf)
{
    int len = 0;
    unsigned int major, minor;

    ifx_ptm_get_fw_ver(&major, &minor);

    len += sprintf(buf + len, "PTM %d.%d.%d", IFX_PTM_VER_MAJOR, IFX_PTM_VER_MID, IFX_PTM_VER_MINOR);
    len += sprintf(buf + len, "    PTM (E1) firmware version %d.%d\n", major, minor);

    return len;
}
static inline int init_priv_data(void)
{
    int i, j;

    g_wanqos_en = wanqos_en ? wanqos_en : 8;
    if ( g_wanqos_en > 8 )
        g_wanqos_en = 8;

    for ( i = 0; i < ARRAY_SIZE(g_queue_gamma_map); i++ )
    {
        g_queue_gamma_map[i] = queue_gamma_map[i] & ((1 << g_wanqos_en) - 1);
        /* remove queues already claimed by an earlier gamma interface */
        for ( j = 0; j < i; j++ )
            g_queue_gamma_map[i] &= ~g_queue_gamma_map[j];
    }

    memset(&g_ptm_priv_data, 0, sizeof(g_ptm_priv_data));

    {
        int max_packet_priority = ARRAY_SIZE(g_ptm_prio_queue_map);
        int tx_num_q;
        int q_step, q_accum, p_step;

        /* spread the 8 skb priorities evenly over the available TX queues */
        tx_num_q = __ETH_WAN_TX_QUEUE_NUM;
        q_step = tx_num_q - 1;
        p_step = max_packet_priority - 1;
        for ( j = 0, q_accum = 0; j < max_packet_priority; j++, q_accum += q_step )
            g_ptm_prio_queue_map[j] = q_step - (q_accum + (p_step >> 1)) / p_step;
    }

    return 0;
}
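
/*
 * For illustration (not part of the original source): assuming
 * __ETH_WAN_TX_QUEUE_NUM == 8, the loop above yields
 * g_ptm_prio_queue_map = {7, 6, 5, 4, 3, 2, 1, 0}; with 4 queues it yields
 * {3, 3, 2, 2, 1, 1, 0, 0}. Higher skb->priority thus lands in a
 * lower-numbered queue, with rounding to the nearest queue.
 */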
static inline void clear_priv_data(void)
{
}
static inline int init_tables(void)
{
    struct sk_buff *skb_pool[WAN_RX_DESC_NUM] = {0};
    struct cfg_std_data_len cfg_std_data_len = {0};
    struct tx_qos_cfg tx_qos_cfg = {0};
    struct psave_cfg psave_cfg = {0};
    struct eg_bwctrl_cfg eg_bwctrl_cfg = {0};
    struct test_mode test_mode = {0};
    struct rx_bc_cfg rx_bc_cfg = {0};
    struct tx_bc_cfg tx_bc_cfg = {0};
    struct gpio_mode gpio_mode = {0};
    struct gpio_wm_cfg gpio_wm_cfg = {0};
    struct rx_gamma_itf_cfg rx_gamma_itf_cfg = {0};
    struct tx_gamma_itf_cfg tx_gamma_itf_cfg = {0};
    struct wtx_qos_q_desc_cfg wtx_qos_q_desc_cfg = {0};
    struct rx_descriptor rx_desc = {0};
    struct tx_descriptor tx_desc = {0};
    int i;

    for ( i = 0; i < WAN_RX_DESC_NUM; i++ ) {
        skb_pool[i] = alloc_skb_rx();
        if ( skb_pool[i] == NULL )
            goto ALLOC_SKB_RX_FAIL;
    }
    cfg_std_data_len.byte_off = RX_HEAD_MAC_ADDR_ALIGNMENT;  // this field replaces byte_off in the RX descriptor of VDSL ingress
    cfg_std_data_len.data_len = 1600;
    *CFG_STD_DATA_LEN = cfg_std_data_len;

    tx_qos_cfg.time_tick      = cgu_get_pp32_clock() / 62500;  // 16 * (cgu_get_pp32_clock() / 1000000)
    tx_qos_cfg.overhd_bytes   = 0;
    tx_qos_cfg.eth1_eg_qnum   = __ETH_WAN_TX_QUEUE_NUM;
    tx_qos_cfg.eth1_burst_chk = 1;
    tx_qos_cfg.eth1_qss       = 0;
    tx_qos_cfg.shape_en       = 0;  // disable
    tx_qos_cfg.wfq_en         = 0;  // strict priority
    *TX_QOS_CFG = tx_qos_cfg;

    psave_cfg.start_state = 0;
    psave_cfg.sleep_en    = 1;  // enable sleep mode
    *PSAVE_CFG = psave_cfg;

    eg_bwctrl_cfg.fdesc_wm  = 16;
    eg_bwctrl_cfg.class_len = 128;
    *EG_BWCTRL_CFG = eg_bwctrl_cfg;
    //*GPIO_ADDR = (unsigned int)IFX_GPIO_P0_OUT;
    *GPIO_ADDR = (unsigned int)0x00000000;  // disabled by default

    gpio_mode.gpio_bit_bc1 = 2;
    gpio_mode.gpio_bit_bc0 = 1;
    gpio_mode.gpio_bc1_en  = 0;
    gpio_mode.gpio_bc0_en  = 0;
    *GPIO_MODE = gpio_mode;

    gpio_wm_cfg.stop_wm_bc1  = 2;
    gpio_wm_cfg.start_wm_bc1 = 4;
    gpio_wm_cfg.stop_wm_bc0  = 2;
    gpio_wm_cfg.start_wm_bc0 = 4;
    *GPIO_WM_CFG = gpio_wm_cfg;

    test_mode.mib_clear_mode = 0;
    test_mode.test_mode      = 0;
    *TEST_MODE = test_mode;
    rx_bc_cfg.local_state   = 0;
    rx_bc_cfg.remote_state  = 0;
    rx_bc_cfg.to_false_th   = 7;
    rx_bc_cfg.to_looking_th = 3;
    *RX_BC_CFG(0) = rx_bc_cfg;
    *RX_BC_CFG(1) = rx_bc_cfg;

    tx_bc_cfg.fill_wm = 2;
    tx_bc_cfg.uflw_wm = 2;
    *TX_BC_CFG(0) = tx_bc_cfg;
    *TX_BC_CFG(1) = tx_bc_cfg;
    rx_gamma_itf_cfg.receive_state         = 0;
    rx_gamma_itf_cfg.rx_min_len            = 60;
    rx_gamma_itf_cfg.rx_pad_en             = 1;
    rx_gamma_itf_cfg.rx_eth_fcs_ver_dis    = 0;
    rx_gamma_itf_cfg.rx_rm_eth_fcs         = 1;
    rx_gamma_itf_cfg.rx_tc_crc_ver_dis     = 0;
    rx_gamma_itf_cfg.rx_tc_crc_size        = 1;
    rx_gamma_itf_cfg.rx_eth_fcs_result     = 0xC704DD7B;
    rx_gamma_itf_cfg.rx_tc_crc_result      = 0x1D0F1D0F;
    rx_gamma_itf_cfg.rx_crc_cfg            = 0x2500;
    rx_gamma_itf_cfg.rx_eth_fcs_init_value = 0xFFFFFFFF;
    rx_gamma_itf_cfg.rx_tc_crc_init_value  = 0x0000FFFF;
    rx_gamma_itf_cfg.rx_max_len_sel        = 0;
    rx_gamma_itf_cfg.rx_edit_num2          = 0;
    rx_gamma_itf_cfg.rx_edit_pos2          = 0;
    rx_gamma_itf_cfg.rx_edit_type2         = 0;
    rx_gamma_itf_cfg.rx_edit_en2           = 0;
    rx_gamma_itf_cfg.rx_edit_num1          = 0;
    rx_gamma_itf_cfg.rx_edit_pos1          = 0;
    rx_gamma_itf_cfg.rx_edit_type1         = 0;
    rx_gamma_itf_cfg.rx_edit_en1           = 0;
    rx_gamma_itf_cfg.rx_inserted_bytes_1l  = 0;
    rx_gamma_itf_cfg.rx_inserted_bytes_1h  = 0;
    rx_gamma_itf_cfg.rx_inserted_bytes_2l  = 0;
    rx_gamma_itf_cfg.rx_inserted_bytes_2h  = 0;
    rx_gamma_itf_cfg.rx_len_adj            = -6;
    for ( i = 0; i < 4; i++ )
        *RX_GAMMA_ITF_CFG(i) = rx_gamma_itf_cfg;
    tx_gamma_itf_cfg.tx_len_adj            = 6;
    tx_gamma_itf_cfg.tx_crc_off_adj        = 6;
    tx_gamma_itf_cfg.tx_min_len            = 0;
    tx_gamma_itf_cfg.tx_eth_fcs_gen_dis    = 0;
    tx_gamma_itf_cfg.tx_tc_crc_size        = 1;
    tx_gamma_itf_cfg.tx_crc_cfg            = 0x2F00;
    tx_gamma_itf_cfg.tx_eth_fcs_init_value = 0xFFFFFFFF;
    tx_gamma_itf_cfg.tx_tc_crc_init_value  = 0x0000FFFF;
    for ( i = 0; i < ARRAY_SIZE(g_queue_gamma_map); i++ ) {
        tx_gamma_itf_cfg.queue_mapping = g_queue_gamma_map[i];
        *TX_GAMMA_ITF_CFG(i) = tx_gamma_itf_cfg;
    }

    for ( i = 0; i < __ETH_WAN_TX_QUEUE_NUM; i++ ) {
        wtx_qos_q_desc_cfg.length = WAN_TX_DESC_NUM;
        wtx_qos_q_desc_cfg.addr   = __ETH_WAN_TX_DESC_BASE(i);
        *WTX_QOS_Q_DESC_CFG(i) = wtx_qos_q_desc_cfg;
    }

    // default TX queue QoS config is all ZERO
    IFX_REG_W32(0x90111293, TX_CTRL_K_TABLE(0));
    IFX_REG_W32(0x14959617, TX_CTRL_K_TABLE(1));
    IFX_REG_W32(0x18999A1B, TX_CTRL_K_TABLE(2));
    IFX_REG_W32(0x9C1D1E9F, TX_CTRL_K_TABLE(3));
    IFX_REG_W32(0xA02122A3, TX_CTRL_K_TABLE(4));
    IFX_REG_W32(0x24A5A627, TX_CTRL_K_TABLE(5));
    IFX_REG_W32(0x28A9AA2B, TX_CTRL_K_TABLE(6));
    IFX_REG_W32(0xAC2D2EAF, TX_CTRL_K_TABLE(7));
    IFX_REG_W32(0x30B1B233, TX_CTRL_K_TABLE(8));
    IFX_REG_W32(0xB43536B7, TX_CTRL_K_TABLE(9));
    IFX_REG_W32(0xB8393ABB, TX_CTRL_K_TABLE(10));
    IFX_REG_W32(0x3CBDBE3F, TX_CTRL_K_TABLE(11));
    IFX_REG_W32(0xC04142C3, TX_CTRL_K_TABLE(12));
    IFX_REG_W32(0x44C5C647, TX_CTRL_K_TABLE(13));
    IFX_REG_W32(0x48C9CA4B, TX_CTRL_K_TABLE(14));
    IFX_REG_W32(0xCC4D4ECF, TX_CTRL_K_TABLE(15));
    // init RX descriptor
    rx_desc.own     = 1;
    rx_desc.c       = 0;
    rx_desc.sop     = 1;
    rx_desc.eop     = 1;
    rx_desc.byteoff = RX_HEAD_MAC_ADDR_ALIGNMENT;
    rx_desc.datalen = RX_MAX_BUFFER_SIZE - RX_HEAD_MAC_ADDR_ALIGNMENT;
    for ( i = 0; i < WAN_RX_DESC_NUM; i++ ) {
        rx_desc.dataptr = (unsigned int)skb_pool[i]->data & 0x0FFFFFFF;
        WAN_RX_DESC_BASE[i] = rx_desc;
    }

    // init TX descriptor
    for ( i = 0; i < CPU_TO_WAN_TX_DESC_NUM; i++ )
        CPU_TO_WAN_TX_DESC_BASE[i] = tx_desc;
    for ( i = 0; i < WAN_TX_DESC_NUM_TOTAL; i++ )
        WAN_TX_DESC_BASE(0)[i] = tx_desc;

    // init Swap descriptor
    for ( i = 0; i < WAN_SWAP_DESC_NUM; i++ )
        WAN_SWAP_DESC_BASE[i] = tx_desc;

    // init fastpath TX descriptor
    for ( i = 0; i < FASTPATH_TO_WAN_TX_DESC_NUM; i++ )
        FASTPATH_TO_WAN_TX_DESC_BASE[i] = tx_desc;

    return 0;

ALLOC_SKB_RX_FAIL:
    while ( i-- > 0 )
        dev_kfree_skb_any(skb_pool[i]);
    return -1;
}
static inline void clear_tables(void)
{
    struct sk_buff *skb;
    int i, j;

    for ( i = 0; i < WAN_RX_DESC_NUM; i++ ) {
        skb = get_skb_pointer(WAN_RX_DESC_BASE[i].dataptr);
        if ( skb != NULL )
            dev_kfree_skb_any(skb);
    }

    for ( i = 0; i < CPU_TO_WAN_TX_DESC_NUM; i++ ) {
        skb = get_skb_pointer(CPU_TO_WAN_TX_DESC_BASE[i].dataptr);
        if ( skb != NULL )
            dev_kfree_skb_any(skb);
    }

    for ( j = 0; j < 8; j++ )
        for ( i = 0; i < WAN_TX_DESC_NUM; i++ ) {
            skb = get_skb_pointer(WAN_TX_DESC_BASE(j)[i].dataptr);
            if ( skb != NULL )
                dev_kfree_skb_any(skb);
        }

    for ( i = 0; i < WAN_SWAP_DESC_NUM; i++ ) {
        skb = get_skb_pointer(WAN_SWAP_DESC_BASE[i].dataptr);
        if ( skb != NULL )
            dev_kfree_skb_any(skb);
    }

    for ( i = 0; i < FASTPATH_TO_WAN_TX_DESC_NUM; i++ ) {
        skb = get_skb_pointer(FASTPATH_TO_WAN_TX_DESC_BASE[i].dataptr);
        if ( skb != NULL )
            dev_kfree_skb_any(skb);
    }
}
static int ptm_showtime_enter(struct port_cell_info *port_cell, void *xdata_addr)
{
    ASSERT(port_cell != NULL, "port_cell is NULL");
    ASSERT(xdata_addr != NULL, "xdata_addr is NULL");

    // TODO: ReTX set xdata_addr
    g_xdata_addr = xdata_addr;

    g_showtime = 1;

    IFX_REG_W32(0x0F, UTP_CFG);

    //IFX_REG_W32_MASK(1 << 17, 0, FFSM_CFG0);

    printk("enter showtime\n");

    return 0;
}
static int ptm_showtime_exit(void)
{
    if ( !g_showtime )
        return -1;

    //IFX_REG_W32_MASK(0, 1 << 17, FFSM_CFG0);

    IFX_REG_W32(0x00, UTP_CFG);

    g_showtime = 0;

    // TODO: ReTX clean state

    printk("leave showtime\n");

    return 0;
}
static int ifx_ptm_init(void)
{
    int ret;
    int i;
    char ver_str[256];  /* buffer size chosen generously; the original value was not preserved */
    struct port_cell_info port_cell = {0};

    ret = init_priv_data();
    if ( ret != 0 ) {
        err("INIT_PRIV_DATA_FAIL");
        goto INIT_PRIV_DATA_FAIL;
    }

    ret = init_tables();
    if ( ret != 0 ) {
        err("INIT_TABLES_FAIL");
        goto INIT_TABLES_FAIL;
    }

    for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ ) {
        g_net_dev[i] = alloc_netdev(0, g_net_dev_name[i], ether_setup, ptm_setup);
        if ( g_net_dev[i] == NULL )
            goto ALLOC_NETDEV_FAIL;
    }

    for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ ) {
        ret = register_netdev(g_net_dev[i]);
        if ( ret != 0 )
            goto REGISTER_NETDEV_FAIL;
    }

    /* register interrupt handler */
    ret = request_irq(PPE_MAILBOX_IGU1_INT, mailbox_irq_handler, IRQF_DISABLED, "ptm_mailbox_isr", &g_ptm_priv_data);
    if ( ret ) {
        if ( ret == -EBUSY ) {
            err("IRQ may be occupied by other driver, please reconfig to disable it.");
        }
        else {
            err("request_irq fail");
        }
        goto REQUEST_IRQ_PPE_MAILBOX_IGU1_INT_FAIL;
    }
    disable_irq(PPE_MAILBOX_IGU1_INT);

    ret = ifx_pp32_start(0);
    if ( ret ) {
        err("ifx_pp32_start fail!");
        goto PP32_START_FAIL;
    }
    IFX_REG_W32(1 << 16, MBOX_IGU1_IER);  // enable SWAP interrupt
    IFX_REG_W32(~0, MBOX_IGU1_ISRC);

    enable_irq(PPE_MAILBOX_IGU1_INT);

    ifx_mei_atm_showtime_check(&g_showtime, &port_cell, &g_xdata_addr);

    ifx_mei_atm_showtime_enter = ptm_showtime_enter;
    ifx_mei_atm_showtime_exit = ptm_showtime_exit;

    ifx_ptm_version(ver_str);
    printk(KERN_INFO "%s", ver_str);

    printk("ifxmips_ptm: PTM init succeeded\n");
    return 0;

PP32_START_FAIL:
    free_irq(PPE_MAILBOX_IGU1_INT, &g_ptm_priv_data);
REQUEST_IRQ_PPE_MAILBOX_IGU1_INT_FAIL:
    i = ARRAY_SIZE(g_net_dev);
REGISTER_NETDEV_FAIL:
    while ( i-- )
        unregister_netdev(g_net_dev[i]);
    i = ARRAY_SIZE(g_net_dev);
ALLOC_NETDEV_FAIL:
    while ( i-- ) {
        free_netdev(g_net_dev[i]);
        g_net_dev[i] = NULL;
    }
INIT_TABLES_FAIL:
INIT_PRIV_DATA_FAIL:
    printk("ifxmips_ptm: PTM init failed\n");
    return ret;
}
static void __exit ifx_ptm_exit(void)
{
    int i;

    ifx_mei_atm_showtime_enter = NULL;
    ifx_mei_atm_showtime_exit = NULL;

    free_irq(PPE_MAILBOX_IGU1_INT, &g_ptm_priv_data);

    for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ )
        unregister_netdev(g_net_dev[i]);

    for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ ) {
        free_netdev(g_net_dev[i]);
        g_net_dev[i] = NULL;
    }

    ifx_ptm_uninit_chip();
}
static int __init wanqos_en_setup(char *line)
{
    wanqos_en = simple_strtoul(line, NULL, 0);

    if ( wanqos_en < 1 || wanqos_en > 8 )
        wanqos_en = 0;

    return 0;
}
static int __init queue_gamma_map_setup(char *line)
{
    char *p;
    int i;

    for ( i = 0, p = line; i < ARRAY_SIZE(queue_gamma_map) && isxdigit(*p); i++ )
    {
        queue_gamma_map[i] = simple_strtoul(p, &p, 0);
        if ( *p == ',' || *p == ';' || *p == ':' )
            p++;
    }

    return 0;
}
module_init(ifx_ptm_init);
module_exit(ifx_ptm_exit);

__setup("wanqos_en=", wanqos_en_setup);
__setup("queue_gamma_map=", queue_gamma_map_setup);

MODULE_LICENSE("GPL");