2 * drivers/net/danube_mii0.c
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
18 * Copyright (C) 2005 Infineon
20 * Rewrite of Infineon Danube code, thanks to infineon for the support,
21 * software and hardware
23 * Copyright (C) 2007 John Crispin <blogic@openwrt.org>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
29 #include <linux/errno.h>
30 #include <linux/types.h>
31 #include <linux/interrupt.h>
32 #include <asm/uaccess.h>
34 #include <linux/netdevice.h>
35 #include <linux/etherdevice.h>
37 #include <linux/tcp.h>
38 #include <linux/skbuff.h>
40 #include <linux/ethtool.h>
41 #include <asm/checksum.h>
42 #include <linux/init.h>
43 #include <asm/delay.h>
44 #include <asm/danube/danube.h>
45 #include <asm/danube/danube_mii0.h>
46 #include <asm/danube/danube_dma.h>
47 #include <asm/danube/danube_pmu.h>
/* The single net_device instance for the MII0 port; registered in
 * switch_init_module() and referenced directly from the DMA interrupt
 * handler below. */
49 static struct net_device danube_mii0_dev;
/* MAC address handed over by the bootloader; switch_init() sums its bytes
 * to decide whether U-Boot actually set one.
 * NOTE(review): the code that fills this array is not visible in this
 * fragment -- confirm the U-Boot handoff path. */
50 static unsigned char u_boot_ethaddr[MAX_ADDR_LEN];
/* Write a 16-bit value to a PHY register via the PPE32 MDIO accelerator.
 * Busy-waits until any in-flight MDIO transaction completes, then issues
 * the write request.
 * NOTE(review): this extraction is missing lines (return type, braces, and
 * the term of 'val' that folds in phy_data) -- do not treat the fragment as
 * complete. */
53 danube_write_mdio (u32 phy_addr, u32 phy_reg, u16 phy_data)
55 u32 val = MDIO_ACC_REQUEST |
56 ((phy_addr & MDIO_ACC_ADDR_MASK) << MDIO_ACC_ADDR_OFFSET) |
57 ((phy_reg & MDIO_ACC_REG_MASK) << MDIO_ACC_REG_OFFSET) |
/* Spin until the REQUEST bit clears, i.e. the previous transaction is done.
 * NOTE(review): unbounded busy-wait with no timeout -- a hung MDIO bus
 * stalls the CPU here. */
60 while (readl(DANUBE_PPE32_MDIO_ACC) & MDIO_ACC_REQUEST);
61 writel(val, DANUBE_PPE32_MDIO_ACC);
/* Read a PHY register via the PPE32 MDIO accelerator: post a READ request,
 * spin until the hardware clears the REQUEST bit, then extract the data
 * field from the access register.
 * NOTE(review): the trailing 'return val;' and braces are not visible in
 * this extraction; the same unbounded busy-wait caveat as the write path
 * applies. */
65 danube_read_mdio (u32 phy_addr, u32 phy_reg)
67 u32 val = MDIO_ACC_REQUEST | MDIO_ACC_READ |
68 ((phy_addr & MDIO_ACC_ADDR_MASK) << MDIO_ACC_ADDR_OFFSET) |
69 ((phy_reg & MDIO_ACC_REG_MASK) << MDIO_ACC_REG_OFFSET);
71 writel(val, DANUBE_PPE32_MDIO_ACC);
72 while (readl(DANUBE_PPE32_MDIO_ACC) & MDIO_ACC_REQUEST){};
/* Completed transaction: low bits of the access register hold the data. */
73 val = readl(DANUBE_PPE32_MDIO_ACC) & MDIO_ACC_VAL_MASK;
/* net_device open callback: open every RX DMA channel that was marked ON in
 * switch_init(), then allow the stack to queue transmit packets. */
79 danube_switch_open (struct net_device *dev)
81 struct switch_priv* priv = (struct switch_priv*)dev->priv;
82 struct dma_device_info* dma_dev = priv->dma_device;
/* Only channels flagged DANUBE_DMA_CH_ON get opened (see switch_init). */
85 for (i = 0; i < dma_dev->max_rx_chan_num; i++)
87 if ((dma_dev->rx_chan[i])->control == DANUBE_DMA_CH_ON)
88 (dma_dev->rx_chan[i])->open(dma_dev->rx_chan[i]);
91 netif_start_queue(dev);
/* net_device stop callback: mirror of danube_switch_open() -- close all RX
 * DMA channels and stop the transmit queue.
 * NOTE(review): unlike open(), this closes every channel unconditionally,
 * not only the DANUBE_DMA_CH_ON ones -- presumably harmless, but confirm
 * against the DMA core's close() semantics. */
97 switch_release (struct net_device *dev){
98 struct switch_priv* priv = (struct switch_priv*)dev->priv;
99 struct dma_device_info* dma_dev = priv->dma_device;
102 for (i = 0; i < dma_dev->max_rx_chan_num; i++)
103 dma_dev->rx_chan[i]->close(dma_dev->rx_chan[i]);
105 netif_stop_queue(dev);
/* Pull one received frame out of the DMA engine, sanity-check it, and hand
 * it to the network stack via eth_type_trans()/netif_rx (the netif_rx call
 * itself is among the lines missing from this extraction).  On any error
 * the skb is dropped and the rx error/drop counters are bumped. */
111 switch_hw_receive (struct net_device* dev,struct dma_device_info* dma_dev)
113 struct switch_priv *priv = (struct switch_priv*)dev->priv;
114 unsigned char* buf = NULL;
115 struct sk_buff *skb = NULL;
/* dma_device_read() returns the frame length and restores the skb pointer
 * that was stashed in the buffer-alloc callback's 'opt' slot. */
118 len = dma_device_read(dma_dev, &buf, (void**)&skb);
/* Frame larger than the DMA buffer can never be valid -- drop it. */
120 if (len >= ETHERNET_PACKET_DMA_BUFFER_SIZE)
122 printk("packet too large %d\n",len);
123 goto switch_hw_receive_err_exit;
/* NOTE(review): the condition guarding these two lines (skb == NULL check,
 * per the "cannot restore pointer" message) is missing from this
 * extraction. */
130 printk("cannot restore pointer\n");
131 goto switch_hw_receive_err_exit;
/* Defensive check that the reported length fits in the skb's tailroom. */
134 if (len > (skb->end - skb->tail))
/* NOTE(review): message prints len+4 while the check above used len --
 * presumably accounting for the stripped CRC; confirm intent. */
136 printk("BUG, len:%d end:%p tail:%p\n", (len+4), skb->end, skb->tail);
137 goto switch_hw_receive_err_exit;
/* Good frame: set the protocol field and account it. */
142 skb->protocol = eth_type_trans(skb, dev);
145 priv->stats.rx_packets++;
146 priv->stats.rx_bytes += len;
/* Error path: free the skb (if any) and count the drop.
 * NOTE(review): dev_kfree_skb_any() is reached even on the "cannot restore
 * pointer" path where skb may be NULL -- a guard may exist in the missing
 * lines; verify. */
150 switch_hw_receive_err_exit:
154 dev_kfree_skb_any(skb);
155 priv->stats.rx_errors++;
156 priv->stats.rx_dropped++;
/* Low-level transmit: push 'len' bytes at 'buf' into the DMA engine.
 * priv->skb was stashed by switch_tx() so the DMA completion side can free
 * the right skb.  Returns dma_device_write()'s byte count (return statement
 * not visible in this extraction). */
165 switch_hw_tx (char *buf, int len, struct net_device *dev)
168 struct switch_priv *priv = dev->priv;
169 struct dma_device_info* dma_dev = priv->dma_device;
171 ret = dma_device_write(dma_dev, buf, len, priv->skb);
/* hard_start_xmit callback: pad short frames to ETH_ZLEN, pick a TX DMA
 * channel (hard-coded to 0 for now), and hand the frame to switch_hw_tx().
 * On a short write the skb is freed and counted as an error/drop; on
 * success the tx byte/packet counters are bumped. */
177 switch_tx (struct sk_buff *skb, struct net_device *dev)
181 struct switch_priv *priv = dev->priv;
182 struct dma_device_info* dma_dev = priv->dma_device;
/* Hardware wants at least a minimum-size ethernet frame.
 * NOTE(review): len is rounded up but the lines that would stash
 * priv->skb = skb and set 'data' = skb->data are missing from this
 * extraction -- padding bytes past skb->len are whatever follows in the
 * buffer; verify against the original file. */
184 len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
187 dev->trans_start = jiffies;
188 // TODO we got more than 1 dma channel, so we should do something intelligent
189 // here to select one
190 dma_dev->current_tx_chan = 0;
/* Short write from the DMA layer means the frame was not fully queued. */
194 if (switch_hw_tx(data, len, dev) != len)
196 dev_kfree_skb_any(skb);
197 priv->stats.tx_errors++;
198 priv->stats.tx_dropped++;
/* Success path (the 'else' keyword sits in the missing lines). */
200 priv->stats.tx_packets++;
201 priv->stats.tx_bytes+=len;
/* Watchdog callback (fires after watchdog_timeo with no TX completion):
 * count the error, mask the TX channel interrupts, and let the stack retry
 * by waking the queue.
 * NOTE(review): no hardware reset is visible here -- if the DMA engine is
 * truly wedged, waking the queue alone may not recover it. */
208 switch_tx_timeout (struct net_device *dev)
211 struct switch_priv* priv = (struct switch_priv*)dev->priv;
213 priv->stats.tx_errors++;
215 for (i = 0; i < priv->dma_device->max_tx_chan_num; i++)
217 priv->dma_device->tx_chan[i]->disable_irq(priv->dma_device->tx_chan[i]);
220 netif_wake_queue(dev);
/* DMA core callback, dispatched on 'status' (the switch statement and the
 * RCV_INT case label are among the lines missing from this extraction):
 *  - RCV_INT: pull the frame in via switch_hw_receive();
 *  - TX_BUF_FULL_INT: stop the queue and enable TX-complete interrupts so
 *    we learn when space frees up;
 *  - TRANSMIT_CPT_INT: mask TX interrupts again and wake the queue.
 * Uses the global danube_mii0_dev since the DMA layer does not pass the
 * net_device through. */
226 dma_intr_handler (struct dma_device_info* dma_dev, int status)
233 switch_hw_receive(&danube_mii0_dev, dma_dev);
236 case TX_BUF_FULL_INT:
237 printk("tx buffer full\n");
238 netif_stop_queue(&danube_mii0_dev);
/* Re-arm completion IRQs only on channels that are actually in use. */
239 for (i = 0; i < dma_dev->max_tx_chan_num; i++)
241 if ((dma_dev->tx_chan[i])->control==DANUBE_DMA_CH_ON)
242 dma_dev->tx_chan[i]->enable_irq(dma_dev->tx_chan[i]);
246 case TRANSMIT_CPT_INT:
247 for (i = 0; i < dma_dev->max_tx_chan_num; i++)
248 dma_dev->tx_chan[i]->disable_irq(dma_dev->tx_chan[i]);
250 netif_wake_queue(&danube_mii0_dev);
/* DMA buffer-alloc callback: allocate an skb big enough for a full DMA
 * frame, return its data pointer as the DMA buffer, and stash the skb
 * itself in *opt so switch_hw_receive()/buffer_free() can recover it.
 * NOTE(review): the dev_alloc_skb() NULL check, the *byte_offset
 * assignment, and the 'return buffer;' are in lines missing from this
 * extraction. */
258 danube_etop_dma_buffer_alloc (int len, int *byte_offset, void **opt)
260 unsigned char *buffer = NULL;
261 struct sk_buff *skb = NULL;
263 skb = dev_alloc_skb(ETHERNET_PACKET_DMA_BUFFER_SIZE);
267 buffer = (unsigned char*)(skb->data);
/* NOTE(review): pointer smuggled through an int -- only safe on 32-bit
 * targets (this is a MIPS32 SoC, but the cast would truncate on 64-bit). */
269 *(int*)opt = (int)skb;
/* DMA buffer-free callback: recover the skb stashed in 'opt' by the alloc
 * callback and free it.  dataptr itself is not freed separately because it
 * points into the skb's data area.
 * NOTE(review): a NULL guard on 'opt' likely sits in the lines missing from
 * this extraction. */
276 danube_etop_dma_buffer_free (unsigned char *dataptr, void *opt)
278 struct sk_buff *skb = NULL;
284 skb = (struct sk_buff*)opt;
285 dev_kfree_skb_any(skb);
/* get_stats callback: hand the stack our counters.
 * NOTE(review): this casts dev->priv (a struct switch_priv *) straight to
 * struct net_device_stats *, which is only correct if 'stats' is the FIRST
 * member of struct switch_priv.  Every other function here uses
 * priv->stats; returning &((struct switch_priv *)dev->priv)->stats would
 * be layout-independent -- verify against the struct in danube_mii0.h. */
289 static struct net_device_stats*
290 danube_get_stats (struct net_device *dev)
292 return (struct net_device_stats *)dev->priv;
/* net_device init callback (run by register_netdev): wire up the device
 * methods, allocate the private data, reserve and configure the "PPE" DMA
 * device (4 RX channels on; TX channel setup below), register with the DMA
 * core, and choose a MAC address -- U-Boot's if it set one, otherwise a
 * fixed default.
 * NOTE(review): several lines are missing from this extraction (the 'int i'
 * and 'retval' declarations, error-return statements, ether_setup(), the
 * final 'return 0;'). */
296 switch_init (struct net_device *dev)
300 struct switch_priv *priv;
304 printk("%s up\n", dev->name);
306 dev->open = danube_switch_open;
307 dev->stop = switch_release;
308 dev->hard_start_xmit = switch_tx;
309 dev->get_stats = danube_get_stats;
310 dev->tx_timeout = switch_tx_timeout;
/* 10s without a TX completion triggers switch_tx_timeout(). */
311 dev->watchdog_timeo = 10 * HZ;
312 dev->priv = kmalloc(sizeof(struct switch_priv), GFP_KERNEL);
314 if (dev->priv == NULL)
317 memset(dev->priv, 0, sizeof(struct switch_priv));
/* Grab the PPE DMA device; the 'priv = dev->priv' assignment is among the
 * missing lines. */
320 priv->dma_device = dma_device_reserve("PPE");
/* NOTE(review): on this failure path dev->priv is apparently not freed --
 * possible leak; confirm against the missing lines. */
322 if (!priv->dma_device){
327 priv->dma_device->buffer_alloc = &danube_etop_dma_buffer_alloc;
328 priv->dma_device->buffer_free = &danube_etop_dma_buffer_free;
329 priv->dma_device->intr_handler = &dma_intr_handler;
330 priv->dma_device->max_rx_chan_num = 4;
332 for (i = 0; i < priv->dma_device->max_rx_chan_num; i++)
334 priv->dma_device->rx_chan[i]->packet_size = ETHERNET_PACKET_DMA_BUFFER_SIZE;
335 priv->dma_device->rx_chan[i]->control = DANUBE_DMA_CH_ON;
338 for (i = 0; i < priv->dma_device->max_tx_chan_num; i++)
/* NOTE(review): control is set ON then OFF on consecutive visible lines --
 * the missing line between them is almost certainly an 'if (i == ...)' /
 * 'else' selecting one active TX channel; verify before touching. */
341 priv->dma_device->tx_chan[i]->control = DANUBE_DMA_CH_ON;
343 priv->dma_device->tx_chan[i]->control = DANUBE_DMA_CH_OFF;
346 dma_device_register(priv->dma_device);
348 /*read the mac address from the mac table and put them into the mac table.*/
/* Sum the U-Boot MAC bytes: a zero sum means "never set". */
349 for (i = 0; i < 6; i++)
351 retval += u_boot_ethaddr[i];
355 /* ethaddr not set in u-boot ? */
358 printk("use default MAC address\n");
/* Hard-coded fallback MAC 00:11:22:33:44:55.
 * NOTE(review): not a registered OUI and identical on every board --
 * acceptable for an eval kit only. */
359 dev->dev_addr[0] = 0x00;
360 dev->dev_addr[1] = 0x11;
361 dev->dev_addr[2] = 0x22;
362 dev->dev_addr[3] = 0x33;
363 dev->dev_addr[4] = 0x44;
364 dev->dev_addr[5] = 0x55;
/* else: take the bootloader-provided address (the 'else' keyword is in the
 * missing lines). */
366 for (i = 0; i < 6; i++)
367 dev->dev_addr[i] = u_boot_ethaddr[i];
/* One-time PPE32 MAC bring-up: power on the DMA and PPE blocks via the PMU,
 * select normal or reverse MII mode in the PPE32 config register, enable
 * under/over length-frame checking, and enable CRC generation. */
374 danube_sw_chip_init (int mode)
376 danube_pmu_enable(DANUBE_PMU_PWDCR_DMA);
377 danube_pmu_enable(DANUBE_PMU_PWDCR_PPE);
/* Read-modify-write of the MII mode field; any other mode value leaves the
 * register untouched. */
379 if(mode == REV_MII_MODE)
380 writel((readl(DANUBE_PPE32_CFG) & PPE32_MII_MASK) | PPE32_MII_REVERSE, DANUBE_PPE32_CFG);
381 else if(mode == MII_MODE)
382 writel((readl(DANUBE_PPE32_CFG) & PPE32_MII_MASK) | PPE32_MII_NORMAL, DANUBE_PPE32_CFG);
/* Drop runt and oversized ingress frames. */
384 writel(PPE32_PLEN_UNDER | PPE32_PLEN_OVER, DANUBE_PPE32_IG_PLEN_CTRL);
/* Let the MAC append the ethernet CRC on transmit. */
386 writel(PPE32_CGEN, DANUBE_PPE32_ENET_MAC_CFG);
/* Module entry point: register the static net_device (switch_init runs as
 * its ->init hook) and bring up the MAC in reverse-MII mode, as wired on
 * the Danube eval kit. */
392 switch_init_module(void)
396 danube_mii0_dev.init = switch_init;
/* "eth%d" lets register_netdev pick the next free ethN name. */
398 strcpy(danube_mii0_dev.name, "eth%d");
/* NOTE(review): 'dev' is not declared anywhere visible in this function --
 * this almost certainly should be SET_MODULE_OWNER(&danube_mii0_dev);
 * verify against the original file (a local 'dev' may exist in the missing
 * lines). */
399 SET_MODULE_OWNER(dev);
401 result = register_netdev(&danube_mii0_dev);
/* The 'if (result)' guard for this error print is in the missing lines. */
404 printk("error %i registering device \"%s\"\n", result, danube_mii0_dev.name);
408 /* danube eval kit connects the phy/switch in REV mode */
409 danube_sw_chip_init(REV_MII_MODE);
410 printk("danube MAC driver loaded!\n");
/* Body of switch_cleanup() (module exit -- the signature line itself is
 * missing from this extraction): tear down the DMA device and free the
 * private data.
 * NOTE(review): two ordering concerns to verify against the original:
 *  1. kfree(priv->dma_device) right after dma_device_release() is a double
 *     free if release() already frees the descriptor -- check the DMA core.
 *  2. unregister_netdev() runs LAST, after dev->priv has been freed; the
 *     device could still process traffic through freed memory.  The usual
 *     order is unregister first, free after. */
419 struct switch_priv *priv = (struct switch_priv*)danube_mii0_dev.priv;
421 printk("danube_mii0 cleanup\n");
423 dma_device_unregister(priv->dma_device);
424 dma_device_release(priv->dma_device);
425 kfree(priv->dma_device);
426 kfree(danube_mii0_dev.priv);
427 unregister_netdev(&danube_mii0_dev);
/* Module entry/exit registration. */
432 module_init(switch_init_module);
433 module_exit(switch_cleanup);