[ramips] fixes rx path for eth, spinlock it, increases buffer size, board now survive...
diff --git a/target/linux/ramips/files/drivers/net/ramips.c b/target/linux/ramips/files/drivers/net/ramips.c
index 3084096..e78eb6f 100644
--- a/target/linux/ramips/files/drivers/net/ramips.c
+++ b/target/linux/ramips/files/drivers/net/ramips.c
 #include <linux/etherdevice.h>
 #include <linux/platform_device.h>
 
-#include <eth.h>
+#include <ramips_eth_platform.h>
+#include "ramips_eth.h"
 
 #define TX_TIMEOUT (20 * HZ / 100)
-#define        MAX_RX_LENGTH   1500
+#define        MAX_RX_LENGTH   1600
 
 #ifdef CONFIG_RALINK_RT305X
 #include "ramips_esw.c"
@@ -54,18 +55,28 @@ static void
 ramips_cleanup_dma(struct net_device *dev)
 {
        struct raeth_priv *priv = netdev_priv(dev);
+       int i;
+
+       for (i = 0; i < NUM_RX_DESC; i++)
+               if (priv->rx_skb[i])
+                       dev_kfree_skb_any(priv->rx_skb[i]);
 
-       dma_free_coherent(NULL, NUM_RX_DESC * sizeof(struct ramips_rx_dma),
-               priv->rx, priv->phy_rx);
+       if (priv->rx)
+               dma_free_coherent(NULL,
+                                 NUM_RX_DESC * sizeof(struct ramips_rx_dma),
+                                 priv->rx, priv->phy_rx);
 
-       dma_free_coherent(NULL, NUM_TX_DESC * sizeof(struct ramips_tx_dma),
-               priv->tx, priv->phy_tx);
+       if (priv->tx)
+               dma_free_coherent(NULL,
+                                 NUM_TX_DESC * sizeof(struct ramips_tx_dma),
+                                 priv->tx, priv->phy_tx);
 }
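
The teardown above now frees any receive skbs first and checks each ring
pointer before handing it to dma_free_coherent(), so the same function can
double as the unwind path for a half-finished allocation. A minimal
user-space sketch of the guard-before-free idiom (names are illustrative,
not from the driver):

    #include <stdlib.h>

    struct rings { void *rx; void *tx; };

    /* safe whether zero, one, or both rings were allocated */
    static void rings_cleanup(struct rings *r)
    {
            if (r->rx)
                    free(r->rx);
            if (r->tx)
                    free(r->tx);
            r->rx = r->tx = NULL;   /* repeated cleanup stays harmless */
    }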
 
 static int
 ramips_alloc_dma(struct net_device *dev)
 {
        struct raeth_priv *priv = netdev_priv(dev);
+       int err = -ENOMEM;
        int i;
 
        priv->skb_free_idx = 0;
@@ -73,6 +84,9 @@ ramips_alloc_dma(struct net_device *dev)
        /* setup tx ring */
        priv->tx = dma_alloc_coherent(NULL,
                NUM_TX_DESC * sizeof(struct ramips_tx_dma), &priv->phy_tx, GFP_ATOMIC);
+       if (!priv->tx)
+               goto err_cleanup;
+
        for(i = 0; i < NUM_TX_DESC; i++)
        {
                memset(&priv->tx[i], 0, sizeof(struct ramips_tx_dma));
@@ -84,11 +98,17 @@ ramips_alloc_dma(struct net_device *dev)
        /* setup rx ring */
        priv->rx = dma_alloc_coherent(NULL,
                NUM_RX_DESC * sizeof(struct ramips_rx_dma), &priv->phy_rx, GFP_ATOMIC);
+       if (!priv->rx)
+               goto err_cleanup;
+
        memset(priv->rx, 0, sizeof(struct ramips_rx_dma) * NUM_RX_DESC);
        for(i = 0; i < NUM_RX_DESC; i++)
        {
                struct sk_buff *new_skb = dev_alloc_skb(MAX_RX_LENGTH + 2);
-               BUG_ON(!new_skb);
+
+               if (!new_skb)
+                       goto err_cleanup;
+
                skb_reserve(new_skb, 2);
                priv->rx[i].rxd1 =
                        dma_map_single(NULL, skb_put(new_skb, 2), MAX_RX_LENGTH + 2,
@@ -98,6 +118,10 @@ ramips_alloc_dma(struct net_device *dev)
        }
 
        return 0;
+
+ err_cleanup:
+       ramips_cleanup_dma(dev);
+       return err;
 }
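
Allocation failures no longer crash the box through BUG_ON(): every step
either succeeds or jumps to a single err_cleanup label, the usual kernel
shape for unwinding partial setup. A hedged sketch of the pattern, with
hypothetical helpers standing in for the allocations above:

    /* sketch only: alloc_tx_ring/alloc_rx_ring/teardown are
     * hypothetical stand-ins, not functions in this driver */
    static int setup_sketch(struct net_device *dev)
    {
            int err = -ENOMEM;

            if (alloc_tx_ring(dev) < 0)
                    goto err_cleanup;
            if (alloc_rx_ring(dev) < 0)
                    goto err_cleanup;
            return 0;

    err_cleanup:
            teardown(dev);  /* must tolerate partially built state */
            return err;
    }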
 
 static void
@@ -123,6 +147,8 @@ ramips_eth_hard_start_xmit(struct sk_buff* skb, struct net_device *dev)
        unsigned long tx;
        unsigned int tx_next;
        unsigned int mapped_addr;
+       unsigned long flags;
+
        if(priv->plat->min_pkt_len)
        {
                if(skb->len < priv->plat->min_pkt_len)
@@ -140,33 +166,30 @@ ramips_eth_hard_start_xmit(struct sk_buff* skb, struct net_device *dev)
        mapped_addr = (unsigned int)dma_map_single(NULL, skb->data, skb->len,
                        DMA_TO_DEVICE);
        dma_sync_single_for_device(NULL, mapped_addr, skb->len, DMA_TO_DEVICE);
+       spin_lock_irqsave(&priv->page_lock, flags);
        tx = ramips_fe_rr(RAMIPS_TX_CTX_IDX0);
        if(tx == NUM_TX_DESC - 1)
                tx_next = 0;
        else
                tx_next = tx + 1;
-       if((priv->tx_skb[tx]== 0) && (priv->tx_skb[tx_next] == 0))
-       {
-               if(!(priv->tx[tx].txd2 & TX_DMA_DONE))
-               {
-                       kfree_skb(skb);
-                       dev->stats.tx_dropped++;
-                       printk(KERN_ERR "%s: dropping\n", dev->name);
-                       return 0;
-               }
-               priv->tx[tx].txd1 = virt_to_phys(skb->data);
-               priv->tx[tx].txd2 &= ~(TX_DMA_PLEN0_MASK | TX_DMA_DONE);
-               priv->tx[tx].txd2 |= TX_DMA_PLEN0(skb->len);
-               ramips_fe_wr((tx + 1) % NUM_TX_DESC, RAMIPS_TX_CTX_IDX0);
-               dev->stats.tx_packets++;
-               dev->stats.tx_bytes += skb->len;
-               priv->tx_skb[tx] = skb;
-               ramips_fe_wr((tx + 1) % NUM_TX_DESC, RAMIPS_TX_CTX_IDX0);
-       } else {
-               dev->stats.tx_dropped++;
-               kfree_skb(skb);
-       }
-       return 0;
+       if((priv->tx_skb[tx]) || (priv->tx_skb[tx_next]) ||
+               !(priv->tx[tx].txd2 & TX_DMA_DONE) || !(priv->tx[tx_next].txd2 & TX_DMA_DONE))
+               goto out;
+       priv->tx[tx].txd1 = mapped_addr;
+       priv->tx[tx].txd2 &= ~(TX_DMA_PLEN0_MASK | TX_DMA_DONE);
+       priv->tx[tx].txd2 |= TX_DMA_PLEN0(skb->len);
+       dev->stats.tx_packets++;
+       dev->stats.tx_bytes += skb->len;
+       priv->tx_skb[tx] = skb;
+       wmb();
+       ramips_fe_wr((tx + 1) % NUM_TX_DESC, RAMIPS_TX_CTX_IDX0);
+       spin_unlock_irqrestore(&priv->page_lock, flags);
+       return NETDEV_TX_OK;
+out:
+       spin_unlock_irqrestore(&priv->page_lock, flags);
+       dev->stats.tx_dropped++;
+       kfree_skb(skb);
+       return NETDEV_TX_OK;
 }
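
Two correctness fixes land in the transmit path above. First, the descriptor
index read and the index write-back now happen under priv->page_lock with
interrupts disabled, so concurrent callers cannot claim the same slot (the
removed code also wrote RAMIPS_TX_CTX_IDX0 twice). Second, a wmb() forces the
descriptor contents out to memory before the index register tells the DMA
engine to fetch them, and the function returns NETDEV_TX_OK rather than a
bare 0. The ordering idiom, as a hedged sketch (desc and the field values are
illustrative):

    spin_lock_irqsave(&priv->page_lock, flags); /* one writer at a time */
    desc->txd1 = mapped_addr;                   /* publish the buffer */
    desc->txd2 = len_and_flags;
    wmb();         /* descriptor must be visible before the doorbell */
    ramips_fe_wr(next_idx, RAMIPS_TX_CTX_IDX0); /* hardware may fetch now */
    spin_unlock_irqrestore(&priv->page_lock, flags);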
 
 static void
@@ -188,7 +211,6 @@ ramips_eth_rx_hw(unsigned long ptr)
 
                rx_skb = priv->rx_skb[rx];
                rx_skb->len = RX_DMA_PLEN0(priv->rx[rx].rxd2);
-               rx_skb->tail = rx_skb->data + rx_skb->len;
                rx_skb->dev = dev;
                rx_skb->protocol = eth_type_trans(rx_skb, dev);
                rx_skb->ip_summed = CHECKSUM_NONE;
@@ -196,7 +218,7 @@ ramips_eth_rx_hw(unsigned long ptr)
                dev->stats.rx_bytes += rx_skb->len;
                netif_rx(rx_skb);
 
-               new_skb = __dev_alloc_skb(MAX_RX_LENGTH + 2, GFP_DMA | GFP_ATOMIC);
+               new_skb = netdev_alloc_skb(dev, MAX_RX_LENGTH + 2);
                priv->rx_skb[rx] = new_skb;
                BUG_ON(!new_skb);
                skb_reserve(new_skb, 2);
@@ -204,6 +226,7 @@ ramips_eth_rx_hw(unsigned long ptr)
                        dma_map_single(NULL, new_skb->data, MAX_RX_LENGTH + 2,
                        DMA_FROM_DEVICE);
                priv->rx[rx].rxd2 &= ~RX_DMA_DONE;
+               wmb();
                ramips_fe_wr(rx, RAMIPS_RX_CALC_IDX0);
        }
        if(max_rx == 0)
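
Three receive-path changes: the manual rx_skb->tail fixup is dropped (writing
tail by hand breaks on kernels where it is stored as an offset, i.e. with
NET_SKBUFF_DATA_USES_OFFSET), refill buffers come from netdev_alloc_skb() so
the skb is tied to the device and GFP_DMA is no longer forced, and a wmb()
orders the descriptor update before RAMIPS_RX_CALC_IDX0 returns the slot to
hardware. For the length handling, the helper-based form would be (a sketch
of the idiomatic alternative, not what the driver does here):

    /* skb_put() advances tail and updates len together, instead of
     * poking the skb fields directly as the removed line did */
    skb_put(rx_skb, RX_DMA_PLEN0(priv->rx[rx].rxd2));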
@@ -277,8 +300,17 @@ static int
 ramips_eth_open(struct net_device *dev)
 {
        struct raeth_priv *priv = netdev_priv(dev);
+       int err;
+
+       err = request_irq(dev->irq, ramips_eth_irq, IRQF_DISABLED,
+                         dev->name, dev);
+       if (err)
+               return err;
+
+       err = ramips_alloc_dma(dev);
+       if (err)
+               goto err_free_irq;
 
-       ramips_alloc_dma(dev);
        ramips_setup_dma(dev);
        ramips_fe_wr((ramips_fe_rr(RAMIPS_PDMA_GLO_CFG) & 0xff) |
                (RAMIPS_TX_WB_DDONE | RAMIPS_RX_DMA_EN |
@@ -288,7 +320,6 @@ ramips_eth_open(struct net_device *dev)
                ~(RAMIPS_US_CYC_CNT_MASK << RAMIPS_US_CYC_CNT_SHIFT)) |
                ((rt305x_sys_freq / RAMIPS_US_CYC_CNT_DIVISOR) << RAMIPS_US_CYC_CNT_SHIFT),
                RAMIPS_FE_GLO_CFG);
-       request_irq(dev->irq, ramips_eth_irq, IRQF_DISABLED, dev->name, dev);
        tasklet_init(&priv->tx_housekeeping_tasklet, ramips_eth_tx_housekeeping,
                (unsigned long)dev);
        tasklet_init(&priv->rx_tasklet, ramips_eth_rx_hw, (unsigned long)dev);
@@ -305,6 +336,10 @@ ramips_eth_open(struct net_device *dev)
        ramips_fe_wr(0, RAMIPS_FE_RST_GL);
        netif_start_queue(dev);
        return 0;
+
+ err_free_irq:
+       free_irq(dev->irq, dev);
+       return err;
 }
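
ramips_eth_open() now acquires its resources in order and checks both
results: the IRQ first (previously request_irq()'s return value was silently
discarded near the end of the function), then the DMA rings, with a
free_irq() unwind if ring allocation fails. Nothing is enabled in hardware
until both exist. The acquire-in-order / release-in-reverse discipline, as a
hedged sketch with illustrative helpers:

    /* sketch: grab_irq/grab_rings/release_irq are hypothetical
     * stand-ins for request_irq()/ramips_alloc_dma()/free_irq() */
    static int open_sketch(struct net_device *dev)
    {
            int err;

            err = grab_irq(dev);        /* first resource */
            if (err)
                    return err;         /* nothing to undo yet */

            err = grab_rings(dev);      /* second resource */
            if (err)
                    goto err_release_irq;
            return 0;

    err_release_irq:
            release_irq(dev);           /* undo in reverse order */
            return err;
    }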
 
 static int
@@ -343,6 +378,7 @@ ramips_eth_probe(struct net_device *dev)
        dev->mtu = MAX_RX_LENGTH;
        dev->tx_timeout = ramips_eth_timeout;
        dev->watchdog_timeo = TX_TIMEOUT;
+       spin_lock_init(&priv->page_lock);
        return 0;
 }
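
Finally, priv->page_lock gets its spin_lock_init() at probe time, before the
device can be opened or asked to transmit; taking a spinlock that was never
initialized is undefined and is flagged by CONFIG_DEBUG_SPINLOCK. For locks
with static storage the compile-time form does the same job (a sketch, not
from this driver):

    static DEFINE_SPINLOCK(example_lock);  /* initialized at build time */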