[lantiq] bump kernel to 3.2.12
[openwrt.git] / target / linux / lantiq / patches-3.2 / 0044-MIPS-NET-several-fixes-to-etop-driver.patch
1 From 06663beb0230c02d1962eca8d9f6709c2e852328 Mon Sep 17 00:00:00 2001
2 From: John Crispin <blogic@openwrt.org>
3 Date: Wed, 21 Mar 2012 18:14:06 +0100
4 Subject: [PATCH 44/70] MIPS: NET: several fixes to etop driver
5
6 ---
7  drivers/net/ethernet/lantiq_etop.c |  208 +++++++++++++++++++-----------------
8  1 files changed, 108 insertions(+), 100 deletions(-)
9
10 diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
11 index a084d74..1a807d8 100644
12 --- a/drivers/net/ethernet/lantiq_etop.c
13 +++ b/drivers/net/ethernet/lantiq_etop.c
14 @@ -103,15 +103,6 @@
15  /* the newer xway socks have a embedded 3/7 port gbit multiplexer */
16  #define ltq_has_gbit()         (ltq_is_ar9() || ltq_is_vr9())
17  
18 -/* use 2 static channels for TX/RX
19 -   depending on the SoC we need to use different DMA channels for ethernet */
20 -#define LTQ_ETOP_TX_CHANNEL    1
21 -#define LTQ_ETOP_RX_CHANNEL    ((ltq_is_ase()) ? (5) : \
22 -                               ((ltq_has_gbit()) ? (0) : (6)))
23 -
24 -#define IS_TX(x)               (x == LTQ_ETOP_TX_CHANNEL)
25 -#define IS_RX(x)               (x == LTQ_ETOP_RX_CHANNEL)
26 -
27  #define ltq_etop_r32(x)                ltq_r32(ltq_etop_membase + (x))
28  #define ltq_etop_w32(x, y)     ltq_w32(x, ltq_etop_membase + (y))
29  #define ltq_etop_w32_mask(x, y, z)     \
30 @@ -128,8 +119,8 @@ static void __iomem *ltq_etop_membase;
31  static void __iomem *ltq_gbit_membase;
32  
33  struct ltq_etop_chan {
34 -       int idx;
35         int tx_free;
36 +       int irq;
37         struct net_device *netdev;
38         struct napi_struct napi;
39         struct ltq_dma_channel dma;
40 @@ -144,8 +135,8 @@ struct ltq_etop_priv {
41         struct mii_bus *mii_bus;
42         struct phy_device *phydev;
43  
44 -       struct ltq_etop_chan ch[MAX_DMA_CHAN];
45 -       int tx_free[MAX_DMA_CHAN >> 1];
46 +       struct ltq_etop_chan txch;
47 +       struct ltq_etop_chan rxch;
48  
49         spinlock_t lock;
50  
51 @@ -206,8 +197,10 @@ ltq_etop_poll_rx(struct napi_struct *napi, int budget)
52  {
53         struct ltq_etop_chan *ch = container_of(napi,
54                                 struct ltq_etop_chan, napi);
55 +       struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
56         int rx = 0;
57         int complete = 0;
58 +       unsigned long flags;
59  
60         while ((rx < budget) && !complete) {
61                 struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
62 @@ -221,7 +214,9 @@ ltq_etop_poll_rx(struct napi_struct *napi, int budget)
63         }
64         if (complete || !rx) {
65                 napi_complete(&ch->napi);
66 +               spin_lock_irqsave(&priv->lock, flags);
67                 ltq_dma_ack_irq(&ch->dma);
68 +               spin_unlock_irqrestore(&priv->lock, flags);
69         }
70         return rx;
71  }
72 @@ -233,7 +228,7 @@ ltq_etop_poll_tx(struct napi_struct *napi, int budget)
73                 container_of(napi, struct ltq_etop_chan, napi);
74         struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
75         struct netdev_queue *txq =
76 -               netdev_get_tx_queue(ch->netdev, ch->idx >> 1);
77 +               netdev_get_tx_queue(ch->netdev, ch->dma.nr >> 1);
78         unsigned long flags;
79  
80         spin_lock_irqsave(&priv->lock, flags);
81 @@ -251,7 +246,9 @@ ltq_etop_poll_tx(struct napi_struct *napi, int budget)
82         if (netif_tx_queue_stopped(txq))
83                 netif_tx_start_queue(txq);
84         napi_complete(&ch->napi);
85 +       spin_lock_irqsave(&priv->lock, flags);
86         ltq_dma_ack_irq(&ch->dma);
87 +       spin_unlock_irqrestore(&priv->lock, flags);
88         return 1;
89  }
90  
91 @@ -259,9 +256,10 @@ static irqreturn_t
92  ltq_etop_dma_irq(int irq, void *_priv)
93  {
94         struct ltq_etop_priv *priv = _priv;
95 -       int ch = irq - LTQ_DMA_ETOP;
96 -
97 -       napi_schedule(&priv->ch[ch].napi);
98 +       if (irq == priv->txch.dma.irq)
99 +               napi_schedule(&priv->txch.napi);
100 +       else
101 +               napi_schedule(&priv->rxch.napi);
102         return IRQ_HANDLED;
103  }
104  
105 @@ -273,7 +271,7 @@ ltq_etop_free_channel(struct net_device *dev, struct ltq_etop_chan *ch)
106         ltq_dma_free(&ch->dma);
107         if (ch->dma.irq)
108                 free_irq(ch->dma.irq, priv);
109 -       if (IS_RX(ch->idx)) {
110 +       if (ch == &priv->rxch) {
111                 int desc;
112                 for (desc = 0; desc < LTQ_DESC_NUM; desc++)
113                         dev_kfree_skb_any(ch->skb[ch->dma.desc]);
114 @@ -284,7 +282,6 @@ static void
115  ltq_etop_hw_exit(struct net_device *dev)
116  {
117         struct ltq_etop_priv *priv = netdev_priv(dev);
118 -       int i;
119  
120         clk_disable(priv->clk_ppe);
121  
122 @@ -296,9 +293,8 @@ ltq_etop_hw_exit(struct net_device *dev)
123                 clk_disable(priv->clk_ephycgu);
124         }
125  
126 -       for (i = 0; i < MAX_DMA_CHAN; i++)
127 -               if (IS_TX(i) || IS_RX(i))
128 -                       ltq_etop_free_channel(dev, &priv->ch[i]);
129 +       ltq_etop_free_channel(dev, &priv->txch);
130 +       ltq_etop_free_channel(dev, &priv->rxch);
131  }
132  
133  static void
134 @@ -326,8 +322,6 @@ ltq_etop_hw_init(struct net_device *dev)
135  {
136         struct ltq_etop_priv *priv = netdev_priv(dev);
137         unsigned int mii_mode = priv->pldata->mii_mode;
138 -       int err = 0;
139 -       int i;
140  
141         clk_enable(priv->clk_ppe);
142  
143 @@ -369,31 +363,50 @@ ltq_etop_hw_init(struct net_device *dev)
144         /* enable crc generation */
145         ltq_etop_w32(PPE32_CGEN, LQ_PPE32_ENET_MAC_CFG);
146  
147 +       return 0;
148 +}
149 +
150 +static int
151 +ltq_etop_dma_init(struct net_device *dev)
152 +{
153 +       struct ltq_etop_priv *priv = netdev_priv(dev);
154 +       int tx = 1;
155 +       int rx = ((ltq_is_ase()) ? (5) : \
156 +               ((ltq_has_gbit()) ? (0) : (6)));
157 +       int tx_irq = LTQ_DMA_ETOP + tx;
158 +       int rx_irq = LTQ_DMA_ETOP + rx;
159 +       int err;
160 +
161         ltq_dma_init_port(DMA_PORT_ETOP);
162  
163 -       for (i = 0; i < MAX_DMA_CHAN && !err; i++) {
164 -               int irq = LTQ_DMA_ETOP + i;
165 -               struct ltq_etop_chan *ch = &priv->ch[i];
166 -
167 -               ch->idx = ch->dma.nr = i;
168 -
169 -               if (IS_TX(i)) {
170 -                       ltq_dma_alloc_tx(&ch->dma);
171 -                       err = request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED,
172 -                               "etop_tx", priv);
173 -               } else if (IS_RX(i)) {
174 -                       ltq_dma_alloc_rx(&ch->dma);
175 -                       for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM;
176 -                                       ch->dma.desc++)
177 -                               if (ltq_etop_alloc_skb(ch))
178 -                                       err = -ENOMEM;
179 -                       ch->dma.desc = 0;
180 -                       err = request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED,
181 -                               "etop_rx", priv);
182 +       priv->txch.dma.nr = tx;
183 +       ltq_dma_alloc_tx(&priv->txch.dma);
184 +       err = request_irq(tx_irq, ltq_etop_dma_irq, IRQF_DISABLED,
185 +               "eth_tx", priv);
186 +       if (err) {
187 +               netdev_err(dev, "failed to allocate tx irq\n");
188 +               goto err_out;
189 +       }
190 +       priv->txch.dma.irq = tx_irq;
191 +
192 +       priv->rxch.dma.nr = rx;
193 +       ltq_dma_alloc_rx(&priv->rxch.dma);
194 +       for (priv->rxch.dma.desc = 0; priv->rxch.dma.desc < LTQ_DESC_NUM;
195 +                       priv->rxch.dma.desc++) {
196 +               if (ltq_etop_alloc_skb(&priv->rxch)) {
197 +                       netdev_err(dev, "failed to allocate skbs\n");
198 +                       err = -ENOMEM;
199 +                       goto err_out;
200                 }
201 -               if (!err)
202 -                       ch->dma.irq = irq;
203         }
204 +       priv->rxch.dma.desc = 0;
205 +       err = request_irq(rx_irq, ltq_etop_dma_irq, IRQF_DISABLED,
206 +               "eth_rx", priv);
207 +       if (err)
208 +               netdev_err(dev, "failed to allocate rx irq\n");
209 +       else
210 +               priv->rxch.dma.irq = rx_irq;
211 +err_out:
212         return err;
213  }
214  
215 @@ -410,7 +423,10 @@ ltq_etop_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
216  {
217         struct ltq_etop_priv *priv = netdev_priv(dev);
218  
219 -       return phy_ethtool_gset(priv->phydev, cmd);
220 +       if (priv->phydev)
221 +               return phy_ethtool_gset(priv->phydev, cmd);
222 +       else
223 +               return 0;
224  }
225  
226  static int
227 @@ -418,7 +434,10 @@ ltq_etop_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
228  {
229         struct ltq_etop_priv *priv = netdev_priv(dev);
230  
231 -       return phy_ethtool_sset(priv->phydev, cmd);
232 +       if (priv->phydev)
233 +               return phy_ethtool_sset(priv->phydev, cmd);
234 +       else
235 +               return 0;
236  }
237  
238  static int
239 @@ -426,7 +445,10 @@ ltq_etop_nway_reset(struct net_device *dev)
240  {
241         struct ltq_etop_priv *priv = netdev_priv(dev);
242  
243 -       return phy_start_aneg(priv->phydev);
244 +       if (priv->phydev)
245 +               return phy_start_aneg(priv->phydev);
246 +       else
247 +               return 0;
248  }
249  
250  static const struct ethtool_ops ltq_etop_ethtool_ops = {
251 @@ -618,18 +640,19 @@ static int
252  ltq_etop_open(struct net_device *dev)
253  {
254         struct ltq_etop_priv *priv = netdev_priv(dev);
255 -       int i;
256 +       unsigned long flags;
257  
258 -       for (i = 0; i < MAX_DMA_CHAN; i++) {
259 -               struct ltq_etop_chan *ch = &priv->ch[i];
260 +       napi_enable(&priv->txch.napi);
261 +       napi_enable(&priv->rxch.napi);
262 +
263 +       spin_lock_irqsave(&priv->lock, flags);
264 +       ltq_dma_open(&priv->txch.dma);
265 +       ltq_dma_open(&priv->rxch.dma);
266 +       spin_unlock_irqrestore(&priv->lock, flags);
267  
268 -               if (!IS_TX(i) && (!IS_RX(i)))
269 -                       continue;
270 -               ltq_dma_open(&ch->dma);
271 -               napi_enable(&ch->napi);
272 -       }
273         if (priv->phydev)
274                 phy_start(priv->phydev);
275 +
276         netif_tx_start_all_queues(dev);
277         return 0;
278  }
279 @@ -638,19 +661,19 @@ static int
280  ltq_etop_stop(struct net_device *dev)
281  {
282         struct ltq_etop_priv *priv = netdev_priv(dev);
283 -       int i;
284 +       unsigned long flags;
285  
286         netif_tx_stop_all_queues(dev);
287         if (priv->phydev)
288                 phy_stop(priv->phydev);
289 -       for (i = 0; i < MAX_DMA_CHAN; i++) {
290 -               struct ltq_etop_chan *ch = &priv->ch[i];
291 +       napi_disable(&priv->txch.napi);
292 +       napi_disable(&priv->rxch.napi);
293 +
294 +       spin_lock_irqsave(&priv->lock, flags);
295 +       ltq_dma_close(&priv->txch.dma);
296 +       ltq_dma_close(&priv->rxch.dma);
297 +       spin_unlock_irqrestore(&priv->lock, flags);
298  
299 -               if (!IS_RX(i) && !IS_TX(i))
300 -                       continue;
301 -               napi_disable(&ch->napi);
302 -               ltq_dma_close(&ch->dma);
303 -       }
304         return 0;
305  }
306  
307 @@ -660,16 +683,16 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
308         int queue = skb_get_queue_mapping(skb);
309         struct netdev_queue *txq = netdev_get_tx_queue(dev, queue);
310         struct ltq_etop_priv *priv = netdev_priv(dev);
311 -       struct ltq_etop_chan *ch = &priv->ch[(queue << 1) | 1];
312 -       struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
313 +       struct ltq_dma_desc *desc =
314 +               &priv->txch.dma.desc_base[priv->txch.dma.desc];
315         unsigned long flags;
316         u32 byte_offset;
317         int len;
318  
319         len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
320  
321 -       if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
322 -               dev_kfree_skb_any(skb);
323 +       if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) ||
324 +                       priv->txch.skb[priv->txch.dma.desc]) {
325                 netdev_err(dev, "tx ring full\n");
326                 netif_tx_stop_queue(txq);
327                 return NETDEV_TX_BUSY;
328 @@ -677,7 +700,7 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
329  
330         /* dma needs to start on a 16 byte aligned address */
331         byte_offset = CPHYSADDR(skb->data) % 16;
332 -       ch->skb[ch->dma.desc] = skb;
333 +       priv->txch.skb[priv->txch.dma.desc] = skb;
334  
335         dev->trans_start = jiffies;
336  
337 @@ -687,11 +710,11 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
338         wmb();
339         desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
340                 LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
341 -       ch->dma.desc++;
342 -       ch->dma.desc %= LTQ_DESC_NUM;
343 +       priv->txch.dma.desc++;
344 +       priv->txch.dma.desc %= LTQ_DESC_NUM;
345         spin_unlock_irqrestore(&priv->lock, flags);
346  
347 -       if (ch->dma.desc_base[ch->dma.desc].ctl & LTQ_DMA_OWN)
348 +       if (priv->txch.dma.desc_base[priv->txch.dma.desc].ctl & LTQ_DMA_OWN)
349                 netif_tx_stop_queue(txq);
350  
351         return NETDEV_TX_OK;
352 @@ -776,6 +799,10 @@ ltq_etop_init(struct net_device *dev)
353         err = ltq_etop_hw_init(dev);
354         if (err)
355                 goto err_hw;
356 +       err = ltq_etop_dma_init(dev);
357 +       if (err)
358 +               goto err_hw;
359 +
360         ltq_etop_change_mtu(dev, 1500);
361  
362         memcpy(&mac, &priv->pldata->mac, sizeof(struct sockaddr));
363 @@ -811,6 +838,9 @@ ltq_etop_tx_timeout(struct net_device *dev)
364         err = ltq_etop_hw_init(dev);
365         if (err)
366                 goto err_hw;
367 +       err = ltq_etop_dma_init(dev);
368 +       if (err)
369 +               goto err_hw;
370         dev->trans_start = jiffies;
371         netif_wake_queue(dev);
372         return;
373 @@ -834,14 +864,13 @@ static const struct net_device_ops ltq_eth_netdev_ops = {
374         .ndo_tx_timeout = ltq_etop_tx_timeout,
375  };
376  
377 -static int __init
378 +static int __devinit
379  ltq_etop_probe(struct platform_device *pdev)
380  {
381         struct net_device *dev;
382         struct ltq_etop_priv *priv;
383         struct resource *res, *gbit_res;
384         int err;
385 -       int i;
386  
387         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
388         if (!res) {
389 @@ -917,15 +946,10 @@ ltq_etop_probe(struct platform_device *pdev)
390  
391         spin_lock_init(&priv->lock);
392  
393 -       for (i = 0; i < MAX_DMA_CHAN; i++) {
394 -               if (IS_TX(i))
395 -                       netif_napi_add(dev, &priv->ch[i].napi,
396 -                               ltq_etop_poll_tx, 8);
397 -               else if (IS_RX(i))
398 -                       netif_napi_add(dev, &priv->ch[i].napi,
399 -                               ltq_etop_poll_rx, 32);
400 -               priv->ch[i].netdev = dev;
401 -       }
402 +       netif_napi_add(dev, &priv->txch.napi, ltq_etop_poll_tx, 8);
403 +       netif_napi_add(dev, &priv->rxch.napi, ltq_etop_poll_rx, 32);
404 +       priv->txch.netdev = dev;
405 +       priv->rxch.netdev = dev;
406  
407         err = register_netdev(dev);
408         if (err)
409 @@ -955,6 +979,7 @@ ltq_etop_remove(struct platform_device *pdev)
410  }
411  
412  static struct platform_driver ltq_mii_driver = {
413 +       .probe = ltq_etop_probe,
414         .remove = __devexit_p(ltq_etop_remove),
415         .driver = {
416                 .name = "ltq_etop",
417 @@ -962,24 +987,7 @@ static struct platform_driver ltq_mii_driver = {
418         },
419  };
420  
421 -int __init
422 -init_ltq_etop(void)
423 -{
424 -       int ret = platform_driver_probe(&ltq_mii_driver, ltq_etop_probe);
425 -
426 -       if (ret)
427 -               pr_err("ltq_etop: Error registering platfom driver!");
428 -       return ret;
429 -}
430 -
431 -static void __exit
432 -exit_ltq_etop(void)
433 -{
434 -       platform_driver_unregister(&ltq_mii_driver);
435 -}
436 -
437 -module_init(init_ltq_etop);
438 -module_exit(exit_ltq_etop);
439 +module_platform_driver(ltq_mii_driver);
440  
441  MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
442  MODULE_DESCRIPTION("Lantiq SoC ETOP");
443 -- 
444 1.7.7.1
445