702d1822f7f93ac48fe474ee7ebe80f1e9c5359f
[15.05/openwrt.git] / target / linux / ixp4xx / patches-3.14 / 002-ixp4xx_eth-use-parent-device-for-dma-allocations.patch
1 From 1d67040af0144c549f4db8144d2ccc253ff8639c Mon Sep 17 00:00:00 2001
2 From: Jonas Gorski <jogo@openwrt.org>
3 Date: Mon, 1 Jul 2013 16:39:28 +0200
4 Subject: [PATCH 2/2] net: ixp4xx_eth: use parent device for dma allocations
5
6 Now that the platform device provides a dma_coherent_mask, use it for
7 dma operations.
8
9 This fixes ethernet on ixp4xx which was broken since 3.7.
10
11 Signed-off-by: Jonas Gorski <jogo@openwrt.org>
12 ---
13  drivers/net/ethernet/xscale/ixp4xx_eth.c |   23 ++++++++++++-----------
14  1 file changed, 12 insertions(+), 11 deletions(-)
15
16 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
17 +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
18 @@ -661,10 +661,10 @@ static inline void queue_put_desc(unsign
19  static inline void dma_unmap_tx(struct port *port, struct desc *desc)
20  {
21  #ifdef __ARMEB__
22 -       dma_unmap_single(&port->netdev->dev, desc->data,
23 +       dma_unmap_single(port->netdev->dev.parent, desc->data,
24                          desc->buf_len, DMA_TO_DEVICE);
25  #else
26 -       dma_unmap_single(&port->netdev->dev, desc->data & ~3,
27 +       dma_unmap_single(port->netdev->dev.parent, desc->data & ~3,
28                          ALIGN((desc->data & 3) + desc->buf_len, 4),
29                          DMA_TO_DEVICE);
30  #endif
31 @@ -731,9 +731,9 @@ static int eth_poll(struct napi_struct *
32  
33  #ifdef __ARMEB__
34                 if ((skb = netdev_alloc_skb(dev, RX_BUFF_SIZE))) {
35 -                       phys = dma_map_single(&dev->dev, skb->data,
36 +                       phys = dma_map_single(dev->dev.parent, skb->data,
37                                               RX_BUFF_SIZE, DMA_FROM_DEVICE);
38 -                       if (dma_mapping_error(&dev->dev, phys)) {
39 +                       if (dma_mapping_error(dev->dev.parent, phys)) {
40                                 dev_kfree_skb(skb);
41                                 skb = NULL;
42                         }
43 @@ -756,10 +756,11 @@ static int eth_poll(struct napi_struct *
44  #ifdef __ARMEB__
45                 temp = skb;
46                 skb = port->rx_buff_tab[n];
47 -               dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN,
48 +               dma_unmap_single(dev->dev.parent, desc->data - NET_IP_ALIGN,
49                                  RX_BUFF_SIZE, DMA_FROM_DEVICE);
50  #else
51 -               dma_sync_single_for_cpu(&dev->dev, desc->data - NET_IP_ALIGN,
52 +               dma_sync_single_for_cpu(dev->dev.parent,
53 +                                       desc->data - NET_IP_ALIGN,
54                                         RX_BUFF_SIZE, DMA_FROM_DEVICE);
55                 memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
56                               ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4);
57 @@ -878,7 +879,7 @@ static int eth_xmit(struct sk_buff *skb,
58         memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
59  #endif
60  
61 -       phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
62 -       if (dma_mapping_error(&dev->dev, phys)) {
63 +       phys = dma_map_single(dev->dev.parent, mem, bytes, DMA_TO_DEVICE);
64 +       if (dma_mapping_error(dev->dev.parent, phys)) {
64                 dev_kfree_skb(skb);
65  #ifndef __ARMEB__
66 @@ -1128,7 +1129,7 @@ static int init_queues(struct port *port
67         int i;
68  
69         if (!ports_open) {
70 -               dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev,
71 +               dma_pool = dma_pool_create(DRV_NAME, port->netdev->dev.parent,
72                                            POOL_ALLOC_SIZE, 32, 0);
73                 if (!dma_pool)
74                         return -ENOMEM;
75 @@ -1156,9 +1157,9 @@ static int init_queues(struct port *port
76                 data = buff;
77  #endif
78                 desc->buf_len = MAX_MRU;
79 -               desc->data = dma_map_single(&port->netdev->dev, data,
80 +               desc->data = dma_map_single(port->netdev->dev.parent, data,
81                                             RX_BUFF_SIZE, DMA_FROM_DEVICE);
82 -               if (dma_mapping_error(&port->netdev->dev, desc->data)) {
83 +               if (dma_mapping_error(port->netdev->dev.parent, desc->data)) {
84                         free_buffer(buff);
85                         return -EIO;
86                 }
87 @@ -1178,7 +1179,7 @@ static void destroy_queues(struct port *
88                         struct desc *desc = rx_desc_ptr(port, i);
89                         buffer_t *buff = port->rx_buff_tab[i];
90                         if (buff) {
91 -                               dma_unmap_single(&port->netdev->dev,
92 +                               dma_unmap_single(port->netdev->dev.parent,
93                                                  desc->data - NET_IP_ALIGN,
94                                                  RX_BUFF_SIZE, DMA_FROM_DEVICE);
95                                 free_buffer(buff);