target/linux/generic/patches-3.12/770-bgmac-backport.patch
Patches for bgmac backported from net-next/master. They add Byte Queue Limits
(BQL) accounting to the driver's TX path and rework the RX path so that
received buffers are handed directly to the network stack instead of being
copied into freshly allocated skbs.

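The sketch below isolates the BQL accounting pattern that the TX-side hunks
introduce. It is illustrative only: my_priv, my_slot, my_start_xmit,
my_tx_complete and my_chip_reset are placeholder names, not bgmac symbols;
the netdev_sent_queue(), netdev_completed_queue() and netdev_reset_queue()
calls are the kernel API that the patch wires into bgmac_dma_tx_add(),
bgmac_dma_tx_free() and bgmac_chip_reset().

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_slot {
        struct sk_buff *skb;
};

struct my_priv {
        struct net_device *net_dev;
        struct my_slot slots[64];
};

/* Submit path: account the bytes right after posting the descriptor. */
static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        /* ... fill a hardware descriptor for skb and advance the ring ... */
        netdev_sent_queue(dev, skb->len);
        /* ... ring the doorbell ... */
        return NETDEV_TX_OK;
}

/* Completion path: report what the hardware actually consumed. */
static void my_tx_complete(struct my_priv *priv, int first, int count)
{
        unsigned int pkts_compl = 0, bytes_compl = 0;
        int i;

        for (i = first; i < first + count; i++) {
                struct my_slot *slot = &priv->slots[i];

                bytes_compl += slot->skb->len;
                pkts_compl++;
                dev_kfree_skb(slot->skb);
                slot->skb = NULL;
        }

        netdev_completed_queue(priv->net_dev, pkts_compl, bytes_compl);

        if (netif_queue_stopped(priv->net_dev))
                netif_wake_queue(priv->net_dev);
}

/* Chip reset empties the TX ring, so the BQL state must be reset as well. */
static void my_chip_reset(struct my_priv *priv)
{
        /* ... reinitialize the hardware rings ... */
        netdev_reset_queue(priv->net_dev);
}

Each netdev_completed_queue() call must balance the netdev_sent_queue() made
at submit time; that pairing is what lets BQL size the TX queue dynamically.
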
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -149,6 +149,8 @@ static netdev_tx_t bgmac_dma_tx_add(stru
        dma_desc->ctl0 = cpu_to_le32(ctl0);
        dma_desc->ctl1 = cpu_to_le32(ctl1);
 
+       netdev_sent_queue(net_dev, skb->len);
+
        wmb();
 
        /* Increase ring->end to point empty slot. We tell hardware the first
@@ -178,6 +180,7 @@ static void bgmac_dma_tx_free(struct bgm
        struct device *dma_dev = bgmac->core->dma_dev;
        int empty_slot;
        bool freed = false;
+       unsigned bytes_compl = 0, pkts_compl = 0;
 
        /* The last slot that hardware didn't consume yet */
        empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
@@ -195,6 +198,9 @@ static void bgmac_dma_tx_free(struct bgm
                                         slot->skb->len, DMA_TO_DEVICE);
                        slot->dma_addr = 0;
 
+                       bytes_compl += slot->skb->len;
+                       pkts_compl++;
+
                        /* Free memory! :) */
                        dev_kfree_skb(slot->skb);
                        slot->skb = NULL;
@@ -208,6 +214,8 @@ static void bgmac_dma_tx_free(struct bgm
                freed = true;
        }
 
+       netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl);
+
        if (freed && netif_queue_stopped(bgmac->net_dev))
                netif_wake_queue(bgmac->net_dev);
 }
@@ -244,31 +252,59 @@ static int bgmac_dma_rx_skb_for_slot(str
                                     struct bgmac_slot_info *slot)
 {
        struct device *dma_dev = bgmac->core->dma_dev;
+       struct sk_buff *skb;
+       dma_addr_t dma_addr;
        struct bgmac_rx_header *rx;
 
        /* Alloc skb */
-       slot->skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
-       if (!slot->skb)
+       skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
+       if (!skb)
                return -ENOMEM;
 
        /* Poison - if everything goes fine, hardware will overwrite it */
-       rx = (struct bgmac_rx_header *)slot->skb->data;
+       rx = (struct bgmac_rx_header *)skb->data;
        rx->len = cpu_to_le16(0xdead);
        rx->flags = cpu_to_le16(0xbeef);
 
        /* Map skb for the DMA */
-       slot->dma_addr = dma_map_single(dma_dev, slot->skb->data,
-                                       BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
-       if (dma_mapping_error(dma_dev, slot->dma_addr)) {
+       dma_addr = dma_map_single(dma_dev, skb->data,
+                                 BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
+       if (dma_mapping_error(dma_dev, dma_addr)) {
                bgmac_err(bgmac, "DMA mapping error\n");
+               dev_kfree_skb(skb);
                return -ENOMEM;
        }
+
+       /* Update the slot */
+       slot->skb = skb;
+       slot->dma_addr = dma_addr;
+
        if (slot->dma_addr & 0xC0000000)
                bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
 
        return 0;
 }
 
+static void bgmac_dma_rx_setup_desc(struct bgmac *bgmac,
+                                   struct bgmac_dma_ring *ring, int desc_idx)
+{
+       struct bgmac_dma_desc *dma_desc = ring->cpu_base + desc_idx;
+       u32 ctl0 = 0, ctl1 = 0;
+
+       if (desc_idx == ring->num_slots - 1)
+               ctl0 |= BGMAC_DESC_CTL0_EOT;
+       ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
+       /* Is there any BGMAC device that requires extension? */
+       /* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
+        * B43_DMA64_DCTL1_ADDREXT_MASK;
+        */
+
+       dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[desc_idx].dma_addr));
+       dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr));
+       dma_desc->ctl0 = cpu_to_le32(ctl0);
+       dma_desc->ctl1 = cpu_to_le32(ctl1);
+}
+
 static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
                             int weight)
 {
@@ -287,7 +323,6 @@ static int bgmac_dma_rx_read(struct bgma
                struct device *dma_dev = bgmac->core->dma_dev;
                struct bgmac_slot_info *slot = &ring->slots[ring->start];
                struct sk_buff *skb = slot->skb;
-               struct sk_buff *new_skb;
                struct bgmac_rx_header *rx;
                u16 len, flags;
 
@@ -300,38 +335,51 @@ static int bgmac_dma_rx_read(struct bgma
                len = le16_to_cpu(rx->len);
                flags = le16_to_cpu(rx->flags);
 
-               /* Check for poison and drop or pass the packet */
-               if (len == 0xdead && flags == 0xbeef) {
-                       bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
-                                 ring->start);
-               } else {
+               do {
+                       dma_addr_t old_dma_addr = slot->dma_addr;
+                       int err;
+
+                       /* Check for poison and drop or pass the packet */
+                       if (len == 0xdead && flags == 0xbeef) {
+                               bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
                                         ring->start);
+                               dma_sync_single_for_device(dma_dev,
                                                          slot->dma_addr,
                                                          BGMAC_RX_BUF_SIZE,
                                                          DMA_FROM_DEVICE);
+                               break;
+                       }
+
                        /* Omit CRC. */
                        len -= ETH_FCS_LEN;
 
-                       new_skb = netdev_alloc_skb_ip_align(bgmac->net_dev, len);
-                       if (new_skb) {
-                               skb_put(new_skb, len);
-                               skb_copy_from_linear_data_offset(skb, BGMAC_RX_FRAME_OFFSET,
                                                                new_skb->data,
                                                                len);
-                               skb_checksum_none_assert(skb);
-                               new_skb->protocol =
-                                       eth_type_trans(new_skb, bgmac->net_dev);
-                               netif_receive_skb(new_skb);
-                               handled++;
-                       } else {
-                               bgmac->net_dev->stats.rx_dropped++;
-                               bgmac_err(bgmac, "Allocation of skb for copying packet failed!\n");
+                       /* Prepare new skb as replacement */
+                       err = bgmac_dma_rx_skb_for_slot(bgmac, slot);
+                       if (err) {
+                               /* Poison the old skb */
+                               rx->len = cpu_to_le16(0xdead);
+                               rx->flags = cpu_to_le16(0xbeef);
+
+                               dma_sync_single_for_device(dma_dev,
                                                          slot->dma_addr,
                                                          BGMAC_RX_BUF_SIZE,
                                                          DMA_FROM_DEVICE);
+                               break;
                        }
+                       bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);
 
-                       /* Poison the old skb */
-                       rx->len = cpu_to_le16(0xdead);
-                       rx->flags = cpu_to_le16(0xbeef);
-               }
-
-               /* Make it back accessible to the hardware */
-               dma_sync_single_for_device(dma_dev, slot->dma_addr,
                                          BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
+                       /* Unmap old skb, we'll pass it to the netif */
+                       dma_unmap_single(dma_dev, old_dma_addr,
                                        BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
+
+                       skb_put(skb, BGMAC_RX_FRAME_OFFSET + len);
+                       skb_pull(skb, BGMAC_RX_FRAME_OFFSET);
+
+                       skb_checksum_none_assert(skb);
+                       skb->protocol = eth_type_trans(skb, bgmac->net_dev);
+                       netif_receive_skb(skb);
+                       handled++;
+               } while (0);
 
                if (++ring->start >= BGMAC_RX_RING_SLOTS)
                        ring->start = 0;
@@ -495,8 +543,6 @@ err_dma_free:
 static void bgmac_dma_init(struct bgmac *bgmac)
 {
        struct bgmac_dma_ring *ring;
-       struct bgmac_dma_desc *dma_desc;
-       u32 ctl0, ctl1;
        int i;
 
        for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
@@ -529,23 +575,8 @@ static void bgmac_dma_init(struct bgmac
                if (ring->unaligned)
                        bgmac_dma_rx_enable(bgmac, ring);
 
-               for (j = 0, dma_desc = ring->cpu_base; j < ring->num_slots;
-                    j++, dma_desc++) {
-                       ctl0 = ctl1 = 0;
-
-                       if (j == ring->num_slots - 1)
-                               ctl0 |= BGMAC_DESC_CTL0_EOT;
-                       ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
-                       /* Is there any BGMAC device that requires extension? */
-                       /* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
-                        * B43_DMA64_DCTL1_ADDREXT_MASK;
-                        */
-
-                       dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[j].dma_addr));
-                       dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[j].dma_addr));
-                       dma_desc->ctl0 = cpu_to_le32(ctl0);
-                       dma_desc->ctl1 = cpu_to_le32(ctl1);
-               }
+               for (j = 0; j < ring->num_slots; j++)
+                       bgmac_dma_rx_setup_desc(bgmac, ring, j);
 
                bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
                            ring->index_base +
@@ -988,6 +1019,8 @@ static void bgmac_chip_reset(struct bgma
        bgmac_miiconfig(bgmac);
        bgmac_phy_init(bgmac);
 
+       netdev_reset_queue(bgmac->net_dev);
+
        bgmac->int_status = 0;
 }
237