package/madwifi: refresh madwifi patches
package/madwifi/patches/300-napi_polling.patch
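This patch converts the madwifi receive path from the rx tasklet to NAPI polling: ath_rx_poll() is registered with netif_napi_add() on kernels >= 2.6.24 and via dev->poll/dev->weight on older kernels. RX and TX interrupts are masked in sc_imask while the poll routine and tx tasklets run and re-armed when processing completes, with pending interrupt state tracked in the new sc_isr field. Because frames are now delivered from poll (softirq) context, net80211 switches from netif_rx() to netif_receive_skb().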
--- a/ath/if_ath.c
+++ b/ath/if_ath.c
@@ -184,7 +184,11 @@ static void ath_recv_mgmt(struct ieee802
        struct sk_buff *, int, int, u_int64_t);
 static void ath_setdefantenna(struct ath_softc *, u_int);
 static struct ath_txq *ath_txq_setup(struct ath_softc *, int, int);
-static void ath_rx_tasklet(TQUEUE_ARG);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+static int ath_rx_poll(struct napi_struct *napi, int budget);
+#else
+static int ath_rx_poll(struct net_device *dev, int *budget);
+#endif
 static int ath_hardstart(struct sk_buff *, struct net_device *);
 static int ath_mgtstart(struct ieee80211com *, struct sk_buff *);
 #ifdef ATH_SUPERG_COMP
@@ -376,6 +380,9 @@ static u_int32_t ath_set_clamped_maxtxpo
                u_int32_t new_clamped_maxtxpower);
 static u_int32_t ath_get_real_maxtxpower(struct ath_softc *sc);
 
+static void ath_poll_disable(struct net_device *dev);
+static void ath_poll_enable(struct net_device *dev);
+
 /* calibrate every 30 secs in steady state but check every second at first. */
 static int ath_calinterval = ATH_SHORT_CALINTERVAL;
 static int ath_countrycode = CTRY_DEFAULT;     /* country code */
@@ -547,7 +554,6 @@ ath_attach(u_int16_t devid, struct net_d
 
        atomic_set(&sc->sc_txbuf_counter, 0);
 
-       ATH_INIT_TQUEUE(&sc->sc_rxtq,     ath_rx_tasklet,       dev);
        ATH_INIT_TQUEUE(&sc->sc_txtq,     ath_tx_tasklet,       dev);
        ATH_INIT_TQUEUE(&sc->sc_bmisstq,  ath_bmiss_tasklet,    dev);
        ATH_INIT_TQUEUE(&sc->sc_bstucktq, ath_bstuck_tasklet,   dev);
@@ -821,6 +827,12 @@ ath_attach(u_int16_t devid, struct net_d
        dev->set_mac_address = ath_set_mac_address;
        dev->change_mtu = ath_change_mtu;
        dev->tx_queue_len = ATH_TXBUF - ATH_TXBUF_MGT_RESERVED;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+       netif_napi_add(dev, &sc->sc_napi, ath_rx_poll, 64);
+#else
+       dev->poll = ath_rx_poll;
+       dev->weight = 64;
+#endif
 #ifdef USE_HEADERLEN_RESV
        dev->hard_header_len += sizeof(struct ieee80211_qosframe) +
                                sizeof(struct llc) +
@@ -2220,6 +2232,7 @@ ath_intr(int irq, void *dev_id, struct p
                (status & HAL_INT_GLOBAL)       ? " HAL_INT_GLOBAL"     : ""
                );
 
+       sc->sc_isr = status;
        status &= sc->sc_imask;                 /* discard unasked for bits */
        /* As soon as we know we have a real interrupt we intend to service, 
         * we will check to see if we need an initial hardware TSF reading. 
@@ -2277,7 +2290,21 @@ ath_intr(int irq, void *dev_id, struct p
                }
                if (status & (HAL_INT_RX | HAL_INT_RXPHY)) {
                        ath_uapsd_processtriggers(sc, hw_tsf);
-                       ATH_SCHEDULE_TQUEUE(&sc->sc_rxtq, &needmark);
+                       sc->sc_isr &= ~HAL_INT_RX;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+                       if (netif_rx_schedule_prep(dev, &sc->sc_napi))
+#else
+                       if (netif_rx_schedule_prep(dev))
+#endif
+                       {
+                               sc->sc_imask &= ~HAL_INT_RX;
+                               ath_hal_intrset(ah, sc->sc_imask);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+                               __netif_rx_schedule(dev, &sc->sc_napi);
+#else
+                               __netif_rx_schedule(dev);
+#endif
+                       }
                }
                if (status & HAL_INT_TX) {
 #ifdef ATH_SUPERG_DYNTURBO
@@ -2303,6 +2330,11 @@ ath_intr(int irq, void *dev_id, struct p
                                }
                        }
 #endif
+                       /* disable transmit interrupt */
+                       sc->sc_isr &= ~HAL_INT_TX;
+                       ath_hal_intrset(ah, sc->sc_imask & ~HAL_INT_TX);
+                       sc->sc_imask &= ~HAL_INT_TX;
+
                        ATH_SCHEDULE_TQUEUE(&sc->sc_txtq, &needmark);
                }
                if (status & HAL_INT_BMISS) {
@@ -2515,6 +2547,7 @@ ath_init(struct net_device *dev)
        if (sc->sc_tx99 != NULL)
                sc->sc_tx99->start(sc->sc_tx99);
 #endif
+       ath_poll_enable(dev);
 
 done:
        ATH_UNLOCK(sc);
@@ -2555,6 +2588,9 @@ ath_stop_locked(struct net_device *dev)
                if (sc->sc_tx99 != NULL)
                        sc->sc_tx99->stop(sc->sc_tx99);
 #endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+               ath_poll_disable(dev);
+#endif
                netif_stop_queue(dev);  /* XXX re-enabled by ath_newstate */
                dev->flags &= ~IFF_RUNNING;     /* NB: avoid recursion */
                ieee80211_stop_running(ic);     /* stop all VAPs */
@@ -4013,12 +4049,47 @@ ath_key_set(struct ieee80211vap *vap, co
        return ath_keyset(sc, k, mac, vap->iv_bss);
 }
 
+static void ath_poll_disable(struct net_device *dev)
+{
+       struct ath_softc *sc = dev->priv;
+
+       /*
+        * XXX Using in_softirq is not right since we might
+        * be called from other soft irq contexts than
+        * ath_rx_poll
+        */
+       if (!in_softirq()) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+               napi_disable(&sc->sc_napi);
+#else
+               netif_poll_disable(dev);
+#endif
+       }
+}
+
+static void ath_poll_enable(struct net_device *dev)
+{
+       struct ath_softc *sc = dev->priv;
+
+       /* NB: see above */
+       if (!in_softirq()) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+               napi_enable(&sc->sc_napi);
+#else
+               netif_poll_enable(dev);
+#endif
+       }
+}
+
+
 /*
  * Block/unblock tx+rx processing while a key change is done.
  * We assume the caller serializes key management operations
  * so we only need to worry about synchronization with other
  * uses that originate in the driver.
  */
+#define        IS_UP(_dev) \
+       (((_dev)->flags & (IFF_RUNNING|IFF_UP)) == (IFF_RUNNING|IFF_UP))
 static void
 ath_key_update_begin(struct ieee80211vap *vap)
 {
@@ -4032,14 +4103,9 @@ ath_key_update_begin(struct ieee80211vap
         * When called from the rx tasklet we cannot use
         * tasklet_disable because it will block waiting
         * for us to complete execution.
-        *
-        * XXX Using in_softirq is not right since we might
-        * be called from other soft irq contexts than
-        * ath_rx_tasklet.
         */
-       if (!in_softirq())
-               tasklet_disable(&sc->sc_rxtq);
-       netif_stop_queue(dev);
+       if (IS_UP(vap->iv_dev))
+               netif_stop_queue(dev);
 }
 
 static void
@@ -4051,9 +4117,9 @@ ath_key_update_end(struct ieee80211vap *
 #endif
 
        DPRINTF(sc, ATH_DEBUG_KEYCACHE, "End\n");
-       netif_wake_queue(dev);
-       if (!in_softirq())              /* NB: see above */
-               tasklet_enable(&sc->sc_rxtq);
+
+       if (IS_UP(vap->iv_dev))
+               netif_wake_queue(dev);
 }
 
 /*
@@ -6360,15 +6426,25 @@ ath_setdefantenna(struct ath_softc *sc,
        sc->sc_rxotherant = 0;
 }
 
-static void
-ath_rx_tasklet(TQUEUE_ARG data)
+static int
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+ath_rx_poll(struct napi_struct *napi, int budget)
+#else
+ath_rx_poll(struct net_device *dev, int *budget)
+#endif
 {
 #define        PA2DESC(_sc, _pa) \
        ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
                ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
-       struct net_device *dev = (struct net_device *)data;
-       struct ath_buf *bf;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+       struct ath_softc *sc = container_of(napi, struct ath_softc, sc_napi);
+       struct net_device *dev = sc->sc_dev;
+       u_int rx_limit = budget;
+#else
        struct ath_softc *sc = dev->priv;
+       u_int rx_limit = min(dev->quota, *budget);
+#endif
+       struct ath_buf *bf;
        struct ieee80211com *ic = &sc->sc_ic;
        struct ath_hal *ah = sc ? sc->sc_ah : NULL;
        struct ath_desc *ds;
@@ -6378,8 +6454,10 @@ ath_rx_tasklet(TQUEUE_ARG data)
        unsigned int len;
        int type;
        u_int phyerr;
+       u_int processed = 0, early_stop = 0;
 
        DPRINTF(sc, ATH_DEBUG_RX_PROC, "invoked\n");
+process_rx_again:
        do {
                bf = STAILQ_FIRST(&sc->sc_rxbuf);
                if (bf == NULL) {               /* XXX ??? can this happen */
@@ -6403,6 +6481,15 @@ ath_rx_tasklet(TQUEUE_ARG data)
                        /* NB: never process the self-linked entry at the end */
                        break;
                }
+
+               if (rx_limit-- < 2) {
+                       early_stop = 1;
+                       break;
+               }
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+               processed++;
+#endif
+
                skb = bf->bf_skb;
                if (skb == NULL) {
                        EPRINTF(sc, "Dropping; buffer contains NULL skbuff.\n");
@@ -6450,6 +6537,7 @@ ath_rx_tasklet(TQUEUE_ARG data)
                                sc->sc_stats.ast_rx_phyerr++;
                                phyerr = rs->rs_phyerr & 0x1f;
                                sc->sc_stats.ast_rx_phy[phyerr]++;
+                               goto rx_next;
                        }
                        if (rs->rs_status & HAL_RXERR_DECRYPT) {
                                /*
@@ -6645,9 +6733,39 @@ rx_next:
                STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
                ATH_RXBUF_UNLOCK_IRQ(sc);
        } while (ath_rxbuf_init(sc, bf) == 0);
+       if (!early_stop) {
+               unsigned long flags;
+               /* Check if more data is received while we were
+                * processing the descriptor chain.
+                */
+               local_irq_save(flags);
+               if (sc->sc_isr & HAL_INT_RX) {
+                       u_int64_t hw_tsf = ath_hal_gettsf64(ah);
+                       sc->sc_isr &= ~HAL_INT_RX;
+                       local_irq_restore(flags);
+                       ath_uapsd_processtriggers(sc, hw_tsf);
+                       goto process_rx_again;
+               }
+               local_irq_restore(flags);
+       }
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+       netif_rx_complete(dev, napi);
+#else
+       netif_rx_complete(dev);
+       *budget -= processed;
+       dev->quota -= processed;
+#endif
+       sc->sc_imask |= HAL_INT_RX;
+       ath_hal_intrset(ah, sc->sc_imask);
 
        /* rx signal state monitoring */
        ath_hal_rxmonitor(ah, &sc->sc_halstats, &sc->sc_curchan);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+       return processed;
+#else
+       return early_stop;
+#endif
 #undef PA2DESC
 }
 
@@ -8298,12 +8416,24 @@ ath_tx_tasklet_q0(TQUEUE_ARG data)
 {
        struct net_device *dev = (struct net_device *)data;
        struct ath_softc *sc = dev->priv;
+       unsigned long flags;
 
+process_tx_again:
        if (txqactive(sc->sc_ah, 0))
                ath_tx_processq(sc, &sc->sc_txq[0]);
        if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum))
                ath_tx_processq(sc, sc->sc_cabq);
 
+       local_irq_save(flags);
+       if (sc->sc_isr & HAL_INT_TX) {
+               sc->sc_isr &= ~HAL_INT_TX;
+               local_irq_restore(flags);
+               goto process_tx_again;
+       }
+       sc->sc_imask |= HAL_INT_TX;
+       ath_hal_intrset(sc->sc_ah, sc->sc_imask);
+       local_irq_restore(flags);
+
        netif_wake_queue(dev);
 
        if (sc->sc_softled)
@@ -8319,7 +8449,9 @@ ath_tx_tasklet_q0123(TQUEUE_ARG data)
 {
        struct net_device *dev = (struct net_device *)data;
        struct ath_softc *sc = dev->priv;
+       unsigned long flags;
 
+process_tx_again:
        /*
         * Process each active queue.
         */
@@ -8340,6 +8472,16 @@ ath_tx_tasklet_q0123(TQUEUE_ARG data)
        if (sc->sc_uapsdq && txqactive(sc->sc_ah, sc->sc_uapsdq->axq_qnum))
                ath_tx_processq(sc, sc->sc_uapsdq);
 
+       local_irq_save(flags);
+       if (sc->sc_isr & HAL_INT_TX) {
+               sc->sc_isr &= ~HAL_INT_TX;
+               local_irq_restore(flags);
+               goto process_tx_again;
+       }
+       sc->sc_imask |= HAL_INT_TX;
+       ath_hal_intrset(sc->sc_ah, sc->sc_imask);
+       local_irq_restore(flags);
+
        netif_wake_queue(dev);
 
        if (sc->sc_softled)
@@ -8355,13 +8497,25 @@ ath_tx_tasklet(TQUEUE_ARG data)
        struct net_device *dev = (struct net_device *)data;
        struct ath_softc *sc = dev->priv;
        unsigned int i;
+       unsigned long flags;
 
        /* Process each active queue. This includes sc_cabq, sc_xrtq and
         * sc_uapsdq */
+process_tx_again:
        for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
                if (ATH_TXQ_SETUP(sc, i) && txqactive(sc->sc_ah, i))
                        ath_tx_processq(sc, &sc->sc_txq[i]);
 
+       local_irq_save(flags);
+       if (sc->sc_isr & HAL_INT_TX) {
+               sc->sc_isr &= ~HAL_INT_TX;
+               local_irq_restore(flags);
+               goto process_tx_again;
+       }
+       sc->sc_imask |= HAL_INT_TX;
+       ath_hal_intrset(sc->sc_ah, sc->sc_imask);
+       local_irq_restore(flags);
+
        netif_wake_queue(dev);
 
        if (sc->sc_softled)
@@ -10296,9 +10450,9 @@ ath_change_mtu(struct net_device *dev, i
        dev->mtu = mtu;
        if ((dev->flags & IFF_RUNNING) && !sc->sc_invalid) {
                /* NB: the rx buffers may need to be reallocated */
-               tasklet_disable(&sc->sc_rxtq);
+               ath_poll_disable(dev);
                error = ath_reset(dev);
-               tasklet_enable(&sc->sc_rxtq);
+               ath_poll_enable(dev);
        }
        ATH_UNLOCK(sc);
 
--- a/ath/if_athvar.h
+++ b/ath/if_athvar.h
@@ -53,6 +53,10 @@
 # include      <asm/bitops.h>
 #endif
 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+#define irqs_disabled()                        0
+#endif
+
 /*
  * Deduce if tasklets are available.  If not then
  * fall back to using the immediate work queue.
@@ -616,6 +620,9 @@ struct ath_rp {
 struct ath_softc {
        struct ieee80211com sc_ic;              /* NB: must be first */
        struct net_device *sc_dev;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+       struct napi_struct sc_napi;
+#endif
        void __iomem *sc_iobase;                /* address of the device */
        struct semaphore sc_lock;               /* dev-level lock */
        struct net_device_stats sc_devstats;    /* device statistics */
@@ -730,7 +737,6 @@ struct ath_softc {
        struct ath_buf *sc_rxbufcur;            /* current rx buffer */
        u_int32_t *sc_rxlink;                   /* link ptr in last RX desc */
        spinlock_t sc_rxbuflock;
-       struct ATH_TQ_STRUCT sc_rxtq;           /* rx intr tasklet */
        struct ATH_TQ_STRUCT sc_rxorntq;        /* rxorn intr tasklet */
        u_int8_t sc_defant;                     /* current default antenna */
        u_int8_t sc_rxotherant;                 /* RXs on non-default antenna */
@@ -745,6 +751,7 @@ struct ath_softc {
        u_int sc_txintrperiod;                  /* tx interrupt batching */
        struct ath_txq sc_txq[HAL_NUM_TX_QUEUES];
        struct ath_txq *sc_ac2q[WME_NUM_AC];    /* WME AC -> h/w qnum */
+       HAL_INT sc_isr;                         /* unmasked ISR state */
        struct ATH_TQ_STRUCT sc_txtq;           /* tx intr tasklet */
        u_int8_t sc_grppoll_str[GRPPOLL_RATE_STR_LEN];
        struct ath_descdma sc_bdma;             /* beacon descriptors */
@@ -858,6 +865,8 @@ typedef void (*ath_callback) (struct ath
 #define        ATH_TXBUF_LOCK_CHECK(_sc)
 #endif
 
+#define ATH_DISABLE_INTR               local_irq_disable
+#define ATH_ENABLE_INTR                local_irq_enable
 
 #define        ATH_RXBUF_LOCK_INIT(_sc)        spin_lock_init(&(_sc)->sc_rxbuflock)
 #define        ATH_RXBUF_LOCK_DESTROY(_sc)
--- a/net80211/ieee80211_input.c
+++ b/net80211/ieee80211_input.c
@@ -1198,7 +1198,7 @@ ieee80211_deliver_data(struct ieee80211_
                        /* attach vlan tag */
                        struct ieee80211_node *ni_tmp = SKB_CB(skb)->ni;
                        if (vlan_hwaccel_receive_skb(skb, vap->iv_vlgrp, ni->ni_vlan) == NET_RX_DROP) {
-                               /* If netif_rx dropped the packet because 
+                               /* If netif_receive_skb dropped the packet because
                                 * device was too busy */
                                if (ni_tmp != NULL) {
                                        /* node reference was leaked */
@@ -1209,8 +1209,8 @@ ieee80211_deliver_data(struct ieee80211_
                        skb = NULL; /* SKB is no longer ours */
                } else {
                        struct ieee80211_node *ni_tmp = SKB_CB(skb)->ni;
-                       if (netif_rx(skb) == NET_RX_DROP) {
-                               /* If netif_rx dropped the packet because 
+                       if (netif_receive_skb(skb) == NET_RX_DROP) {
+                               /* If netif_receive_skb dropped the packet because
                                 * device was too busy */
                                if (ni_tmp != NULL) {
                                        /* node reference was leaked */
@@ -2322,8 +2322,8 @@ forward_mgmt_to_app(struct ieee80211vap
                skb1->protocol = __constant_htons(0x0019);  /* ETH_P_80211_RAW */
 
                ni_tmp = SKB_CB(skb1)->ni;
-               if (netif_rx(skb1) == NET_RX_DROP) {
-                       /* If netif_rx dropped the packet because 
+               if (netif_receive_skb(skb1) == NET_RX_DROP) {
+                       /* If netif_receive_skb dropped the packet because
                         * device was too busy */
                        if (ni_tmp != NULL) {
                                /* node reference was leaked */
--- a/net80211/ieee80211_monitor.c
+++ b/net80211/ieee80211_monitor.c
@@ -584,8 +584,8 @@ ieee80211_input_monitor(struct ieee80211
                        skb1->protocol = 
                                __constant_htons(0x0019); /* ETH_P_80211_RAW */
 
-                       if (netif_rx(skb1) == NET_RX_DROP) {
-                               /* If netif_rx dropped the packet because 
+                       if (netif_receive_skb(skb1) == NET_RX_DROP) {
+                               /* If netif_receive_skb dropped the packet because
                                 * device was too busy, reclaim the ref. in 
                                 * the skb. */
                                if (SKB_CB(skb1)->ni != NULL)
--- a/net80211/ieee80211_skb.c
+++ b/net80211/ieee80211_skb.c
@@ -73,7 +73,7 @@
 #undef dev_queue_xmit
 #undef kfree_skb
 #undef kfree_skb_fast
-#undef netif_rx
+#undef netif_receive_skb
 #undef pskb_copy
 #undef skb_clone
 #undef skb_copy
@@ -638,8 +638,8 @@ int  vlan_hwaccel_receive_skb_debug(stru
                grp, vlan_tag);
 }
 
-int netif_rx_debug(struct sk_buff *skb, const char* func, int line) {
-       return netif_rx(untrack_skb(skb, 0, func, line, __func__, __LINE__));
+int netif_receive_skb_debug(struct sk_buff *skb, const char* func, int line) {
+       return netif_receive_skb(untrack_skb(skb, 0, func, line, __func__, __LINE__));
 }
 
 struct sk_buff * alloc_skb_debug(unsigned int length, gfp_t gfp_mask,
@@ -760,7 +760,7 @@ struct sk_buff * skb_copy_expand_debug(c
 }
 
 EXPORT_SYMBOL(vlan_hwaccel_receive_skb_debug);
-EXPORT_SYMBOL(netif_rx_debug);
+EXPORT_SYMBOL(netif_receive_skb_debug);
 EXPORT_SYMBOL(alloc_skb_debug);
 EXPORT_SYMBOL(dev_alloc_skb_debug);
 EXPORT_SYMBOL(skb_clone_debug);
--- a/net80211/ieee80211_skb.h
+++ b/net80211/ieee80211_skb.h
@@ -116,7 +116,7 @@ int ieee80211_skb_references(void);
 int  vlan_hwaccel_receive_skb_debug(struct sk_buff *skb, 
                                    struct vlan_group *grp, unsigned short vlan_tag, 
                                    const char* func, int line);
-int netif_rx_debug(struct sk_buff *skb, const char* func, int line);
+int netif_receive_skb_debug(struct sk_buff *skb, const char* func, int line);
 struct sk_buff * alloc_skb_debug(unsigned int length, gfp_t gfp_mask,
                                 const char *func, int line);
 struct sk_buff * dev_alloc_skb_debug(unsigned int length,
@@ -151,7 +151,7 @@ struct sk_buff * skb_copy_expand_debug(c
 #undef dev_queue_xmit
 #undef kfree_skb
 #undef kfree_skb_fast
-#undef netif_rx
+#undef netif_receive_skb
 #undef pskb_copy
 #undef skb_clone
 #undef skb_copy
@@ -168,8 +168,8 @@ struct sk_buff * skb_copy_expand_debug(c
        skb_copy_expand_debug(_skb, _newheadroom, _newtailroom, _gfp_mask, __func__, __LINE__)
 #define vlan_hwaccel_receive_skb(_skb, _grp, _tag) \
        vlan_hwaccel_receive_skb_debug(_skb, _grp, _tag, __func__, __LINE__)
-#define netif_rx(_skb) \
-       netif_rx_debug(_skb, __func__, __LINE__)
+#define netif_receive_skb(_skb) \
+       netif_receive_skb_debug(_skb, __func__, __LINE__)
 #define        alloc_skb(_length, _gfp_mask) \
        alloc_skb_debug(_length, _gfp_mask, __func__, __LINE__)
 #define        dev_alloc_skb(_length) \