update madwifi to latest trunk, include a few more fixes/improvements
[openwrt.git] / package / madwifi / patches / 300-napi_polling.patch
1 Index: madwifi-trunk-r3280/ath/if_ath.c
2 ===================================================================
3 --- madwifi-trunk-r3280.orig/ath/if_ath.c       2008-01-28 17:29:22.989895792 +0100
4 +++ madwifi-trunk-r3280/ath/if_ath.c    2008-01-28 17:46:59.249785581 +0100
5 @@ -184,7 +184,7 @@
6         struct sk_buff *, int, int, u_int64_t);
7  static void ath_setdefantenna(struct ath_softc *, u_int);
8  static struct ath_txq *ath_txq_setup(struct ath_softc *, int, int);
9 -static void ath_rx_tasklet(TQUEUE_ARG);
10 +static int ath_rx_poll(struct net_device *dev, int *budget);
11  static int ath_hardstart(struct sk_buff *, struct net_device *);
12  static int ath_mgtstart(struct ieee80211com *, struct sk_buff *);
13  #ifdef ATH_SUPERG_COMP
14 @@ -542,7 +542,6 @@
15         ATH_TXBUF_LOCK_INIT(sc);
16         ATH_RXBUF_LOCK_INIT(sc);
17  
18 -       ATH_INIT_TQUEUE(&sc->sc_rxtq,     ath_rx_tasklet,       dev);
19         ATH_INIT_TQUEUE(&sc->sc_txtq,     ath_tx_tasklet,       dev);
20         ATH_INIT_TQUEUE(&sc->sc_bmisstq,  ath_bmiss_tasklet,    dev);
21         ATH_INIT_TQUEUE(&sc->sc_bstucktq, ath_bstuck_tasklet,   dev);
22 @@ -816,6 +815,8 @@
23         dev->set_mac_address = ath_set_mac_address;
24         dev->change_mtu = ath_change_mtu;
25         dev->tx_queue_len = ATH_TXBUF - ATH_TXBUF_MGT_RESERVED;
26 +       dev->poll = ath_rx_poll;
27 +       dev->weight = 64;
28  #ifdef USE_HEADERLEN_RESV
29         dev->hard_header_len += sizeof(struct ieee80211_qosframe) +
30                                 sizeof(struct llc) +
31 @@ -2206,6 +2207,7 @@
32                 (status & HAL_INT_GLOBAL)       ? " HAL_INT_GLOBAL"     : ""
33                 );
34  
35 +       sc->sc_isr = status;
36         status &= sc->sc_imask;                 /* discard unasked for bits */
37         /* As soon as we know we have a real interrupt we intend to service, 
38          * we will check to see if we need an initial hardware TSF reading. 
39 @@ -2263,7 +2265,14 @@
40                 }
41                 if (status & (HAL_INT_RX | HAL_INT_RXPHY)) {
42                         ath_uapsd_processtriggers(sc, hw_tsf);
43 -                       ATH_SCHEDULE_TQUEUE(&sc->sc_rxtq, &needmark);
44 +                       sc->sc_isr &= ~HAL_INT_RX;
45 +                       if (netif_rx_schedule_prep(dev)) {
46 +#ifndef ATH_PRECISE_TSF
47 +                               sc->sc_imask &= ~HAL_INT_RX;
48 +                               ath_hal_intrset(ah, sc->sc_imask);
49 +#endif
50 +                               __netif_rx_schedule(dev);
51 +                       }
52                 }
53                 if (status & HAL_INT_TX) {
54  #ifdef ATH_SUPERG_DYNTURBO
55 @@ -2289,6 +2298,11 @@
56                                 }
57                         }
58  #endif
59 +                       /* disable transmit interrupt */
60 +                       sc->sc_isr &= ~HAL_INT_TX;
61 +                       ath_hal_intrset(ah, sc->sc_imask & ~HAL_INT_TX);
62 +                       sc->sc_imask &= ~HAL_INT_TX;
63 +
64                         ATH_SCHEDULE_TQUEUE(&sc->sc_txtq, &needmark);
65                 }
66                 if (status & HAL_INT_BMISS) {
67 @@ -4011,10 +4025,10 @@
68          *
69          * XXX Using in_softirq is not right since we might
70          * be called from other soft irq contexts than
71 -        * ath_rx_tasklet.
72 +        * ath_rx_poll
73          */
74         if (!in_softirq())
75 -               tasklet_disable(&sc->sc_rxtq);
76 +               netif_poll_disable(dev);
77         netif_stop_queue(dev);
78  }
79  
80 @@ -4027,7 +4041,7 @@
81         DPRINTF(sc, ATH_DEBUG_KEYCACHE, "End\n");
82         netif_wake_queue(dev);
83         if (!in_softirq())              /* NB: see above */
84 -               tasklet_enable(&sc->sc_rxtq);
85 +               netif_poll_enable(dev);
86  }
87  
88  /*
89 @@ -6329,13 +6343,12 @@
90         sc->sc_rxotherant = 0;
91  }
92  
93 -static void
94 -ath_rx_tasklet(TQUEUE_ARG data)
95 +static int
96 +ath_rx_poll(struct net_device *dev, int *budget)
97  {
98  #define        PA2DESC(_sc, _pa) \
99         ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
100                 ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
101 -       struct net_device *dev = (struct net_device *)data;
102         struct ath_buf *bf;
103         struct ath_softc *sc = dev->priv;
104         struct ieee80211com *ic = &sc->sc_ic;
105 @@ -6347,8 +6360,11 @@
106         unsigned int len;
107         int type;
108         u_int phyerr;
109 +       int processed = 0, early_stop = 0;
110 +       int rx_limit = min(dev->quota, *budget);
111  
112         DPRINTF(sc, ATH_DEBUG_RX_PROC, "invoked\n");
113 +process_rx_again:
114         do {
115                 bf = STAILQ_FIRST(&sc->sc_rxbuf);
116                 if (bf == NULL) {               /* XXX ??? can this happen */
117 @@ -6372,6 +6388,13 @@
118                         /* NB: never process the self-linked entry at the end */
119                         break;
120                 }
121 +
122 +               if (--rx_limit < 0) {
123 +                       early_stop = 1;
124 +                       break;
125 +               }
126 +               processed++;
127 +
128                 skb = bf->bf_skb;
129                 if (skb == NULL) {
130                         EPRINTF(sc, "Dropping; buffer contains NULL skbuff.\n");
131 @@ -6419,6 +6442,7 @@
132                                 sc->sc_stats.ast_rx_phyerr++;
133                                 phyerr = rs->rs_phyerr & 0x1f;
134                                 sc->sc_stats.ast_rx_phy[phyerr]++;
135 +                               goto rx_next;
136                         }
137                         if (rs->rs_status & HAL_RXERR_DECRYPT) {
138                                 /*
139 @@ -6614,9 +6638,35 @@
140                 STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
141                 ATH_RXBUF_UNLOCK_IRQ(sc);
142         } while (ath_rxbuf_init(sc, bf) == 0);
143 +       if (!early_stop) {
144 +               unsigned long flags;
145 +               /* Check if more data is received while we were
146 +                * processing the descriptor chain.
147 +                */
148 +#ifndef ATH_PRECISE_TSF
149 +               local_irq_save(flags);
150 +               if (sc->sc_isr & HAL_INT_RX) {
151 +                       u_int64_t hw_tsf = ath_hal_gettsf64(ah);
152 +                       sc->sc_isr &= ~HAL_INT_RX;
153 +                       local_irq_restore(flags);
154 +                       ath_uapsd_processtriggers(sc, hw_tsf);
155 +                       goto process_rx_again;
156 +               }
157 +#endif
158 +               netif_rx_complete(dev);
159 +
160 +#ifndef ATH_PRECISE_TSF
161 +               sc->sc_imask |= HAL_INT_RX;
162 +               ath_hal_intrset(ah, sc->sc_imask);
163 +               local_irq_restore(flags);
164 +#endif
165 +       }
166 +       dev->quota -= processed;
167 +       *budget -= processed;
168  
169         /* rx signal state monitoring */
170         ath_hal_rxmonitor(ah, &sc->sc_halstats, &sc->sc_curchan);
171 +       return early_stop;
172  #undef PA2DESC
173  }
174  
175 @@ -8268,11 +8318,22 @@
176         struct net_device *dev = (struct net_device *)data;
177         struct ath_softc *sc = dev->priv;
178  
179 +process_tx_again:
180         if (txqactive(sc->sc_ah, 0))
181                 ath_tx_processq(sc, &sc->sc_txq[0]);
182         if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum))
183                 ath_tx_processq(sc, sc->sc_cabq);
184  
185 +       ATH_DISABLE_INTR();
186 +       if (sc->sc_isr & HAL_INT_TX) {
187 +               sc->sc_isr &= ~HAL_INT_TX;
188 +               ATH_ENABLE_INTR();
189 +               goto process_tx_again;
190 +       }
191 +       sc->sc_imask |= HAL_INT_TX;
192 +       ath_hal_intrset(sc->sc_ah, sc->sc_imask);
193 +       ATH_ENABLE_INTR();
194 +
195         netif_wake_queue(dev);
196  
197         if (sc->sc_softled)
198 @@ -8289,6 +8350,7 @@
199         struct net_device *dev = (struct net_device *)data;
200         struct ath_softc *sc = dev->priv;
201  
202 +process_tx_again:
203         /*
204          * Process each active queue.
205          */
206 @@ -8309,6 +8371,16 @@
207         if (sc->sc_uapsdq && txqactive(sc->sc_ah, sc->sc_uapsdq->axq_qnum))
208                 ath_tx_processq(sc, sc->sc_uapsdq);
209  
210 +       ATH_DISABLE_INTR();
211 +       if (sc->sc_isr & HAL_INT_TX) {
212 +               sc->sc_isr &= ~HAL_INT_TX;
213 +               ATH_ENABLE_INTR();
214 +               goto process_tx_again;
215 +       }
216 +       sc->sc_imask |= HAL_INT_TX;
217 +       ath_hal_intrset(sc->sc_ah, sc->sc_imask);
218 +       ATH_ENABLE_INTR();
219 +
220         netif_wake_queue(dev);
221  
222         if (sc->sc_softled)
223 @@ -8327,10 +8399,21 @@
224  
225         /* Process each active queue. This includes sc_cabq, sc_xrtq and
226          * sc_uapsdq */
227 +process_tx_again:
228         for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
229                 if (ATH_TXQ_SETUP(sc, i) && txqactive(sc->sc_ah, i))
230                         ath_tx_processq(sc, &sc->sc_txq[i]);
231  
232 +       ATH_DISABLE_INTR();
233 +       if (sc->sc_isr & HAL_INT_TX) {
234 +               sc->sc_isr &= ~HAL_INT_TX;
235 +               ATH_ENABLE_INTR();
236 +               goto process_tx_again;
237 +       }
238 +       sc->sc_imask |= HAL_INT_TX;
239 +       ath_hal_intrset(sc->sc_ah, sc->sc_imask);
240 +       ATH_ENABLE_INTR();
241 +
242         netif_wake_queue(dev);
243  
244         if (sc->sc_softled)
245 @@ -8405,6 +8488,7 @@
246  ath_draintxq(struct ath_softc *sc)
247  {
248         struct ath_hal *ah = sc->sc_ah;
249 +       int npend = 0;
250         unsigned int i;
251  
252         /* XXX return value */
253 @@ -10261,9 +10345,9 @@
254         dev->mtu = mtu;
255         if ((dev->flags & IFF_RUNNING) && !sc->sc_invalid) {
256                 /* NB: the rx buffers may need to be reallocated */
257 -               tasklet_disable(&sc->sc_rxtq);
258 +               netif_poll_disable(dev);
259                 error = ath_reset(dev);
260 -               tasklet_enable(&sc->sc_rxtq);
261 +               netif_poll_enable(dev);
262         }
263         ATH_UNLOCK(sc);
264  
265 Index: madwifi-trunk-r3280/ath/if_athvar.h
266 ===================================================================
267 --- madwifi-trunk-r3280.orig/ath/if_athvar.h    2008-01-28 17:29:22.997896245 +0100
268 +++ madwifi-trunk-r3280/ath/if_athvar.h 2008-01-28 17:45:06.903383316 +0100
269 @@ -50,6 +50,10 @@
270  #include <asm/io.h>
271  #include <linux/list.h>
272  
273 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
274 +#define irqs_disabled()                        0
275 +#endif
276 +
277  /*
278   * Deduce if tasklets are available.  If not then
279   * fall back to using the immediate work queue.
280 @@ -728,7 +732,6 @@
281         struct ath_buf *sc_rxbufcur;            /* current rx buffer */
282         u_int32_t *sc_rxlink;                   /* link ptr in last RX desc */
283         spinlock_t sc_rxbuflock;
284 -       struct ATH_TQ_STRUCT sc_rxtq;           /* rx intr tasklet */
285         struct ATH_TQ_STRUCT sc_rxorntq;        /* rxorn intr tasklet */
286         u_int8_t sc_defant;                     /* current default antenna */
287         u_int8_t sc_rxotherant;                 /* RXs on non-default antenna */
288 @@ -741,6 +744,7 @@
289         u_int sc_txintrperiod;                  /* tx interrupt batching */
290         struct ath_txq sc_txq[HAL_NUM_TX_QUEUES];
291         struct ath_txq *sc_ac2q[WME_NUM_AC];    /* WME AC -> h/w qnum */
292 +       HAL_INT sc_isr;                         /* unmasked ISR state */
293         struct ATH_TQ_STRUCT sc_txtq;           /* tx intr tasklet */
294         u_int8_t sc_grppoll_str[GRPPOLL_RATE_STR_LEN];
295         struct ath_descdma sc_bdma;             /* beacon descriptors */
296 @@ -854,6 +858,8 @@
297  #define        ATH_TXBUF_LOCK_CHECK(_sc)
298  #endif
299  
300 +#define ATH_DISABLE_INTR               local_irq_disable
301 +#define ATH_ENABLE_INTR                local_irq_enable
302  
303  #define        ATH_RXBUF_LOCK_INIT(_sc)        spin_lock_init(&(_sc)->sc_rxbuflock)
304  #define        ATH_RXBUF_LOCK_DESTROY(_sc)
305 Index: madwifi-trunk-r3280/net80211/ieee80211_input.c
306 ===================================================================
307 --- madwifi-trunk-r3280.orig/net80211/ieee80211_input.c 2008-01-28 17:29:23.005896702 +0100
308 +++ madwifi-trunk-r3280/net80211/ieee80211_input.c      2008-01-28 17:46:28.108010900 +0100
309 @@ -1197,7 +1197,7 @@
310                         /* attach vlan tag */
311                         struct ieee80211_node *ni_tmp = SKB_CB(skb)->ni;
312                         if (vlan_hwaccel_receive_skb(skb, vap->iv_vlgrp, ni->ni_vlan) == NET_RX_DROP) {
313 -                               /* If netif_rx dropped the packet because 
314 +                               /* If netif_receive_skb dropped the packet because
315                                  * device was too busy */
316                                 if (ni_tmp != NULL) {
317                                         /* node reference was leaked */
318 @@ -1208,8 +1208,8 @@
319                         skb = NULL; /* SKB is no longer ours */
320                 } else {
321                         struct ieee80211_node *ni_tmp = SKB_CB(skb)->ni;
322 -                       if (netif_rx(skb) == NET_RX_DROP) {
323 -                               /* If netif_rx dropped the packet because 
324 +                       if (netif_receive_skb(skb) == NET_RX_DROP) {
325 +                               /* If netif_receive_skb dropped the packet because
326                                  * device was too busy */
327                                 if (ni_tmp != NULL) {
328                                         /* node reference was leaked */
329 @@ -2314,8 +2314,8 @@
330                 skb1->protocol = __constant_htons(0x0019);  /* ETH_P_80211_RAW */
331  
332                 ni_tmp = SKB_CB(skb1)->ni;
333 -               if (netif_rx(skb1) == NET_RX_DROP) {
334 -                       /* If netif_rx dropped the packet because 
335 +               if (netif_receive_skb(skb1) == NET_RX_DROP) {
336 +                       /* If netif_receive_skb dropped the packet because
337                          * device was too busy */
338                         if (ni_tmp != NULL) {
339                                 /* node reference was leaked */
340 Index: madwifi-trunk-r3280/net80211/ieee80211_monitor.c
341 ===================================================================
342 --- madwifi-trunk-r3280.orig/net80211/ieee80211_monitor.c       2008-01-28 17:29:23.013897159 +0100
343 +++ madwifi-trunk-r3280/net80211/ieee80211_monitor.c    2008-01-28 17:29:26.430091834 +0100
344 @@ -584,8 +584,8 @@
345                         skb1->protocol = 
346                                 __constant_htons(0x0019); /* ETH_P_80211_RAW */
347  
348 -                       if (netif_rx(skb1) == NET_RX_DROP) {
349 -                               /* If netif_rx dropped the packet because 
350 +                       if (netif_receive_skb(skb1) == NET_RX_DROP) {
351 +                               /* If netif_receive_skb dropped the packet because
352                                  * device was too busy, reclaim the ref. in 
353                                  * the skb. */
354                                 if (SKB_CB(skb1)->ni != NULL)
355 Index: madwifi-trunk-r3280/net80211/ieee80211_skb.c
356 ===================================================================
357 --- madwifi-trunk-r3280.orig/net80211/ieee80211_skb.c   2008-01-28 17:29:23.017897384 +0100
358 +++ madwifi-trunk-r3280/net80211/ieee80211_skb.c        2008-01-28 17:29:26.446092748 +0100
359 @@ -73,7 +73,7 @@
360  #undef dev_queue_xmit
361  #undef kfree_skb
362  #undef kfree_skb_fast
363 -#undef netif_rx
364 +#undef netif_receive_skb
365  #undef pskb_copy
366  #undef skb_clone
367  #undef skb_copy
368 @@ -638,8 +638,8 @@
369                 grp, vlan_tag);
370  }
371  
372 -int netif_rx_debug(struct sk_buff *skb, const char* func, int line) {
373 -       return netif_rx(untrack_skb(skb, 0, func, line, __func__, __LINE__));
374 +int netif_receive_skb_debug(struct sk_buff *skb, const char* func, int line) {
375 +       return netif_receive_skb(untrack_skb(skb, 0, func, line, __func__, __LINE__));
376  }
377  
378  struct sk_buff * alloc_skb_debug(unsigned int length, gfp_t gfp_mask,
379 @@ -760,7 +760,7 @@
380  }
381  
382  EXPORT_SYMBOL(vlan_hwaccel_receive_skb_debug);
383 -EXPORT_SYMBOL(netif_rx_debug);
384 +EXPORT_SYMBOL(netif_receive_skb_debug);
385  EXPORT_SYMBOL(alloc_skb_debug);
386  EXPORT_SYMBOL(dev_alloc_skb_debug);
387  EXPORT_SYMBOL(skb_clone_debug);
388 Index: madwifi-trunk-r3280/net80211/ieee80211_skb.h
389 ===================================================================
390 --- madwifi-trunk-r3280.orig/net80211/ieee80211_skb.h   2008-01-28 17:29:23.029898072 +0100
391 +++ madwifi-trunk-r3280/net80211/ieee80211_skb.h        2008-01-28 17:29:26.458093432 +0100
392 @@ -116,7 +116,7 @@
393  int  vlan_hwaccel_receive_skb_debug(struct sk_buff *skb, 
394                                     struct vlan_group *grp, unsigned short vlan_tag, 
395                                     const char* func, int line);
396 -int netif_rx_debug(struct sk_buff *skb, const char* func, int line);
397 +int netif_receive_skb_debug(struct sk_buff *skb, const char* func, int line);
398  struct sk_buff * alloc_skb_debug(unsigned int length, gfp_t gfp_mask,
399                                  const char *func, int line);
400  struct sk_buff * dev_alloc_skb_debug(unsigned int length,
401 @@ -151,7 +151,7 @@
402  #undef dev_queue_xmit
403  #undef kfree_skb
404  #undef kfree_skb_fast
405 -#undef netif_rx
406 +#undef netif_receive_skb
407  #undef pskb_copy
408  #undef skb_clone
409  #undef skb_copy
410 @@ -168,8 +168,8 @@
411         skb_copy_expand_debug(_skb, _newheadroom, _newtailroom, _gfp_mask, __func__, __LINE__)
412  #define vlan_hwaccel_receive_skb(_skb, _grp, _tag) \
413         vlan_hwaccel_receive_skb_debug(_skb, _grp, _tag, __func__, __LINE__)
414 -#define netif_rx(_skb) \
415 -       netif_rx_debug(_skb, __func__, __LINE__)
416 +#define netif_receive_skb(_skb) \
417 +       netif_receive_skb_debug(_skb, __func__, __LINE__)
418  #define        alloc_skb(_length, _gfp_mask) \
419         alloc_skb_debug(_length, _gfp_mask, __func__, __LINE__)
420  #define        dev_alloc_skb(_length) \