obsolete-buildroot/sources/openwrt/kernel/patches/110-sch_htb.patch
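Update the 2.4 kernel's HTB packet scheduler from HTB version 3.7
(HTB_VER 0x30007) to 3.10 (0x30010), matching devik's sch_htb.c v1.25.
A summary of the changes carried by this patch, as read from the hunks
below:

 - cache jiffies in q->jiffies once per dequeue and compare timestamps
   with the wraparound-safe time_after()/time_after_eq() macros instead
   of open-coded "a - b < 0x80000000" arithmetic
 - htb_classify(): only short-circuit on skb->priority when it resolves
   to a leaf class; inner classes fall through to the filter chain
 - requeue: put direct packets back at the head of the direct queue; on
   overflow drop the tail packet and return NET_XMIT_CN (Jiri Fojtasek)
 - dequeue: tolerate leaf classes that became empty since the last
   dequeue (the leaf qdisc dropped in its enqueue routine, or the leaf
   was grafted); deactivate and skip them (Wilfried Weissmann)
 - htb_drop(): return the dropped packet's length instead of 1
 - create the default pfifo leaf qdisc before taking sch_tree_lock(),
   because qdisc_create_dflt() allocates with GFP_KERNEL
 - destroy filters before classes in htb_destroy(), since filters need
   their target class alive for unbind_filter
 - remove the temporary DEVIK_MSTART/DEVIK_MEND profiling macros
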
--- src/linux/linux/net/sched/sch_htb.c	2003-10-14 01:09:35.000000000 -0700
+++ src/linux/linux.2.4.26/net/sched/sch_htb.c	2004-05-10 00:05:51.000000000 -0700
@@ -9,6 +9,8 @@
  * Authors:    Martin Devera, <devik@cdi.cz>
  *
  * Credits (in time order) for older HTB versions:
+ *              Stef Coene <stef.coene@docum.org>
+ *                     HTB support at LARTC mailing list
  *             Ondrej Kraus, <krauso@barr.cz> 
  *                     found missing INIT_QDISC(htb)
  *             Vladimir Smelhaus, Aamer Akhter, Bert Hubert
@@ -17,9 +19,13 @@
  *                     code review and helpful comments on shaping
  *             Tomasz Wrona, <tw@eter.tym.pl>
  *                     created test case so that I was able to fix nasty bug
+ *             Wilfried Weissmann
+ *                     spotted bug in dequeue code and helped with fix
+ *             Jiri Fojtasek
+ *                     fixed requeue routine
  *             and many others. thanks.
  *
- * $Id: sch_htb.c,v 1.1.1.4 2003/10/14 08:09:35 sparq Exp $
+ * $Id: sch_htb.c,v 1.25 2003/12/07 11:08:25 devik Exp devik $
  */
 #include <linux/config.h>
 #include <linux/module.h>
@@ -71,16 +77,12 @@
 #define HTB_HYSTERESIS 1/* whether to use mode hysteresis for speedup */
 #define HTB_QLOCK(S) spin_lock_bh(&(S)->dev->queue_lock)
 #define HTB_QUNLOCK(S) spin_unlock_bh(&(S)->dev->queue_lock)
-#define HTB_VER 0x30007        /* major must be matched with number suplied by TC as version */
+#define HTB_VER 0x30010        /* major must be matched with number supplied by TC as version */
 
 #if HTB_VER >> 16 != TC_HTB_PROTOVER
 #error "Mismatched sch_htb.c and pkt_sch.h"
 #endif
 
-/* temporary debug defines to be removed after beta stage */
-#define DEVIK_MEND(N)
-#define DEVIK_MSTART(N)
-
 /* debugging support; S is subsystem, these are defined:
   0 - netlink messages
   1 - enqueue
@@ -100,13 +102,16 @@
  from LSB
  */
 #ifdef HTB_DEBUG
-#define HTB_DBG(S,L,FMT,ARG...) if (((q->debug>>(2*S))&3) >= L) \
+#define HTB_DBG_COND(S,L) (((q->debug>>(2*S))&3) >= L)
+#define HTB_DBG(S,L,FMT,ARG...) if (HTB_DBG_COND(S,L)) \
        printk(KERN_DEBUG FMT,##ARG)
 #define HTB_CHCL(cl) BUG_TRAP((cl)->magic == HTB_CMAGIC)
 #define HTB_PASSQ q,
 #define HTB_ARGQ struct htb_sched *q,
 #define static
+#undef __inline__
 #define __inline__
+#undef inline
 #define inline
 #define HTB_CMAGIC 0xFEFAFEF1
 #define htb_safe_rb_erase(N,R) do { BUG_TRAP((N)->rb_color != -1); \
@@ -114,6 +119,7 @@
                rb_erase(N,R); \
                (N)->rb_color = -1; } while (0)
 #else
+#define HTB_DBG_COND(S,L) (0)
 #define HTB_DBG(S,L,FMT,ARG...)
 #define HTB_PASSQ
 #define HTB_ARGQ
@@ -219,6 +225,9 @@
     /* time of nearest event per level (row) */
     unsigned long near_ev_cache[TC_HTB_MAXDEPTH];
 
+    /* cached value of jiffies in dequeue */
+    unsigned long jiffies;
+
     /* whether we hit non-work conserving class during this dequeue; we use */
     int nwc_hit;       /* this to disable mindelay complaint in dequeue */
 
@@ -297,7 +306,7 @@
           rules in it */
        if (skb->priority == sch->handle)
                return HTB_DIRECT;  /* X:0 (direct flow) selected */
-       if ((cl = htb_find(skb->priority,sch)) != NULL) 
+       if ((cl = htb_find(skb->priority,sch)) != NULL && cl->level == 0) 
                return cl;
 
        tcf = q->filter_list;
@@ -338,7 +347,7 @@
 static void htb_debug_dump (struct htb_sched *q)
 {
        int i,p;
-       printk(KERN_DEBUG "htb*g j=%lu\n",jiffies);
+       printk(KERN_DEBUG "htb*g j=%lu lj=%lu\n",jiffies,q->jiffies);
        /* rows */
        for (i=TC_HTB_MAXDEPTH-1;i>=0;i--) {
                printk(KERN_DEBUG "htb*r%d m=%x",i,q->row_mask[i]);
@@ -421,26 +430,24 @@
        if ((delay <= 0 || delay > cl->mbuffer) && net_ratelimit())
                printk(KERN_ERR "HTB: suspicious delay in wait_tree d=%ld cl=%X h=%d\n",delay,cl->classid,debug_hint);
 #endif
-       DEVIK_MSTART(9);
-       cl->pq_key = jiffies + PSCHED_US2JIFFIE(delay);
-       if (cl->pq_key == jiffies)
+       cl->pq_key = q->jiffies + PSCHED_US2JIFFIE(delay);
+       if (cl->pq_key == q->jiffies)
                cl->pq_key++;
 
        /* update the nearest event cache */
-       if (q->near_ev_cache[cl->level] - cl->pq_key < 0x80000000)
+       if (time_after(q->near_ev_cache[cl->level], cl->pq_key))
                q->near_ev_cache[cl->level] = cl->pq_key;
        
        while (*p) {
                struct htb_class *c; parent = *p;
                c = rb_entry(parent, struct htb_class, pq_node);
-               if (cl->pq_key - c->pq_key < 0x80000000)
+               if (time_after_eq(cl->pq_key, c->pq_key))
                        p = &parent->rb_right;
                else 
                        p = &parent->rb_left;
        }
        rb_link_node(&cl->pq_node, parent, p);
        rb_insert_color(&cl->pq_node, &q->wait_pq[cl->level]);
-       DEVIK_MEND(9);
 }
 
 /**
@@ -453,12 +460,14 @@
 {
        rb_node_t *p;
        if ((*n)->rb_right) {
+               /* child at right. use it or its leftmost descendant */
135                 *n = (*n)->rb_right;
136                 while ((*n)->rb_left) 
137                         *n = (*n)->rb_left;
138                 return;
139         }
140         while ((p = (*n)->rb_parent) != NULL) {
+               /* if we've arrived from the left child then we have the next node */
                if (p->rb_left == *n) break;
                *n = p;
        }
@@ -602,7 +611,7 @@
     long toks;
 
     if ((toks = (cl->ctokens + *diff)) < (
-#ifdef HTB_HYSTERESIS
+#if HTB_HYSTERESIS
            cl->cmode != HTB_CANT_SEND ? -cl->cbuffer :
 #endif
                    0)) {
@@ -610,7 +619,7 @@
            return HTB_CANT_SEND;
     }
     if ((toks = (cl->tokens + *diff)) >= (
-#ifdef HTB_HYSTERESIS
+#if HTB_HYSTERESIS
            cl->cmode == HTB_CAN_SEND ? -cl->buffer :
 #endif
            0))
@@ -689,7 +698,6 @@
     struct htb_sched *q = (struct htb_sched *)sch->data;
     struct htb_class *cl = htb_classify(skb,sch);
 
-    DEVIK_MSTART(0);
     if (cl == HTB_DIRECT || !cl) {
        /* enqueue to helper queue */
        if (q->direct_queue.qlen < q->direct_qlen && cl) {
@@ -698,25 +706,20 @@
        } else {
            kfree_skb (skb);
            sch->stats.drops++;
-           DEVIK_MEND(0);
            return NET_XMIT_DROP;
        }
     } else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
        sch->stats.drops++;
        cl->stats.drops++;
-       DEVIK_MEND(0);
        return NET_XMIT_DROP;
     } else {
        cl->stats.packets++; cl->stats.bytes += skb->len;
-       DEVIK_MSTART(1);
        htb_activate (q,cl);
-       DEVIK_MEND(1);
     }
 
     sch->q.qlen++;
     sch->stats.packets++; sch->stats.bytes += skb->len;
-    HTB_DBG(1,1,"htb_enq_ok cl=%X skb=%p\n",cl?cl->classid:0,skb);
-    DEVIK_MEND(0);
+    HTB_DBG(1,1,"htb_enq_ok cl=%X skb=%p\n",(cl && cl != HTB_DIRECT)?cl->classid:0,skb);
     return NET_XMIT_SUCCESS;
 }
 
@@ -725,16 +728,18 @@
 {
     struct htb_sched *q = (struct htb_sched *)sch->data;
     struct htb_class *cl = htb_classify(skb,sch);
+    struct sk_buff *tskb;
 
     if (cl == HTB_DIRECT || !cl) {
        /* enqueue to helper queue */
        if (q->direct_queue.qlen < q->direct_qlen && cl) {
-           __skb_queue_tail(&q->direct_queue, skb);
-           q->direct_pkts++;
+           __skb_queue_head(&q->direct_queue, skb);
        } else {
-           kfree_skb (skb);
-           sch->stats.drops++;
-           return NET_XMIT_DROP;
+            __skb_queue_head(&q->direct_queue, skb);
+            tskb = __skb_dequeue_tail(&q->direct_queue);
+            kfree_skb (tskb);
+            sch->stats.drops++;
+            return NET_XMIT_CN;        
        }
     } else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
        sch->stats.drops++;
@@ -744,7 +749,7 @@
            htb_activate (q,cl);
 
     sch->q.qlen++;
-    HTB_DBG(1,1,"htb_req_ok cl=%X skb=%p\n",cl?cl->classid:0,skb);
+    HTB_DBG(1,1,"htb_req_ok cl=%X skb=%p\n",(cl && cl != HTB_DIRECT)?cl->classid:0,skb);
     return NET_XMIT_SUCCESS;
 }
 
@@ -819,7 +824,7 @@
                                       cl->classid, diff,
                                       (unsigned long long) q->now,
                                       (unsigned long long) cl->t_c,
-                                      jiffies);
+                                      q->jiffies);
                        diff = 1000;
                }
 #endif
@@ -862,6 +867,7 @@
  *
  * Scans event queue for pending events and applies them. Returns jiffies to
  * next pending event (0 for no event in pq).
+ * Note: Events with cl->pq_key <= jiffies are applied.
  */
 static long htb_do_events(struct htb_sched *q,int level)
 {
@@ -876,9 +882,9 @@
                while (p->rb_left) p = p->rb_left;
 
                cl = rb_entry(p, struct htb_class, pq_node);
-               if (cl->pq_key - (jiffies+1) < 0x80000000) {
-                       HTB_DBG(8,3,"htb_do_ev_ret delay=%ld\n",cl->pq_key - jiffies);
-                       return cl->pq_key - jiffies;
+               if (time_after(cl->pq_key, q->jiffies)) {
+                       HTB_DBG(8,3,"htb_do_ev_ret delay=%ld\n",cl->pq_key - q->jiffies);
+                       return cl->pq_key - q->jiffies;
                }
                htb_safe_rb_erase(p,q->wait_pq+level);
                diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer, 0);
@@ -889,7 +895,7 @@
                                       cl->classid, diff,
                                       (unsigned long long) q->now,
                                       (unsigned long long) cl->t_c,
-                                      jiffies);
+                                      q->jiffies);
                        diff = 1000;
                }
 #endif
@@ -916,6 +922,7 @@
                rb_node_t **pptr;
        } stk[TC_HTB_MAXDEPTH],*sp = stk;
        
+       BUG_TRAP(tree->rb_node);
        sp->root = tree->rb_node;
        sp->pptr = pptr;
 
@@ -949,16 +956,36 @@
 htb_dequeue_tree(struct htb_sched *q,int prio,int level)
 {
        struct sk_buff *skb = NULL;
-       //struct htb_sched *q = (struct htb_sched *)sch->data;
        struct htb_class *cl,*start;
        /* look initial class up in the row */
-       DEVIK_MSTART(6);
        start = cl = htb_lookup_leaf (q->row[level]+prio,prio,q->ptr[level]+prio);
        
        do {
-               BUG_TRAP(cl && cl->un.leaf.q->q.qlen); if (!cl) return NULL;
+next:
+               BUG_TRAP(cl); 
+               if (!cl) return NULL;
                HTB_DBG(4,1,"htb_deq_tr prio=%d lev=%d cl=%X defic=%d\n",
                                prio,level,cl->classid,cl->un.leaf.deficit[level]);
+
+               /* class can be empty - it is unlikely but can be true if leaf
+                  qdisc drops packets in enqueue routine or if someone used
+                  graft operation on the leaf since last dequeue; 
+                  simply deactivate and skip such class */
+               if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
+                       struct htb_class *next;
+                       htb_deactivate(q,cl);
+
+                       /* row/level might become empty */
+                       if ((q->row_mask[level] & (1 << prio)) == 0)
+                               return NULL; 
+                       
+                       next = htb_lookup_leaf (q->row[level]+prio,
+                                       prio,q->ptr[level]+prio);
+                       if (cl == start) /* fix start if we just deleted it */
+                               start = next;
+                       cl = next;
+                       goto next;
+               }
        
                if (likely((skb = cl->un.leaf.q->dequeue(cl->un.leaf.q)) != NULL)) 
                        break;
@@ -971,8 +998,6 @@
                cl = htb_lookup_leaf (q->row[level]+prio,prio,q->ptr[level]+prio);
        } while (cl != start);
 
-       DEVIK_MEND(6);
-       DEVIK_MSTART(7);
        if (likely(skb != NULL)) {
                if ((cl->un.leaf.deficit[level] -= skb->len) < 0) {
                        HTB_DBG(4,2,"htb_next_cl oldptr=%p quant_add=%d\n",
@@ -984,11 +1009,8 @@
                   gives us slightly better performance */
                if (!cl->un.leaf.q->q.qlen)
                        htb_deactivate (q,cl);
-       DEVIK_MSTART(8);
                htb_charge_class (q,cl,level,skb->len);
-       DEVIK_MEND(8);
        }
-       DEVIK_MEND(7);
        return skb;
 }
 
@@ -1002,9 +1024,8 @@
                        printk(KERN_INFO "HTB delay %ld > 5sec\n", delay);
                delay = 5*HZ;
        }
-       del_timer(&q->timer);
-       q->timer.expires = jiffies + delay;
-       add_timer(&q->timer);
+       /* why not use jiffies here? because expires can be in the past */
+       mod_timer(&q->timer, q->jiffies + delay);
        sch->flags |= TCQ_F_THROTTLED;
        sch->stats.overlimits++;
        HTB_DBG(3,1,"htb_deq t_delay=%ld\n",delay);
@@ -1016,7 +1037,11 @@
        struct htb_sched *q = (struct htb_sched *)sch->data;
        int level;
        long min_delay;
+#ifdef HTB_DEBUG
+       int evs_used = 0;
+#endif
 
+       q->jiffies = jiffies;
        HTB_DBG(3,1,"htb_deq dircnt=%d qlen=%d\n",skb_queue_len(&q->direct_queue),
                        sch->q.qlen);
 
@@ -1027,27 +1052,26 @@
                return skb;
        }
 
-       DEVIK_MSTART(2);
        if (!sch->q.qlen) goto fin;
        PSCHED_GET_TIME(q->now);
 
-       min_delay = HZ*5;
+       min_delay = LONG_MAX;
        q->nwc_hit = 0;
        for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
                /* common case optimization - skip event handler quickly */
                int m;
                long delay;
-       DEVIK_MSTART(3);
-               if (jiffies - q->near_ev_cache[level] < 0x80000000 || 0) {
+               if (time_after_eq(q->jiffies, q->near_ev_cache[level])) {
                        delay = htb_do_events(q,level);
-                       q->near_ev_cache[level] += delay ? delay : HZ;
+                       q->near_ev_cache[level] = q->jiffies + (delay ? delay : HZ);
+#ifdef HTB_DEBUG
+                       evs_used++;
+#endif
                } else
-                       delay = q->near_ev_cache[level] - jiffies;      
+                       delay = q->near_ev_cache[level] - q->jiffies;   
                
                if (delay && min_delay > delay) 
                        min_delay = delay;
-       DEVIK_MEND(3);
-       DEVIK_MSTART(5);
                m = ~q->row_mask[level];
                while (m != (int)(-1)) {
                        int prio = ffz (m);
@@ -1056,29 +1080,29 @@
                        if (likely(skb != NULL)) {
                                sch->q.qlen--;
                                sch->flags &= ~TCQ_F_THROTTLED;
-       DEVIK_MEND(5);
                                goto fin;
                        }
                }
-       DEVIK_MEND(5);
        }
-       DEVIK_MSTART(4);
 #ifdef HTB_DEBUG
-       if (!q->nwc_hit && min_delay >= 5*HZ && net_ratelimit()) { 
-               printk(KERN_ERR "HTB: mindelay=%ld, report it please !\n",min_delay);
-               htb_debug_dump(q);
+       if (!q->nwc_hit && min_delay >= 10*HZ && net_ratelimit()) {
+               if (min_delay == LONG_MAX) {
+                       printk(KERN_ERR "HTB: dequeue bug (%d,%lu,%lu), report it please !\n",
+                                       evs_used,q->jiffies,jiffies);
+                       htb_debug_dump(q);
+               } else 
+                       printk(KERN_WARNING "HTB: mindelay=%ld, some class has "
+                                       "too small rate\n",min_delay);
        }
 #endif
-       htb_delay_by (sch,min_delay);
-       DEVIK_MEND(4);
+       htb_delay_by (sch,min_delay > 5*HZ ? 5*HZ : min_delay);
 fin:
-       HTB_DBG(3,1,"htb_deq_end %s j=%lu skb=%p\n",sch->dev->name,jiffies,skb);
-       DEVIK_MEND(2);
+       HTB_DBG(3,1,"htb_deq_end %s j=%lu skb=%p\n",sch->dev->name,q->jiffies,skb);
        return skb;
 }
 
 /* try to drop from each class (by prio) until one succeed */
-static int htb_drop(struct Qdisc* sch)
+static unsigned int htb_drop(struct Qdisc* sch)
 {
        struct htb_sched *q = (struct htb_sched *)sch->data;
        int prio;
@@ -1086,14 +1110,15 @@
        for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
                struct list_head *p;
                list_for_each (p,q->drops+prio) {
-                       struct htb_class *cl = list_entry(p,struct htb_class,
-                                       un.leaf.drop_list);
+                       struct htb_class *cl = list_entry(p, struct htb_class,
+                                                         un.leaf.drop_list);
+                       unsigned int len;
                        if (cl->un.leaf.q->ops->drop && 
-                               cl->un.leaf.q->ops->drop(cl->un.leaf.q)) {
+                               (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
                                sch->q.qlen--;
                                if (!cl->un.leaf.q->q.qlen)
                                        htb_deactivate (q,cl);
-                               return 1;
+                               return len;
                        }
                }
        }
@@ -1208,7 +1233,8 @@
        gopt.direct_pkts = q->direct_pkts;
 
 #ifdef HTB_DEBUG
-       htb_debug_dump(q);
+       if (HTB_DBG_COND(0,2))
+               htb_debug_dump(q);
 #endif
        gopt.version = HTB_VER;
        gopt.rate2quantum = q->rate2quantum;
@@ -1289,6 +1315,9 @@
                                        return -ENOBUFS;
                sch_tree_lock(sch);
                if ((*old = xchg(&cl->un.leaf.q, new)) != NULL) {
+                       if (cl->prio_activity)
+                               htb_deactivate ((struct htb_sched*)sch->data,cl);
+
                        /* TODO: is it correct ? Why CBQ doesn't do it ? */
                        sch->q.qlen -= (*old)->q.qlen;  
                        qdisc_reset(*old);
@@ -1323,7 +1352,7 @@
 
        while ((tp = *fl) != NULL) {
                *fl = tp->next;
-               tp->ops->destroy(tp);
+               tcf_destroy(tp);
        }
 }
 
@@ -1371,11 +1400,16 @@
 #ifdef HTB_RATECM
        del_timer_sync (&q->rttim);
 #endif
+       /* This line used to be after the htb_destroy_class call below
+          and surprisingly it worked in 2.4. But it must precede it
+          because filters need their target class alive to be able to call
+          unbind_filter on it (without an Oops). */
+       htb_destroy_filters(&q->filter_list);
+       
        while (!list_empty(&q->root)) 
                htb_destroy_class (sch,list_entry(q->root.next,
                                        struct htb_class,sibling));
 
-       htb_destroy_filters(&q->filter_list);
        __skb_queue_purge(&q->direct_queue);
        MOD_DEC_USE_COUNT;
 }
@@ -1438,12 +1472,13 @@
        parent = parentid == TC_H_ROOT ? NULL : htb_find (parentid,sch);
 
        hopt = RTA_DATA(tb[TCA_HTB_PARMS-1]);
-       HTB_DBG(0,1,"htb_chg cl=%p, clid=%X, opt/prio=%d, rate=%u, buff=%d, quant=%d\n", cl,cl?cl->classid:0,(int)hopt->prio,hopt->rate.rate,hopt->buffer,hopt->quantum);
+       HTB_DBG(0,1,"htb_chg cl=%p(%X), clid=%X, parid=%X, opt/prio=%d, rate=%u, buff=%d, quant=%d\n", cl,cl?cl->classid:0,classid,parentid,(int)hopt->prio,hopt->rate.rate,hopt->buffer,hopt->quantum);
        rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB-1]);
        ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB-1]);
        if (!rtab || !ctab) goto failure;
 
        if (!cl) { /* new class */
+               struct Qdisc *new_q;
                /* check for valid classid */
                if (!classid || TC_H_MAJ(classid^sch->handle) || htb_find(classid,sch))
                        goto failure;
@@ -1467,6 +1502,10 @@
                cl->magic = HTB_CMAGIC;
 #endif
 
+               /* create leaf qdisc early because it uses kmalloc(GFP_KERNEL),
+                  which can't be used inside of sch_tree_lock
+                  -- thanks to Karlis Peisenieks */
+               new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
                sch_tree_lock(sch);
                if (parent && !parent->level) {
                        /* turn parent into inner node */
@@ -1485,8 +1524,7 @@
                        memset (&parent->un.inner,0,sizeof(parent->un.inner));
                }
                /* leaf (we) needs elementary qdisc */
-               if (!(cl->un.leaf.q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops)))
-                       cl->un.leaf.q = &noop_qdisc;
+               cl->un.leaf.q = new_q ? new_q : &noop_qdisc;
 
                cl->classid = classid; cl->parent = parent;
 
@@ -1514,11 +1552,11 @@
        if (!cl->level) {
                cl->un.leaf.quantum = rtab->rate.rate / q->rate2quantum;
                if (!hopt->quantum && cl->un.leaf.quantum < 1000) {
-                       printk(KERN_WARNING "HTB: quantum of class %X is small. Consider r2q change.", cl->classid);
+                       printk(KERN_WARNING "HTB: quantum of class %X is small. Consider r2q change.\n", cl->classid);
                        cl->un.leaf.quantum = 1000;
                }
                if (!hopt->quantum && cl->un.leaf.quantum > 200000) {
-                       printk(KERN_WARNING "HTB: quantum of class %X is big. Consider r2q change.", cl->classid);
+                       printk(KERN_WARNING "HTB: quantum of class %X is big. Consider r2q change.\n", cl->classid);
                        cl->un.leaf.quantum = 200000;
                }
                if (hopt->quantum)
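
The hunk below is the companion change to include/net/pkt_cls.h: it adds
the tcf_destroy() helper used by htb_destroy_filters() above, so that
destroying a classifier also frees the struct tcf_proto itself (the old
code only called tp->ops->destroy(tp) and never freed the tcf_proto).
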
--- src/linux/linux/include/net/pkt_cls.h	2003-07-04 01:12:28.000000000 -0700
+++ src/linux/linux.2.4.26/include/net/pkt_cls.h	2004-05-10 22:21:40.000000000 -0700
@@ -77,7 +77,11 @@
        return -1;
 }
 
-
+static inline void tcf_destroy(struct tcf_proto *tp)
+{
+       tp->ops->destroy(tp);
+       kfree(tp);
+}
 
 extern int register_tcf_proto_ops(struct tcf_proto_ops *ops);
 extern int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);