target/linux/generic-2.6/patches-2.6.33/150-netfilter_imq.patch
1 --- /dev/null
2 +++ b/drivers/net/imq.c
3 @@ -0,0 +1,632 @@
4 +/*
5 + *             Pseudo-driver for the intermediate queue device.
6 + *
7 + *             This program is free software; you can redistribute it and/or
8 + *             modify it under the terms of the GNU General Public License
9 + *             as published by the Free Software Foundation; either version
10 + *             2 of the License, or (at your option) any later version.
11 + *
12 + * Authors:    Patrick McHardy, <kaber@trash.net>
13 + *
14 + *            The first version was written by Martin Devera, <devik@cdi.cz>
15 + *
16 + * Credits:    Jan Rafaj <imq2t@cedric.vabo.cz>
17 + *              - Update patch to 2.4.21
18 + *             Sebastian Strollo <sstrollo@nortelnetworks.com>
19 + *              - Fix "Dead-loop on netdevice imq"-issue
20 + *             Marcel Sebek <sebek64@post.cz>
21 + *              - Update to 2.6.2-rc1
22 + *
23 + *            After some time of inactivity there is a group taking care
24 + *            of IMQ again: http://www.linuximq.net
25 + *
26 + *
27 + *            2004/06/30 - New version of IMQ patch to kernels <=2.6.7
28 + *             including the following changes:
29 + *
30 + *            - Correction of ipv6 support "+"s issue (Hasso Tepper)
31 + *            - Correction of imq_init_devs() issue that resulted in
32 + *            kernel OOPS unloading IMQ as module (Norbert Buchmuller)
33 + *            - Addition of functionality to choose number of IMQ devices
34 + *            during kernel config (Andre Correa)
35 + *            - Addition of functionality to choose how IMQ hooks on
36 + *            PRE and POSTROUTING (after or before NAT) (Andre Correa)
37 + *            - Cosmetic corrections (Norbert Buchmuller) (Andre Correa)
38 + *
39 + *
40 + *             2005/12/16 - IMQ versions between 2.6.7 and 2.6.13 were
41 + *             released with almost no problems. 2.6.14-x was released
42 + *             with some important changes: nfcache was removed; after
43 + *             some weeks of trouble we figured out that some IMQ fields
44 + *             in the skb were not handled in skbuff.c - skb_clone and
45 + *             copy_skb_header. These functions are patched by this version.
46 + *
47 + *             Thanks for all who helped to figure out all the problems with
48 + *             2.6.14.x: Patrick McHardy, Rune Kock, VeNoMouS, Max CtRiX,
49 + *             Kevin Shanahan, Richard Lucassen, Valery Dachev (hopefully
50 + *             I didn't forget anybody). I apologize again for my lack of time.
51 + *
52 + *
53 + *             2008/06/17 - 2.6.25 - Changed imq.c to use qdisc_run() instead 
54 + *             of qdisc_restart() and moved qdisc_run() to tasklet to avoid
55 + *             recursive locking. New initialization routines to fix 'rmmod' not
56 + *             working anymore. Used code from ifb.c. (Jussi Kivilinna)
57 + *
58 + *             2008/08/06 - 2.6.26 - (JK)
59 + *              - Replaced tasklet with 'netif_schedule()'.
60 + *              - Cleaned up and added comments for imq_nf_queue().
61 + *
62 + *             2009/04/12
63 + *              - Add skb_save_cb/skb_restore_cb helper functions for backing
64 + *                up the control buffer. This is needed because the qdisc layer
65 + *                on kernels 2.6.27 and newer overwrites it. (Jussi Kivilinna)
66 + *              - Add better locking for IMQ device. Hopefully this will solve
67 + *                SMP issues. (Jussi Kivilinna)
68 + *              - Port to 2.6.27
69 + *              - Port to 2.6.28
70 + *              - Port to 2.6.29 + fix rmmod not working
71 + *
72 + *             2009/04/20 - (Jussi Kivilinna)
73 + *              - Use netdevice feature flags to avoid extra packet handling
74 + *                by core networking layer and possibly increase performance.
75 + *
76 + *             2009/09/26 - (Jussi Kivilinna)
77 + *              - Add imq_nf_reinject_lockless to fix deadlock with
78 + *                imq_nf_queue/imq_nf_reinject.
79 + *
80 + *             2009/12/08 - (Jussi Kivilinna)
81 + *              - Port to 2.6.32
82 + *              - Add check for skb->nf_queue_entry==NULL in imq_dev_xmit()
83 + *              - Also add better error checking for skb->nf_queue_entry usage
84 + *
85 + *            Also, many thanks to Pablo Sebastian Greco for making the initial
86 + *            patch and to those who helped with the testing.
87 + *
88 + *             More info at: http://www.linuximq.net/ (Andre Correa)
89 + */
90 +
91 +#include <linux/module.h>
92 +#include <linux/kernel.h>
93 +#include <linux/moduleparam.h>
94 +#include <linux/list.h>
95 +#include <linux/skbuff.h>
96 +#include <linux/netdevice.h>
97 +#include <linux/etherdevice.h>
98 +#include <linux/rtnetlink.h>
99 +#include <linux/if_arp.h>
100 +#include <linux/netfilter.h>
101 +#include <linux/netfilter_ipv4.h>
102 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
103 +       #include <linux/netfilter_ipv6.h>
104 +#endif
105 +#include <linux/imq.h>
106 +#include <net/pkt_sched.h>
107 +#include <net/netfilter/nf_queue.h>
108 +
109 +static nf_hookfn imq_nf_hook;
110 +
111 +static struct nf_hook_ops imq_ingress_ipv4 = {
112 +       .hook           = imq_nf_hook,
113 +       .owner          = THIS_MODULE,
114 +       .pf             = PF_INET,
115 +       .hooknum        = NF_INET_PRE_ROUTING,
116 +#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
117 +       .priority       = NF_IP_PRI_MANGLE + 1
118 +#else
119 +       .priority       = NF_IP_PRI_NAT_DST + 1
120 +#endif
121 +};
122 +
123 +static struct nf_hook_ops imq_egress_ipv4 = {
124 +       .hook           = imq_nf_hook,
125 +       .owner          = THIS_MODULE,
126 +       .pf             = PF_INET,
127 +       .hooknum        = NF_INET_POST_ROUTING,
128 +#if defined(CONFIG_IMQ_BEHAVIOR_AA) || defined(CONFIG_IMQ_BEHAVIOR_BA)
129 +       .priority       = NF_IP_PRI_LAST
130 +#else
131 +       .priority       = NF_IP_PRI_NAT_SRC - 1
132 +#endif
133 +};
134 +
135 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
136 +static struct nf_hook_ops imq_ingress_ipv6 = {
137 +       .hook           = imq_nf_hook,
138 +       .owner          = THIS_MODULE,
139 +       .pf             = PF_INET6,
140 +       .hooknum        = NF_INET_PRE_ROUTING,
141 +#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
142 +       .priority       = NF_IP6_PRI_MANGLE + 1
143 +#else
144 +       .priority       = NF_IP6_PRI_NAT_DST + 1
145 +#endif
146 +};
147 +
148 +static struct nf_hook_ops imq_egress_ipv6 = {
149 +       .hook           = imq_nf_hook,
150 +       .owner          = THIS_MODULE,
151 +       .pf             = PF_INET6,
152 +       .hooknum        = NF_INET_POST_ROUTING,
153 +#if defined(CONFIG_IMQ_BEHAVIOR_AA) || defined(CONFIG_IMQ_BEHAVIOR_BA)
154 +       .priority       = NF_IP6_PRI_LAST
155 +#else
156 +       .priority       = NF_IP6_PRI_NAT_SRC - 1
157 +#endif
158 +};
159 +#endif
160 +
161 +#if defined(CONFIG_IMQ_NUM_DEVS)
162 +static unsigned int numdevs = CONFIG_IMQ_NUM_DEVS;
163 +#else
164 +static unsigned int numdevs = IMQ_MAX_DEVS;
165 +#endif
166 +
167 +static DEFINE_SPINLOCK(imq_nf_queue_lock);
168 +
169 +static struct net_device *imq_devs_cache[IMQ_MAX_DEVS];
170 +
171 +
172 +static struct net_device_stats *imq_get_stats(struct net_device *dev)
173 +{
174 +       return &dev->stats;
175 +}
176 +
177 +/* called for packets kfree'd in qdiscs at places other than enqueue */
178 +static void imq_skb_destructor(struct sk_buff *skb)
179 +{
180 +       struct nf_queue_entry *entry = skb->nf_queue_entry;
181 +
182 +       skb->nf_queue_entry = NULL;
183 +
184 +       if (entry) {
185 +               nf_queue_entry_release_refs(entry);
186 +               kfree(entry);
187 +       }
188 +
189 +       skb_restore_cb(skb); /* frees the cb backup */
190 +}
191 +
192 +/* locking not needed when called from imq_nf_queue */
193 +static void imq_nf_reinject_lockless(struct nf_queue_entry *entry,
194 +                                               unsigned int verdict)
195 +{
196 +       int status;
197 +
198 +       if (!entry->next_outfn) {
199 +               nf_reinject(entry, verdict);
200 +               return;
201 +       }
202 +
203 +       status = entry->next_outfn(entry, entry->next_queuenum);
204 +       if (status < 0) {
205 +               nf_queue_entry_release_refs(entry);
206 +               kfree_skb(entry->skb);
207 +               kfree(entry);
208 +       }
209 +}
210 +
211 +static void imq_nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
212 +{
213 +       int status;
214 +
215 +       if (!entry->next_outfn) {
216 +               spin_lock_bh(&imq_nf_queue_lock);
217 +               nf_reinject(entry, verdict);
218 +               spin_unlock_bh(&imq_nf_queue_lock);
219 +               return;
220 +       }
221 +
222 +       rcu_read_lock();
223 +       local_bh_disable();
224 +       status = entry->next_outfn(entry, entry->next_queuenum);
225 +       local_bh_enable();
226 +       if (status < 0) {
227 +               nf_queue_entry_release_refs(entry);
228 +               kfree_skb(entry->skb);
229 +               kfree(entry);
230 +       }
231 +
232 +       rcu_read_unlock();
233 +}
234 +
235 +static netdev_tx_t imq_dev_xmit(struct sk_buff *skb, struct net_device *dev)
236 +{
237 +       struct nf_queue_entry *entry = skb->nf_queue_entry;
238 +
239 +       skb->nf_queue_entry = NULL;
240 +       dev->trans_start = jiffies;
241 +
242 +       dev->stats.tx_bytes += skb->len;
243 +       dev->stats.tx_packets++;
244 +
245 +       if (entry == NULL) {
246 +               /* We don't know what is going on here: the packet is queued
247 +                * for an imq device, but (probably) not by us.
248 +                *
249 +                * If this packet was not sent here by imq_nf_queue(), then
250 +                * skb_save_cb() was not used and kfree_skb() should not show:
251 +                *   WARNING: IMQ: kfree_skb: skb->cb_next:..
252 +                * and/or
253 +                *   WARNING: IMQ: kfree_skb: skb->nf_queue_entry...
254 +                *
255 +                * However, if these messages are shown, IMQ is somehow broken
256 +                * and you should report this to linuximq.net.
257 +                */
258 +
259 +               /* imq_dev_xmit is a black hole that eats all packets; report
260 +                * that we ate this packet happily and increase the dropped counter.
261 +                */
262 +
263 +               dev->stats.tx_dropped++;
264 +               dev_kfree_skb(skb);
265 +
266 +               return NETDEV_TX_OK;
267 +       }
268 +
269 +       skb_restore_cb(skb); /* restore skb->cb */
270 +
271 +       skb->imq_flags = 0;
272 +       skb->destructor = NULL;
273 +
274 +       imq_nf_reinject(entry, NF_ACCEPT);
275 +
276 +       return NETDEV_TX_OK;
277 +}
278 +
279 +static int imq_nf_queue(struct nf_queue_entry *entry, unsigned queue_num)
280 +{
281 +       struct net_device *dev;
282 +       struct sk_buff *skb_orig, *skb, *skb_shared;
283 +       struct Qdisc *q;
284 +       struct netdev_queue *txq;
285 +       int users, index;
286 +       int retval = -EINVAL;
287 +
288 +       index = entry->skb->imq_flags & IMQ_F_IFMASK;
289 +       if (unlikely(index > numdevs - 1)) {
290 +               if (net_ratelimit())
291 +                       printk(KERN_WARNING
292 +                              "IMQ: invalid device specified, highest is %u\n",
293 +                              numdevs - 1);
294 +               retval = -EINVAL;
295 +               goto out;
296 +       }
297 +
298 +       /* check for imq device by index from cache */
299 +       dev = imq_devs_cache[index];
300 +       if (unlikely(!dev)) {
301 +               char buf[8];
302 +
303 +               /* get device by name and cache result */
304 +               snprintf(buf, sizeof(buf), "imq%d", index);
305 +               dev = dev_get_by_name(&init_net, buf);
306 +               if (!dev) {
307 +                       /* not found?! */
308 +                       BUG();
309 +                       retval = -ENODEV;
310 +                       goto out;
311 +               }
312 +
313 +               imq_devs_cache[index] = dev;
314 +               dev_put(dev);
315 +       }
316 +
317 +       if (unlikely(!(dev->flags & IFF_UP))) {
318 +               entry->skb->imq_flags = 0;
319 +               imq_nf_reinject_lockless(entry, NF_ACCEPT);
320 +               retval = 0;
321 +               goto out;
322 +       }
323 +       dev->last_rx = jiffies;
324 +
325 +       skb = entry->skb;
326 +       skb_orig = NULL;
327 +
328 +       /* skb has owner? => make clone */
329 +       if (unlikely(skb->destructor)) {
330 +               skb_orig = skb;
331 +               skb = skb_clone(skb, GFP_ATOMIC);
332 +               if (!skb) {
333 +                       retval = -ENOMEM;
334 +                       goto out;
335 +               }
336 +               entry->skb = skb;
337 +       }
338 +
339 +       skb->nf_queue_entry = entry;
340 +
341 +       dev->stats.rx_bytes += skb->len;
342 +       dev->stats.rx_packets++;
343 +
344 +       txq = dev_pick_tx(dev, skb);
345 +
346 +       q = rcu_dereference(txq->qdisc);
347 +       if (unlikely(!q->enqueue))
348 +               goto packet_not_eaten_by_imq_dev;
349 +
350 +       spin_lock_bh(qdisc_lock(q));
351 +
352 +       users = atomic_read(&skb->users);
353 +
354 +       skb_shared = skb_get(skb); /* increase reference count by one */
355 +       skb_save_cb(skb_shared); /* back up skb->cb, as the qdisc layer
356 +                                       will overwrite it */
357 +       qdisc_enqueue_root(skb_shared, q); /* might kfree_skb */
358 +
359 +       if (likely(atomic_read(&skb_shared->users) == users + 1)) {
360 +               kfree_skb(skb_shared); /* decrease reference count by one */
361 +
362 +               skb->destructor = &imq_skb_destructor;
363 +
364 +               /* cloned? */
365 +               if (skb_orig)
366 +                       kfree_skb(skb_orig); /* free original */
367 +
368 +               spin_unlock_bh(qdisc_lock(q));
369 +
370 +               /* schedule qdisc dequeue */
371 +               __netif_schedule(q);
372 +
373 +               retval = 0;
374 +               goto out;
375 +       } else {
376 +               skb_restore_cb(skb_shared); /* restore skb->cb */
377 +               skb->nf_queue_entry = NULL;
378 +               /* The qdisc dropped the packet and decreased its reference
379 +                * count, so we must not try to free it again as that would
380 +                * actually destroy the skb. */
381 +               spin_unlock_bh(qdisc_lock(q));
382 +               goto packet_not_eaten_by_imq_dev;
383 +       }
384 +
385 +packet_not_eaten_by_imq_dev:
386 +       /* cloned? restore original */
387 +       if (skb_orig) {
388 +               kfree_skb(skb);
389 +               entry->skb = skb_orig;
390 +       }
391 +       retval = -1;
392 +out:
393 +       return retval;
394 +}
395 +
396 +static struct nf_queue_handler nfqh = {
397 +       .name  = "imq",
398 +       .outfn = imq_nf_queue,
399 +};
400 +
401 +static unsigned int imq_nf_hook(unsigned int hook, struct sk_buff *pskb,
402 +                               const struct net_device *indev,
403 +                               const struct net_device *outdev,
404 +                               int (*okfn)(struct sk_buff *))
405 +{
406 +       if (pskb->imq_flags & IMQ_F_ENQUEUE)
407 +               return NF_QUEUE;
408 +
409 +       return NF_ACCEPT;
410 +}
411 +
412 +static int imq_close(struct net_device *dev)
413 +{
414 +       netif_stop_queue(dev);
415 +       return 0;
416 +}
417 +
418 +static int imq_open(struct net_device *dev)
419 +{
420 +       netif_start_queue(dev);
421 +       return 0;
422 +}
423 +
424 +static const struct net_device_ops imq_netdev_ops = {
425 +       .ndo_open               = imq_open,
426 +       .ndo_stop               = imq_close,
427 +       .ndo_start_xmit         = imq_dev_xmit,
428 +       .ndo_get_stats          = imq_get_stats,
429 +};
430 +
431 +static void imq_setup(struct net_device *dev)
432 +{
433 +       dev->netdev_ops         = &imq_netdev_ops;
434 +       dev->type               = ARPHRD_VOID;
435 +       dev->mtu                = 16000;
436 +       dev->tx_queue_len       = 11000;
437 +       dev->flags              = IFF_NOARP;
438 +       dev->features           = NETIF_F_SG | NETIF_F_FRAGLIST |
439 +                                 NETIF_F_GSO | NETIF_F_HW_CSUM |
440 +                                 NETIF_F_HIGHDMA;
441 +       dev->priv_flags         &= ~IFF_XMIT_DST_RELEASE;
442 +}
443 +
444 +static int imq_validate(struct nlattr *tb[], struct nlattr *data[])
445 +{
446 +       int ret = 0;
447 +
448 +       if (tb[IFLA_ADDRESS]) {
449 +               if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
450 +                       ret = -EINVAL;
451 +                       goto end;
452 +               }
453 +               if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
454 +                       ret = -EADDRNOTAVAIL;
455 +                       goto end;
456 +               }
457 +       }
458 +       return 0;
459 +end:
460 +       printk(KERN_WARNING "IMQ: imq_validate failed (%d)\n", ret);
461 +       return ret;
462 +}
463 +
464 +static struct rtnl_link_ops imq_link_ops __read_mostly = {
465 +       .kind           = "imq",
466 +       .priv_size      = 0,
467 +       .setup          = imq_setup,
468 +       .validate       = imq_validate,
469 +};
470 +
471 +static int __init imq_init_hooks(void)
472 +{
473 +       int err;
474 +
475 +       nf_register_queue_imq_handler(&nfqh);
476 +
477 +       err = nf_register_hook(&imq_ingress_ipv4);
478 +       if (err)
479 +               goto err1;
480 +
481 +       err = nf_register_hook(&imq_egress_ipv4);
482 +       if (err)
483 +               goto err2;
484 +
485 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
486 +       err = nf_register_hook(&imq_ingress_ipv6);
487 +       if (err)
488 +               goto err3;
489 +
490 +       err = nf_register_hook(&imq_egress_ipv6);
491 +       if (err)
492 +               goto err4;
493 +#endif
494 +
495 +       return 0;
496 +
497 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
498 +err4:
499 +       nf_unregister_hook(&imq_ingress_ipv6);
500 +err3:
501 +       nf_unregister_hook(&imq_egress_ipv4);
502 +#endif
503 +err2:
504 +       nf_unregister_hook(&imq_ingress_ipv4);
505 +err1:
506 +       nf_unregister_queue_imq_handler();
507 +       return err;
508 +}
509 +
510 +static int __init imq_init_one(int index)
511 +{
512 +       struct net_device *dev;
513 +       int ret;
514 +
515 +       dev = alloc_netdev(0, "imq%d", imq_setup);
516 +       if (!dev)
517 +               return -ENOMEM;
518 +
519 +       ret = dev_alloc_name(dev, dev->name);
520 +       if (ret < 0)
521 +               goto fail;
522 +
523 +       dev->rtnl_link_ops = &imq_link_ops;
524 +       ret = register_netdevice(dev);
525 +       if (ret < 0)
526 +               goto fail;
527 +
528 +       return 0;
529 +fail:
530 +       free_netdev(dev);
531 +       return ret;
532 +}
533 +
534 +static int __init imq_init_devs(void)
535 +{
536 +       int err, i;
537 +
538 +       if (numdevs < 1 || numdevs > IMQ_MAX_DEVS) {
539 +               printk(KERN_ERR "IMQ: numdevs has to be between 1 and %u\n",
540 +                      IMQ_MAX_DEVS);
541 +               return -EINVAL;
542 +       }
543 +
544 +       rtnl_lock();
545 +       err = __rtnl_link_register(&imq_link_ops);
546 +
547 +       for (i = 0; i < numdevs && !err; i++)
548 +               err = imq_init_one(i);
549 +
550 +       if (err) {
551 +               __rtnl_link_unregister(&imq_link_ops);
552 +               memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
553 +       }
554 +       rtnl_unlock();
555 +
556 +       return err;
557 +}
558 +
559 +static int __init imq_init_module(void)
560 +{
561 +       int err;
562 +
563 +#if defined(CONFIG_IMQ_NUM_DEVS)
564 +       BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS > 16);
565 +       BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS < 2);
566 +       BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS - 1 > IMQ_F_IFMASK);
567 +#endif
568 +
569 +       err = imq_init_devs();
570 +       if (err) {
571 +               printk(KERN_ERR "IMQ: Error trying imq_init_devs(net)\n");
572 +               return err;
573 +       }
574 +
575 +       err = imq_init_hooks();
576 +       if (err) {
577 +               printk(KERN_ERR "IMQ: Error trying imq_init_hooks()\n");
578 +               rtnl_link_unregister(&imq_link_ops);
579 +               memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
580 +               return err;
581 +       }
582 +
583 +       printk(KERN_INFO "IMQ driver loaded successfully.\n");
584 +
585 +#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
586 +       printk(KERN_INFO "\tHooking IMQ before NAT on PREROUTING.\n");
587 +#else
588 +       printk(KERN_INFO "\tHooking IMQ after NAT on PREROUTING.\n");
589 +#endif
590 +#if defined(CONFIG_IMQ_BEHAVIOR_AB) || defined(CONFIG_IMQ_BEHAVIOR_BB)
591 +       printk(KERN_INFO "\tHooking IMQ before NAT on POSTROUTING.\n");
592 +#else
593 +       printk(KERN_INFO "\tHooking IMQ after NAT on POSTROUTING.\n");
594 +#endif
595 +
596 +       return 0;
597 +}
598 +
599 +static void __exit imq_unhook(void)
600 +{
601 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
602 +       nf_unregister_hook(&imq_ingress_ipv6);
603 +       nf_unregister_hook(&imq_egress_ipv6);
604 +#endif
605 +       nf_unregister_hook(&imq_ingress_ipv4);
606 +       nf_unregister_hook(&imq_egress_ipv4);
607 +
608 +       nf_unregister_queue_imq_handler();
609 +}
610 +
611 +static void __exit imq_cleanup_devs(void)
612 +{
613 +       rtnl_link_unregister(&imq_link_ops);
614 +       memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
615 +}
616 +
617 +static void __exit imq_exit_module(void)
618 +{
619 +       imq_unhook();
620 +       imq_cleanup_devs();
621 +       printk(KERN_INFO "IMQ driver unloaded successfully.\n");
622 +}
623 +
624 +module_init(imq_init_module);
625 +module_exit(imq_exit_module);
626 +
627 +module_param(numdevs, int, 0);
628 +MODULE_PARM_DESC(numdevs, "number of IMQ devices (how many imq* devices will "
629 +                       "be created)");
630 +MODULE_AUTHOR("http://www.linuximq.net");
631 +MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See "
632 +                       "http://www.linuximq.net/ for more information.");
633 +MODULE_LICENSE("GPL");
634 +MODULE_ALIAS_RTNL_LINK("imq");
635 +
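For orientation, the packet path implemented by the driver above, sketched with the function names from this file (the iptables side is added by the xt_IMQ.c hunk further below):

    IMQ target sets skb->imq_flags = IMQ_F_ENQUEUE | device index
      -> imq_nf_hook() returns NF_QUEUE for the marked skb
      -> __nf_queue() diverts the packet to imq_nf_queue(), registered via
         nf_register_queue_imq_handler() (see the nf_queue.c hunk below)
      -> imq_nf_queue() enqueues a shared copy to the imq device's qdisc
         (qdisc_enqueue_root) and schedules a dequeue (__netif_schedule)
      -> the qdisc hands the skb to imq_dev_xmit()
      -> imq_nf_reinject(entry, NF_ACCEPT) resumes the original hook chain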
636 --- a/drivers/net/Kconfig
637 +++ b/drivers/net/Kconfig
638 @@ -109,6 +109,129 @@ config EQUALIZER
639           To compile this driver as a module, choose M here: the module
640           will be called eql.  If unsure, say N.
641  
642 +config IMQ
643 +       tristate "IMQ (intermediate queueing device) support"
644 +       depends on NETDEVICES && NETFILTER
645 +       ---help---
646 +         The IMQ devices are used as placeholders for QoS queueing
647 +         disciplines. Every packet entering/leaving the IP stack can be
648 +         directed through an IMQ device, where it is enqueued to and
649 +         dequeued from the attached qdisc. This lets you treat network
650 +         devices as classes and distribute bandwidth among them. iptables
651 +         is used to specify through which IMQ device, if any, packets travel.
652 +
653 +         More information at: http://www.linuximq.net/
654 +
655 +         To compile this driver as a module, choose M here: the module
656 +         will be called imq.  If unsure, say N.
657 +
658 +choice
659 +       prompt "IMQ behavior (PRE/POSTROUTING)"
660 +       depends on IMQ
661 +       default IMQ_BEHAVIOR_AB
662 +       help
663 +
664 +               This setting defines how IMQ behaves with respect to its
665 +               hooking in PREROUTING and POSTROUTING.
666 +
667 +               IMQ can work in any of the following ways:
668 +
669 +                   PREROUTING   |      POSTROUTING
670 +               -----------------|-------------------
671 +               #1  After NAT    |      After NAT
672 +               #2  After NAT    |      Before NAT
673 +               #3  Before NAT   |      After NAT
674 +               #4  Before NAT   |      Before NAT
675 +
676 +               The default behavior (IMQ_BEHAVIOR_AB) is to hook after NAT
677 +               on PREROUTING and before NAT on POSTROUTING (#2).
678 +
679 +               These settings are especially useful when trying to use IMQ
680 +               to shape NATed clients.
681 +
682 +               More information can be found at: www.linuximq.net
683 +
684 +               If unsure, leave the default settings alone.
685 +
686 +config IMQ_BEHAVIOR_AA
687 +       bool "IMQ AA"
688 +       help
689 +               This setting defines how IMQ behaves with respect to its
690 +               hooking in PREROUTING and POSTROUTING.
691 +
692 +               Choosing this option will make IMQ hook like this:
693 +
694 +               PREROUTING:   After NAT
695 +               POSTROUTING:  After NAT
696 +
697 +               More information can be found at: www.linuximq.net
698 +
699 +               If unsure, leave the default settings alone.
700 +
701 +config IMQ_BEHAVIOR_AB
702 +       bool "IMQ AB"
703 +       help
704 +               This setting defines how IMQ behaves with respect to its
705 +               hooking in PREROUTING and POSTROUTING.
706 +
707 +               Choosing this option will make IMQ hook like this:
708 +
709 +               PREROUTING:   After NAT
710 +               POSTROUTING:  Before NAT
711 +
712 +               More information can be found at: www.linuximq.net
713 +
714 +               If unsure, leave the default settings alone.
715 +
716 +config IMQ_BEHAVIOR_BA
717 +       bool "IMQ BA"
718 +       help
719 +               This setting defines how IMQ behaves with respect to its
720 +               hooking in PREROUTING and POSTROUTING.
721 +
722 +               Choosing this option will make IMQ hook like this:
723 +
724 +               PREROUTING:   Before NAT
725 +               POSTROUTING:  After NAT
726 +
727 +               More information can be found at: www.linuximq.net
728 +
729 +               If unsure, leave the default settings alone.
730 +
731 +config IMQ_BEHAVIOR_BB
732 +       bool "IMQ BB"
733 +       help
734 +               This setting defines how IMQ behaves with respect to its
735 +               hooking in PREROUTING and POSTROUTING.
736 +
737 +               Choosing this option will make IMQ hook like this:
738 +
739 +               PREROUTING:   Before NAT
740 +               POSTROUTING:  Before NAT
741 +
742 +               More information can be found at: www.linuximq.net
743 +
744 +               If unsure, leave the default settings alone.
745 +
746 +endchoice
747 +
748 +config IMQ_NUM_DEVS
749 +
750 +       int "Number of IMQ devices"
751 +       range 2 16
752 +       depends on IMQ
753 +       default "16"
754 +       help
755 +
756 +               This setting defines how many IMQ devices will be
757 +               created.
758 +
759 +               The default value is 16.
760 +
761 +               More information can be found at: www.linuximq.net
762 +
763 +               If unsure, leave the default settings alone.
764 +
765  config TUN
766         tristate "Universal TUN/TAP device driver support"
767         select CRC32
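As a usage sketch for the options above (device names, rule, and rates are illustrative; assumes this patch is applied with CONFIG_IMQ and NETFILTER_XT_TARGET_IMQ enabled, and an IMQ-aware iptables userspace installed):

    modprobe imq numdevs=2
    ip link set imq0 up
    # mark download traffic arriving on ppp0 for imq0 (mangle/PREROUTING)
    iptables -t mangle -A PREROUTING -i ppp0 -j IMQ --todev 0
    # shape it with a qdisc attached to the imq device
    tc qdisc add dev imq0 root handle 1: htb default 10
    tc class add dev imq0 parent 1: classid 1:10 htb rate 1mbit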
768 --- a/drivers/net/Makefile
769 +++ b/drivers/net/Makefile
770 @@ -165,6 +165,7 @@ obj-$(CONFIG_SLHC) += slhc.o
771  obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o
772  
773  obj-$(CONFIG_DUMMY) += dummy.o
774 +obj-$(CONFIG_IMQ) += imq.o
775  obj-$(CONFIG_IFB) += ifb.o
776  obj-$(CONFIG_MACVLAN) += macvlan.o
777  obj-$(CONFIG_DE600) += de600.o
778 --- /dev/null
779 +++ b/include/linux/imq.h
780 @@ -0,0 +1,13 @@
781 +#ifndef _IMQ_H
782 +#define _IMQ_H
783 +
784 +/* IFMASK (16 device indexes, 0 to 15) and flag(s) fit in 5 bits */
785 +#define IMQ_F_BITS     5
786 +
787 +#define IMQ_F_IFMASK   0x0f
788 +#define IMQ_F_ENQUEUE  0x10
789 +
790 +#define IMQ_MAX_DEVS   (IMQ_F_IFMASK + 1)
791 +
792 +#endif /* _IMQ_H */
793 +
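A worked example of the flag layout above, matching what xt_IMQ.c below does: directing a packet at imq2 stores the device index in the low four bits and sets the enqueue flag in bit 4:

    skb->imq_flags = (2 & IMQ_F_IFMASK) | IMQ_F_ENQUEUE; /* 0x02 | 0x10 == 0x12 */
    /* imq_nf_queue() later recovers the index: 0x12 & IMQ_F_IFMASK == 2 */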
794 --- a/include/linux/netdevice.h
795 +++ b/include/linux/netdevice.h
796 @@ -1168,6 +1168,7 @@ extern int                dev_alloc_name(struct net_de
797  extern int             dev_open(struct net_device *dev);
798  extern int             dev_close(struct net_device *dev);
799  extern void            dev_disable_lro(struct net_device *dev);
800 +extern struct netdev_queue *dev_pick_tx(struct net_device *dev, struct sk_buff *skb);
801  extern int             dev_queue_xmit(struct sk_buff *skb);
802  extern int             register_netdevice(struct net_device *dev);
803  extern void            unregister_netdevice_queue(struct net_device *dev,
804 --- /dev/null
805 +++ b/include/linux/netfilter/xt_IMQ.h
806 @@ -0,0 +1,9 @@
807 +#ifndef _XT_IMQ_H
808 +#define _XT_IMQ_H
809 +
810 +struct xt_imq_info {
811 +       unsigned int todev;     /* target imq device */
812 +};
813 +
814 +#endif /* _XT_IMQ_H */
815 +
816 --- /dev/null
817 +++ b/include/linux/netfilter_ipv4/ipt_IMQ.h
818 @@ -0,0 +1,10 @@
819 +#ifndef _IPT_IMQ_H
820 +#define _IPT_IMQ_H
821 +
822 +/* Backwards compatibility for old userspace */
823 +#include <linux/netfilter/xt_IMQ.h>
824 +
825 +#define ipt_imq_info xt_imq_info
826 +
827 +#endif /* _IPT_IMQ_H */
828 +
829 --- /dev/null
830 +++ b/include/linux/netfilter_ipv6/ip6t_IMQ.h
831 @@ -0,0 +1,10 @@
832 +#ifndef _IP6T_IMQ_H
833 +#define _IP6T_IMQ_H
834 +
835 +/* Backwards compatibility for old userspace */
836 +#include <linux/netfilter/xt_IMQ.h>
837 +
838 +#define ip6t_imq_info xt_imq_info
839 +
840 +#endif /* _IP6T_IMQ_H */
841 +
842 --- a/include/linux/skbuff.h
843 +++ b/include/linux/skbuff.h
844 @@ -29,6 +29,9 @@
845  #include <linux/rcupdate.h>
846  #include <linux/dmaengine.h>
847  #include <linux/hrtimer.h>
848 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
849 +#include <linux/imq.h>
850 +#endif
851  
852  /* Don't change this without changing skb_csum_unnecessary! */
853  #define CHECKSUM_NONE 0
854 @@ -323,6 +326,10 @@ struct sk_buff {
855         char                    cb[48] __aligned(8);
856  
857         unsigned long           _skb_dst;
858 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
859 +       void                    *cb_next;
860 +#endif
861 +
862  #ifdef CONFIG_XFRM
863         struct  sec_path        *sp;
864  #endif
865 @@ -357,6 +364,9 @@ struct sk_buff {
866         struct nf_conntrack     *nfct;
867         struct sk_buff          *nfct_reasm;
868  #endif
869 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
870 +       struct nf_queue_entry   *nf_queue_entry;
871 +#endif
872  #ifdef CONFIG_BRIDGE_NETFILTER
873         struct nf_bridge_info   *nf_bridge;
874  #endif
875 @@ -378,6 +388,10 @@ struct sk_buff {
876  
877         /* 0/14 bit hole */
878  
879 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
880 +       __u8                    imq_flags:IMQ_F_BITS;
881 +#endif
882 +
883  #ifdef CONFIG_NET_DMA
884         dma_cookie_t            dma_cookie;
885  #endif
886 @@ -426,6 +440,12 @@ static inline struct rtable *skb_rtable(
887         return (struct rtable *)skb_dst(skb);
888  }
889  
890 +
891 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
892 +extern int skb_save_cb(struct sk_buff *skb);
893 +extern int skb_restore_cb(struct sk_buff *skb);
894 +#endif
895 +
896  extern void kfree_skb(struct sk_buff *skb);
897  extern void consume_skb(struct sk_buff *skb);
898  extern void           __kfree_skb(struct sk_buff *skb);
899 @@ -1970,6 +1990,10 @@ static inline void __nf_copy(struct sk_b
900         dst->nfct_reasm = src->nfct_reasm;
901         nf_conntrack_get_reasm(src->nfct_reasm);
902  #endif
903 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
904 +       dst->imq_flags = src->imq_flags;
905 +       dst->nf_queue_entry = src->nf_queue_entry;
906 +#endif
907  #ifdef CONFIG_BRIDGE_NETFILTER
908         dst->nf_bridge  = src->nf_bridge;
909         nf_bridge_get(src->nf_bridge);
910 --- a/include/net/netfilter/nf_queue.h
911 +++ b/include/net/netfilter/nf_queue.h
912 @@ -13,6 +13,12 @@ struct nf_queue_entry {
913         struct net_device       *indev;
914         struct net_device       *outdev;
915         int                     (*okfn)(struct sk_buff *);
916 +
917 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
918 +       int                     (*next_outfn)(struct nf_queue_entry *entry,
919 +                                             unsigned int queuenum);
920 +       unsigned int            next_queuenum;
921 +#endif
922  };
923  
924  #define nf_queue_entry_reroute(x) ((void *)x + sizeof(struct nf_queue_entry))
925 @@ -30,5 +36,11 @@ extern int nf_unregister_queue_handler(u
926                                        const struct nf_queue_handler *qh);
927  extern void nf_unregister_queue_handlers(const struct nf_queue_handler *qh);
928  extern void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
929 +extern void nf_queue_entry_release_refs(struct nf_queue_entry *entry);
930 +
931 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
932 +extern void nf_register_queue_imq_handler(const struct nf_queue_handler *qh);
933 +extern void nf_unregister_queue_imq_handler(void);
934 +#endif
935  
936  #endif /* _NF_QUEUE_H */
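The next_outfn/next_queuenum pair added above is what lets IMQ coexist with another queue handler such as nfnetlink_queue: __nf_queue() (in the nf_queue.c hunk below) records the displaced handler in the entry before diverting the packet to IMQ, and imq_nf_reinject() later either forwards the entry on or falls back to nf_reinject(). A condensed sketch, using only code from this patch:

    /* in __nf_queue(): remember the original handler, if any */
    .next_outfn = qh ? qh->outfn : NULL,
    .next_queuenum = queuenum,

    /* in imq_nf_reinject(), once the imq qdisc has released the skb */
    if (!entry->next_outfn)
            nf_reinject(entry, verdict);
    else
            status = entry->next_outfn(entry, entry->next_queuenum);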
937 --- a/net/core/dev.c
938 +++ b/net/core/dev.c
939 @@ -97,6 +97,9 @@
940  #include <net/net_namespace.h>
941  #include <net/sock.h>
942  #include <linux/rtnetlink.h>
943 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
944 +#include <linux/imq.h>
945 +#endif
946  #include <linux/proc_fs.h>
947  #include <linux/seq_file.h>
948  #include <linux/stat.h>
949 @@ -1810,7 +1813,11 @@ int dev_hard_start_xmit(struct sk_buff *
950         int rc = NETDEV_TX_OK;
951  
952         if (likely(!skb->next)) {
953 -               if (!list_empty(&ptype_all))
954 +               if (!list_empty(&ptype_all)
955 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
956 +                   && !(skb->imq_flags & IMQ_F_ENQUEUE)
957 +#endif
958 +                   )
959                         dev_queue_xmit_nit(skb, dev);
960  
961                 if (netif_needs_gso(dev, skb)) {
962 @@ -1912,8 +1919,7 @@ static inline u16 dev_cap_txqueue(struct
963         return queue_index;
964  }
965  
966 -static struct netdev_queue *dev_pick_tx(struct net_device *dev,
967 -                                       struct sk_buff *skb)
968 +struct netdev_queue *dev_pick_tx(struct net_device *dev, struct sk_buff *skb)
969  {
970         u16 queue_index;
971         struct sock *sk = skb->sk;
972 @@ -1939,6 +1945,7 @@ static struct netdev_queue *dev_pick_tx(
973         skb_set_queue_mapping(skb, queue_index);
974         return netdev_get_tx_queue(dev, queue_index);
975  }
976 +EXPORT_SYMBOL(dev_pick_tx);
977  
978  static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
979                                  struct net_device *dev,
980 --- a/net/core/skbuff.c
981 +++ b/net/core/skbuff.c
982 @@ -72,6 +72,9 @@
983  
984  static struct kmem_cache *skbuff_head_cache __read_mostly;
985  static struct kmem_cache *skbuff_fclone_cache __read_mostly;
986 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
987 +static struct kmem_cache *skbuff_cb_store_cache __read_mostly;
988 +#endif
989  
990  static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
991                                   struct pipe_buffer *buf)
992 @@ -91,6 +94,83 @@ static int sock_pipe_buf_steal(struct pi
993         return 1;
994  }
995  
996 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
997 +/* Control buffer save/restore for IMQ devices */
998 +struct skb_cb_table {
999 +       void                    *cb_next;
1000 +       atomic_t                refcnt;
1001 +       char                    cb[48];
1002 +};
1003 +
1004 +static DEFINE_SPINLOCK(skb_cb_store_lock);
1005 +
1006 +int skb_save_cb(struct sk_buff *skb)
1007 +{
1008 +       struct skb_cb_table *next;
1009 +
1010 +       next = kmem_cache_alloc(skbuff_cb_store_cache, GFP_ATOMIC);
1011 +       if (!next)
1012 +               return -ENOMEM;
1013 +
1014 +       BUILD_BUG_ON(sizeof(skb->cb) != sizeof(next->cb));
1015 +
1016 +       memcpy(next->cb, skb->cb, sizeof(skb->cb));
1017 +       next->cb_next = skb->cb_next;
1018 +
1019 +       atomic_set(&next->refcnt, 1);
1020 +
1021 +       skb->cb_next = next;
1022 +       return 0;
1023 +}
1024 +EXPORT_SYMBOL(skb_save_cb);
1025 +
1026 +int skb_restore_cb(struct sk_buff *skb)
1027 +{
1028 +       struct skb_cb_table *next;
1029 +
1030 +       if (!skb->cb_next)
1031 +               return 0;
1032 +
1033 +       next = skb->cb_next;
1034 +
1035 +       BUILD_BUG_ON(sizeof(skb->cb) != sizeof(next->cb));
1036 +
1037 +       memcpy(skb->cb, next->cb, sizeof(skb->cb));
1038 +       skb->cb_next = next->cb_next;
1039 +
1040 +       spin_lock(&skb_cb_store_lock);
1041 +
1042 +       if (atomic_dec_and_test(&next->refcnt)) {
1043 +               kmem_cache_free(skbuff_cb_store_cache, next);
1044 +       }
1045 +
1046 +       spin_unlock(&skb_cb_store_lock);
1047 +
1048 +       return 0;
1049 +}
1050 +EXPORT_SYMBOL(skb_restore_cb);
1051 +
1052 +static void skb_copy_stored_cb(struct sk_buff *new, const struct sk_buff *__old)
1053 +{
1054 +       struct skb_cb_table *next;
1055 +       struct sk_buff *old;
1056 +
1057 +       if (!__old->cb_next) {
1058 +               new->cb_next = NULL;
1059 +               return;
1060 +       }
1061 +
1062 +       spin_lock(&skb_cb_store_lock);
1063 +
1064 +       old = (struct sk_buff *)__old;
1065 +
1066 +       next = old->cb_next;
1067 +       atomic_inc(&next->refcnt);
1068 +       new->cb_next = next;
1069 +
1070 +       spin_unlock(&skb_cb_store_lock);
1071 +}
1072 +#endif
1073  
1074  /* Pipe buffer operations for a socket. */
1075  static const struct pipe_buf_operations sock_pipe_buf_ops = {
1076 @@ -398,6 +478,26 @@ static void skb_release_head_state(struc
1077                 WARN_ON(in_irq());
1078                 skb->destructor(skb);
1079         }
1080 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1081 +       /* This should not happen. When it does, avoid a memory leak by
1082 +        * restoring the chain of cb backups. */
1083 +       while (skb->cb_next != NULL) {
1084 +               if (net_ratelimit())
1085 +                       printk(KERN_WARNING "IMQ: kfree_skb: skb->cb_next: "
1086 +                               "%08x\n", (unsigned int)skb->cb_next);
1087 +
1088 +               skb_restore_cb(skb);
1089 +       }
1090 +       /* This should not happen either: nf_queue_entry is nullified in
1091 +        * imq_dev_xmit(). If we have a non-NULL nf_queue_entry then we are
1092 +        * leaking entry pointers, and maybe memory. We don't know whether
1093 +        * this points to already-freed memory or whether it should be freed
1094 +        * here. If so, we need to add refcounting etc. for nf_queue_entry.
1095 +        */
1096 +       if (skb->nf_queue_entry && net_ratelimit())
1097 +               printk(KERN_WARNING
1098 +                               "IMQ: kfree_skb: skb->nf_queue_entry != NULL");
1099 +#endif
1100  #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
1101         nf_conntrack_put(skb->nfct);
1102         nf_conntrack_put_reasm(skb->nfct_reasm);
1103 @@ -538,6 +638,9 @@ static void __copy_skb_header(struct sk_
1104         new->sp                 = secpath_get(old->sp);
1105  #endif
1106         memcpy(new->cb, old->cb, sizeof(old->cb));
1107 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1108 +       skb_copy_stored_cb(new, old);
1109 +#endif
1110         new->csum               = old->csum;
1111         new->local_df           = old->local_df;
1112         new->pkt_type           = old->pkt_type;
1113 @@ -2779,6 +2882,13 @@ void __init skb_init(void)
1114                                                 0,
1115                                                 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
1116                                                 NULL);
1117 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1118 +       skbuff_cb_store_cache = kmem_cache_create("skbuff_cb_store_cache",
1119 +                                                 sizeof(struct skb_cb_table),
1120 +                                                 0,
1121 +                                                 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
1122 +                                                 NULL);
1123 +#endif
1124  }
1125  
1126  /**
1127 --- a/net/netfilter/Kconfig
1128 +++ b/net/netfilter/Kconfig
1129 @@ -396,6 +396,18 @@ config NETFILTER_XT_TARGET_LED
1130           For more information on the LEDs available on your system, see
1131           Documentation/leds-class.txt
1132  
1133 +config NETFILTER_XT_TARGET_IMQ
1134 +        tristate '"IMQ" target support'
1135 +       depends on NETFILTER_XTABLES
1136 +       depends on IP_NF_MANGLE || IP6_NF_MANGLE
1137 +       select IMQ
1138 +       default m if NETFILTER_ADVANCED=n
1139 +        help
1140 +          This option adds an `IMQ' target which is used to specify if and
1141 +          to which imq device packets should get enqueued/dequeued.
1142 +
1143 +          To compile it as a module, choose M here.  If unsure, say N.
1144 +
1145  config NETFILTER_XT_TARGET_MARK
1146         tristate '"MARK" target support'
1147         default m if NETFILTER_ADVANCED=n
1148 --- a/net/netfilter/Makefile
1149 +++ b/net/netfilter/Makefile
1150 @@ -46,6 +46,7 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_CONNMAR
1151  obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o
1152  obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o
1153  obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o
1154 +obj-$(CONFIG_NETFILTER_XT_TARGET_IMQ) += xt_IMQ.o
1155  obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o
1156  obj-$(CONFIG_NETFILTER_XT_TARGET_MARK) += xt_MARK.o
1157  obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o
1158 --- a/net/netfilter/nf_queue.c
1159 +++ b/net/netfilter/nf_queue.c
1160 @@ -20,6 +20,26 @@ static const struct nf_queue_handler *qu
1161  
1162  static DEFINE_MUTEX(queue_handler_mutex);
1163  
1164 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1165 +static const struct nf_queue_handler *queue_imq_handler;
1166 +
1167 +void nf_register_queue_imq_handler(const struct nf_queue_handler *qh)
1168 +{
1169 +       mutex_lock(&queue_handler_mutex);
1170 +       rcu_assign_pointer(queue_imq_handler, qh);
1171 +       mutex_unlock(&queue_handler_mutex);
1172 +}
1173 +EXPORT_SYMBOL(nf_register_queue_imq_handler);
1174 +
1175 +void nf_unregister_queue_imq_handler(void)
1176 +{
1177 +       mutex_lock(&queue_handler_mutex);
1178 +       rcu_assign_pointer(queue_imq_handler, NULL);
1179 +       mutex_unlock(&queue_handler_mutex);
1180 +}
1181 +EXPORT_SYMBOL(nf_unregister_queue_imq_handler);
1182 +#endif
1183 +
1184  /* return EBUSY when somebody else is registered, return EEXIST if the
1185   * same handler is registered, return 0 in case of success. */
1186  int nf_register_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
1187 @@ -80,7 +100,7 @@ void nf_unregister_queue_handlers(const 
1188  }
1189  EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers);
1190  
1191 -static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
1192 +void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
1193  {
1194         /* Release those devices we held, or Alexey will kill me. */
1195         if (entry->indev)
1196 @@ -100,6 +120,7 @@ static void nf_queue_entry_release_refs(
1197         /* Drop reference to owner of hook which queued us. */
1198         module_put(entry->elem->owner);
1199  }
1200 +EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs);
1201  
1202  /*
1203   * Any packet that leaves via this function must come back
1204 @@ -121,12 +142,26 @@ static int __nf_queue(struct sk_buff *sk
1205  #endif
1206         const struct nf_afinfo *afinfo;
1207         const struct nf_queue_handler *qh;
1208 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1209 +       const struct nf_queue_handler *qih = NULL;
1210 +#endif
1211  
1212         /* QUEUE == DROP if noone is waiting, to be safe. */
1213         rcu_read_lock();
1214  
1215         qh = rcu_dereference(queue_handler[pf]);
1216 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1217 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1218 +       if (pf == PF_INET || pf == PF_INET6)
1219 +#else
1220 +       if (pf == PF_INET)
1221 +#endif
1222 +               qih = rcu_dereference(queue_imq_handler);
1223 +
1224 +       if (!qh && !qih)
1225 +#else /* !IMQ */
1226         if (!qh)
1227 +#endif
1228                 goto err_unlock;
1229  
1230         afinfo = nf_get_afinfo(pf);
1231 @@ -145,6 +180,10 @@ static int __nf_queue(struct sk_buff *sk
1232                 .indev  = indev,
1233                 .outdev = outdev,
1234                 .okfn   = okfn,
1235 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1236 +               .next_outfn = qh ? qh->outfn : NULL,
1237 +               .next_queuenum = queuenum,
1238 +#endif
1239         };
1240  
1241         /* If it's going away, ignore hook. */
1242 @@ -170,8 +209,19 @@ static int __nf_queue(struct sk_buff *sk
1243         }
1244  #endif
1245         afinfo->saveroute(skb, entry);
1246 +
1247 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1248 +       if (qih) {
1249 +               status = qih->outfn(entry, queuenum);
1250 +               goto imq_skip_queue;
1251 +       }
1252 +#endif
1253 +
1254         status = qh->outfn(entry, queuenum);
1255  
1256 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1257 +imq_skip_queue:
1258 +#endif
1259         rcu_read_unlock();
1260  
1261         if (status < 0) {
1262 --- /dev/null
1263 +++ b/net/netfilter/xt_IMQ.c
1264 @@ -0,0 +1,73 @@
1265 +/*
1266 + * This target marks packets to be enqueued to an imq device
1267 + */
1268 +#include <linux/module.h>
1269 +#include <linux/skbuff.h>
1270 +#include <linux/netfilter/x_tables.h>
1271 +#include <linux/netfilter/xt_IMQ.h>
1272 +#include <linux/imq.h>
1273 +
1274 +static unsigned int imq_target(struct sk_buff *pskb,
1275 +                               const struct xt_target_param *par)
1276 +{
1277 +       const struct xt_imq_info *mr = par->targinfo;
1278 +
1279 +       pskb->imq_flags = (mr->todev & IMQ_F_IFMASK) | IMQ_F_ENQUEUE;
1280 +
1281 +       return XT_CONTINUE;
1282 +}
1283 +
1284 +static bool imq_checkentry(const struct xt_tgchk_param *par)
1285 +{
1286 +       struct xt_imq_info *mr = par->targinfo;
1287 +
1288 +       if (mr->todev > IMQ_MAX_DEVS - 1) {
1289 +               printk(KERN_WARNING
1290 +                      "IMQ: invalid device specified, highest is %u\n",
1291 +                      IMQ_MAX_DEVS - 1);
1292 +               return 0;
1293 +       }
1294 +
1295 +       return 1;
1296 +}
1297 +
1298 +static struct xt_target xt_imq_reg[] __read_mostly = {
1299 +       {
1300 +               .name           = "IMQ",
1301 +               .family         = AF_INET,
1302 +               .checkentry     = imq_checkentry,
1303 +               .target         = imq_target,
1304 +               .targetsize     = sizeof(struct xt_imq_info),
1305 +               .table          = "mangle",
1306 +               .me             = THIS_MODULE
1307 +       },
1308 +       {
1309 +               .name           = "IMQ",
1310 +               .family         = AF_INET6,
1311 +               .checkentry     = imq_checkentry,
1312 +               .target         = imq_target,
1313 +               .targetsize     = sizeof(struct xt_imq_info),
1314 +               .table          = "mangle",
1315 +               .me             = THIS_MODULE
1316 +       },
1317 +};
1318 +
1319 +static int __init imq_init(void)
1320 +{
1321 +       return xt_register_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg));
1322 +}
1323 +
1324 +static void __exit imq_fini(void)
1325 +{
1326 +       xt_unregister_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg));
1327 +}
1328 +
1329 +module_init(imq_init);
1330 +module_exit(imq_fini);
1331 +
1332 +MODULE_AUTHOR("http://www.linuximq.net");
1333 +MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See http://www.linuximq.net/ for more information.");
1334 +MODULE_LICENSE("GPL");
1335 +MODULE_ALIAS("ipt_IMQ");
1336 +MODULE_ALIAS("ip6t_IMQ");
1337 +