0814520f9a3711aca7840a29eb9e678d2e7f112c
[openwrt.git] / target / linux / generic-2.6 / patches-2.6.27 / 150-netfilter_imq.patch
1 --- /dev/null
2 +++ b/drivers/net/imq.c
3 @@ -0,0 +1,566 @@
4 +/*
5 + *             Pseudo-driver for the intermediate queue device.
6 + *
7 + *             This program is free software; you can redistribute it and/or
8 + *             modify it under the terms of the GNU General Public License
9 + *             as published by the Free Software Foundation; either version
10 + *             2 of the License, or (at your option) any later version.
11 + *
12 + * Authors:    Patrick McHardy, <kaber@trash.net>
13 + *
14 + *            The first version was written by Martin Devera, <devik@cdi.cz>
15 + *
16 + * Credits:    Jan Rafaj <imq2t@cedric.vabo.cz>
17 + *              - Update patch to 2.4.21
18 + *             Sebastian Strollo <sstrollo@nortelnetworks.com>
19 + *              - Fix "Dead-loop on netdevice imq"-issue
20 + *             Marcel Sebek <sebek64@post.cz>
21 + *              - Update to 2.6.2-rc1
22 + *
23 + *            After some time of inactivity there is a group taking care
24 + *            of IMQ again: http://www.linuximq.net
25 + *
26 + *
27 + *            2004/06/30 - New version of IMQ patch to kernels <=2.6.7
28 + *             including the following changes:
29 + *
30 + *            - Correction of ipv6 support "+"s issue (Hasso Tepper)
31 + *            - Correction of imq_init_devs() issue that resulted in
32 + *            kernel OOPS unloading IMQ as module (Norbert Buchmuller)
33 + *            - Addition of functionality to choose number of IMQ devices
34 + *            during kernel config (Andre Correa)
35 + *            - Addition of functionality to choose how IMQ hooks on
36 + *            PRE and POSTROUTING (after or before NAT) (Andre Correa)
37 + *            - Cosmetic corrections (Norbert Buchmuller) (Andre Correa)
38 + *
39 + *
40 + *             2005/12/16 - IMQ versions between 2.6.7 and 2.6.13 were
41 + *             released with almost no problems. 2.6.14-x was released
42 + *             with some important changes: nfcache was removed; After
43 + *             some weeks of trouble we figured out that some IMQ fields
44 + *             in skb were missing in skbuff.c - skb_clone and copy_skb_header.
45 + *             These functions are correctly patched by this new patch version.
46 + *
47 + *             Thanks for all who helped to figure out all the problems with
48 + *             2.6.14.x: Patrick McHardy, Rune Kock, VeNoMouS, Max CtRiX,
49 + *             Kevin Shanahan, Richard Lucassen, Valery Dachev (hopefully
50 + *             I didn't forget anybody). I apologize again for my lack of time.
51 + *
52 + *
53 + *             2008/06/17 - 2.6.25 - Changed imq.c to use qdisc_run() instead
54 + *             of qdisc_restart() and moved qdisc_run() to tasklet to avoid
55 + *             recursive locking. New initialization routines to fix 'rmmod' not
56 + *             working anymore. Used code from ifb.c. (Jussi Kivilinna)
57 + *
58 + *             2008/08/06 - 2.6.26 - (JK)
59 + *              - Replaced tasklet with 'netif_schedule()'.
60 + *              - Cleaned up and added comments for imq_nf_queue().
61 + *
62 + *             2009/04/12
63 + *              - Add skb_save_cb/skb_restore_cb helper functions for backuping
64 + *                control buffer. This is needed because qdisc-layer on kernels
65 + *                2.6.27 and newer overwrite control buffer. (Jussi Kivilinna)
66 + *              - Add better locking for IMQ device. Hopefully this will solve
67 + *                SMP issues. (Jussi Kivilinna)
68 + *              - Port to 2.6.27
69 + *
70 + *             2009/04/20 - (Jussi Kivilinna)
71 + *              - Fix rmmod not working
72 + *              - Use netdevice feature flags to avoid extra packet handling
73 + *                by core networking layer and possibly increase performance.
74 + *
75 + *            Also, many thanks to pablo Sebastian Greco for making the initial
76 + *            patch and to those who helped the testing.
77 + *
78 + *             More info at: http://www.linuximq.net/ (Andre Correa)
79 + */
80 +
81 +#include <linux/module.h>
82 +#include <linux/kernel.h>
83 +#include <linux/moduleparam.h>
84 +#include <linux/list.h>
85 +#include <linux/skbuff.h>
86 +#include <linux/netdevice.h>
87 +#include <linux/etherdevice.h>
88 +#include <linux/rtnetlink.h>
89 +#include <linux/if_arp.h>
90 +#include <linux/netfilter.h>
91 +#include <linux/netfilter_ipv4.h>
92 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
93 +       #include <linux/netfilter_ipv6.h>
94 +#endif
95 +#include <linux/imq.h>
96 +#include <net/pkt_sched.h>
97 +#include <net/netfilter/nf_queue.h>
98 +
99 +static nf_hookfn imq_nf_hook;
100 +
101 +static struct nf_hook_ops imq_ingress_ipv4 = {
102 +       .hook           = imq_nf_hook,
103 +       .owner          = THIS_MODULE,
104 +       .pf             = PF_INET,
105 +       .hooknum        = NF_INET_PRE_ROUTING,
106 +#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
107 +       .priority       = NF_IP_PRI_MANGLE + 1
108 +#else
109 +       .priority       = NF_IP_PRI_NAT_DST + 1
110 +#endif
111 +};
112 +
113 +static struct nf_hook_ops imq_egress_ipv4 = {
114 +       .hook           = imq_nf_hook,
115 +       .owner          = THIS_MODULE,
116 +       .pf             = PF_INET,
117 +       .hooknum        = NF_INET_POST_ROUTING,
118 +#if defined(CONFIG_IMQ_BEHAVIOR_AA) || defined(CONFIG_IMQ_BEHAVIOR_BA)
119 +       .priority       = NF_IP_PRI_LAST
120 +#else
121 +       .priority       = NF_IP_PRI_NAT_SRC - 1
122 +#endif
123 +};
124 +
125 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
126 +static struct nf_hook_ops imq_ingress_ipv6 = {
127 +       .hook           = imq_nf_hook,
128 +       .owner          = THIS_MODULE,
129 +       .pf             = PF_INET6,
130 +       .hooknum        = NF_INET_PRE_ROUTING,
131 +#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
132 +       .priority       = NF_IP6_PRI_MANGLE + 1
133 +#else
134 +       .priority       = NF_IP6_PRI_NAT_DST + 1
135 +#endif
136 +};
137 +
138 +static struct nf_hook_ops imq_egress_ipv6 = {
139 +       .hook           = imq_nf_hook,
140 +       .owner          = THIS_MODULE,
141 +       .pf             = PF_INET6,
142 +       .hooknum        = NF_INET_POST_ROUTING,
143 +#if defined(CONFIG_IMQ_BEHAVIOR_AA) || defined(CONFIG_IMQ_BEHAVIOR_BA)
144 +       .priority       = NF_IP6_PRI_LAST
145 +#else
146 +       .priority       = NF_IP6_PRI_NAT_SRC - 1
147 +#endif
148 +};
149 +#endif
150 +
151 +#if defined(CONFIG_IMQ_NUM_DEVS)
152 +static unsigned int numdevs = CONFIG_IMQ_NUM_DEVS;
153 +#else
154 +static unsigned int numdevs = IMQ_MAX_DEVS;
155 +#endif
156 +
157 +static DEFINE_SPINLOCK(imq_nf_queue_lock);
158 +
159 +static struct net_device *imq_devs_cache[IMQ_MAX_DEVS];
160 +
161 +
162 +static struct net_device_stats *imq_get_stats(struct net_device *dev)
163 +{
164 +       return &dev->stats;
165 +}
166 +
167 +/* called for packets kfree'd in qdiscs at places other than enqueue */
168 +static void imq_skb_destructor(struct sk_buff *skb)
169 +{
170 +       struct nf_queue_entry *entry = skb->nf_queue_entry;
171 +
172 +       if (entry) {
173 +               nf_queue_entry_release_refs(entry);
174 +               kfree(entry);
175 +       }
176 +
177 +       skb_restore_cb(skb); /* kfree backup */
178 +}
179 +
180 +static void imq_nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
181 +{
182 +       int status;
183 +
184 +       if (!entry->next_outfn) {
185 +               spin_lock_bh(&imq_nf_queue_lock);
186 +               nf_reinject(entry, verdict);
187 +               spin_unlock_bh(&imq_nf_queue_lock);
188 +               return;
189 +       }
190 +
191 +       rcu_read_lock();
192 +       local_bh_disable();
193 +       status = entry->next_outfn(entry, entry->next_queuenum);
194 +       local_bh_enable();
195 +       if (status < 0) {
196 +               nf_queue_entry_release_refs(entry);
197 +               kfree_skb(entry->skb);
198 +               kfree(entry);
199 +       }
200 +
201 +       rcu_read_unlock();
202 +}
203 +
204 +static int imq_dev_xmit(struct sk_buff *skb, struct net_device *dev)
205 +{
206 +       dev->stats.tx_bytes += skb->len;
207 +       dev->stats.tx_packets++;
208 +
209 +       skb->imq_flags = 0;
210 +       skb->destructor = NULL;
211 +
212 +       skb_restore_cb(skb); /* restore skb->cb */
213 +
214 +       dev->trans_start = jiffies;
215 +       imq_nf_reinject(skb->nf_queue_entry, NF_ACCEPT);
216 +       return 0;
217 +}
218 +
219 +static int imq_nf_queue(struct nf_queue_entry *entry, unsigned queue_num)
220 +{
221 +       struct net_device *dev;
222 +       struct sk_buff *skb_orig, *skb, *skb_shared;
223 +       struct Qdisc *q;
224 +       struct netdev_queue *txq;
225 +       int users, index;
226 +       int retval = -EINVAL;
227 +
228 +       index = entry->skb->imq_flags & IMQ_F_IFMASK;
229 +       if (unlikely(index > numdevs - 1)) {
230 +               if (net_ratelimit())
231 +                       printk(KERN_WARNING
232 +                              "IMQ: invalid device specified, highest is %u\n",
233 +                              numdevs - 1);
234 +               retval = -EINVAL;
235 +               goto out;
236 +       }
237 +
238 +       /* check for imq device by index from cache */
239 +       dev = imq_devs_cache[index];
240 +       if (unlikely(!dev)) {
241 +               char buf[8];
242 +
243 +               /* get device by name and cache result */
244 +               snprintf(buf, sizeof(buf), "imq%d", index);
245 +               dev = dev_get_by_name(&init_net, buf);
246 +               if (!dev) {
247 +                       /* not found ?!*/
248 +                       BUG();
249 +                       retval = -ENODEV;
250 +                       goto out;
251 +               }
252 +
253 +               imq_devs_cache[index] = dev;
254 +               dev_put(dev);
255 +       }
256 +
257 +       if (unlikely(!(dev->flags & IFF_UP))) {
258 +               entry->skb->imq_flags = 0;
259 +               imq_nf_reinject(entry, NF_ACCEPT);
260 +               retval = 0;
261 +               goto out;
262 +       }
263 +       dev->last_rx = jiffies;
264 +
265 +       skb = entry->skb;
266 +       skb_orig = NULL;
267 +
268 +       /* skb has owner? => make clone */
269 +       if (unlikely(skb->destructor)) {
270 +               skb_orig = skb;
271 +               skb = skb_clone(skb, GFP_ATOMIC);
272 +               if (!skb) {
273 +                       retval = -ENOMEM;
274 +                       goto out;
275 +               }
276 +               entry->skb = skb;
277 +       }
278 +
279 +       skb->nf_queue_entry = entry;
280 +
281 +       dev->stats.rx_bytes += skb->len;
282 +       dev->stats.rx_packets++;
283 +
284 +       txq = dev_pick_tx(dev, skb);
285 +
286 +       q = rcu_dereference(txq->qdisc);
287 +       if (unlikely(!q->enqueue))
288 +               goto packet_not_eaten_by_imq_dev;
289 +
290 +       spin_lock_bh(qdisc_lock(q));
291 +
292 +       users = atomic_read(&skb->users);
293 +
294 +       skb_shared = skb_get(skb); /* increase reference count by one */
295 +       skb_save_cb(skb_shared); /* backup skb->cb, as qdisc layer will
296 +                                       overwrite it */
297 +       qdisc_enqueue_root(skb_shared, q); /* might kfree_skb */
298 +
299 +       if (likely(atomic_read(&skb_shared->users) == users + 1)) {
300 +               kfree_skb(skb_shared); /* decrease reference count by one */
301 +
302 +               skb->destructor = &imq_skb_destructor;
303 +
304 +               /* cloned? */
305 +               if (skb_orig)
306 +                       kfree_skb(skb_orig); /* free original */
307 +
308 +               spin_unlock_bh(qdisc_lock(q));
309 +
310 +               /* schedule qdisc dequeue */
311 +               __netif_schedule(q);
312 +
313 +               retval = 0;
314 +               goto out;
315 +       } else {
316 +               skb_restore_cb(skb_shared); /* restore skb->cb */
317 +               /* qdisc dropped packet and decreased skb reference count of
318 +                * skb, so we don't really want to and try refree as that would
319 +                * actually destroy the skb. */
320 +               spin_unlock_bh(qdisc_lock(q));
321 +               goto packet_not_eaten_by_imq_dev;
322 +       }
323 +
324 +packet_not_eaten_by_imq_dev:
325 +       /* cloned? restore original */
326 +       if (skb_orig) {
327 +               kfree_skb(skb);
328 +               entry->skb = skb_orig;
329 +       }
330 +       retval = -1;
331 +out:
332 +       return retval;
333 +}
334 +
335 +static struct nf_queue_handler nfqh = {
336 +       .name  = "imq",
337 +       .outfn = imq_nf_queue,
338 +};
339 +
340 +static unsigned int imq_nf_hook(unsigned int hook, struct sk_buff *pskb,
341 +                               const struct net_device *indev,
342 +                               const struct net_device *outdev,
343 +                               int (*okfn)(struct sk_buff *))
344 +{
345 +       if (pskb->imq_flags & IMQ_F_ENQUEUE)
346 +               return NF_QUEUE;
347 +
348 +       return NF_ACCEPT;
349 +}
350 +
351 +static int imq_close(struct net_device *dev)
352 +{
353 +       netif_stop_queue(dev);
354 +       return 0;
355 +}
356 +
357 +static int imq_open(struct net_device *dev)
358 +{
359 +       netif_start_queue(dev);
360 +       return 0;
361 +}
362 +
363 +static void imq_setup(struct net_device *dev)
364 +{
365 +       dev->hard_start_xmit    = imq_dev_xmit;
366 +       dev->open               = imq_open;
367 +       dev->get_stats          = imq_get_stats;
368 +       dev->stop               = imq_close;
369 +       dev->type               = ARPHRD_VOID;
370 +       dev->mtu                = 16000;
371 +       dev->tx_queue_len       = 11000;
372 +       dev->flags              = IFF_NOARP;
373 +       dev->features           = NETIF_F_SG | NETIF_F_FRAGLIST |
374 +                                 NETIF_F_GSO | NETIF_F_HW_CSUM |
375 +                                 NETIF_F_HIGHDMA;
376 +}
377 +
378 +static int imq_validate(struct nlattr *tb[], struct nlattr *data[])
379 +{
380 +       int ret = 0;
381 +
382 +       if (tb[IFLA_ADDRESS]) {
383 +               if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
384 +                       ret = -EINVAL;
385 +                       goto end;
386 +               }
387 +               if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
388 +                       ret = -EADDRNOTAVAIL;
389 +                       goto end;
390 +               }
391 +       }
392 +       return 0;
393 +end:
394 +       printk(KERN_WARNING "IMQ: imq_validate failed (%d)\n", ret);
395 +       return ret;
396 +}
397 +
398 +static struct rtnl_link_ops imq_link_ops __read_mostly = {
399 +       .kind           = "imq",
400 +       .priv_size      = 0,
401 +       .setup          = imq_setup,
402 +       .validate       = imq_validate,
403 +};
404 +
405 +static int __init imq_init_hooks(void)
406 +{
407 +       int err;
408 +
409 +       nf_register_queue_imq_handler(&nfqh);
410 +
411 +       err = nf_register_hook(&imq_ingress_ipv4);
412 +       if (err)
413 +               goto err1;
414 +
415 +       err = nf_register_hook(&imq_egress_ipv4);
416 +       if (err)
417 +               goto err2;
418 +
419 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
420 +       err = nf_register_hook(&imq_ingress_ipv6);
421 +       if (err)
422 +               goto err3;
423 +
424 +       err = nf_register_hook(&imq_egress_ipv6);
425 +       if (err)
426 +               goto err4;
427 +#endif
428 +
429 +       return 0;
430 +
431 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
432 +err4:
433 +       nf_unregister_hook(&imq_ingress_ipv6);
434 +err3:
435 +       nf_unregister_hook(&imq_egress_ipv4);
436 +#endif
437 +err2:
438 +       nf_unregister_hook(&imq_ingress_ipv4);
439 +err1:
440 +       nf_unregister_queue_imq_handler();
441 +       return err;
442 +}
443 +
444 +static int __init imq_init_one(int index)
445 +{
446 +       struct net_device *dev;
447 +       int ret;
448 +
449 +       dev = alloc_netdev(0, "imq%d", imq_setup);
450 +       if (!dev)
451 +               return -ENOMEM;
452 +
453 +       ret = dev_alloc_name(dev, dev->name);
454 +       if (ret < 0)
455 +               goto fail;
456 +
457 +       dev->rtnl_link_ops = &imq_link_ops;
458 +       ret = register_netdevice(dev);
459 +       if (ret < 0)
460 +               goto fail;
461 +
462 +       return 0;
463 +fail:
464 +       free_netdev(dev);
465 +       return ret;
466 +}
467 +
468 +static int __init imq_init_devs(void)
469 +{
470 +       int err, i;
471 +
472 +       if (numdevs < 1 || numdevs > IMQ_MAX_DEVS) {
473 +               printk(KERN_ERR "IMQ: numdevs has to be betweed 1 and %u\n",
474 +                      IMQ_MAX_DEVS);
475 +               return -EINVAL;
476 +       }
477 +
478 +       rtnl_lock();
479 +       err = __rtnl_link_register(&imq_link_ops);
480 +
481 +       for (i = 0; i < numdevs && !err; i++)
482 +               err = imq_init_one(i);
483 +
484 +       if (err) {
485 +               __rtnl_link_unregister(&imq_link_ops);
486 +               memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
487 +       }
488 +       rtnl_unlock();
489 +
490 +       return err;
491 +}
492 +
493 +static int __init imq_init_module(void)
494 +{
495 +       int err;
496 +
497 +#if defined(CONFIG_IMQ_NUM_DEVS)
498 +       BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS > 16);
499 +       BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS < 2);
500 +       BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS - 1 > IMQ_F_IFMASK);
501 +#endif
502 +
503 +       err = imq_init_devs();
504 +       if (err) {
505 +               printk(KERN_ERR "IMQ: Error trying imq_init_devs(net)\n");
506 +               return err;
507 +       }
508 +
509 +       err = imq_init_hooks();
510 +       if (err) {
511 +               printk(KERN_ERR "IMQ: Error trying imq_init_hooks()\n");
512 +               rtnl_link_unregister(&imq_link_ops);
513 +               memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
514 +               return err;
515 +       }
516 +
517 +       printk(KERN_INFO "IMQ driver loaded successfully.\n");
518 +
519 +#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
520 +       printk(KERN_INFO "\tHooking IMQ before NAT on PREROUTING.\n");
521 +#else
522 +       printk(KERN_INFO "\tHooking IMQ after NAT on PREROUTING.\n");
523 +#endif
524 +#if defined(CONFIG_IMQ_BEHAVIOR_AB) || defined(CONFIG_IMQ_BEHAVIOR_BB)
525 +       printk(KERN_INFO "\tHooking IMQ before NAT on POSTROUTING.\n");
526 +#else
527 +       printk(KERN_INFO "\tHooking IMQ after NAT on POSTROUTING.\n");
528 +#endif
529 +
530 +       return 0;
531 +}
532 +
533 +static void __exit imq_unhook(void)
534 +{
535 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
536 +       nf_unregister_hook(&imq_ingress_ipv6);
537 +       nf_unregister_hook(&imq_egress_ipv6);
538 +#endif
539 +       nf_unregister_hook(&imq_ingress_ipv4);
540 +       nf_unregister_hook(&imq_egress_ipv4);
541 +
542 +       nf_unregister_queue_imq_handler();
543 +}
544 +
545 +static void __exit imq_cleanup_devs(void)
546 +{
547 +       rtnl_link_unregister(&imq_link_ops);
548 +       memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
549 +}
550 +
551 +static void __exit imq_exit_module(void)
552 +{
553 +       imq_unhook();
554 +       imq_cleanup_devs();
555 +       printk(KERN_INFO "IMQ driver unloaded successfully.\n");
556 +}
557 +
558 +module_init(imq_init_module);
559 +module_exit(imq_exit_module);
560 +
561 +module_param(numdevs, int, 0);
562 +MODULE_PARM_DESC(numdevs, "number of IMQ devices (how many imq* devices will "
563 +                       "be created)");
564 +MODULE_AUTHOR("http://www.linuximq.net");
565 +MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See "
566 +                       "http://www.linuximq.net/ for more information.");
567 +MODULE_LICENSE("GPL");
568 +MODULE_ALIAS_RTNL_LINK("imq");
569 +
570 --- a/drivers/net/Kconfig
571 +++ b/drivers/net/Kconfig
572 @@ -109,6 +109,129 @@
573           To compile this driver as a module, choose M here: the module
574           will be called eql.  If unsure, say N.
575  
576 +config IMQ
577 +       tristate "IMQ (intermediate queueing device) support"
578 +       depends on NETDEVICES && NETFILTER
579 +       ---help---
580 +         The IMQ device(s) is used as placeholder for QoS queueing
581 +         disciplines. Every packet entering/leaving the IP stack can be
582 +         directed through the IMQ device where it's enqueued/dequeued to the
583 +         attached qdisc. This allows you to treat network devices as classes
584 +         and distribute bandwidth among them. Iptables is used to specify
585 +         through which IMQ device, if any, packets travel.
586 +
587 +         More information at: http://www.linuximq.net/
588 +
589 +         To compile this driver as a module, choose M here: the module
590 +         will be called imq.  If unsure, say N.
591 +
592 +choice
593 +       prompt "IMQ behavior (PRE/POSTROUTING)"
594 +       depends on IMQ
595 +       default IMQ_BEHAVIOR_AB
596 +       help
597 +
598 +               This settings defines how IMQ behaves in respect to its
599 +               hooking in PREROUTING and POSTROUTING.
600 +
601 +               IMQ can work in any of the following ways:
602 +
603 +                   PREROUTING   |      POSTROUTING
604 +               -----------------|-------------------
605 +               #1  After NAT    |      After NAT
606 +               #2  After NAT    |      Before NAT
607 +               #3  Before NAT   |      After NAT
608 +               #4  Before NAT   |      Before NAT
609 +
610 +               The default behavior is to hook before NAT on PREROUTING
611 +               and after NAT on POSTROUTING (#3).
612 +
613 +               This settings are specially usefull when trying to use IMQ
614 +               to shape NATed clients.
615 +
616 +               More information can be found at: www.linuximq.net
617 +
618 +               If not sure leave the default settings alone.
619 +
620 +config IMQ_BEHAVIOR_AA
621 +       bool "IMQ AA"
622 +       help
623 +               This settings defines how IMQ behaves in respect to its
624 +               hooking in PREROUTING and POSTROUTING.
625 +
626 +               Choosing this option will make IMQ hook like this:
627 +
628 +               PREROUTING:   After NAT
629 +               POSTROUTING:  After NAT
630 +
631 +               More information can be found at: www.linuximq.net
632 +
633 +               If not sure leave the default settings alone.
634 +
635 +config IMQ_BEHAVIOR_AB
636 +       bool "IMQ AB"
637 +       help
638 +               This settings defines how IMQ behaves in respect to its
639 +               hooking in PREROUTING and POSTROUTING.
640 +
641 +               Choosing this option will make IMQ hook like this:
642 +
643 +               PREROUTING:   After NAT
644 +               POSTROUTING:  Before NAT
645 +
646 +               More information can be found at: www.linuximq.net
647 +
648 +               If not sure leave the default settings alone.
649 +
650 +config IMQ_BEHAVIOR_BA
651 +       bool "IMQ BA"
652 +       help
653 +               This settings defines how IMQ behaves in respect to its
654 +               hooking in PREROUTING and POSTROUTING.
655 +
656 +               Choosing this option will make IMQ hook like this:
657 +
658 +               PREROUTING:   Before NAT
659 +               POSTROUTING:  After NAT
660 +
661 +               More information can be found at: www.linuximq.net
662 +
663 +               If not sure leave the default settings alone.
664 +
665 +config IMQ_BEHAVIOR_BB
666 +       bool "IMQ BB"
667 +       help
668 +               This settings defines how IMQ behaves in respect to its
669 +               hooking in PREROUTING and POSTROUTING.
670 +
671 +               Choosing this option will make IMQ hook like this:
672 +
673 +               PREROUTING:   Before NAT
674 +               POSTROUTING:  Before NAT
675 +
676 +               More information can be found at: www.linuximq.net
677 +
678 +               If not sure leave the default settings alone.
679 +
680 +endchoice
681 +
682 +config IMQ_NUM_DEVS
683 +
684 +       int "Number of IMQ devices"
685 +       range 2 16
686 +       depends on IMQ
687 +       default "16"
688 +       help
689 +
690 +               This settings defines how many IMQ devices will be
691 +               created.
692 +
693 +               The default value is 16.
694 +
695 +               More information can be found at: www.linuximq.net
696 +
697 +               If not sure leave the default settings alone.
698 +
699  config TUN
700         tristate "Universal TUN/TAP device driver support"
701         select CRC32
702 --- a/drivers/net/Makefile
703 +++ b/drivers/net/Makefile
704 @@ -144,6 +144,7 @@
705  obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o
706  
707  obj-$(CONFIG_DUMMY) += dummy.o
708 +obj-$(CONFIG_IMQ) += imq.o
709  obj-$(CONFIG_IFB) += ifb.o
710  obj-$(CONFIG_MACVLAN) += macvlan.o
711  obj-$(CONFIG_DE600) += de600.o
712 --- /dev/null
713 +++ b/include/linux/imq.h
714 @@ -0,0 +1,13 @@
715 +#ifndef _IMQ_H
716 +#define _IMQ_H
717 +
718 +/* IFMASK (16 device indexes, 0 to 15) and flag(s) fit in 5 bits */
719 +#define IMQ_F_BITS     5
720 +
721 +#define IMQ_F_IFMASK   0x0f
722 +#define IMQ_F_ENQUEUE  0x10
723 +
724 +#define IMQ_MAX_DEVS   (IMQ_F_IFMASK + 1)
725 +
726 +#endif /* _IMQ_H */
727 +
728 --- /dev/null
729 +++ b/include/linux/netfilter_ipv4/ipt_IMQ.h
730 @@ -0,0 +1,10 @@
731 +#ifndef _IPT_IMQ_H
732 +#define _IPT_IMQ_H
733 +
734 +/* Backwards compatibility for old userspace */
735 +#include <linux/netfilter/xt_IMQ.h>
736 +
737 +#define ipt_imq_info xt_imq_info
738 +
739 +#endif /* _IPT_IMQ_H */
740 +
741 --- /dev/null
742 +++ b/include/linux/netfilter_ipv6/ip6t_IMQ.h
743 @@ -0,0 +1,10 @@
744 +#ifndef _IP6T_IMQ_H
745 +#define _IP6T_IMQ_H
746 +
747 +/* Backwards compatibility for old userspace */
748 +#include <linux/netfilter/xt_IMQ.h>
749 +
750 +#define ip6t_imq_info xt_imq_info
751 +
752 +#endif /* _IP6T_IMQ_H */
753 +
754 --- a/include/linux/skbuff.h
755 +++ b/include/linux/skbuff.h
756 @@ -28,6 +28,9 @@
757  #include <linux/rcupdate.h>
758  #include <linux/dmaengine.h>
759  #include <linux/hrtimer.h>
760 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
761 +#include <linux/imq.h>
762 +#endif
763  
764  #define HAVE_ALLOC_SKB         /* For the drivers to know */
765  #define HAVE_ALIGNABLE_SKB     /* Ditto 8)                */
766 @@ -272,6 +275,9 @@
767          * first. This is owned by whoever has the skb queued ATM.
768          */
769         char                    cb[48];
770 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
771 +       void                    *cb_next;
772 +#endif
773  
774         unsigned int            len,
775                                 data_len;
776 @@ -302,6 +308,9 @@
777         struct nf_conntrack     *nfct;
778         struct sk_buff          *nfct_reasm;
779  #endif
780 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
781 +       struct nf_queue_entry   *nf_queue_entry;
782 +#endif
783  #ifdef CONFIG_BRIDGE_NETFILTER
784         struct nf_bridge_info   *nf_bridge;
785  #endif
786 @@ -321,6 +330,9 @@
787         __u8                    do_not_encrypt:1;
788  #endif
789         /* 0/13/14 bit hole */
790 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
791 +       __u8                    imq_flags:IMQ_F_BITS;
792 +#endif
793  
794  #ifdef CONFIG_NET_DMA
795         dma_cookie_t            dma_cookie;
796 @@ -353,6 +365,12 @@
797  
798  #include <asm/system.h>
799  
800 +
801 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
802 +extern int skb_save_cb(struct sk_buff *skb);
803 +extern int skb_restore_cb(struct sk_buff *skb);
804 +#endif
805 +
806  extern void kfree_skb(struct sk_buff *skb);
807  extern void           __kfree_skb(struct sk_buff *skb);
808  extern struct sk_buff *__alloc_skb(unsigned int size,
809 @@ -1633,6 +1651,10 @@
810         dst->nfct_reasm = src->nfct_reasm;
811         nf_conntrack_get_reasm(src->nfct_reasm);
812  #endif
813 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
814 +       dst->imq_flags = src->imq_flags;
815 +       dst->nf_queue_entry = src->nf_queue_entry;
816 +#endif
817  #ifdef CONFIG_BRIDGE_NETFILTER
818         dst->nf_bridge  = src->nf_bridge;
819         nf_bridge_get(src->nf_bridge);
820 --- a/net/core/dev.c
821 +++ b/net/core/dev.c
822 @@ -96,6 +96,9 @@
823  #include <net/net_namespace.h>
824  #include <net/sock.h>
825  #include <linux/rtnetlink.h>
826 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
827 +#include <linux/imq.h>
828 +#endif
829  #include <linux/proc_fs.h>
830  #include <linux/seq_file.h>
831  #include <linux/stat.h>
832 @@ -1624,7 +1627,11 @@
833                         struct netdev_queue *txq)
834  {
835         if (likely(!skb->next)) {
836 -               if (!list_empty(&ptype_all))
837 +               if (!list_empty(&ptype_all)
838 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
839 +                   && !(skb->imq_flags & IMQ_F_ENQUEUE)
840 +#endif
841 +                   )
842                         dev_queue_xmit_nit(skb, dev);
843  
844                 if (netif_needs_gso(dev, skb)) {
845 @@ -1715,8 +1722,7 @@
846         return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
847  }
848  
849 -static struct netdev_queue *dev_pick_tx(struct net_device *dev,
850 -                                       struct sk_buff *skb)
851 +struct netdev_queue *dev_pick_tx(struct net_device *dev, struct sk_buff *skb)
852  {
853         u16 queue_index = 0;
854  
855 @@ -1728,6 +1734,7 @@
856         skb_set_queue_mapping(skb, queue_index);
857         return netdev_get_tx_queue(dev, queue_index);
858  }
859 +EXPORT_SYMBOL(dev_pick_tx);
860  
861  /**
862   *     dev_queue_xmit - transmit a buffer
863 --- a/include/linux/netdevice.h
864 +++ b/include/linux/netdevice.h
865 @@ -915,6 +915,7 @@
866  extern int             dev_open(struct net_device *dev);
867  extern int             dev_close(struct net_device *dev);
868  extern void            dev_disable_lro(struct net_device *dev);
869 +extern struct netdev_queue *dev_pick_tx(struct net_device *dev, struct sk_buff *skb);
870  extern int             dev_queue_xmit(struct sk_buff *skb);
871  extern int             register_netdevice(struct net_device *dev);
872  extern void            unregister_netdevice(struct net_device *dev);
873 --- /dev/null
874 +++ b/include/linux/netfilter/xt_IMQ.h
875 @@ -0,0 +1,9 @@
876 +#ifndef _XT_IMQ_H
877 +#define _XT_IMQ_H
878 +
879 +struct xt_imq_info {
880 +       unsigned int todev;     /* target imq device */
881 +};
882 +
883 +#endif /* _XT_IMQ_H */
884 +
885 --- a/include/net/netfilter/nf_queue.h
886 +++ b/include/net/netfilter/nf_queue.h
887 @@ -13,6 +13,12 @@
888         struct net_device       *indev;
889         struct net_device       *outdev;
890         int                     (*okfn)(struct sk_buff *);
891 +
892 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
893 +       int                     (*next_outfn)(struct nf_queue_entry *entry,
894 +                                             unsigned int queuenum);
895 +       unsigned int            next_queuenum;
896 +#endif
897  };
898  
899  #define nf_queue_entry_reroute(x) ((void *)x + sizeof(struct nf_queue_entry))
900 @@ -30,5 +36,11 @@
901                                        const struct nf_queue_handler *qh);
902  extern void nf_unregister_queue_handlers(const struct nf_queue_handler *qh);
903  extern void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
904 +extern void nf_queue_entry_release_refs(struct nf_queue_entry *entry);
905 +
906 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
907 +extern void nf_register_queue_imq_handler(const struct nf_queue_handler *qh);
908 +extern void nf_unregister_queue_imq_handler(void);
909 +#endif
910  
911  #endif /* _NF_QUEUE_H */
912 --- a/net/core/skbuff.c
913 +++ b/net/core/skbuff.c
914 @@ -69,6 +69,9 @@
915  
916  static struct kmem_cache *skbuff_head_cache __read_mostly;
917  static struct kmem_cache *skbuff_fclone_cache __read_mostly;
918 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
919 +static struct kmem_cache *skbuff_cb_store_cache __read_mostly;
920 +#endif
921  
922  static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
923                                   struct pipe_buffer *buf)
924 @@ -88,6 +91,80 @@
925         return 1;
926  }
927  
928 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
929 +/* Control buffer save/restore for IMQ devices */
930 +struct skb_cb_table {
931 +       void                    *cb_next;
932 +       atomic_t                refcnt;
933 +       char                    cb[48];
934 +};
935 +
936 +static DEFINE_SPINLOCK(skb_cb_store_lock);
937 +
938 +int skb_save_cb(struct sk_buff *skb)
939 +{
940 +       struct skb_cb_table *next;
941 +
942 +       next = kmem_cache_alloc(skbuff_cb_store_cache, GFP_ATOMIC);
943 +       if (!next)
944 +               return -ENOMEM;
945 +
946 +       BUILD_BUG_ON(sizeof(skb->cb) != sizeof(next->cb));
947 +
948 +       memcpy(next->cb, skb->cb, sizeof(skb->cb));
949 +       next->cb_next = skb->cb_next;
950 +
951 +       atomic_set(&next->refcnt, 1);
952 +
953 +       skb->cb_next = next;
954 +       return 0;
955 +}
956 +EXPORT_SYMBOL(skb_save_cb);
957 +
958 +int skb_restore_cb(struct sk_buff *skb)
959 +{
960 +       struct skb_cb_table *next;
961 +
962 +       if (!skb->cb_next)
963 +               return 0;
964 +
965 +       next = skb->cb_next;
966 +
967 +       BUILD_BUG_ON(sizeof(skb->cb) != sizeof(next->cb));
968 +
969 +       memcpy(skb->cb, next->cb, sizeof(skb->cb));
970 +       skb->cb_next = next->cb_next;
971 +
972 +       spin_lock(&skb_cb_store_lock);
973 +
974 +       if (atomic_dec_and_test(&next->refcnt)) {
975 +               kmem_cache_free(skbuff_cb_store_cache, next);
976 +       }
977 +
978 +       spin_unlock(&skb_cb_store_lock);
979 +
980 +       return 0;
981 +}
982 +EXPORT_SYMBOL(skb_restore_cb);
983 +
984 +static void skb_copy_stored_cb(struct sk_buff *new, struct sk_buff *old)
985 +{
986 +       struct skb_cb_table *next;
987 +
988 +       if (!old->cb_next) {
989 +               new->cb_next = NULL;
990 +               return;
991 +       }
992 +
993 +       spin_lock(&skb_cb_store_lock);
994 +
995 +       next = old->cb_next;
996 +       atomic_inc(&next->refcnt);
997 +       new->cb_next = next;
998 +
999 +       spin_unlock(&skb_cb_store_lock);
1000 +}
1001 +#endif
1002  
1003  /* Pipe buffer operations for a socket. */
1004  static struct pipe_buf_operations sock_pipe_buf_ops = {
1005 @@ -362,6 +439,15 @@
1006                 WARN_ON(in_irq());
1007                 skb->destructor(skb);
1008         }
1009 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1010 +       /* This should not happen. When it does, avoid memleak by restoring
1011 +       the chain of cb-backups. */
1012 +       while (skb->cb_next != NULL) {
1013 +               printk(KERN_WARNING "kfree_skb: skb->cb_next: %p\n",
1014 +                       skb->cb_next);
1015 +               skb_restore_cb(skb);
1016 +       }
1017 +#endif
1018  #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
1019         nf_conntrack_put(skb->nfct);
1020         nf_conntrack_put_reasm(skb->nfct_reasm);
1021 @@ -424,6 +510,9 @@
1022         new->sp                 = secpath_get(old->sp);
1023  #endif
1024         memcpy(new->cb, old->cb, sizeof(old->cb));
1025 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1026 +       skb_copy_stored_cb(new, old);
1027 +#endif
1028         new->csum_start         = old->csum_start;
1029         new->csum_offset        = old->csum_offset;
1030         new->local_df           = old->local_df;
1031 @@ -2326,6 +2415,13 @@
1032                                                 0,
1033                                                 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
1034                                                 NULL);
1035 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1036 +       skbuff_cb_store_cache = kmem_cache_create("skbuff_cb_store_cache",
1037 +                                                 sizeof(struct skb_cb_table),
1038 +                                                 0,
1039 +                                                 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
1040 +                                                 NULL);
1041 +#endif
1042  }
1043  
1044  /**
1045 --- a/net/netfilter/Kconfig
1046 +++ b/net/netfilter/Kconfig
1047 @@ -342,6 +342,18 @@
1048  
1049           To compile it as a module, choose M here.  If unsure, say N.
1050  
1051 +config NETFILTER_XT_TARGET_IMQ
1052 +        tristate '"IMQ" target support'
1053 +       depends on NETFILTER_XTABLES
1054 +       depends on IP_NF_MANGLE || IP6_NF_MANGLE
1055 +       select IMQ
1056 +       default m if NETFILTER_ADVANCED=n
1057 +        help
1058 +          This option adds a `IMQ' target which is used to specify if and
1059 +          to which imq device packets should get enqueued/dequeued.
1060 +
1061 +          To compile it as a module, choose M here.  If unsure, say N.
1062 +
1063  config NETFILTER_XT_TARGET_MARK
1064         tristate '"MARK" target support'
1065         depends on NETFILTER_XTABLES
1066 --- a/net/netfilter/Makefile
1067 +++ b/net/netfilter/Makefile
1068 @@ -42,6 +42,7 @@
1069  obj-$(CONFIG_NETFILTER_XT_TARGET_CONNMARK) += xt_CONNMARK.o
1070  obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o
1071  obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o
1072 +obj-$(CONFIG_NETFILTER_XT_TARGET_IMQ) += xt_IMQ.o
1073  obj-$(CONFIG_NETFILTER_XT_TARGET_MARK) += xt_MARK.o
1074  obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o
1075  obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o
1076 --- a/net/netfilter/nf_queue.c
1077 +++ b/net/netfilter/nf_queue.c
1078 @@ -20,6 +20,26 @@
1079  
1080  static DEFINE_MUTEX(queue_handler_mutex);
1081  
1082 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1083 +static const struct nf_queue_handler *queue_imq_handler;
1084 +
1085 +void nf_register_queue_imq_handler(const struct nf_queue_handler *qh)
1086 +{
1087 +       mutex_lock(&queue_handler_mutex);
1088 +       rcu_assign_pointer(queue_imq_handler, qh);
1089 +       mutex_unlock(&queue_handler_mutex);
1090 +}
1091 +EXPORT_SYMBOL(nf_register_queue_imq_handler);
1092 +
1093 +void nf_unregister_queue_imq_handler(void)
1094 +{
1095 +       mutex_lock(&queue_handler_mutex);
1096 +       rcu_assign_pointer(queue_imq_handler, NULL);
1097 +       mutex_unlock(&queue_handler_mutex);
1098 +}
1099 +EXPORT_SYMBOL(nf_unregister_queue_imq_handler);
1100 +#endif
1101 +
1102  /* return EBUSY when somebody else is registered, return EEXIST if the
1103   * same handler is registered, return 0 in case of success. */
1104  int nf_register_queue_handler(int pf, const struct nf_queue_handler *qh)
1105 @@ -80,7 +100,7 @@
1106  }
1107  EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers);
1108  
1109 -static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
1110 +void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
1111  {
1112         /* Release those devices we held, or Alexey will kill me. */
1113         if (entry->indev)
1114 @@ -100,6 +120,7 @@
1115         /* Drop reference to owner of hook which queued us. */
1116         module_put(entry->elem->owner);
1117  }
1118 +EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs);
1119  
1120  /*
1121   * Any packet that leaves via this function must come back
1122 @@ -121,12 +142,26 @@
1123  #endif
1124         const struct nf_afinfo *afinfo;
1125         const struct nf_queue_handler *qh;
1126 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1127 +       const struct nf_queue_handler *qih = NULL;
1128 +#endif
1129  
1130         /* QUEUE == DROP if noone is waiting, to be safe. */
1131         rcu_read_lock();
1132  
1133         qh = rcu_dereference(queue_handler[pf]);
1134 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1135 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1136 +       if (pf == PF_INET || pf == PF_INET6)
1137 +#else
1138 +       if (pf == PF_INET)
1139 +#endif
1140 +               qih = rcu_dereference(queue_imq_handler);
1141 +
1142 +       if (!qh && !qih)
1143 +#else /* !IMQ */
1144         if (!qh)
1145 +#endif
1146                 goto err_unlock;
1147  
1148         afinfo = nf_get_afinfo(pf);
1149 @@ -145,6 +180,10 @@
1150                 .indev  = indev,
1151                 .outdev = outdev,
1152                 .okfn   = okfn,
1153 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1154 +               .next_outfn = qh ? qh->outfn : NULL,
1155 +               .next_queuenum = queuenum,
1156 +#endif
1157         };
1158  
1159         /* If it's going away, ignore hook. */
1160 @@ -170,8 +209,19 @@
1161         }
1162  #endif
1163         afinfo->saveroute(skb, entry);
1164 +
1165 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1166 +       if (qih) {
1167 +               status = qih->outfn(entry, queuenum);
1168 +               goto imq_skip_queue;
1169 +       }
1170 +#endif
1171 +
1172         status = qh->outfn(entry, queuenum);
1173  
1174 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1175 +imq_skip_queue:
1176 +#endif
1177         rcu_read_unlock();
1178  
1179         if (status < 0) {
1180 --- /dev/null
1181 +++ b/net/netfilter/xt_IMQ.c
1182 @@ -0,0 +1,81 @@
1183 +/*
1184 + * This target marks packets to be enqueued to an imq device
1185 + */
1186 +#include <linux/module.h>
1187 +#include <linux/skbuff.h>
1188 +#include <linux/netfilter/x_tables.h>
1189 +#include <linux/netfilter/xt_IMQ.h>
1190 +#include <linux/imq.h>
1191 +
1192 +static unsigned int imq_target(struct sk_buff *pskb,
1193 +                              const struct net_device *in,
1194 +                              const struct net_device *out,
1195 +                              unsigned int hooknum,
1196 +                              const struct xt_target *target,
1197 +                              const void *targinfo)
1198 +{
1199 +       const struct xt_imq_info *mr = targinfo;
1200 +
1201 +       pskb->imq_flags = (mr->todev & IMQ_F_IFMASK) | IMQ_F_ENQUEUE;
1202 +
1203 +       return XT_CONTINUE;
1204 +}
1205 +
1206 +static bool imq_checkentry(const char *tablename,
1207 +                         const void *entry,
1208 +                         const struct xt_target *target,
1209 +                         void *targinfo,
1210 +                         unsigned int hook_mask)
1211 +{
1212 +       struct xt_imq_info *mr = targinfo;
1213 +
1214 +       if (mr->todev > IMQ_MAX_DEVS - 1) {
1215 +               printk(KERN_WARNING
1216 +                      "IMQ: invalid device specified, highest is %u\n",
1217 +                      IMQ_MAX_DEVS - 1);
1218 +               return false;
1219 +       }
1220 +
1221 +       return true;
1222 +}
1223 +
1224 +static struct xt_target xt_imq_reg[] __read_mostly = {
1225 +       {
1226 +               .name           = "IMQ",
1227 +               .family         = AF_INET,
1228 +               .target         = imq_target,
1229 +               .targetsize     = sizeof(struct xt_imq_info),
1230 +               .table          = "mangle",
1231 +               .checkentry     = imq_checkentry,
1232 +               .me             = THIS_MODULE
1233 +       },
1234 +       {
1235 +               .name           = "IMQ",
1236 +               .family         = AF_INET6,
1237 +               .target         = imq_target,
1238 +               .targetsize     = sizeof(struct xt_imq_info),
1239 +               .table          = "mangle",
1240 +               .checkentry     = imq_checkentry,
1241 +               .me             = THIS_MODULE
1242 +       },
1243 +};
1244 +
1245 +static int __init imq_init(void)
1246 +{
1247 +       return xt_register_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg));
1248 +}
1249 +
1250 +static void __exit imq_fini(void)
1251 +{
1252 +       xt_unregister_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg));
1253 +}
1254 +
1255 +module_init(imq_init);
1256 +module_exit(imq_fini);
1257 +
1258 +MODULE_AUTHOR("http://www.linuximq.net");
1259 +MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See http://www.linuximq.net/ for more information.");
1260 +MODULE_LICENSE("GPL");
1261 +MODULE_ALIAS("ipt_IMQ");
1262 +MODULE_ALIAS("ip6t_IMQ");
1263 +