[kernel] revert 15922 - add back 2.6.29 kernel support
[openwrt.git] / target / linux / generic-2.6 / patches-2.6.29 / 130-netfilter_ipset.patch
1 --- a/include/linux/netfilter_ipv4/Kbuild
2 +++ b/include/linux/netfilter_ipv4/Kbuild
3 @@ -45,3 +45,14 @@ header-y += ipt_ttl.h
4  
5  unifdef-y += ip_queue.h
6  unifdef-y += ip_tables.h
7 +
8 +unifdef-y += ip_set.h
9 +header-y  += ip_set_iphash.h
10 +header-y  += ip_set_ipmap.h
11 +header-y  += ip_set_ipporthash.h
12 +unifdef-y += ip_set_iptree.h
13 +unifdef-y += ip_set_iptreemap.h
14 +header-y  += ip_set_jhash.h
15 +header-y  += ip_set_macipmap.h
16 +unifdef-y += ip_set_nethash.h
17 +header-y  += ip_set_portmap.h
18 --- /dev/null
19 +++ b/include/linux/netfilter_ipv4/ip_set.h
20 @@ -0,0 +1,498 @@
21 +#ifndef _IP_SET_H
22 +#define _IP_SET_H
23 +
24 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
25 + *                         Patrick Schaaf <bof@bof.de>
26 + *                         Martin Josefsson <gandalf@wlug.westbo.se>
27 + * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
28 + *
29 + * This program is free software; you can redistribute it and/or modify
30 + * it under the terms of the GNU General Public License version 2 as
31 + * published by the Free Software Foundation.
32 + */
33 +
34 +#if 0
35 +#define IP_SET_DEBUG
36 +#endif
37 +
38 +/*
39 + * A sockopt of such quality has hardly ever been seen before on the open
40 + * market!  This little beauty, hardly ever used: above 64, so it's
41 + * traditionally used for firewalling, not touched (even once!) by the
42 + * 2.0, 2.2 and 2.4 kernels!
43 + *
44 + * Comes with its own certificate of authenticity, valid anywhere in the
45 + * Free world!
46 + *
47 + * Rusty, 19.4.2000
48 + */
49 +#define SO_IP_SET              83
50 +
51 +/*
52 + * Heavily modified by Joakim Axelsson 08.03.2002
53 + * - Made it more module-based
54 + *
55 + * Additional heavy modifications by Jozsef Kadlecsik 22.02.2004
56 + * - bindings added
57 + * - in order to "deal with" backward compatibility, renamed to ipset
58 + */
59 +
60 +/*
61 + * Used so that the kernel module and ipset-binary can match their versions
62 + */
63 +#define IP_SET_PROTOCOL_VERSION 2
64 +
65 +#define IP_SET_MAXNAMELEN 32   /* set names and set typenames */
66 +
67 +/* Let's work with our own typedef for representing an IP address.
68 + * We hope to make the code more portable, possibly to IPv6...
69 + *
70 + * The representation works in HOST byte order, because most set types
71 + * will perform arithmetic operations and compare operations.
72 + *
73 + * For now the type is a uint32_t.
74 + *
75 + * Make sure to ONLY use the functions when translating and parsing
76 + * in order to keep the host byte order and make it more portable:
77 + *  parse_ip()
78 + *  parse_mask()
79 + *  parse_ipandmask()
80 + *  ip_tostring()
81 + * (Joakim: where are they???)
82 + */
83 +
84 +typedef uint32_t ip_set_ip_t;
85 +
86 +/* Sets are identified by an id in kernel space. Tweak with ip_set_id_t
87 + * and IP_SET_INVALID_ID if you want to increase the max number of sets.
88 + */
89 +typedef uint16_t ip_set_id_t;
90 +
91 +#define IP_SET_INVALID_ID      65535
92 +
93 +/* How deep we follow bindings */
94 +#define IP_SET_MAX_BINDINGS    6
95 +
96 +/*
97 + * Option flags for kernel operations (ipt_set_info)
98 + */
99 +#define IPSET_SRC              0x01    /* Source match/add */
100 +#define IPSET_DST              0x02    /* Destination match/add */
101 +#define IPSET_MATCH_INV                0x04    /* Inverse matching */
102 +
103 +/*
104 + * Set features
105 + */
106 +#define IPSET_TYPE_IP          0x01    /* IP address type of set */
107 +#define IPSET_TYPE_PORT                0x02    /* Port type of set */
108 +#define IPSET_DATA_SINGLE      0x04    /* Single data storage */
109 +#define IPSET_DATA_DOUBLE      0x08    /* Double data storage */
110 +
111 +/* Reserved keywords */
112 +#define IPSET_TOKEN_DEFAULT    ":default:"
113 +#define IPSET_TOKEN_ALL                ":all:"
114 +
115 +/* SO_IP_SET operation constants, and their request struct types.
116 + *
117 + * Operation ids:
118 + *       0-99:  commands with version checking
119 + *     100-199: add/del/test/bind/unbind
120 + *     200-299: list, save, restore
121 + */
122 +
123 +/* Single shot operations:
124 + * version, create, destroy, flush, rename and swap
125 + *
126 + * Sets are identified by name.
127 + */
128 +
129 +#define IP_SET_REQ_STD         \
130 +       unsigned op;            \
131 +       unsigned version;       \
132 +       char name[IP_SET_MAXNAMELEN]
133 +
134 +#define IP_SET_OP_CREATE       0x00000001      /* Create a new (empty) set */
135 +struct ip_set_req_create {
136 +       IP_SET_REQ_STD;
137 +       char typename[IP_SET_MAXNAMELEN];
138 +};
139 +
140 +#define IP_SET_OP_DESTROY      0x00000002      /* Remove an (empty) set */
141 +struct ip_set_req_std {
142 +       IP_SET_REQ_STD;
143 +};
144 +
145 +#define IP_SET_OP_FLUSH                0x00000003      /* Remove all IPs in a set */
146 +/* Uses ip_set_req_std */
147 +
148 +#define IP_SET_OP_RENAME       0x00000004      /* Rename a set */
149 +/* Uses ip_set_req_create */
150 +
151 +#define IP_SET_OP_SWAP         0x00000005      /* Swap two sets */
152 +/* Uses ip_set_req_create */
153 +
154 +union ip_set_name_index {
155 +       char name[IP_SET_MAXNAMELEN];
156 +       ip_set_id_t index;
157 +};
158 +
159 +#define IP_SET_OP_GET_BYNAME   0x00000006      /* Get set index by name */
160 +struct ip_set_req_get_set {
161 +       unsigned op;
162 +       unsigned version;
163 +       union ip_set_name_index set;
164 +};
165 +
166 +#define IP_SET_OP_GET_BYINDEX  0x00000007      /* Get set name by index */
167 +/* Uses ip_set_req_get_set */
168 +
169 +#define IP_SET_OP_VERSION      0x00000100      /* Ask kernel version */
170 +struct ip_set_req_version {
171 +       unsigned op;
172 +       unsigned version;
173 +};
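For orientation (editor's sketch, not part of the patch): these request structs travel over getsockopt()/setsockopt() on a raw IPv4 socket, which is how the ipset userspace binary talks to this module. A minimal example of the version handshake, assuming root/CAP_NET_ADMIN and the headers added by this patch:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/netfilter_ipv4/ip_set.h>

int main(void)
{
        struct ip_set_req_version req;
        socklen_t size = sizeof(req);
        int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);        /* needs CAP_NET_ADMIN */

        if (fd < 0)
                return 1;
        memset(&req, 0, sizeof(req));
        req.op = IP_SET_OP_VERSION;
        /* "get"-style ops are issued via getsockopt(SOL_IP, SO_IP_SET) */
        if (getsockopt(fd, SOL_IP, SO_IP_SET, &req, &size) < 0)
                return 1;
        printf("kernel ipset protocol %u (header says %u)\n",
               req.version, IP_SET_PROTOCOL_VERSION);
        return 0;
}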
174 +
175 +/* Double shot operations:
176 + * add, del, test, bind and unbind.
177 + *
178 + * First we query the kernel to get the index and type of the target set,
179 + * then issue the command. Validity of IP is checked in kernel in order
180 + * to minimize sockopt operations.
181 + */
182 +
183 +/* Get minimal set data for add/del/test/bind/unbind IP */
184 +#define IP_SET_OP_ADT_GET      0x00000010      /* Get set and type */
185 +struct ip_set_req_adt_get {
186 +       unsigned op;
187 +       unsigned version;
188 +       union ip_set_name_index set;
189 +       char typename[IP_SET_MAXNAMELEN];
190 +};
191 +
192 +#define IP_SET_REQ_BYINDEX     \
193 +       unsigned op;            \
194 +       ip_set_id_t index;
195 +
196 +struct ip_set_req_adt {
197 +       IP_SET_REQ_BYINDEX;
198 +};
199 +
200 +#define IP_SET_OP_ADD_IP       0x00000101      /* Add an IP to a set */
201 +/* Uses ip_set_req_adt, with type specific addage */
202 +
203 +#define IP_SET_OP_DEL_IP       0x00000102      /* Remove an IP from a set */
204 +/* Uses ip_set_req_adt, with type specific addage */
205 +
206 +#define IP_SET_OP_TEST_IP      0x00000103      /* Test an IP in a set */
207 +/* Uses ip_set_req_adt, with type specific addage */
208 +
209 +#define IP_SET_OP_BIND_SET     0x00000104      /* Bind an IP to a set */
210 +/* Uses ip_set_req_bind, with type specific addage */
211 +struct ip_set_req_bind {
212 +       IP_SET_REQ_BYINDEX;
213 +       char binding[IP_SET_MAXNAMELEN];
214 +};
215 +
216 +#define IP_SET_OP_UNBIND_SET   0x00000105      /* Unbind an IP from a set */
217 +/* Uses ip_set_req_bind, with type specific addage
218 + * index = 0 means unbinding for all sets */
219 +
220 +#define IP_SET_OP_TEST_BIND_SET        0x00000106      /* Test binding an IP to a set */
221 +/* Uses ip_set_req_bind, with type specific addage */
222 +
223 +/* Multiple shot operations: list, save, restore.
224 + *
225 + * - check kernel version and query the max number of sets
226 + * - get the basic information on all sets
227 + *   and size required for the next step
228 + * - get actual set data: header, data, bindings
229 + */
230 +
231 +/* Get max_sets and the index of a queried set
232 + */
233 +#define IP_SET_OP_MAX_SETS     0x00000020
234 +struct ip_set_req_max_sets {
235 +       unsigned op;
236 +       unsigned version;
237 +       ip_set_id_t max_sets;           /* max_sets */
238 +       ip_set_id_t sets;               /* real number of sets */
239 +       union ip_set_name_index set;    /* index of set if name used */
240 +};
241 +
242 +/* Get the id and name of the sets plus size for next step */
243 +#define IP_SET_OP_LIST_SIZE    0x00000201
244 +#define IP_SET_OP_SAVE_SIZE    0x00000202
245 +struct ip_set_req_setnames {
246 +       unsigned op;
247 +       ip_set_id_t index;              /* set to list/save */
248 +       size_t size;                    /* size to get setdata/bindings */
249 +       /* followed by sets number of struct ip_set_name_list */
250 +};
251 +
252 +struct ip_set_name_list {
253 +       char name[IP_SET_MAXNAMELEN];
254 +       char typename[IP_SET_MAXNAMELEN];
255 +       ip_set_id_t index;
256 +       ip_set_id_t id;
257 +};
258 +
259 +/* The actual list operation */
260 +#define IP_SET_OP_LIST         0x00000203
261 +struct ip_set_req_list {
262 +       IP_SET_REQ_BYINDEX;
263 +       /* sets number of struct ip_set_list in reply */
264 +};
265 +
266 +struct ip_set_list {
267 +       ip_set_id_t index;
268 +       ip_set_id_t binding;
269 +       u_int32_t ref;
270 +       size_t header_size;     /* Set header data of header_size */
271 +       size_t members_size;    /* Set members data of members_size */
272 +       size_t bindings_size;   /* Set bindings data of bindings_size */
273 +};
274 +
275 +struct ip_set_hash_list {
276 +       ip_set_ip_t ip;
277 +       ip_set_id_t binding;
278 +};
279 +
280 +/* The save operation */
281 +#define IP_SET_OP_SAVE         0x00000204
282 +/* Uses ip_set_req_list, in the reply replaced by
283 + * sets number of struct ip_set_save plus a marker
284 + * ip_set_save followed by ip_set_hash_save structures.
285 + */
286 +struct ip_set_save {
287 +       ip_set_id_t index;
288 +       ip_set_id_t binding;
289 +       size_t header_size;     /* Set header data of header_size */
290 +       size_t members_size;    /* Set members data of members_size */
291 +};
292 +
293 +/* When restoring, ip == 0 means the default binding for the given set: */
294 +struct ip_set_hash_save {
295 +       ip_set_ip_t ip;
296 +       ip_set_id_t id;
297 +       ip_set_id_t binding;
298 +};
299 +
300 +/* The restore operation */
301 +#define IP_SET_OP_RESTORE      0x00000205
302 +/* Uses ip_set_req_setnames followed by ip_set_restore structures
303 + * plus a marker ip_set_restore, followed by ip_set_hash_save
304 + * structures.
305 + */
306 +struct ip_set_restore {
307 +       char name[IP_SET_MAXNAMELEN];
308 +       char typename[IP_SET_MAXNAMELEN];
309 +       ip_set_id_t index;
310 +       size_t header_size;     /* Create data of header_size */
311 +       size_t members_size;    /* Set members data of members_size */
312 +};
313 +
314 +static inline int bitmap_bytes(ip_set_ip_t a, ip_set_ip_t b)
315 +{
316 +       return 4 * ((((b - a + 8) / 8) + 3) / 4);
317 +}
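Worked example for the helper above (editor's note, not patch text): bitmap_bytes(a, b) reserves one bit per address in [a, b], rounded up to a multiple of 4 bytes. For a /16 worth of addresses, bitmap_bytes(0, 0xFFFF) = 4 * (((0xFFFF - 0 + 8) / 8 + 3) / 4) = 4 * ((8192 + 3) / 4) = 4 * 2048 = 8192 bytes, i.e. exactly 65536 bits.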
318 +
319 +#ifdef __KERNEL__
320 +
321 +#define ip_set_printk(format, args...)                         \
322 +       do {                                                    \
323 +               printk("%s: %s: ", __FILE__, __FUNCTION__);     \
324 +               printk(format "\n" , ## args);                  \
325 +       } while (0)
326 +
327 +#if defined(IP_SET_DEBUG)
328 +#define DP(format, args...)                                    \
329 +       do {                                                    \
330 +               printk("%s: %s (DBG): ", __FILE__, __FUNCTION__);\
331 +               printk(format "\n" , ## args);                  \
332 +       } while (0)
333 +#define IP_SET_ASSERT(x)                                       \
334 +       do {                                                    \
335 +               if (!(x))                                       \
336 +                       printk("IP_SET_ASSERT: %s:%i(%s)\n",    \
337 +                               __FILE__, __LINE__, __FUNCTION__); \
338 +       } while (0)
339 +#else
340 +#define DP(format, args...)
341 +#define IP_SET_ASSERT(x)
342 +#endif
343 +
344 +struct ip_set;
345 +
346 +/*
347 + * The ip_set_type definition - one per set type, e.g. "ipmap".
348 + *
349 + * Each individual set has a pointer, set->type, going to one
350 + * of these structures. Function pointers inside the structure implement
351 + * the real behaviour of the sets.
352 + *
353 + * If not mentioned differently, the implementation behind the function
354 + * pointers of a set_type is expected to return 0 if ok, and a negative
355 + * errno (e.g. -EINVAL) on error.
356 + */
357 +struct ip_set_type {
358 +       struct list_head list;  /* next in list of set types */
359 +
360 +       /* test for IP in set (kernel: iptables -m set src|dst)
361 +        * return 0 if not in set, 1 if in set.
362 +        */
363 +       int (*testip_kernel) (struct ip_set *set,
364 +                             const struct sk_buff * skb,
365 +                             ip_set_ip_t *ip,
366 +                             const u_int32_t *flags,
367 +                             unsigned char index);
368 +
369 +       /* test for IP in set (userspace: ipset -T set IP)
370 +        * return 0 if not in set, 1 if in set.
371 +        */
372 +       int (*testip) (struct ip_set *set,
373 +                      const void *data, size_t size,
374 +                      ip_set_ip_t *ip);
375 +
376 +       /*
377 +        * Size of the data structure passed in when
378 +        * adding/deleting/testing an entry.
379 +        */
380 +       size_t reqsize;
381 +
382 +       /* Add IP into set (userspace: ipset -A set IP)
383 +        * Return -EEXIST if the address is already in the set,
384 +        * and -ERANGE if the address lies outside the set bounds.
385 +        * If the address was not already in the set, 0 is returned.
386 +        */
387 +       int (*addip) (struct ip_set *set,
388 +                     const void *data, size_t size,
389 +                     ip_set_ip_t *ip);
390 +
391 +       /* Add IP into set (kernel: iptables ... -j SET set src|dst)
392 +        * Return -EEXIST if the address is already in the set,
393 +        * and -ERANGE if the address lies outside the set bounds.
394 +        * If the address was not already in the set, 0 is returned.
395 +        */
396 +       int (*addip_kernel) (struct ip_set *set,
397 +                            const struct sk_buff * skb,
398 +                            ip_set_ip_t *ip,
399 +                            const u_int32_t *flags,
400 +                            unsigned char index);
401 +
402 +       /* remove IP from set (userspace: ipset -D set --entry x)
403 +        * Return -EEXIST if the address is NOT in the set,
404 +        * and -ERANGE if the address lies outside the set bounds.
405 +        * If the address really was in the set, 0 is returned.
406 +        */
407 +       int (*delip) (struct ip_set *set,
408 +                     const void *data, size_t size,
409 +                     ip_set_ip_t *ip);
410 +
411 +       /* remove IP from set (kernel: iptables ... -j SET --entry x)
412 +        * Return -EEXIST if the address is NOT in the set,
413 +        * and -ERANGE if the address lies outside the set bounds.
414 +        * If the address really was in the set, 0 is returned.
415 +        */
416 +       int (*delip_kernel) (struct ip_set *set,
417 +                            const struct sk_buff * skb,
418 +                            ip_set_ip_t *ip,
419 +                            const u_int32_t *flags,
420 +                            unsigned char index);
421 +
422 +       /* new set creation - allocate type specific items
423 +        */
424 +       int (*create) (struct ip_set *set,
425 +                      const void *data, size_t size);
426 +
427 +       /* retry the operation after successfully tweaking the set
428 +        */
429 +       int (*retry) (struct ip_set *set);
430 +
431 +       /* set destruction - free type specific items
432 +        * There is no return value.
433 +        * Can be called only when child sets are destroyed.
434 +        */
435 +       void (*destroy) (struct ip_set *set);
436 +
437 +       /* set flushing - reset all bits in the set, or something similar.
438 +        * There is no return value.
439 +        */
440 +       void (*flush) (struct ip_set *set);
441 +
442 +       /* Listing: size needed for header
443 +        */
444 +       size_t header_size;
445 +
446 +       /* Listing: Get the header
447 +        *
448 +        * Fill in the information in "data".
449 +        * This function is always run after list_header_size() under a
450 +        * writelock on the set. Therefore the length of "data" is
451 +        * always correct.
452 +        */
453 +       void (*list_header) (const struct ip_set *set,
454 +                            void *data);
455 +
456 +       /* Listing: Get the size for the set members
457 +        */
458 +       int (*list_members_size) (const struct ip_set *set);
459 +
460 +       /* Listing: Get the set members
461 +        *
462 +        * Fill in the information in "data".
463 +        * This function is always run after list_member_size() under a
464 +        * writelock on the set. Therefore the length of "data" is
465 +        * always correct.
466 +        */
467 +       void (*list_members) (const struct ip_set *set,
468 +                             void *data);
469 +
470 +       char typename[IP_SET_MAXNAMELEN];
471 +       unsigned char features;
472 +       int protocol_version;
473 +
474 +       /* Set this to THIS_MODULE if you are a module, otherwise NULL */
475 +       struct module *me;
476 +};
477 +
478 +extern int ip_set_register_set_type(struct ip_set_type *set_type);
479 +extern void ip_set_unregister_set_type(struct ip_set_type *set_type);
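To make the registration contract concrete, here is a skeleton of how the bundled set types (ipmap, iphash, ...) hook into this API from their module init/exit paths. This is an editor's sketch with hypothetical names ("mytype", ip_set_req_mytype*) and the callback bodies omitted; it is not code from the patch:

static struct ip_set_type ip_set_mytype = {
        .typename               = "mytype",
        .features               = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
        .protocol_version       = IP_SET_PROTOCOL_VERSION,
        .reqsize                = sizeof(struct ip_set_req_mytype),
        .header_size            = sizeof(struct ip_set_req_mytype_create),
        /* .create, .destroy, .flush, .addip/.addip_kernel,
         * .delip/.delip_kernel, .testip/.testip_kernel and the .list_*
         * hooks point at the type's handlers; each returns 0 or a
         * negative errno, as documented above. */
        .me                     = THIS_MODULE,
};

static int __init ip_set_mytype_init(void)
{
        return ip_set_register_set_type(&ip_set_mytype);
}

static void __exit ip_set_mytype_fini(void)
{
        ip_set_unregister_set_type(&ip_set_mytype);
}

module_init(ip_set_mytype_init);
module_exit(ip_set_mytype_fini);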
480 +
481 +/* A generic ipset */
482 +struct ip_set {
483 +       char name[IP_SET_MAXNAMELEN];   /* the name of the set */
484 +       rwlock_t lock;                  /* lock for concurrency control */
485 +       ip_set_id_t id;                 /* set id for swapping */
486 +       ip_set_id_t binding;            /* default binding for the set */
487 +       atomic_t ref;                   /* in kernel and in hash references */
488 +       struct ip_set_type *type;       /* the set types */
489 +       void *data;                     /* pooltype specific data */
490 +};
491 +
492 +/* Structure to bind set elements to sets */
493 +struct ip_set_hash {
494 +       struct list_head list;          /* list of clashing entries in hash */
495 +       ip_set_ip_t ip;                 /* ip from set */
496 +       ip_set_id_t id;                 /* set id */
497 +       ip_set_id_t binding;            /* set we bind the element to */
498 +};
499 +
500 +/* register and unregister set references */
501 +extern ip_set_id_t ip_set_get_byname(const char name[IP_SET_MAXNAMELEN]);
502 +extern ip_set_id_t ip_set_get_byindex(ip_set_id_t id);
503 +extern void ip_set_put(ip_set_id_t id);
504 +
505 +/* API for iptables set match, and SET target */
506 +extern void ip_set_addip_kernel(ip_set_id_t id,
507 +                               const struct sk_buff *skb,
508 +                               const u_int32_t *flags);
509 +extern void ip_set_delip_kernel(ip_set_id_t id,
510 +                               const struct sk_buff *skb,
511 +                               const u_int32_t *flags);
512 +extern int ip_set_testip_kernel(ip_set_id_t id,
513 +                               const struct sk_buff *skb,
514 +                               const u_int32_t *flags);
515 +
516 +#endif                         /* __KERNEL__ */
517 +
518 +#endif /*_IP_SET_H*/
519 --- /dev/null
520 +++ b/include/linux/netfilter_ipv4/ip_set_iphash.h
521 @@ -0,0 +1,30 @@
522 +#ifndef __IP_SET_IPHASH_H
523 +#define __IP_SET_IPHASH_H
524 +
525 +#include <linux/netfilter_ipv4/ip_set.h>
526 +
527 +#define SETTYPE_NAME "iphash"
528 +#define MAX_RANGE 0x0000FFFF
529 +
530 +struct ip_set_iphash {
531 +       ip_set_ip_t *members;           /* the iphash proper */
532 +       uint32_t elements;              /* number of elements */
533 +       uint32_t hashsize;              /* hash size */
534 +       uint16_t probes;                /* max number of probes  */
535 +       uint16_t resize;                /* resize factor in percent */
536 +       ip_set_ip_t netmask;            /* netmask */
537 +       void *initval[0];               /* initvals for jhash_1word */
538 +};
539 +
540 +struct ip_set_req_iphash_create {
541 +       uint32_t hashsize;
542 +       uint16_t probes;
543 +       uint16_t resize;
544 +       ip_set_ip_t netmask;
545 +};
546 +
547 +struct ip_set_req_iphash {
548 +       ip_set_ip_t ip;
549 +};
550 +
551 +#endif /* __IP_SET_IPHASH_H */
552 --- /dev/null
553 +++ b/include/linux/netfilter_ipv4/ip_set_ipmap.h
554 @@ -0,0 +1,56 @@
555 +#ifndef __IP_SET_IPMAP_H
556 +#define __IP_SET_IPMAP_H
557 +
558 +#include <linux/netfilter_ipv4/ip_set.h>
559 +
560 +#define SETTYPE_NAME "ipmap"
561 +#define MAX_RANGE 0x0000FFFF
562 +
563 +struct ip_set_ipmap {
564 +       void *members;                  /* the ipmap proper */
565 +       ip_set_ip_t first_ip;           /* host byte order, included in range */
566 +       ip_set_ip_t last_ip;            /* host byte order, included in range */
567 +       ip_set_ip_t netmask;            /* subnet netmask */
568 +       ip_set_ip_t sizeid;             /* size of set in IPs */
569 +       ip_set_ip_t hosts;              /* number of hosts in a subnet */
570 +};
571 +
572 +struct ip_set_req_ipmap_create {
573 +       ip_set_ip_t from;
574 +       ip_set_ip_t to;
575 +       ip_set_ip_t netmask;
576 +};
577 +
578 +struct ip_set_req_ipmap {
579 +       ip_set_ip_t ip;
580 +};
581 +
582 +unsigned int
583 +mask_to_bits(ip_set_ip_t mask)
584 +{
585 +       unsigned int bits = 32;
586 +       ip_set_ip_t maskaddr;
587 +
588 +       if (mask == 0xFFFFFFFF)
589 +               return bits;
590 +
591 +       maskaddr = 0xFFFFFFFE;
592 +       while (--bits >= 0 && maskaddr != mask)
593 +               maskaddr <<= 1;
594 +
595 +       return bits;
596 +}
597 +
598 +ip_set_ip_t
599 +range_to_mask(ip_set_ip_t from, ip_set_ip_t to, unsigned int *bits)
600 +{
601 +       ip_set_ip_t mask = 0xFFFFFFFE;
602 +
603 +       *bits = 32;
604 +       while (--(*bits) >= 0 && mask && (to & mask) != from)
605 +               mask <<= 1;
606 +
607 +       return mask;
608 +}
609 +
610 +#endif /* __IP_SET_IPMAP_H */
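Worked example for the two helpers above (editor's note, not patch text): range_to_mask(0xC0A80000, 0xC0A800FF, &bits) slides the candidate mask down from 0xFFFFFFFE one bit per iteration until (to & mask) == from, stopping at mask = 0xFFFFFF00 with bits = 24, i.e. 192.168.0.0/24; mask_to_bits(0xFFFFFF00) walks the same ladder and returns 24.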
611 --- /dev/null
612 +++ b/include/linux/netfilter_ipv4/ip_set_ipporthash.h
613 @@ -0,0 +1,34 @@
614 +#ifndef __IP_SET_IPPORTHASH_H
615 +#define __IP_SET_IPPORTHASH_H
616 +
617 +#include <linux/netfilter_ipv4/ip_set.h>
618 +
619 +#define SETTYPE_NAME "ipporthash"
620 +#define MAX_RANGE 0x0000FFFF
621 +#define INVALID_PORT   (MAX_RANGE + 1)
622 +
623 +struct ip_set_ipporthash {
624 +       ip_set_ip_t *members;           /* the ipporthash proper */
625 +       uint32_t elements;              /* number of elements */
626 +       uint32_t hashsize;              /* hash size */
627 +       uint16_t probes;                /* max number of probes  */
628 +       uint16_t resize;                /* resize factor in percent */
629 +       ip_set_ip_t first_ip;           /* host byte order, included in range */
630 +       ip_set_ip_t last_ip;            /* host byte order, included in range */
631 +       void *initval[0];               /* initvals for jhash_1word */
632 +};
633 +
634 +struct ip_set_req_ipporthash_create {
635 +       uint32_t hashsize;
636 +       uint16_t probes;
637 +       uint16_t resize;
638 +       ip_set_ip_t from;
639 +       ip_set_ip_t to;
640 +};
641 +
642 +struct ip_set_req_ipporthash {
643 +       ip_set_ip_t ip;
644 +       ip_set_ip_t port;
645 +};
646 +
647 +#endif /* __IP_SET_IPPORTHASH_H */
648 --- /dev/null
649 +++ b/include/linux/netfilter_ipv4/ip_set_iptree.h
650 @@ -0,0 +1,40 @@
651 +#ifndef __IP_SET_IPTREE_H
652 +#define __IP_SET_IPTREE_H
653 +
654 +#include <linux/netfilter_ipv4/ip_set.h>
655 +
656 +#define SETTYPE_NAME "iptree"
657 +#define MAX_RANGE 0x0000FFFF
658 +
659 +struct ip_set_iptreed {
660 +       unsigned long expires[256];             /* x.x.x.ADDR */
661 +};
662 +
663 +struct ip_set_iptreec {
664 +       struct ip_set_iptreed *tree[256];       /* x.x.ADDR.* */
665 +};
666 +
667 +struct ip_set_iptreeb {
668 +       struct ip_set_iptreec *tree[256];       /* x.ADDR.*.* */
669 +};
670 +
671 +struct ip_set_iptree {
672 +       unsigned int timeout;
673 +       unsigned int gc_interval;
674 +#ifdef __KERNEL__
675 +       uint32_t elements;              /* number of elements */
676 +       struct timer_list gc;
677 +       struct ip_set_iptreeb *tree[256];       /* ADDR.*.*.* */
678 +#endif
679 +};
680 +
681 +struct ip_set_req_iptree_create {
682 +       unsigned int timeout;
683 +};
684 +
685 +struct ip_set_req_iptree {
686 +       ip_set_ip_t ip;
687 +       unsigned int timeout;
688 +};
689 +
690 +#endif /* __IP_SET_IPTREE_H */
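For readability (editor's paraphrase of the x.ADDR comments above, not patch text): the nested structs form a 256-way trie keyed on the four octets of a host-byte-order address, so membership of a.b.c.d amounts to following set->tree[a]->tree[b]->tree[c]->expires[d], where a non-zero, not-yet-expired timestamp marks the address as present.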
691 --- /dev/null
692 +++ b/include/linux/netfilter_ipv4/ip_set_iptreemap.h
693 @@ -0,0 +1,40 @@
694 +#ifndef __IP_SET_IPTREEMAP_H
695 +#define __IP_SET_IPTREEMAP_H
696 +
697 +#include <linux/netfilter_ipv4/ip_set.h>
698 +
699 +#define SETTYPE_NAME "iptreemap"
700 +
701 +#ifdef __KERNEL__
702 +struct ip_set_iptreemap_d {
703 +       unsigned char bitmap[32]; /* x.x.x.y */
704 +};
705 +
706 +struct ip_set_iptreemap_c {
707 +       struct ip_set_iptreemap_d *tree[256]; /* x.x.y.x */
708 +};
709 +
710 +struct ip_set_iptreemap_b {
711 +       struct ip_set_iptreemap_c *tree[256]; /* x.y.x.x */
712 +       unsigned char dirty[32];
713 +};
714 +#endif
715 +
716 +struct ip_set_iptreemap {
717 +       unsigned int gc_interval;
718 +#ifdef __KERNEL__
719 +       struct timer_list gc;
720 +       struct ip_set_iptreemap_b *tree[256]; /* y.x.x.x */
721 +#endif
722 +};
723 +
724 +struct ip_set_req_iptreemap_create {
725 +       unsigned int gc_interval;
726 +};
727 +
728 +struct ip_set_req_iptreemap {
729 +       ip_set_ip_t start;
730 +       ip_set_ip_t end;
731 +};
732 +
733 +#endif /* __IP_SET_IPTREEMAP_H */
734 --- /dev/null
735 +++ b/include/linux/netfilter_ipv4/ip_set_jhash.h
736 @@ -0,0 +1,148 @@
737 +#ifndef _LINUX_IPSET_JHASH_H
738 +#define _LINUX_IPSET_JHASH_H
739 +
740 +/* This is a copy of linux/jhash.h but the types u32/u8 are changed
741 + * to __u32/__u8 so that the header file can be included into
742 + * userspace code as well. Jozsef Kadlecsik (kadlec@blackhole.kfki.hu)
743 + */
744 +
745 +/* jhash.h: Jenkins hash support.
746 + *
747 + * Copyright (C) 1996 Bob Jenkins (bob_jenkins@burtleburtle.net)
748 + *
749 + * http://burtleburtle.net/bob/hash/
750 + *
751 + * These are the credits from Bob's sources:
752 + *
753 + * lookup2.c, by Bob Jenkins, December 1996, Public Domain.
754 + * hash(), hash2(), hash3, and mix() are externally useful functions.
755 + * Routines to test the hash are included if SELF_TEST is defined.
756 + * You can use this free for any purpose.  It has no warranty.
757 + *
758 + * Copyright (C) 2003 David S. Miller (davem@redhat.com)
759 + *
760 + * I've modified Bob's hash to be useful in the Linux kernel, and
761 + * any bugs present are surely my fault.  -DaveM
762 + */
763 +
764 +/* NOTE: Arguments are modified. */
765 +#define __jhash_mix(a, b, c) \
766 +{ \
767 +  a -= b; a -= c; a ^= (c>>13); \
768 +  b -= c; b -= a; b ^= (a<<8); \
769 +  c -= a; c -= b; c ^= (b>>13); \
770 +  a -= b; a -= c; a ^= (c>>12);  \
771 +  b -= c; b -= a; b ^= (a<<16); \
772 +  c -= a; c -= b; c ^= (b>>5); \
773 +  a -= b; a -= c; a ^= (c>>3);  \
774 +  b -= c; b -= a; b ^= (a<<10); \
775 +  c -= a; c -= b; c ^= (b>>15); \
776 +}
777 +
778 +/* The golden ratio: an arbitrary value */
779 +#define JHASH_GOLDEN_RATIO     0x9e3779b9
780 +
781 +/* The most generic version hashes an arbitrary sequence
782 + * of bytes.  No alignment or length assumptions are made about
783 + * the input key.
784 + */
785 +static inline __u32 jhash(void *key, __u32 length, __u32 initval)
786 +{
787 +       __u32 a, b, c, len;
788 +       __u8 *k = key;
789 +
790 +       len = length;
791 +       a = b = JHASH_GOLDEN_RATIO;
792 +       c = initval;
793 +
794 +       while (len >= 12) {
795 +               a += (k[0] +((__u32)k[1]<<8) +((__u32)k[2]<<16) +((__u32)k[3]<<24));
796 +               b += (k[4] +((__u32)k[5]<<8) +((__u32)k[6]<<16) +((__u32)k[7]<<24));
797 +               c += (k[8] +((__u32)k[9]<<8) +((__u32)k[10]<<16)+((__u32)k[11]<<24));
798 +
799 +               __jhash_mix(a,b,c);
800 +
801 +               k += 12;
802 +               len -= 12;
803 +       }
804 +
805 +       c += length;
806 +       switch (len) {
807 +       case 11: c += ((__u32)k[10]<<24);
808 +       case 10: c += ((__u32)k[9]<<16);
809 +       case 9 : c += ((__u32)k[8]<<8);
810 +       case 8 : b += ((__u32)k[7]<<24);
811 +       case 7 : b += ((__u32)k[6]<<16);
812 +       case 6 : b += ((__u32)k[5]<<8);
813 +       case 5 : b += k[4];
814 +       case 4 : a += ((__u32)k[3]<<24);
815 +       case 3 : a += ((__u32)k[2]<<16);
816 +       case 2 : a += ((__u32)k[1]<<8);
817 +       case 1 : a += k[0];
818 +       };
819 +
820 +       __jhash_mix(a,b,c);
821 +
822 +       return c;
823 +}
824 +
825 +/* A special optimized version that handles 1 or more __u32s.
826 + * The length parameter here is the number of __u32s in the key.
827 + */
828 +static inline __u32 jhash2(__u32 *k, __u32 length, __u32 initval)
829 +{
830 +       __u32 a, b, c, len;
831 +
832 +       a = b = JHASH_GOLDEN_RATIO;
833 +       c = initval;
834 +       len = length;
835 +
836 +       while (len >= 3) {
837 +               a += k[0];
838 +               b += k[1];
839 +               c += k[2];
840 +               __jhash_mix(a, b, c);
841 +               k += 3; len -= 3;
842 +       }
843 +
844 +       c += length * 4;
845 +
846 +       switch (len) {
847 +       case 2 : b += k[1];
848 +       case 1 : a += k[0];
849 +       };
850 +
851 +       __jhash_mix(a,b,c);
852 +
853 +       return c;
854 +}
855 +
856 +
857 +/* Special ultra-optimized versions that know they are hashing exactly
858 + * 3, 2 or 1 word(s).
859 + *
860 + * NOTE: In particular the "c += length; __jhash_mix(a,b,c);" normally
861 + *       done at the end is not done here.
862 + */
863 +static inline __u32 jhash_3words(__u32 a, __u32 b, __u32 c, __u32 initval)
864 +{
865 +       a += JHASH_GOLDEN_RATIO;
866 +       b += JHASH_GOLDEN_RATIO;
867 +       c += initval;
868 +
869 +       __jhash_mix(a, b, c);
870 +
871 +       return c;
872 +}
873 +
874 +static inline __u32 jhash_2words(__u32 a, __u32 b, __u32 initval)
875 +{
876 +       return jhash_3words(a, b, 0, initval);
877 +}
878 +
879 +static inline __u32 jhash_1word(__u32 a, __u32 initval)
880 +{
881 +       return jhash_3words(a, 0, 0, initval);
882 +}
883 +
884 +#endif /* _LINUX_IPSET_JHASH_H */
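Usage note (editor's sketch, not patch code): callers typically reduce these hashes to a bucket index with a random, boot-time seed; the bindings hash in ip_set.c further down does exactly this with jhash_2words:

/* bucket = hash of (set id, ip) under a random seed, reduced mod table size */
static inline __u32 binding_bucket(__u32 id, __u32 ip,
                                   __u32 seed, __u32 nbuckets)
{
        return jhash_2words(id, ip, seed) % nbuckets;
}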
885 --- /dev/null
886 +++ b/include/linux/netfilter_ipv4/ip_set_macipmap.h
887 @@ -0,0 +1,38 @@
888 +#ifndef __IP_SET_MACIPMAP_H
889 +#define __IP_SET_MACIPMAP_H
890 +
891 +#include <linux/netfilter_ipv4/ip_set.h>
892 +
893 +#define SETTYPE_NAME "macipmap"
894 +#define MAX_RANGE 0x0000FFFF
895 +
896 +/* general flags */
897 +#define IPSET_MACIP_MATCHUNSET 1
898 +
899 +/* per ip flags */
900 +#define IPSET_MACIP_ISSET      1
901 +
902 +struct ip_set_macipmap {
903 +       void *members;                  /* the macipmap proper */
904 +       ip_set_ip_t first_ip;           /* host byte order, included in range */
905 +       ip_set_ip_t last_ip;            /* host byte order, included in range */
906 +       u_int32_t flags;
907 +};
908 +
909 +struct ip_set_req_macipmap_create {
910 +       ip_set_ip_t from;
911 +       ip_set_ip_t to;
912 +       u_int32_t flags;
913 +};
914 +
915 +struct ip_set_req_macipmap {
916 +       ip_set_ip_t ip;
917 +       unsigned char ethernet[ETH_ALEN];
918 +};
919 +
920 +struct ip_set_macip {
921 +       unsigned short flags;
922 +       unsigned char ethernet[ETH_ALEN];
923 +};
924 +
925 +#endif /* __IP_SET_MACIPMAP_H */
926 --- /dev/null
927 +++ b/include/linux/netfilter_ipv4/ip_set_malloc.h
928 @@ -0,0 +1,116 @@
929 +#ifndef _IP_SET_MALLOC_H
930 +#define _IP_SET_MALLOC_H
931 +
932 +#ifdef __KERNEL__
933 +
934 +/* Memory allocation and deallocation */
935 +static size_t max_malloc_size = 0;
936 +
937 +static inline void init_max_malloc_size(void)
938 +{
939 +#define CACHE(x) max_malloc_size = x;
940 +#include <linux/kmalloc_sizes.h>
941 +#undef CACHE
942 +}
943 +
944 +static inline void * ip_set_malloc(size_t bytes)
945 +{
946 +       if (bytes > max_malloc_size)
947 +               return vmalloc(bytes);
948 +       else
949 +               return kmalloc(bytes, GFP_KERNEL);
950 +}
951 +
952 +static inline void ip_set_free(void * data, size_t bytes)
953 +{
954 +       if (bytes > max_malloc_size)
955 +               vfree(data);
956 +       else
957 +               kfree(data);
958 +}
959 +
960 +struct harray {
961 +       size_t max_elements;
962 +       void *arrays[0];
963 +};
964 +
965 +static inline void *
966 +harray_malloc(size_t hashsize, size_t typesize, int flags)
967 +{
968 +       struct harray *harray;
969 +       size_t max_elements, size, i, j;
970 +
971 +       if (!max_malloc_size)
972 +               init_max_malloc_size();
973 +
974 +       if (typesize > max_malloc_size)
975 +               return NULL;
976 +
977 +       max_elements = max_malloc_size/typesize;
978 +       size = hashsize/max_elements;
979 +       if (hashsize % max_elements)
980 +               size++;
981 +
982 +       /* Last pointer signals end of arrays */
983 +       harray = kmalloc(sizeof(struct harray) + (size + 1) * sizeof(void *),
984 +                        flags);
985 +
986 +       if (!harray)
987 +               return NULL;
988 +
989 +       for (i = 0; i < size - 1; i++) {
990 +               harray->arrays[i] = kmalloc(max_elements * typesize, flags);
991 +               if (!harray->arrays[i])
992 +                       goto undo;
993 +               memset(harray->arrays[i], 0, max_elements * typesize);
994 +       }
995 +       harray->arrays[i] = kmalloc((hashsize - i * max_elements) * typesize,
996 +                                   flags);
997 +       if (!harray->arrays[i])
998 +               goto undo;
999 +       memset(harray->arrays[i], 0, (hashsize - i * max_elements) * typesize);
1000 +
1001 +       harray->max_elements = max_elements;
1002 +       harray->arrays[size] = NULL;
1003 +
1004 +       return (void *)harray;
1005 +
1006 +    undo:
1007 +       for (j = 0; j < i; j++) {
1008 +               kfree(harray->arrays[j]);
1009 +       }
1010 +       kfree(harray);
1011 +       return NULL;
1012 +}
1013 +
1014 +static inline void harray_free(void *h)
1015 +{
1016 +       struct harray *harray = (struct harray *) h;
1017 +       size_t i;
1018 +
1019 +       for (i = 0; harray->arrays[i] != NULL; i++)
1020 +               kfree(harray->arrays[i]);
1021 +       kfree(harray);
1022 +}
1023 +
1024 +static inline void harray_flush(void *h, size_t hashsize, size_t typesize)
1025 +{
1026 +       struct harray *harray = (struct harray *) h;
1027 +       size_t i;
1028 +
1029 +       for (i = 0; harray->arrays[i+1] != NULL; i++)
1030 +               memset(harray->arrays[i], 0, harray->max_elements * typesize);
1031 +       memset(harray->arrays[i], 0,
1032 +              (hashsize - i * harray->max_elements) * typesize);
1033 +}
1034 +
1035 +#define HARRAY_ELEM(h, type, which)                            \
1036 +({                                                             \
1037 +       struct harray *__h = (struct harray *)(h);              \
1038 +       ((type)((__h)->arrays[(which)/(__h)->max_elements])     \
1039 +               + (which)%(__h)->max_elements);                 \
1040 +})
1041 +
1042 +#endif                         /* __KERNEL__ */
1043 +
1044 +#endif /*_IP_SET_MALLOC_H*/
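Usage sketch for the helpers above (editor's illustration patterned on the hash set types; not patch code): an array of hashsize elements is allocated once with harray_malloc(), individual slots are addressed through HARRAY_ELEM() without caring which underlying chunk they live in, and the whole thing is released with harray_free():

/* store ip into slot i of a freshly allocated member array, then free it;
 * purely to show the calling pattern (the real set types keep the array) */
static int harray_demo(ip_set_ip_t ip, size_t hashsize, size_t i)
{
        void *members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
        ip_set_ip_t *slot;

        if (!members)
                return -ENOMEM;
        slot = HARRAY_ELEM(members, ip_set_ip_t *, i);  /* requires i < hashsize */
        *slot = ip;
        harray_free(members);
        return 0;
}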
1045 --- /dev/null
1046 +++ b/include/linux/netfilter_ipv4/ip_set_nethash.h
1047 @@ -0,0 +1,55 @@
1048 +#ifndef __IP_SET_NETHASH_H
1049 +#define __IP_SET_NETHASH_H
1050 +
1051 +#include <linux/netfilter_ipv4/ip_set.h>
1052 +
1053 +#define SETTYPE_NAME "nethash"
1054 +#define MAX_RANGE 0x0000FFFF
1055 +
1056 +struct ip_set_nethash {
1057 +       ip_set_ip_t *members;           /* the nethash proper */
1058 +       uint32_t elements;              /* number of elements */
1059 +       uint32_t hashsize;              /* hash size */
1060 +       uint16_t probes;                /* max number of probes  */
1061 +       uint16_t resize;                /* resize factor in percent */
1062 +       unsigned char cidr[30];         /* CIDR sizes */
1063 +       void *initval[0];               /* initvals for jhash_1word */
1064 +};
1065 +
1066 +struct ip_set_req_nethash_create {
1067 +       uint32_t hashsize;
1068 +       uint16_t probes;
1069 +       uint16_t resize;
1070 +};
1071 +
1072 +struct ip_set_req_nethash {
1073 +       ip_set_ip_t ip;
1074 +       unsigned char cidr;
1075 +};
1076 +
1077 +static unsigned char shifts[] = {255, 253, 249, 241, 225, 193, 129, 1};
1078 +
1079 +static inline ip_set_ip_t
1080 +pack(ip_set_ip_t ip, unsigned char cidr)
1081 +{
1082 +       ip_set_ip_t addr, *paddr = &addr;
1083 +       unsigned char n, t, *a;
1084 +
1085 +       addr = htonl(ip & (0xFFFFFFFF << (32 - (cidr))));
1086 +#ifdef __KERNEL__
1087 +       DP("ip:%u.%u.%u.%u/%u", NIPQUAD(addr), cidr);
1088 +#endif
1089 +       n = cidr / 8;
1090 +       t = cidr % 8;
1091 +       a = &((unsigned char *)paddr)[n];
1092 +       *a = *a /(1 << (8 - t)) + shifts[t];
1093 +#ifdef __KERNEL__
1094 +       DP("n: %u, t: %u, a: %u", n, t, *a);
1095 +       DP("ip:%u.%u.%u.%u/%u, %u.%u.%u.%u",
1096 +          HIPQUAD(ip), cidr, NIPQUAD(addr));
1097 +#endif
1098 +
1099 +       return ntohl(addr);
1100 +}
1101 +
1102 +#endif /* __IP_SET_NETHASH_H */
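Worked example for pack() above (editor's note, not patch text): for 192.168.0.0/24 the masked address is 0xC0A80000, n = 3, t = 0, and byte n becomes 0 / 256 + shifts[0] = 255, so pack(0xC0A80000, 24) returns 0xC0A800FF. The prefix length is thus folded into the byte at the network/host boundary, which appears to be how nethash keeps nets of different sizes apart while storing them as plain ip_set_ip_t values.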
1103 --- /dev/null
1104 +++ b/include/linux/netfilter_ipv4/ip_set_portmap.h
1105 @@ -0,0 +1,25 @@
1106 +#ifndef __IP_SET_PORTMAP_H
1107 +#define __IP_SET_PORTMAP_H
1108 +
1109 +#include <linux/netfilter_ipv4/ip_set.h>
1110 +
1111 +#define SETTYPE_NAME   "portmap"
1112 +#define MAX_RANGE      0x0000FFFF
1113 +#define INVALID_PORT   (MAX_RANGE + 1)
1114 +
1115 +struct ip_set_portmap {
1116 +       void *members;                  /* the portmap proper */
1117 +       ip_set_ip_t first_port;         /* host byte order, included in range */
1118 +       ip_set_ip_t last_port;          /* host byte order, included in range */
1119 +};
1120 +
1121 +struct ip_set_req_portmap_create {
1122 +       ip_set_ip_t from;
1123 +       ip_set_ip_t to;
1124 +};
1125 +
1126 +struct ip_set_req_portmap {
1127 +       ip_set_ip_t port;
1128 +};
1129 +
1130 +#endif /* __IP_SET_PORTMAP_H */
1131 --- /dev/null
1132 +++ b/include/linux/netfilter_ipv4/ipt_set.h
1133 @@ -0,0 +1,21 @@
1134 +#ifndef _IPT_SET_H
1135 +#define _IPT_SET_H
1136 +
1137 +#include <linux/netfilter_ipv4/ip_set.h>
1138 +
1139 +struct ipt_set_info {
1140 +       ip_set_id_t index;
1141 +       u_int32_t flags[IP_SET_MAX_BINDINGS + 1];
1142 +};
1143 +
1144 +/* match info */
1145 +struct ipt_set_info_match {
1146 +       struct ipt_set_info match_set;
1147 +};
1148 +
1149 +struct ipt_set_info_target {
1150 +       struct ipt_set_info add_set;
1151 +       struct ipt_set_info del_set;
1152 +};
1153 +
1154 +#endif /*_IPT_SET_H*/
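To tie these structures to their user (editor's sketch with a hypothetical set name, not patch code): the iptables 'set' match for something like "-m set --set myset src,dst" hands the kernel one flag per binding level, IPSET_SRC or IPSET_DST, terminated by 0:

struct ipt_set_info_match info = {
        .match_set = {
                .index = 0,     /* resolved beforehand via IP_SET_OP_GET_BYNAME */
                .flags = { IPSET_SRC, IPSET_DST, 0 },   /* "src,dst" */
        },
};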
1155 --- /dev/null
1156 +++ b/net/ipv4/netfilter/ip_set.c
1157 @@ -0,0 +1,2003 @@
1158 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
1159 + *                         Patrick Schaaf <bof@bof.de>
1160 + * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
1161 + *
1162 + * This program is free software; you can redistribute it and/or modify
1163 + * it under the terms of the GNU General Public License version 2 as
1164 + * published by the Free Software Foundation.
1165 + */
1166 +
1167 +/* Kernel module for IP set management */
1168 +
1169 +#include <linux/version.h>
1170 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
1171 +#include <linux/config.h>
1172 +#endif
1173 +#include <linux/module.h>
1174 +#include <linux/moduleparam.h>
1175 +#include <linux/kmod.h>
1176 +#include <linux/ip.h>
1177 +#include <linux/skbuff.h>
1178 +#include <linux/random.h>
1179 +#include <linux/jhash.h>
1180 +#include <linux/netfilter_ipv4/ip_tables.h>
1181 +#include <linux/errno.h>
1182 +#include <linux/semaphore.h>
1183 +#include <asm/uaccess.h>
1184 +#include <asm/bitops.h>
1185 +#include <linux/spinlock.h>
1186 +#include <linux/vmalloc.h>
1187 +
1188 +#define ASSERT_READ_LOCK(x)
1189 +#define ASSERT_WRITE_LOCK(x)
1190 +#include <linux/netfilter_ipv4/ip_set.h>
1191 +
1192 +static struct list_head set_type_list;         /* all registered sets */
1193 +static struct ip_set **ip_set_list;            /* all individual sets */
1194 +static DEFINE_RWLOCK(ip_set_lock);             /* protects the lists and the hash */
1195 +static DECLARE_MUTEX(ip_set_app_mutex);                /* serializes user access */
1196 +static ip_set_id_t ip_set_max = CONFIG_IP_NF_SET_MAX;
1197 +static ip_set_id_t ip_set_bindings_hash_size =  CONFIG_IP_NF_SET_HASHSIZE;
1198 +static struct list_head *ip_set_hash;          /* hash of bindings */
1199 +static unsigned int ip_set_hash_random;                /* random seed */
1200 +
1201 +/*
1202 + * Sets are identified either by the index in ip_set_list or by id.
1203 + * The id never changes and is used to find a key in the hash.
1204 + * The index may change by swapping and is used at all other places
1205 + * (set/SET netfilter modules, binding value, etc.)
1206 + *
1207 + * Userspace requests are serialized by ip_set_app_mutex and sets can
1208 + * be deleted only from userspace. Therefore ip_set_list locking
1209 + * must obey the following rules:
1210 + *
1211 + * - kernel requests: read and write locking mandatory
1212 + * - user requests: read locking optional, write locking mandatory
1213 + */
1214 +
1215 +static inline void
1216 +__ip_set_get(ip_set_id_t index)
1217 +{
1218 +       atomic_inc(&ip_set_list[index]->ref);
1219 +}
1220 +
1221 +static inline void
1222 +__ip_set_put(ip_set_id_t index)
1223 +{
1224 +       atomic_dec(&ip_set_list[index]->ref);
1225 +}
1226 +
1227 +/*
1228 + * Binding routines
1229 + */
1230 +
1231 +static inline struct ip_set_hash *
1232 +__ip_set_find(u_int32_t key, ip_set_id_t id, ip_set_ip_t ip)
1233 +{
1234 +       struct ip_set_hash *set_hash;
1235 +
1236 +       list_for_each_entry(set_hash, &ip_set_hash[key], list)
1237 +               if (set_hash->id == id && set_hash->ip == ip)
1238 +                       return set_hash;
1239 +
1240 +       return NULL;
1241 +}
1242 +
1243 +static ip_set_id_t
1244 +ip_set_find_in_hash(ip_set_id_t id, ip_set_ip_t ip)
1245 +{
1246 +       u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
1247 +                               % ip_set_bindings_hash_size;
1248 +       struct ip_set_hash *set_hash;
1249 +
1250 +       ASSERT_READ_LOCK(&ip_set_lock);
1251 +       IP_SET_ASSERT(ip_set_list[id]);
1252 +       DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
1253 +
1254 +       set_hash = __ip_set_find(key, id, ip);
1255 +
1256 +       DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
1257 +          HIPQUAD(ip),
1258 +          set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
1259 +
1260 +       return (set_hash != NULL ? set_hash->binding : IP_SET_INVALID_ID);
1261 +}
1262 +
1263 +static inline void
1264 +__set_hash_del(struct ip_set_hash *set_hash)
1265 +{
1266 +       ASSERT_WRITE_LOCK(&ip_set_lock);
1267 +       IP_SET_ASSERT(ip_set_list[set_hash->binding]);
1268 +
1269 +       __ip_set_put(set_hash->binding);
1270 +       list_del(&set_hash->list);
1271 +       kfree(set_hash);
1272 +}
1273 +
1274 +static int
1275 +ip_set_hash_del(ip_set_id_t id, ip_set_ip_t ip)
1276 +{
1277 +       u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
1278 +                               % ip_set_bindings_hash_size;
1279 +       struct ip_set_hash *set_hash;
1280 +
1281 +       IP_SET_ASSERT(ip_set_list[id]);
1282 +       DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
1283 +       write_lock_bh(&ip_set_lock);
1284 +       set_hash = __ip_set_find(key, id, ip);
1285 +       DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
1286 +          HIPQUAD(ip),
1287 +          set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
1288 +
1289 +       if (set_hash != NULL)
1290 +               __set_hash_del(set_hash);
1291 +       write_unlock_bh(&ip_set_lock);
1292 +       return 0;
1293 +}
1294 +
1295 +static int
1296 +ip_set_hash_add(ip_set_id_t id, ip_set_ip_t ip, ip_set_id_t binding)
1297 +{
1298 +       u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
1299 +                               % ip_set_bindings_hash_size;
1300 +       struct ip_set_hash *set_hash;
1301 +       int ret = 0;
1302 +
1303 +       IP_SET_ASSERT(ip_set_list[id]);
1304 +       IP_SET_ASSERT(ip_set_list[binding]);
1305 +       DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
1306 +          HIPQUAD(ip), ip_set_list[binding]->name);
1307 +       write_lock_bh(&ip_set_lock);
1308 +       set_hash = __ip_set_find(key, id, ip);
1309 +       if (!set_hash) {
1310 +               set_hash = kmalloc(sizeof(struct ip_set_hash), GFP_ATOMIC);
1311 +               if (!set_hash) {
1312 +                       ret = -ENOMEM;
1313 +                       goto unlock;
1314 +               }
1315 +               INIT_LIST_HEAD(&set_hash->list);
1316 +               set_hash->id = id;
1317 +               set_hash->ip = ip;
1318 +               list_add(&set_hash->list, &ip_set_hash[key]);
1319 +       } else {
1320 +               IP_SET_ASSERT(ip_set_list[set_hash->binding]);
1321 +               DP("overwrite binding: %s",
1322 +                  ip_set_list[set_hash->binding]->name);
1323 +               __ip_set_put(set_hash->binding);
1324 +       }
1325 +       set_hash->binding = binding;
1326 +       __ip_set_get(set_hash->binding);
1327 +       DP("stored: key %u, id %u (%s), ip %u.%u.%u.%u, binding %u (%s)",
1328 +          key, id, ip_set_list[id]->name,
1329 +          HIPQUAD(ip), binding, ip_set_list[binding]->name);
1330 +    unlock:
1331 +       write_unlock_bh(&ip_set_lock);
1332 +       return ret;
1333 +}
1334 +
1335 +#define FOREACH_HASH_DO(fn, args...)                                           \
1336 +({                                                                             \
1337 +       ip_set_id_t __key;                                                      \
1338 +       struct ip_set_hash *__set_hash;                                         \
1339 +                                                                               \
1340 +       for (__key = 0; __key < ip_set_bindings_hash_size; __key++) {           \
1341 +               list_for_each_entry(__set_hash, &ip_set_hash[__key], list)      \
1342 +                       fn(__set_hash , ## args);                               \
1343 +       }                                                                       \
1344 +})
1345 +
1346 +#define FOREACH_HASH_RW_DO(fn, args...)                                                \
1347 +({                                                                             \
1348 +       ip_set_id_t __key;                                                      \
1349 +       struct ip_set_hash *__set_hash, *__n;                                   \
1350 +                                                                               \
1351 +       ASSERT_WRITE_LOCK(&ip_set_lock);                                        \
1352 +       for (__key = 0; __key < ip_set_bindings_hash_size; __key++) {           \
1353 +               list_for_each_entry_safe(__set_hash, __n, &ip_set_hash[__key], list)\
1354 +                       fn(__set_hash , ## args);                               \
1355 +       }                                                                       \
1356 +})
1357 +
1358 +/* Add, del and test set entries from kernel */
1359 +
1360 +#define follow_bindings(index, set, ip)                                        \
1361 +((index = ip_set_find_in_hash((set)->id, ip)) != IP_SET_INVALID_ID     \
1362 + || (index = (set)->binding) != IP_SET_INVALID_ID)
1363 +
1364 +int
1365 +ip_set_testip_kernel(ip_set_id_t index,
1366 +                    const struct sk_buff *skb,
1367 +                    const u_int32_t *flags)
1368 +{
1369 +       struct ip_set *set;
1370 +       ip_set_ip_t ip;
1371 +       int res;
1372 +       unsigned char i = 0;
1373 +
1374 +       IP_SET_ASSERT(flags[i]);
1375 +       read_lock_bh(&ip_set_lock);
1376 +       do {
1377 +               set = ip_set_list[index];
1378 +               IP_SET_ASSERT(set);
1379 +               DP("set %s, index %u", set->name, index);
1380 +               read_lock_bh(&set->lock);
1381 +               res = set->type->testip_kernel(set, skb, &ip, flags, i++);
1382 +               read_unlock_bh(&set->lock);
1383 +               i += !!(set->type->features & IPSET_DATA_DOUBLE);
1384 +       } while (res > 0
1385 +                && flags[i]
1386 +                && follow_bindings(index, set, ip));
1387 +       read_unlock_bh(&ip_set_lock);
1388 +
1389 +       return res;
1390 +}
1391 +
1392 +void
1393 +ip_set_addip_kernel(ip_set_id_t index,
1394 +                   const struct sk_buff *skb,
1395 +                   const u_int32_t *flags)
1396 +{
1397 +       struct ip_set *set;
1398 +       ip_set_ip_t ip;
1399 +       int res;
1400 +       unsigned char i = 0;
1401 +
1402 +       IP_SET_ASSERT(flags[i]);
1403 +   retry:
1404 +       read_lock_bh(&ip_set_lock);
1405 +       do {
1406 +               set = ip_set_list[index];
1407 +               IP_SET_ASSERT(set);
1408 +               DP("set %s, index %u", set->name, index);
1409 +               write_lock_bh(&set->lock);
1410 +               res = set->type->addip_kernel(set, skb, &ip, flags, i++);
1411 +               write_unlock_bh(&set->lock);
1412 +               i += !!(set->type->features & IPSET_DATA_DOUBLE);
1413 +       } while ((res == 0 || res == -EEXIST)
1414 +                && flags[i]
1415 +                && follow_bindings(index, set, ip));
1416 +       read_unlock_bh(&ip_set_lock);
1417 +
1418 +       if (res == -EAGAIN
1419 +           && set->type->retry
1420 +           && (res = set->type->retry(set)) == 0)
1421 +               goto retry;
1422 +}
1423 +
1424 +void
1425 +ip_set_delip_kernel(ip_set_id_t index,
1426 +                   const struct sk_buff *skb,
1427 +                   const u_int32_t *flags)
1428 +{
1429 +       struct ip_set *set;
1430 +       ip_set_ip_t ip;
1431 +       int res;
1432 +       unsigned char i = 0;
1433 +
1434 +       IP_SET_ASSERT(flags[i]);
1435 +       read_lock_bh(&ip_set_lock);
1436 +       do {
1437 +               set = ip_set_list[index];
1438 +               IP_SET_ASSERT(set);
1439 +               DP("set %s, index %u", set->name, index);
1440 +               write_lock_bh(&set->lock);
1441 +               res = set->type->delip_kernel(set, skb, &ip, flags, i++);
1442 +               write_unlock_bh(&set->lock);
1443 +               i += !!(set->type->features & IPSET_DATA_DOUBLE);
1444 +       } while ((res == 0 || res == -EEXIST)
1445 +                && flags[i]
1446 +                && follow_bindings(index, set, ip));
1447 +       read_unlock_bh(&ip_set_lock);
1448 +}
1449 +
1450 +/* Register and deregister settype */
1451 +
1452 +static inline struct ip_set_type *
1453 +find_set_type(const char *name)
1454 +{
1455 +       struct ip_set_type *set_type;
1456 +
1457 +       list_for_each_entry(set_type, &set_type_list, list)
1458 +               if (!strncmp(set_type->typename, name, IP_SET_MAXNAMELEN - 1))
1459 +                       return set_type;
1460 +       return NULL;
1461 +}
1462 +
1463 +int
1464 +ip_set_register_set_type(struct ip_set_type *set_type)
1465 +{
1466 +       int ret = 0;
1467 +
1468 +       if (set_type->protocol_version != IP_SET_PROTOCOL_VERSION) {
1469 +               ip_set_printk("'%s' uses wrong protocol version %u (want %u)",
1470 +                             set_type->typename,
1471 +                             set_type->protocol_version,
1472 +                             IP_SET_PROTOCOL_VERSION);
1473 +               return -EINVAL;
1474 +       }
1475 +
1476 +       write_lock_bh(&ip_set_lock);
1477 +       if (find_set_type(set_type->typename)) {
1478 +               /* Duplicate! */
1479 +               ip_set_printk("'%s' already registered!",
1480 +                             set_type->typename);
1481 +               ret = -EINVAL;
1482 +               goto unlock;
1483 +       }
1484 +       if (!try_module_get(THIS_MODULE)) {
1485 +               ret = -EFAULT;
1486 +               goto unlock;
1487 +       }
1488 +       list_add(&set_type->list, &set_type_list);
1489 +       DP("'%s' registered.", set_type->typename);
1490 +   unlock:
1491 +       write_unlock_bh(&ip_set_lock);
1492 +       return ret;
1493 +}
1494 +
1495 +void
1496 +ip_set_unregister_set_type(struct ip_set_type *set_type)
1497 +{
1498 +       write_lock_bh(&ip_set_lock);
1499 +       if (!find_set_type(set_type->typename)) {
1500 +               ip_set_printk("'%s' not registered?",
1501 +                             set_type->typename);
1502 +               goto unlock;
1503 +       }
1504 +       list_del(&set_type->list);
1505 +       module_put(THIS_MODULE);
1506 +       DP("'%s' unregistered.", set_type->typename);
1507 +   unlock:
1508 +       write_unlock_bh(&ip_set_lock);
1509 +
1510 +}
1511 +
1512 +/*
1513 + * Userspace routines
1514 + */
1515 +
1516 +/*
1517 + * Find set by name, reference it once. The reference makes sure the
1518 + * thing pointed to does not go away under our feet. Drop the reference
1519 + * later, using ip_set_put().
1520 + */
1521 +ip_set_id_t
1522 +ip_set_get_byname(const char *name)
1523 +{
1524 +       ip_set_id_t i, index = IP_SET_INVALID_ID;
1525 +
1526 +       down(&ip_set_app_mutex);
1527 +       for (i = 0; i < ip_set_max; i++) {
1528 +               if (ip_set_list[i] != NULL
1529 +                   && strcmp(ip_set_list[i]->name, name) == 0) {
1530 +                       __ip_set_get(i);
1531 +                       index = i;
1532 +                       break;
1533 +               }
1534 +       }
1535 +       up(&ip_set_app_mutex);
1536 +       return index;
1537 +}
1538 +
1539 +/*
1540 + * Find set by index, reference it once. The reference makes sure the
1541 + * thing pointed to does not go away under our feet. Drop the reference
1542 + * later, using ip_set_put().
1543 + */
1544 +ip_set_id_t
1545 +ip_set_get_byindex(ip_set_id_t index)
1546 +{
1547 +       down(&ip_set_app_mutex);
1548 +
1549 +       if (index >= ip_set_max)
1550 +               return IP_SET_INVALID_ID;
1551 +
1552 +       if (ip_set_list[index])
1553 +               __ip_set_get(index);
1554 +       else
1555 +               index = IP_SET_INVALID_ID;
1556 +
1557 +       up(&ip_set_app_mutex);
1558 +       return index;
1559 +}
1560 +
1561 +/*
1562 + * If the given set pointer points to a valid set, decrement
1563 + * reference count by 1. The caller shall not assume the index
1564 + * to be valid after calling this function.
1565 + */
1566 +void ip_set_put(ip_set_id_t index)
1567 +{
1568 +       down(&ip_set_app_mutex);
1569 +       if (ip_set_list[index])
1570 +               __ip_set_put(index);
1571 +       up(&ip_set_app_mutex);
1572 +}
1573 +
1574 +/* Find a set by name or index */
1575 +static ip_set_id_t
1576 +ip_set_find_byname(const char *name)
1577 +{
1578 +       ip_set_id_t i, index = IP_SET_INVALID_ID;
1579 +
1580 +       for (i = 0; i < ip_set_max; i++) {
1581 +               if (ip_set_list[i] != NULL
1582 +                   && strcmp(ip_set_list[i]->name, name) == 0) {
1583 +                       index = i;
1584 +                       break;
1585 +               }
1586 +       }
1587 +       return index;
1588 +}
1589 +
1590 +static ip_set_id_t
1591 +ip_set_find_byindex(ip_set_id_t index)
1592 +{
1593 +       if (index >= ip_set_max || ip_set_list[index] == NULL)
1594 +               index = IP_SET_INVALID_ID;
1595 +
1596 +       return index;
1597 +}
1598 +
1599 +/*
1600 + * Add, del, test, bind and unbind
1601 + */
1602 +
1603 +static inline int
1604 +__ip_set_testip(struct ip_set *set,
1605 +               const void *data,
1606 +               size_t size,
1607 +               ip_set_ip_t *ip)
1608 +{
1609 +       int res;
1610 +
1611 +       read_lock_bh(&set->lock);
1612 +       res = set->type->testip(set, data, size, ip);
1613 +       read_unlock_bh(&set->lock);
1614 +
1615 +       return res;
1616 +}
1617 +
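+/*
+ * Add with retry: a set type may return -EAGAIN (e.g. when a hash is
+ * full) and provide a retry hook, which is called before the add is
+ * attempted again.
+ */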
1618 +static int
1619 +__ip_set_addip(ip_set_id_t index,
1620 +              const void *data,
1621 +              size_t size)
1622 +{
1623 +       struct ip_set *set = ip_set_list[index];
1624 +       ip_set_ip_t ip;
1625 +       int res;
1626 +
1627 +       IP_SET_ASSERT(set);
1628 +       do {
1629 +               write_lock_bh(&set->lock);
1630 +               res = set->type->addip(set, data, size, &ip);
1631 +               write_unlock_bh(&set->lock);
1632 +       } while (res == -EAGAIN
1633 +                && set->type->retry
1634 +                && (res = set->type->retry(set)) == 0);
1635 +
1636 +       return res;
1637 +}
1638 +
1639 +static int
1640 +ip_set_addip(ip_set_id_t index,
1641 +            const void *data,
1642 +            size_t size)
1643 +{
1644 +
1645 +       return __ip_set_addip(index,
1646 +                             data + sizeof(struct ip_set_req_adt),
1647 +                             size - sizeof(struct ip_set_req_adt));
1648 +}
1649 +
1650 +static int
1651 +ip_set_delip(ip_set_id_t index,
1652 +            const void *data,
1653 +            size_t size)
1654 +{
1655 +       struct ip_set *set = ip_set_list[index];
1656 +       ip_set_ip_t ip;
1657 +       int res;
1658 +
1659 +       IP_SET_ASSERT(set);
1660 +       write_lock_bh(&set->lock);
1661 +       res = set->type->delip(set,
1662 +                              data + sizeof(struct ip_set_req_adt),
1663 +                              size - sizeof(struct ip_set_req_adt),
1664 +                              &ip);
1665 +       write_unlock_bh(&set->lock);
1666 +
1667 +       return res;
1668 +}
1669 +
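+/*
+ * Membership test for the sockopt interface: returns -EEXIST when the
+ * element is in the set, 0 when it is not, or a negative error code
+ * from the set type.
+ */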
1670 +static int
1671 +ip_set_testip(ip_set_id_t index,
1672 +             const void *data,
1673 +             size_t size)
1674 +{
1675 +       struct ip_set *set = ip_set_list[index];
1676 +       ip_set_ip_t ip;
1677 +       int res;
1678 +
1679 +       IP_SET_ASSERT(set);
1680 +       res = __ip_set_testip(set,
1681 +                             data + sizeof(struct ip_set_req_adt),
1682 +                             size - sizeof(struct ip_set_req_adt),
1683 +                             &ip);
1684 +
1685 +       return (res > 0 ? -EEXIST : res);
1686 +}
1687 +
1688 +static int
1689 +ip_set_bindip(ip_set_id_t index,
1690 +             const void *data,
1691 +             size_t size)
1692 +{
1693 +       struct ip_set *set = ip_set_list[index];
1694 +       struct ip_set_req_bind *req_bind;
1695 +       ip_set_id_t binding;
1696 +       ip_set_ip_t ip;
1697 +       int res;
1698 +
1699 +       IP_SET_ASSERT(set);
1700 +       if (size < sizeof(struct ip_set_req_bind))
1701 +               return -EINVAL;
1702 +
1703 +       req_bind = (struct ip_set_req_bind *) data;
1704 +       req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
1705 +
1706 +       if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
1707 +               /* Default binding of a set */
1708 +               char *binding_name;
1709 +
1710 +               if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
1711 +                       return -EINVAL;
1712 +
1713 +               binding_name = (char *)(data + sizeof(struct ip_set_req_bind));
1714 +               binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
1715 +
1716 +               binding = ip_set_find_byname(binding_name);
1717 +               if (binding == IP_SET_INVALID_ID)
1718 +                       return -ENOENT;
1719 +
1720 +               write_lock_bh(&ip_set_lock);
1721 +               /* Sets as binding values are referenced */
1722 +               if (set->binding != IP_SET_INVALID_ID)
1723 +                       __ip_set_put(set->binding);
1724 +               set->binding = binding;
1725 +               __ip_set_get(set->binding);
1726 +               write_unlock_bh(&ip_set_lock);
1727 +
1728 +               return 0;
1729 +       }
1730 +       binding = ip_set_find_byname(req_bind->binding);
1731 +       if (binding == IP_SET_INVALID_ID)
1732 +               return -ENOENT;
1733 +
1734 +       res = __ip_set_testip(set,
1735 +                             data + sizeof(struct ip_set_req_bind),
1736 +                             size - sizeof(struct ip_set_req_bind),
1737 +                             &ip);
1738 +       DP("set %s, ip: %u.%u.%u.%u, binding %s",
1739 +          set->name, HIPQUAD(ip), ip_set_list[binding]->name);
1740 +
1741 +       if (res >= 0)
1742 +               res = ip_set_hash_add(set->id, ip, binding);
1743 +
1744 +       return res;
1745 +}
1746 +
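+/*
+ * Run fn(set, args...) on every defined set in ip_set_list. The caller
+ * is responsible for whatever locking fn requires.
+ */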
1747 +#define FOREACH_SET_DO(fn, args...)                            \
1748 +({                                                             \
1749 +       ip_set_id_t __i;                                        \
1750 +       struct ip_set *__set;                                   \
1751 +                                                               \
1752 +       for (__i = 0; __i < ip_set_max; __i++) {                \
1753 +               __set = ip_set_list[__i];                       \
1754 +               if (__set != NULL)                              \
1755 +                       fn(__set , ##args);                     \
1756 +       }                                                       \
1757 +})
1758 +
1759 +static inline void
1760 +__set_hash_del_byid(struct ip_set_hash *set_hash, ip_set_id_t id)
1761 +{
1762 +       if (set_hash->id == id)
1763 +               __set_hash_del(set_hash);
1764 +}
1765 +
1766 +static inline void
1767 +__unbind_default(struct ip_set *set)
1768 +{
1769 +       if (set->binding != IP_SET_INVALID_ID) {
1770 +               /* Sets as binding values are referenced */
1771 +               __ip_set_put(set->binding);
1772 +               set->binding = IP_SET_INVALID_ID;
1773 +       }
1774 +}
1775 +
1776 +static int
1777 +ip_set_unbindip(ip_set_id_t index,
1778 +               const void *data,
1779 +               size_t size)
1780 +{
1781 +       struct ip_set *set;
1782 +       struct ip_set_req_bind *req_bind;
1783 +       ip_set_ip_t ip;
1784 +       int res;
1785 +
1786 +       DP("");
1787 +       if (size < sizeof(struct ip_set_req_bind))
1788 +               return -EINVAL;
1789 +
1790 +       req_bind = (struct ip_set_req_bind *) data;
1791 +       req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
1792 +
1793 +       DP("%u %s", index, req_bind->binding);
1794 +       if (index == IP_SET_INVALID_ID) {
1795 +               /* unbind :all: */
1796 +               if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
1797 +                       /* Default binding of sets */
1798 +                       write_lock_bh(&ip_set_lock);
1799 +                       FOREACH_SET_DO(__unbind_default);
1800 +                       write_unlock_bh(&ip_set_lock);
1801 +                       return 0;
1802 +               } else if (strcmp(req_bind->binding, IPSET_TOKEN_ALL) == 0) {
1803 +                       /* Flush all bindings of all sets */
1804 +                       write_lock_bh(&ip_set_lock);
1805 +                       FOREACH_HASH_RW_DO(__set_hash_del);
1806 +                       write_unlock_bh(&ip_set_lock);
1807 +                       return 0;
1808 +               }
1809 +               DP("unreachable reached!");
1810 +               return -EINVAL;
1811 +       }
1812 +
1813 +       set = ip_set_list[index];
1814 +       IP_SET_ASSERT(set);
1815 +       if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
1816 +               /* Default binding of set */
1817 +               ip_set_id_t binding = ip_set_find_byindex(set->binding);
1818 +
1819 +               if (binding == IP_SET_INVALID_ID)
1820 +                       return -ENOENT;
1821 +
1822 +               write_lock_bh(&ip_set_lock);
1823 +               /* Sets as binding values are referenced */
1824 +               __ip_set_put(set->binding);
1825 +               set->binding = IP_SET_INVALID_ID;
1826 +               write_unlock_bh(&ip_set_lock);
1827 +
1828 +               return 0;
1829 +       } else if (strcmp(req_bind->binding, IPSET_TOKEN_ALL) == 0) {
1830 +               /* Flush all bindings */
1831 +
1832 +               write_lock_bh(&ip_set_lock);
1833 +               FOREACH_HASH_RW_DO(__set_hash_del_byid, set->id);
1834 +               write_unlock_bh(&ip_set_lock);
1835 +               return 0;
1836 +       }
1837 +
1838 +       res = __ip_set_testip(set,
1839 +                             data + sizeof(struct ip_set_req_bind),
1840 +                             size - sizeof(struct ip_set_req_bind),
1841 +                             &ip);
1842 +
1843 +       DP("set %s, ip: %u.%u.%u.%u", set->name, HIPQUAD(ip));
1844 +       if (res >= 0)
1845 +               res = ip_set_hash_del(set->id, ip);
1846 +
1847 +       return res;
1848 +}
1849 +
1850 +static int
1851 +ip_set_testbind(ip_set_id_t index,
1852 +               const void *data,
1853 +               size_t size)
1854 +{
1855 +       struct ip_set *set = ip_set_list[index];
1856 +       struct ip_set_req_bind *req_bind;
1857 +       ip_set_id_t binding;
1858 +       ip_set_ip_t ip;
1859 +       int res;
1860 +
1861 +       IP_SET_ASSERT(set);
1862 +       if (size < sizeof(struct ip_set_req_bind))
1863 +               return -EINVAL;
1864 +
1865 +       req_bind = (struct ip_set_req_bind *) data;
1866 +       req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
1867 +
1868 +       if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
1869 +               /* Default binding of set */
1870 +               char *binding_name;
1871 +
1872 +               if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
1873 +                       return -EINVAL;
1874 +
1875 +               binding_name = (char *)(data + sizeof(struct ip_set_req_bind));
1876 +               binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
1877 +
1878 +               binding = ip_set_find_byname(binding_name);
1879 +               if (binding == IP_SET_INVALID_ID)
1880 +                       return -ENOENT;
1881 +
1882 +               res = (set->binding == binding) ? -EEXIST : 0;
1883 +
1884 +               return res;
1885 +       }
1886 +       binding = ip_set_find_byname(req_bind->binding);
1887 +       if (binding == IP_SET_INVALID_ID)
1888 +               return -ENOENT;
1889 +
1890 +
1891 +       res = __ip_set_testip(set,
1892 +                             data + sizeof(struct ip_set_req_bind),
1893 +                             size - sizeof(struct ip_set_req_bind),
1894 +                             &ip);
1895 +       DP("set %s, ip: %u.%u.%u.%u, binding %s",
1896 +          set->name, HIPQUAD(ip), ip_set_list[binding]->name);
1897 +
1898 +       if (res >= 0)
1899 +               res = (ip_set_find_in_hash(set->id, ip) == binding)
1900 +                       ? -EEXIST : 0;
1901 +
1902 +       return res;
1903 +}
1904 +
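+/*
+ * Look up a set type under ip_set_lock. On success the read lock is
+ * left held for the caller; on failure it is dropped here.
+ */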
1905 +static struct ip_set_type *
1906 +find_set_type_rlock(const char *typename)
1907 +{
1908 +       struct ip_set_type *type;
1909 +
1910 +       read_lock_bh(&ip_set_lock);
1911 +       type = find_set_type(typename);
1912 +       if (type == NULL)
1913 +               read_unlock_bh(&ip_set_lock);
1914 +
1915 +       return type;
1916 +}
1917 +
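+/*
+ * Find the first free slot in ip_set_list and an id not used by any
+ * existing set. Returns -EEXIST on a name clash and -ERANGE when no
+ * free slot is left.
+ */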
1918 +static int
1919 +find_free_id(const char *name,
1920 +            ip_set_id_t *index,
1921 +            ip_set_id_t *id)
1922 +{
1923 +       ip_set_id_t i;
1924 +
1925 +       *id = IP_SET_INVALID_ID;
1926 +       for (i = 0;  i < ip_set_max; i++) {
1927 +               if (ip_set_list[i] == NULL) {
1928 +                       if (*id == IP_SET_INVALID_ID)
1929 +                               *id = *index = i;
1930 +               } else if (strcmp(name, ip_set_list[i]->name) == 0)
1931 +                       /* Name clash */
1932 +                       return -EEXIST;
1933 +       }
1934 +       if (*id == IP_SET_INVALID_ID)
1935 +               /* No free slot remained */
1936 +               return -ERANGE;
1937 +       /* Check that index is usable as id (swapping) */
1938 +    check:
1939 +       for (i = 0;  i < ip_set_max; i++) {
1940 +               if (ip_set_list[i] != NULL
1941 +                   && ip_set_list[i]->id == *id) {
1942 +                       *id = i;
1943 +                       goto check;
1944 +               }
1945 +       }
1946 +       return 0;
1947 +}
1948 +
1949 +/*
1950 + * Create a set
1951 + */
1952 +static int
1953 +ip_set_create(const char *name,
1954 +             const char *typename,
1955 +             ip_set_id_t restore,
1956 +             const void *data,
1957 +             size_t size)
1958 +{
1959 +       struct ip_set *set;
1960 +       ip_set_id_t index = 0, id;
1961 +       int res = 0;
1962 +
1963 +       DP("setname: %s, typename: %s, id: %u", name, typename, restore);
1964 +       /*
1965 +        * First, and without any locks, allocate and initialize
1966 +        * a normal base set structure.
1967 +        */
1968 +       set = kmalloc(sizeof(struct ip_set), GFP_KERNEL);
1969 +       if (!set)
1970 +               return -ENOMEM;
1971 +       set->lock = RW_LOCK_UNLOCKED;
1972 +       strncpy(set->name, name, IP_SET_MAXNAMELEN);
1973 +       set->binding = IP_SET_INVALID_ID;
1974 +       atomic_set(&set->ref, 0);
1975 +
1976 +       /*
1977 +        * Next, take the &ip_set_lock, check that we know the type,
1978 +        * and take a reference on the type, to make sure it
1979 +        * stays available while constructing our new set.
1980 +        *
1981 +        * After referencing the type, we drop the &ip_set_lock,
1982 +        * and let the new set construction run without locks.
1983 +        */
1984 +       set->type = find_set_type_rlock(typename);
1985 +       if (set->type == NULL) {
1986 +               /* Try loading the module */
1987 +               char modulename[IP_SET_MAXNAMELEN + strlen("ip_set_") + 1];
1988 +               strcpy(modulename, "ip_set_");
1989 +               strcat(modulename, typename);
1990 +               DP("try to load %s", modulename);
1991 +               request_module(modulename);
1992 +               set->type = find_set_type_rlock(typename);
1993 +       }
1994 +       if (set->type == NULL) {
1995 +               ip_set_printk("no set type '%s', set '%s' not created",
1996 +                             typename, name);
1997 +               res = -ENOENT;
1998 +               goto out;
1999 +       }
2000 +       if (!try_module_get(set->type->me)) {
2001 +               read_unlock_bh(&ip_set_lock);
2002 +               res = -EFAULT;
2003 +               goto out;
2004 +       }
2005 +       read_unlock_bh(&ip_set_lock);
2006 +
2007 +       /*
2008 +        * Without holding any locks, create private part.
2009 +        */
2010 +       res = set->type->create(set, data, size);
2011 +       if (res != 0)
2012 +               goto put_out;
2013 +
2014 +       /* BTW, res==0 here. */
2015 +
2016 +       /*
2017 +        * Here, we have a valid, constructed set. &ip_set_lock again,
2018 +        * find free id/index and check that it is not already in
2019 +        * ip_set_list.
2020 +        */
2021 +       write_lock_bh(&ip_set_lock);
2022 +       if ((res = find_free_id(set->name, &index, &id)) != 0) {
2023 +               DP("no free id!");
2024 +               goto cleanup;
2025 +       }
2026 +
2027 +       /* Make sure restore gets the same index */
2028 +       if (restore != IP_SET_INVALID_ID && index != restore) {
2029 +               DP("Can't restore, sets are screwed up");
2030 +               res = -ERANGE;
2031 +               goto cleanup;
2032 +       }
2033 +
2034 +       /*
2035 +        * Finally! Add our shiny new set to the list, and be done.
2036 +        */
2037 +       DP("create: '%s' created with index %u, id %u!", set->name, index, id);
2038 +       set->id = id;
2039 +       ip_set_list[index] = set;
2040 +       write_unlock_bh(&ip_set_lock);
2041 +       return res;
2042 +
2043 +    cleanup:
2044 +       write_unlock_bh(&ip_set_lock);
2045 +       set->type->destroy(set);
2046 +    put_out:
2047 +       module_put(set->type->me);
2048 +    out:
2049 +       kfree(set);
2050 +       return res;
2051 +}
2052 +
2053 +/*
2054 + * Destroy a given existing set
2055 + */
2056 +static void
2057 +ip_set_destroy_set(ip_set_id_t index)
2058 +{
2059 +       struct ip_set *set = ip_set_list[index];
2060 +
2061 +       IP_SET_ASSERT(set);
2062 +       DP("set: %s",  set->name);
2063 +       write_lock_bh(&ip_set_lock);
2064 +       FOREACH_HASH_RW_DO(__set_hash_del_byid, set->id);
2065 +       if (set->binding != IP_SET_INVALID_ID)
2066 +               __ip_set_put(set->binding);
2067 +       ip_set_list[index] = NULL;
2068 +       write_unlock_bh(&ip_set_lock);
2069 +
2070 +       /* Must call it without holding any lock */
2071 +       set->type->destroy(set);
2072 +       module_put(set->type->me);
2073 +       kfree(set);
2074 +}
2075 +
2076 +/*
2077 + * Destroy a set - or all sets
2078 + * Sets must not be referenced/used.
2079 + */
2080 +static int
2081 +ip_set_destroy(ip_set_id_t index)
2082 +{
2083 +       ip_set_id_t i;
2084 +
2085 +       /* ref modification always protected by the mutex */
2086 +       if (index != IP_SET_INVALID_ID) {
2087 +               if (atomic_read(&ip_set_list[index]->ref))
2088 +                       return -EBUSY;
2089 +               ip_set_destroy_set(index);
2090 +       } else {
2091 +               for (i = 0; i < ip_set_max; i++) {
2092 +                       if (ip_set_list[i] != NULL
2093 +                           && (atomic_read(&ip_set_list[i]->ref)))
2094 +                               return -EBUSY;
2095 +               }
2096 +
2097 +               for (i = 0; i < ip_set_max; i++) {
2098 +                       if (ip_set_list[i] != NULL)
2099 +                               ip_set_destroy_set(i);
2100 +               }
2101 +       }
2102 +       return 0;
2103 +}
2104 +
2105 +static void
2106 +ip_set_flush_set(struct ip_set *set)
2107 +{
2108 +       DP("set: %s %u",  set->name, set->id);
2109 +
2110 +       write_lock_bh(&set->lock);
2111 +       set->type->flush(set);
2112 +       write_unlock_bh(&set->lock);
2113 +}
2114 +
2115 +/*
2116 + * Flush data in a set - or in all sets
2117 + */
2118 +static int
2119 +ip_set_flush(ip_set_id_t index)
2120 +{
2121 +       if (index != IP_SET_INVALID_ID) {
2122 +               IP_SET_ASSERT(ip_set_list[index]);
2123 +               ip_set_flush_set(ip_set_list[index]);
2124 +       } else
2125 +               FOREACH_SET_DO(ip_set_flush_set);
2126 +
2127 +       return 0;
2128 +}
2129 +
2130 +/* Rename a set */
2131 +static int
2132 +ip_set_rename(ip_set_id_t index, const char *name)
2133 +{
2134 +       struct ip_set *set = ip_set_list[index];
2135 +       ip_set_id_t i;
2136 +       int res = 0;
2137 +
2138 +       DP("set: %s to %s",  set->name, name);
2139 +       write_lock_bh(&ip_set_lock);
2140 +       for (i = 0; i < ip_set_max; i++) {
2141 +               if (ip_set_list[i] != NULL
2142 +                   && strncmp(ip_set_list[i]->name,
2143 +                              name,
2144 +                              IP_SET_MAXNAMELEN - 1) == 0) {
2145 +                       res = -EEXIST;
2146 +                       goto unlock;
2147 +               }
2148 +       }
2149 +       strncpy(set->name, name, IP_SET_MAXNAMELEN);
2150 +    unlock:
2151 +       write_unlock_bh(&ip_set_lock);
2152 +       return res;
2153 +}
2154 +
2155 +/*
2156 + * Swap two sets so that name/index points to the other.
2157 + * References are also swapped.
2158 + */
2159 +static int
2160 +ip_set_swap(ip_set_id_t from_index, ip_set_id_t to_index)
2161 +{
2162 +       struct ip_set *from = ip_set_list[from_index];
2163 +       struct ip_set *to = ip_set_list[to_index];
2164 +       char from_name[IP_SET_MAXNAMELEN];
2165 +       u_int32_t from_ref;
2166 +
2167 +       DP("set: %s to %s",  from->name, to->name);
2168 +       /* Features must not change. Artificial restriction. */
2169 +       if (from->type->features != to->type->features)
2170 +               return -ENOEXEC;
2171 +
2172 +       /* No magic here: ref munging protected by the mutex */
2173 +       write_lock_bh(&ip_set_lock);
2174 +       strncpy(from_name, from->name, IP_SET_MAXNAMELEN);
2175 +       from_ref = atomic_read(&from->ref);
2176 +
2177 +       strncpy(from->name, to->name, IP_SET_MAXNAMELEN);
2178 +       atomic_set(&from->ref, atomic_read(&to->ref));
2179 +       strncpy(to->name, from_name, IP_SET_MAXNAMELEN);
2180 +       atomic_set(&to->ref, from_ref);
2181 +
2182 +       ip_set_list[from_index] = to;
2183 +       ip_set_list[to_index] = from;
2184 +
2185 +       write_unlock_bh(&ip_set_lock);
2186 +       return 0;
2187 +}
2188 +
2189 +/*
2190 + * List set data
2191 + */
2192 +
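+/* Helpers invoked via FOREACH_HASH_DO on every binding hash entry;
+ * they only act on entries belonging to the given set id. */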
2193 +static inline void
2194 +__set_hash_bindings_size_list(struct ip_set_hash *set_hash,
2195 +                             ip_set_id_t id, size_t *size)
2196 +{
2197 +       if (set_hash->id == id)
2198 +               *size += sizeof(struct ip_set_hash_list);
2199 +}
2200 +
2201 +static inline void
2202 +__set_hash_bindings_size_save(struct ip_set_hash *set_hash,
2203 +                             ip_set_id_t id, size_t *size)
2204 +{
2205 +       if (set_hash->id == id)
2206 +               *size += sizeof(struct ip_set_hash_save);
2207 +}
2208 +
2209 +static inline void
2210 +__set_hash_bindings(struct ip_set_hash *set_hash,
2211 +                   ip_set_id_t id, void *data, int *used)
2212 +{
2213 +       if (set_hash->id == id) {
2214 +               struct ip_set_hash_list *hash_list =
2215 +                       (struct ip_set_hash_list *)(data + *used);
2216 +
2217 +               hash_list->ip = set_hash->ip;
2218 +               hash_list->binding = set_hash->binding;
2219 +               *used += sizeof(struct ip_set_hash_list);
2220 +       }
2221 +}
2222 +
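+/*
+ * Serialize one set into the buffer at data + *used: the ip_set_list
+ * header, the type-specific header, the members, and the bindings of
+ * this set. Returns -EAGAIN if the buffer is too small.
+ */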
2223 +static int ip_set_list_set(ip_set_id_t index,
2224 +                          void *data,
2225 +                          int *used,
2226 +                          int len)
2227 +{
2228 +       struct ip_set *set = ip_set_list[index];
2229 +       struct ip_set_list *set_list;
2230 +
2231 +       /* Pointer to our header */
2232 +       set_list = (struct ip_set_list *) (data + *used);
2233 +
2234 +       DP("set: %s, used: %d %p %p", set->name, *used, data, data + *used);
2235 +
2236 +       /* Get and ensure header size */
2237 +       if (*used + sizeof(struct ip_set_list) > len)
2238 +               goto not_enough_mem;
2239 +       *used += sizeof(struct ip_set_list);
2240 +
2241 +       read_lock_bh(&set->lock);
2242 +       /* Get and ensure set specific header size */
2243 +       set_list->header_size = set->type->header_size;
2244 +       if (*used + set_list->header_size > len)
2245 +               goto unlock_set;
2246 +
2247 +       /* Fill in the header */
2248 +       set_list->index = index;
2249 +       set_list->binding = set->binding;
2250 +       set_list->ref = atomic_read(&set->ref);
2251 +
2252 +       /* Fill in set specific header data */
2253 +       set->type->list_header(set, data + *used);
2254 +       *used += set_list->header_size;
2255 +
2256 +       /* Get and ensure set specific members size */
2257 +       set_list->members_size = set->type->list_members_size(set);
2258 +       if (*used + set_list->members_size > len)
2259 +               goto unlock_set;
2260 +
2261 +       /* Fill in set specific members data */
2262 +       set->type->list_members(set, data + *used);
2263 +       *used += set_list->members_size;
2264 +       read_unlock_bh(&set->lock);
2265 +
2266 +       /* Bindings */
2267 +
2268 +       /* Get and ensure set specific bindings size */
2269 +       set_list->bindings_size = 0;
2270 +       FOREACH_HASH_DO(__set_hash_bindings_size_list,
2271 +                       set->id, &set_list->bindings_size);
2272 +       if (*used + set_list->bindings_size > len)
2273 +               goto not_enough_mem;
2274 +
2275 +       /* Fill in set specific bindings data */
2276 +       FOREACH_HASH_DO(__set_hash_bindings, set->id, data, used);
2277 +
2278 +       return 0;
2279 +
2280 +    unlock_set:
2281 +       read_unlock_bh(&set->lock);
2282 +    not_enough_mem:
2283 +       DP("not enough mem, try again");
2284 +       return -EAGAIN;
2285 +}
2286 +
2287 +/*
2288 + * Save sets
2289 + */
2290 +static int ip_set_save_set(ip_set_id_t index,
2291 +                          void *data,
2292 +                          int *used,
2293 +                          int len)
2294 +{
2295 +       struct ip_set *set;
2296 +       struct ip_set_save *set_save;
2297 +
2298 +       /* Pointer to our header */
2299 +       set_save = (struct ip_set_save *) (data + *used);
2300 +
2301 +       /* Get and ensure header size */
2302 +       if (*used + sizeof(struct ip_set_save) > len)
2303 +               goto not_enough_mem;
2304 +       *used += sizeof(struct ip_set_save);
2305 +
2306 +       set = ip_set_list[index];
2307 +       DP("set: %s, used: %u(%u) %p %p", set->name, *used, len,
2308 +          data, data + *used);
2309 +
2310 +       read_lock_bh(&set->lock);
2311 +       /* Get and ensure set specific header size */
2312 +       set_save->header_size = set->type->header_size;
2313 +       if (*used + set_save->header_size > len)
2314 +               goto unlock_set;
2315 +
2316 +       /* Fill in the header */
2317 +       set_save->index = index;
2318 +       set_save->binding = set->binding;
2319 +
2320 +       /* Fill in set specific header data */
2321 +       set->type->list_header(set, data + *used);
2322 +       *used += set_save->header_size;
2323 +
2324 +       DP("set header filled: %s, used: %u(%u) %p %p", set->name, *used,
2325 +          set_save->header_size, data, data + *used);
2326 +       /* Get and ensure set specific members size */
2327 +       set_save->members_size = set->type->list_members_size(set);
2328 +       if (*used + set_save->members_size > len)
2329 +               goto unlock_set;
2330 +
2331 +       /* Fill in set specific members data */
2332 +       set->type->list_members(set, data + *used);
2333 +       *used += set_save->members_size;
2334 +       read_unlock_bh(&set->lock);
2335 +       DP("set members filled: %s, used: %u(%u) %p %p", set->name, *used,
2336 +          set_save->members_size, data, data + *used);
2337 +       return 0;
2338 +
2339 +    unlock_set:
2340 +       read_unlock_bh(&set->lock);
2341 +    not_enough_mem:
2342 +       DP("not enough mem, try again");
2343 +       return -EAGAIN;
2344 +}
2345 +
2346 +static inline void
2347 +__set_hash_save_bindings(struct ip_set_hash *set_hash,
2348 +                        ip_set_id_t id,
2349 +                        void *data,
2350 +                        int *used,
2351 +                        int len,
2352 +                        int *res)
2353 +{
2354 +       if (*res == 0
2355 +           && (id == IP_SET_INVALID_ID || set_hash->id == id)) {
2356 +               struct ip_set_hash_save *hash_save =
2357 +                       (struct ip_set_hash_save *)(data + *used);
2358 +               /* Ensure bindings size */
2359 +               if (*used + sizeof(struct ip_set_hash_save) > len) {
2360 +                       *res = -ENOMEM;
2361 +                       return;
2362 +               }
2363 +               hash_save->id = set_hash->id;
2364 +               hash_save->ip = set_hash->ip;
2365 +               hash_save->binding = set_hash->binding;
2366 +               *used += sizeof(struct ip_set_hash_save);
2367 +       }
2368 +}
2369 +
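+/*
+ * Write a marker record (index == IP_SET_INVALID_ID, zero sizes)
+ * followed by the saved bindings, so that restore can tell where the
+ * set data ends and the bindings begin.
+ */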
2370 +static int ip_set_save_bindings(ip_set_id_t index,
2371 +                               void *data,
2372 +                               int *used,
2373 +                               int len)
2374 +{
2375 +       int res = 0;
2376 +       struct ip_set_save *set_save;
2377 +
2378 +       DP("used %u, len %u", *used, len);
2379 +       /* Get and ensure header size */
2380 +       if (*used + sizeof(struct ip_set_save) > len)
2381 +               return -ENOMEM;
2382 +
2383 +       /* Marker */
2384 +       set_save = (struct ip_set_save *) (data + *used);
2385 +       set_save->index = IP_SET_INVALID_ID;
2386 +       set_save->header_size = 0;
2387 +       set_save->members_size = 0;
2388 +       *used += sizeof(struct ip_set_save);
2389 +
2390 +       DP("marker added used %u, len %u", *used, len);
2391 +       /* Fill in bindings data */
2392 +       if (index != IP_SET_INVALID_ID)
2393 +               /* Sets are identified by id in hash */
2394 +               index = ip_set_list[index]->id;
2395 +       FOREACH_HASH_DO(__set_hash_save_bindings, index, data, used, len, &res);
2396 +
2397 +       return res;
2398 +}
2399 +
2400 +/*
2401 + * Restore sets
2402 + */
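+/*
+ * The input is a sequence of ip_set_restore records, each followed by
+ * its type-specific header and member data, then a marker record with
+ * index == IP_SET_INVALID_ID, then ip_set_hash_save binding records.
+ * Returns 0 on success, otherwise the number of the offending input line.
+ */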
2403 +static int ip_set_restore(void *data,
2404 +                         int len)
2405 +{
2406 +       int res = 0;
2407 +       int line = 0, used = 0, members_size;
2408 +       struct ip_set *set;
2409 +       struct ip_set_hash_save *hash_save;
2410 +       struct ip_set_restore *set_restore;
2411 +       ip_set_id_t index;
2412 +
2413 +       /* Loop to restore sets */
2414 +       while (1) {
2415 +               line++;
2416 +
2417 +               DP("%u %u %u", used, sizeof(struct ip_set_restore), len);
2418 +               /* Get and ensure header size */
2419 +               if (used + sizeof(struct ip_set_restore) > len)
2420 +                       return line;
2421 +               set_restore = (struct ip_set_restore *) (data + used);
2422 +               used += sizeof(struct ip_set_restore);
2423 +
2424 +               /* Ensure data size */
2425 +               if (used
2426 +                   + set_restore->header_size
2427 +                   + set_restore->members_size > len)
2428 +                       return line;
2429 +
2430 +               /* Check marker */
2431 +               if (set_restore->index == IP_SET_INVALID_ID) {
2432 +                       line--;
2433 +                       goto bindings;
2434 +               }
2435 +
2436 +               /* Try to create the set */
2437 +               DP("restore %s %s", set_restore->name, set_restore->typename);
2438 +               res = ip_set_create(set_restore->name,
2439 +                                   set_restore->typename,
2440 +                                   set_restore->index,
2441 +                                   data + used,
2442 +                                   set_restore->header_size);
2443 +
2444 +               if (res != 0)
2445 +                       return line;
2446 +               used += set_restore->header_size;
2447 +
2448 +               index = ip_set_find_byindex(set_restore->index);
2449 +               DP("index %u, restore_index %u", index, set_restore->index);
2450 +               if (index != set_restore->index)
2451 +                       return line;
2452 +               /* Try to restore members data */
2453 +               set = ip_set_list[index];
2454 +               members_size = 0;
2455 +               DP("members_size %u reqsize %u",
2456 +                  set_restore->members_size, set->type->reqsize);
2457 +               while (members_size + set->type->reqsize <=
2458 +                      set_restore->members_size) {
2459 +                       line++;
2460 +                       DP("members: %u, line %u", members_size, line);
2461 +                       res = __ip_set_addip(index,
2462 +                                          data + used + members_size,
2463 +                                          set->type->reqsize);
2464 +                       if (!(res == 0 || res == -EEXIST))
2465 +                               return line;
2466 +                       members_size += set->type->reqsize;
2467 +               }
2468 +
2469 +               DP("members_size %u  %u",
2470 +                  set_restore->members_size, members_size);
2471 +               if (members_size != set_restore->members_size)
2472 +                       return line;
2473 +               used += set_restore->members_size;
2474 +       }
2475 +
2476 +   bindings:
2477 +       /* Loop to restore bindings */
2478 +       while (used < len) {
2479 +               line++;
2480 +
2481 +               DP("restore binding, line %u", line);
2482 +               /* Get and ensure size */
2483 +               if (used + sizeof(struct ip_set_hash_save) > len)
2484 +                       return line;
2485 +               hash_save = (struct ip_set_hash_save *) (data + used);
2486 +               used += sizeof(struct ip_set_hash_save);
2487 +
2488 +               /* hash_save->id is used to store the index */
2489 +               index = ip_set_find_byindex(hash_save->id);
2490 +               DP("restore binding index %u, id %u, %u -> %u",
2491 +                  index, hash_save->id, hash_save->ip, hash_save->binding);
2492 +               if (index != hash_save->id)
2493 +                       return line;
2494 +               if (ip_set_find_byindex(hash_save->binding) == IP_SET_INVALID_ID) {
2495 +                       DP("corrupt binding set index %u", hash_save->binding);
2496 +                       return line;
2497 +               }
2498 +               set = ip_set_list[hash_save->id];
2499 +               /* Null valued IP means default binding */
2500 +               if (hash_save->ip)
2501 +                       res = ip_set_hash_add(set->id,
2502 +                                             hash_save->ip,
2503 +                                             hash_save->binding);
2504 +               else {
2505 +                       IP_SET_ASSERT(set->binding == IP_SET_INVALID_ID);
2506 +                       write_lock_bh(&ip_set_lock);
2507 +                       set->binding = hash_save->binding;
2508 +                       __ip_set_get(set->binding);
2509 +                       write_unlock_bh(&ip_set_lock);
2510 +                       DP("default binding: %u", set->binding);
2511 +               }
2512 +               if (res != 0)
2513 +                       return line;
2514 +       }
2515 +       if (used != len)
2516 +               return line;
2517 +
2518 +       return 0;
2519 +}
2520 +
2521 +static int
2522 +ip_set_sockfn_set(struct sock *sk, int optval, void *user, unsigned int len)
2523 +{
2524 +       void *data;
2525 +       int res = 0;            /* Assume OK */
2526 +       unsigned *op;
2527 +       struct ip_set_req_adt *req_adt;
2528 +       ip_set_id_t index = IP_SET_INVALID_ID;
2529 +       int (*adtfn)(ip_set_id_t index,
2530 +                    const void *data, size_t size);
2531 +       struct fn_table {
2532 +               int (*fn)(ip_set_id_t index,
2533 +                         const void *data, size_t size);
2534 +       } adtfn_table[] =
2535 +       { { ip_set_addip }, { ip_set_delip }, { ip_set_testip},
2536 +         { ip_set_bindip}, { ip_set_unbindip }, { ip_set_testbind },
2537 +       };
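+       /* adtfn_table is indexed by *op - IP_SET_OP_ADD_IP, so the entry
+        * order must match the IP_SET_OP_* numbering checked below. */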
2538 +
2539 +       DP("optval=%d, user=%p, len=%d", optval, user, len);
2540 +       if (!capable(CAP_NET_ADMIN))
2541 +               return -EPERM;
2542 +       if (optval != SO_IP_SET)
2543 +               return -EBADF;
2544 +       if (len <= sizeof(unsigned)) {
2545 +               ip_set_printk("short userdata (want >%zu, got %u)",
2546 +                             sizeof(unsigned), len);
2547 +               return -EINVAL;
2548 +       }
2549 +       data = vmalloc(len);
2550 +       if (!data) {
2551 +               DP("out of mem for %u bytes", len);
2552 +               return -ENOMEM;
2553 +       }
2554 +       if (copy_from_user(data, user, len) != 0) {
2555 +               res = -EFAULT;
2556 +               goto done;
2557 +       }
2558 +       if (down_interruptible(&ip_set_app_mutex)) {
2559 +               res = -EINTR;
2560 +               goto done;
2561 +       }
2562 +
2563 +       op = (unsigned *)data;
2564 +       DP("op=%x", *op);
2565 +
2566 +       if (*op < IP_SET_OP_VERSION) {
2567 +               /* Check the version at the beginning of operations */
2568 +               struct ip_set_req_version *req_version =
2569 +                       (struct ip_set_req_version *) data;
2570 +               if (req_version->version != IP_SET_PROTOCOL_VERSION) {
2571 +                       res = -EPROTO;
2572 +                       goto done;
2573 +               }
2574 +       }
2575 +
2576 +       switch (*op) {
2577 +       case IP_SET_OP_CREATE:{
2578 +               struct ip_set_req_create *req_create
2579 +                       = (struct ip_set_req_create *) data;
2580 +
2581 +               if (len < sizeof(struct ip_set_req_create)) {
2582 +                       ip_set_printk("short CREATE data (want >=%zu, got %u)",
2583 +                                     sizeof(struct ip_set_req_create), len);
2584 +                       res = -EINVAL;
2585 +                       goto done;
2586 +               }
2587 +               req_create->name[IP_SET_MAXNAMELEN - 1] = '\0';
2588 +               req_create->typename[IP_SET_MAXNAMELEN - 1] = '\0';
2589 +               res = ip_set_create(req_create->name,
2590 +                                   req_create->typename,
2591 +                                   IP_SET_INVALID_ID,
2592 +                                   data + sizeof(struct ip_set_req_create),
2593 +                                   len - sizeof(struct ip_set_req_create));
2594 +               goto done;
2595 +       }
2596 +       case IP_SET_OP_DESTROY:{
2597 +               struct ip_set_req_std *req_destroy
2598 +                       = (struct ip_set_req_std *) data;
2599 +
2600 +               if (len != sizeof(struct ip_set_req_std)) {
2601 +                       ip_set_printk("invalid DESTROY data (want %zu, got %u)",
2602 +                                     sizeof(struct ip_set_req_std), len);
2603 +                       res = -EINVAL;
2604 +                       goto done;
2605 +               }
2606 +               if (strcmp(req_destroy->name, IPSET_TOKEN_ALL) == 0) {
2607 +                       /* Destroy all sets */
2608 +                       index = IP_SET_INVALID_ID;
2609 +               } else {
2610 +                       req_destroy->name[IP_SET_MAXNAMELEN - 1] = '\0';
2611 +                       index = ip_set_find_byname(req_destroy->name);
2612 +
2613 +                       if (index == IP_SET_INVALID_ID) {
2614 +                               res = -ENOENT;
2615 +                               goto done;
2616 +                       }
2617 +               }
2618 +
2619 +               res = ip_set_destroy(index);
2620 +               goto done;
2621 +       }
2622 +       case IP_SET_OP_FLUSH:{
2623 +               struct ip_set_req_std *req_flush =
2624 +                       (struct ip_set_req_std *) data;
2625 +
2626 +               if (len != sizeof(struct ip_set_req_std)) {
2627 +                       ip_set_printk("invalid FLUSH data (want %zu, got %u)",
2628 +                                     sizeof(struct ip_set_req_std), len);
2629 +                       res = -EINVAL;
2630 +                       goto done;
2631 +               }
2632 +               if (strcmp(req_flush->name, IPSET_TOKEN_ALL) == 0) {
2633 +                       /* Flush all sets */
2634 +                       index = IP_SET_INVALID_ID;
2635 +               } else {
2636 +                       req_flush->name[IP_SET_MAXNAMELEN - 1] = '\0';
2637 +                       index = ip_set_find_byname(req_flush->name);
2638 +
2639 +                       if (index == IP_SET_INVALID_ID) {
2640 +                               res = -ENOENT;
2641 +                               goto done;
2642 +                       }
2643 +               }
2644 +               res = ip_set_flush(index);
2645 +               goto done;
2646 +       }
2647 +       case IP_SET_OP_RENAME:{
2648 +               struct ip_set_req_create *req_rename
2649 +                       = (struct ip_set_req_create *) data;
2650 +
2651 +               if (len != sizeof(struct ip_set_req_create)) {
2652 +                       ip_set_printk("invalid RENAME data (want %zu, got %u)",
2653 +                                     sizeof(struct ip_set_req_create), len);
2654 +                       res = -EINVAL;
2655 +                       goto done;
2656 +               }
2657 +
2658 +               req_rename->name[IP_SET_MAXNAMELEN - 1] = '\0';
2659 +               req_rename->typename[IP_SET_MAXNAMELEN - 1] = '\0';
2660 +
2661 +               index = ip_set_find_byname(req_rename->name);
2662 +               if (index == IP_SET_INVALID_ID) {
2663 +                       res = -ENOENT;
2664 +                       goto done;
2665 +               }
2666 +               res = ip_set_rename(index, req_rename->typename);
2667 +               goto done;
2668 +       }
2669 +       case IP_SET_OP_SWAP:{
2670 +               struct ip_set_req_create *req_swap
2671 +                       = (struct ip_set_req_create *) data;
2672 +               ip_set_id_t to_index;
2673 +
2674 +               if (len != sizeof(struct ip_set_req_create)) {
2675 +                       ip_set_printk("invalid SWAP data (want %zu, got %u)",
2676 +                                     sizeof(struct ip_set_req_create), len);
2677 +                       res = -EINVAL;
2678 +                       goto done;
2679 +               }
2680 +
2681 +               req_swap->name[IP_SET_MAXNAMELEN - 1] = '\0';
2682 +               req_swap->typename[IP_SET_MAXNAMELEN - 1] = '\0';
2683 +
2684 +               index = ip_set_find_byname(req_swap->name);
2685 +               if (index == IP_SET_INVALID_ID) {
2686 +                       res = -ENOENT;
2687 +                       goto done;
2688 +               }
2689 +               to_index = ip_set_find_byname(req_swap->typename);
2690 +               if (to_index == IP_SET_INVALID_ID) {
2691 +                       res = -ENOENT;
2692 +                       goto done;
2693 +               }
2694 +               res = ip_set_swap(index, to_index);
2695 +               goto done;
2696 +       }
2697 +       default:
2698 +               break;  /* Set identified by id */
2699 +       }
2700 +
2701 +       /* There we may have add/del/test/bind/unbind/test_bind operations */
2702 +       if (*op < IP_SET_OP_ADD_IP || *op > IP_SET_OP_TEST_BIND_SET) {
2703 +               res = -EBADMSG;
2704 +               goto done;
2705 +       }
2706 +       adtfn = adtfn_table[*op - IP_SET_OP_ADD_IP].fn;
2707 +
2708 +       if (len < sizeof(struct ip_set_req_adt)) {
2709 +               ip_set_printk("short data in adt request (want >=%zu, got %u)",
2710 +                             sizeof(struct ip_set_req_adt), len);
2711 +               res = -EINVAL;
2712 +               goto done;
2713 +       }
2714 +       req_adt = (struct ip_set_req_adt *) data;
2715 +
2716 +       /* -U :all: :all:|:default: uses IP_SET_INVALID_ID */
2717 +       if (!(*op == IP_SET_OP_UNBIND_SET
2718 +             && req_adt->index == IP_SET_INVALID_ID)) {
2719 +               index = ip_set_find_byindex(req_adt->index);
2720 +               if (index == IP_SET_INVALID_ID) {
2721 +                       res = -ENOENT;
2722 +                       goto done;
2723 +               }
2724 +       }
2725 +       res = adtfn(index, data, len);
2726 +
2727 +    done:
2728 +       up(&ip_set_app_mutex);
2729 +       vfree(data);
2730 +       if (res > 0)
2731 +               res = 0;
2732 +       DP("final result %d", res);
2733 +       return res;
2734 +}
2735 +
2736 +static int
2737 +ip_set_sockfn_get(struct sock *sk, int optval, void *user, int *len)
2738 +{
2739 +       int res = 0;
2740 +       unsigned *op;
2741 +       ip_set_id_t index = IP_SET_INVALID_ID;
2742 +       void *data;
2743 +       int copylen = *len;
2744 +
2745 +       DP("optval=%d, user=%p, len=%d", optval, user, *len);
2746 +       if (!capable(CAP_NET_ADMIN))
2747 +               return -EPERM;
2748 +       if (optval != SO_IP_SET)
2749 +               return -EBADF;
2750 +       if (*len < sizeof(unsigned)) {
2751 +               ip_set_printk("short userdata (want >=%zu, got %d)",
2752 +                             sizeof(unsigned), *len);
2753 +               return -EINVAL;
2754 +       }
2755 +       data = vmalloc(*len);
2756 +       if (!data) {
2757 +               DP("out of mem for %d bytes", *len);
2758 +               return -ENOMEM;
2759 +       }
2760 +       if (copy_from_user(data, user, *len) != 0) {
2761 +               res = -EFAULT;
2762 +               goto done;
2763 +       }
2764 +       if (down_interruptible(&ip_set_app_mutex)) {
2765 +               res = -EINTR;
2766 +               goto done;
2767 +       }
2768 +
2769 +       op = (unsigned *) data;
2770 +       DP("op=%x", *op);
2771 +
2772 +       if (*op < IP_SET_OP_VERSION) {
2773 +               /* Check the version at the beginning of operations */
2774 +               struct ip_set_req_version *req_version =
2775 +                       (struct ip_set_req_version *) data;
2776 +               if (req_version->version != IP_SET_PROTOCOL_VERSION) {
2777 +                       res = -EPROTO;
2778 +                       goto done;
2779 +               }
2780 +       }
2781 +
2782 +       switch (*op) {
2783 +       case IP_SET_OP_VERSION: {
2784 +               struct ip_set_req_version *req_version =
2785 +                   (struct ip_set_req_version *) data;
2786 +
2787 +               if (*len != sizeof(struct ip_set_req_version)) {
2788 +                       ip_set_printk("invalid VERSION (want %zu, got %d)",
2789 +                                     sizeof(struct ip_set_req_version),
2790 +                                     *len);
2791 +                       res = -EINVAL;
2792 +                       goto done;
2793 +               }
2794 +
2795 +               req_version->version = IP_SET_PROTOCOL_VERSION;
2796 +               res = copy_to_user(user, req_version,
2797 +                                  sizeof(struct ip_set_req_version));
2798 +               goto done;
2799 +       }
2800 +       case IP_SET_OP_GET_BYNAME: {
2801 +               struct ip_set_req_get_set *req_get
2802 +                       = (struct ip_set_req_get_set *) data;
2803 +
2804 +               if (*len != sizeof(struct ip_set_req_get_set)) {
2805 +                       ip_set_printk("invalid GET_BYNAME (want %zu, got %d)",
2806 +                                     sizeof(struct ip_set_req_get_set), *len);
2807 +                       res = -EINVAL;
2808 +                       goto done;
2809 +               }
2810 +               req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
2811 +               index = ip_set_find_byname(req_get->set.name);
2812 +               req_get->set.index = index;
2813 +               goto copy;
2814 +       }
2815 +       case IP_SET_OP_GET_BYINDEX: {
2816 +               struct ip_set_req_get_set *req_get
2817 +                       = (struct ip_set_req_get_set *) data;
2818 +
2819 +               if (*len != sizeof(struct ip_set_req_get_set)) {
2820 +                       ip_set_printk("invalid GET_BYINDEX (want %zu, got %d)",
2821 +                                     sizeof(struct ip_set_req_get_set), *len);
2822 +                       res = -EINVAL;
2823 +                       goto done;
2824 +               }
2825 +               req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
2826 +               index = ip_set_find_byindex(req_get->set.index);
2827 +               strncpy(req_get->set.name,
2828 +                       index == IP_SET_INVALID_ID ? ""
2829 +                       : ip_set_list[index]->name, IP_SET_MAXNAMELEN);
2830 +               goto copy;
2831 +       }
2832 +       case IP_SET_OP_ADT_GET: {
2833 +               struct ip_set_req_adt_get *req_get
2834 +                       = (struct ip_set_req_adt_get *) data;
2835 +
2836 +               if (*len != sizeof(struct ip_set_req_adt_get)) {
2837 +                       ip_set_printk("invalid ADT_GET (want %zu, got %d)",
2838 +                                     sizeof(struct ip_set_req_adt_get), *len);
2839 +                       res = -EINVAL;
2840 +                       goto done;
2841 +               }
2842 +               req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
2843 +               index = ip_set_find_byname(req_get->set.name);
2844 +               if (index != IP_SET_INVALID_ID) {
2845 +                       req_get->set.index = index;
2846 +                       strncpy(req_get->typename,
2847 +                               ip_set_list[index]->type->typename,
2848 +                               IP_SET_MAXNAMELEN - 1);
2849 +               } else {
2850 +                       res = -ENOENT;
2851 +                       goto done;
2852 +               }
2853 +               goto copy;
2854 +       }
2855 +       case IP_SET_OP_MAX_SETS: {
2856 +               struct ip_set_req_max_sets *req_max_sets
2857 +                       = (struct ip_set_req_max_sets *) data;
2858 +               ip_set_id_t i;
2859 +
2860 +               if (*len != sizeof(struct ip_set_req_max_sets)) {
2861 +                       ip_set_printk("invalid MAX_SETS (want %zu, got %d)",
2862 +                                     sizeof(struct ip_set_req_max_sets), *len);
2863 +                       res = -EINVAL;
2864 +                       goto done;
2865 +               }
2866 +
2867 +               if (strcmp(req_max_sets->set.name, IPSET_TOKEN_ALL) == 0) {
2868 +                       req_max_sets->set.index = IP_SET_INVALID_ID;
2869 +               } else {
2870 +                       req_max_sets->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
2871 +                       req_max_sets->set.index =
2872 +                               ip_set_find_byname(req_max_sets->set.name);
2873 +                       if (req_max_sets->set.index == IP_SET_INVALID_ID) {
2874 +                               res = -ENOENT;
2875 +                               goto done;
2876 +                       }
2877 +               }
2878 +               req_max_sets->max_sets = ip_set_max;
2879 +               req_max_sets->sets = 0;
2880 +               for (i = 0; i < ip_set_max; i++) {
2881 +                       if (ip_set_list[i] != NULL)
2882 +                               req_max_sets->sets++;
2883 +               }
2884 +               goto copy;
2885 +       }
2886 +       case IP_SET_OP_LIST_SIZE:
2887 +       case IP_SET_OP_SAVE_SIZE: {
2888 +               struct ip_set_req_setnames *req_setnames
2889 +                       = (struct ip_set_req_setnames *) data;
2890 +               struct ip_set_name_list *name_list;
2891 +               struct ip_set *set;
2892 +               ip_set_id_t i;
2893 +               int used;
2894 +
2895 +               if (*len < sizeof(struct ip_set_req_setnames)) {
2896 +                       ip_set_printk("short LIST_SIZE (want >=%zu, got %d)",
2897 +                                     sizeof(struct ip_set_req_setnames), *len);
2898 +                       res = -EINVAL;
2899 +                       goto done;
2900 +               }
2901 +
2902 +               req_setnames->size = 0;
2903 +               used = sizeof(struct ip_set_req_setnames);
2904 +               for (i = 0; i < ip_set_max; i++) {
2905 +                       if (ip_set_list[i] == NULL)
2906 +                               continue;
2907 +                       name_list = (struct ip_set_name_list *)
2908 +                               (data + used);
2909 +                       used += sizeof(struct ip_set_name_list);
2910 +                       if (used > copylen) {
2911 +                               res = -EAGAIN;
2912 +                               goto done;
2913 +                       }
2914 +                       set = ip_set_list[i];
2915 +                       /* Fill in index, name, etc. */
2916 +                       name_list->index = i;
2917 +                       name_list->id = set->id;
2918 +                       strncpy(name_list->name,
2919 +                               set->name,
2920 +                               IP_SET_MAXNAMELEN - 1);
2921 +                       strncpy(name_list->typename,
2922 +                               set->type->typename,
2923 +                               IP_SET_MAXNAMELEN - 1);
2924 +                       DP("filled %s of type %s, index %u\n",
2925 +                          name_list->name, name_list->typename,
2926 +                          name_list->index);
2927 +                       if (!(req_setnames->index == IP_SET_INVALID_ID
2928 +                             || req_setnames->index == i))
2929 +                             continue;
2930 +                       /* Update size */
2931 +                       switch (*op) {
2932 +                       case IP_SET_OP_LIST_SIZE: {
2933 +                               req_setnames->size += sizeof(struct ip_set_list)
2934 +                                       + set->type->header_size
2935 +                                       + set->type->list_members_size(set);
2936 +                               /* Sets are identified by id in the hash */
2937 +                               FOREACH_HASH_DO(__set_hash_bindings_size_list,
2938 +                                               set->id, &req_setnames->size);
2939 +                               break;
2940 +                       }
2941 +                       case IP_SET_OP_SAVE_SIZE: {
2942 +                               req_setnames->size += sizeof(struct ip_set_save)
2943 +                                       + set->type->header_size
2944 +                                       + set->type->list_members_size(set);
2945 +                               FOREACH_HASH_DO(__set_hash_bindings_size_save,
2946 +                                               set->id, &req_setnames->size);
2947 +                               break;
2948 +                       }
2949 +                       default:
2950 +                               break;
2951 +                       }
2952 +               }
2953 +               if (copylen != used) {
2954 +                       res = -EAGAIN;
2955 +                       goto done;
2956 +               }
2957 +               goto copy;
2958 +       }
2959 +       case IP_SET_OP_LIST: {
2960 +               struct ip_set_req_list *req_list
2961 +                       = (struct ip_set_req_list *) data;
2962 +               ip_set_id_t i;
2963 +               int used;
2964 +
2965 +               if (*len < sizeof(struct ip_set_req_list)) {
2966 +                       ip_set_printk("short LIST (want >=%zu, got %d)",
2967 +                                     sizeof(struct ip_set_req_list), *len);
2968 +                       res = -EINVAL;
2969 +                       goto done;
2970 +               }
2971 +               index = req_list->index;
2972 +               if (index != IP_SET_INVALID_ID
2973 +                   && ip_set_find_byindex(index) != index) {
2974 +                       res = -ENOENT;
2975 +                       goto done;
2976 +               }
2977 +               used = 0;
2978 +               if (index == IP_SET_INVALID_ID) {
2979 +                       /* List all sets */
2980 +                       for (i = 0; i < ip_set_max && res == 0; i++) {
2981 +                               if (ip_set_list[i] != NULL)
2982 +                                       res = ip_set_list_set(i, data, &used, *len);
2983 +                       }
2984 +               } else {
2985 +                       /* List an individual set */
2986 +                       res = ip_set_list_set(index, data, &used, *len);
2987 +               }
2988 +               if (res != 0)
2989 +                       goto done;
2990 +               else if (copylen != used) {
2991 +                       res = -EAGAIN;
2992 +                       goto done;
2993 +               }
2994 +               goto copy;
2995 +       }
2996 +       case IP_SET_OP_SAVE: {
2997 +               struct ip_set_req_list *req_save
2998 +                       = (struct ip_set_req_list *) data;
2999 +               ip_set_id_t i;
3000 +               int used;
3001 +
3002 +               if (*len < sizeof(struct ip_set_req_list)) {
3003 +                       ip_set_printk("short SAVE (want >=%zu, got %d)",
3004 +                                     sizeof(struct ip_set_req_list), *len);
3005 +                       res = -EINVAL;
3006 +                       goto done;
3007 +               }
3008 +               index = req_save->index;
3009 +               if (index != IP_SET_INVALID_ID
3010 +                   && ip_set_find_byindex(index) != index) {
3011 +                       res = -ENOENT;
3012 +                       goto done;
3013 +               }
3014 +               used = 0;
3015 +               if (index == IP_SET_INVALID_ID) {
3016 +                       /* Save all sets */
3017 +                       for (i = 0; i < ip_set_max && res == 0; i++) {
3018 +                               if (ip_set_list[i] != NULL)
3019 +                                       res = ip_set_save_set(i, data, &used, *len);
3020 +                       }
3021 +               } else {
3022 +                       /* Save an individual set */
3023 +                       res = ip_set_save_set(index, data, &used, *len);
3024 +               }
3025 +               if (res == 0)
3026 +                       res = ip_set_save_bindings(index, data, &used, *len);
3027 +
3028 +               if (res != 0)
3029 +                       goto done;
3030 +               else if (copylen != used) {
3031 +                       res = -EAGAIN;
3032 +                       goto done;
3033 +               }
3034 +               goto copy;
3035 +       }
3036 +       case IP_SET_OP_RESTORE: {
3037 +               struct ip_set_req_setnames *req_restore
3038 +                       = (struct ip_set_req_setnames *) data;
3039 +               int line;
3040 +
3041 +               if (*len < sizeof(struct ip_set_req_setnames)
3042 +                   || *len != req_restore->size) {
3043 +                       ip_set_printk("invalid RESTORE (want =%zu, got %d)",
3044 +                                     req_restore->size, *len);
3045 +                       res = -EINVAL;
3046 +                       goto done;
3047 +               }
3048 +               line = ip_set_restore(data + sizeof(struct ip_set_req_setnames),
3049 +                                     req_restore->size - sizeof(struct ip_set_req_setnames));
3050 +               DP("ip_set_restore: %u", line);
3051 +               if (line != 0) {
3052 +                       res = -EAGAIN;
3053 +                       req_restore->size = line;
3054 +                       copylen = sizeof(struct ip_set_req_setnames);
3055 +                       goto copy;
3056 +               }
3057 +               goto done;
3058 +       }
3059 +       default:
3060 +               res = -EBADMSG;
3061 +               goto done;
3062 +       }       /* end of switch(op) */
3063 +
3064 +    copy:
3065 +       DP("set %s, copylen %u", index != IP_SET_INVALID_ID
3066 +                                && ip_set_list[index]
3067 +                    ? ip_set_list[index]->name
3068 +                    : ":all:", copylen);
3069 +       res = copy_to_user(user, data, copylen);
3070 +
3071 +    done:
3072 +       up(&ip_set_app_mutex);
3073 +       vfree(data);
3074 +       if (res > 0)
3075 +               res = 0;
3076 +       DP("final result %d", res);
3077 +       return res;
3078 +}
3079 +
3080 +static struct nf_sockopt_ops so_set = {
3081 +       .pf             = PF_INET,
3082 +       .set_optmin     = SO_IP_SET,
3083 +       .set_optmax     = SO_IP_SET + 1,
3084 +       .set            = &ip_set_sockfn_set,
3085 +       .get_optmin     = SO_IP_SET,
3086 +       .get_optmax     = SO_IP_SET + 1,
3087 +       .get            = &ip_set_sockfn_get,
3088 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
3089 +       .owner          = THIS_MODULE,
3090 +#endif
3091 +};
3092 +
3093 +static int max_sets, hash_size;
3094 +module_param(max_sets, int, 0600);
3095 +MODULE_PARM_DESC(max_sets, "maximal number of sets");
3096 +module_param(hash_size, int, 0600);
3097 +MODULE_PARM_DESC(hash_size, "hash size for bindings");
3098 +MODULE_LICENSE("GPL");
3099 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
3100 +MODULE_DESCRIPTION("module implementing core IP set support");
3101 +
3102 +static int __init ip_set_init(void)
3103 +{
3104 +       int res;
3105 +       ip_set_id_t i;
3106 +
3107 +       get_random_bytes(&ip_set_hash_random, 4);
3108 +       if (max_sets)
3109 +               ip_set_max = max_sets;
3110 +       ip_set_list = vmalloc(sizeof(struct ip_set *) * ip_set_max);
3111 +       if (!ip_set_list) {
3112 +               printk(KERN_ERR "Unable to create ip_set_list\n");
3113 +               return -ENOMEM;
3114 +       }
3115 +       memset(ip_set_list, 0, sizeof(struct ip_set *) * ip_set_max);
3116 +       if (hash_size)
3117 +               ip_set_bindings_hash_size = hash_size;
3118 +       ip_set_hash = vmalloc(sizeof(struct list_head) * ip_set_bindings_hash_size);
3119 +       if (!ip_set_hash) {
3120 +               printk(KERN_ERR "Unable to create ip_set_hash\n");
3121 +               vfree(ip_set_list);
3122 +               return -ENOMEM;
3123 +       }
3124 +       for (i = 0; i < ip_set_bindings_hash_size; i++)
3125 +               INIT_LIST_HEAD(&ip_set_hash[i]);
3126 +
3127 +       INIT_LIST_HEAD(&set_type_list);
3128 +
3129 +       res = nf_register_sockopt(&so_set);
3130 +       if (res != 0) {
3131 +               ip_set_printk("SO_SET registry failed: %d", res);
3132 +               vfree(ip_set_list);
3133 +               vfree(ip_set_hash);
3134 +               return res;
3135 +       }
3136 +       return 0;
3137 +}
3138 +
3139 +static void __exit ip_set_fini(void)
3140 +{
3141 +       /* There can't be any existing set or binding */
3142 +       nf_unregister_sockopt(&so_set);
3143 +       vfree(ip_set_list);
3144 +       vfree(ip_set_hash);
3145 +       DP("these are the famous last words");
3146 +}
3147 +
3148 +EXPORT_SYMBOL(ip_set_register_set_type);
3149 +EXPORT_SYMBOL(ip_set_unregister_set_type);
3150 +
3151 +EXPORT_SYMBOL(ip_set_get_byname);
3152 +EXPORT_SYMBOL(ip_set_get_byindex);
3153 +EXPORT_SYMBOL(ip_set_put);
3154 +
3155 +EXPORT_SYMBOL(ip_set_addip_kernel);
3156 +EXPORT_SYMBOL(ip_set_delip_kernel);
3157 +EXPORT_SYMBOL(ip_set_testip_kernel);
3158 +
3159 +module_init(ip_set_init);
3160 +module_exit(ip_set_fini);
3161 --- /dev/null
3162 +++ b/net/ipv4/netfilter/ip_set_iphash.c
3163 @@ -0,0 +1,429 @@
3164 +/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
3165 + *
3166 + * This program is free software; you can redistribute it and/or modify
3167 + * it under the terms of the GNU General Public License version 2 as
3168 + * published by the Free Software Foundation.
3169 + */
3170 +
3171 +/* Kernel module implementing an ip hash set */
3172 +
3173 +#include <linux/module.h>
3174 +#include <linux/ip.h>
3175 +#include <linux/skbuff.h>
3176 +#include <linux/version.h>
3177 +#include <linux/jhash.h>
3178 +#include <linux/netfilter_ipv4/ip_tables.h>
3179 +#include <linux/netfilter_ipv4/ip_set.h>
3180 +#include <linux/errno.h>
3181 +#include <asm/uaccess.h>
3182 +#include <asm/bitops.h>
3183 +#include <linux/spinlock.h>
3184 +#include <linux/vmalloc.h>
3185 +#include <linux/random.h>
3186 +
3187 +#include <net/ip.h>
3188 +
3189 +#include <linux/netfilter_ipv4/ip_set_malloc.h>
3190 +#include <linux/netfilter_ipv4/ip_set_iphash.h>
3191 +
3192 +static int limit = MAX_RANGE;
3193 +
3194 +static inline __u32
3195 +jhash_ip(const struct ip_set_iphash *map, uint16_t i, ip_set_ip_t ip)
3196 +{
3197 +       return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
3198 +}
3199 +
3200 +static inline __u32
3201 +hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
3202 +{
3203 +       struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3204 +       __u32 id;
3205 +       u_int16_t i;
3206 +       ip_set_ip_t *elem;
3207 +
3208 +       *hash_ip = ip & map->netmask;
3209 +       DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u, %u.%u.%u.%u",
3210 +          set->name, HIPQUAD(ip), HIPQUAD(*hash_ip), HIPQUAD(map->netmask));
3211 +
3212 +       for (i = 0; i < map->probes; i++) {
3213 +               id = jhash_ip(map, i, *hash_ip) % map->hashsize;
3214 +               DP("hash key: %u", id);
3215 +               elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
3216 +               if (*elem == *hash_ip)
3217 +                       return id;
3218 +               /* No shortcut when testing - there may be deleted
3219 +                * entries. */
3220 +       }
3221 +       return UINT_MAX;
3222 +}
3223 +
3224 +static inline int
3225 +__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
3226 +{
3227 +       return (ip && hash_id(set, ip, hash_ip) != UINT_MAX);
3228 +}
3229 +
3230 +static int
3231 +testip(struct ip_set *set, const void *data, size_t size,
3232 +       ip_set_ip_t *hash_ip)
3233 +{
3234 +       struct ip_set_req_iphash *req =
3235 +           (struct ip_set_req_iphash *) data;
3236 +
3237 +       if (size != sizeof(struct ip_set_req_iphash)) {
3238 +               ip_set_printk("data length wrong (want %zu, have %zu)",
3239 +                             sizeof(struct ip_set_req_iphash),
3240 +                             size);
3241 +               return -EINVAL;
3242 +       }
3243 +       return __testip(set, req->ip, hash_ip);
3244 +}
3245 +
3246 +static int
3247 +testip_kernel(struct ip_set *set,
3248 +             const struct sk_buff *skb,
3249 +             ip_set_ip_t *hash_ip,
3250 +             const u_int32_t *flags,
3251 +             unsigned char index)
3252 +{
3253 +       return __testip(set,
3254 +                       ntohl(flags[index] & IPSET_SRC
3255 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
3256 +                               ? ip_hdr(skb)->saddr
3257 +                               : ip_hdr(skb)->daddr),
3258 +#else
3259 +                               ? skb->nh.iph->saddr
3260 +                               : skb->nh.iph->daddr),
3261 +#endif
3262 +                       hash_ip);
3263 +}
3264 +
3265 +static inline int
3266 +__addip(struct ip_set_iphash *map, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
3267 +{
3268 +       __u32 probe;
3269 +       u_int16_t i;
3270 +       ip_set_ip_t *elem;
3271 +
3272 +       if (!ip || map->elements >= limit)
3273 +               return -ERANGE;
3274 +
3275 +       *hash_ip = ip & map->netmask;
3276 +
3277 +       for (i = 0; i < map->probes; i++) {
3278 +               probe = jhash_ip(map, i, *hash_ip) % map->hashsize;
3279 +               elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
3280 +               if (*elem == *hash_ip)
3281 +                       return -EEXIST;
3282 +               if (!*elem) {
3283 +                       *elem = *hash_ip;
3284 +                       map->elements++;
3285 +                       return 0;
3286 +               }
3287 +       }
3288 +       /* Trigger rehashing */
3289 +       return -EAGAIN;
3290 +}
3291 +
3292 +static int
3293 +addip(struct ip_set *set, const void *data, size_t size,
3294 +        ip_set_ip_t *hash_ip)
3295 +{
3296 +       struct ip_set_req_iphash *req =
3297 +           (struct ip_set_req_iphash *) data;
3298 +
3299 +       if (size != sizeof(struct ip_set_req_iphash)) {
3300 +               ip_set_printk("data length wrong (want %zu, have %zu)",
3301 +                             sizeof(struct ip_set_req_iphash),
3302 +                             size);
3303 +               return -EINVAL;
3304 +       }
3305 +       return __addip((struct ip_set_iphash *) set->data, req->ip, hash_ip);
3306 +}
3307 +
3308 +static int
3309 +addip_kernel(struct ip_set *set,
3310 +            const struct sk_buff *skb,
3311 +            ip_set_ip_t *hash_ip,
3312 +            const u_int32_t *flags,
3313 +            unsigned char index)
3314 +{
3315 +       return __addip((struct ip_set_iphash *) set->data,
3316 +                      ntohl(flags[index] & IPSET_SRC
3317 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
3318 +                               ? ip_hdr(skb)->saddr
3319 +                               : ip_hdr(skb)->daddr),
3320 +#else
3321 +                               ? skb->nh.iph->saddr
3322 +                               : skb->nh.iph->daddr),
3323 +#endif
3324 +                      hash_ip);
3325 +}
3326 +
3327 +static int retry(struct ip_set *set)
3328 +{
3329 +       struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3330 +       ip_set_ip_t hash_ip, *elem;
3331 +       void *members;
3332 +       u_int32_t i, hashsize = map->hashsize;
3333 +       int res;
3334 +       struct ip_set_iphash *tmp;
3335 +
3336 +       if (map->resize == 0)
3337 +               return -ERANGE;
3338 +
3339 +    again:
3340 +       res = 0;
3341 +
3342 +       /* Calculate new hash size */
3343 +       hashsize += (hashsize * map->resize)/100;
3344 +       if (hashsize == map->hashsize)
3345 +               hashsize++;
3346 +
3347 +       ip_set_printk("rehashing of set %s triggered: "
3348 +                     "hashsize grows from %u to %u",
3349 +                     set->name, map->hashsize, hashsize);
3350 +
3351 +       tmp = kmalloc(sizeof(struct ip_set_iphash)
3352 +                     + map->probes * sizeof(uint32_t), GFP_ATOMIC);
3353 +       if (!tmp) {
3354 +               DP("out of memory for %zu bytes",
3355 +                  sizeof(struct ip_set_iphash)
3356 +                  + map->probes * sizeof(uint32_t));
3357 +               return -ENOMEM;
3358 +       }
3359 +       tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
3360 +       if (!tmp->members) {
3361 +               DP("out of memory for %zu bytes", hashsize * sizeof(ip_set_ip_t));
3362 +               kfree(tmp);
3363 +               return -ENOMEM;
3364 +       }
3365 +       tmp->hashsize = hashsize;
3366 +       tmp->elements = 0;
3367 +       tmp->probes = map->probes;
3368 +       tmp->resize = map->resize;
3369 +       tmp->netmask = map->netmask;
3370 +       memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
3371 +
3372 +       write_lock_bh(&set->lock);
3373 +       map = (struct ip_set_iphash *) set->data; /* Play safe */
3374 +       for (i = 0; i < map->hashsize && res == 0; i++) {
3375 +               elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
3376 +               if (*elem)
3377 +                       res = __addip(tmp, *elem, &hash_ip);
3378 +       }
3379 +       if (res) {
3380 +               /* Failure, try again */
3381 +               write_unlock_bh(&set->lock);
3382 +               harray_free(tmp->members);
3383 +               kfree(tmp);
3384 +               goto again;
3385 +       }
3386 +
3387 +       /* Success at resizing! */
3388 +       members = map->members;
3389 +
3390 +       map->hashsize = tmp->hashsize;
3391 +       map->members = tmp->members;
3392 +       write_unlock_bh(&set->lock);
3393 +
3394 +       harray_free(members);
3395 +       kfree(tmp);
3396 +
3397 +       return 0;
3398 +}
3399 +
3400 +static inline int
3401 +__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
3402 +{
3403 +       struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3404 +       ip_set_ip_t id, *elem;
3405 +
3406 +       if (!ip)
3407 +               return -ERANGE;
3408 +
3409 +       id = hash_id(set, ip, hash_ip);
3410 +       if (id == UINT_MAX)
3411 +               return -EEXIST;
3412 +
3413 +       elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
3414 +       *elem = 0;
3415 +       map->elements--;
3416 +
3417 +       return 0;
3418 +}
3419 +
3420 +static int
3421 +delip(struct ip_set *set, const void *data, size_t size,
3422 +        ip_set_ip_t *hash_ip)
3423 +{
3424 +       struct ip_set_req_iphash *req =
3425 +           (struct ip_set_req_iphash *) data;
3426 +
3427 +       if (size != sizeof(struct ip_set_req_iphash)) {
3428 +               ip_set_printk("data length wrong (want %zu, have %zu)",
3429 +                             sizeof(struct ip_set_req_iphash),
3430 +                             size);
3431 +               return -EINVAL;
3432 +       }
3433 +       return __delip(set, req->ip, hash_ip);
3434 +}
3435 +
3436 +static int
3437 +delip_kernel(struct ip_set *set,
3438 +            const struct sk_buff *skb,
3439 +            ip_set_ip_t *hash_ip,
3440 +            const u_int32_t *flags,
3441 +            unsigned char index)
3442 +{
3443 +       return __delip(set,
3444 +                      ntohl(flags[index] & IPSET_SRC
3445 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
3446 +                               ? ip_hdr(skb)->saddr
3447 +                               : ip_hdr(skb)->daddr),
3448 +#else
3449 +                               ? skb->nh.iph->saddr
3450 +                               : skb->nh.iph->daddr),
3451 +#endif
3452 +                      hash_ip);
3453 +}
3454 +
3455 +static int create(struct ip_set *set, const void *data, size_t size)
3456 +{
3457 +       struct ip_set_req_iphash_create *req =
3458 +           (struct ip_set_req_iphash_create *) data;
3459 +       struct ip_set_iphash *map;
3460 +       uint16_t i;
3461 +
3462 +       if (size != sizeof(struct ip_set_req_iphash_create)) {
3463 +               ip_set_printk("data length wrong (want %zu, have %zu)",
3464 +                              sizeof(struct ip_set_req_iphash_create),
3465 +                              size);
3466 +               return -EINVAL;
3467 +       }
3468 +
3469 +       if (req->hashsize < 1) {
3470 +               ip_set_printk("hashsize too small");
3471 +               return -ENOEXEC;
3472 +       }
3473 +
3474 +       if (req->probes < 1) {
3475 +               ip_set_printk("probes too small");
3476 +               return -ENOEXEC;
3477 +       }
3478 +
3479 +       map = kmalloc(sizeof(struct ip_set_iphash)
3480 +                     + req->probes * sizeof(uint32_t), GFP_KERNEL);
3481 +       if (!map) {
3482 +               DP("out of memory for %zu bytes",
3483 +                  sizeof(struct ip_set_iphash)
3484 +                  + req->probes * sizeof(uint32_t));
3485 +               return -ENOMEM;
3486 +       }
3487 +       for (i = 0; i < req->probes; i++)
3488 +               get_random_bytes(((uint32_t *) map->initval)+i, 4);
3489 +       map->elements = 0;
3490 +       map->hashsize = req->hashsize;
3491 +       map->probes = req->probes;
3492 +       map->resize = req->resize;
3493 +       map->netmask = req->netmask;
3494 +       map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
3495 +       if (!map->members) {
3496 +               DP("out of memory for %zu bytes", map->hashsize * sizeof(ip_set_ip_t));
3497 +               kfree(map);
3498 +               return -ENOMEM;
3499 +       }
3500 +
3501 +       set->data = map;
3502 +       return 0;
3503 +}
3504 +
3505 +static void destroy(struct ip_set *set)
3506 +{
3507 +       struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3508 +
3509 +       harray_free(map->members);
3510 +       kfree(map);
3511 +
3512 +       set->data = NULL;
3513 +}
3514 +
3515 +static void flush(struct ip_set *set)
3516 +{
3517 +       struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3518 +       harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
3519 +       map->elements = 0;
3520 +}
3521 +
3522 +static void list_header(const struct ip_set *set, void *data)
3523 +{
3524 +       struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3525 +       struct ip_set_req_iphash_create *header =
3526 +           (struct ip_set_req_iphash_create *) data;
3527 +
3528 +       header->hashsize = map->hashsize;
3529 +       header->probes = map->probes;
3530 +       header->resize = map->resize;
3531 +       header->netmask = map->netmask;
3532 +}
3533 +
3534 +static int list_members_size(const struct ip_set *set)
3535 +{
3536 +       struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3537 +
3538 +       return (map->hashsize * sizeof(ip_set_ip_t));
3539 +}
3540 +
3541 +static void list_members(const struct ip_set *set, void *data)
3542 +{
3543 +       struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3544 +       ip_set_ip_t i, *elem;
3545 +
3546 +       for (i = 0; i < map->hashsize; i++) {
3547 +               elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
3548 +               ((ip_set_ip_t *)data)[i] = *elem;
3549 +       }
3550 +}
3551 +
3552 +static struct ip_set_type ip_set_iphash = {
3553 +       .typename               = SETTYPE_NAME,
3554 +       .features               = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
3555 +       .protocol_version       = IP_SET_PROTOCOL_VERSION,
3556 +       .create                 = &create,
3557 +       .destroy                = &destroy,
3558 +       .flush                  = &flush,
3559 +       .reqsize                = sizeof(struct ip_set_req_iphash),
3560 +       .addip                  = &addip,
3561 +       .addip_kernel           = &addip_kernel,
3562 +       .retry                  = &retry,
3563 +       .delip                  = &delip,
3564 +       .delip_kernel           = &delip_kernel,
3565 +       .testip                 = &testip,
3566 +       .testip_kernel          = &testip_kernel,
3567 +       .header_size            = sizeof(struct ip_set_req_iphash_create),
3568 +       .list_header            = &list_header,
3569 +       .list_members_size      = &list_members_size,
3570 +       .list_members           = &list_members,
3571 +       .me                     = THIS_MODULE,
3572 +};
3573 +
3574 +MODULE_LICENSE("GPL");
3575 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
3576 +MODULE_DESCRIPTION("iphash type of IP sets");
3577 +module_param(limit, int, 0600);
3578 +MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
3579 +
3580 +static int __init ip_set_iphash_init(void)
3581 +{
3582 +       return ip_set_register_set_type(&ip_set_iphash);
3583 +}
3584 +
3585 +static void __exit ip_set_iphash_fini(void)
3586 +{
3587 +       /* FIXME: possible race with ip_set_create() */
3588 +       ip_set_unregister_set_type(&ip_set_iphash);
3589 +}
3590 +
3591 +module_init(ip_set_iphash_init);
3592 +module_exit(ip_set_iphash_fini);
3593 --- /dev/null
3594 +++ b/net/ipv4/netfilter/ip_set_ipmap.c
3595 @@ -0,0 +1,336 @@
3596 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
3597 + *                         Patrick Schaaf <bof@bof.de>
3598 + * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
3599 + *
3600 + * This program is free software; you can redistribute it and/or modify
3601 + * it under the terms of the GNU General Public License version 2 as
3602 + * published by the Free Software Foundation.
3603 + */
3604 +
3605 +/* Kernel module implementing an IP set type: the single bitmap type */
3606 +
3607 +#include <linux/module.h>
3608 +#include <linux/ip.h>
3609 +#include <linux/skbuff.h>
3610 +#include <linux/version.h>
3611 +#include <linux/netfilter_ipv4/ip_tables.h>
3612 +#include <linux/netfilter_ipv4/ip_set.h>
3613 +#include <linux/errno.h>
3614 +#include <asm/uaccess.h>
3615 +#include <asm/bitops.h>
3616 +#include <linux/spinlock.h>
3617 +
3618 +#include <linux/netfilter_ipv4/ip_set_ipmap.h>
3619 +
3620 +static inline ip_set_ip_t
3621 +ip_to_id(const struct ip_set_ipmap *map, ip_set_ip_t ip)
3622 +{
3623 +       return (ip - map->first_ip)/map->hosts;
3624 +}
3625 +
3626 +static inline int
3627 +__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
3628 +{
3629 +       struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3630 +
3631 +       if (ip < map->first_ip || ip > map->last_ip)
3632 +               return -ERANGE;
3633 +
3634 +       *hash_ip = ip & map->netmask;
3635 +       DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
3636 +          set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
3637 +       return !!test_bit(ip_to_id(map, *hash_ip), map->members);
3638 +}
3639 +
3640 +static int
3641 +testip(struct ip_set *set, const void *data, size_t size,
3642 +       ip_set_ip_t *hash_ip)
3643 +{
3644 +       struct ip_set_req_ipmap *req =
3645 +           (struct ip_set_req_ipmap *) data;
3646 +
3647 +       if (size != sizeof(struct ip_set_req_ipmap)) {
3648 +               ip_set_printk("data length wrong (want %zu, have %zu)",
3649 +                             sizeof(struct ip_set_req_ipmap),
3650 +                             size);
3651 +               return -EINVAL;
3652 +       }
3653 +       return __testip(set, req->ip, hash_ip);
3654 +}
3655 +
3656 +static int
3657 +testip_kernel(struct ip_set *set,
3658 +             const struct sk_buff *skb,
3659 +             ip_set_ip_t *hash_ip,
3660 +             const u_int32_t *flags,
3661 +             unsigned char index)
3662 +{
3663 +       int res =  __testip(set,
3664 +                       ntohl(flags[index] & IPSET_SRC
3665 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
3666 +                               ? ip_hdr(skb)->saddr
3667 +                               : ip_hdr(skb)->daddr),
3668 +#else
3669 +                               ? skb->nh.iph->saddr
3670 +                               : skb->nh.iph->daddr),
3671 +#endif
3672 +                       hash_ip);
3673 +       return (res < 0 ? 0 : res);
3674 +}
3675 +
3676 +static inline int
3677 +__addip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
3678 +{
3679 +       struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3680 +
3681 +       if (ip < map->first_ip || ip > map->last_ip)
3682 +               return -ERANGE;
3683 +
3684 +       *hash_ip = ip & map->netmask;
3685 +       DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
3686 +       if (test_and_set_bit(ip_to_id(map, *hash_ip), map->members))
3687 +               return -EEXIST;
3688 +
3689 +       return 0;
3690 +}
3691 +
3692 +static int
3693 +addip(struct ip_set *set, const void *data, size_t size,
3694 +      ip_set_ip_t *hash_ip)
3695 +{
3696 +       struct ip_set_req_ipmap *req =
3697 +           (struct ip_set_req_ipmap *) data;
3698 +
3699 +       if (size != sizeof(struct ip_set_req_ipmap)) {
3700 +               ip_set_printk("data length wrong (want %zu, have %zu)",
3701 +                             sizeof(struct ip_set_req_ipmap),
3702 +                             size);
3703 +               return -EINVAL;
3704 +       }
3705 +       DP("%u.%u.%u.%u", HIPQUAD(req->ip));
3706 +       return __addip(set, req->ip, hash_ip);
3707 +}
3708 +
3709 +static int
3710 +addip_kernel(struct ip_set *set,
3711 +            const struct sk_buff *skb,
3712 +            ip_set_ip_t *hash_ip,
3713 +            const u_int32_t *flags,
3714 +            unsigned char index)
3715 +{
3716 +       return __addip(set,
3717 +                      ntohl(flags[index] & IPSET_SRC
3718 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
3719 +                               ? ip_hdr(skb)->saddr
3720 +                               : ip_hdr(skb)->daddr),
3721 +#else
3722 +                               ? skb->nh.iph->saddr
3723 +                               : skb->nh.iph->daddr),
3724 +#endif
3725 +                      hash_ip);
3726 +}
3727 +
3728 +static inline int
3729 +__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
3730 +{
3731 +       struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3732 +
3733 +       if (ip < map->first_ip || ip > map->last_ip)
3734 +               return -ERANGE;
3735 +
3736 +       *hash_ip = ip & map->netmask;
3737 +       DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
3738 +       if (!test_and_clear_bit(ip_to_id(map, *hash_ip), map->members))
3739 +               return -EEXIST;
3740 +
3741 +       return 0;
3742 +}
3743 +
3744 +static int
3745 +delip(struct ip_set *set, const void *data, size_t size,
3746 +      ip_set_ip_t *hash_ip)
3747 +{
3748 +       struct ip_set_req_ipmap *req =
3749 +           (struct ip_set_req_ipmap *) data;
3750 +
3751 +       if (size != sizeof(struct ip_set_req_ipmap)) {
3752 +               ip_set_printk("data length wrong (want %zu, have %zu)",
3753 +                             sizeof(struct ip_set_req_ipmap),
3754 +                             size);
3755 +               return -EINVAL;
3756 +       }
3757 +       return __delip(set, req->ip, hash_ip);
3758 +}
3759 +
3760 +static int
3761 +delip_kernel(struct ip_set *set,
3762 +            const struct sk_buff *skb,
3763 +            ip_set_ip_t *hash_ip,
3764 +            const u_int32_t *flags,
3765 +            unsigned char index)
3766 +{
3767 +       return __delip(set,
3768 +                      ntohl(flags[index] & IPSET_SRC
3769 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
3770 +                               ? ip_hdr(skb)->saddr
3771 +                               : ip_hdr(skb)->daddr),
3772 +#else
3773 +                               ? skb->nh.iph->saddr
3774 +                               : skb->nh.iph->daddr),
3775 +#endif
3776 +                      hash_ip);
3777 +}
3778 +
3779 +static int create(struct ip_set *set, const void *data, size_t size)
3780 +{
3781 +       int newbytes;
3782 +       struct ip_set_req_ipmap_create *req =
3783 +           (struct ip_set_req_ipmap_create *) data;
3784 +       struct ip_set_ipmap *map;
3785 +
3786 +       if (size != sizeof(struct ip_set_req_ipmap_create)) {
3787 +               ip_set_printk("data length wrong (want %zu, have %zu)",
3788 +                             sizeof(struct ip_set_req_ipmap_create),
3789 +                             size);
3790 +               return -EINVAL;
3791 +       }
3792 +
3793 +       DP("from %u.%u.%u.%u to %u.%u.%u.%u",
3794 +          HIPQUAD(req->from), HIPQUAD(req->to));
3795 +
3796 +       if (req->from > req->to) {
3797 +               DP("bad ip range");
3798 +               return -ENOEXEC;
3799 +       }
3800 +
3801 +       map = kmalloc(sizeof(struct ip_set_ipmap), GFP_KERNEL);
3802 +       if (!map) {
3803 +               DP("out of memory for %zu bytes",
3804 +                  sizeof(struct ip_set_ipmap));
3805 +               return -ENOMEM;
3806 +       }
3807 +       map->first_ip = req->from;
3808 +       map->last_ip = req->to;
3809 +       map->netmask = req->netmask;
3810 +
3811 +       if (req->netmask == 0xFFFFFFFF) {
3812 +               map->hosts = 1;
3813 +               map->sizeid = map->last_ip - map->first_ip + 1;
3814 +       } else {
3815 +               unsigned int mask_bits, netmask_bits;
3816 +               ip_set_ip_t mask;
3817 +
3818 +               map->first_ip &= map->netmask;  /* Should we better bark? */
3819 +
3820 +               mask = range_to_mask(map->first_ip, map->last_ip, &mask_bits);
3821 +               netmask_bits = mask_to_bits(map->netmask);
3822 +
3823 +               if ((!mask && (map->first_ip || map->last_ip != 0xFFFFFFFF))
3824 +                   || netmask_bits <= mask_bits)
3825 +                       return -ENOEXEC;
3826 +
3827 +               DP("mask_bits %u, netmask_bits %u",
3828 +                  mask_bits, netmask_bits);
3829 +               map->hosts = 2 << (32 - netmask_bits - 1);
3830 +               map->sizeid = 2 << (netmask_bits - mask_bits - 1);
3831 +       }
3832 +       if (map->sizeid > MAX_RANGE + 1) {
3833 +               ip_set_printk("range too big (max %d addresses)",
3834 +                              MAX_RANGE+1);
3835 +               kfree(map);
3836 +               return -ENOEXEC;
3837 +       }
3838 +       DP("hosts %u, sizeid %u", map->hosts, map->sizeid);
3839 +       newbytes = bitmap_bytes(0, map->sizeid - 1);
3840 +       map->members = kmalloc(newbytes, GFP_KERNEL);
3841 +       if (!map->members) {
3842 +               DP("out of memory for %d bytes", newbytes);
3843 +               kfree(map);
3844 +               return -ENOMEM;
3845 +       }
3846 +       memset(map->members, 0, newbytes);
3847 +
3848 +       set->data = map;
3849 +       return 0;
3850 +}
3851 +
3852 +static void destroy(struct ip_set *set)
3853 +{
3854 +       struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3855 +
3856 +       kfree(map->members);
3857 +       kfree(map);
3858 +
3859 +       set->data = NULL;
3860 +}
3861 +
3862 +static void flush(struct ip_set *set)
3863 +{
3864 +       struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3865 +       memset(map->members, 0, bitmap_bytes(0, map->sizeid - 1));
3866 +}
3867 +
3868 +static void list_header(const struct ip_set *set, void *data)
3869 +{
3870 +       struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3871 +       struct ip_set_req_ipmap_create *header =
3872 +           (struct ip_set_req_ipmap_create *) data;
3873 +
3874 +       header->from = map->first_ip;
3875 +       header->to = map->last_ip;
3876 +       header->netmask = map->netmask;
3877 +}
3878 +
3879 +static int list_members_size(const struct ip_set *set)
3880 +{
3881 +       struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3882 +
3883 +       return bitmap_bytes(0, map->sizeid - 1);
3884 +}
3885 +
3886 +static void list_members(const struct ip_set *set, void *data)
3887 +{
3888 +       struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3889 +       int bytes = bitmap_bytes(0, map->sizeid - 1);
3890 +
3891 +       memcpy(data, map->members, bytes);
3892 +}
3893 +
3894 +static struct ip_set_type ip_set_ipmap = {
3895 +       .typename               = SETTYPE_NAME,
3896 +       .features               = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
3897 +       .protocol_version       = IP_SET_PROTOCOL_VERSION,
3898 +       .create                 = &create,
3899 +       .destroy                = &destroy,
3900 +       .flush                  = &flush,
3901 +       .reqsize                = sizeof(struct ip_set_req_ipmap),
3902 +       .addip                  = &addip,
3903 +       .addip_kernel           = &addip_kernel,
3904 +       .delip                  = &delip,
3905 +       .delip_kernel           = &delip_kernel,
3906 +       .testip                 = &testip,
3907 +       .testip_kernel          = &testip_kernel,
3908 +       .header_size            = sizeof(struct ip_set_req_ipmap_create),
3909 +       .list_header            = &list_header,
3910 +       .list_members_size      = &list_members_size,
3911 +       .list_members           = &list_members,
3912 +       .me                     = THIS_MODULE,
3913 +};
3914 +
3915 +MODULE_LICENSE("GPL");
3916 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
3917 +MODULE_DESCRIPTION("ipmap type of IP sets");
3918 +
3919 +static int __init ip_set_ipmap_init(void)
3920 +{
3921 +       return ip_set_register_set_type(&ip_set_ipmap);
3922 +}
3923 +
3924 +static void __exit ip_set_ipmap_fini(void)
3925 +{
3926 +       /* FIXME: possible race with ip_set_create() */
3927 +       ip_set_unregister_set_type(&ip_set_ipmap);
3928 +}
3929 +
3930 +module_init(ip_set_ipmap_init);
3931 +module_exit(ip_set_ipmap_fini);
3932 --- /dev/null
3933 +++ b/net/ipv4/netfilter/ip_set_ipporthash.c
3934 @@ -0,0 +1,581 @@
3935 +/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
3936 + *
3937 + * This program is free software; you can redistribute it and/or modify
3938 + * it under the terms of the GNU General Public License version 2 as
3939 + * published by the Free Software Foundation.
3940 + */
3941 +
3942 +/* Kernel module implementing an ip+port hash set */
3943 +
3944 +#include <linux/module.h>
3945 +#include <linux/ip.h>
3946 +#include <linux/tcp.h>
3947 +#include <linux/udp.h>
3948 +#include <linux/skbuff.h>
3949 +#include <linux/version.h>
3950 +#include <linux/jhash.h>
3951 +#include <linux/netfilter_ipv4/ip_tables.h>
3952 +#include <linux/netfilter_ipv4/ip_set.h>
3953 +#include <linux/errno.h>
3954 +#include <asm/uaccess.h>
3955 +#include <asm/bitops.h>
3956 +#include <linux/spinlock.h>
3957 +#include <linux/vmalloc.h>
3958 +#include <linux/random.h>
3959 +
3960 +#include <net/ip.h>
3961 +
3962 +#include <linux/netfilter_ipv4/ip_set_malloc.h>
3963 +#include <linux/netfilter_ipv4/ip_set_ipporthash.h>
3964 +
3965 +static int limit = MAX_RANGE;
3966 +
3967 +/* We must handle non-linear skbs */
3968 +static inline ip_set_ip_t
3969 +get_port(const struct sk_buff *skb, u_int32_t flags)
3970 +{
3971 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
3972 +       struct iphdr *iph = ip_hdr(skb);
3973 +#else
3974 +       struct iphdr *iph = skb->nh.iph;
3975 +#endif
3976 +       u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
3977 +
3978 +       switch (iph->protocol) {
3979 +       case IPPROTO_TCP: {
3980 +               struct tcphdr tcph;
3981 +
3982 +               /* See comments at tcp_match in ip_tables.c */
3983 +               if (offset)
3984 +                       return INVALID_PORT;
3985 +
3986 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
3987 +               if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &tcph, sizeof(tcph)) < 0)
3988 +#else
3989 +               if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &tcph, sizeof(tcph)) < 0)
3990 +#endif
3991 +                       /* No choice either */
3992 +                       return INVALID_PORT;
3993 +
3994 +               return ntohs(flags & IPSET_SRC ?
3995 +                            tcph.source : tcph.dest);
3996 +           }
3997 +       case IPPROTO_UDP: {
3998 +               struct udphdr udph;
3999 +
4000 +               if (offset)
4001 +                       return INVALID_PORT;
4002 +
4003 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4004 +               if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &udph, sizeof(udph)) < 0)
4005 +#else
4006 +               if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &udph, sizeof(udph)) < 0)
4007 +#endif
4008 +                       /* No choice either */
4009 +                       return INVALID_PORT;
4010 +
4011 +               return ntohs(flags & IPSET_SRC ?
4012 +                            udph.source : udph.dest);
4013 +           }
4014 +       default:
4015 +               return INVALID_PORT;
4016 +       }
4017 +}
4018 +
4019 +static inline __u32
4020 +jhash_ip(const struct ip_set_ipporthash *map, uint16_t i, ip_set_ip_t ip)
4021 +{
4022 +       return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
4023 +}
4024 +
4025 +#define HASH_IP(map, ip, port) (port + ((ip - ((map)->first_ip)) << 16))
4026 +
4027 +static inline __u32
4028 +hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
4029 +       ip_set_ip_t *hash_ip)
4030 +{
4031 +       struct ip_set_ipporthash *map =
4032 +               (struct ip_set_ipporthash *) set->data;
4033 +       __u32 id;
4034 +       u_int16_t i;
4035 +       ip_set_ip_t *elem;
4036 +
4037 +       *hash_ip = HASH_IP(map, ip, port);
4038 +       DP("set: %s, ipport:%u.%u.%u.%u:%u, %u.%u.%u.%u",
4039 +          set->name, HIPQUAD(ip), port, HIPQUAD(*hash_ip));
4040 +
4041 +       for (i = 0; i < map->probes; i++) {
4042 +               id = jhash_ip(map, i, *hash_ip) % map->hashsize;
4043 +               DP("hash key: %u", id);
4044 +               elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
4045 +               if (*elem == *hash_ip)
4046 +                       return id;
4047 +               /* No shortcut when testing - there may be deleted
4048 +                * entries. */
4049 +       }
4050 +       return UINT_MAX;
4051 +}
4052 +
4053 +static inline int
4054 +__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
4055 +        ip_set_ip_t *hash_ip)
4056 +{
4057 +       struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
4058 +
4059 +       if (ip < map->first_ip || ip > map->last_ip)
4060 +               return -ERANGE;
4061 +
4062 +       return (hash_id(set, ip, port, hash_ip) != UINT_MAX);
4063 +}
4064 +
4065 +static int
4066 +testip(struct ip_set *set, const void *data, size_t size,
4067 +       ip_set_ip_t *hash_ip)
4068 +{
4069 +       struct ip_set_req_ipporthash *req =
4070 +           (struct ip_set_req_ipporthash *) data;
4071 +
4072 +       if (size != sizeof(struct ip_set_req_ipporthash)) {
4073 +               ip_set_printk("data length wrong (want %zu, have %zu)",
4074 +                             sizeof(struct ip_set_req_ipporthash),
4075 +                             size);
4076 +               return -EINVAL;
4077 +       }
4078 +       return __testip(set, req->ip, req->port, hash_ip);
4079 +}
4080 +
4081 +static int
4082 +testip_kernel(struct ip_set *set,
4083 +             const struct sk_buff *skb,
4084 +             ip_set_ip_t *hash_ip,
4085 +             const u_int32_t *flags,
4086 +             unsigned char index)
4087 +{
4088 +       ip_set_ip_t port;
4089 +       int res;
4090 +
4091 +       if (flags[index+1] == 0)
4092 +               return 0;
4093 +
4094 +       port = get_port(skb, flags[index+1]);
4095 +
4096 +       DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
4097 +          flags[index] & IPSET_SRC ? "SRC" : "DST",
4098 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4099 +          NIPQUAD(ip_hdr(skb)->saddr),
4100 +          NIPQUAD(ip_hdr(skb)->daddr));
4101 +#else
4102 +          NIPQUAD(skb->nh.iph->saddr),
4103 +          NIPQUAD(skb->nh.iph->daddr));
4104 +#endif
4105 +       DP("flag %s port %u",
4106 +          flags[index+1] & IPSET_SRC ? "SRC" : "DST",
4107 +          port);
4108 +       if (port == INVALID_PORT)
4109 +               return 0;
4110 +
4111 +       res =  __testip(set,
4112 +                       ntohl(flags[index] & IPSET_SRC
4113 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4114 +                                       ? ip_hdr(skb)->saddr
4115 +                                       : ip_hdr(skb)->daddr),
4116 +#else
4117 +                                       ? skb->nh.iph->saddr
4118 +                                       : skb->nh.iph->daddr),
4119 +#endif
4120 +                       port,
4121 +                       hash_ip);
4122 +       return (res < 0 ? 0 : res);
4123 +
4124 +}
4125 +
4126 +static inline int
4127 +__add_haship(struct ip_set_ipporthash *map, ip_set_ip_t hash_ip)
4128 +{
4129 +       __u32 probe;
4130 +       u_int16_t i;
4131 +       ip_set_ip_t *elem;
4132 +
4133 +       for (i = 0; i < map->probes; i++) {
4134 +               probe = jhash_ip(map, i, hash_ip) % map->hashsize;
4135 +               elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
4136 +               if (*elem == hash_ip)
4137 +                       return -EEXIST;
4138 +               if (!*elem) {
4139 +                       *elem = hash_ip;
4140 +                       map->elements++;
4141 +                       return 0;
4142 +               }
4143 +       }
4144 +       /* Trigger rehashing */
4145 +       return -EAGAIN;
4146 +}
4147 +
4148 +static inline int
4149 +__addip(struct ip_set_ipporthash *map, ip_set_ip_t ip, ip_set_ip_t port,
4150 +       ip_set_ip_t *hash_ip)
4151 +{
4152 +       if (map->elements > limit)
4153 +               return -ERANGE;
4154 +       if (ip < map->first_ip || ip > map->last_ip)
4155 +               return -ERANGE;
4156 +
4157 +       *hash_ip = HASH_IP(map, ip, port);
4158 +
4159 +       return __add_haship(map, *hash_ip);
4160 +}
4161 +
4162 +static int
4163 +addip(struct ip_set *set, const void *data, size_t size,
4164 +        ip_set_ip_t *hash_ip)
4165 +{
4166 +       struct ip_set_req_ipporthash *req =
4167 +           (struct ip_set_req_ipporthash *) data;
4168 +
4169 +       if (size != sizeof(struct ip_set_req_ipporthash)) {
4170 +               ip_set_printk("data length wrong (want %zu, have %zu)",
4171 +                             sizeof(struct ip_set_req_ipporthash),
4172 +                             size);
4173 +               return -EINVAL;
4174 +       }
4175 +       return __addip((struct ip_set_ipporthash *) set->data,
4176 +                       req->ip, req->port, hash_ip);
4177 +}
4178 +
4179 +static int
4180 +addip_kernel(struct ip_set *set,
4181 +            const struct sk_buff *skb,
4182 +            ip_set_ip_t *hash_ip,
4183 +            const u_int32_t *flags,
4184 +            unsigned char index)
4185 +{
4186 +       ip_set_ip_t port;
4187 +
4188 +       if (flags[index+1] == 0)
4189 +               return -EINVAL;
4190 +
4191 +       port = get_port(skb, flags[index+1]);
4192 +
4193 +       DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
4194 +          flags[index] & IPSET_SRC ? "SRC" : "DST",
4195 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4196 +          NIPQUAD(ip_hdr(skb)->saddr),
4197 +          NIPQUAD(ip_hdr(skb)->daddr));
4198 +#else
4199 +          NIPQUAD(skb->nh.iph->saddr),
4200 +          NIPQUAD(skb->nh.iph->daddr));
4201 +#endif
4202 +       DP("flag %s port %u",
4203 +          flags[index+1] & IPSET_SRC ? "SRC" : "DST",
4204 +          port);
4205 +       if (port == INVALID_PORT)
4206 +               return -EINVAL;
4207 +
4208 +       return __addip((struct ip_set_ipporthash *) set->data,
4209 +                      ntohl(flags[index] & IPSET_SRC
4210 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4211 +                               ? ip_hdr(skb)->saddr
4212 +                               : ip_hdr(skb)->daddr),
4213 +#else
4214 +                               ? skb->nh.iph->saddr
4215 +                               : skb->nh.iph->daddr),
4216 +#endif
4217 +                      port,
4218 +                      hash_ip);
4219 +}
4220 +
4221 +static int retry(struct ip_set *set)
4222 +{
4223 +       struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
4224 +       ip_set_ip_t *elem;
4225 +       void *members;
4226 +       u_int32_t i, hashsize = map->hashsize;
4227 +       int res;
4228 +       struct ip_set_ipporthash *tmp;
4229 +
4230 +       if (map->resize == 0)
4231 +               return -ERANGE;
4232 +
4233 +    again:
4234 +       res = 0;
4235 +
4236 +       /* Calculate new hash size */
4237 +       hashsize += (hashsize * map->resize)/100;
4238 +       if (hashsize == map->hashsize)
4239 +               hashsize++;
4240 +
4241 +       ip_set_printk("rehashing of set %s triggered: "
4242 +                     "hashsize grows from %u to %u",
4243 +                     set->name, map->hashsize, hashsize);
4244 +
4245 +       tmp = kmalloc(sizeof(struct ip_set_ipporthash)
4246 +                     + map->probes * sizeof(uint32_t), GFP_ATOMIC);
4247 +       if (!tmp) {
4248 +               DP("out of memory for %zu bytes",
4249 +                  sizeof(struct ip_set_ipporthash)
4250 +                  + map->probes * sizeof(uint32_t));
4251 +               return -ENOMEM;
4252 +       }
4253 +       tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
4254 +       if (!tmp->members) {
4255 +               DP("out of memory for %zu bytes", hashsize * sizeof(ip_set_ip_t));
4256 +               kfree(tmp);
4257 +               return -ENOMEM;
4258 +       }
4259 +       tmp->hashsize = hashsize;
4260 +       tmp->elements = 0;
4261 +       tmp->probes = map->probes;
4262 +       tmp->resize = map->resize;
4263 +       tmp->first_ip = map->first_ip;
4264 +       tmp->last_ip = map->last_ip;
4265 +       memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
4266 +
4267 +       write_lock_bh(&set->lock);
4268 +       map = (struct ip_set_ipporthash *) set->data; /* Play safe */
4269 +       for (i = 0; i < map->hashsize && res == 0; i++) {
4270 +               elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
4271 +               if (*elem)
4272 +                       res = __add_haship(tmp, *elem);
4273 +       }
4274 +       if (res) {
4275 +               /* Failure, try again */
4276 +               write_unlock_bh(&set->lock);
4277 +               harray_free(tmp->members);
4278 +               kfree(tmp);
4279 +               goto again;
4280 +       }
4281 +
4282 +       /* Success at resizing! */
4283 +       members = map->members;
4284 +
4285 +       map->hashsize = tmp->hashsize;
4286 +       map->members = tmp->members;
4287 +       write_unlock_bh(&set->lock);
4288 +
4289 +       harray_free(members);
4290 +       kfree(tmp);
4291 +
4292 +       return 0;
4293 +}
4294 +
4295 +static inline int
4296 +__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
4297 +       ip_set_ip_t *hash_ip)
4298 +{
4299 +       struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
4300 +       ip_set_ip_t id;
4301 +       ip_set_ip_t *elem;
4302 +
4303 +       if (ip < map->first_ip || ip > map->last_ip)
4304 +               return -ERANGE;
4305 +
4306 +       id = hash_id(set, ip, port, hash_ip);
4307 +
4308 +       if (id == UINT_MAX)
4309 +               return -EEXIST;
4310 +
4311 +       elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
4312 +       *elem = 0;
4313 +       map->elements--;
4314 +
4315 +       return 0;
4316 +}
4317 +
4318 +static int
4319 +delip(struct ip_set *set, const void *data, size_t size,
4320 +        ip_set_ip_t *hash_ip)
4321 +{
4322 +       struct ip_set_req_ipporthash *req =
4323 +           (struct ip_set_req_ipporthash *) data;
4324 +
4325 +       if (size != sizeof(struct ip_set_req_ipporthash)) {
4326 +               ip_set_printk("data length wrong (want %zu, have %zu)",
4327 +                             sizeof(struct ip_set_req_ipporthash),
4328 +                             size);
4329 +               return -EINVAL;
4330 +       }
4331 +       return __delip(set, req->ip, req->port, hash_ip);
4332 +}
4333 +
4334 +static int
4335 +delip_kernel(struct ip_set *set,
4336 +            const struct sk_buff *skb,
4337 +            ip_set_ip_t *hash_ip,
4338 +            const u_int32_t *flags,
4339 +            unsigned char index)
4340 +{
4341 +       ip_set_ip_t port;
4342 +
4343 +       if (flags[index+1] == 0)
4344 +               return -EINVAL;
4345 +
4346 +       port = get_port(skb, flags[index+1]);
4347 +
4348 +       DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
4349 +          flags[index] & IPSET_SRC ? "SRC" : "DST",
4350 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4351 +          NIPQUAD(ip_hdr(skb)->saddr),
4352 +          NIPQUAD(ip_hdr(skb)->daddr));
4353 +#else
4354 +          NIPQUAD(skb->nh.iph->saddr),
4355 +          NIPQUAD(skb->nh.iph->daddr));
4356 +#endif
4357 +       DP("flag %s port %u",
4358 +          flags[index+1] & IPSET_SRC ? "SRC" : "DST",
4359 +          port);
4360 +       if (port == INVALID_PORT)
4361 +               return -EINVAL;
4362 +
4363 +       return __delip(set,
4364 +                      ntohl(flags[index] & IPSET_SRC
4365 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4366 +                               ? ip_hdr(skb)->saddr
4367 +                               : ip_hdr(skb)->daddr),
4368 +#else
4369 +                               ? skb->nh.iph->saddr
4370 +                               : skb->nh.iph->daddr),
4371 +#endif
4372 +                      port,
4373 +                      hash_ip);
4374 +}
4375 +
4376 +static int create(struct ip_set *set, const void *data, size_t size)
4377 +{
4378 +       struct ip_set_req_ipporthash_create *req =
4379 +           (struct ip_set_req_ipporthash_create *) data;
4380 +       struct ip_set_ipporthash *map;
4381 +       uint16_t i;
4382 +
4383 +       if (size != sizeof(struct ip_set_req_ipporthash_create)) {
4384 +               ip_set_printk("data length wrong (want %zu, have %zu)",
4385 +                              sizeof(struct ip_set_req_ipporthash_create),
4386 +                              size);
4387 +               return -EINVAL;
4388 +       }
4389 +
4390 +       if (req->hashsize < 1) {
4391 +               ip_set_printk("hashsize too small");
4392 +               return -ENOEXEC;
4393 +       }
4394 +
4395 +       if (req->probes < 1) {
4396 +               ip_set_printk("probes too small");
4397 +               return -ENOEXEC;
4398 +       }
4399 +
4400 +       map = kmalloc(sizeof(struct ip_set_ipporthash)
4401 +                     + req->probes * sizeof(uint32_t), GFP_KERNEL);
4402 +       if (!map) {
4403 +               DP("out of memory for %zu bytes",
4404 +                  sizeof(struct ip_set_ipporthash)
4405 +                  + req->probes * sizeof(uint32_t));
4406 +               return -ENOMEM;
4407 +       }
4408 +       for (i = 0; i < req->probes; i++)
4409 +               get_random_bytes(((uint32_t *) map->initval)+i, 4);
4410 +       map->elements = 0;
4411 +       map->hashsize = req->hashsize;
4412 +       map->probes = req->probes;
4413 +       map->resize = req->resize;
4414 +       map->first_ip = req->from;
4415 +       map->last_ip = req->to;
4416 +       map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
4417 +       if (!map->members) {
4418 +               DP("out of memory for %zu bytes", map->hashsize * sizeof(ip_set_ip_t));
4419 +               kfree(map);
4420 +               return -ENOMEM;
4421 +       }
4422 +
4423 +       set->data = map;
4424 +       return 0;
4425 +}
4426 +
4427 +static void destroy(struct ip_set *set)
4428 +{
4429 +       struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
4430 +
4431 +       harray_free(map->members);
4432 +       kfree(map);
4433 +
4434 +       set->data = NULL;
4435 +}
4436 +
4437 +static void flush(struct ip_set *set)
4438 +{
4439 +       struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
4440 +       harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
4441 +       map->elements = 0;
4442 +}
4443 +
4444 +static void list_header(const struct ip_set *set, void *data)
4445 +{
4446 +       struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
4447 +       struct ip_set_req_ipporthash_create *header =
4448 +           (struct ip_set_req_ipporthash_create *) data;
4449 +
4450 +       header->hashsize = map->hashsize;
4451 +       header->probes = map->probes;
4452 +       header->resize = map->resize;
4453 +       header->from = map->first_ip;
4454 +       header->to = map->last_ip;
4455 +}
4456 +
4457 +static int list_members_size(const struct ip_set *set)
4458 +{
4459 +       struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
4460 +
4461 +       return (map->hashsize * sizeof(ip_set_ip_t));
4462 +}
4463 +
4464 +static void list_members(const struct ip_set *set, void *data)
4465 +{
4466 +       struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
4467 +       ip_set_ip_t i, *elem;
4468 +
4469 +       for (i = 0; i < map->hashsize; i++) {
4470 +               elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
4471 +               ((ip_set_ip_t *)data)[i] = *elem;
4472 +       }
4473 +}
4474 +
4475 +static struct ip_set_type ip_set_ipporthash = {
4476 +       .typename               = SETTYPE_NAME,
4477 +       .features               = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_DATA_DOUBLE,
4478 +       .protocol_version       = IP_SET_PROTOCOL_VERSION,
4479 +       .create                 = &create,
4480 +       .destroy                = &destroy,
4481 +       .flush                  = &flush,
4482 +       .reqsize                = sizeof(struct ip_set_req_ipporthash),
4483 +       .addip                  = &addip,
4484 +       .addip_kernel           = &addip_kernel,
4485 +       .retry                  = &retry,
4486 +       .delip                  = &delip,
4487 +       .delip_kernel           = &delip_kernel,
4488 +       .testip                 = &testip,
4489 +       .testip_kernel          = &testip_kernel,
4490 +       .header_size            = sizeof(struct ip_set_req_ipporthash_create),
4491 +       .list_header            = &list_header,
4492 +       .list_members_size      = &list_members_size,
4493 +       .list_members           = &list_members,
4494 +       .me                     = THIS_MODULE,
4495 +};
4496 +
4497 +MODULE_LICENSE("GPL");
4498 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
4499 +MODULE_DESCRIPTION("ipporthash type of IP sets");
4500 +module_param(limit, int, 0600);
4501 +MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
4502 +
4503 +static int __init ip_set_ipporthash_init(void)
4504 +{
4505 +       return ip_set_register_set_type(&ip_set_ipporthash);
4506 +}
4507 +
4508 +static void __exit ip_set_ipporthash_fini(void)
4509 +{
4510 +       /* FIXME: possible race with ip_set_create() */
4511 +       ip_set_unregister_set_type(&ip_set_ipporthash);
4512 +}
4513 +
4514 +module_init(ip_set_ipporthash_init);
4515 +module_exit(ip_set_ipporthash_fini);
4516 --- /dev/null
4517 +++ b/net/ipv4/netfilter/ip_set_iptree.c
4518 @@ -0,0 +1,612 @@
4519 +/* Copyright (C) 2005 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
4520 + *
4521 + * This program is free software; you can redistribute it and/or modify
4522 + * it under the terms of the GNU General Public License version 2 as
4523 + * published by the Free Software Foundation.
4524 + */
4525 +
4526 +/* Kernel module implementing an IP set type: the iptree type */
4527 +
4528 +#include <linux/version.h>
4529 +#include <linux/module.h>
4530 +#include <linux/ip.h>
4531 +#include <linux/skbuff.h>
4532 +#include <linux/slab.h>
4533 +#include <linux/delay.h>
4534 +#include <linux/netfilter_ipv4/ip_tables.h>
4535 +#include <linux/netfilter_ipv4/ip_set.h>
4536 +#include <linux/errno.h>
4537 +#include <asm/uaccess.h>
4538 +#include <asm/bitops.h>
4539 +#include <linux/spinlock.h>
4540 +
4541 +/* Backward compatibility */
4542 +#ifndef __nocast
4543 +#define __nocast
4544 +#endif
4545 +
4546 +#include <linux/netfilter_ipv4/ip_set_iptree.h>
4547 +
4548 +static int limit = MAX_RANGE;
4549 +
4550 +/* Garbage collection interval in seconds: */
4551 +#define IPTREE_GC_TIME         5*60
4552 +/* Sleep this many milliseconds before trying again
4553 + * to delete the gc timer when destroying/flushing a set */
4554 +#define IPTREE_DESTROY_SLEEP   100
4555 +
4556 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
4557 +static struct kmem_cache *branch_cachep;
4558 +static struct kmem_cache *leaf_cachep;
4559 +#else
4560 +static kmem_cache_t *branch_cachep;
4561 +static kmem_cache_t *leaf_cachep;
4562 +#endif
4563 +
4564 +#if defined(__LITTLE_ENDIAN)
4565 +#define ABCD(a,b,c,d,addrp) do {               \
4566 +       a = ((unsigned char *)addrp)[3];        \
4567 +       b = ((unsigned char *)addrp)[2];        \
4568 +       c = ((unsigned char *)addrp)[1];        \
4569 +       d = ((unsigned char *)addrp)[0];        \
4570 +} while (0)
4571 +#elif defined(__BIG_ENDIAN)
4572 +#define ABCD(a,b,c,d,addrp) do {               \
4573 +       a = ((unsigned char *)addrp)[0];        \
4574 +       b = ((unsigned char *)addrp)[1];        \
4575 +       c = ((unsigned char *)addrp)[2];        \
4576 +       d = ((unsigned char *)addrp)[3];        \
4577 +} while (0)
4578 +#else
4579 +#error "Please fix asm/byteorder.h"
4580 +#endif /* __LITTLE_ENDIAN */
4581 +
4582 +#define TESTIP_WALK(map, elem, branch) do {    \
4583 +       if ((map)->tree[elem]) {                \
4584 +               branch = (map)->tree[elem];     \
4585 +       } else                                  \
4586 +               return 0;                       \
4587 +} while (0)
4588 +
4589 +static inline int
4590 +__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
4591 +{
4592 +       struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4593 +       struct ip_set_iptreeb *btree;
4594 +       struct ip_set_iptreec *ctree;
4595 +       struct ip_set_iptreed *dtree;
4596 +       unsigned char a,b,c,d;
4597 +
4598 +       if (!ip)
4599 +               return -ERANGE;
4600 +
4601 +       *hash_ip = ip;
4602 +       ABCD(a, b, c, d, hash_ip);
4603 +       DP("%u %u %u %u timeout %u", a, b, c, d, map->timeout);
4604 +       TESTIP_WALK(map, a, btree);
4605 +       TESTIP_WALK(btree, b, ctree);
4606 +       TESTIP_WALK(ctree, c, dtree);
4607 +       DP("%lu %lu", dtree->expires[d], jiffies);
4608 +       return dtree->expires[d]
4609 +              && (!map->timeout
4610 +                  || time_after(dtree->expires[d], jiffies));
4611 +}
4612 +
4613 +static int
4614 +testip(struct ip_set *set, const void *data, size_t size,
4615 +       ip_set_ip_t *hash_ip)
4616 +{
4617 +       struct ip_set_req_iptree *req =
4618 +           (struct ip_set_req_iptree *) data;
4619 +
4620 +       if (size != sizeof(struct ip_set_req_iptree)) {
4621 +               ip_set_printk("data length wrong (want %zu, have %zu)",
4622 +                             sizeof(struct ip_set_req_iptree),
4623 +                             size);
4624 +               return -EINVAL;
4625 +       }
4626 +       return __testip(set, req->ip, hash_ip);
4627 +}
4628 +
4629 +static int
4630 +testip_kernel(struct ip_set *set,
4631 +             const struct sk_buff *skb,
4632 +             ip_set_ip_t *hash_ip,
4633 +             const u_int32_t *flags,
4634 +             unsigned char index)
4635 +{
4636 +       int res;
4637 +
4638 +       DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
4639 +          flags[index] & IPSET_SRC ? "SRC" : "DST",
4640 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4641 +          NIPQUAD(ip_hdr(skb)->saddr),
4642 +          NIPQUAD(ip_hdr(skb)->daddr));
4643 +#else
4644 +          NIPQUAD(skb->nh.iph->saddr),
4645 +          NIPQUAD(skb->nh.iph->daddr));
4646 +#endif
4647 +
4648 +       res =  __testip(set,
4649 +                       ntohl(flags[index] & IPSET_SRC
4650 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4651 +                               ? ip_hdr(skb)->saddr
4652 +                               : ip_hdr(skb)->daddr),
4653 +#else
4654 +                               ? skb->nh.iph->saddr
4655 +                               : skb->nh.iph->daddr),
4656 +#endif
4657 +                       hash_ip);
4658 +       return (res < 0 ? 0 : res);
4659 +}
4660 +
4661 +#define ADDIP_WALK(map, elem, branch, type, cachep) do {       \
4662 +       if ((map)->tree[elem]) {                                \
4663 +               DP("found %u", elem);                           \
4664 +               branch = (map)->tree[elem];                     \
4665 +       } else {                                                \
4666 +               branch = (type *)                               \
4667 +                       kmem_cache_alloc(cachep, GFP_ATOMIC);   \
4668 +               if (branch == NULL)                             \
4669 +                       return -ENOMEM;                         \
4670 +               memset(branch, 0, sizeof(*branch));             \
4671 +               (map)->tree[elem] = branch;                     \
4672 +               DP("alloc %u", elem);                           \
4673 +       }                                                       \
4674 +} while (0)
4675 +
4676 +static inline int
4677 +__addip(struct ip_set *set, ip_set_ip_t ip, unsigned int timeout,
4678 +       ip_set_ip_t *hash_ip)
4679 +{
4680 +       struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4681 +       struct ip_set_iptreeb *btree;
4682 +       struct ip_set_iptreec *ctree;
4683 +       struct ip_set_iptreed *dtree;
4684 +       unsigned char a,b,c,d;
4685 +       int ret = 0;
4686 +
4687 +       if (!ip || map->elements >= limit)
4688 +               /* We could call the garbage collector
4689 +                * but it's probably overkill */
4690 +               return -ERANGE;
4691 +
4692 +       *hash_ip = ip;
4693 +       ABCD(a, b, c, d, hash_ip);
4694 +       DP("%u %u %u %u timeout %u", a, b, c, d, timeout);
4695 +       ADDIP_WALK(map, a, btree, struct ip_set_iptreeb, branch_cachep);
4696 +       ADDIP_WALK(btree, b, ctree, struct ip_set_iptreec, branch_cachep);
4697 +       ADDIP_WALK(ctree, c, dtree, struct ip_set_iptreed, leaf_cachep);
4698 +       if (dtree->expires[d]
4699 +           && (!map->timeout || time_after(dtree->expires[d], jiffies)))
4700 +               ret = -EEXIST;
4701 +       dtree->expires[d] = map->timeout ? (timeout * HZ + jiffies) : 1;
4702 +       /* Lottery: I won! */
4703 +       if (dtree->expires[d] == 0)
4704 +               dtree->expires[d] = 1;
4705 +       DP("%u %lu", d, dtree->expires[d]);
4706 +       if (ret == 0)
4707 +               map->elements++;
4708 +       return ret;
4709 +}
4710 +
4711 +static int
4712 +addip(struct ip_set *set, const void *data, size_t size,
4713 +      ip_set_ip_t *hash_ip)
4714 +{
4715 +       struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4716 +       struct ip_set_req_iptree *req =
4717 +               (struct ip_set_req_iptree *) data;
4718 +
4719 +       if (size != sizeof(struct ip_set_req_iptree)) {
4720 +               ip_set_printk("data length wrong (want %zu, have %zu)",
4721 +                             sizeof(struct ip_set_req_iptree),
4722 +                             size);
4723 +               return -EINVAL;
4724 +       }
4725 +       DP("%u.%u.%u.%u %u", HIPQUAD(req->ip), req->timeout);
4726 +       return __addip(set, req->ip,
4727 +                      req->timeout ? req->timeout : map->timeout,
4728 +                      hash_ip);
4729 +}
4730 +
4731 +static int
4732 +addip_kernel(struct ip_set *set,
4733 +            const struct sk_buff *skb,
4734 +            ip_set_ip_t *hash_ip,
4735 +            const u_int32_t *flags,
4736 +            unsigned char index)
4737 +{
4738 +       struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4739 +
4740 +       return __addip(set,
4741 +                      ntohl(flags[index] & IPSET_SRC
4742 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4743 +                               ? ip_hdr(skb)->saddr
4744 +                               : ip_hdr(skb)->daddr),
4745 +#else
4746 +                               ? skb->nh.iph->saddr
4747 +                               : skb->nh.iph->daddr),
4748 +#endif
4749 +                      map->timeout,
4750 +                      hash_ip);
4751 +}
4752 +
4753 +#define DELIP_WALK(map, elem, branch) do {     \
4754 +       if ((map)->tree[elem]) {                \
4755 +               branch = (map)->tree[elem];     \
4756 +       } else                                  \
4757 +               return -EEXIST;                 \
4758 +} while (0)
4759 +
4760 +static inline int
4761 +__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
4762 +{
4763 +       struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4764 +       struct ip_set_iptreeb *btree;
4765 +       struct ip_set_iptreec *ctree;
4766 +       struct ip_set_iptreed *dtree;
4767 +       unsigned char a,b,c,d;
4768 +
4769 +       if (!ip)
4770 +               return -ERANGE;
4771 +
4772 +       *hash_ip = ip;
4773 +       ABCD(a, b, c, d, hash_ip);
4774 +       DELIP_WALK(map, a, btree);
4775 +       DELIP_WALK(btree, b, ctree);
4776 +       DELIP_WALK(ctree, c, dtree);
4777 +
4778 +       if (dtree->expires[d]) {
4779 +               dtree->expires[d] = 0;
4780 +               map->elements--;
4781 +               return 0;
4782 +       }
4783 +       return -EEXIST;
4784 +}
4785 +
4786 +static int
4787 +delip(struct ip_set *set, const void *data, size_t size,
4788 +      ip_set_ip_t *hash_ip)
4789 +{
4790 +       struct ip_set_req_iptree *req =
4791 +           (struct ip_set_req_iptree *) data;
4792 +
4793 +       if (size != sizeof(struct ip_set_req_iptree)) {
4794 +               ip_set_printk("data length wrong (want %zu, have %zu)",
4795 +                             sizeof(struct ip_set_req_iptree),
4796 +                             size);
4797 +               return -EINVAL;
4798 +       }
4799 +       return __delip(set, req->ip, hash_ip);
4800 +}
4801 +
4802 +static int
4803 +delip_kernel(struct ip_set *set,
4804 +            const struct sk_buff *skb,
4805 +            ip_set_ip_t *hash_ip,
4806 +            const u_int32_t *flags,
4807 +            unsigned char index)
4808 +{
4809 +       return __delip(set,
4810 +                      ntohl(flags[index] & IPSET_SRC
4811 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4812 +                               ? ip_hdr(skb)->saddr
4813 +                               : ip_hdr(skb)->daddr),
4814 +#else
4815 +                               ? skb->nh.iph->saddr
4816 +                               : skb->nh.iph->daddr),
4817 +#endif
4818 +                      hash_ip);
4819 +}
4820 +
4821 +#define LOOP_WALK_BEGIN(map, i, branch) \
4822 +       for (i = 0; i < 256; i++) {     \
4823 +               if (!(map)->tree[i])    \
4824 +                       continue;       \
4825 +               branch = (map)->tree[i]
4826 +
4827 +#define LOOP_WALK_END }
4828 +
4829 +static void ip_tree_gc(unsigned long ul_set)
4830 +{
4831 +       struct ip_set *set = (void *) ul_set;
4832 +       struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4833 +       struct ip_set_iptreeb *btree;
4834 +       struct ip_set_iptreec *ctree;
4835 +       struct ip_set_iptreed *dtree;
4836 +       unsigned int a,b,c,d;
4837 +       unsigned char i,j,k;
4838 +
4839 +       i = j = k = 0;
4840 +       DP("gc: %s", set->name);
4841 +       write_lock_bh(&set->lock);
4842 +       LOOP_WALK_BEGIN(map, a, btree);
4843 +       LOOP_WALK_BEGIN(btree, b, ctree);
4844 +       LOOP_WALK_BEGIN(ctree, c, dtree);
4845 +       for (d = 0; d < 256; d++) {
4846 +               if (dtree->expires[d]) {
4847 +                       DP("gc: %u %u %u %u: expires %lu jiffies %lu",
4848 +                           a, b, c, d,
4849 +                           dtree->expires[d], jiffies);
4850 +                       if (map->timeout
4851 +                           && time_before(dtree->expires[d], jiffies)) {
4852 +                               dtree->expires[d] = 0;
4853 +                               map->elements--;
4854 +                       } else
4855 +                               k = 1;
4856 +               }
4857 +       }
4858 +       if (k == 0) {
4859 +               DP("gc: %s: leaf %u %u %u empty",
4860 +                   set->name, a, b, c);
4861 +               kmem_cache_free(leaf_cachep, dtree);
4862 +               ctree->tree[c] = NULL;
4863 +       } else {
4864 +               DP("gc: %s: leaf %u %u %u not empty",
4865 +                   set->name, a, b, c);
4866 +               j = 1;
4867 +               k = 0;
4868 +       }
4869 +       LOOP_WALK_END;
4870 +       if (j == 0) {
4871 +               DP("gc: %s: branch %u %u empty",
4872 +                   set->name, a, b);
4873 +               kmem_cache_free(branch_cachep, ctree);
4874 +               btree->tree[b] = NULL;
4875 +       } else {
4876 +               DP("gc: %s: branch %u %u not empty",
4877 +                   set->name, a, b);
4878 +               i = 1;
4879 +               j = k = 0;
4880 +       }
4881 +       LOOP_WALK_END;
4882 +       if (i == 0) {
4883 +               DP("gc: %s: branch %u empty",
4884 +                   set->name, a);
4885 +               kmem_cache_free(branch_cachep, btree);
4886 +               map->tree[a] = NULL;
4887 +       } else {
4888 +               DP("gc: %s: branch %u not empty",
4889 +                   set->name, a);
4890 +               i = j = k = 0;
4891 +       }
4892 +       LOOP_WALK_END;
4893 +       write_unlock_bh(&set->lock);
4894 +
4895 +       map->gc.expires = jiffies + map->gc_interval * HZ;
4896 +       add_timer(&map->gc);
4897 +}
4898 +
4899 +static inline void init_gc_timer(struct ip_set *set)
4900 +{
4901 +       struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4902 +
4903 +       /* Even if there is no timeout for the entries,
4904 +        * we still have to call gc because delete
4905 +        * does not clean up empty branches */
4906 +       map->gc_interval = IPTREE_GC_TIME;
4907 +       init_timer(&map->gc);
4908 +       map->gc.data = (unsigned long) set;
4909 +       map->gc.function = ip_tree_gc;
4910 +       map->gc.expires = jiffies + map->gc_interval * HZ;
4911 +       add_timer(&map->gc);
4912 +}
4913 +
4914 +static int create(struct ip_set *set, const void *data, size_t size)
4915 +{
4916 +       struct ip_set_req_iptree_create *req =
4917 +           (struct ip_set_req_iptree_create *) data;
4918 +       struct ip_set_iptree *map;
4919 +
4920 +       if (size != sizeof(struct ip_set_req_iptree_create)) {
4921 +               ip_set_printk("data length wrong (want %zu, have %zu)",
4922 +                             sizeof(struct ip_set_req_iptree_create),
4923 +                             size);
4924 +               return -EINVAL;
4925 +       }
4926 +
4927 +       map = kmalloc(sizeof(struct ip_set_iptree), GFP_KERNEL);
4928 +       if (!map) {
4929 +               DP("out of memory for %zu bytes",
4930 +                  sizeof(struct ip_set_iptree));
4931 +               return -ENOMEM;
4932 +       }
4933 +       memset(map, 0, sizeof(*map));
4934 +       map->timeout = req->timeout;
4935 +       map->elements = 0;
4936 +       set->data = map;
4937 +
4938 +       init_gc_timer(set);
4939 +
4940 +       return 0;
4941 +}
4942 +
4943 +static void __flush(struct ip_set_iptree *map)
4944 +{
4945 +       struct ip_set_iptreeb *btree;
4946 +       struct ip_set_iptreec *ctree;
4947 +       struct ip_set_iptreed *dtree;
4948 +       unsigned int a,b,c;
4949 +
4950 +       LOOP_WALK_BEGIN(map, a, btree);
4951 +       LOOP_WALK_BEGIN(btree, b, ctree);
4952 +       LOOP_WALK_BEGIN(ctree, c, dtree);
4953 +       kmem_cache_free(leaf_cachep, dtree);
4954 +       LOOP_WALK_END;
4955 +       kmem_cache_free(branch_cachep, ctree);
4956 +       LOOP_WALK_END;
4957 +       kmem_cache_free(branch_cachep, btree);
4958 +       LOOP_WALK_END;
4959 +       map->elements = 0;
4960 +}
4961 +
4962 +static void destroy(struct ip_set *set)
4963 +{
4964 +       struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4965 +
4966 +       /* gc might be running */
4967 +       while (!del_timer(&map->gc))
4968 +               msleep(IPTREE_DESTROY_SLEEP);
4969 +       __flush(map);
4970 +       kfree(map);
4971 +       set->data = NULL;
4972 +}
4973 +
4974 +static void flush(struct ip_set *set)
4975 +{
4976 +       struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4977 +       unsigned int timeout = map->timeout;
4978 +
4979 +       /* gc might be running */
4980 +       while (!del_timer(&map->gc))
4981 +               msleep(IPTREE_DESTROY_SLEEP);
4982 +       __flush(map);
4983 +       memset(map, 0, sizeof(*map));
4984 +       map->timeout = timeout;
4985 +
4986 +       init_gc_timer(set);
4987 +}
4988 +
4989 +static void list_header(const struct ip_set *set, void *data)
4990 +{
4991 +       struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4992 +       struct ip_set_req_iptree_create *header =
4993 +           (struct ip_set_req_iptree_create *) data;
4994 +
4995 +       header->timeout = map->timeout;
4996 +}
4997 +
4998 +static int list_members_size(const struct ip_set *set)
4999 +{
5000 +       struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
5001 +       struct ip_set_iptreeb *btree;
5002 +       struct ip_set_iptreec *ctree;
5003 +       struct ip_set_iptreed *dtree;
5004 +       unsigned int a,b,c,d;
5005 +       unsigned int count = 0;
5006 +
5007 +       LOOP_WALK_BEGIN(map, a, btree);
5008 +       LOOP_WALK_BEGIN(btree, b, ctree);
5009 +       LOOP_WALK_BEGIN(ctree, c, dtree);
5010 +       for (d = 0; d < 256; d++) {
5011 +               if (dtree->expires[d]
5012 +                   && (!map->timeout || time_after(dtree->expires[d], jiffies)))
5013 +                       count++;
5014 +       }
5015 +       LOOP_WALK_END;
5016 +       LOOP_WALK_END;
5017 +       LOOP_WALK_END;
5018 +
5019 +       DP("members %u", count);
5020 +       return (count * sizeof(struct ip_set_req_iptree));
5021 +}
5022 +
5023 +static void list_members(const struct ip_set *set, void *data)
5024 +{
5025 +       struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
5026 +       struct ip_set_iptreeb *btree;
5027 +       struct ip_set_iptreec *ctree;
5028 +       struct ip_set_iptreed *dtree;
5029 +       unsigned int a,b,c,d;
5030 +       size_t offset = 0;
5031 +       struct ip_set_req_iptree *entry;
5032 +
5033 +       LOOP_WALK_BEGIN(map, a, btree);
5034 +       LOOP_WALK_BEGIN(btree, b, ctree);
5035 +       LOOP_WALK_BEGIN(ctree, c, dtree);
5036 +       for (d = 0; d < 256; d++) {
5037 +               if (dtree->expires[d]
5038 +                   && (!map->timeout || time_after(dtree->expires[d], jiffies))) {
5039 +                       entry = (struct ip_set_req_iptree *)(data + offset);
5040 +                       entry->ip = ((a << 24) | (b << 16) | (c << 8) | d);
5041 +                       entry->timeout = !map->timeout ? 0
5042 +                               : (dtree->expires[d] - jiffies)/HZ;
5043 +                       offset += sizeof(struct ip_set_req_iptree);
5044 +               }
5045 +       }
5046 +       LOOP_WALK_END;
5047 +       LOOP_WALK_END;
5048 +       LOOP_WALK_END;
5049 +}
5050 +
5051 +static struct ip_set_type ip_set_iptree = {
5052 +       .typename               = SETTYPE_NAME,
5053 +       .features               = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
5054 +       .protocol_version       = IP_SET_PROTOCOL_VERSION,
5055 +       .create                 = &create,
5056 +       .destroy                = &destroy,
5057 +       .flush                  = &flush,
5058 +       .reqsize                = sizeof(struct ip_set_req_iptree),
5059 +       .addip                  = &addip,
5060 +       .addip_kernel           = &addip_kernel,
5061 +       .delip                  = &delip,
5062 +       .delip_kernel           = &delip_kernel,
5063 +       .testip                 = &testip,
5064 +       .testip_kernel          = &testip_kernel,
5065 +       .header_size            = sizeof(struct ip_set_req_iptree_create),
5066 +       .list_header            = &list_header,
5067 +       .list_members_size      = &list_members_size,
5068 +       .list_members           = &list_members,
5069 +       .me                     = THIS_MODULE,
5070 +};
5071 +
5072 +MODULE_LICENSE("GPL");
5073 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
5074 +MODULE_DESCRIPTION("iptree type of IP sets");
5075 +module_param(limit, int, 0600);
5076 +MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
5077 +
5078 +static int __init ip_set_iptree_init(void)
5079 +{
5080 +       int ret;
5081 +
5082 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
5083 +       branch_cachep = kmem_cache_create("ip_set_iptreeb",
5084 +                               sizeof(struct ip_set_iptreeb),
5085 +                               0, 0, NULL);
5086 +#else
5087 +       branch_cachep = kmem_cache_create("ip_set_iptreeb",
5088 +                               sizeof(struct ip_set_iptreeb),
5089 +                               0, 0, NULL, NULL);
5090 +#endif
5091 +       if (!branch_cachep) {
5092 +               printk(KERN_ERR "Unable to create ip_set_iptreeb slab cache\n");
5093 +               ret = -ENOMEM;
5094 +               goto out;
5095 +       }
5096 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
5097 +       leaf_cachep = kmem_cache_create("ip_set_iptreed",
5098 +                               sizeof(struct ip_set_iptreed),
5099 +                               0, 0, NULL);
5100 +#else
5101 +       leaf_cachep = kmem_cache_create("ip_set_iptreed",
5102 +                               sizeof(struct ip_set_iptreed),
5103 +                               0, 0, NULL, NULL);
5104 +#endif
5105 +       if (!leaf_cachep) {
5106 +               printk(KERN_ERR "Unable to create ip_set_iptreed slab cache\n");
5107 +               ret = -ENOMEM;
5108 +               goto free_branch;
5109 +       }
5110 +       ret = ip_set_register_set_type(&ip_set_iptree);
5111 +       if (ret == 0)
5112 +               goto out;
5113 +
5114 +       kmem_cache_destroy(leaf_cachep);
5115 +    free_branch:
5116 +       kmem_cache_destroy(branch_cachep);
5117 +    out:
5118 +       return ret;
5119 +}
5120 +
5121 +static void __exit ip_set_iptree_fini(void)
5122 +{
5123 +       /* FIXME: possible race with ip_set_create() */
5124 +       ip_set_unregister_set_type(&ip_set_iptree);
5125 +       kmem_cache_destroy(leaf_cachep);
5126 +       kmem_cache_destroy(branch_cachep);
5127 +}
5128 +
5129 +module_init(ip_set_iptree_init);
5130 +module_exit(ip_set_iptree_fini);
5131 --- /dev/null
5132 +++ b/net/ipv4/netfilter/ip_set_iptreemap.c
5133 @@ -0,0 +1,829 @@
5134 +/* Copyright (C) 2007 Sven Wegener <sven.wegener@stealer.net>
5135 + *
5136 + * This program is free software; you can redistribute it and/or modify it
5137 + * under the terms of the GNU General Public License version 2 as published by
5138 + * the Free Software Foundation.
5139 + */
5140 +
5141 +/* This module implements the iptreemap ipset type. It uses bitmaps to
5142 + * represent every single IPv4 address as a single bit. The bitmaps are managed
5143 + * in a tree structure, where the first three octets of an address are used
5144 + * as an index to find the bitmap and the last octet is used as the bit number.
5145 + */
5146 +
5147 +#include <linux/version.h>
5148 +#include <linux/module.h>
5149 +#include <linux/ip.h>
5150 +#include <linux/skbuff.h>
5151 +#include <linux/slab.h>
5152 +#include <linux/delay.h>
5153 +#include <linux/netfilter_ipv4/ip_tables.h>
5154 +#include <linux/netfilter_ipv4/ip_set.h>
5155 +#include <linux/errno.h>
5156 +#include <asm/uaccess.h>
5157 +#include <asm/bitops.h>
5158 +#include <linux/spinlock.h>
5159 +
5160 +#include <linux/netfilter_ipv4/ip_set_iptreemap.h>
5161 +
5162 +#define IPTREEMAP_DEFAULT_GC_TIME (5 * 60)
5163 +#define IPTREEMAP_DESTROY_SLEEP (100)
5164 +
5165 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
5166 +static struct kmem_cache *cachep_b;
5167 +static struct kmem_cache *cachep_c;
5168 +static struct kmem_cache *cachep_d;
5169 +#else
5170 +static kmem_cache_t *cachep_b;
5171 +static kmem_cache_t *cachep_c;
5172 +static kmem_cache_t *cachep_d;
5173 +#endif
5174 +
5175 +static struct ip_set_iptreemap_d *fullbitmap_d;
5176 +static struct ip_set_iptreemap_c *fullbitmap_c;
5177 +static struct ip_set_iptreemap_b *fullbitmap_b;
5178 +
5179 +#if defined(__LITTLE_ENDIAN)
5180 +#define ABCD(a, b, c, d, addr) \
5181 +       do { \
5182 +               a = ((unsigned char *)addr)[3]; \
5183 +               b = ((unsigned char *)addr)[2]; \
5184 +               c = ((unsigned char *)addr)[1]; \
5185 +               d = ((unsigned char *)addr)[0]; \
5186 +       } while (0)
5187 +#elif defined(__BIG_ENDIAN)
5188 +#define ABCD(a,b,c,d,addrp) do {               \
5189 +       a = ((unsigned char *)addrp)[0];        \
5190 +       b = ((unsigned char *)addrp)[1];        \
5191 +       c = ((unsigned char *)addrp)[2];        \
5192 +       d = ((unsigned char *)addrp)[3];        \
5193 +} while (0)
5194 +#else
5195 +#error "Please fix asm/byteorder.h"
5196 +#endif /* __LITTLE_ENDIAN */
5197 +
5198 +#define TESTIP_WALK(map, elem, branch, full) \
5199 +       do { \
5200 +               branch = (map)->tree[elem]; \
5201 +               if (!branch) \
5202 +                       return 0; \
5203 +               else if (branch == full) \
5204 +                       return 1; \
5205 +       } while (0)
5206 +
5207 +#define ADDIP_WALK(map, elem, branch, type, cachep, full) \
5208 +       do { \
5209 +               branch = (map)->tree[elem]; \
5210 +               if (!branch) { \
5211 +                       branch = (type *) kmem_cache_alloc(cachep, GFP_ATOMIC); \
5212 +                       if (!branch) \
5213 +                               return -ENOMEM; \
5214 +                       memset(branch, 0, sizeof(*branch)); \
5215 +                       (map)->tree[elem] = branch; \
5216 +               } else if (branch == full) { \
5217 +                       return -EEXIST; \
5218 +               } \
5219 +       } while (0)
5220 +
5221 +#define ADDIP_RANGE_LOOP(map, a, a1, a2, hint, branch, full, cachep, free) \
5222 +       for (a = a1; a <= a2; a++) { \
5223 +               branch = (map)->tree[a]; \
5224 +               if (branch != full) { \
5225 +                       if ((a > a1 && a < a2) || (hint)) { \
5226 +                               if (branch) \
5227 +                                       free(branch); \
5228 +                               (map)->tree[a] = full; \
5229 +                               continue; \
5230 +                       } else if (!branch) { \
5231 +                               branch = kmem_cache_alloc(cachep, GFP_ATOMIC); \
5232 +                               if (!branch) \
5233 +                                       return -ENOMEM; \
5234 +                               memset(branch, 0, sizeof(*branch)); \
5235 +                               (map)->tree[a] = branch; \
5236 +                       }
5237 +
5238 +#define ADDIP_RANGE_LOOP_END() \
5239 +               } \
5240 +       }
5241 +
5242 +#define DELIP_WALK(map, elem, branch, cachep, full, flags) \
5243 +       do { \
5244 +               branch = (map)->tree[elem]; \
5245 +               if (!branch) { \
5246 +                       return -EEXIST; \
5247 +               } else if (branch == full) { \
5248 +                       branch = kmem_cache_alloc(cachep, flags); \
5249 +                       if (!branch) \
5250 +                               return -ENOMEM; \
5251 +                       memcpy(branch, full, sizeof(*full)); \
5252 +                       (map)->tree[elem] = branch; \
5253 +               } \
5254 +       } while (0)
5255 +
5256 +#define DELIP_RANGE_LOOP(map, a, a1, a2, hint, branch, full, cachep, free, flags) \
5257 +       for (a = a1; a <= a2; a++) { \
5258 +               branch = (map)->tree[a]; \
5259 +               if (branch) { \
5260 +                       if ((a > a1 && a < a2) || (hint)) { \
5261 +                               if (branch != full) \
5262 +                                       free(branch); \
5263 +                               (map)->tree[a] = NULL; \
5264 +                               continue; \
5265 +                       } else if (branch == full) { \
5266 +                               branch = kmem_cache_alloc(cachep, flags); \
5267 +                               if (!branch) \
5268 +                                       return -ENOMEM; \
5269 +                               memcpy(branch, full, sizeof(*branch)); \
5270 +                               (map)->tree[a] = branch; \
5271 +                       }
5272 +
5273 +#define DELIP_RANGE_LOOP_END() \
5274 +               } \
5275 +       }
5276 +
5277 +#define LOOP_WALK_BEGIN(map, i, branch) \
5278 +       for (i = 0; i < 256; i++) { \
5279 +               branch = (map)->tree[i]; \
5280 +               if (likely(!branch)) \
5281 +                       continue;
5282 +
5283 +#define LOOP_WALK_END() \
5284 +       }
5285 +
5286 +#define LOOP_WALK_BEGIN_GC(map, i, branch, full, cachep, count) \
5287 +       count = -256; \
5288 +       for (i = 0; i < 256; i++) { \
5289 +               branch = (map)->tree[i]; \
5290 +               if (likely(!branch)) \
5291 +                       continue; \
5292 +               count++; \
5293 +               if (branch == full) { \
5294 +                       count++; \
5295 +                       continue; \
5296 +               }
5297 +
5298 +#define LOOP_WALK_END_GC(map, i, branch, full, cachep, count) \
5299 +               if (-256 == count) { \
5300 +                       kmem_cache_free(cachep, branch); \
5301 +                       (map)->tree[i] = NULL; \
5302 +               } else if (256 == count) { \
5303 +                       kmem_cache_free(cachep, branch); \
5304 +                       (map)->tree[i] = full; \
5305 +               } \
5306 +       }
5307 +
5308 +#define LOOP_WALK_BEGIN_COUNT(map, i, branch, inrange, count) \
5309 +       for (i = 0; i < 256; i++) { \
5310 +               if (!(map)->tree[i]) { \
5311 +                       if (inrange) { \
5312 +                               count++; \
5313 +                               inrange = 0; \
5314 +                       } \
5315 +                       continue; \
5316 +               } \
5317 +               branch = (map)->tree[i];
5318 +
5319 +#define LOOP_WALK_END_COUNT() \
5320 +       }
5321 +
5322 +#define MIN(a, b) (a < b ? a : b)
5323 +#define MAX(a, b) (a > b ? a : b)
5324 +
5325 +#define GETVALUE1(a, a1, b1, r) \
5326 +       (a == a1 ? b1 : r)
5327 +
5328 +#define GETVALUE2(a, b, a1, b1, c1, r) \
5329 +       (a == a1 && b == b1 ? c1 : r)
5330 +
5331 +#define GETVALUE3(a, b, c, a1, b1, c1, d1, r) \
5332 +       (a == a1 && b == b1 && c == c1 ? d1 : r)
5333 +
5334 +#define CHECK1(a, a1, a2, b1, b2, c1, c2, d1, d2) \
5335 +       ( \
5336 +               GETVALUE1(a, a1, b1, 0) == 0 \
5337 +               && GETVALUE1(a, a2, b2, 255) == 255 \
5338 +               && c1 == 0 \
5339 +               && c2 == 255 \
5340 +               && d1 == 0 \
5341 +               && d2 == 255 \
5342 +       )
5343 +
5344 +#define CHECK2(a, b, a1, a2, b1, b2, c1, c2, d1, d2) \
5345 +       ( \
5346 +               GETVALUE2(a, b, a1, b1, c1, 0) == 0 \
5347 +               && GETVALUE2(a, b, a2, b2, c2, 255) == 255 \
5348 +               && d1 == 0 \
5349 +               && d2 == 255 \
5350 +       )
5351 +
5352 +#define CHECK3(a, b, c, a1, a2, b1, b2, c1, c2, d1, d2) \
5353 +       ( \
5354 +               GETVALUE3(a, b, c, a1, b1, c1, d1, 0) == 0 \
5355 +               && GETVALUE3(a, b, c, a2, b2, c2, d2, 255) == 255 \
5356 +       )
5357 +
5358 +
5359 +static inline void
5360 +free_d(struct ip_set_iptreemap_d *map)
5361 +{
5362 +       kmem_cache_free(cachep_d, map);
5363 +}
5364 +
5365 +static inline void
5366 +free_c(struct ip_set_iptreemap_c *map)
5367 +{
5368 +       struct ip_set_iptreemap_d *dtree;
5369 +       unsigned int i;
5370 +
5371 +       LOOP_WALK_BEGIN(map, i, dtree) {
5372 +               if (dtree != fullbitmap_d)
5373 +                       free_d(dtree);
5374 +       } LOOP_WALK_END();
5375 +
5376 +       kmem_cache_free(cachep_c, map);
5377 +}
5378 +
5379 +static inline void
5380 +free_b(struct ip_set_iptreemap_b *map)
5381 +{
5382 +       struct ip_set_iptreemap_c *ctree;
5383 +       unsigned int i;
5384 +
5385 +       LOOP_WALK_BEGIN(map, i, ctree) {
5386 +               if (ctree != fullbitmap_c)
5387 +                       free_c(ctree);
5388 +       } LOOP_WALK_END();
5389 +
5390 +       kmem_cache_free(cachep_b, map);
5391 +}
5392 +
5393 +static inline int
5394 +__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
5395 +{
5396 +       struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5397 +       struct ip_set_iptreemap_b *btree;
5398 +       struct ip_set_iptreemap_c *ctree;
5399 +       struct ip_set_iptreemap_d *dtree;
5400 +       unsigned char a, b, c, d;
5401 +
5402 +       *hash_ip = ip;
5403 +
5404 +       ABCD(a, b, c, d, hash_ip);
5405 +
5406 +       TESTIP_WALK(map, a, btree, fullbitmap_b);
5407 +       TESTIP_WALK(btree, b, ctree, fullbitmap_c);
5408 +       TESTIP_WALK(ctree, c, dtree, fullbitmap_d);
5409 +
5410 +       return !!test_bit(d, (void *) dtree->bitmap);
5411 +}
5412 +
5413 +static int
5414 +testip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
5415 +{
5416 +       struct ip_set_req_iptreemap *req = (struct ip_set_req_iptreemap *) data;
5417 +
5418 +       if (size != sizeof(struct ip_set_req_iptreemap)) {
5419 +               ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap), size);
5420 +               return -EINVAL;
5421 +       }
5422 +
5423 +       return __testip(set, req->start, hash_ip);
5424 +}
5425 +
5426 +static int
5427 +testip_kernel(struct ip_set *set, const struct sk_buff *skb, ip_set_ip_t *hash_ip, const u_int32_t *flags, unsigned char index)
5428 +{
5429 +       int res;
5430 +
5431 +       res = __testip(set,
5432 +                      ntohl(flags[index] & IPSET_SRC
5433 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
5434 +                               ? ip_hdr(skb)->saddr
5435 +                               : ip_hdr(skb)->daddr),
5436 +#else
5437 +                               ? skb->nh.iph->saddr
5438 +                               : skb->nh.iph->daddr),
5439 +#endif
5440 +                      hash_ip);
5441 +
5442 +       return (res < 0 ? 0 : res);
5443 +}
5444 +
5445 +static inline int
5446 +__addip_single(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
5447 +{
5448 +       struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5449 +       struct ip_set_iptreemap_b *btree;
5450 +       struct ip_set_iptreemap_c *ctree;
5451 +       struct ip_set_iptreemap_d *dtree;
5452 +       unsigned char a, b, c, d;
5453 +
5454 +       *hash_ip = ip;
5455 +
5456 +       ABCD(a, b, c, d, hash_ip);
5457 +
5458 +       ADDIP_WALK(map, a, btree, struct ip_set_iptreemap_b, cachep_b, fullbitmap_b);
5459 +       ADDIP_WALK(btree, b, ctree, struct ip_set_iptreemap_c, cachep_c, fullbitmap_c);
5460 +       ADDIP_WALK(ctree, c, dtree, struct ip_set_iptreemap_d, cachep_d, fullbitmap_d);
5461 +
5462 +       if (test_and_set_bit(d, (void *) dtree->bitmap))
5463 +               return -EEXIST;
5464 +
5465 +       set_bit(b, (void *) btree->dirty);
5466 +
5467 +       return 0;
5468 +}
5469 +
5470 +static inline int
5471 +__addip_range(struct ip_set *set, ip_set_ip_t start, ip_set_ip_t end, ip_set_ip_t *hash_ip)
5472 +{
5473 +       struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5474 +       struct ip_set_iptreemap_b *btree;
5475 +       struct ip_set_iptreemap_c *ctree;
5476 +       struct ip_set_iptreemap_d *dtree;
5477 +       unsigned int a, b, c, d;
5478 +       unsigned char a1, b1, c1, d1;
5479 +       unsigned char a2, b2, c2, d2;
5480 +
5481 +       if (start == end)
5482 +               return __addip_single(set, start, hash_ip);
5483 +
5484 +       *hash_ip = start;
5485 +
5486 +       ABCD(a1, b1, c1, d1, &start);
5487 +       ABCD(a2, b2, c2, d2, &end);
5488 +
5489 +       /* This is sooo ugly... */
5490 +       ADDIP_RANGE_LOOP(map, a, a1, a2, CHECK1(a, a1, a2, b1, b2, c1, c2, d1, d2), btree, fullbitmap_b, cachep_b, free_b) {
5491 +               ADDIP_RANGE_LOOP(btree, b, GETVALUE1(a, a1, b1, 0), GETVALUE1(a, a2, b2, 255), CHECK2(a, b, a1, a2, b1, b2, c1, c2, d1, d2), ctree, fullbitmap_c, cachep_c, free_c) {
5492 +                       ADDIP_RANGE_LOOP(ctree, c, GETVALUE2(a, b, a1, b1, c1, 0), GETVALUE2(a, b, a2, b2, c2, 255), CHECK3(a, b, c, a1, a2, b1, b2, c1, c2, d1, d2), dtree, fullbitmap_d, cachep_d, free_d) {
5493 +                               for (d = GETVALUE3(a, b, c, a1, b1, c1, d1, 0); d <= GETVALUE3(a, b, c, a2, b2, c2, d2, 255); d++)
5494 +                                       set_bit(d, (void *) dtree->bitmap);
5495 +                               set_bit(b, (void *) btree->dirty);
5496 +                       } ADDIP_RANGE_LOOP_END();
5497 +               } ADDIP_RANGE_LOOP_END();
5498 +       } ADDIP_RANGE_LOOP_END();
5499 +
5500 +       return 0;
5501 +}
5502 +
5503 +static int
5504 +addip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
5505 +{
5506 +       struct ip_set_req_iptreemap *req = (struct ip_set_req_iptreemap *) data;
5507 +
5508 +       if (size != sizeof(struct ip_set_req_iptreemap)) {
5509 +               ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap), size);
5510 +               return -EINVAL;
5511 +       }
5512 +
5513 +       return __addip_range(set, MIN(req->start, req->end), MAX(req->start, req->end), hash_ip);
5514 +}
5515 +
5516 +static int
5517 +addip_kernel(struct ip_set *set, const struct sk_buff *skb, ip_set_ip_t *hash_ip, const u_int32_t *flags, unsigned char index)
5518 +{
5519 +
5520 +       return __addip_single(set,
5521 +                       ntohl(flags[index] & IPSET_SRC
5522 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
5523 +                               ? ip_hdr(skb)->saddr
5524 +                               : ip_hdr(skb)->daddr),
5525 +#else
5526 +                               ? skb->nh.iph->saddr
5527 +                               : skb->nh.iph->daddr),
5528 +#endif
5529 +                       hash_ip);
5530 +}
5531 +
5532 +static inline int
5533 +__delip_single(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip, unsigned int __nocast flags)
5534 +{
5535 +       struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5536 +       struct ip_set_iptreemap_b *btree;
5537 +       struct ip_set_iptreemap_c *ctree;
5538 +       struct ip_set_iptreemap_d *dtree;
5539 +       unsigned char a,b,c,d;
5540 +
5541 +       *hash_ip = ip;
5542 +
5543 +       ABCD(a, b, c, d, hash_ip);
5544 +
5545 +       DELIP_WALK(map, a, btree, cachep_b, fullbitmap_b, flags);
5546 +       DELIP_WALK(btree, b, ctree, cachep_c, fullbitmap_c, flags);
5547 +       DELIP_WALK(ctree, c, dtree, cachep_d, fullbitmap_d, flags);
5548 +
5549 +       if (!test_and_clear_bit(d, (void *) dtree->bitmap))
5550 +               return -EEXIST;
5551 +
5552 +       set_bit(b, (void *) btree->dirty);
5553 +
5554 +       return 0;
5555 +}
5556 +
5557 +static inline int
5558 +__delip_range(struct ip_set *set, ip_set_ip_t start, ip_set_ip_t end, ip_set_ip_t *hash_ip, unsigned int __nocast flags)
5559 +{
5560 +       struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5561 +       struct ip_set_iptreemap_b *btree;
5562 +       struct ip_set_iptreemap_c *ctree;
5563 +       struct ip_set_iptreemap_d *dtree;
5564 +       unsigned int a, b, c, d;
5565 +       unsigned char a1, b1, c1, d1;
5566 +       unsigned char a2, b2, c2, d2;
5567 +
5568 +       if (start == end)
5569 +               return __delip_single(set, start, hash_ip, flags);
5570 +
5571 +       *hash_ip = start;
5572 +
5573 +       ABCD(a1, b1, c1, d1, &start);
5574 +       ABCD(a2, b2, c2, d2, &end);
5575 +
5576 +       /* This is sooo ugly... */
5577 +       DELIP_RANGE_LOOP(map, a, a1, a2, CHECK1(a, a1, a2, b1, b2, c1, c2, d1, d2), btree, fullbitmap_b, cachep_b, free_b, flags) {
5578 +               DELIP_RANGE_LOOP(btree, b, GETVALUE1(a, a1, b1, 0), GETVALUE1(a, a2, b2, 255), CHECK2(a, b, a1, a2, b1, b2, c1, c2, d1, d2), ctree, fullbitmap_c, cachep_c, free_c, flags) {
5579 +                       DELIP_RANGE_LOOP(ctree, c, GETVALUE2(a, b, a1, b1, c1, 0), GETVALUE2(a, b, a2, b2, c2, 255), CHECK3(a, b, c, a1, a2, b1, b2, c1, c2, d1, d2), dtree, fullbitmap_d, cachep_d, free_d, flags) {
5580 +                               for (d = GETVALUE3(a, b, c, a1, b1, c1, d1, 0); d <= GETVALUE3(a, b, c, a2, b2, c2, d2, 255); d++)
5581 +                                       clear_bit(d, (void *) dtree->bitmap);
5582 +                               set_bit(b, (void *) btree->dirty);
5583 +                       } DELIP_RANGE_LOOP_END();
5584 +               } DELIP_RANGE_LOOP_END();
5585 +       } DELIP_RANGE_LOOP_END();
5586 +
5587 +       return 0;
5588 +}
5589 +
5590 +static int
5591 +delip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
5592 +{
5593 +       struct ip_set_req_iptreemap *req = (struct ip_set_req_iptreemap *) data;
5594 +
5595 +       if (size != sizeof(struct ip_set_req_iptreemap)) {
5596 +               ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap), size);
5597 +               return -EINVAL;
5598 +       }
5599 +
5600 +       return __delip_range(set, MIN(req->start, req->end), MAX(req->start, req->end), hash_ip, GFP_KERNEL);
5601 +}
5602 +
5603 +static int
5604 +delip_kernel(struct ip_set *set, const struct sk_buff *skb, ip_set_ip_t *hash_ip, const u_int32_t *flags, unsigned char index)
5605 +{
5606 +       return __delip_single(set,
5607 +                       ntohl(flags[index] & IPSET_SRC
5608 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
5609 +                               ? ip_hdr(skb)->saddr
5610 +                               : ip_hdr(skb)->daddr),
5611 +#else
5612 +                               ? skb->nh.iph->saddr
5613 +                               : skb->nh.iph->daddr),
5614 +#endif
5615 +                       hash_ip,
5616 +                       GFP_ATOMIC);
5617 +}
5618 +
5619 +/* Check the status of the bitmap
5620 + * -1 == all bits cleared
5621 + *  1 == all bits set
5622 + *  0 == anything else
5623 + */
5624 +static inline int
5625 +bitmap_status(struct ip_set_iptreemap_d *dtree)
5626 +{
5627 +       unsigned char first = dtree->bitmap[0];
5628 +       int a;
5629 +
5630 +       for (a = 1; a < 32; a++)
5631 +               if (dtree->bitmap[a] != first)
5632 +                       return 0;
5633 +
5634 +       return (first == 0 ? -1 : (first == 255 ? 1 : 0));
5635 +}
5636 +
5637 +static void
5638 +gc(unsigned long addr)
5639 +{
5640 +       struct ip_set *set = (struct ip_set *) addr;
5641 +       struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5642 +       struct ip_set_iptreemap_b *btree;
5643 +       struct ip_set_iptreemap_c *ctree;
5644 +       struct ip_set_iptreemap_d *dtree;
5645 +       unsigned int a, b, c;
5646 +       int i, j, k;
5647 +
5648 +       write_lock_bh(&set->lock);
5649 +
5650 +       LOOP_WALK_BEGIN_GC(map, a, btree, fullbitmap_b, cachep_b, i) {
5651 +               LOOP_WALK_BEGIN_GC(btree, b, ctree, fullbitmap_c, cachep_c, j) {
5652 +                       if (!test_and_clear_bit(b, (void *) btree->dirty))
5653 +                               continue;
5654 +                       LOOP_WALK_BEGIN_GC(ctree, c, dtree, fullbitmap_d, cachep_d, k) {
5655 +                               switch (bitmap_status(dtree)) {
5656 +                                       case -1:
5657 +                                               kmem_cache_free(cachep_d, dtree);
5658 +                                               ctree->tree[c] = NULL;
5659 +                                               k--;
5660 +                                       break;
5661 +                                       case 1:
5662 +                                               kmem_cache_free(cachep_d, dtree);
5663 +                                               ctree->tree[c] = fullbitmap_d;
5664 +                                               k++;
5665 +                                       break;
5666 +                               }
5667 +                       } LOOP_WALK_END();
5668 +               } LOOP_WALK_END_GC(btree, b, ctree, fullbitmap_c, cachep_c, k);
5669 +       } LOOP_WALK_END_GC(map, a, btree, fullbitmap_b, cachep_b, j);
5670 +
5671 +       write_unlock_bh(&set->lock);
5672 +
5673 +       map->gc.expires = jiffies + map->gc_interval * HZ;
5674 +       add_timer(&map->gc);
5675 +}
5676 +
5677 +static inline void
5678 +init_gc_timer(struct ip_set *set)
5679 +{
5680 +       struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5681 +
5682 +       init_timer(&map->gc);
5683 +       map->gc.data = (unsigned long) set;
5684 +       map->gc.function = gc;
5685 +       map->gc.expires = jiffies + map->gc_interval * HZ;
5686 +       add_timer(&map->gc);
5687 +}
5688 +
5689 +static int create(struct ip_set *set, const void *data, size_t size)
5690 +{
5691 +       struct ip_set_req_iptreemap_create *req = (struct ip_set_req_iptreemap_create *) data;
5692 +       struct ip_set_iptreemap *map;
5693 +
5694 +       if (size != sizeof(struct ip_set_req_iptreemap_create)) {
5695 +               ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap_create), size);
5696 +               return -EINVAL;
5697 +       }
5698 +
5699 +       map = kzalloc(sizeof(*map), GFP_KERNEL);
5700 +       if (!map)
5701 +               return -ENOMEM;
5702 +
5703 +       map->gc_interval = req->gc_interval ? req->gc_interval : IPTREEMAP_DEFAULT_GC_TIME;
5704 +       set->data = map;
5705 +
5706 +       init_gc_timer(set);
5707 +
5708 +       return 0;
5709 +}
5710 +
5711 +static inline void __flush(struct ip_set_iptreemap *map)
5712 +{
5713 +       struct ip_set_iptreemap_b *btree;
5714 +       unsigned int a;
5715 +
5716 +       LOOP_WALK_BEGIN(map, a, btree);
5717 +               if (btree != fullbitmap_b)
5718 +                       free_b(btree);
5719 +       LOOP_WALK_END();
5720 +}
5721 +
5722 +static void destroy(struct ip_set *set)
5723 +{
5724 +       struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5725 +
5726 +       while (!del_timer(&map->gc))
5727 +               msleep(IPTREEMAP_DESTROY_SLEEP);
5728 +
5729 +       __flush(map);
5730 +       kfree(map);
5731 +
5732 +       set->data = NULL;
5733 +}
5734 +
5735 +static void flush(struct ip_set *set)
5736 +{
5737 +       struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5738 +
5739 +       while (!del_timer(&map->gc))
5740 +               msleep(IPTREEMAP_DESTROY_SLEEP);
5741 +
5742 +       __flush(map);
5743 +
5744 +       memset(map, 0, sizeof(*map));
5745 +
5746 +       init_gc_timer(set);
5747 +}
5748 +
5749 +static void list_header(const struct ip_set *set, void *data)
5750 +{
5751 +       struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5752 +       struct ip_set_req_iptreemap_create *header = (struct ip_set_req_iptreemap_create *) data;
5753 +
5754 +       header->gc_interval = map->gc_interval;
5755 +}
5756 +
5757 +static int list_members_size(const struct ip_set *set)
5758 +{
5759 +       struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5760 +       struct ip_set_iptreemap_b *btree;
5761 +       struct ip_set_iptreemap_c *ctree;
5762 +       struct ip_set_iptreemap_d *dtree;
5763 +       unsigned int a, b, c, d, inrange = 0, count = 0;
5764 +
5765 +       LOOP_WALK_BEGIN_COUNT(map, a, btree, inrange, count) {
5766 +               LOOP_WALK_BEGIN_COUNT(btree, b, ctree, inrange, count) {
5767 +                       LOOP_WALK_BEGIN_COUNT(ctree, c, dtree, inrange, count) {
5768 +                               for (d = 0; d < 256; d++) {
5769 +                                       if (test_bit(d, (void *) dtree->bitmap)) {
5770 +                                               inrange = 1;
5771 +                                       } else if (inrange) {
5772 +                                               count++;
5773 +                                               inrange = 0;
5774 +                                       }
5775 +                               }
5776 +                       } LOOP_WALK_END_COUNT();
5777 +               } LOOP_WALK_END_COUNT();
5778 +       } LOOP_WALK_END_COUNT();
5779 +
5780 +       if (inrange)
5781 +               count++;
5782 +
5783 +       return (count * sizeof(struct ip_set_req_iptreemap));
5784 +}
5785 +
5786 +static inline size_t add_member(void *data, size_t offset, ip_set_ip_t start, ip_set_ip_t end)
5787 +{
5788 +       struct ip_set_req_iptreemap *entry = (struct ip_set_req_iptreemap *) (data + offset);
5789 +
5790 +       entry->start = start;
5791 +       entry->end = end;
5792 +
5793 +       return sizeof(*entry);
5794 +}
5795 +
5796 +static void list_members(const struct ip_set *set, void *data)
5797 +{
5798 +       struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5799 +       struct ip_set_iptreemap_b *btree;
5800 +       struct ip_set_iptreemap_c *ctree;
5801 +       struct ip_set_iptreemap_d *dtree;
5802 +       unsigned int a, b, c, d, inrange = 0;
5803 +       size_t offset = 0;
5804 +       ip_set_ip_t start = 0, end = 0, ip;
5805 +
5806 +       LOOP_WALK_BEGIN(map, a, btree) {
5807 +               LOOP_WALK_BEGIN(btree, b, ctree) {
5808 +                       LOOP_WALK_BEGIN(ctree, c, dtree) {
5809 +                               for (d = 0; d < 256; d++) {
5810 +                                       if (test_bit(d, (void *) dtree->bitmap)) {
5811 +                                               ip = ((a << 24) | (b << 16) | (c << 8) | d);
5812 +                                               if (!inrange) {
5813 +                                                       inrange = 1;
5814 +                                                       start = ip;
5815 +                                               } else if (end < ip - 1) {
5816 +                                                       offset += add_member(data, offset, start, end);
5817 +                                                       start = ip;
5818 +                                               }
5819 +                                               end = ip;
5820 +                                       } else if (inrange) {
5821 +                                               offset += add_member(data, offset, start, end);
5822 +                                               inrange = 0;
5823 +                                       }
5824 +                               }
5825 +                       } LOOP_WALK_END();
5826 +               } LOOP_WALK_END();
5827 +       } LOOP_WALK_END();
5828 +
5829 +       if (inrange)
5830 +               add_member(data, offset, start, end);
5831 +}
5832 +
5833 +static struct ip_set_type ip_set_iptreemap = {
5834 +       .typename               = SETTYPE_NAME,
5835 +       .features               = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
5836 +       .protocol_version       = IP_SET_PROTOCOL_VERSION,
5837 +       .create                 = create,
5838 +       .destroy                = destroy,
5839 +       .flush                  = flush,
5840 +       .reqsize                = sizeof(struct ip_set_req_iptreemap),
5841 +       .addip                  = addip,
5842 +       .addip_kernel           = addip_kernel,
5843 +       .delip                  = delip,
5844 +       .delip_kernel           = delip_kernel,
5845 +       .testip                 = testip,
5846 +       .testip_kernel          = testip_kernel,
5847 +       .header_size            = sizeof(struct ip_set_req_iptreemap_create),
5848 +       .list_header            = list_header,
5849 +       .list_members_size      = list_members_size,
5850 +       .list_members           = list_members,
5851 +       .me                     = THIS_MODULE,
5852 +};
5853 +
5854 +MODULE_LICENSE("GPL");
5855 +MODULE_AUTHOR("Sven Wegener <sven.wegener@stealer.net>");
5856 +MODULE_DESCRIPTION("iptreemap type of IP sets");
5857 +
5858 +static int __init ip_set_iptreemap_init(void)
5859 +{
5860 +       int ret = -ENOMEM;
5861 +       int a;
5862 +
5863 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
5864 +       cachep_b = kmem_cache_create("ip_set_iptreemap_b",
5865 +                                    sizeof(struct ip_set_iptreemap_b),
5866 +                                    0, 0, NULL);
5867 +#else
5868 +       cachep_b = kmem_cache_create("ip_set_iptreemap_b",
5869 +                                    sizeof(struct ip_set_iptreemap_b),
5870 +                                    0, 0, NULL, NULL);
5871 +#endif
5872 +       if (!cachep_b) {
5873 +               ip_set_printk("Unable to create ip_set_iptreemap_b slab cache");
5874 +               goto out;
5875 +       }
5876 +
5877 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
5878 +       cachep_c = kmem_cache_create("ip_set_iptreemap_c",
5879 +                                    sizeof(struct ip_set_iptreemap_c),
5880 +                                    0, 0, NULL);
5881 +#else
5882 +       cachep_c = kmem_cache_create("ip_set_iptreemap_c",
5883 +                                    sizeof(struct ip_set_iptreemap_c),
5884 +                                    0, 0, NULL, NULL);
5885 +#endif
5886 +       if (!cachep_c) {
5887 +               ip_set_printk("Unable to create ip_set_iptreemap_c slab cache");
5888 +               goto outb;
5889 +       }
5890 +
5891 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
5892 +       cachep_d = kmem_cache_create("ip_set_iptreemap_d",
5893 +                                    sizeof(struct ip_set_iptreemap_d),
5894 +                                    0, 0, NULL);
5895 +#else
5896 +       cachep_d = kmem_cache_create("ip_set_iptreemap_d",
5897 +                                    sizeof(struct ip_set_iptreemap_d),
5898 +                                    0, 0, NULL, NULL);
5899 +#endif
5900 +       if (!cachep_d) {
5901 +               ip_set_printk("Unable to create ip_set_iptreemap_d slab cache");
5902 +               goto outc;
5903 +       }
5904 +
5905 +       fullbitmap_d = kmem_cache_alloc(cachep_d, GFP_KERNEL);
5906 +       if (!fullbitmap_d)
5907 +               goto outd;
5908 +
5909 +       fullbitmap_c = kmem_cache_alloc(cachep_c, GFP_KERNEL);
5910 +       if (!fullbitmap_c)
5911 +               goto outbitmapd;
5912 +
5913 +       fullbitmap_b = kmem_cache_alloc(cachep_b, GFP_KERNEL);
5914 +       if (!fullbitmap_b)
5915 +               goto outbitmapc;
5916 +
5917 +       ret = ip_set_register_set_type(&ip_set_iptreemap);
5918 +       if (0 > ret)
5919 +               goto outbitmapb;
5920 +
5921 +       /* Now init our global bitmaps */
5922 +       memset(fullbitmap_d->bitmap, 0xff, sizeof(fullbitmap_d->bitmap));
5923 +
5924 +       for (a = 0; a < 256; a++)
5925 +               fullbitmap_c->tree[a] = fullbitmap_d;
5926 +
5927 +       for (a = 0; a < 256; a++)
5928 +               fullbitmap_b->tree[a] = fullbitmap_c;
5929 +       memset(fullbitmap_b->dirty, 0, sizeof(fullbitmap_b->dirty));
5930 +
5931 +       return 0;
5932 +
5933 +outbitmapb:
5934 +       kmem_cache_free(cachep_b, fullbitmap_b);
5935 +outbitmapc:
5936 +       kmem_cache_free(cachep_c, fullbitmap_c);
5937 +outbitmapd:
5938 +       kmem_cache_free(cachep_d, fullbitmap_d);
5939 +outd:
5940 +       kmem_cache_destroy(cachep_d);
5941 +outc:
5942 +       kmem_cache_destroy(cachep_c);
5943 +outb:
5944 +       kmem_cache_destroy(cachep_b);
5945 +out:
5946 +
5947 +       return ret;
5948 +}
5949 +
5950 +static void __exit ip_set_iptreemap_fini(void)
5951 +{
5952 +       ip_set_unregister_set_type(&ip_set_iptreemap);
5953 +       kmem_cache_free(cachep_d, fullbitmap_d);
5954 +       kmem_cache_free(cachep_c, fullbitmap_c);
5955 +       kmem_cache_free(cachep_b, fullbitmap_b);
5956 +       kmem_cache_destroy(cachep_d);
5957 +       kmem_cache_destroy(cachep_c);
5958 +       kmem_cache_destroy(cachep_b);
5959 +}
5960 +
5961 +module_init(ip_set_iptreemap_init);
5962 +module_exit(ip_set_iptreemap_fini);
5963 --- /dev/null
5964 +++ b/net/ipv4/netfilter/ip_set_macipmap.c
5965 @@ -0,0 +1,375 @@
5966 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
5967 + *                         Patrick Schaaf <bof@bof.de>
5968 + *                         Martin Josefsson <gandalf@wlug.westbo.se>
5969 + * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
5970 + *
5971 + * This program is free software; you can redistribute it and/or modify
5972 + * it under the terms of the GNU General Public License version 2 as
5973 + * published by the Free Software Foundation.
5974 + */
5975 +
5976 +/* Kernel module implementing an IP set type: the macipmap type */
5977 +
5978 +#include <linux/module.h>
5979 +#include <linux/ip.h>
5980 +#include <linux/skbuff.h>
5981 +#include <linux/version.h>
5982 +#include <linux/netfilter_ipv4/ip_tables.h>
5983 +#include <linux/netfilter_ipv4/ip_set.h>
5984 +#include <linux/errno.h>
5985 +#include <asm/uaccess.h>
5986 +#include <asm/bitops.h>
5987 +#include <linux/spinlock.h>
5988 +#include <linux/if_ether.h>
5989 +#include <linux/vmalloc.h>
5990 +
5991 +#include <linux/netfilter_ipv4/ip_set_malloc.h>
5992 +#include <linux/netfilter_ipv4/ip_set_macipmap.h>
5993 +
5994 +static int
5995 +testip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
5996 +{
5997 +       struct ip_set_macipmap *map = (struct ip_set_macipmap *) set->data;
5998 +       struct ip_set_macip *table = (struct ip_set_macip *) map->members;
5999 +       struct ip_set_req_macipmap *req = (struct ip_set_req_macipmap *) data;
6000 +
6001 +       if (size != sizeof(struct ip_set_req_macipmap)) {
6002 +               ip_set_printk("data length wrong (want %zu, have %zu)",
6003 +                             sizeof(struct ip_set_req_macipmap),
6004 +                             size);
6005 +               return -EINVAL;
6006 +       }
6007 +
6008 +       if (req->ip < map->first_ip || req->ip > map->last_ip)
6009 +               return -ERANGE;
6010 +
6011 +       *hash_ip = req->ip;
6012 +       DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
6013 +          set->name, HIPQUAD(req->ip), HIPQUAD(*hash_ip));
6014 +       if (test_bit(IPSET_MACIP_ISSET,
6015 +                    (void *) &table[req->ip - map->first_ip].flags)) {
6016 +               return (memcmp(req->ethernet,
6017 +                              &table[req->ip - map->first_ip].ethernet,
6018 +                              ETH_ALEN) == 0);
6019 +       } else {
6020 +               return (map->flags & IPSET_MACIP_MATCHUNSET ? 1 : 0);
6021 +       }
6022 +}
6023 +
6024 +static int
6025 +testip_kernel(struct ip_set *set,
6026 +             const struct sk_buff *skb,
6027 +             ip_set_ip_t *hash_ip,
6028 +             const u_int32_t *flags,
6029 +             unsigned char index)
6030 +{
6031 +       struct ip_set_macipmap *map =
6032 +           (struct ip_set_macipmap *) set->data;
6033 +       struct ip_set_macip *table =
6034 +           (struct ip_set_macip *) map->members;
6035 +       ip_set_ip_t ip;
6036 +
6037 +       ip = ntohl(flags[index] & IPSET_SRC
6038 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
6039 +                       ? ip_hdr(skb)->saddr
6040 +                       : ip_hdr(skb)->daddr);
6041 +#else
6042 +                       ? skb->nh.iph->saddr
6043 +                       : skb->nh.iph->daddr);
6044 +#endif
6045 +
6046 +       if (ip < map->first_ip || ip > map->last_ip)
6047 +               return 0;
6048 +
6049 +       *hash_ip = ip;
6050 +       DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
6051 +          set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
6052 +       if (test_bit(IPSET_MACIP_ISSET,
6053 +           (void *) &table[ip - map->first_ip].flags)) {
6054 +               /* Is mac pointer valid?
6055 +                * If so, compare... */
6056 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
6057 +               return (skb_mac_header(skb) >= skb->head
6058 +                       && (skb_mac_header(skb) + ETH_HLEN) <= skb->data
6059 +#else
6060 +               return (skb->mac.raw >= skb->head
6061 +                       && (skb->mac.raw + ETH_HLEN) <= skb->data
6062 +#endif
6063 +                       && (memcmp(eth_hdr(skb)->h_source,
6064 +                                  &table[ip - map->first_ip].ethernet,
6065 +                                  ETH_ALEN) == 0));
6066 +       } else {
6067 +               return (map->flags & IPSET_MACIP_MATCHUNSET ? 1 : 0);
6068 +       }
6069 +}
6070 +
6071 +/* returns 0 on success */
6072 +static inline int
6073 +__addip(struct ip_set *set,
6074 +       ip_set_ip_t ip, unsigned char *ethernet, ip_set_ip_t *hash_ip)
6075 +{
6076 +       struct ip_set_macipmap *map =
6077 +           (struct ip_set_macipmap *) set->data;
6078 +       struct ip_set_macip *table =
6079 +           (struct ip_set_macip *) map->members;
6080 +
6081 +       if (ip < map->first_ip || ip > map->last_ip)
6082 +               return -ERANGE;
6083 +       if (test_and_set_bit(IPSET_MACIP_ISSET,
6084 +                            (void *) &table[ip - map->first_ip].flags))
6085 +               return -EEXIST;
6086 +
6087 +       *hash_ip = ip;
6088 +       DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
6089 +       memcpy(&table[ip - map->first_ip].ethernet, ethernet, ETH_ALEN);
6090 +       return 0;
6091 +}
6092 +
6093 +static int
6094 +addip(struct ip_set *set, const void *data, size_t size,
6095 +      ip_set_ip_t *hash_ip)
6096 +{
6097 +       struct ip_set_req_macipmap *req =
6098 +           (struct ip_set_req_macipmap *) data;
6099 +
6100 +       if (size != sizeof(struct ip_set_req_macipmap)) {
6101 +               ip_set_printk("data length wrong (want %zu, have %zu)",
6102 +                             sizeof(struct ip_set_req_macipmap),
6103 +                             size);
6104 +               return -EINVAL;
6105 +       }
6106 +       return __addip(set, req->ip, req->ethernet, hash_ip);
6107 +}
6108 +
6109 +static int
6110 +addip_kernel(struct ip_set *set,
6111 +            const struct sk_buff *skb,
6112 +            ip_set_ip_t *hash_ip,
6113 +            const u_int32_t *flags,
6114 +            unsigned char index)
6115 +{
6116 +       ip_set_ip_t ip;
6117 +
6118 +       ip = ntohl(flags[index] & IPSET_SRC
6119 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
6120 +                       ? ip_hdr(skb)->saddr
6121 +                       : ip_hdr(skb)->daddr);
6122 +#else
6123 +                       ? skb->nh.iph->saddr
6124 +                       : skb->nh.iph->daddr);
6125 +#endif
6126 +
6127 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
6128 +       if (!(skb_mac_header(skb) >= skb->head
6129 +             && (skb_mac_header(skb) + ETH_HLEN) <= skb->data))
6130 +#else
6131 +       if (!(skb->mac.raw >= skb->head
6132 +             && (skb->mac.raw + ETH_HLEN) <= skb->data))
6133 +#endif
6134 +               return -EINVAL;
6135 +
6136 +       return __addip(set, ip, eth_hdr(skb)->h_source, hash_ip);
6137 +}
6138 +
6139 +static inline int
6140 +__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
6141 +{
6142 +       struct ip_set_macipmap *map =
6143 +           (struct ip_set_macipmap *) set->data;
6144 +       struct ip_set_macip *table =
6145 +           (struct ip_set_macip *) map->members;
6146 +
6147 +       if (ip < map->first_ip || ip > map->last_ip)
6148 +               return -ERANGE;
6149 +       if (!test_and_clear_bit(IPSET_MACIP_ISSET,
6150 +                               (void *)&table[ip - map->first_ip].flags))
6151 +               return -EEXIST;
6152 +
6153 +       *hash_ip = ip;
6154 +       DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
6155 +       return 0;
6156 +}
6157 +
6158 +static int
6159 +delip(struct ip_set *set, const void *data, size_t size,
6160 +     ip_set_ip_t *hash_ip)
6161 +{
6162 +       struct ip_set_req_macipmap *req =
6163 +           (struct ip_set_req_macipmap *) data;
6164 +
6165 +       if (size != sizeof(struct ip_set_req_macipmap)) {
6166 +               ip_set_printk("data length wrong (want %zu, have %zu)",
6167 +                             sizeof(struct ip_set_req_macipmap),
6168 +                             size);
6169 +               return -EINVAL;
6170 +       }
6171 +       return __delip(set, req->ip, hash_ip);
6172 +}
6173 +
6174 +static int
6175 +delip_kernel(struct ip_set *set,
6176 +            const struct sk_buff *skb,
6177 +            ip_set_ip_t *hash_ip,
6178 +            const u_int32_t *flags,
6179 +            unsigned char index)
6180 +{
6181 +       return __delip(set,
6182 +                      ntohl(flags[index] & IPSET_SRC
6183 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
6184 +                               ? ip_hdr(skb)->saddr
6185 +                               : ip_hdr(skb)->daddr),
6186 +#else
6187 +                               ? skb->nh.iph->saddr
6188 +                               : skb->nh.iph->daddr),
6189 +#endif
6190 +                      hash_ip);
6191 +}
6192 +
6193 +static inline size_t members_size(ip_set_id_t from, ip_set_id_t to)
6194 +{
6195 +       return (size_t)((to - from + 1) * sizeof(struct ip_set_macip));
6196 +}
6197 +
6198 +static int create(struct ip_set *set, const void *data, size_t size)
6199 +{
6200 +       int newbytes;
6201 +       struct ip_set_req_macipmap_create *req =
6202 +           (struct ip_set_req_macipmap_create *) data;
6203 +       struct ip_set_macipmap *map;
6204 +
6205 +       if (size != sizeof(struct ip_set_req_macipmap_create)) {
6206 +               ip_set_printk("data length wrong (want %zu, have %zu)",
6207 +                             sizeof(struct ip_set_req_macipmap_create),
6208 +                             size);
6209 +               return -EINVAL;
6210 +       }
6211 +
6212 +       DP("from %u.%u.%u.%u to %u.%u.%u.%u",
6213 +          HIPQUAD(req->from), HIPQUAD(req->to));
6214 +
6215 +       if (req->from > req->to) {
6216 +               DP("bad ip range");
6217 +               return -ENOEXEC;
6218 +       }
6219 +
6220 +       if (req->to - req->from > MAX_RANGE) {
6221 +               ip_set_printk("range too big (max %d addresses)",
6222 +                              MAX_RANGE+1);
6223 +               return -ENOEXEC;
6224 +       }
6225 +
6226 +       map = kmalloc(sizeof(struct ip_set_macipmap), GFP_KERNEL);
6227 +       if (!map) {
6228 +               DP("out of memory for %d bytes",
6229 +                  sizeof(struct ip_set_macipmap));
6230 +               return -ENOMEM;
6231 +       }
6232 +       map->flags = req->flags;
6233 +       map->first_ip = req->from;
6234 +       map->last_ip = req->to;
6235 +       newbytes = members_size(map->first_ip, map->last_ip);
6236 +       map->members = ip_set_malloc(newbytes);
6237 +       DP("members: %u %p", newbytes, map->members);
6238 +       if (!map->members) {
6239 +               DP("out of memory for %d bytes", newbytes);
6240 +               kfree(map);
6241 +               return -ENOMEM;
6242 +       }
6243 +       memset(map->members, 0, newbytes);
6244 +
6245 +       set->data = map;
6246 +       return 0;
6247 +}
6248 +
6249 +static void destroy(struct ip_set *set)
6250 +{
6251 +       struct ip_set_macipmap *map =
6252 +           (struct ip_set_macipmap *) set->data;
6253 +
6254 +       ip_set_free(map->members, members_size(map->first_ip, map->last_ip));
6255 +       kfree(map);
6256 +
6257 +       set->data = NULL;
6258 +}
6259 +
6260 +static void flush(struct ip_set *set)
6261 +{
6262 +       struct ip_set_macipmap *map =
6263 +           (struct ip_set_macipmap *) set->data;
6264 +       memset(map->members, 0, members_size(map->first_ip, map->last_ip));
6265 +}
6266 +
6267 +static void list_header(const struct ip_set *set, void *data)
6268 +{
6269 +       struct ip_set_macipmap *map =
6270 +           (struct ip_set_macipmap *) set->data;
6271 +       struct ip_set_req_macipmap_create *header =
6272 +           (struct ip_set_req_macipmap_create *) data;
6273 +
6274 +       DP("list_header %x %x %u", map->first_ip, map->last_ip,
6275 +          map->flags);
6276 +
6277 +       header->from = map->first_ip;
6278 +       header->to = map->last_ip;
6279 +       header->flags = map->flags;
6280 +}
6281 +
6282 +static int list_members_size(const struct ip_set *set)
6283 +{
6284 +       struct ip_set_macipmap *map =
6285 +           (struct ip_set_macipmap *) set->data;
6286 +
6287 +       DP("%u", members_size(map->first_ip, map->last_ip));
6288 +       return members_size(map->first_ip, map->last_ip);
6289 +}
6290 +
6291 +static void list_members(const struct ip_set *set, void *data)
6292 +{
6293 +       struct ip_set_macipmap *map =
6294 +           (struct ip_set_macipmap *) set->data;
6295 +
6296 +       int bytes = members_size(map->first_ip, map->last_ip);
6297 +
6298 +       DP("members: %u %p", bytes, map->members);
6299 +       memcpy(data, map->members, bytes);
6300 +}
6301 +
6302 +static struct ip_set_type ip_set_macipmap = {
6303 +       .typename               = SETTYPE_NAME,
6304 +       .features               = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
6305 +       .protocol_version       = IP_SET_PROTOCOL_VERSION,
6306 +       .create                 = &create,
6307 +       .destroy                = &destroy,
6308 +       .flush                  = &flush,
6309 +       .reqsize                = sizeof(struct ip_set_req_macipmap),
6310 +       .addip                  = &addip,
6311 +       .addip_kernel           = &addip_kernel,
6312 +       .delip                  = &delip,
6313 +       .delip_kernel           = &delip_kernel,
6314 +       .testip                 = &testip,
6315 +       .testip_kernel          = &testip_kernel,
6316 +       .header_size            = sizeof(struct ip_set_req_macipmap_create),
6317 +       .list_header            = &list_header,
6318 +       .list_members_size      = &list_members_size,
6319 +       .list_members           = &list_members,
6320 +       .me                     = THIS_MODULE,
6321 +};
6322 +
6323 +MODULE_LICENSE("GPL");
6324 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
6325 +MODULE_DESCRIPTION("macipmap type of IP sets");
6326 +
6327 +static int __init ip_set_macipmap_init(void)
6328 +{
6329 +       init_max_malloc_size();
6330 +       return ip_set_register_set_type(&ip_set_macipmap);
6331 +}
6332 +
6333 +static void __exit ip_set_macipmap_fini(void)
6334 +{
6335 +       /* FIXME: possible race with ip_set_create() */
6336 +       ip_set_unregister_set_type(&ip_set_macipmap);
6337 +}
6338 +
6339 +module_init(ip_set_macipmap_init);
6340 +module_exit(ip_set_macipmap_fini);
6341 --- /dev/null
6342 +++ b/net/ipv4/netfilter/ip_set_nethash.c
6343 @@ -0,0 +1,497 @@
6344 +/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
6345 + *
6346 + * This program is free software; you can redistribute it and/or modify
6347 + * it under the terms of the GNU General Public License version 2 as
6348 + * published by the Free Software Foundation.
6349 + */
6350 +
6351 +/* Kernel module implementing a CIDR nethash set */
6352 +
6353 +#include <linux/module.h>
6354 +#include <linux/ip.h>
6355 +#include <linux/skbuff.h>
6356 +#include <linux/version.h>
6357 +#include <linux/jhash.h>
6358 +#include <linux/netfilter_ipv4/ip_tables.h>
6359 +#include <linux/netfilter_ipv4/ip_set.h>
6360 +#include <linux/errno.h>
6361 +#include <asm/uaccess.h>
6362 +#include <asm/bitops.h>
6363 +#include <linux/spinlock.h>
6364 +#include <linux/vmalloc.h>
6365 +#include <linux/random.h>
6366 +
6367 +#include <net/ip.h>
6368 +
6369 +#include <linux/netfilter_ipv4/ip_set_malloc.h>
6370 +#include <linux/netfilter_ipv4/ip_set_nethash.h>
6371 +
6372 +static int limit = MAX_RANGE;
6373 +
6374 +static inline __u32
6375 +jhash_ip(const struct ip_set_nethash *map, uint16_t i, ip_set_ip_t ip)
6376 +{
6377 +       return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
6378 +}
6379 +
6380 +static inline __u32
6381 +hash_id_cidr(struct ip_set_nethash *map,
6382 +            ip_set_ip_t ip,
6383 +            unsigned char cidr,
6384 +            ip_set_ip_t *hash_ip)
6385 +{
6386 +       __u32 id;
6387 +       u_int16_t i;
6388 +       ip_set_ip_t *elem;
6389 +
6390 +       *hash_ip = pack(ip, cidr);
6391 +
6392 +       for (i = 0; i < map->probes; i++) {
6393 +               id = jhash_ip(map, i, *hash_ip) % map->hashsize;
6394 +               DP("hash key: %u", id);
6395 +               elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
6396 +               if (*elem == *hash_ip)
6397 +                       return id;
6398 +       }
6399 +       return UINT_MAX;
6400 +}
6401 +
6402 +static inline __u32
6403 +hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
6404 +{
6405 +       struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
6406 +       __u32 id = UINT_MAX;
6407 +       int i;
6408 +
6409 +       for (i = 0; i < 30 && map->cidr[i]; i++) {
6410 +               id = hash_id_cidr(map, ip, map->cidr[i], hash_ip);
6411 +               if (id != UINT_MAX)
6412 +                       break;
6413 +       }
6414 +       return id;
6415 +}
6416 +
6417 +static inline int
6418 +__testip_cidr(struct ip_set *set, ip_set_ip_t ip, unsigned char cidr,
6419 +             ip_set_ip_t *hash_ip)
6420 +{
6421 +       struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
6422 +
6423 +       return (ip && hash_id_cidr(map, ip, cidr, hash_ip) != UINT_MAX);
6424 +}
6425 +
6426 +static inline int
6427 +__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
6428 +{
6429 +       return (ip && hash_id(set, ip, hash_ip) != UINT_MAX);
6430 +}
6431 +
6432 +static int
6433 +testip(struct ip_set *set, const void *data, size_t size,
6434 +       ip_set_ip_t *hash_ip)
6435 +{
6436 +       struct ip_set_req_nethash *req =
6437 +           (struct ip_set_req_nethash *) data;
6438 +
6439 +       if (size != sizeof(struct ip_set_req_nethash)) {
6440 +               ip_set_printk("data length wrong (want %zu, have %zu)",
6441 +                             sizeof(struct ip_set_req_nethash),
6442 +                             size);
6443 +               return -EINVAL;
6444 +       }
6445 +       return (req->cidr == 32 ? __testip(set, req->ip, hash_ip)
6446 +               : __testip_cidr(set, req->ip, req->cidr, hash_ip));
6447 +}
6448 +
6449 +static int
6450 +testip_kernel(struct ip_set *set,
6451 +             const struct sk_buff *skb,
6452 +             ip_set_ip_t *hash_ip,
6453 +             const u_int32_t *flags,
6454 +             unsigned char index)
6455 +{
6456 +       return __testip(set,
6457 +                       ntohl(flags[index] & IPSET_SRC
6458 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
6459 +                               ? ip_hdr(skb)->saddr
6460 +                               : ip_hdr(skb)->daddr),
6461 +#else
6462 +                               ? skb->nh.iph->saddr
6463 +                               : skb->nh.iph->daddr),
6464 +#endif
6465 +                       hash_ip);
6466 +}
6467 +
6468 +static inline int
6469 +__addip_base(struct ip_set_nethash *map, ip_set_ip_t ip)
6470 +{
6471 +       __u32 probe;
6472 +       u_int16_t i;
6473 +       ip_set_ip_t *elem;
6474 +
6475 +       for (i = 0; i < map->probes; i++) {
6476 +               probe = jhash_ip(map, i, ip) % map->hashsize;
6477 +               elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
6478 +               if (*elem == ip)
6479 +                       return -EEXIST;
6480 +               if (!*elem) {
6481 +                       *elem = ip;
6482 +                       map->elements++;
6483 +                       return 0;
6484 +               }
6485 +       }
6486 +       /* Trigger rehashing */
6487 +       return -EAGAIN;
6488 +}
6489 +
6490 +static inline int
6491 +__addip(struct ip_set_nethash *map, ip_set_ip_t ip, unsigned char cidr,
6492 +       ip_set_ip_t *hash_ip)
6493 +{
6494 +       if (!ip || map->elements >= limit)
6495 +               return -ERANGE;
6496 +
6497 +       *hash_ip = pack(ip, cidr);
6498 +       DP("%u.%u.%u.%u/%u, %u.%u.%u.%u", HIPQUAD(ip), cidr, HIPQUAD(*hash_ip));
6499 +
6500 +       return __addip_base(map, *hash_ip);
6501 +}
6502 +
6503 +static void
6504 +update_cidr_sizes(struct ip_set_nethash *map, unsigned char cidr)
6505 +{
6506 +       unsigned char next;
6507 +       int i;
6508 +
6509 +       for (i = 0; i < 30 && map->cidr[i]; i++) {
6510 +               if (map->cidr[i] == cidr) {
6511 +                       return;
6512 +               } else if (map->cidr[i] < cidr) {
6513 +                       next = map->cidr[i];
6514 +                       map->cidr[i] = cidr;
6515 +                       cidr = next;
6516 +               }
6517 +       }
6518 +       if (i < 30)
6519 +               map->cidr[i] = cidr;
6520 +}
6521 +
6522 +static int
6523 +addip(struct ip_set *set, const void *data, size_t size,
6524 +        ip_set_ip_t *hash_ip)
6525 +{
6526 +       struct ip_set_req_nethash *req =
6527 +           (struct ip_set_req_nethash *) data;
6528 +       int ret;
6529 +
6530 +       if (size != sizeof(struct ip_set_req_nethash)) {
6531 +               ip_set_printk("data length wrong (want %zu, have %zu)",
6532 +                             sizeof(struct ip_set_req_nethash),
6533 +                             size);
6534 +               return -EINVAL;
6535 +       }
6536 +       ret = __addip((struct ip_set_nethash *) set->data,
6537 +                     req->ip, req->cidr, hash_ip);
6538 +
6539 +       if (ret == 0)
6540 +               update_cidr_sizes((struct ip_set_nethash *) set->data,
6541 +                                 req->cidr);
6542 +
6543 +       return ret;
6544 +}
6545 +
6546 +static int
6547 +addip_kernel(struct ip_set *set,
6548 +            const struct sk_buff *skb,
6549 +            ip_set_ip_t *hash_ip,
6550 +            const u_int32_t *flags,
6551 +            unsigned char index)
6552 +{
6553 +       struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
6554 +       int ret = -ERANGE;
6555 +       ip_set_ip_t ip = ntohl(flags[index] & IPSET_SRC
6556 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
6557 +                                       ? ip_hdr(skb)->saddr
6558 +                                       : ip_hdr(skb)->daddr);
6559 +#else
6560 +                                       ? skb->nh.iph->saddr
6561 +                                       : skb->nh.iph->daddr);
6562 +#endif
6563 +
6564 +       if (map->cidr[0])
6565 +               ret = __addip(map, ip, map->cidr[0], hash_ip);
6566 +
6567 +       return ret;
6568 +}
6569 +
6570 +static int retry(struct ip_set *set)
6571 +{
6572 +       struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
6573 +       ip_set_ip_t *elem;
6574 +       void *members;
6575 +       u_int32_t i, hashsize = map->hashsize;
6576 +       int res;
6577 +       struct ip_set_nethash *tmp;
6578 +
6579 +       if (map->resize == 0)
6580 +               return -ERANGE;
6581 +
6582 +    again:
6583 +       res = 0;
6584 +
6585 +       /* Calculate new parameters */
6586 +       hashsize += (hashsize * map->resize)/100;
6587 +       if (hashsize == map->hashsize)
6588 +               hashsize++;
6589 +
6590 +       ip_set_printk("rehashing of set %s triggered: "
6591 +                     "hashsize grows from %u to %u",
6592 +                     set->name, map->hashsize, hashsize);
6593 +
6594 +       tmp = kmalloc(sizeof(struct ip_set_nethash)
6595 +                     + map->probes * sizeof(uint32_t), GFP_ATOMIC);
6596 +       if (!tmp) {
6597 +               DP("out of memory for %d bytes",
6598 +                  sizeof(struct ip_set_nethash)
6599 +                  + map->probes * sizeof(uint32_t));
6600 +               return -ENOMEM;
6601 +       }
6602 +       tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
6603 +       if (!tmp->members) {
6604 +               DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
6605 +               kfree(tmp);
6606 +               return -ENOMEM;
6607 +       }
6608 +       tmp->hashsize = hashsize;
6609 +       tmp->elements = 0;
6610 +       tmp->probes = map->probes;
6611 +       tmp->resize = map->resize;
6612 +       memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
6613 +       memcpy(tmp->cidr, map->cidr, 30 * sizeof(unsigned char));
6614 +
6615 +       write_lock_bh(&set->lock);
6616 +       map = (struct ip_set_nethash *) set->data; /* Play safe */
6617 +       for (i = 0; i < map->hashsize && res == 0; i++) {
6618 +               elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
6619 +               if (*elem)
6620 +                       res = __addip_base(tmp, *elem);
6621 +       }
6622 +       if (res) {
6623 +               /* Failure, try again */
6624 +               write_unlock_bh(&set->lock);
6625 +               harray_free(tmp->members);
6626 +               kfree(tmp);
6627 +               goto again;
6628 +       }
6629 +
6630 +       /* Success at resizing! */
6631 +       members = map->members;
6632 +
6633 +       map->hashsize = tmp->hashsize;
6634 +       map->members = tmp->members;
6635 +       write_unlock_bh(&set->lock);
6636 +
6637 +       harray_free(members);
6638 +       kfree(tmp);
6639 +
6640 +       return 0;
6641 +}
6642 +
6643 +static inline int
6644 +__delip(struct ip_set_nethash *map, ip_set_ip_t ip, unsigned char cidr,
6645 +       ip_set_ip_t *hash_ip)
6646 +{
6647 +       ip_set_ip_t id, *elem;
6648 +
6649 +       if (!ip)
6650 +               return -ERANGE;
6651 +
6652 +       id = hash_id_cidr(map, ip, cidr, hash_ip);
6653 +       if (id == UINT_MAX)
6654 +               return -EEXIST;
6655 +
6656 +       elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
6657 +       *elem = 0;
6658 +       map->elements--;
6659 +       return 0;
6660 +}
6661 +
6662 +static int
6663 +delip(struct ip_set *set, const void *data, size_t size,
6664 +        ip_set_ip_t *hash_ip)
6665 +{
6666 +       struct ip_set_req_nethash *req =
6667 +           (struct ip_set_req_nethash *) data;
6668 +
6669 +       if (size != sizeof(struct ip_set_req_nethash)) {
6670 +               ip_set_printk("data length wrong (want %zu, have %zu)",
6671 +                             sizeof(struct ip_set_req_nethash),
6672 +                             size);
6673 +               return -EINVAL;
6674 +       }
6675 +       /* TODO: no garbage collection in map->cidr */
6676 +       return __delip((struct ip_set_nethash *) set->data,
6677 +                      req->ip, req->cidr, hash_ip);
6678 +}
6679 +
6680 +static int
6681 +delip_kernel(struct ip_set *set,
6682 +            const struct sk_buff *skb,
6683 +            ip_set_ip_t *hash_ip,
6684 +            const u_int32_t *flags,
6685 +            unsigned char index)
6686 +{
6687 +       struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
6688 +       int ret = -ERANGE;
6689 +       ip_set_ip_t ip = ntohl(flags[index] & IPSET_SRC
6690 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
6691 +                                       ? ip_hdr(skb)->saddr
6692 +                                       : ip_hdr(skb)->daddr);
6693 +#else
6694 +                                       ? skb->nh.iph->saddr
6695 +                                       : skb->nh.iph->daddr);
6696 +#endif
6697 +
6698 +       if (map->cidr[0])
6699 +               ret = __delip(map, ip, map->cidr[0], hash_ip);
6700 +
6701 +       return ret;
6702 +}
6703 +
6704 +static int create(struct ip_set *set, const void *data, size_t size)
6705 +{
6706 +       struct ip_set_req_nethash_create *req =
6707 +           (struct ip_set_req_nethash_create *) data;
6708 +       struct ip_set_nethash *map;
6709 +       uint16_t i;
6710 +
6711 +       if (size != sizeof(struct ip_set_req_nethash_create)) {
6712 +               ip_set_printk("data length wrong (want %zu, have %zu)",
6713 +                              sizeof(struct ip_set_req_nethash_create),
6714 +                              size);
6715 +               return -EINVAL;
6716 +       }
6717 +
6718 +       if (req->hashsize < 1) {
6719 +               ip_set_printk("hashsize too small");
6720 +               return -ENOEXEC;
6721 +       }
6722 +       if (req->probes < 1) {
6723 +               ip_set_printk("probes too small");
6724 +               return -ENOEXEC;
6725 +       }
6726 +
6727 +       map = kmalloc(sizeof(struct ip_set_nethash)
6728 +                     + req->probes * sizeof(uint32_t), GFP_KERNEL);
6729 +       if (!map) {
6730 +               DP("out of memory for %d bytes",
6731 +                  sizeof(struct ip_set_nethash)
6732 +                  + req->probes * sizeof(uint32_t));
6733 +               return -ENOMEM;
6734 +       }
6735 +       for (i = 0; i < req->probes; i++)
6736 +               get_random_bytes(((uint32_t *) map->initval)+i, 4);
6737 +       map->elements = 0;
6738 +       map->hashsize = req->hashsize;
6739 +       map->probes = req->probes;
6740 +       map->resize = req->resize;
6741 +       memset(map->cidr, 0, 30 * sizeof(unsigned char));
6742 +       map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
6743 +       if (!map->members) {
6744 +               DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
6745 +               kfree(map);
6746 +               return -ENOMEM;
6747 +       }
6748 +
6749 +       set->data = map;
6750 +       return 0;
6751 +}
6752 +
6753 +static void destroy(struct ip_set *set)
6754 +{
6755 +       struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
6756 +
6757 +       harray_free(map->members);
6758 +       kfree(map);
6759 +
6760 +       set->data = NULL;
6761 +}
6762 +
6763 +static void flush(struct ip_set *set)
6764 +{
6765 +       struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
6766 +       harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
6767 +       memset(map->cidr, 0, 30 * sizeof(unsigned char));
6768 +       map->elements = 0;
6769 +}
6770 +
6771 +static void list_header(const struct ip_set *set, void *data)
6772 +{
6773 +       struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
6774 +       struct ip_set_req_nethash_create *header =
6775 +           (struct ip_set_req_nethash_create *) data;
6776 +
6777 +       header->hashsize = map->hashsize;
6778 +       header->probes = map->probes;
6779 +       header->resize = map->resize;
6780 +}
6781 +
6782 +static int list_members_size(const struct ip_set *set)
6783 +{
6784 +       struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
6785 +
6786 +       return (map->hashsize * sizeof(ip_set_ip_t));
6787 +}
6788 +
6789 +static void list_members(const struct ip_set *set, void *data)
6790 +{
6791 +       struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
6792 +       ip_set_ip_t i, *elem;
6793 +
6794 +       for (i = 0; i < map->hashsize; i++) {
6795 +               elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
6796 +               ((ip_set_ip_t *)data)[i] = *elem;
6797 +       }
6798 +}
6799 +
6800 +static struct ip_set_type ip_set_nethash = {
6801 +       .typename               = SETTYPE_NAME,
6802 +       .features               = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
6803 +       .protocol_version       = IP_SET_PROTOCOL_VERSION,
6804 +       .create                 = &create,
6805 +       .destroy                = &destroy,
6806 +       .flush                  = &flush,
6807 +       .reqsize                = sizeof(struct ip_set_req_nethash),
6808 +       .addip                  = &addip,
6809 +       .addip_kernel           = &addip_kernel,
6810 +       .retry                  = &retry,
6811 +       .delip                  = &delip,
6812 +       .delip_kernel           = &delip_kernel,
6813 +       .testip                 = &testip,
6814 +       .testip_kernel          = &testip_kernel,
6815 +       .header_size            = sizeof(struct ip_set_req_nethash_create),
6816 +       .list_header            = &list_header,
6817 +       .list_members_size      = &list_members_size,
6818 +       .list_members           = &list_members,
6819 +       .me                     = THIS_MODULE,
6820 +};
6821 +
6822 +MODULE_LICENSE("GPL");
6823 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
6824 +MODULE_DESCRIPTION("nethash type of IP sets");
6825 +module_param(limit, int, 0600);
6826 +MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
6827 +
6828 +static int __init ip_set_nethash_init(void)
6829 +{
6830 +       return ip_set_register_set_type(&ip_set_nethash);
6831 +}
6832 +
6833 +static void __exit ip_set_nethash_fini(void)
6834 +{
6835 +       /* FIXME: possible race with ip_set_create() */
6836 +       ip_set_unregister_set_type(&ip_set_nethash);
6837 +}
6838 +
6839 +module_init(ip_set_nethash_init);
6840 +module_exit(ip_set_nethash_fini);
6841 --- /dev/null
6842 +++ b/net/ipv4/netfilter/ip_set_portmap.c
6843 @@ -0,0 +1,346 @@
6844 +/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
6845 + *
6846 + * This program is free software; you can redistribute it and/or modify
6847 + * it under the terms of the GNU General Public License version 2 as
6848 + * published by the Free Software Foundation.
6849 + */
6850 +
6851 +/* Kernel module implementing a port set type as a bitmap */
6852 +
6853 +#include <linux/module.h>
6854 +#include <linux/ip.h>
6855 +#include <linux/tcp.h>
6856 +#include <linux/udp.h>
6857 +#include <linux/skbuff.h>
6858 +#include <linux/version.h>
6859 +#include <linux/netfilter_ipv4/ip_tables.h>
6860 +#include <linux/netfilter_ipv4/ip_set.h>
6861 +#include <linux/errno.h>
6862 +#include <asm/uaccess.h>
6863 +#include <asm/bitops.h>
6864 +#include <linux/spinlock.h>
6865 +
6866 +#include <net/ip.h>
6867 +
6868 +#include <linux/netfilter_ipv4/ip_set_portmap.h>
6869 +
6870 +/* We must handle non-linear skbs */
6871 +static inline ip_set_ip_t
6872 +get_port(const struct sk_buff *skb, u_int32_t flags)
6873 +{
6874 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
6875 +       struct iphdr *iph = ip_hdr(skb);
6876 +#else
6877 +       struct iphdr *iph = skb->nh.iph;
6878 +#endif
6879 +       u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
6880 +       switch (iph->protocol) {
6881 +       case IPPROTO_TCP: {
6882 +               struct tcphdr tcph;
6883 +
6884 +               /* See comments at tcp_match in ip_tables.c */
6885 +               if (offset)
6886 +                       return INVALID_PORT;
6887 +
6888 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
6889 +               if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &tcph, sizeof(tcph)) < 0)
6890 +#else
6891 +               if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &tcph, sizeof(tcph)) < 0)
6892 +#endif
6893 +                       /* No choice either */
6894 +                       return INVALID_PORT;
6895 +
6896 +               return ntohs(flags & IPSET_SRC ?
6897 +                            tcph.source : tcph.dest);
6898 +           }
6899 +       case IPPROTO_UDP: {
6900 +               struct udphdr udph;
6901 +
6902 +               if (offset)
6903 +                       return INVALID_PORT;
6904 +
6905 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
6906 +               if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &udph, sizeof(udph)) < 0)
6907 +#else
6908 +               if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &udph, sizeof(udph)) < 0)
6909 +#endif
6910 +                       /* No choice either */
6911 +                       return INVALID_PORT;
6912 +
6913 +               return ntohs(flags & IPSET_SRC ?
6914 +                            udph.source : udph.dest);
6915 +           }
6916 +       default:
6917 +               return INVALID_PORT;
6918 +       }
6919 +}
6920 +
6921 +static inline int
6922 +__testport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
6923 +{
6924 +       struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
6925 +
6926 +       if (port < map->first_port || port > map->last_port)
6927 +               return -ERANGE;
6928 +
6929 +       *hash_port = port;
6930 +       DP("set: %s, port:%u, %u", set->name, port, *hash_port);
6931 +       return !!test_bit(port - map->first_port, map->members);
6932 +}
6933 +
6934 +static int
6935 +testport(struct ip_set *set, const void *data, size_t size,
6936 +         ip_set_ip_t *hash_port)
6937 +{
6938 +       struct ip_set_req_portmap *req =
6939 +           (struct ip_set_req_portmap *) data;
6940 +
6941 +       if (size != sizeof(struct ip_set_req_portmap)) {
6942 +               ip_set_printk("data length wrong (want %zu, have %zu)",
6943 +                             sizeof(struct ip_set_req_portmap),
6944 +                             size);
6945 +               return -EINVAL;
6946 +       }
6947 +       return __testport(set, req->port, hash_port);
6948 +}
6949 +
6950 +static int
6951 +testport_kernel(struct ip_set *set,
6952 +               const struct sk_buff *skb,
6953 +               ip_set_ip_t *hash_port,
6954 +               const u_int32_t *flags,
6955 +               unsigned char index)
6956 +{
6957 +       int res;
6958 +       ip_set_ip_t port = get_port(skb, flags[index]);
6959 +
6960 +       DP("flag %s port %u", flags[index] & IPSET_SRC ? "SRC" : "DST", port);
6961 +       if (port == INVALID_PORT)
6962 +               return 0;
6963 +
6964 +       res =  __testport(set, port, hash_port);
6965 +
6966 +       return (res < 0 ? 0 : res);
6967 +}
6968 +
6969 +static inline int
6970 +__addport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
6971 +{
6972 +       struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
6973 +
6974 +       if (port < map->first_port || port > map->last_port)
6975 +               return -ERANGE;
6976 +       if (test_and_set_bit(port - map->first_port, map->members))
6977 +               return -EEXIST;
6978 +
6979 +       *hash_port = port;
6980 +       DP("port %u", port);
6981 +       return 0;
6982 +}
6983 +
6984 +static int
6985 +addport(struct ip_set *set, const void *data, size_t size,
6986 +        ip_set_ip_t *hash_port)
6987 +{
6988 +       struct ip_set_req_portmap *req =
6989 +           (struct ip_set_req_portmap *) data;
6990 +
6991 +       if (size != sizeof(struct ip_set_req_portmap)) {
6992 +               ip_set_printk("data length wrong (want %zu, have %zu)",
6993 +                             sizeof(struct ip_set_req_portmap),
6994 +                             size);
6995 +               return -EINVAL;
6996 +       }
6997 +       return __addport(set, req->port, hash_port);
6998 +}
6999 +
7000 +static int
7001 +addport_kernel(struct ip_set *set,
7002 +              const struct sk_buff *skb,
7003 +              ip_set_ip_t *hash_port,
7004 +              const u_int32_t *flags,
7005 +              unsigned char index)
7006 +{
7007 +       ip_set_ip_t port = get_port(skb, flags[index]);
7008 +
7009 +       if (port == INVALID_PORT)
7010 +               return -EINVAL;
7011 +
7012 +       return __addport(set, port, hash_port);
7013 +}
7014 +
7015 +static inline int
7016 +__delport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
7017 +{
7018 +       struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
7019 +
7020 +       if (port < map->first_port || port > map->last_port)
7021 +               return -ERANGE;
7022 +       if (!test_and_clear_bit(port - map->first_port, map->members))
7023 +               return -EEXIST;
7024 +
7025 +       *hash_port = port;
7026 +       DP("port %u", port);
7027 +       return 0;
7028 +}
7029 +
7030 +static int
7031 +delport(struct ip_set *set, const void *data, size_t size,
7032 +        ip_set_ip_t *hash_port)
7033 +{
7034 +       struct ip_set_req_portmap *req =
7035 +           (struct ip_set_req_portmap *) data;
7036 +
7037 +       if (size != sizeof(struct ip_set_req_portmap)) {
7038 +               ip_set_printk("data length wrong (want %zu, have %zu)",
7039 +                             sizeof(struct ip_set_req_portmap),
7040 +                             size);
7041 +               return -EINVAL;
7042 +       }
7043 +       return __delport(set, req->port, hash_port);
7044 +}
7045 +
7046 +static int
7047 +delport_kernel(struct ip_set *set,
7048 +              const struct sk_buff *skb,
7049 +              ip_set_ip_t *hash_port,
7050 +              const u_int32_t *flags,
7051 +              unsigned char index)
7052 +{
7053 +       ip_set_ip_t port = get_port(skb, flags[index]);
7054 +
7055 +       if (port == INVALID_PORT)
7056 +               return -EINVAL;
7057 +
7058 +       return __delport(set, port, hash_port);
7059 +}
7060 +
7061 +static int create(struct ip_set *set, const void *data, size_t size)
7062 +{
7063 +       int newbytes;
7064 +       struct ip_set_req_portmap_create *req =
7065 +           (struct ip_set_req_portmap_create *) data;
7066 +       struct ip_set_portmap *map;
7067 +
7068 +       if (size != sizeof(struct ip_set_req_portmap_create)) {
7069 +               ip_set_printk("data length wrong (want %zu, have %zu)",
7070 +                              sizeof(struct ip_set_req_portmap_create),
7071 +                              size);
7072 +               return -EINVAL;
7073 +       }
7074 +
7075 +       DP("from %u to %u", req->from, req->to);
7076 +
7077 +       if (req->from > req->to) {
7078 +               DP("bad port range");
7079 +               return -ENOEXEC;
7080 +       }
7081 +
7082 +       if (req->to - req->from > MAX_RANGE) {
7083 +               ip_set_printk("range too big (max %d ports)",
7084 +                              MAX_RANGE+1);
7085 +               return -ENOEXEC;
7086 +       }
7087 +
7088 +       map = kmalloc(sizeof(struct ip_set_portmap), GFP_KERNEL);
7089 +       if (!map) {
7090 +               DP("out of memory for %d bytes",
7091 +                  sizeof(struct ip_set_portmap));
7092 +               return -ENOMEM;
7093 +       }
7094 +       map->first_port = req->from;
7095 +       map->last_port = req->to;
7096 +       newbytes = bitmap_bytes(req->from, req->to);
7097 +       map->members = kmalloc(newbytes, GFP_KERNEL);
7098 +       if (!map->members) {
7099 +               DP("out of memory for %d bytes", newbytes);
7100 +               kfree(map);
7101 +               return -ENOMEM;
7102 +       }
7103 +       memset(map->members, 0, newbytes);
7104 +
7105 +       set->data = map;
7106 +       return 0;
7107 +}
7108 +
7109 +static void destroy(struct ip_set *set)
7110 +{
7111 +       struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
7112 +
7113 +       kfree(map->members);
7114 +       kfree(map);
7115 +
7116 +       set->data = NULL;
7117 +}
7118 +
7119 +static void flush(struct ip_set *set)
7120 +{
7121 +       struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
7122 +       memset(map->members, 0, bitmap_bytes(map->first_port, map->last_port));
7123 +}
7124 +
7125 +static void list_header(const struct ip_set *set, void *data)
7126 +{
7127 +       struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
7128 +       struct ip_set_req_portmap_create *header =
7129 +           (struct ip_set_req_portmap_create *) data;
7130 +
7131 +       DP("list_header %u %u", map->first_port, map->last_port);
7132 +
7133 +       header->from = map->first_port;
7134 +       header->to = map->last_port;
7135 +}
7136 +
7137 +static int list_members_size(const struct ip_set *set)
7138 +{
7139 +       struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
7140 +
7141 +       return bitmap_bytes(map->first_port, map->last_port);
7142 +}
7143 +
7144 +static void list_members(const struct ip_set *set, void *data)
7145 +{
7146 +       struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
7147 +       int bytes = bitmap_bytes(map->first_port, map->last_port);
7148 +
7149 +       memcpy(data, map->members, bytes);
7150 +}
7151 +
7152 +static struct ip_set_type ip_set_portmap = {
7153 +       .typename               = SETTYPE_NAME,
7154 +       .features               = IPSET_TYPE_PORT | IPSET_DATA_SINGLE,
7155 +       .protocol_version       = IP_SET_PROTOCOL_VERSION,
7156 +       .create                 = &create,
7157 +       .destroy                = &destroy,
7158 +       .flush                  = &flush,
7159 +       .reqsize                = sizeof(struct ip_set_req_portmap),
7160 +       .addip                  = &addport,
7161 +       .addip_kernel           = &addport_kernel,
7162 +       .delip                  = &delport,
7163 +       .delip_kernel           = &delport_kernel,
7164 +       .testip                 = &testport,
7165 +       .testip_kernel          = &testport_kernel,
7166 +       .header_size            = sizeof(struct ip_set_req_portmap_create),
7167 +       .list_header            = &list_header,
7168 +       .list_members_size      = &list_members_size,
7169 +       .list_members           = &list_members,
7170 +       .me                     = THIS_MODULE,
7171 +};
7172 +
7173 +MODULE_LICENSE("GPL");
7174 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
7175 +MODULE_DESCRIPTION("portmap type of IP sets");
7176 +
7177 +static int __init ip_set_portmap_init(void)
7178 +{
7179 +       return ip_set_register_set_type(&ip_set_portmap);
7180 +}
7181 +
7182 +static void __exit ip_set_portmap_fini(void)
7183 +{
7184 +       /* FIXME: possible race with ip_set_create() */
7185 +       ip_set_unregister_set_type(&ip_set_portmap);
7186 +}
7187 +
7188 +module_init(ip_set_portmap_init);
7189 +module_exit(ip_set_portmap_fini);
7190 --- /dev/null
7191 +++ b/net/ipv4/netfilter/ipt_set.c
7192 @@ -0,0 +1,160 @@
7193 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
7194 + *                         Patrick Schaaf <bof@bof.de>
7195 + *                         Martin Josefsson <gandalf@wlug.westbo.se>
7196 + * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
7197 + *
7198 + * This program is free software; you can redistribute it and/or modify
7199 + * it under the terms of the GNU General Public License version 2 as
7200 + * published by the Free Software Foundation.
7201 + */
7202 +
7203 +/* Kernel module to match an IP set. */
7204 +
7205 +#include <linux/module.h>
7206 +#include <linux/ip.h>
7207 +#include <linux/skbuff.h>
7208 +#include <linux/version.h>
7209 +
7210 +#include <linux/netfilter_ipv4/ip_tables.h>
7211 +#include <linux/netfilter_ipv4/ip_set.h>
7212 +#include <linux/netfilter_ipv4/ipt_set.h>
7213 +
7214 +static inline int
7215 +match_set(const struct ipt_set_info *info,
7216 +         const struct sk_buff *skb,
7217 +         int inv)
7218 +{
7219 +       if (ip_set_testip_kernel(info->index, skb, info->flags))
7220 +               inv = !inv;
7221 +       return inv;
7222 +}
7223 +
7224 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
7225 +static bool
7226 +#else
7227 +static int
7228 +#endif
7229 +match(const struct sk_buff *skb,
7230 +      const struct net_device *in,
7231 +      const struct net_device *out,
7232 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
7233 +      const struct xt_match *match,
7234 +#endif
7235 +      const void *matchinfo,
7236 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
7237 +      int offset, unsigned int protoff, bool *hotdrop)
7238 +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
7239 +      int offset, unsigned int protoff, int *hotdrop)
7240 +#else
7241 +      int offset, int *hotdrop)
7242 +#endif
7243 +{
7244 +       const struct ipt_set_info_match *info = matchinfo;
7245 +
7246 +       return match_set(&info->match_set,
7247 +                        skb,
7248 +                        info->match_set.flags[0] & IPSET_MATCH_INV);
7249 +}
7250 +
7251 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
7252 +bool
7253 +#else
7254 +static int
7255 +#endif
7256 +checkentry(const char *tablename,
7257 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
7258 +          const void *inf,
7259 +#else
7260 +          const struct ipt_ip *ip,
7261 +#endif
7262 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
7263 +          const struct xt_match *match,
7264 +#endif
7265 +          void *matchinfo,
7266 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7267 +          unsigned int matchsize,
7268 +#endif
7269 +          unsigned int hook_mask)
7270 +{
7271 +       struct ipt_set_info_match *info =
7272 +               (struct ipt_set_info_match *) matchinfo;
7273 +       ip_set_id_t index;
7274 +
7275 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7276 +       if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
7277 +               ip_set_printk("invalid matchsize %d", matchsize);
7278 +               return 0;
7279 +       }
7280 +#endif
7281 +
7282 +       index = ip_set_get_byindex(info->match_set.index);
7283 +
7284 +       if (index == IP_SET_INVALID_ID) {
7285 +               ip_set_printk("Cannot find set identified by id %u to match",
7286 +                             info->match_set.index);
7287 +               return 0;       /* error */
7288 +       }
7289 +       if (info->match_set.flags[IP_SET_MAX_BINDINGS] != 0) {
7290 +               ip_set_printk("That's nasty!");
7291 +               return 0;       /* error */
7292 +       }
7293 +
7294 +       return 1;
7295 +}
7296 +
7297 +static void destroy(
7298 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
7299 +                   const struct xt_match *match,
7300 +#endif
7301 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7302 +                   void *matchinfo, unsigned int matchsize)
7303 +#else
7304 +                   void *matchinfo)
7305 +#endif
7306 +{
7307 +       struct ipt_set_info_match *info = matchinfo;
7308 +
7309 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7310 +       if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
7311 +               ip_set_printk("invalid matchsize %d", matchsize);
7312 +               return;
7313 +       }
7314 +#endif
7315 +       ip_set_put(info->match_set.index);
7316 +}
7317 +
7318 +static struct ipt_match set_match = {
7319 +       .name           = "set",
7320 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
7321 +       .family         = AF_INET,
7322 +#endif
7323 +       .match          = &match,
7324 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
7325 +       .matchsize      = sizeof(struct ipt_set_info_match),
7326 +#endif
7327 +       .checkentry     = &checkentry,
7328 +       .destroy        = &destroy,
7329 +       .me             = THIS_MODULE
7330 +};
7331 +
7332 +MODULE_LICENSE("GPL");
7333 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
7334 +MODULE_DESCRIPTION("iptables IP set match module");
7335 +
7336 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
7337 +#define ipt_register_match     xt_register_match
7338 +#define ipt_unregister_match   xt_unregister_match
7339 +#endif
7340 +
7341 +static int __init ipt_ipset_init(void)
7342 +{
7343 +       return ipt_register_match(&set_match);
7344 +}
7345 +
7346 +static void __exit ipt_ipset_fini(void)
7347 +{
7348 +       ipt_unregister_match(&set_match);
7349 +}
7350 +
7351 +module_init(ipt_ipset_init);
7352 +module_exit(ipt_ipset_fini);
7353 --- /dev/null
7354 +++ b/net/ipv4/netfilter/ipt_SET.c
7355 @@ -0,0 +1,179 @@
7356 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
7357 + *                         Patrick Schaaf <bof@bof.de>
7358 + *                         Martin Josefsson <gandalf@wlug.westbo.se>
7359 + * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
7360 + *
7361 + * This program is free software; you can redistribute it and/or modify
7362 + * it under the terms of the GNU General Public License version 2 as
7363 + * published by the Free Software Foundation.
7364 + */
7365 +
7366 +/* ipt_SET.c - netfilter target to manipulate IP sets */
7367 +
7368 +#include <linux/types.h>
7369 +#include <linux/ip.h>
7370 +#include <linux/timer.h>
7371 +#include <linux/module.h>
7372 +#include <linux/netfilter.h>
7373 +#include <linux/netdevice.h>
7374 +#include <linux/if.h>
7375 +#include <linux/inetdevice.h>
7376 +#include <linux/version.h>
7377 +#include <net/protocol.h>
7378 +#include <net/checksum.h>
7379 +#include <linux/netfilter_ipv4.h>
7380 +#include <linux/netfilter_ipv4/ip_tables.h>
7381 +#include <linux/netfilter_ipv4/ipt_set.h>
7382 +
7383 +static unsigned int
7384 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
7385 +target(struct sk_buff *skb,
7386 +#else
7387 +target(struct sk_buff **pskb,
7388 +#endif
7389 +       const struct net_device *in,
7390 +       const struct net_device *out,
7391 +       unsigned int hooknum,
7392 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
7393 +       const struct xt_target *target,
7394 +#endif
7395 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7396 +       const void *targinfo,
7397 +       void *userinfo)
7398 +#else
7399 +       const void *targinfo)
7400 +#endif
7401 +{
7402 +       const struct ipt_set_info_target *info = targinfo;
7403 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
7404 +       struct sk_buff *skb = *pskb;
7405 +#endif
7406 +
7407 +       if (info->add_set.index != IP_SET_INVALID_ID)
7408 +               ip_set_addip_kernel(info->add_set.index,
7409 +                                   skb,
7410 +                                   info->add_set.flags);
7411 +       if (info->del_set.index != IP_SET_INVALID_ID)
7412 +               ip_set_delip_kernel(info->del_set.index,
7413 +                                   skb,
7414 +                                   info->del_set.flags);
7415 +
7416 +       return IPT_CONTINUE;
7417 +}
7418 +
7419 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
7420 +static bool
7421 +#else
7422 +static int
7423 +#endif
7424 +checkentry(const char *tablename,
7425 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
7426 +          const void *e,
7427 +#else
7428 +          const struct ipt_entry *e,
7429 +#endif
7430 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
7431 +          const struct xt_target *target,
7432 +#endif
7433 +          void *targinfo,
7434 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7435 +          unsigned int targinfosize,
7436 +#endif
7437 +          unsigned int hook_mask)
7438 +{
7439 +       struct ipt_set_info_target *info =
7440 +               (struct ipt_set_info_target *) targinfo;
7441 +       ip_set_id_t index;
7442 +
7443 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7444 +       if (targinfosize != IPT_ALIGN(sizeof(*info))) {
7445 +               DP("bad target info size %u", targinfosize);
7446 +               return 0;
7447 +       }
7448 +#endif
7449 +
7450 +       if (info->add_set.index != IP_SET_INVALID_ID) {
7451 +               index = ip_set_get_byindex(info->add_set.index);
7452 +               if (index == IP_SET_INVALID_ID) {
7453 +                       ip_set_printk("cannot find add_set index %u as target",
7454 +                                     info->add_set.index);
7455 +                       return 0;       /* error */
7456 +               }
7457 +       }
7458 +
7459 +       if (info->del_set.index != IP_SET_INVALID_ID) {
7460 +               index = ip_set_get_byindex(info->del_set.index);
7461 +               if (index == IP_SET_INVALID_ID) {
7462 +                       ip_set_printk("cannot find del_set index %u as target",
7463 +                                     info->del_set.index);
7464 +                       return 0;       /* error */
7465 +               }
7466 +       }
7467 +       if (info->add_set.flags[IP_SET_MAX_BINDINGS] != 0
7468 +           || info->del_set.flags[IP_SET_MAX_BINDINGS] != 0) {
7469 +               ip_set_printk("That's nasty!");
7470 +               return 0;       /* error */
7471 +       }
7472 +
7473 +       return 1;
7474 +}
7475 +
7476 +static void destroy(
7477 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
7478 +                   const struct xt_target *target,
7479 +#endif
7480 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7481 +                   void *targetinfo, unsigned int targetsize)
7482 +#else
7483 +                   void *targetinfo)
7484 +#endif
7485 +{
7486 +       struct ipt_set_info_target *info = targetinfo;
7487 +
7488 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7489 +       if (targetsize != IPT_ALIGN(sizeof(struct ipt_set_info_target))) {
7490 +               ip_set_printk("invalid targetsize %d", targetsize);
7491 +               return;
7492 +       }
7493 +#endif
7494 +       if (info->add_set.index != IP_SET_INVALID_ID)
7495 +               ip_set_put(info->add_set.index);
7496 +       if (info->del_set.index != IP_SET_INVALID_ID)
7497 +               ip_set_put(info->del_set.index);
7498 +}
7499 +
7500 +static struct ipt_target SET_target = {
7501 +       .name           = "SET",
7502 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
7503 +       .family         = AF_INET,
7504 +#endif
7505 +       .target         = target,
7506 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
7507 +       .targetsize     = sizeof(struct ipt_set_info_target),
7508 +#endif
7509 +       .checkentry     = checkentry,
7510 +       .destroy        = destroy,
7511 +       .me             = THIS_MODULE
7512 +};
7513 +
7514 +MODULE_LICENSE("GPL");
7515 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
7516 +MODULE_DESCRIPTION("iptables IP set target module");
7517 +
7518 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
7519 +#define ipt_register_target      xt_register_target
7520 +#define ipt_unregister_target    xt_unregister_target
7521 +#endif
7522 +
7523 +static int __init ipt_SET_init(void)
7524 +{
7525 +       return ipt_register_target(&SET_target);
7526 +}
7527 +
7528 +static void __exit ipt_SET_fini(void)
7529 +{
7530 +       ipt_unregister_target(&SET_target);
7531 +}
7532 +
7533 +module_init(ipt_SET_init);
7534 +module_exit(ipt_SET_fini);
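
For reference, the target() and checkentry() routines above dereference the ipt_set_info_target structure that this patch declares in its ipt_set.h header. A minimal sketch of that layout, assuming the stock ipset 2.x definitions (the header added by this patch is authoritative):

	struct ipt_set_info {
		ip_set_id_t index;                        /* set id looked up by checkentry() */
		u_int32_t flags[IP_SET_MAX_BINDINGS + 1]; /* IPSET_SRC/IPSET_DST per binding level;
		                                             the last slot must remain 0 */
	};

	/* payload of -j SET --add-set / --del-set */
	struct ipt_set_info_target {
		struct ipt_set_info add_set;
		struct ipt_set_info del_set;
	};

This is why checkentry() rejects a rule whose flags[IP_SET_MAX_BINDINGS] is non-zero, and why it looks up each referenced set with ip_set_get_byindex() while destroy() releases the reference again through ip_set_put().
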
7535 --- a/net/ipv4/netfilter/Kconfig
7536 +++ b/net/ipv4/netfilter/Kconfig
7537 @@ -394,5 +394,122 @@ config IP_NF_ARP_MANGLE
7538  
7539  endif # IP_NF_ARPTABLES
7540  
7541 +config IP_NF_SET
7542 +       tristate "IP set support"
7543 +       depends on INET && NETFILTER
7544 +       help
7545 +         This option adds IP set support to the kernel.
7546 +         In order to define and use sets, you need the userspace utility
7547 +         ipset(8).
7548 +
7549 +         To compile it as a module, choose M here.  If unsure, say N.
7550 +
7551 +config IP_NF_SET_MAX
7552 +       int "Maximum number of IP sets"
7553 +       default 256
7554 +       range 2 65534
7555 +       depends on IP_NF_SET
7556 +       help
7557 +         You can define here the default value of the maximum
7558 +         number of IP sets for the kernel.
7559 +
7560 +         The value can be overridden by the 'max_sets' module
7561 +         parameter of the 'ip_set' module.
7562 +
7563 +config IP_NF_SET_HASHSIZE
7564 +       int "Hash size for bindings of IP sets"
7565 +       default 1024
7566 +       depends on IP_NF_SET
7567 +       help
7568 +         You can define here the default value of the hash size
7569 +         for bindings of IP sets.
7570 +
7571 +         The value can be overridden by the 'hash_size' module
7572 +         parameter of the 'ip_set' module.
7573 +
7574 +config IP_NF_SET_IPMAP
7575 +       tristate "ipmap set support"
7576 +       depends on IP_NF_SET
7577 +       help
7578 +         This option adds the ipmap set type support.
7579 +
7580 +         To compile it as a module, choose M here.  If unsure, say N.
7581 +
7582 +config IP_NF_SET_MACIPMAP
7583 +       tristate "macipmap set support"
7584 +       depends on IP_NF_SET
7585 +       help
7586 +         This option adds the macipmap set type support.
7587 +
7588 +         To compile it as a module, choose M here.  If unsure, say N.
7589 +
7590 +config IP_NF_SET_PORTMAP
7591 +       tristate "portmap set support"
7592 +       depends on IP_NF_SET
7593 +       help
7594 +         This option adds the portmap set type support.
7595 +
7596 +         To compile it as a module, choose M here.  If unsure, say N.
7597 +
7598 +config IP_NF_SET_IPHASH
7599 +       tristate "iphash set support"
7600 +       depends on IP_NF_SET
7601 +       help
7602 +         This option adds the iphash set type support.
7603 +
7604 +         To compile it as a module, choose M here.  If unsure, say N.
7605 +
7606 +config IP_NF_SET_NETHASH
7607 +       tristate "nethash set support"
7608 +       depends on IP_NF_SET
7609 +       help
7610 +         This option adds the nethash set type support.
7611 +
7612 +         To compile it as a module, choose M here.  If unsure, say N.
7613 +
7614 +config IP_NF_SET_IPPORTHASH
7615 +       tristate "ipporthash set support"
7616 +       depends on IP_NF_SET
7617 +       help
7618 +         This option adds the ipporthash set type support.
7619 +
7620 +         To compile it as a module, choose M here.  If unsure, say N.
7621 +
7622 +config IP_NF_SET_IPTREE
7623 +       tristate "iptree set support"
7624 +       depends on IP_NF_SET
7625 +       help
7626 +         This option adds the iptree set type support.
7627 +
7628 +         To compile it as a module, choose M here.  If unsure, say N.
7629 +
7630 +config IP_NF_SET_IPTREEMAP
7631 +       tristate "iptreemap set support"
7632 +       depends on IP_NF_SET
7633 +       help
7634 +         This option adds the iptreemap set type support.
7635 +
7636 +         To compile it as a module, choose M here.  If unsure, say N.
7637 +
7638 +config IP_NF_MATCH_SET
7639 +       tristate "set match support"
7640 +       depends on IP_NF_SET
7641 +       help
7642 +         Set matching matches packets against the given IP sets.
7643 +         You need the ipset utility to create and set up the sets.
7644 +
7645 +         To compile it as a module, choose M here.  If unsure, say N.
7646 +
7647 +config IP_NF_TARGET_SET
7648 +       tristate "SET target support"
7649 +       depends on IP_NF_SET
7650 +       help
7651 +         The SET target makes it possible to add/delete entries
7652 +         in IP sets.
7653 +         You need the ipset utility to create and set up the sets.
7654 +
7655 +         To compile it as a module, choose M here.  If unsure, say N.
7656 +
7657 +
7658  endmenu
7659  
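
As the IP_NF_SET_MAX and IP_NF_SET_HASHSIZE help texts above note, the compiled-in defaults can be overridden at load time via the 'max_sets' and 'hash_size' parameters of the ip_set module. A minimal sketch of how a Kconfig default is typically wired to such a module parameter (illustrative only; the actual declarations in this patch's ip_set.c may differ):

	#include <linux/module.h>
	#include <linux/moduleparam.h>

	/* default comes from Kconfig; can be overridden at module load time */
	static int max_sets = CONFIG_IP_NF_SET_MAX;
	module_param(max_sets, int, 0600);
	MODULE_PARM_DESC(max_sets, "maximal number of sets");

	static int hash_size = CONFIG_IP_NF_SET_HASHSIZE;
	module_param(hash_size, int, 0600);
	MODULE_PARM_DESC(hash_size, "hash size for bindings");

Loading the module with e.g. 'modprobe ip_set max_sets=1024 hash_size=4096' would then take precedence over the values configured here.
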
7660 --- a/net/ipv4/netfilter/Makefile
7661 +++ b/net/ipv4/netfilter/Makefile
7662 @@ -52,6 +52,7 @@ obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ip
7663  obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o
7664  obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn.o
7665  obj-$(CONFIG_IP_NF_MATCH_TTL) += ipt_ttl.o
7666 +obj-$(CONFIG_IP_NF_MATCH_SET) += ipt_set.o
7667  
7668  # targets
7669  obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
7670 @@ -63,6 +64,18 @@ obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += i
7671  obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
7672  obj-$(CONFIG_IP_NF_TARGET_TTL) += ipt_TTL.o
7673  obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
7674 +obj-$(CONFIG_IP_NF_TARGET_SET) += ipt_SET.o
7675 +
7676 +# sets
7677 +obj-$(CONFIG_IP_NF_SET) += ip_set.o
7678 +obj-$(CONFIG_IP_NF_SET_IPMAP) += ip_set_ipmap.o
7679 +obj-$(CONFIG_IP_NF_SET_PORTMAP) += ip_set_portmap.o
7680 +obj-$(CONFIG_IP_NF_SET_MACIPMAP) += ip_set_macipmap.o
7681 +obj-$(CONFIG_IP_NF_SET_IPHASH) += ip_set_iphash.o
7682 +obj-$(CONFIG_IP_NF_SET_NETHASH) += ip_set_nethash.o
7683 +obj-$(CONFIG_IP_NF_SET_IPPORTHASH) += ip_set_ipporthash.o
7684 +obj-$(CONFIG_IP_NF_SET_IPTREE) += ip_set_iptree.o
7685 +obj-$(CONFIG_IP_NF_SET_IPTREEMAP) += ip_set_iptreemap.o
7686  
7687  # generic ARP tables
7688  obj-$(CONFIG_IP_NF_ARPTABLES) += arp_tables.o