[ipset] Update ipset to version 3.2
[openwrt.git] / target / linux / generic-2.6 / patches-2.6.30 / 130-netfilter_ipset.patch
1 --- a/include/linux/netfilter_ipv4/Kbuild
2 +++ b/include/linux/netfilter_ipv4/Kbuild
3 @@ -45,3 +45,20 @@ header-y += ipt_ttl.h
4  
5  unifdef-y += ip_queue.h
6  unifdef-y += ip_tables.h
7 +
8 +unifdef-y += ip_set.h
9 +header-y  += ip_set_iphash.h
10 +unifdef-y += ip_set_bitmaps.h
11 +unifdef-y += ip_set_getport.h
12 +unifdef-y += ip_set_hashes.h
13 +header-y  += ip_set_ipmap.h
14 +header-y  += ip_set_ipporthash.h
15 +header-y  += ip_set_ipportiphash.h
16 +header-y  += ip_set_ipportnethash.h
17 +unifdef-y += ip_set_iptree.h
18 +unifdef-y += ip_set_iptreemap.h
19 +header-y  += ip_set_jhash.h
20 +header-y  += ip_set_macipmap.h
21 +header-y  += ip_set_nethash.h
22 +header-y  += ip_set_portmap.h
23 +header-y  += ip_set_setlist.h
24 --- /dev/null
25 +++ b/include/linux/netfilter_ipv4/ip_set.h
26 @@ -0,0 +1,574 @@
27 +#ifndef _IP_SET_H
28 +#define _IP_SET_H
29 +
30 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
31 + *                         Patrick Schaaf <bof@bof.de>
32 + *                         Martin Josefsson <gandalf@wlug.westbo.se>
33 + * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
34 + *
35 + * This program is free software; you can redistribute it and/or modify
36 + * it under the terms of the GNU General Public License version 2 as
37 + * published by the Free Software Foundation.  
38 + */
39 +
40 +#if 0
41 +#define IP_SET_DEBUG
42 +#endif
43 +
44 +/*
45 + * A sockopt of such quality has hardly ever been seen before on the open
46 + * market!  This little beauty, hardly ever used: above 64, so it's
47 + * traditionally used for firewalling, not touched (even once!) by the
48 + * 2.0, 2.2 and 2.4 kernels!
49 + *
50 + * Comes with its own certificate of authenticity, valid anywhere in the
51 + * Free world!
52 + *
53 + * Rusty, 19.4.2000
54 + */
55 +#define SO_IP_SET              83
56 +
57 +/*
58 + * Heavily modified by Joakim Axelsson 08.03.2002
59 + * - Made it more module-based
60 + *
61 + * Additional heavy modifications by Jozsef Kadlecsik 22.02.2004
62 + * - bindings added
63 + * - in order to "deal with" backward compatibility, renamed to ipset
64 + */
65 +
66 +/* 
67 + * Used so that the kernel module and ipset-binary can match their versions 
68 + */
69 +#define IP_SET_PROTOCOL_VERSION 3
70 +
71 +#define IP_SET_MAXNAMELEN 32   /* set names and set typenames */
72 +
73 +/* Let's work with our own typedef for representing an IP address.
74 + * We hope to make the code more portable, possibly to IPv6...
75 + *
76 + * The representation works in HOST byte order, because most set types
77 + * will perform arithmetic operations and compare operations.
78 + * 
79 + * For now the type is an uint32_t.
80 + *
81 + * Make sure to ONLY use the functions when translating and parsing
82 + * in order to keep the host byte order and make it more portable:
83 + *  parse_ip()
84 + *  parse_mask()
85 + *  parse_ipandmask()
86 + *  ip_tostring()
87 + * (Joakim: where are they???)
88 + */
89 +
90 +typedef uint32_t ip_set_ip_t;
91 +
92 +/* Sets are identified by an id in kernel space. Tweak with ip_set_id_t
93 + * and IP_SET_INVALID_ID if you want to increase the max number of sets.
94 + */
95 +typedef uint16_t ip_set_id_t;
96 +
97 +#define IP_SET_INVALID_ID      65535
98 +
99 +/* How deep we follow bindings */
100 +#define IP_SET_MAX_BINDINGS    6
101 +
102 +/*
103 + * Option flags for kernel operations (ipt_set_info)
104 + */
105 +#define IPSET_SRC              0x01    /* Source match/add */
106 +#define IPSET_DST              0x02    /* Destination match/add */
107 +#define IPSET_MATCH_INV                0x04    /* Inverse matching */
108 +
109 +/*
110 + * Set features
111 + */
112 +#define IPSET_TYPE_IP          0x01    /* IP address type of set */
113 +#define IPSET_TYPE_PORT                0x02    /* Port type of set */
114 +#define IPSET_DATA_SINGLE      0x04    /* Single data storage */
115 +#define IPSET_DATA_DOUBLE      0x08    /* Double data storage */
116 +#define IPSET_DATA_TRIPLE      0x10    /* Triple data storage */
117 +#define IPSET_TYPE_IP1         0x20    /* IP address type of set */
118 +#define IPSET_TYPE_SETNAME     0x40    /* setname type of set */
119 +
120 +/* Reserved keywords */
121 +#define IPSET_TOKEN_DEFAULT    ":default:"
122 +#define IPSET_TOKEN_ALL                ":all:"
123 +
124 +/* SO_IP_SET operation constants, and their request struct types.
125 + *
126 + * Operation ids:
127 + *       0-99:  commands with version checking
128 + *     100-199: add/del/test/bind/unbind
129 + *     200-299: list, save, restore
130 + */
131 +
132 +/* Single shot operations: 
133 + * version, create, destroy, flush, rename and swap 
134 + *
135 + * Sets are identified by name.
136 + */
137 +
138 +#define IP_SET_REQ_STD         \
139 +       unsigned op;            \
140 +       unsigned version;       \
141 +       char name[IP_SET_MAXNAMELEN]
142 +
143 +#define IP_SET_OP_CREATE       0x00000001      /* Create a new (empty) set */
144 +struct ip_set_req_create {
145 +       IP_SET_REQ_STD;
146 +       char typename[IP_SET_MAXNAMELEN];
147 +};
148 +
149 +#define IP_SET_OP_DESTROY      0x00000002      /* Remove an (empty) set */
150 +struct ip_set_req_std {
151 +       IP_SET_REQ_STD;
152 +};
153 +
154 +#define IP_SET_OP_FLUSH                0x00000003      /* Remove all IPs in a set */
155 +/* Uses ip_set_req_std */
156 +
157 +#define IP_SET_OP_RENAME       0x00000004      /* Rename a set */
158 +/* Uses ip_set_req_create */
159 +
160 +#define IP_SET_OP_SWAP         0x00000005      /* Swap two sets */
161 +/* Uses ip_set_req_create */
162 +
163 +union ip_set_name_index {
164 +       char name[IP_SET_MAXNAMELEN];
165 +       ip_set_id_t index;
166 +};
167 +
168 +#define IP_SET_OP_GET_BYNAME   0x00000006      /* Get set index by name */
169 +struct ip_set_req_get_set {
170 +       unsigned op;
171 +       unsigned version;
172 +       union ip_set_name_index set;
173 +};
174 +
175 +#define IP_SET_OP_GET_BYINDEX  0x00000007      /* Get set name by index */
176 +/* Uses ip_set_req_get_set */
177 +
178 +#define IP_SET_OP_VERSION      0x00000100      /* Ask kernel version */
179 +struct ip_set_req_version {
180 +       unsigned op;
181 +       unsigned version;
182 +};
183 +
184 +/* Double shot operations:
185 + * add, del, test, bind and unbind.
186 + *
187 + * First we query the kernel to get the index and type of the target set,
188 + * then issue the command. The validity of the IP is checked in the kernel
189 + * in order to minimize sockopt operations.
190 + */
191 +
192 +/* Get minimal set data for add/del/test/bind/unbind IP */
193 +#define IP_SET_OP_ADT_GET      0x00000010      /* Get set and type */
194 +struct ip_set_req_adt_get {
195 +       unsigned op;
196 +       unsigned version;
197 +       union ip_set_name_index set;
198 +       char typename[IP_SET_MAXNAMELEN];
199 +};
200 +
201 +#define IP_SET_REQ_BYINDEX     \
202 +       unsigned op;            \
203 +       ip_set_id_t index;
204 +
205 +struct ip_set_req_adt {
206 +       IP_SET_REQ_BYINDEX;
207 +};
208 +
209 +#define IP_SET_OP_ADD_IP       0x00000101      /* Add an IP to a set */
210 +/* Uses ip_set_req_adt, with type specific addage */
211 +
212 +#define IP_SET_OP_DEL_IP       0x00000102      /* Remove an IP from a set */
213 +/* Uses ip_set_req_adt, with type specific addage */
214 +
215 +#define IP_SET_OP_TEST_IP      0x00000103      /* Test an IP in a set */
216 +/* Uses ip_set_req_adt, with type specific addage */
217 +
218 +#define IP_SET_OP_BIND_SET     0x00000104      /* Bind an IP to a set */
219 +/* Uses ip_set_req_bind, with type specific addage */
220 +struct ip_set_req_bind {
221 +       IP_SET_REQ_BYINDEX;
222 +       char binding[IP_SET_MAXNAMELEN];
223 +};
224 +
225 +#define IP_SET_OP_UNBIND_SET   0x00000105      /* Unbind an IP from a set */
226 +/* Uses ip_set_req_bind, with type specific addage
227 + * index = 0 means unbinding for all sets */
228 +
229 +#define IP_SET_OP_TEST_BIND_SET        0x00000106      /* Test binding an IP to a set */
230 +/* Uses ip_set_req_bind, with type specific addage */
231 +
232 +/* Multiple shot operations: list, save, restore.
233 + *
234 + * - check kernel version and query the max number of sets
235 + * - get the basic information on all sets
236 + *   and size required for the next step
237 + * - get actual set data: header, data, bindings
238 + */
239 +
240 +/* Get max_sets and the index of a queried set
241 + */
242 +#define IP_SET_OP_MAX_SETS     0x00000020
243 +struct ip_set_req_max_sets {
244 +       unsigned op;
245 +       unsigned version;
246 +       ip_set_id_t max_sets;           /* max_sets */
247 +       ip_set_id_t sets;               /* real number of sets */
248 +       union ip_set_name_index set;    /* index of set if name used */
249 +};
250 +
251 +/* Get the id and name of the sets plus size for next step */
252 +#define IP_SET_OP_LIST_SIZE    0x00000201
253 +#define IP_SET_OP_SAVE_SIZE    0x00000202
254 +struct ip_set_req_setnames {
255 +       unsigned op;
256 +       ip_set_id_t index;              /* set to list/save */
257 +       u_int32_t size;                 /* size to get setdata/bindings */
258 +       /* followed by sets number of struct ip_set_name_list */
259 +};
260 +
261 +struct ip_set_name_list {
262 +       char name[IP_SET_MAXNAMELEN];
263 +       char typename[IP_SET_MAXNAMELEN];
264 +       ip_set_id_t index;
265 +       ip_set_id_t id;
266 +};
267 +
268 +/* The actual list operation */
269 +#define IP_SET_OP_LIST         0x00000203
270 +struct ip_set_req_list {
271 +       IP_SET_REQ_BYINDEX;
272 +       /* sets number of struct ip_set_list in reply */ 
273 +};
274 +
275 +struct ip_set_list {
276 +       ip_set_id_t index;
277 +       ip_set_id_t binding;
278 +       u_int32_t ref;
279 +       u_int32_t header_size;  /* Set header data of header_size */
280 +       u_int32_t members_size; /* Set members data of members_size */
281 +       u_int32_t bindings_size;/* Set bindings data of bindings_size */
282 +};
283 +
284 +struct ip_set_hash_list {
285 +       ip_set_ip_t ip;
286 +       ip_set_id_t binding;
287 +};
288 +
289 +/* The save operation */
290 +#define IP_SET_OP_SAVE         0x00000204
291 +/* Uses ip_set_req_list, in the reply replaced by
292 + * sets number of struct ip_set_save plus a marker
293 + * ip_set_save followed by ip_set_hash_save structures.
294 + */
295 +struct ip_set_save {
296 +       ip_set_id_t index;
297 +       ip_set_id_t binding;
298 +       u_int32_t header_size;  /* Set header data of header_size */
299 +       u_int32_t members_size; /* Set members data of members_size */
300 +};
301 +
302 +/* At restoring, ip == 0 means default binding for the given set: */
303 +struct ip_set_hash_save {
304 +       ip_set_ip_t ip;
305 +       ip_set_id_t id;
306 +       ip_set_id_t binding;
307 +};
308 +
309 +/* The restore operation */
310 +#define IP_SET_OP_RESTORE      0x00000205
311 +/* Uses ip_set_req_setnames followed by ip_set_restore structures
312 + * plus a marker ip_set_restore, followed by ip_set_hash_save 
313 + * structures.
314 + */
315 +struct ip_set_restore {
316 +       char name[IP_SET_MAXNAMELEN];
317 +       char typename[IP_SET_MAXNAMELEN];
318 +       ip_set_id_t index;
319 +       u_int32_t header_size;  /* Create data of header_size */
320 +       u_int32_t members_size; /* Set members data of members_size */
321 +};
322 +
323 +static inline int bitmap_bytes(ip_set_ip_t a, ip_set_ip_t b)
324 +{
325 +       return 4 * ((((b - a + 8) / 8) + 3) / 4);
326 +}
327 +
328 +/* General limit for the elements in a set */
329 +#define MAX_RANGE 0x0000FFFF
330 +
331 +#ifdef __KERNEL__
332 +#include <linux/netfilter_ipv4/ip_set_compat.h>
333 +#include <linux/netfilter_ipv4/ip_set_malloc.h>
334 +
335 +#define ip_set_printk(format, args...)                         \
336 +       do {                                                    \
337 +               printk("%s: %s: ", __FILE__, __FUNCTION__);     \
338 +               printk(format "\n" , ## args);                  \
339 +       } while (0)
340 +
341 +#if defined(IP_SET_DEBUG)
342 +#define DP(format, args...)                                    \
343 +       do {                                                    \
344 +               printk("%s: %s (DBG): ", __FILE__, __FUNCTION__);\
345 +               printk(format "\n" , ## args);                  \
346 +       } while (0)
347 +#define IP_SET_ASSERT(x)                                       \
348 +       do {                                                    \
349 +               if (!(x))                                       \
350 +                       printk("IP_SET_ASSERT: %s:%i(%s)\n",    \
351 +                               __FILE__, __LINE__, __FUNCTION__); \
352 +       } while (0)
353 +#else
354 +#define DP(format, args...)
355 +#define IP_SET_ASSERT(x)
356 +#endif
357 +
358 +struct ip_set;
359 +
360 +/*
361 + * The ip_set_type definition - one per set type, e.g. "ipmap".
362 + *
363 + * Each individual set has a pointer, set->type, going to one
364 + * of these structures. Function pointers inside the structure implement
365 + * the real behaviour of the sets.
366 + *
367 + * If not mentioned differently, the implementation behind the function
368 + * pointers of a set_type, is expected to return 0 if ok, and a negative
369 + * errno (e.g. -EINVAL) on error.
370 + */
371 +struct ip_set_type {
372 +       struct list_head list;  /* next in list of set types */
373 +
374 +       /* test for IP in set (kernel: iptables -m set src|dst)
375 +        * return 0 if not in set, 1 if in set.
376 +        */
377 +       int (*testip_kernel) (struct ip_set *set,
378 +                             const struct sk_buff * skb, 
379 +                             ip_set_ip_t *ip,
380 +                             const u_int32_t *flags,
381 +                             unsigned char index);
382 +
383 +       /* test for IP in set (userspace: ipset -T set IP)
384 +        * return 0 if not in set, 1 if in set.
385 +        */
386 +       int (*testip) (struct ip_set *set,
387 +                      const void *data, u_int32_t size,
388 +                      ip_set_ip_t *ip);
389 +
390 +       /*
391 +        * Size of the data structure passed in when
392 +        * adding/deleting/testing an entry.
393 +        */
394 +       u_int32_t reqsize;
395 +
396 +       /* Add IP into set (userspace: ipset -A set IP)
397 +        * Return -EEXIST if the address is already in the set,
398 +        * and -ERANGE if the address lies outside the set bounds.
399 +        * If the address was not already in the set, 0 is returned.
400 +        */
401 +       int (*addip) (struct ip_set *set, 
402 +                     const void *data, u_int32_t size,
403 +                     ip_set_ip_t *ip);
404 +
405 +       /* Add IP into set (kernel: iptables ... -j SET set src|dst)
406 +        * Return -EEXIST if the address is already in the set,
407 +        * and -ERANGE if the address lies outside the set bounds.
408 +        * If the address was not already in the set, 0 is returned.
409 +        */
410 +       int (*addip_kernel) (struct ip_set *set,
411 +                            const struct sk_buff * skb, 
412 +                            ip_set_ip_t *ip,
413 +                            const u_int32_t *flags,
414 +                            unsigned char index);
415 +
416 +       /* remove IP from set (userspace: ipset -D set --entry x)
417 +        * Return -EEXIST if the address is NOT in the set,
418 +        * and -ERANGE if the address lies outside the set bounds.
419 +        * If the address really was in the set, 0 is returned.
420 +        */
421 +       int (*delip) (struct ip_set *set, 
422 +                     const void *data, u_int32_t size,
423 +                     ip_set_ip_t *ip);
424 +
425 +       /* remove IP from set (kernel: iptables ... -j SET --entry x)
426 +        * Return -EEXIST if the address is NOT in the set,
427 +        * and -ERANGE if the address lies outside the set bounds.
428 +        * If the address really was in the set, 0 is returned.
429 +        */
430 +       int (*delip_kernel) (struct ip_set *set,
431 +                            const struct sk_buff * skb, 
432 +                            ip_set_ip_t *ip,
433 +                            const u_int32_t *flags,
434 +                            unsigned char index);
435 +
436 +       /* new set creation - allocate type specific items
437 +        */
438 +       int (*create) (struct ip_set *set,
439 +                      const void *data, u_int32_t size);
440 +
441 +       /* retry the operation after successfully tweaking the set
442 +        */
443 +       int (*retry) (struct ip_set *set);
444 +
445 +       /* set destruction - free type specific items
446 +        * There is no return value.
447 +        * Can be called only when child sets are destroyed.
448 +        */
449 +       void (*destroy) (struct ip_set *set);
450 +
451 +       /* set flushing - reset all bits in the set, or something similar.
452 +        * There is no return value.
453 +        */
454 +       void (*flush) (struct ip_set *set);
455 +
456 +       /* Listing: size needed for header
457 +        */
458 +       u_int32_t header_size;
459 +
460 +       /* Listing: Get the header
461 +        *
462 +        * Fill in the information in "data".
463 +        * This function is always run after list_header_size() under a 
464 +        * writelock on the set. Therefore the length of "data" is always
465 +        * correct.
466 +        */
467 +       void (*list_header) (const struct ip_set *set, 
468 +                            void *data);
469 +
470 +       /* Listing: Get the size for the set members
471 +        */
472 +       int (*list_members_size) (const struct ip_set *set);
473 +
474 +       /* Listing: Get the set members
475 +        *
476 +        * Fill in the information in "data".
477 +        * This function is always run after list_member_size() under a 
478 +        * writelock on the set. Therefore the length of "data" is always
479 +        * correct.
480 +        */
481 +       void (*list_members) (const struct ip_set *set,
482 +                             void *data);
483 +
484 +       char typename[IP_SET_MAXNAMELEN];
485 +       unsigned char features;
486 +       int protocol_version;
487 +
488 +       /* Set this to THIS_MODULE if you are a module, otherwise NULL */
489 +       struct module *me;
490 +};
491 +
492 +extern int ip_set_register_set_type(struct ip_set_type *set_type);
493 +extern void ip_set_unregister_set_type(struct ip_set_type *set_type);
494 +
495 +/* A generic ipset */
496 +struct ip_set {
497 +       char name[IP_SET_MAXNAMELEN];   /* the name of the set */
498 +       rwlock_t lock;                  /* lock for concurrency control */
499 +       ip_set_id_t id;                 /* set id for swapping */
500 +       ip_set_id_t binding;            /* default binding for the set */
501 +       atomic_t ref;                   /* in kernel and in hash references */
502 +       struct ip_set_type *type;       /* the set types */
503 +       void *data;                     /* pooltype specific data */
504 +};
505 +
506 +/* Structure to bind set elements to sets */
507 +struct ip_set_hash {
508 +       struct list_head list;          /* list of clashing entries in hash */
509 +       ip_set_ip_t ip;                 /* ip from set */
510 +       ip_set_id_t id;                 /* set id */
511 +       ip_set_id_t binding;            /* set we bind the element to */
512 +};
513 +
514 +/* register and unregister set references */
515 +extern ip_set_id_t ip_set_get_byname(const char name[IP_SET_MAXNAMELEN]);
516 +extern ip_set_id_t ip_set_get_byindex(ip_set_id_t index);
517 +extern void ip_set_put_byindex(ip_set_id_t index);
518 +extern ip_set_id_t ip_set_id(ip_set_id_t index);
519 +extern ip_set_id_t __ip_set_get_byname(const char name[IP_SET_MAXNAMELEN],
520 +                                      struct ip_set **set);
521 +extern void __ip_set_put_byindex(ip_set_id_t index);
522 +
523 +/* API for iptables set match, and SET target */
524 +extern int ip_set_addip_kernel(ip_set_id_t id,
525 +                              const struct sk_buff *skb,
526 +                              const u_int32_t *flags);
527 +extern int ip_set_delip_kernel(ip_set_id_t id,
528 +                              const struct sk_buff *skb,
529 +                              const u_int32_t *flags);
530 +extern int ip_set_testip_kernel(ip_set_id_t id,
531 +                               const struct sk_buff *skb,
532 +                               const u_int32_t *flags);
533 +
534 +/* Macros to generate functions */
535 +
536 +#define STRUCT(pre, type)      CONCAT2(pre, type)
537 +#define CONCAT2(pre, type)     struct pre##type
538 +
539 +#define FNAME(pre, mid, post)  CONCAT3(pre, mid, post)
540 +#define CONCAT3(pre, mid, post)        pre##mid##post
541 +
542 +#define UADT0(type, adt, args...)                                      \
543 +static int                                                             \
544 +FNAME(type,_u,adt)(struct ip_set *set, const void *data, u_int32_t size,\
545 +            ip_set_ip_t *hash_ip)                                      \
546 +{                                                                      \
547 +       const STRUCT(ip_set_req_,type) *req = data;                     \
548 +                                                                       \
549 +       return FNAME(type,_,adt)(set, hash_ip , ## args);               \
550 +}
551 +
552 +#define UADT(type, adt, args...)                                       \
553 +       UADT0(type, adt, req->ip , ## args)
554 +
555 +#define KADT(type, adt, getfn, args...)                                        \
556 +static int                                                             \
557 +FNAME(type,_k,adt)(struct ip_set *set,                                 \
558 +            const struct sk_buff *skb,                                 \
559 +            ip_set_ip_t *hash_ip,                                      \
560 +            const u_int32_t *flags,                                    \
561 +            unsigned char index)                                       \
562 +{                                                                      \
563 +       ip_set_ip_t ip = getfn(skb, flags[index]);                      \
564 +                                                                       \
565 +       KADT_CONDITION                                                  \
566 +       return FNAME(type,_,adt)(set, hash_ip, ip , ##args);            \
567 +}
568 +
569 +#define REGISTER_MODULE(type)                                          \
570 +static int __init ip_set_##type##_init(void)                           \
571 +{                                                                      \
572 +       init_max_page_size();                                           \
573 +       return ip_set_register_set_type(&ip_set_##type);                \
574 +}                                                                      \
575 +                                                                       \
576 +static void __exit ip_set_##type##_fini(void)                          \
577 +{                                                                      \
578 +       /* FIXME: possible race with ip_set_create() */                 \
579 +       ip_set_unregister_set_type(&ip_set_##type);                     \
580 +}                                                                      \
581 +                                                                       \
582 +module_init(ip_set_##type##_init);                                     \
583 +module_exit(ip_set_##type##_fini);
584 +
585 +/* Common functions */
586 +
587 +static inline ip_set_ip_t
588 +ipaddr(const struct sk_buff *skb, u_int32_t flag)
589 +{
590 +       return ntohl(flag & IPSET_SRC ? ip_hdr(skb)->saddr : ip_hdr(skb)->daddr);
591 +}
592 +
593 +#define jhash_ip(map, i, ip)   jhash_1word(ip, *(map->initval + i))
594 +
595 +#define pack_ip_port(map, ip, port) \
596 +       (port + ((ip - ((map)->first_ip)) << 16))
597 +
598 +#endif                         /* __KERNEL__ */
599 +
600 +#endif /*_IP_SET_H*/
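
The request structures above are exchanged with the kernel via get/setsockopt() on an IPv4 socket, with SO_IP_SET (83) as the option name and the op field selecting the command. A minimal userspace sketch of the version handshake follows; the raw socket and the reduced error handling are illustrative assumptions, not a copy of the ipset binary's code.

    /* Sketch only: ask the kernel for its ip_set protocol version through
     * the SO_IP_SET getsockopt interface defined above.  Requires the
     * exported header and privileges to open a raw socket. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <linux/netfilter_ipv4/ip_set.h>

    int main(void)
    {
            struct ip_set_req_version req;
            socklen_t size = sizeof(req);
            int sockfd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);

            if (sockfd < 0)
                    return 1;

            memset(&req, 0, sizeof(req));
            req.op = IP_SET_OP_VERSION;

            if (getsockopt(sockfd, SOL_IP, SO_IP_SET, &req, &size) != 0)
                    return 1;

            printf("kernel protocol version %u (header expects %u)\n",
                   req.version, IP_SET_PROTOCOL_VERSION);
            return req.version == IP_SET_PROTOCOL_VERSION ? 0 : 1;
    }
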
601 --- /dev/null
602 +++ b/include/linux/netfilter_ipv4/ip_set_bitmaps.h
603 @@ -0,0 +1,121 @@
604 +#ifndef __IP_SET_BITMAPS_H
605 +#define __IP_SET_BITMAPS_H
606 +
607 +/* Macros to generate functions */
608 +
609 +#ifdef __KERNEL__
610 +#define BITMAP_CREATE(type)                                            \
611 +static int                                                             \
612 +type##_create(struct ip_set *set, const void *data, u_int32_t size)    \
613 +{                                                                      \
614 +       int newbytes;                                                   \
615 +       const struct ip_set_req_##type##_create *req = data;            \
616 +       struct ip_set_##type *map;                                      \
617 +                                                                       \
618 +       if (req->from > req->to) {                                      \
619 +               DP("bad range");                                        \
620 +               return -ENOEXEC;                                        \
621 +       }                                                               \
622 +                                                                       \
623 +       map = kmalloc(sizeof(struct ip_set_##type), GFP_KERNEL);        \
624 +       if (!map) {                                                     \
625 +               DP("out of memory for %zu bytes",                       \
626 +                  sizeof(struct ip_set_##type));                       \
627 +               return -ENOMEM;                                         \
628 +       }                                                               \
629 +       map->first_ip = req->from;                                      \
630 +       map->last_ip = req->to;                                         \
631 +                                                                       \
632 +       newbytes = __##type##_create(req, map);                         \
633 +       if (newbytes < 0) {                                             \
634 +               kfree(map);                                             \
635 +               return newbytes;                                        \
636 +       }                                                               \
637 +                                                                       \
638 +       map->size = newbytes;                                           \
639 +       map->members = ip_set_malloc(newbytes);                         \
640 +       if (!map->members) {                                            \
641 +               DP("out of memory for %i bytes", newbytes);             \
642 +               kfree(map);                                             \
643 +               return -ENOMEM;                                         \
644 +       }                                                               \
645 +       memset(map->members, 0, newbytes);                              \
646 +                                                                       \
647 +       set->data = map;                                                \
648 +       return 0;                                                       \
649 +}
650 +
651 +#define BITMAP_DESTROY(type)                                           \
652 +static void                                                            \
653 +type##_destroy(struct ip_set *set)                                     \
654 +{                                                                      \
655 +       struct ip_set_##type *map = set->data;                          \
656 +                                                                       \
657 +       ip_set_free(map->members, map->size);                           \
658 +       kfree(map);                                                     \
659 +                                                                       \
660 +       set->data = NULL;                                               \
661 +}
662 +
663 +#define BITMAP_FLUSH(type)                                             \
664 +static void                                                            \
665 +type##_flush(struct ip_set *set)                                       \
666 +{                                                                      \
667 +       struct ip_set_##type *map = set->data;                          \
668 +       memset(map->members, 0, map->size);                             \
669 +}
670 +
671 +#define BITMAP_LIST_HEADER(type)                                       \
672 +static void                                                            \
673 +type##_list_header(const struct ip_set *set, void *data)               \
674 +{                                                                      \
675 +       const struct ip_set_##type *map = set->data;                    \
676 +       struct ip_set_req_##type##_create *header = data;               \
677 +                                                                       \
678 +       header->from = map->first_ip;                                   \
679 +       header->to = map->last_ip;                                      \
680 +       __##type##_list_header(map, header);                            \
681 +}
682 +
683 +#define BITMAP_LIST_MEMBERS_SIZE(type)                                 \
684 +static int                                                             \
685 +type##_list_members_size(const struct ip_set *set)                     \
686 +{                                                                      \
687 +       const struct ip_set_##type *map = set->data;                    \
688 +                                                                       \
689 +       return map->size;                                               \
690 +}
691 +
692 +#define BITMAP_LIST_MEMBERS(type)                                      \
693 +static void                                                            \
694 +type##_list_members(const struct ip_set *set, void *data)              \
695 +{                                                                      \
696 +       const struct ip_set_##type *map = set->data;                    \
697 +                                                                       \
698 +       memcpy(data, map->members, map->size);                          \
699 +}
700 +
701 +#define IP_SET_TYPE(type, __features)                                  \
702 +struct ip_set_type ip_set_##type = {                                   \
703 +       .typename               = #type,                                \
704 +       .features               = __features,                           \
705 +       .protocol_version       = IP_SET_PROTOCOL_VERSION,              \
706 +       .create                 = &type##_create,                       \
707 +       .destroy                = &type##_destroy,                      \
708 +       .flush                  = &type##_flush,                        \
709 +       .reqsize                = sizeof(struct ip_set_req_##type),     \
710 +       .addip                  = &type##_uadd,                         \
711 +       .addip_kernel           = &type##_kadd,                         \
712 +       .delip                  = &type##_udel,                         \
713 +       .delip_kernel           = &type##_kdel,                         \
714 +       .testip                 = &type##_utest,                        \
715 +       .testip_kernel          = &type##_ktest,                        \
716 +       .header_size            = sizeof(struct ip_set_req_##type##_create),\
717 +       .list_header            = &type##_list_header,                  \
718 +       .list_members_size      = &type##_list_members_size,            \
719 +       .list_members           = &type##_list_members,                 \
720 +       .me                     = THIS_MODULE,                          \
721 +};
722 +#endif /* __KERNEL */
723 +
724 +#endif /* __IP_SET_BITMAPS_H */
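
These macros only generate the boilerplate: a concrete bitmap type must supply the helpers they call (__<type>_create(), __<type>_list_header(), and the _uadd/_kadd/... functions produced by the UADT/KADT macros in ip_set.h) and then instantiate everything. A rough, purely illustrative sketch for a type named ipmap, with all helper bodies omitted and the feature flags chosen for the example:

    /* Hypothetical instantiation; __ipmap_*() and ipmap_uadd()/ipmap_kadd()/...
     * are assumed to be defined earlier in the module. */
    BITMAP_CREATE(ipmap)                /* emits ipmap_create() via __ipmap_create() */
    BITMAP_DESTROY(ipmap)               /* emits ipmap_destroy() */
    BITMAP_FLUSH(ipmap)                 /* emits ipmap_flush() */
    BITMAP_LIST_HEADER(ipmap)           /* emits ipmap_list_header() */
    BITMAP_LIST_MEMBERS_SIZE(ipmap)     /* emits ipmap_list_members_size() */
    BITMAP_LIST_MEMBERS(ipmap)          /* emits ipmap_list_members() */

    IP_SET_TYPE(ipmap, IPSET_TYPE_IP | IPSET_DATA_SINGLE)  /* struct ip_set_type ip_set_ipmap */
    REGISTER_MODULE(ipmap)              /* module_init()/module_exit() glue from ip_set.h */
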
725 --- /dev/null
726 +++ b/include/linux/netfilter_ipv4/ip_set_compat.h
727 @@ -0,0 +1,71 @@
728 +#ifndef _IP_SET_COMPAT_H
729 +#define _IP_SET_COMPAT_H
730 +
731 +#ifdef __KERNEL__
732 +#include <linux/version.h>
733 +
734 +/* Arrgh */
735 +#ifdef MODULE
736 +#define __MOD_INC(foo)         __MOD_INC_USE_COUNT(foo)
737 +#define __MOD_DEC(foo)         __MOD_DEC_USE_COUNT(foo)
738 +#else
739 +#define __MOD_INC(foo)         1
740 +#define __MOD_DEC(foo)
741 +#endif
742 +
743 +/* Backward compatibility */
744 +#ifndef __nocast
745 +#define __nocast
746 +#endif
747 +#ifndef __bitwise__
748 +#define __bitwise__
749 +#endif
750 +
751 +/* Compatibility glue code */
752 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
753 +#include <linux/interrupt.h>
754 +#define DEFINE_RWLOCK(x)                rwlock_t x = RW_LOCK_UNLOCKED
755 +#define try_module_get(x)              __MOD_INC(x)
756 +#define module_put(x)                   __MOD_DEC(x)
757 +#define __clear_bit(nr, addr)          clear_bit(nr, addr)
758 +#define __set_bit(nr, addr)            set_bit(nr, addr)
759 +#define __test_and_set_bit(nr, addr)   test_and_set_bit(nr, addr)
760 +#define __test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr)
761 +
762 +typedef unsigned __bitwise__ gfp_t;
763 +
764 +static inline void *kzalloc(size_t size, gfp_t flags)
765 +{
766 +       void *data = kmalloc(size, flags);
767 +       
768 +       if (data)
769 +               memset(data, 0, size);
770 +       
771 +       return data;
772 +}
773 +#endif
774 +
775 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
776 +#define __KMEM_CACHE_T__       kmem_cache_t
777 +#else
778 +#define __KMEM_CACHE_T__       struct kmem_cache
779 +#endif
780 +
781 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
782 +#define ip_hdr(skb)            ((skb)->nh.iph)
783 +#define skb_mac_header(skb)    ((skb)->mac.raw)
784 +#define eth_hdr(skb)           ((struct ethhdr *)skb_mac_header(skb))
785 +#endif
786 +
787 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
788 +#include <linux/netfilter.h>
789 +#define KMEM_CACHE_CREATE(name, size) \
790 +       kmem_cache_create(name, size, 0, 0, NULL, NULL)
791 +#else
792 +#define KMEM_CACHE_CREATE(name, size) \
793 +       kmem_cache_create(name, size, 0, 0, NULL)
794 +#endif
795 +  
796 +
797 +#endif /* __KERNEL__ */
798 +#endif /* _IP_SET_COMPAT_H */   
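
KMEM_CACHE_CREATE() exists because kmem_cache_create() dropped its destructor argument in 2.6.23, so callers built against this header stay version-neutral. A small fragment showing the intended use; the cache name and the init/exit functions are made up for the example:

    /* Illustrative only: create a slab cache for binding entries using the
     * compat wrappers above. */
    static __KMEM_CACHE_T__ *hash_cachep;

    static int __init example_init(void)
    {
            hash_cachep = KMEM_CACHE_CREATE("ip_set_hash",
                                            sizeof(struct ip_set_hash));
            return hash_cachep ? 0 : -ENOMEM;
    }

    static void __exit example_fini(void)
    {
            kmem_cache_destroy(hash_cachep);
    }
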
799 --- /dev/null
800 +++ b/include/linux/netfilter_ipv4/ip_set_getport.h
801 @@ -0,0 +1,48 @@
802 +#ifndef _IP_SET_GETPORT_H
803 +#define _IP_SET_GETPORT_H
804 +
805 +#ifdef __KERNEL__
806 +
807 +#define INVALID_PORT   (MAX_RANGE + 1)
808 +
809 +/* We must handle non-linear skbs */
810 +static inline ip_set_ip_t
811 +get_port(const struct sk_buff *skb, u_int32_t flags)
812 +{
813 +       struct iphdr *iph = ip_hdr(skb);
814 +       u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
815 +       switch (iph->protocol) {
816 +       case IPPROTO_TCP: {
817 +               struct tcphdr tcph;
818 +               
819 +               /* See comments at tcp_match in ip_tables.c */
820 +               if (offset)
821 +                       return INVALID_PORT;
822 +
823 +               if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &tcph, sizeof(tcph)) < 0)
824 +                       /* No choice either */
825 +                       return INVALID_PORT;
826 +               
827 +               return ntohs(flags & IPSET_SRC ?
828 +                            tcph.source : tcph.dest);
829 +           }
830 +       case IPPROTO_UDP: {
831 +               struct udphdr udph;
832 +
833 +               if (offset)
834 +                       return INVALID_PORT;
835 +
836 +               if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &udph, sizeof(udph)) < 0)
837 +                       /* No choice either */
838 +                       return INVALID_PORT;
839 +               
840 +               return ntohs(flags & IPSET_SRC ?
841 +                            udph.source : udph.dest);
842 +           }
843 +       default:
844 +               return INVALID_PORT;
845 +       }
846 +}
847 +#endif                         /* __KERNEL__ */
848 +
849 +#endif /*_IP_SET_GETPORT_H*/
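
get_port() yields the source or destination port in host byte order, or INVALID_PORT for fragments, truncated headers and non-TCP/UDP packets. Port-aware types are meant to pass it as the getfn of the KADT() generator in ip_set.h and to define KADT_CONDITION so packets without a usable port never match. A sketch of that wiring for a hypothetical port-keyed type (the portmap_test/add/del helpers are assumed to exist):

    /* Sketch: kernel-side add/del/test hooks for a port-keyed set type. */
    #define KADT_CONDITION                          \
            if (ip == INVALID_PORT)                 \
                    return 0;

    KADT(portmap, test, get_port)   /* emits portmap_ktest() */
    KADT(portmap, add, get_port)    /* emits portmap_kadd()  */
    KADT(portmap, del, get_port)    /* emits portmap_kdel()  */
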
850 --- /dev/null
851 +++ b/include/linux/netfilter_ipv4/ip_set_hashes.h
852 @@ -0,0 +1,306 @@
853 +#ifndef __IP_SET_HASHES_H
854 +#define __IP_SET_HASHES_H
855 +
856 +#define initval_t uint32_t
857 +
858 +/* Macros to generate functions */
859 +
860 +#ifdef __KERNEL__
861 +#define HASH_RETRY0(type, dtype, cond)                                 \
862 +static int                                                             \
863 +type##_retry(struct ip_set *set)                                       \
864 +{                                                                      \
865 +       struct ip_set_##type *map = set->data, *tmp;                    \
866 +       dtype *elem;                                                    \
867 +       void *members;                                                  \
868 +       u_int32_t i, hashsize = map->hashsize;                          \
869 +       int res;                                                        \
870 +                                                                       \
871 +       if (map->resize == 0)                                           \
872 +               return -ERANGE;                                         \
873 +                                                                       \
874 +    again:                                                             \
875 +       res = 0;                                                        \
876 +                                                                       \
877 +       /* Calculate new hash size */                                   \
878 +       hashsize += (hashsize * map->resize)/100;                       \
879 +       if (hashsize == map->hashsize)                                  \
880 +               hashsize++;                                             \
881 +                                                                       \
882 +       ip_set_printk("rehashing of set %s triggered: "                 \
883 +                     "hashsize grows from %lu to %lu",                 \
884 +                     set->name,                                        \
885 +                     (long unsigned)map->hashsize,                     \
886 +                     (long unsigned)hashsize);                         \
887 +                                                                       \
888 +       tmp = kmalloc(sizeof(struct ip_set_##type)                      \
889 +                     + map->probes * sizeof(initval_t), GFP_ATOMIC);   \
890 +       if (!tmp) {                                                     \
891 +               DP("out of memory for %zu bytes",                       \
892 +                  sizeof(struct ip_set_##type)                         \
893 +                  + map->probes * sizeof(initval_t));                  \
894 +               return -ENOMEM;                                         \
895 +       }                                                               \
896 +       tmp->members = harray_malloc(hashsize, sizeof(dtype), GFP_ATOMIC);\
897 +       if (!tmp->members) {                                            \
898 +               DP("out of memory for %zu bytes", hashsize * sizeof(dtype));\
899 +               kfree(tmp);                                             \
900 +               return -ENOMEM;                                         \
901 +       }                                                               \
902 +       tmp->hashsize = hashsize;                                       \
903 +       tmp->elements = 0;                                              \
904 +       tmp->probes = map->probes;                                      \
905 +       tmp->resize = map->resize;                                      \
906 +       memcpy(tmp->initval, map->initval, map->probes * sizeof(initval_t));\
907 +       __##type##_retry(tmp, map);                                     \
908 +                                                                       \
909 +       write_lock_bh(&set->lock);                                      \
910 +       map = set->data; /* Play safe */                                \
911 +       for (i = 0; i < map->hashsize && res == 0; i++) {               \
912 +               elem = HARRAY_ELEM(map->members, dtype *, i);           \
913 +               if (cond)                                               \
914 +                       res = __##type##_add(tmp, elem);                \
915 +       }                                                               \
916 +       if (res) {                                                      \
917 +               /* Failure, try again */                                \
918 +               write_unlock_bh(&set->lock);                            \
919 +               harray_free(tmp->members);                              \
920 +               kfree(tmp);                                             \
921 +               goto again;                                             \
922 +       }                                                               \
923 +                                                                       \
924 +       /* Success at resizing! */                                      \
925 +       members = map->members;                                         \
926 +                                                                       \
927 +       map->hashsize = tmp->hashsize;                                  \
928 +       map->members = tmp->members;                                    \
929 +       write_unlock_bh(&set->lock);                                    \
930 +                                                                       \
931 +       harray_free(members);                                           \
932 +       kfree(tmp);                                                     \
933 +                                                                       \
934 +       return 0;                                                       \
935 +}
936 +
937 +#define HASH_RETRY(type, dtype)                                                \
938 +       HASH_RETRY0(type, dtype, *elem)
939 +
940 +#define HASH_RETRY2(type, dtype)                                               \
941 +       HASH_RETRY0(type, dtype, elem->ip || elem->ip1)
942 +
943 +#define HASH_CREATE(type, dtype)                                       \
944 +static int                                                             \
945 +type##_create(struct ip_set *set, const void *data, u_int32_t size)    \
946 +{                                                                      \
947 +       const struct ip_set_req_##type##_create *req = data;            \
948 +       struct ip_set_##type *map;                                      \
949 +       uint16_t i;                                                     \
950 +                                                                       \
951 +       if (req->hashsize < 1) {                                        \
952 +               ip_set_printk("hashsize too small");                    \
953 +               return -ENOEXEC;                                        \
954 +       }                                                               \
955 +                                                                       \
956 +       if (req->probes < 1) {                                          \
957 +               ip_set_printk("probes too small");                      \
958 +               return -ENOEXEC;                                        \
959 +       }                                                               \
960 +                                                                       \
961 +       map = kmalloc(sizeof(struct ip_set_##type)                      \
962 +                     + req->probes * sizeof(initval_t), GFP_KERNEL);   \
963 +       if (!map) {                                                     \
964 +               DP("out of memory for %zu bytes",                       \
965 +                  sizeof(struct ip_set_##type)                         \
966 +                  + req->probes * sizeof(initval_t));                  \
967 +               return -ENOMEM;                                         \
968 +       }                                                               \
969 +       for (i = 0; i < req->probes; i++)                               \
970 +               get_random_bytes(((initval_t *) map->initval)+i, 4);    \
971 +       map->elements = 0;                                              \
972 +       map->hashsize = req->hashsize;                                  \
973 +       map->probes = req->probes;                                      \
974 +       map->resize = req->resize;                                      \
975 +       if (__##type##_create(req, map)) {                              \
976 +               kfree(map);                                             \
977 +               return -ENOEXEC;                                        \
978 +       }                                                               \
979 +       map->members = harray_malloc(map->hashsize, sizeof(dtype), GFP_KERNEL);\
980 +       if (!map->members) {                                            \
981 +               DP("out of memory for %zu bytes", map->hashsize * sizeof(dtype));\
982 +               kfree(map);                                             \
983 +               return -ENOMEM;                                         \
984 +       }                                                               \
985 +                                                                       \
986 +       set->data = map;                                                \
987 +       return 0;                                                       \
988 +}
989 +
990 +#define HASH_DESTROY(type)                                             \
991 +static void                                                            \
992 +type##_destroy(struct ip_set *set)                                     \
993 +{                                                                      \
994 +       struct ip_set_##type *map = set->data;                          \
995 +                                                                       \
996 +       harray_free(map->members);                                      \
997 +       kfree(map);                                                     \
998 +                                                                       \
999 +       set->data = NULL;                                               \
1000 +}
1001 +
1002 +#define HASH_FLUSH(type, dtype)                                                \
1003 +static void                                                            \
1004 +type##_flush(struct ip_set *set)                                       \
1005 +{                                                                      \
1006 +       struct ip_set_##type *map = set->data;                          \
1007 +       harray_flush(map->members, map->hashsize, sizeof(dtype));       \
1008 +       map->elements = 0;                                              \
1009 +}
1010 +
1011 +#define HASH_FLUSH_CIDR(type, dtype)                                   \
1012 +static void                                                            \
1013 +type##_flush(struct ip_set *set)                                       \
1014 +{                                                                      \
1015 +       struct ip_set_##type *map = set->data;                          \
1016 +       harray_flush(map->members, map->hashsize, sizeof(dtype));       \
1017 +       memset(map->cidr, 0, sizeof(map->cidr));                        \
1018 +       memset(map->nets, 0, sizeof(map->nets));                        \
1019 +       map->elements = 0;                                              \
1020 +}
1021 +
1022 +#define HASH_LIST_HEADER(type)                                         \
1023 +static void                                                            \
1024 +type##_list_header(const struct ip_set *set, void *data)               \
1025 +{                                                                      \
1026 +       const struct ip_set_##type *map = set->data;                    \
1027 +       struct ip_set_req_##type##_create *header = data;               \
1028 +                                                                       \
1029 +       header->hashsize = map->hashsize;                               \
1030 +       header->probes = map->probes;                                   \
1031 +       header->resize = map->resize;                                   \
1032 +       __##type##_list_header(map, header);                            \
1033 +}
1034 +
1035 +#define HASH_LIST_MEMBERS_SIZE(type, dtype)                            \
1036 +static int                                                             \
1037 +type##_list_members_size(const struct ip_set *set)                     \
1038 +{                                                                      \
1039 +       const struct ip_set_##type *map = set->data;                    \
1040 +                                                                       \
1041 +       return (map->hashsize * sizeof(dtype));                         \
1042 +}
1043 +
1044 +#define HASH_LIST_MEMBERS(type, dtype)                                 \
1045 +static void                                                            \
1046 +type##_list_members(const struct ip_set *set, void *data)              \
1047 +{                                                                      \
1048 +       const struct ip_set_##type *map = set->data;                    \
1049 +       dtype *elem;                                                    \
1050 +       uint32_t i;                                                     \
1051 +                                                                       \
1052 +       for (i = 0; i < map->hashsize; i++) {                           \
1053 +               elem = HARRAY_ELEM(map->members, dtype *, i);           \
1054 +               ((dtype *)data)[i] = *elem;                             \
1055 +       }                                                               \
1056 +}
1057 +
1058 +#define HASH_LIST_MEMBERS_MEMCPY(type, dtype)                          \
1059 +static void                                                            \
1060 +type##_list_members(const struct ip_set *set, void *data)              \
1061 +{                                                                      \
1062 +       const struct ip_set_##type *map = set->data;                    \
1063 +       dtype *elem;                                                    \
1064 +       uint32_t i;                                                     \
1065 +                                                                       \
1066 +       for (i = 0; i < map->hashsize; i++) {                           \
1067 +               elem = HARRAY_ELEM(map->members, dtype *, i);           \
1068 +               memcpy((((dtype *)data)+i), elem, sizeof(dtype));       \
1069 +       }                                                               \
1070 +}
1071 +
1072 +#define IP_SET_RTYPE(type, __features)                                 \
1073 +struct ip_set_type ip_set_##type = {                                   \
1074 +       .typename               = #type,                                \
1075 +       .features               = __features,                           \
1076 +       .protocol_version       = IP_SET_PROTOCOL_VERSION,              \
1077 +       .create                 = &type##_create,                       \
1078 +       .retry                  = &type##_retry,                        \
1079 +       .destroy                = &type##_destroy,                      \
1080 +       .flush                  = &type##_flush,                        \
1081 +       .reqsize                = sizeof(struct ip_set_req_##type),     \
1082 +       .addip                  = &type##_uadd,                         \
1083 +       .addip_kernel           = &type##_kadd,                         \
1084 +       .delip                  = &type##_udel,                         \
1085 +       .delip_kernel           = &type##_kdel,                         \
1086 +       .testip                 = &type##_utest,                        \
1087 +       .testip_kernel          = &type##_ktest,                        \
1088 +       .header_size            = sizeof(struct ip_set_req_##type##_create),\
1089 +       .list_header            = &type##_list_header,                  \
1090 +       .list_members_size      = &type##_list_members_size,            \
1091 +       .list_members           = &type##_list_members,                 \
1092 +       .me                     = THIS_MODULE,                          \
1093 +};
1094 +
1095 +/* Helper functions */
1096 +static inline void
1097 +add_cidr_size(uint8_t *cidr, uint8_t size)
1098 +{
1099 +       uint8_t next;
1100 +       int i;
1101 +       
1102 +       for (i = 0; i < 30 && cidr[i]; i++) {
1103 +               if (cidr[i] < size) {
1104 +                       next = cidr[i];
1105 +                       cidr[i] = size;
1106 +                       size = next;
1107 +               }
1108 +       }
1109 +       if (i < 30)
1110 +               cidr[i] = size;
1111 +}
1112 +
1113 +static inline void
1114 +del_cidr_size(uint8_t *cidr, uint8_t size)
1115 +{
1116 +       int i;
1117 +       
1118 +       for (i = 0; i < 29 && cidr[i]; i++) {
1119 +               if (cidr[i] == size)
1120 +                       cidr[i] = size = cidr[i+1];
1121 +       }
1122 +       cidr[29] = 0;
1123 +}
1124 +#else
1125 +#include <arpa/inet.h>
1126 +#endif /* __KERNEL */
1127 +
1128 +#ifndef UINT16_MAX
1129 +#define UINT16_MAX 65535
1130 +#endif
1131 +
1132 +static unsigned char shifts[] = {255, 253, 249, 241, 225, 193, 129, 1};
1133 +
1134 +static inline ip_set_ip_t 
1135 +pack_ip_cidr(ip_set_ip_t ip, unsigned char cidr)
1136 +{
1137 +       ip_set_ip_t addr, *paddr = &addr;
1138 +       unsigned char n, t, *a;
1139 +
1140 +       addr = htonl(ip & (0xFFFFFFFF << (32 - (cidr))));
1141 +#ifdef __KERNEL__
1142 +       DP("ip:%u.%u.%u.%u/%u", NIPQUAD(addr), cidr);
1143 +#endif
1144 +       n = cidr / 8;
1145 +       t = cidr % 8;   
1146 +       a = &((unsigned char *)paddr)[n];
1147 +       *a = *a /(1 << (8 - t)) + shifts[t];
1148 +#ifdef __KERNEL__
1149 +       DP("n: %u, t: %u, a: %u", n, t, *a);
1150 +       DP("ip:%u.%u.%u.%u/%u, %u.%u.%u.%u",
1151 +          HIPQUAD(ip), cidr, NIPQUAD(addr));
1152 +#endif
1153 +
1154 +       return ntohl(addr);
1155 +}
1156 +
1157 +
1158 +#endif /* __IP_SET_HASHES_H */
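
pack_ip_cidr() folds the prefix length into the low bits of the masked address so an (address, cidr) pair fits in one ip_set_ip_t slot, which is how the network-hash style types store their entries. Two hand-worked values, under the assumption that the exported headers compile from userspace (illustrative only):

    #include <stdint.h>
    #include <stdio.h>
    #include <linux/netfilter_ipv4/ip_set.h>
    #include <linux/netfilter_ipv4/ip_set_hashes.h>

    int main(void)
    {
            /* 192.168.1.0/24: cidr % 8 == 0, so the last octet becomes
             * shifts[0] = 255 -> 0xC0A801FF (192.168.1.255). */
            printf("0x%08X\n", (unsigned)pack_ip_cidr(0xC0A80100, 24));

            /* 192.168.1.64/26: last octet 64/(1<<6) + shifts[2] = 1 + 249
             * = 250 -> 0xC0A801FA (192.168.1.250). */
            printf("0x%08X\n", (unsigned)pack_ip_cidr(0xC0A80140, 26));
            return 0;
    }
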
1159 --- /dev/null
1160 +++ b/include/linux/netfilter_ipv4/ip_set_iphash.h
1161 @@ -0,0 +1,30 @@
1162 +#ifndef __IP_SET_IPHASH_H
1163 +#define __IP_SET_IPHASH_H
1164 +
1165 +#include <linux/netfilter_ipv4/ip_set.h>
1166 +#include <linux/netfilter_ipv4/ip_set_hashes.h>
1167 +
1168 +#define SETTYPE_NAME "iphash"
1169 +
1170 +struct ip_set_iphash {
1171 +       ip_set_ip_t *members;           /* the iphash proper */
1172 +       uint32_t elements;              /* number of elements */
1173 +       uint32_t hashsize;              /* hash size */
1174 +       uint16_t probes;                /* max number of probes  */
1175 +       uint16_t resize;                /* resize factor in percent */
1176 +       ip_set_ip_t netmask;            /* netmask */
1177 +       initval_t initval[0];           /* initvals for jhash_1word */
1178 +};
1179 +
1180 +struct ip_set_req_iphash_create {
1181 +       uint32_t hashsize;
1182 +       uint16_t probes;
1183 +       uint16_t resize;
1184 +       ip_set_ip_t netmask;
1185 +};
1186 +
1187 +struct ip_set_req_iphash {
1188 +       ip_set_ip_t ip;
1189 +};
1190 +
1191 +#endif /* __IP_SET_IPHASH_H */
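
Putting the generators together: a hash type such as iphash defines its core iphash_add/_del/_test and __iphash_*() helpers, then lets the macros from ip_set.h and ip_set_hashes.h emit the userspace/kernel entry points and the type registration. A compressed sketch of the invocations only (helper bodies, and an empty KADT_CONDITION, are assumed):

    UADT(iphash, add)                 /* iphash_uadd(), takes struct ip_set_req_iphash */
    KADT(iphash, add, ipaddr)         /* iphash_kadd(), takes the IP from the skb */
    UADT(iphash, del)
    KADT(iphash, del, ipaddr)
    UADT(iphash, test)
    KADT(iphash, test, ipaddr)

    HASH_CREATE(iphash, ip_set_ip_t)
    HASH_DESTROY(iphash)
    HASH_FLUSH(iphash, ip_set_ip_t)
    HASH_RETRY(iphash, ip_set_ip_t)
    HASH_LIST_HEADER(iphash)
    HASH_LIST_MEMBERS_SIZE(iphash, ip_set_ip_t)
    HASH_LIST_MEMBERS(iphash, ip_set_ip_t)

    IP_SET_RTYPE(iphash, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
    REGISTER_MODULE(iphash)
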
1192 --- /dev/null
1193 +++ b/include/linux/netfilter_ipv4/ip_set_ipmap.h
1194 @@ -0,0 +1,57 @@
1195 +#ifndef __IP_SET_IPMAP_H
1196 +#define __IP_SET_IPMAP_H
1197 +
1198 +#include <linux/netfilter_ipv4/ip_set.h>
1199 +#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
1200 +
1201 +#define SETTYPE_NAME "ipmap"
1202 +
1203 +struct ip_set_ipmap {
1204 +       void *members;                  /* the ipmap proper */
1205 +       ip_set_ip_t first_ip;           /* host byte order, included in range */
1206 +       ip_set_ip_t last_ip;            /* host byte order, included in range */
1207 +       ip_set_ip_t netmask;            /* subnet netmask */
1208 +       ip_set_ip_t sizeid;             /* size of set in IPs */
1209 +       ip_set_ip_t hosts;              /* number of hosts in a subnet */
1210 +       u_int32_t size;                 /* size of the ipmap proper */
1211 +};
1212 +
1213 +struct ip_set_req_ipmap_create {
1214 +       ip_set_ip_t from;
1215 +       ip_set_ip_t to;
1216 +       ip_set_ip_t netmask;
1217 +};
1218 +
1219 +struct ip_set_req_ipmap {
1220 +       ip_set_ip_t ip;
1221 +};
1222 +
1223 +static inline unsigned int
1224 +mask_to_bits(ip_set_ip_t mask)
1225 +{
1226 +       unsigned int bits = 32;
1227 +       ip_set_ip_t maskaddr;
1228 +       
1229 +       if (mask == 0xFFFFFFFF)
1230 +               return bits;
1231 +       
1232 +       maskaddr = 0xFFFFFFFE;
1233 +       while (--bits > 0 && maskaddr != mask)
1234 +               maskaddr <<= 1;
1235 +       
1236 +       return bits;
1237 +}
1238 +
1239 +static inline ip_set_ip_t
1240 +range_to_mask(ip_set_ip_t from, ip_set_ip_t to, unsigned int *bits)
1241 +{
1242 +       ip_set_ip_t mask = 0xFFFFFFFE;
1243 +       
1244 +       *bits = 32;
1245 +       while (--(*bits) > 0 && mask && (to & mask) != from)
1246 +               mask <<= 1;
1247 +               
1248 +       return mask;
1249 +}
1250 +       
1251 +#endif /* __IP_SET_IPMAP_H */
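The mask_to_bits()/range_to_mask() inlines above convert a netmask into a prefix length and derive the covering mask of a from-to range. A user-space sketch of the expected results, assuming only the two functions copied from the header (the sample addresses are arbitrary):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t ip_set_ip_t;

/* mask_to_bits() and range_to_mask() copied from ip_set_ipmap.h above,
 * built in user space purely for illustration. */
static unsigned int mask_to_bits(ip_set_ip_t mask)
{
	unsigned int bits = 32;
	ip_set_ip_t maskaddr;

	if (mask == 0xFFFFFFFF)
		return bits;

	maskaddr = 0xFFFFFFFE;
	while (--bits > 0 && maskaddr != mask)
		maskaddr <<= 1;

	return bits;
}

static ip_set_ip_t range_to_mask(ip_set_ip_t from, ip_set_ip_t to, unsigned int *bits)
{
	ip_set_ip_t mask = 0xFFFFFFFE;

	*bits = 32;
	while (--(*bits) > 0 && mask && (to & mask) != from)
		mask <<= 1;

	return mask;
}

int main(void)
{
	unsigned int bits;
	ip_set_ip_t mask;

	/* 255.255.255.0 in host byte order is a /24 */
	printf("/%u\n", mask_to_bits(0xFFFFFF00));

	/* 192.168.0.0 - 192.168.0.255 is covered by 255.255.255.0 (/24) */
	mask = range_to_mask(0xC0A80000, 0xC0A800FF, &bits);
	printf("0x%08X /%u\n", mask, bits);
	return 0;
}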
1252 --- /dev/null
1253 +++ b/include/linux/netfilter_ipv4/ip_set_ipporthash.h
1254 @@ -0,0 +1,33 @@
1255 +#ifndef __IP_SET_IPPORTHASH_H
1256 +#define __IP_SET_IPPORTHASH_H
1257 +
1258 +#include <linux/netfilter_ipv4/ip_set.h>
1259 +#include <linux/netfilter_ipv4/ip_set_hashes.h>
1260 +
1261 +#define SETTYPE_NAME "ipporthash"
1262 +
1263 +struct ip_set_ipporthash {
1264 +       ip_set_ip_t *members;           /* the ipporthash proper */
1265 +       uint32_t elements;              /* number of elements */
1266 +       uint32_t hashsize;              /* hash size */
1267 +       uint16_t probes;                /* max number of probes  */
1268 +       uint16_t resize;                /* resize factor in percent */
1269 +       ip_set_ip_t first_ip;           /* host byte order, included in range */
1270 +       ip_set_ip_t last_ip;            /* host byte order, included in range */
1271 +       initval_t initval[0];           /* initvals for jhash_1word */
1272 +};
1273 +
1274 +struct ip_set_req_ipporthash_create {
1275 +       uint32_t hashsize;
1276 +       uint16_t probes;
1277 +       uint16_t resize;
1278 +       ip_set_ip_t from;
1279 +       ip_set_ip_t to;
1280 +};
1281 +
1282 +struct ip_set_req_ipporthash {
1283 +       ip_set_ip_t ip;
1284 +       ip_set_ip_t port;
1285 +};
1286 +
1287 +#endif /* __IP_SET_IPPORTHASH_H */
1288 --- /dev/null
1289 +++ b/include/linux/netfilter_ipv4/ip_set_ipportiphash.h
1290 @@ -0,0 +1,39 @@
1291 +#ifndef __IP_SET_IPPORTIPHASH_H
1292 +#define __IP_SET_IPPORTIPHASH_H
1293 +
1294 +#include <linux/netfilter_ipv4/ip_set.h>
1295 +#include <linux/netfilter_ipv4/ip_set_hashes.h>
1296 +
1297 +#define SETTYPE_NAME "ipportiphash"
1298 +
1299 +struct ipportip {
1300 +       ip_set_ip_t ip;
1301 +       ip_set_ip_t ip1;
1302 +};
1303 +
1304 +struct ip_set_ipportiphash {
1305 +       struct ipportip *members;       /* the ipportip proper */
1306 +       uint32_t elements;              /* number of elements */
1307 +       uint32_t hashsize;              /* hash size */
1308 +       uint16_t probes;                /* max number of probes  */
1309 +       uint16_t resize;                /* resize factor in percent */
1310 +       ip_set_ip_t first_ip;           /* host byte order, included in range */
1311 +       ip_set_ip_t last_ip;            /* host byte order, included in range */
1312 +       initval_t initval[0];           /* initvals for jhash_1word */
1313 +};
1314 +
1315 +struct ip_set_req_ipportiphash_create {
1316 +       uint32_t hashsize;
1317 +       uint16_t probes;
1318 +       uint16_t resize;
1319 +       ip_set_ip_t from;
1320 +       ip_set_ip_t to;
1321 +};
1322 +
1323 +struct ip_set_req_ipportiphash {
1324 +       ip_set_ip_t ip;
1325 +       ip_set_ip_t port;
1326 +       ip_set_ip_t ip1;
1327 +};
1328 +
1329 +#endif /* __IP_SET_IPPORTIPHASH_H */
1330 --- /dev/null
1331 +++ b/include/linux/netfilter_ipv4/ip_set_ipportnethash.h
1332 @@ -0,0 +1,42 @@
1333 +#ifndef __IP_SET_IPPORTNETHASH_H
1334 +#define __IP_SET_IPPORTNETHASH_H
1335 +
1336 +#include <linux/netfilter_ipv4/ip_set.h>
1337 +#include <linux/netfilter_ipv4/ip_set_hashes.h>
1338 +
1339 +#define SETTYPE_NAME "ipportnethash"
1340 +
1341 +struct ipportip {
1342 +       ip_set_ip_t ip;
1343 +       ip_set_ip_t ip1;
1344 +};
1345 +
1346 +struct ip_set_ipportnethash {
1347 +       struct ipportip *members;       /* the ipportip proper */
1348 +       uint32_t elements;              /* number of elements */
1349 +       uint32_t hashsize;              /* hash size */
1350 +       uint16_t probes;                /* max number of probes  */
1351 +       uint16_t resize;                /* resize factor in percent */
1352 +       ip_set_ip_t first_ip;           /* host byte order, included in range */
1353 +       ip_set_ip_t last_ip;            /* host byte order, included in range */
1354 +       uint8_t cidr[30];               /* CIDR sizes */
1355 +       uint16_t nets[30];              /* nr of nets by CIDR sizes */
1356 +       initval_t initval[0];           /* initvals for jhash_1word */
1357 +};
1358 +
1359 +struct ip_set_req_ipportnethash_create {
1360 +       uint32_t hashsize;
1361 +       uint16_t probes;
1362 +       uint16_t resize;
1363 +       ip_set_ip_t from;
1364 +       ip_set_ip_t to;
1365 +};
1366 +
1367 +struct ip_set_req_ipportnethash {
1368 +       ip_set_ip_t ip;
1369 +       ip_set_ip_t port;
1370 +       ip_set_ip_t ip1;
1371 +       uint8_t cidr;
1372 +};
1373 +
1374 +#endif /* __IP_SET_IPPORTNETHASH_H */
1375 --- /dev/null
1376 +++ b/include/linux/netfilter_ipv4/ip_set_iptree.h
1377 @@ -0,0 +1,39 @@
1378 +#ifndef __IP_SET_IPTREE_H
1379 +#define __IP_SET_IPTREE_H
1380 +
1381 +#include <linux/netfilter_ipv4/ip_set.h>
1382 +
1383 +#define SETTYPE_NAME "iptree"
1384 +
1385 +struct ip_set_iptreed {
1386 +       unsigned long expires[256];             /* x.x.x.ADDR */
1387 +};
1388 +
1389 +struct ip_set_iptreec {
1390 +       struct ip_set_iptreed *tree[256];       /* x.x.ADDR.* */
1391 +};
1392 +
1393 +struct ip_set_iptreeb {
1394 +       struct ip_set_iptreec *tree[256];       /* x.ADDR.*.* */
1395 +};
1396 +
1397 +struct ip_set_iptree {
1398 +       unsigned int timeout;
1399 +       unsigned int gc_interval;
1400 +#ifdef __KERNEL__
1401 +       uint32_t elements;              /* number of elements */
1402 +       struct timer_list gc;
1403 +       struct ip_set_iptreeb *tree[256];       /* ADDR.*.*.* */
1404 +#endif
1405 +};
1406 +
1407 +struct ip_set_req_iptree_create {
1408 +       unsigned int timeout;
1409 +};
1410 +
1411 +struct ip_set_req_iptree {
1412 +       ip_set_ip_t ip;
1413 +       unsigned int timeout;
1414 +};
1415 +
1416 +#endif /* __IP_SET_IPTREE_H */
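The iptree type stores addresses in a four-level, 256-way tree: each octet of the host-byte-order address selects a slot at one level, with the expires[] leaves holding per-address timeouts. A tiny sketch of that indexing, using a hypothetical helper that is not part of this patch:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper showing how an address is decomposed into the
 * four tree levels used by ip_set_iptree (host byte order). */
static void iptree_octets(uint32_t ip, unsigned char idx[4])
{
	idx[0] = ip >> 24;		/* top level:      ADDR.*.*.* */
	idx[1] = (ip >> 16) & 0xFF;	/* second level:   x.ADDR.*.* */
	idx[2] = (ip >> 8) & 0xFF;	/* third level:    x.x.ADDR.* */
	idx[3] = ip & 0xFF;		/* expires[] leaf: x.x.x.ADDR */
}

int main(void)
{
	unsigned char idx[4];

	iptree_octets(0xC0A80102, idx);	/* 192.168.1.2 */
	printf("%u.%u.%u.%u\n", idx[0], idx[1], idx[2], idx[3]);
	return 0;
}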
1417 --- /dev/null
1418 +++ b/include/linux/netfilter_ipv4/ip_set_iptreemap.h
1419 @@ -0,0 +1,40 @@
1420 +#ifndef __IP_SET_IPTREEMAP_H
1421 +#define __IP_SET_IPTREEMAP_H
1422 +
1423 +#include <linux/netfilter_ipv4/ip_set.h>
1424 +
1425 +#define SETTYPE_NAME "iptreemap"
1426 +
1427 +#ifdef __KERNEL__
1428 +struct ip_set_iptreemap_d {
1429 +       unsigned char bitmap[32]; /* x.x.x.y */
1430 +};
1431 +
1432 +struct ip_set_iptreemap_c {
1433 +       struct ip_set_iptreemap_d *tree[256]; /* x.x.y.x */
1434 +};
1435 +
1436 +struct ip_set_iptreemap_b {
1437 +       struct ip_set_iptreemap_c *tree[256]; /* x.y.x.x */
1438 +       unsigned char dirty[32];
1439 +};
1440 +#endif
1441 +
1442 +struct ip_set_iptreemap {
1443 +       unsigned int gc_interval;
1444 +#ifdef __KERNEL__
1445 +       struct timer_list gc;
1446 +       struct ip_set_iptreemap_b *tree[256]; /* y.x.x.x */
1447 +#endif
1448 +};
1449 +
1450 +struct ip_set_req_iptreemap_create {
1451 +       unsigned int gc_interval;
1452 +};
1453 +
1454 +struct ip_set_req_iptreemap {
1455 +       ip_set_ip_t ip;
1456 +       ip_set_ip_t end;
1457 +};
1458 +
1459 +#endif /* __IP_SET_IPTREEMAP_H */
1460 --- /dev/null
1461 +++ b/include/linux/netfilter_ipv4/ip_set_jhash.h
1462 @@ -0,0 +1,157 @@
1463 +#ifndef _LINUX_JHASH_H
1464 +#define _LINUX_JHASH_H
1465 +
1466 +/* jhash.h: Jenkins hash support.
1467 + *
1468 + * Copyright (C) 2006. Bob Jenkins (bob_jenkins@burtleburtle.net)
1469 + *
1470 + * http://burtleburtle.net/bob/hash/
1471 + *
1472 + * These are the credits from Bob's sources:
1473 + *
1474 + * lookup3.c, by Bob Jenkins, May 2006, Public Domain.
1475 + *
1476 + * These are functions for producing 32-bit hashes for hash table lookup.
1477 + * hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final() 
1478 + * are externally useful functions.  Routines to test the hash are included 
1479 + * if SELF_TEST is defined.  You can use this free for any purpose.  It's in
1480 + * the public domain.  It has no warranty.
1481 + *
1482 + * Copyright (C) 2009 Jozsef Kadlecsik (kadlec@blackhole.kfki.hu)
1483 + *
1484 + * I've modified Bob's hash to be useful in the Linux kernel, and
1485 + * any bugs present are my fault.  Jozsef
1486 + */
1487 +
1488 +#define __rot(x,k) (((x)<<(k)) | ((x)>>(32-(k))))
1489 +
1490 +/* __jhash_mix - mix 3 32-bit values reversibly. */
1491 +#define __jhash_mix(a,b,c) \
1492 +{ \
1493 +  a -= c;  a ^= __rot(c, 4);  c += b; \
1494 +  b -= a;  b ^= __rot(a, 6);  a += c; \
1495 +  c -= b;  c ^= __rot(b, 8);  b += a; \
1496 +  a -= c;  a ^= __rot(c,16);  c += b; \
1497 +  b -= a;  b ^= __rot(a,19);  a += c; \
1498 +  c -= b;  c ^= __rot(b, 4);  b += a; \
1499 +}
1500 +
1501 +/* __jhash_final - final mixing of 3 32-bit values (a,b,c) into c */
1502 +#define __jhash_final(a,b,c) \
1503 +{ \
1504 +  c ^= b; c -= __rot(b,14); \
1505 +  a ^= c; a -= __rot(c,11); \
1506 +  b ^= a; b -= __rot(a,25); \
1507 +  c ^= b; c -= __rot(b,16); \
1508 +  a ^= c; a -= __rot(c,4);  \
1509 +  b ^= a; b -= __rot(a,14); \
1510 +  c ^= b; c -= __rot(b,24); \
1511 +}
1512 +
1513 +/* The golden ratio: an arbitrary value */
1514 +#define JHASH_GOLDEN_RATIO     0xdeadbeef
1515 +
1516 +/* The most generic version, hashes an arbitrary sequence
1517 + * of bytes.  No alignment or length assumptions are made about
1518 + * the input key. The result depends on endianness.
1519 + */
1520 +static inline u32 jhash(const void *key, u32 length, u32 initval)
1521 +{
1522 +       u32 a,b,c;
1523 +       const u8 *k = key;
1524 +
1525 +       /* Set up the internal state */
1526 +       a = b = c = JHASH_GOLDEN_RATIO + length + initval;
1527 +
1528 +       /* all but the last block: affect some 32 bits of (a,b,c) */
1529 +       while (length > 12) {
1530 +               a += (k[0] + ((u32)k[1]<<8) + ((u32)k[2]<<16) + ((u32)k[3]<<24));
1531 +               b += (k[4] + ((u32)k[5]<<8) + ((u32)k[6]<<16) + ((u32)k[7]<<24));
1532 +               c += (k[8] + ((u32)k[9]<<8) + ((u32)k[10]<<16) + ((u32)k[11]<<24));
1533 +               __jhash_mix(a, b, c);
1534 +               length -= 12;
1535 +               k += 12;
1536 +       }
1537 +
1538 +       /* last block: affect all 32 bits of (c) */
1539 +       /* all the case statements fall through */
1540 +       switch (length) {
1541 +       case 12: c += (u32)k[11]<<24;
1542 +       case 11: c += (u32)k[10]<<16;
1543 +       case 10: c += (u32)k[9]<<8;
1544 +       case 9 : c += k[8];
1545 +       case 8 : b += (u32)k[7]<<24;
1546 +       case 7 : b += (u32)k[6]<<16;
1547 +       case 6 : b += (u32)k[5]<<8;
1548 +       case 5 : b += k[4];
1549 +       case 4 : a += (u32)k[3]<<24;
1550 +       case 3 : a += (u32)k[2]<<16;
1551 +       case 2 : a += (u32)k[1]<<8;
1552 +       case 1 : a += k[0];
1553 +               __jhash_final(a, b, c);
1554 +       case 0 :
1555 +               break;
1556 +       }
1557 +
1558 +       return c;
1559 +}
1560 +
1561 +/* A special optimized version that handles 1 or more u32s.
1562 + * The length parameter here is the number of u32s in the key.
1563 + */
1564 +static inline u32 jhash2(const u32 *k, u32 length, u32 initval)
1565 +{
1566 +       u32 a, b, c;
1567 +
1568 +       /* Set up the internal state */
1569 +       a = b = c = JHASH_GOLDEN_RATIO + (length<<2) + initval;
1570 +
1571 +       /* handle most of the key */
1572 +       while (length > 3) {
1573 +               a += k[0];
1574 +               b += k[1];
1575 +               c += k[2];
1576 +               __jhash_mix(a, b, c);
1577 +               length -= 3;
1578 +               k += 3;
1579 +       }
1580 +
1581 +       /* handle the last 3 u32's */
1582 +       /* all the case statements fall through */ 
1583 +       switch (length) {
1584 +       case 3: c += k[2];
1585 +       case 2: b += k[1];
1586 +       case 1: a += k[0];
1587 +               __jhash_final(a, b, c);
1588 +       case 0:     /* case 0: nothing left to add */
1589 +               break;
1590 +       }
1591 +
1592 +       return c;
1593 +}
1594 +
1595 +/* A special ultra-optimized version that knows it is hashing exactly
1596 + * 3, 2 or 1 word(s).
1597 + */
1598 +static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
1599 +{
1600 +       a += JHASH_GOLDEN_RATIO + initval;
1601 +       b += JHASH_GOLDEN_RATIO + initval;
1602 +       c += JHASH_GOLDEN_RATIO + initval;
1603 +
1604 +       __jhash_final(a, b, c);
1605 +
1606 +       return c;
1607 +}
1608 +
1609 +static inline u32 jhash_2words(u32 a, u32 b, u32 initval)
1610 +{
1611 +       return jhash_3words(0, a, b, initval);
1612 +}
1613 +
1614 +static inline u32 jhash_1word(u32 a, u32 initval)
1615 +{
1616 +       return jhash_3words(0, 0, a, initval);
1617 +}
1618 +
1619 +#endif /* _LINUX_JHASH_H */
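The hash set types use this header by hashing the element with a per-set random initval and folding the result onto the hash size; multiple probes simply use different initvals. A user-space sketch of that slot computation, with jhash_3words()/jhash_1word() and their macros copied from above and all sample values hypothetical:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

/* Pieces copied from ip_set_jhash.h above, built in user space. */
#define JHASH_GOLDEN_RATIO	0xdeadbeef
#define __rot(x,k) (((x)<<(k)) | ((x)>>(32-(k))))
#define __jhash_final(a,b,c) \
{ \
  c ^= b; c -= __rot(b,14); \
  a ^= c; a -= __rot(c,11); \
  b ^= a; b -= __rot(a,25); \
  c ^= b; c -= __rot(b,16); \
  a ^= c; a -= __rot(c,4);  \
  b ^= a; b -= __rot(a,14); \
  c ^= b; c -= __rot(b,24); \
}

static u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
{
	a += JHASH_GOLDEN_RATIO + initval;
	b += JHASH_GOLDEN_RATIO + initval;
	c += JHASH_GOLDEN_RATIO + initval;
	__jhash_final(a, b, c);
	return c;
}

static u32 jhash_1word(u32 a, u32 initval)
{
	return jhash_3words(0, 0, a, initval);
}

int main(void)
{
	u32 ip = 0xC0A80001;				/* 192.168.0.1, host byte order */
	u32 initval[2] = { 0x12345678, 0x9abcdef0 };	/* hypothetical per-set initvals */
	u32 hashsize = 1024;
	u32 p;

	/* one slot per probe, the way iphash picks buckets */
	for (p = 0; p < 2; p++)
		printf("probe %u -> slot %u\n", p,
		       jhash_1word(ip, initval[p]) % hashsize);
	return 0;
}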
1620 --- /dev/null
1621 +++ b/include/linux/netfilter_ipv4/ip_set_macipmap.h
1622 @@ -0,0 +1,39 @@
1623 +#ifndef __IP_SET_MACIPMAP_H
1624 +#define __IP_SET_MACIPMAP_H
1625 +
1626 +#include <linux/netfilter_ipv4/ip_set.h>
1627 +#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
1628 +
1629 +#define SETTYPE_NAME "macipmap"
1630 +
1631 +/* general flags */
1632 +#define IPSET_MACIP_MATCHUNSET 1
1633 +
1634 +/* per ip flags */
1635 +#define IPSET_MACIP_ISSET      1
1636 +
1637 +struct ip_set_macipmap {
1638 +       void *members;                  /* the macipmap proper */
1639 +       ip_set_ip_t first_ip;           /* host byte order, included in range */
1640 +       ip_set_ip_t last_ip;            /* host byte order, included in range */
1641 +       u_int32_t flags;
1642 +       u_int32_t size;                 /* size of the macipmap proper */
1643 +};
1644 +
1645 +struct ip_set_req_macipmap_create {
1646 +       ip_set_ip_t from;
1647 +       ip_set_ip_t to;
1648 +       u_int32_t flags;
1649 +};
1650 +
1651 +struct ip_set_req_macipmap {
1652 +       ip_set_ip_t ip;
1653 +       unsigned char ethernet[ETH_ALEN];
1654 +};
1655 +
1656 +struct ip_set_macip {
1657 +       unsigned short match;
1658 +       unsigned char ethernet[ETH_ALEN];
1659 +};
1660 +
1661 +#endif /* __IP_SET_MACIPMAP_H */
1662 --- /dev/null
1663 +++ b/include/linux/netfilter_ipv4/ip_set_malloc.h
1664 @@ -0,0 +1,153 @@
1665 +#ifndef _IP_SET_MALLOC_H
1666 +#define _IP_SET_MALLOC_H
1667 +
1668 +#ifdef __KERNEL__
1669 +#include <linux/vmalloc.h> 
1670 +
1671 +static size_t max_malloc_size = 0, max_page_size = 0;
1672 +static size_t default_max_malloc_size = 131072;                        /* Guaranteed: slab.c */
1673 +
1674 +static inline int init_max_page_size(void)
1675 +{
1676 +/* Compatibility glue to support 2.4.36 */
1677 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
1678 +#define __GFP_NOWARN           0
1679 +
1680 +       /* Guaranteed: slab.c */
1681 +       max_malloc_size = max_page_size = default_max_malloc_size;
1682 +#else
1683 +       size_t page_size = 0;
1684 +
1685 +#define CACHE(x) if (max_page_size == 0 || x < max_page_size)  \
1686 +                       page_size = x;
1687 +#include <linux/kmalloc_sizes.h>
1688 +#undef CACHE
1689 +       if (page_size) {
1690 +               if (max_malloc_size == 0)
1691 +                       max_malloc_size = page_size;
1692 +
1693 +               max_page_size = page_size;
1694 +
1695 +               return 1;
1696 +       }
1697 +#endif
1698 +       return 0;
1699 +}
1700 +
1701 +struct harray {
1702 +       size_t max_elements;
1703 +       void *arrays[0];
1704 +};
1705 +
1706 +static inline void * 
1707 +__harray_malloc(size_t hashsize, size_t typesize, gfp_t flags)
1708 +{
1709 +       struct harray *harray;
1710 +       size_t max_elements, size, i, j;
1711 +
1712 +       BUG_ON(max_page_size == 0);
1713 +
1714 +       if (typesize > max_page_size)
1715 +               return NULL;
1716 +
1717 +       max_elements = max_page_size/typesize;
1718 +       size = hashsize/max_elements;
1719 +       if (hashsize % max_elements)
1720 +               size++;
1721 +       
1722 +       /* Last pointer signals end of arrays */
1723 +       harray = kmalloc(sizeof(struct harray) + (size + 1) * sizeof(void *),
1724 +                        flags);
1725 +
1726 +       if (!harray)
1727 +               return NULL;
1728 +       
1729 +       for (i = 0; i < size - 1; i++) {
1730 +               harray->arrays[i] = kmalloc(max_elements * typesize, flags);
1731 +               if (!harray->arrays[i])
1732 +                       goto undo;
1733 +               memset(harray->arrays[i], 0, max_elements * typesize);
1734 +       }
1735 +       harray->arrays[i] = kmalloc((hashsize - i * max_elements) * typesize, 
1736 +                                   flags);
1737 +       if (!harray->arrays[i])
1738 +               goto undo;
1739 +       memset(harray->arrays[i], 0, (hashsize - i * max_elements) * typesize);
1740 +
1741 +       harray->max_elements = max_elements;
1742 +       harray->arrays[size] = NULL;
1743 +       
1744 +       return (void *)harray;
1745 +
1746 +    undo:
1747 +       for (j = 0; j < i; j++) {
1748 +               kfree(harray->arrays[j]);
1749 +       }
1750 +       kfree(harray);
1751 +       return NULL;
1752 +}
1753 +
1754 +static inline void *
1755 +harray_malloc(size_t hashsize, size_t typesize, gfp_t flags)
1756 +{
1757 +       void *harray;
1758 +       
1759 +       do {
1760 +               harray = __harray_malloc(hashsize, typesize, flags|__GFP_NOWARN);
1761 +       } while (harray == NULL && init_max_page_size());
1762 +       
1763 +       return harray;
1764 +}              
1765 +
1766 +static inline void harray_free(void *h)
1767 +{
1768 +       struct harray *harray = (struct harray *) h;
1769 +       size_t i;
1770 +       
1771 +       for (i = 0; harray->arrays[i] != NULL; i++)
1772 +               kfree(harray->arrays[i]);
1773 +       kfree(harray);
1774 +}
1775 +
1776 +static inline void harray_flush(void *h, size_t hashsize, size_t typesize)
1777 +{
1778 +       struct harray *harray = (struct harray *) h;
1779 +       size_t i;
1780 +       
1781 +       for (i = 0; harray->arrays[i+1] != NULL; i++)
1782 +               memset(harray->arrays[i], 0, harray->max_elements * typesize);
1783 +       memset(harray->arrays[i], 0, 
1784 +              (hashsize - i * harray->max_elements) * typesize);
1785 +}
1786 +
1787 +#define HARRAY_ELEM(h, type, which)                            \
1788 +({                                                             \
1789 +       struct harray *__h = (struct harray *)(h);              \
1790 +       ((type)((__h)->arrays[(which)/(__h)->max_elements])     \
1791 +               + (which)%(__h)->max_elements);                 \
1792 +})
1793 +
1794 +/* General memory allocation and deallocation */
1795 +static inline void * ip_set_malloc(size_t bytes)
1796 +{
1797 +       BUG_ON(max_malloc_size == 0);
1798 +
1799 +       if (bytes > default_max_malloc_size)
1800 +               return vmalloc(bytes);
1801 +       else
1802 +               return kmalloc(bytes, GFP_KERNEL | __GFP_NOWARN);
1803 +}
1804 +
1805 +static inline void ip_set_free(void * data, size_t bytes)
1806 +{
1807 +       BUG_ON(max_malloc_size == 0);
1808 +
1809 +       if (bytes > default_max_malloc_size)
1810 +               vfree(data);
1811 +       else
1812 +               kfree(data);
1813 +}
1814 +
1815 +#endif                         /* __KERNEL__ */
1816 +
1817 +#endif /*_IP_SET_MALLOC_H*/
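harray_malloc() above splits a logical array of hashsize elements into chunks no larger than max_page_size, and HARRAY_ELEM() maps a logical index back to a chunk and an offset. The arithmetic is just which/max_elements and which%max_elements; a trivial user-space illustration, with the sample sizes hypothetical:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t max_elements = 512;	/* hypothetical: max_page_size / typesize */
	size_t which = 1300;		/* hypothetical logical element index */

	/* HARRAY_ELEM(h, type, which) dereferences
	 * h->arrays[which / max_elements] + which % max_elements */
	printf("chunk %zu, offset %zu\n",
	       which / max_elements, which % max_elements);
	return 0;
}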
1818 --- /dev/null
1819 +++ b/include/linux/netfilter_ipv4/ip_set_nethash.h
1820 @@ -0,0 +1,31 @@
1821 +#ifndef __IP_SET_NETHASH_H
1822 +#define __IP_SET_NETHASH_H
1823 +
1824 +#include <linux/netfilter_ipv4/ip_set.h>
1825 +#include <linux/netfilter_ipv4/ip_set_hashes.h>
1826 +
1827 +#define SETTYPE_NAME "nethash"
1828 +
1829 +struct ip_set_nethash {
1830 +       ip_set_ip_t *members;           /* the nethash proper */
1831 +       uint32_t elements;              /* number of elements */
1832 +       uint32_t hashsize;              /* hash size */
1833 +       uint16_t probes;                /* max number of probes  */
1834 +       uint16_t resize;                /* resize factor in percent */
1835 +       uint8_t cidr[30];               /* CIDR sizes */
1836 +       uint16_t nets[30];              /* nr of nets by CIDR sizes */
1837 +       initval_t initval[0];           /* initvals for jhash_1word */
1838 +};
1839 +
1840 +struct ip_set_req_nethash_create {
1841 +       uint32_t hashsize;
1842 +       uint16_t probes;
1843 +       uint16_t resize;
1844 +};
1845 +
1846 +struct ip_set_req_nethash {
1847 +       ip_set_ip_t ip;
1848 +       uint8_t cidr;
1849 +};
1850 +
1851 +#endif /* __IP_SET_NETHASH_H */
1852 --- /dev/null
1853 +++ b/include/linux/netfilter_ipv4/ip_set_portmap.h
1854 @@ -0,0 +1,25 @@
1855 +#ifndef __IP_SET_PORTMAP_H
1856 +#define __IP_SET_PORTMAP_H
1857 +
1858 +#include <linux/netfilter_ipv4/ip_set.h>
1859 +#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
1860 +
1861 +#define SETTYPE_NAME   "portmap"
1862 +
1863 +struct ip_set_portmap {
1864 +       void *members;                  /* the portmap proper */
1865 +       ip_set_ip_t first_ip;           /* host byte order, included in range */
1866 +       ip_set_ip_t last_ip;            /* host byte order, included in range */
1867 +       u_int32_t size;                 /* size of the portmap proper */
1868 +};
1869 +
1870 +struct ip_set_req_portmap_create {
1871 +       ip_set_ip_t from;
1872 +       ip_set_ip_t to;
1873 +};
1874 +
1875 +struct ip_set_req_portmap {
1876 +       ip_set_ip_t ip;
1877 +};
1878 +
1879 +#endif /* __IP_SET_PORTMAP_H */
1880 --- /dev/null
1881 +++ b/include/linux/netfilter_ipv4/ip_set_setlist.h
1882 @@ -0,0 +1,26 @@
1883 +#ifndef __IP_SET_SETLIST_H
1884 +#define __IP_SET_SETLIST_H
1885 +
1886 +#include <linux/netfilter_ipv4/ip_set.h>
1887 +
1888 +#define SETTYPE_NAME "setlist"
1889 +
1890 +#define IP_SET_SETLIST_ADD_AFTER       0
1891 +#define IP_SET_SETLIST_ADD_BEFORE      1
1892 +
1893 +struct ip_set_setlist {
1894 +       uint8_t size;
1895 +       ip_set_id_t index[0];
1896 +};
1897 +
1898 +struct ip_set_req_setlist_create {
1899 +       uint8_t size;
1900 +};
1901 +
1902 +struct ip_set_req_setlist {
1903 +       char name[IP_SET_MAXNAMELEN];
1904 +       char ref[IP_SET_MAXNAMELEN];
1905 +       uint8_t before;
1906 +};
1907 +
1908 +#endif /* __IP_SET_SETLIST_H */
1909 --- /dev/null
1910 +++ b/include/linux/netfilter_ipv4/ipt_set.h
1911 @@ -0,0 +1,21 @@
1912 +#ifndef _IPT_SET_H
1913 +#define _IPT_SET_H
1914 +
1915 +#include <linux/netfilter_ipv4/ip_set.h>
1916 +
1917 +struct ipt_set_info {
1918 +       ip_set_id_t index;
1919 +       u_int32_t flags[IP_SET_MAX_BINDINGS + 1];
1920 +};
1921 +
1922 +/* match info */
1923 +struct ipt_set_info_match {
1924 +       struct ipt_set_info match_set;
1925 +};
1926 +
1927 +struct ipt_set_info_target {
1928 +       struct ipt_set_info add_set;
1929 +       struct ipt_set_info del_set;
1930 +};
1931 +
1932 +#endif /*_IPT_SET_H*/
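struct ipt_set_info carries the set index plus a zero-terminated flags[] array, one entry per binding level followed (the kernel loops while flags[i] is non-zero, see ip_set_testip_kernel() below). A hypothetical sketch of how a match against the source address of a single set could be filled in, assuming the index was already resolved with ip_set_get_byname():

#include <string.h>
#include <linux/netfilter_ipv4/ipt_set.h>

/* Illustration only: fill an ipt_set_info to test the packet's source
 * address against one set, with no further binding levels. */
static void fill_match(struct ipt_set_info *info, ip_set_id_t index)
{
	memset(info, 0, sizeof(*info));
	info->index = index;		/* assumed: from ip_set_get_byname() */
	info->flags[0] = IPSET_SRC;	/* level 0: match on the source address */
	/* flags[1] stays 0 and terminates the list */
}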
1933 --- /dev/null
1934 +++ b/net/ipv4/netfilter/ip_set.c
1935 @@ -0,0 +1,2076 @@
1936 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
1937 + *                         Patrick Schaaf <bof@bof.de>
1938 + * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
1939 + *
1940 + * This program is free software; you can redistribute it and/or modify
1941 + * it under the terms of the GNU General Public License version 2 as
1942 + * published by the Free Software Foundation.
1943 + */
1944 +
1945 +/* Kernel module for IP set management */
1946 +
1947 +#include <linux/version.h>
1948 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
1949 +#include <linux/config.h>
1950 +#endif
1951 +#include <linux/module.h>
1952 +#include <linux/moduleparam.h>
1953 +#include <linux/kmod.h>
1954 +#include <linux/ip.h>
1955 +#include <linux/skbuff.h>
1956 +#include <linux/random.h>
1957 +#include <linux/netfilter_ipv4/ip_set_jhash.h>
1958 +#include <linux/errno.h>
1959 +#include <linux/capability.h>
1960 +#include <asm/uaccess.h>
1961 +#include <asm/bitops.h>
1962 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
1963 +#include <asm/semaphore.h>
1964 +#else
1965 +#include <linux/semaphore.h>
1966 +#endif
1967 +#include <linux/spinlock.h>
1968 +
1969 +#define ASSERT_READ_LOCK(x)
1970 +#define ASSERT_WRITE_LOCK(x)
1971 +#include <linux/netfilter.h>
1972 +#include <linux/netfilter_ipv4/ip_set.h>
1973 +
1974 +static struct list_head set_type_list;         /* all registered set types */
1975 +static struct ip_set **ip_set_list;            /* all individual sets */
1976 +static DEFINE_RWLOCK(ip_set_lock);             /* protects the lists and the hash */
1977 +static DECLARE_MUTEX(ip_set_app_mutex);                /* serializes user access */
1978 +static ip_set_id_t ip_set_max = CONFIG_IP_NF_SET_MAX;
1979 +static ip_set_id_t ip_set_bindings_hash_size =  CONFIG_IP_NF_SET_HASHSIZE;
1980 +static struct list_head *ip_set_hash;          /* hash of bindings */
1981 +static unsigned int ip_set_hash_random;                /* random seed */
1982 +
1983 +#define SETNAME_EQ(a,b)                (strncmp(a,b,IP_SET_MAXNAMELEN) == 0)
1984 +
1985 +/*
1986 + * Sets are identified either by the index in ip_set_list or by id.
1987 + * The id never changes and is used to find a key in the hash.
1988 + * The index may change by swapping and used at all other places
1989 + * (set/SET netfilter modules, binding value, etc.)
1990 + *
1991 + * Userspace requests are serialized by ip_set_mutex and sets can
1992 + * be deleted only from userspace. Therefore ip_set_list locking
1993 + * must obey the following rules:
1994 + *
1995 + * - kernel requests: read and write locking mandatory
1996 + * - user requests: read locking optional, write locking mandatory
1997 + */
1998 +
1999 +static inline void
2000 +__ip_set_get(ip_set_id_t index)
2001 +{
2002 +       atomic_inc(&ip_set_list[index]->ref);
2003 +}
2004 +
2005 +static inline void
2006 +__ip_set_put(ip_set_id_t index)
2007 +{
2008 +       atomic_dec(&ip_set_list[index]->ref);
2009 +}
2010 +
2011 +/*
2012 + * Binding routines
2013 + */
2014 +
2015 +static inline struct ip_set_hash *
2016 +__ip_set_find(u_int32_t key, ip_set_id_t id, ip_set_ip_t ip)
2017 +{
2018 +       struct ip_set_hash *set_hash;
2019 +
2020 +       list_for_each_entry(set_hash, &ip_set_hash[key], list)
2021 +               if (set_hash->id == id && set_hash->ip == ip)
2022 +                       return set_hash;
2023 +                       
2024 +       return NULL;
2025 +}
2026 +
2027 +static ip_set_id_t
2028 +ip_set_find_in_hash(ip_set_id_t id, ip_set_ip_t ip)
2029 +{
2030 +       u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
2031 +                               % ip_set_bindings_hash_size;
2032 +       struct ip_set_hash *set_hash;
2033 +
2034 +       ASSERT_READ_LOCK(&ip_set_lock);
2035 +       IP_SET_ASSERT(ip_set_list[id]);
2036 +       DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));     
2037 +       
2038 +       set_hash = __ip_set_find(key, id, ip);
2039 +       
2040 +       DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
2041 +          HIPQUAD(ip),
2042 +          set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
2043 +
2044 +       return (set_hash != NULL ? set_hash->binding : IP_SET_INVALID_ID);
2045 +}
2046 +
2047 +static inline void
2048 +__set_hash_del(struct ip_set_hash *set_hash)
2049 +{
2050 +       ASSERT_WRITE_LOCK(&ip_set_lock);
2051 +       IP_SET_ASSERT(ip_set_list[set_hash->binding]);  
2052 +
2053 +       __ip_set_put(set_hash->binding);
2054 +       list_del(&set_hash->list);
2055 +       kfree(set_hash);
2056 +}
2057 +
2058 +static int
2059 +ip_set_hash_del(ip_set_id_t id, ip_set_ip_t ip)
2060 +{
2061 +       u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
2062 +                               % ip_set_bindings_hash_size;
2063 +       struct ip_set_hash *set_hash;
2064 +       
2065 +       IP_SET_ASSERT(ip_set_list[id]);
2066 +       DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));     
2067 +       write_lock_bh(&ip_set_lock);
2068 +       set_hash = __ip_set_find(key, id, ip);
2069 +       DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
2070 +          HIPQUAD(ip),
2071 +          set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
2072 +
2073 +       if (set_hash != NULL)
2074 +               __set_hash_del(set_hash);
2075 +       write_unlock_bh(&ip_set_lock);
2076 +       return 0;
2077 +}
2078 +
2079 +static int
2080 +ip_set_hash_add(ip_set_id_t id, ip_set_ip_t ip, ip_set_id_t binding)
2081 +{
2082 +       u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
2083 +                               % ip_set_bindings_hash_size;
2084 +       struct ip_set_hash *set_hash;
2085 +       int ret = 0;
2086 +       
2087 +       IP_SET_ASSERT(ip_set_list[id]);
2088 +       IP_SET_ASSERT(ip_set_list[binding]);
2089 +       DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
2090 +          HIPQUAD(ip), ip_set_list[binding]->name);
2091 +       write_lock_bh(&ip_set_lock);
2092 +       set_hash = __ip_set_find(key, id, ip);
2093 +       if (!set_hash) {
2094 +               set_hash = kmalloc(sizeof(struct ip_set_hash), GFP_ATOMIC);
2095 +               if (!set_hash) {
2096 +                       ret = -ENOMEM;
2097 +                       goto unlock;
2098 +               }
2099 +               INIT_LIST_HEAD(&set_hash->list);
2100 +               set_hash->id = id;
2101 +               set_hash->ip = ip;
2102 +               list_add(&set_hash->list, &ip_set_hash[key]);
2103 +       } else {
2104 +               IP_SET_ASSERT(ip_set_list[set_hash->binding]);  
2105 +               DP("overwrite binding: %s",
2106 +                  ip_set_list[set_hash->binding]->name);
2107 +               __ip_set_put(set_hash->binding);
2108 +       }
2109 +       set_hash->binding = binding;
2110 +       __ip_set_get(set_hash->binding);
2111 +       DP("stored: key %u, id %u (%s), ip %u.%u.%u.%u, binding %u (%s)",
2112 +          key, id, ip_set_list[id]->name,
2113 +          HIPQUAD(ip), binding, ip_set_list[binding]->name);
2114 +    unlock:
2115 +       write_unlock_bh(&ip_set_lock);
2116 +       return ret;
2117 +}
2118 +
2119 +#define FOREACH_HASH_DO(fn, args...)                                           \
2120 +({                                                                             \
2121 +       ip_set_id_t __key;                                                      \
2122 +       struct ip_set_hash *__set_hash;                                         \
2123 +                                                                               \
2124 +       for (__key = 0; __key < ip_set_bindings_hash_size; __key++) {           \
2125 +               list_for_each_entry(__set_hash, &ip_set_hash[__key], list)      \
2126 +                       fn(__set_hash , ## args);                               \
2127 +       }                                                                       \
2128 +})
2129 +
2130 +#define FOREACH_HASH_RW_DO(fn, args...)                                                \
2131 +({                                                                             \
2132 +       ip_set_id_t __key;                                                      \
2133 +       struct ip_set_hash *__set_hash, *__n;                                   \
2134 +                                                                               \
2135 +       ASSERT_WRITE_LOCK(&ip_set_lock);                                        \
2136 +       for (__key = 0; __key < ip_set_bindings_hash_size; __key++) {           \
2137 +               list_for_each_entry_safe(__set_hash, __n, &ip_set_hash[__key], list)\
2138 +                       fn(__set_hash , ## args);                               \
2139 +       }                                                                       \
2140 +})
2141 +
2142 +/* Add, del and test set entries from kernel */
2143 +
2144 +#define follow_bindings(index, set, ip)                                        \
2145 +((index = ip_set_find_in_hash((set)->id, ip)) != IP_SET_INVALID_ID     \
2146 + || (index = (set)->binding) != IP_SET_INVALID_ID)
2147 +
2148 +int
2149 +ip_set_testip_kernel(ip_set_id_t index,
2150 +                    const struct sk_buff *skb,
2151 +                    const u_int32_t *flags)
2152 +{
2153 +       struct ip_set *set;
2154 +       ip_set_ip_t ip;
2155 +       int res;
2156 +       unsigned char i = 0;
2157 +       
2158 +       IP_SET_ASSERT(flags[i]);
2159 +       read_lock_bh(&ip_set_lock);
2160 +       do {
2161 +               set = ip_set_list[index];
2162 +               IP_SET_ASSERT(set);
2163 +               DP("set %s, index %u", set->name, index);
2164 +               read_lock_bh(&set->lock);
2165 +               res = set->type->testip_kernel(set, skb, &ip, flags, i++);
2166 +               read_unlock_bh(&set->lock);
2167 +               i += !!(set->type->features & IPSET_DATA_DOUBLE);
2168 +       } while (res > 0
2169 +                && flags[i]
2170 +                && follow_bindings(index, set, ip));
2171 +       read_unlock_bh(&ip_set_lock);
2172 +
2173 +       return (res < 0 ? 0 : res);
2174 +}
2175 +
2176 +int
2177 +ip_set_addip_kernel(ip_set_id_t index,
2178 +                   const struct sk_buff *skb,
2179 +                   const u_int32_t *flags)
2180 +{
2181 +       struct ip_set *set;
2182 +       ip_set_ip_t ip;
2183 +       int res;
2184 +       unsigned char i = 0;
2185 +
2186 +       IP_SET_ASSERT(flags[i]);
2187 +   retry:
2188 +       read_lock_bh(&ip_set_lock);
2189 +       do {
2190 +               set = ip_set_list[index];
2191 +               IP_SET_ASSERT(set);
2192 +               DP("set %s, index %u", set->name, index);
2193 +               write_lock_bh(&set->lock);
2194 +               res = set->type->addip_kernel(set, skb, &ip, flags, i++);
2195 +               write_unlock_bh(&set->lock);
2196 +               i += !!(set->type->features & IPSET_DATA_DOUBLE);
2197 +       } while ((res == 0 || res == -EEXIST)
2198 +                && flags[i]
2199 +                && follow_bindings(index, set, ip));
2200 +       read_unlock_bh(&ip_set_lock);
2201 +
2202 +       if (res == -EAGAIN
2203 +           && set->type->retry
2204 +           && (res = set->type->retry(set)) == 0)
2205 +               goto retry;
2206 +       
2207 +       return res;
2208 +}
2209 +
2210 +int
2211 +ip_set_delip_kernel(ip_set_id_t index,
2212 +                   const struct sk_buff *skb,
2213 +                   const u_int32_t *flags)
2214 +{
2215 +       struct ip_set *set;
2216 +       ip_set_ip_t ip;
2217 +       int res;
2218 +       unsigned char i = 0;
2219 +
2220 +       IP_SET_ASSERT(flags[i]);
2221 +       read_lock_bh(&ip_set_lock);
2222 +       do {
2223 +               set = ip_set_list[index];
2224 +               IP_SET_ASSERT(set);
2225 +               DP("set %s, index %u", set->name, index);
2226 +               write_lock_bh(&set->lock);
2227 +               res = set->type->delip_kernel(set, skb, &ip, flags, i++);
2228 +               write_unlock_bh(&set->lock);
2229 +               i += !!(set->type->features & IPSET_DATA_DOUBLE);
2230 +       } while ((res == 0 || res == -EEXIST)
2231 +                && flags[i]
2232 +                && follow_bindings(index, set, ip));
2233 +       read_unlock_bh(&ip_set_lock);
2234 +       
2235 +       return res;
2236 +}
2237 +
2238 +/* Register and deregister settype */
2239 +
2240 +static inline struct ip_set_type *
2241 +find_set_type(const char *name)
2242 +{
2243 +       struct ip_set_type *set_type;
2244 +
2245 +       list_for_each_entry(set_type, &set_type_list, list)
2246 +               if (!strncmp(set_type->typename, name, IP_SET_MAXNAMELEN - 1))
2247 +                       return set_type;
2248 +       return NULL;
2249 +}
2250 +
2251 +int
2252 +ip_set_register_set_type(struct ip_set_type *set_type)
2253 +{
2254 +       int ret = 0;
2255 +       
2256 +       if (set_type->protocol_version != IP_SET_PROTOCOL_VERSION) {
2257 +               ip_set_printk("'%s' uses wrong protocol version %u (want %u)",
2258 +                             set_type->typename,
2259 +                             set_type->protocol_version,
2260 +                             IP_SET_PROTOCOL_VERSION);
2261 +               return -EINVAL;
2262 +       }
2263 +
2264 +       write_lock_bh(&ip_set_lock);
2265 +       if (find_set_type(set_type->typename)) {
2266 +               /* Duplicate! */
2267 +               ip_set_printk("'%s' already registered!",
2268 +                             set_type->typename);
2269 +               ret = -EINVAL;
2270 +               goto unlock;
2271 +       }
2272 +       if (!try_module_get(THIS_MODULE)) {
2273 +               ret = -EFAULT;
2274 +               goto unlock;
2275 +       }
2276 +       list_add(&set_type->list, &set_type_list);
2277 +       DP("'%s' registered.", set_type->typename);
2278 +   unlock:
2279 +       write_unlock_bh(&ip_set_lock);
2280 +       return ret;
2281 +}
2282 +
2283 +void
2284 +ip_set_unregister_set_type(struct ip_set_type *set_type)
2285 +{
2286 +       write_lock_bh(&ip_set_lock);
2287 +       if (!find_set_type(set_type->typename)) {
2288 +               ip_set_printk("'%s' not registered?",
2289 +                             set_type->typename);
2290 +               goto unlock;
2291 +       }
2292 +       list_del(&set_type->list);
2293 +       module_put(THIS_MODULE);
2294 +       DP("'%s' unregistered.", set_type->typename);
2295 +   unlock:
2296 +       write_unlock_bh(&ip_set_lock);
2297 +
2298 +}
2299 +
2300 +ip_set_id_t
2301 +__ip_set_get_byname(const char *name, struct ip_set **set)
2302 +{
2303 +       ip_set_id_t i, index = IP_SET_INVALID_ID;
2304 +       
2305 +       for (i = 0; i < ip_set_max; i++) {
2306 +               if (ip_set_list[i] != NULL
2307 +                   && SETNAME_EQ(ip_set_list[i]->name, name)) {
2308 +                       __ip_set_get(i);
2309 +                       index = i;
2310 +                       *set = ip_set_list[i];
2311 +                       break;
2312 +               }
2313 +       }
2314 +       return index;
2315 +}
2316 +
2317 +void __ip_set_put_byindex(ip_set_id_t index)
2318 +{
2319 +       if (ip_set_list[index])
2320 +               __ip_set_put(index);
2321 +}
2322 +
2323 +/*
2324 + * Userspace routines
2325 + */
2326 +
2327 +/*
2328 + * Find set by name, reference it once. The reference makes sure the
2329 + * thing pointed to does not go away under our feet. Drop the reference
2330 + * later, using ip_set_put().
2331 + */
2332 +ip_set_id_t
2333 +ip_set_get_byname(const char *name)
2334 +{
2335 +       ip_set_id_t i, index = IP_SET_INVALID_ID;
2336 +       
2337 +       down(&ip_set_app_mutex);
2338 +       for (i = 0; i < ip_set_max; i++) {
2339 +               if (ip_set_list[i] != NULL
2340 +                   && SETNAME_EQ(ip_set_list[i]->name, name)) {
2341 +                       __ip_set_get(i);
2342 +                       index = i;
2343 +                       break;
2344 +               }
2345 +       }
2346 +       up(&ip_set_app_mutex);
2347 +       return index;
2348 +}
2349 +
2350 +/*
2351 + * Find set by index, reference it once. The reference makes sure the
2352 + * thing pointed to does not go away under our feet. Drop the reference
2353 + * later, using ip_set_put().
2354 + */
2355 +ip_set_id_t
2356 +ip_set_get_byindex(ip_set_id_t index)
2357 +{
2358 +       down(&ip_set_app_mutex);
2359 +
2360 +       if (index >= ip_set_max) {
2360 +               up(&ip_set_app_mutex);
2361 +               return IP_SET_INVALID_ID;
2361 +       }
2362 +       
2363 +       if (ip_set_list[index])
2364 +               __ip_set_get(index);
2365 +       else
2366 +               index = IP_SET_INVALID_ID;
2367 +               
2368 +       up(&ip_set_app_mutex);
2369 +       return index;
2370 +}
2371 +
2372 +/*
2373 + * Find the set id belonging to the index.
2374 + * We are protected by the mutex, so we do not need to use
2375 + * ip_set_lock. There is no need to reference the sets either.
2376 + */
2377 +ip_set_id_t
2378 +ip_set_id(ip_set_id_t index)
2379 +{
2380 +       if (index >= ip_set_max || !ip_set_list[index])
2381 +               return IP_SET_INVALID_ID;
2382 +       
2383 +       return ip_set_list[index]->id;
2384 +}
2385 +
2386 +/*
2387 + * If the given index points to a valid set, decrement its
2388 + * reference count by 1. The caller shall not assume the index
2389 + * is still valid after calling this function.
2390 + */
2391 +void ip_set_put_byindex(ip_set_id_t index)
2392 +{
2393 +       down(&ip_set_app_mutex);
2394 +       if (ip_set_list[index])
2395 +               __ip_set_put(index);
2396 +       up(&ip_set_app_mutex);
2397 +}
2398 +
2399 +/* Find a set by name or index */
2400 +static ip_set_id_t
2401 +ip_set_find_byname(const char *name)
2402 +{
2403 +       ip_set_id_t i, index = IP_SET_INVALID_ID;
2404 +       
2405 +       for (i = 0; i < ip_set_max; i++) {
2406 +               if (ip_set_list[i] != NULL
2407 +                   && SETNAME_EQ(ip_set_list[i]->name, name)) {
2408 +                       index = i;
2409 +                       break;
2410 +               }
2411 +       }
2412 +       return index;
2413 +}
2414 +
2415 +static ip_set_id_t
2416 +ip_set_find_byindex(ip_set_id_t index)
2417 +{
2418 +       if (index >= ip_set_max || ip_set_list[index] == NULL)
2419 +               index = IP_SET_INVALID_ID;
2420 +       
2421 +       return index;
2422 +}
2423 +
2424 +/*
2425 + * Add, del, test, bind and unbind
2426 + */
2427 +
2428 +static inline int
2429 +__ip_set_testip(struct ip_set *set,
2430 +               const void *data,
2431 +               u_int32_t size,
2432 +               ip_set_ip_t *ip)
2433 +{
2434 +       int res;
2435 +
2436 +       read_lock_bh(&set->lock);
2437 +       res = set->type->testip(set, data, size, ip);
2438 +       read_unlock_bh(&set->lock);
2439 +
2440 +       return res;
2441 +}
2442 +
2443 +static int
2444 +__ip_set_addip(ip_set_id_t index,
2445 +              const void *data,
2446 +              u_int32_t size)
2447 +{
2448 +       struct ip_set *set = ip_set_list[index];
2449 +       ip_set_ip_t ip;
2450 +       int res;
2451 +       
2452 +       IP_SET_ASSERT(set);
2453 +       do {
2454 +               write_lock_bh(&set->lock);
2455 +               res = set->type->addip(set, data, size, &ip);
2456 +               write_unlock_bh(&set->lock);
2457 +       } while (res == -EAGAIN
2458 +                && set->type->retry
2459 +                && (res = set->type->retry(set)) == 0);
2460 +
2461 +       return res;
2462 +}
2463 +
2464 +static int
2465 +ip_set_addip(ip_set_id_t index,
2466 +            const void *data,
2467 +            u_int32_t size)
2468 +{
2469 +       struct ip_set *set = ip_set_list[index];
2470 +
2471 +       IP_SET_ASSERT(set);
2472 +
2473 +       if (size - sizeof(struct ip_set_req_adt) != set->type->reqsize) {
2474 +               ip_set_printk("data length wrong (want %lu, have %zu)",
2475 +                             (long unsigned)set->type->reqsize,
2476 +                             size - sizeof(struct ip_set_req_adt));
2477 +               return -EINVAL;
2478 +       }
2479 +       return __ip_set_addip(index,
2480 +                             data + sizeof(struct ip_set_req_adt),
2481 +                             size - sizeof(struct ip_set_req_adt));
2482 +}
2483 +
2484 +static int
2485 +ip_set_delip(ip_set_id_t index,
2486 +            const void *data,
2487 +            u_int32_t size)
2488 +{
2489 +       struct ip_set *set = ip_set_list[index];
2490 +       ip_set_ip_t ip;
2491 +       int res;
2492 +       
2493 +       IP_SET_ASSERT(set);
2494 +
2495 +       if (size - sizeof(struct ip_set_req_adt) != set->type->reqsize) {
2496 +               ip_set_printk("data length wrong (want %lu, have %zu)",
2497 +                             (long unsigned)set->type->reqsize,
2498 +                             size - sizeof(struct ip_set_req_adt));
2499 +               return -EINVAL;
2500 +       }
2501 +       write_lock_bh(&set->lock);
2502 +       res = set->type->delip(set,
2503 +                              data + sizeof(struct ip_set_req_adt),
2504 +                              size - sizeof(struct ip_set_req_adt),
2505 +                              &ip);
2506 +       write_unlock_bh(&set->lock);
2507 +
2508 +       return res;
2509 +}
2510 +
2511 +static int
2512 +ip_set_testip(ip_set_id_t index,
2513 +             const void *data,
2514 +             u_int32_t size)
2515 +{
2516 +       struct ip_set *set = ip_set_list[index];
2517 +       ip_set_ip_t ip;
2518 +       int res;
2519 +
2520 +       IP_SET_ASSERT(set);
2521 +       
2522 +       if (size - sizeof(struct ip_set_req_adt) != set->type->reqsize) {
2523 +               ip_set_printk("data length wrong (want %lu, have %zu)",
2524 +                             (long unsigned)set->type->reqsize,
2525 +                             size - sizeof(struct ip_set_req_adt));
2526 +               return -EINVAL;
2527 +       }
2528 +       res = __ip_set_testip(set,
2529 +                             data + sizeof(struct ip_set_req_adt),
2530 +                             size - sizeof(struct ip_set_req_adt),
2531 +                             &ip);
2532 +
2533 +       return (res > 0 ? -EEXIST : res);
2534 +}
2535 +
2536 +static int
2537 +ip_set_bindip(ip_set_id_t index,
2538 +             const void *data,
2539 +             u_int32_t size)
2540 +{
2541 +       struct ip_set *set = ip_set_list[index];
2542 +       const struct ip_set_req_bind *req_bind;
2543 +       ip_set_id_t binding;
2544 +       ip_set_ip_t ip;
2545 +       int res;
2546 +
2547 +       IP_SET_ASSERT(set);
2548 +       if (size < sizeof(struct ip_set_req_bind))
2549 +               return -EINVAL;
2550 +               
2551 +       req_bind = data;
2552 +
2553 +       if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_DEFAULT)) {
2554 +               /* Default binding of a set */
2555 +               const char *binding_name;
2556 +               
2557 +               if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
2558 +                       return -EINVAL;
2559 +
2560 +               binding_name = data + sizeof(struct ip_set_req_bind);
2561 +
2562 +               binding = ip_set_find_byname(binding_name);
2563 +               if (binding == IP_SET_INVALID_ID)
2564 +                       return -ENOENT;
2565 +
2566 +               write_lock_bh(&ip_set_lock);
2567 +               /* Sets as binding values are referenced */
2568 +               if (set->binding != IP_SET_INVALID_ID)
2569 +                       __ip_set_put(set->binding);
2570 +               set->binding = binding;
2571 +               __ip_set_get(set->binding);
2572 +               write_unlock_bh(&ip_set_lock);
2573 +
2574 +               return 0;
2575 +       }
2576 +       binding = ip_set_find_byname(req_bind->binding);
2577 +       if (binding == IP_SET_INVALID_ID)
2578 +               return -ENOENT;
2579 +
2580 +       res = __ip_set_testip(set,
2581 +                             data + sizeof(struct ip_set_req_bind),
2582 +                             size - sizeof(struct ip_set_req_bind),
2583 +                             &ip);
2584 +       DP("set %s, ip: %u.%u.%u.%u, binding %s",
2585 +          set->name, HIPQUAD(ip), ip_set_list[binding]->name);
2586 +       
2587 +       if (res >= 0)
2588 +               res = ip_set_hash_add(set->id, ip, binding);
2589 +
2590 +       return res;
2591 +}
2592 +
2593 +#define FOREACH_SET_DO(fn, args...)                            \
2594 +({                                                             \
2595 +       ip_set_id_t __i;                                        \
2596 +       struct ip_set *__set;                                   \
2597 +                                                               \
2598 +       for (__i = 0; __i < ip_set_max; __i++) {                \
2599 +               __set = ip_set_list[__i];                       \
2600 +               if (__set != NULL)                              \
2601 +                       fn(__set , ##args);                     \
2602 +       }                                                       \
2603 +})
2604 +
2605 +static inline void
2606 +__set_hash_del_byid(struct ip_set_hash *set_hash, ip_set_id_t id)
2607 +{
2608 +       if (set_hash->id == id)
2609 +               __set_hash_del(set_hash);
2610 +}
2611 +
2612 +static inline void
2613 +__unbind_default(struct ip_set *set)
2614 +{
2615 +       if (set->binding != IP_SET_INVALID_ID) {
2616 +               /* Sets as binding values are referenced */
2617 +               __ip_set_put(set->binding);
2618 +               set->binding = IP_SET_INVALID_ID;
2619 +       }
2620 +}
2621 +
2622 +static int
2623 +ip_set_unbindip(ip_set_id_t index,
2624 +               const void *data,
2625 +               u_int32_t size)
2626 +{
2627 +       struct ip_set *set;
2628 +       const struct ip_set_req_bind *req_bind;
2629 +       ip_set_ip_t ip;
2630 +       int res;
2631 +
2632 +       DP("");
2633 +       if (size < sizeof(struct ip_set_req_bind))
2634 +               return -EINVAL;
2635 +               
2636 +       req_bind = data;
2637 +       
2638 +       DP("%u %s", index, req_bind->binding);
2639 +       if (index == IP_SET_INVALID_ID) {
2640 +               /* unbind :all: */
2641 +               if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_DEFAULT)) {
2642 +                       /* Default binding of sets */
2643 +                       write_lock_bh(&ip_set_lock);
2644 +                       FOREACH_SET_DO(__unbind_default);
2645 +                       write_unlock_bh(&ip_set_lock);
2646 +                       return 0;
2647 +               } else if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_ALL)) {
2648 +                       /* Flush all bindings of all sets */
2649 +                       write_lock_bh(&ip_set_lock);
2650 +                       FOREACH_HASH_RW_DO(__set_hash_del);
2651 +                       write_unlock_bh(&ip_set_lock);
2652 +                       return 0;
2653 +               }
2654 +               DP("unreachable reached!");
2655 +               return -EINVAL;
2656 +       }
2657 +       
2658 +       set = ip_set_list[index];
2659 +       IP_SET_ASSERT(set);
2660 +       if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_DEFAULT)) {
2661 +               /* Default binding of set */
2662 +               ip_set_id_t binding = ip_set_find_byindex(set->binding);
2663 +
2664 +               if (binding == IP_SET_INVALID_ID)
2665 +                       return -ENOENT;
2666 +                       
2667 +               write_lock_bh(&ip_set_lock);
2668 +               /* Sets in hash values are referenced */
2669 +               __ip_set_put(set->binding);
2670 +               set->binding = IP_SET_INVALID_ID;
2671 +               write_unlock_bh(&ip_set_lock);
2672 +
2673 +               return 0;
2674 +       } else if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_ALL)) {
2675 +               /* Flush all bindings */
2676 +
2677 +               write_lock_bh(&ip_set_lock);
2678 +               FOREACH_HASH_RW_DO(__set_hash_del_byid, set->id);
2679 +               write_unlock_bh(&ip_set_lock);
2680 +               return 0;
2681 +       }
2682 +       
2683 +       res = __ip_set_testip(set,
2684 +                             data + sizeof(struct ip_set_req_bind),
2685 +                             size - sizeof(struct ip_set_req_bind),
2686 +                             &ip);
2687 +
2688 +       DP("set %s, ip: %u.%u.%u.%u", set->name, HIPQUAD(ip));
2689 +       if (res >= 0)
2690 +               res = ip_set_hash_del(set->id, ip);
2691 +
2692 +       return res;
2693 +}
2694 +
2695 +static int
2696 +ip_set_testbind(ip_set_id_t index,
2697 +               const void *data,
2698 +               u_int32_t size)
2699 +{
2700 +       struct ip_set *set = ip_set_list[index];
2701 +       const struct ip_set_req_bind *req_bind;
2702 +       ip_set_id_t binding;
2703 +       ip_set_ip_t ip;
2704 +       int res;
2705 +
2706 +       IP_SET_ASSERT(set);
2707 +       if (size < sizeof(struct ip_set_req_bind))
2708 +               return -EINVAL;
2709 +               
2710 +       req_bind = data;
2711 +
2712 +       if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_DEFAULT)) {
2713 +               /* Default binding of set */
2714 +               const char *binding_name;
2715 +               
2716 +               if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
2717 +                       return -EINVAL;
2718 +
2719 +               binding_name = data + sizeof(struct ip_set_req_bind);
2720 +
2721 +               binding = ip_set_find_byname(binding_name);
2722 +               if (binding == IP_SET_INVALID_ID)
2723 +                       return -ENOENT;
2724 +               
2725 +               res = (set->binding == binding) ? -EEXIST : 0;
2726 +
2727 +               return res;
2728 +       }
2729 +       binding = ip_set_find_byname(req_bind->binding);
2730 +       if (binding == IP_SET_INVALID_ID)
2731 +               return -ENOENT;
2732 +               
2733 +       
2734 +       res = __ip_set_testip(set,
2735 +                             data + sizeof(struct ip_set_req_bind),
2736 +                             size - sizeof(struct ip_set_req_bind),
2737 +                             &ip);
2738 +       DP("set %s, ip: %u.%u.%u.%u, binding %s",
2739 +          set->name, HIPQUAD(ip), ip_set_list[binding]->name);
2740 +       
2741 +       if (res >= 0)
2742 +               res = (ip_set_find_in_hash(set->id, ip) == binding)
2743 +                       ? -EEXIST : 0;
2744 +
2745 +       return res;
2746 +}
2747 +
2748 +static struct ip_set_type *
2749 +find_set_type_rlock(const char *typename)
2750 +{
2751 +       struct ip_set_type *type;
2752 +       
2753 +       read_lock_bh(&ip_set_lock);
2754 +       type = find_set_type(typename);
2755 +       if (type == NULL)
2756 +               read_unlock_bh(&ip_set_lock);
2757 +
2758 +       return type;
2759 +}
2760 +
2761 +static int
2762 +find_free_id(const char *name,
2763 +            ip_set_id_t *index,
2764 +            ip_set_id_t *id)
2765 +{
2766 +       ip_set_id_t i;
2767 +
2768 +       *id = IP_SET_INVALID_ID;
2769 +       for (i = 0;  i < ip_set_max; i++) {
2770 +               if (ip_set_list[i] == NULL) {
2771 +                       if (*id == IP_SET_INVALID_ID)
2772 +                               *id = *index = i;
2773 +               } else if (SETNAME_EQ(name, ip_set_list[i]->name))
2774 +                       /* Name clash */
2775 +                       return -EEXIST;
2776 +       }
2777 +       if (*id == IP_SET_INVALID_ID)
2778 +               /* No free slot remained */
2779 +               return -ERANGE;
2780 +       /* Check that index is usable as id (swapping) */
2781 +    check:     
2782 +       for (i = 0;  i < ip_set_max; i++) {
2783 +               if (ip_set_list[i] != NULL
2784 +                   && ip_set_list[i]->id == *id) {
2785 +                   *id = i;
2786 +                   goto check;
2787 +               }
2788 +       }
2789 +       return 0;
2790 +}
2791 +
2792 +/*
2793 + * Create a set
2794 + */
2795 +static int
2796 +ip_set_create(const char *name,
2797 +             const char *typename,
2798 +             ip_set_id_t restore,
2799 +             const void *data,
2800 +             u_int32_t size)
2801 +{
2802 +       struct ip_set *set;
2803 +       ip_set_id_t index = 0, id;
2804 +       int res = 0;
2805 +
2806 +       DP("setname: %s, typename: %s, id: %u", name, typename, restore);
2807 +
2808 +       /*
2809 +        * First, and without any locks, allocate and initialize
2810 +        * a normal base set structure.
2811 +        */
2812 +       set = kmalloc(sizeof(struct ip_set), GFP_KERNEL);
2813 +       if (!set)
2814 +               return -ENOMEM;
2815 +       rwlock_init(&set->lock);
2816 +       strncpy(set->name, name, IP_SET_MAXNAMELEN);
2817 +       set->binding = IP_SET_INVALID_ID;
2818 +       atomic_set(&set->ref, 0);
2819 +
2820 +       /*
2821 +        * Next, take the &ip_set_lock, check that we know the type,
2822 +        * and take a reference on the type, to make sure it
2823 +        * stays available while constructing our new set.
2824 +        *
2825 +        * After referencing the type, we drop the &ip_set_lock,
2826 +        * and let the new set construction run without locks.
2827 +        */
2828 +       set->type = find_set_type_rlock(typename);
2829 +       if (set->type == NULL) {
2830 +               /* Try loading the module */
2831 +               char modulename[IP_SET_MAXNAMELEN + strlen("ip_set_") + 1];
2832 +               strcpy(modulename, "ip_set_");
2833 +               strcat(modulename, typename);
2834 +               DP("try to load %s", modulename);
2835 +               request_module(modulename);
2836 +               set->type = find_set_type_rlock(typename);
2837 +       }
2838 +       if (set->type == NULL) {
2839 +               ip_set_printk("no set type '%s', set '%s' not created",
2840 +                             typename, name);
2841 +               res = -ENOENT;
2842 +               goto out;
2843 +       }
2844 +       if (!try_module_get(set->type->me)) {
2845 +               read_unlock_bh(&ip_set_lock);
2846 +               res = -EFAULT;
2847 +               goto out;
2848 +       }
2849 +       read_unlock_bh(&ip_set_lock);
2850 +
2851 +       /* Check request size */
2852 +       if (size != set->type->header_size) {
2853 +               ip_set_printk("data length wrong (want %lu, have %lu)",
2854 +                             (long unsigned)set->type->header_size,
2855 +                             (long unsigned)size);
2856 +               res = -EINVAL; goto put_out;
2857 +       }
2858 +
2859 +       /*
2860 +        * Without holding any locks, create private part.
2861 +        */
2862 +       res = set->type->create(set, data, size);
2863 +       if (res != 0)
2864 +               goto put_out;
2865 +
2866 +       /* BTW, res==0 here. */
2867 +
2868 +       /*
2869 +        * Here, we have a valid, constructed set. &ip_set_lock again,
2870 +        * find free id/index and check that it is not already in
2871 +        * ip_set_list.
2872 +        */
2873 +       write_lock_bh(&ip_set_lock);
2874 +       if ((res = find_free_id(set->name, &index, &id)) != 0) {
2875 +               DP("no free id!");
2876 +               goto cleanup;
2877 +       }
2878 +
2879 +       /* Make sure restore gets the same index */
2880 +       if (restore != IP_SET_INVALID_ID && index != restore) {
2881 +               DP("Can't restore, sets are screwed up");
2882 +               res = -ERANGE;
2883 +               goto cleanup;
2884 +       }
2885 +       
2886 +       /*
2887 +        * Finally! Add our shiny new set to the list, and be done.
2888 +        */
2889 +       DP("create: '%s' created with index %u, id %u!", set->name, index, id);
2890 +       set->id = id;
2891 +       ip_set_list[index] = set;
2892 +       write_unlock_bh(&ip_set_lock);
2893 +       return res;
2894 +       
2895 +    cleanup:
2896 +       write_unlock_bh(&ip_set_lock);
2897 +       set->type->destroy(set);
2898 +    put_out:
2899 +       module_put(set->type->me);
2900 +    out:
2901 +       kfree(set);
2902 +       return res;
2903 +}
2904 +
2905 +/*
2906 + * Destroy a given existing set
2907 + */
2908 +static void
2909 +ip_set_destroy_set(ip_set_id_t index)
2910 +{
2911 +       struct ip_set *set = ip_set_list[index];
2912 +
2913 +       IP_SET_ASSERT(set);
2914 +       DP("set: %s",  set->name);
2915 +       write_lock_bh(&ip_set_lock);
2916 +       FOREACH_HASH_RW_DO(__set_hash_del_byid, set->id);
2917 +       if (set->binding != IP_SET_INVALID_ID)
2918 +               __ip_set_put(set->binding);
2919 +       ip_set_list[index] = NULL;
2920 +       write_unlock_bh(&ip_set_lock);
2921 +
2922 +       /* Must call it without holding any lock */
2923 +       set->type->destroy(set);
2924 +       module_put(set->type->me);
2925 +       kfree(set);
2926 +}
2927 +
2928 +/*
2929 + * Destroy a set - or all sets
2930 + * Sets must not be referenced/used.
2931 + */
2932 +static int
2933 +ip_set_destroy(ip_set_id_t index)
2934 +{
2935 +       ip_set_id_t i;
2936 +
2937 +       /* ref modification always protected by the mutex */
2938 +       if (index != IP_SET_INVALID_ID) {
2939 +               if (atomic_read(&ip_set_list[index]->ref))
2940 +                       return -EBUSY;
2941 +               ip_set_destroy_set(index);
2942 +       } else {
2943 +               for (i = 0; i < ip_set_max; i++) {
2944 +                       if (ip_set_list[i] != NULL
2945 +                           && (atomic_read(&ip_set_list[i]->ref)))
2946 +                               return -EBUSY;
2947 +               }
2948 +
2949 +               for (i = 0; i < ip_set_max; i++) {
2950 +                       if (ip_set_list[i] != NULL)
2951 +                               ip_set_destroy_set(i);
2952 +               }
2953 +       }
2954 +       return 0;
2955 +}
2956 +
2957 +static void
2958 +ip_set_flush_set(struct ip_set *set)
2959 +{
2960 +       DP("set: %s %u",  set->name, set->id);
2961 +
2962 +       write_lock_bh(&set->lock);
2963 +       set->type->flush(set);
2964 +       write_unlock_bh(&set->lock);
2965 +}
2966 +
2967 +/*
2968 + * Flush data in a set - or in all sets
2969 + */
2970 +static int
2971 +ip_set_flush(ip_set_id_t index)
2972 +{
2973 +       if (index != IP_SET_INVALID_ID) {
2974 +               IP_SET_ASSERT(ip_set_list[index]);
2975 +               ip_set_flush_set(ip_set_list[index]);
2976 +       } else
2977 +               FOREACH_SET_DO(ip_set_flush_set);
2978 +
2979 +       return 0;
2980 +}
2981 +
2982 +/* Rename a set */
2983 +static int
2984 +ip_set_rename(ip_set_id_t index, const char *name)
2985 +{
2986 +       struct ip_set *set = ip_set_list[index];
2987 +       ip_set_id_t i;
2988 +       int res = 0;
2989 +
2990 +       DP("set: %s to %s",  set->name, name);
2991 +       write_lock_bh(&ip_set_lock);
2992 +       for (i = 0; i < ip_set_max; i++) {
2993 +               if (ip_set_list[i] != NULL
2994 +                   && SETNAME_EQ(ip_set_list[i]->name, name)) {
2995 +                       res = -EEXIST;
2996 +                       goto unlock;
2997 +               }
2998 +       }
2999 +       strncpy(set->name, name, IP_SET_MAXNAMELEN);
3000 +    unlock:
3001 +       write_unlock_bh(&ip_set_lock);
3002 +       return res;
3003 +}
3004 +
3005 +/*
3006 + * Swap two sets so that name/index points to the other.
3007 + * References are also swapped.
3008 + */
3009 +static int
3010 +ip_set_swap(ip_set_id_t from_index, ip_set_id_t to_index)
3011 +{
3012 +       struct ip_set *from = ip_set_list[from_index];
3013 +       struct ip_set *to = ip_set_list[to_index];
3014 +       char from_name[IP_SET_MAXNAMELEN];
3015 +       u_int32_t from_ref;
3016 +
3017 +       DP("set: %s to %s",  from->name, to->name);
3018 +       /* Features must not change. 
3019 +        * Not an artificial restriction anymore, as we must prevent
3020 +        * possible loops created by swapping in setlist type of sets. */
3021 +       if (from->type->features != to->type->features)
3022 +               return -ENOEXEC;
3023 +
3024 +       /* No magic here: ref munging protected by the mutex */ 
3025 +       write_lock_bh(&ip_set_lock);
3026 +       strncpy(from_name, from->name, IP_SET_MAXNAMELEN);
3027 +       from_ref = atomic_read(&from->ref);
3028 +
3029 +       strncpy(from->name, to->name, IP_SET_MAXNAMELEN);
3030 +       atomic_set(&from->ref, atomic_read(&to->ref));
3031 +       strncpy(to->name, from_name, IP_SET_MAXNAMELEN);
3032 +       atomic_set(&to->ref, from_ref);
3033 +       
3034 +       ip_set_list[from_index] = to;
3035 +       ip_set_list[to_index] = from;
3036 +       
3037 +       write_unlock_bh(&ip_set_lock);
3038 +       return 0;
3039 +}
3040 +
3041 +/*
3042 + * List set data
3043 + */
3044 +
3045 +static inline void
3046 +__set_hash_bindings_size_list(struct ip_set_hash *set_hash,
3047 +                             ip_set_id_t id, u_int32_t *size)
3048 +{
3049 +       if (set_hash->id == id)
3050 +               *size += sizeof(struct ip_set_hash_list);
3051 +}
3052 +
3053 +static inline void
3054 +__set_hash_bindings_size_save(struct ip_set_hash *set_hash,
3055 +                             ip_set_id_t id, u_int32_t *size)
3056 +{
3057 +       if (set_hash->id == id)
3058 +               *size += sizeof(struct ip_set_hash_save);
3059 +}
3060 +
3061 +static inline void
3062 +__set_hash_bindings(struct ip_set_hash *set_hash,
3063 +                   ip_set_id_t id, void *data, int *used)
3064 +{
3065 +       if (set_hash->id == id) {
3066 +               struct ip_set_hash_list *hash_list = data + *used;
3067 +
3068 +               hash_list->ip = set_hash->ip;
3069 +               hash_list->binding = set_hash->binding;
3070 +               *used += sizeof(struct ip_set_hash_list);
3071 +       }
3072 +}
3073 +
3074 +static int ip_set_list_set(ip_set_id_t index,
3075 +                          void *data,
3076 +                          int *used,
3077 +                          int len)
3078 +{
3079 +       struct ip_set *set = ip_set_list[index];
3080 +       struct ip_set_list *set_list;
3081 +
3082 +       /* Pointer to our header */
3083 +       set_list = data + *used;
3084 +
3085 +       DP("set: %s, used: %d %p %p", set->name, *used, data, data + *used);
3086 +
3087 +       /* Get and ensure header size */
3088 +       if (*used + sizeof(struct ip_set_list) > len)
3089 +               goto not_enough_mem;
3090 +       *used += sizeof(struct ip_set_list);
3091 +
3092 +       read_lock_bh(&set->lock);
3093 +       /* Get and ensure set specific header size */
3094 +       set_list->header_size = set->type->header_size;
3095 +       if (*used + set_list->header_size > len)
3096 +               goto unlock_set;
3097 +
3098 +       /* Fill in the header */
3099 +       set_list->index = index;
3100 +       set_list->binding = set->binding;
3101 +       set_list->ref = atomic_read(&set->ref);
3102 +
3103 +       /* Fill in set specific header data */
3104 +       set->type->list_header(set, data + *used);
3105 +       *used += set_list->header_size;
3106 +
3107 +       /* Get and ensure set specific members size */
3108 +       set_list->members_size = set->type->list_members_size(set);
3109 +       if (*used + set_list->members_size > len)
3110 +               goto unlock_set;
3111 +
3112 +       /* Fill in set specific members data */
3113 +       set->type->list_members(set, data + *used);
3114 +       *used += set_list->members_size;
3115 +       read_unlock_bh(&set->lock);
3116 +
3117 +       /* Bindings */
3118 +
3119 +       /* Get and ensure set specific bindings size */
3120 +       set_list->bindings_size = 0;
3121 +       FOREACH_HASH_DO(__set_hash_bindings_size_list,
3122 +                       set->id, &set_list->bindings_size);
3123 +       if (*used + set_list->bindings_size > len)
3124 +               goto not_enough_mem;
3125 +
3126 +       /* Fill in set specific bindings data */
3127 +       FOREACH_HASH_DO(__set_hash_bindings, set->id, data, used);
3128 +       
3129 +       return 0;
3130 +
3131 +    unlock_set:
3132 +       read_unlock_bh(&set->lock);
3133 +    not_enough_mem:
3134 +       DP("not enough mem, try again");
3135 +       return -EAGAIN;
3136 +}
3137 +
3138 +/*
3139 + * Save sets
3140 + */
3141 +static int ip_set_save_set(ip_set_id_t index,
3142 +                          void *data,
3143 +                          int *used,
3144 +                          int len)
3145 +{
3146 +       struct ip_set *set;
3147 +       struct ip_set_save *set_save;
3148 +
3149 +       /* Pointer to our header */
3150 +       set_save = data + *used;
3151 +
3152 +       /* Get and ensure header size */
3153 +       if (*used + sizeof(struct ip_set_save) > len)
3154 +               goto not_enough_mem;
3155 +       *used += sizeof(struct ip_set_save);
3156 +
3157 +       set = ip_set_list[index];
3158 +       DP("set: %s, used: %d(%d) %p %p", set->name, *used, len,
3159 +          data, data + *used);
3160 +
3161 +       read_lock_bh(&set->lock);
3162 +       /* Get and ensure set specific header size */
3163 +       set_save->header_size = set->type->header_size;
3164 +       if (*used + set_save->header_size > len)
3165 +               goto unlock_set;
3166 +
3167 +       /* Fill in the header */
3168 +       set_save->index = index;
3169 +       set_save->binding = set->binding;
3170 +
3171 +       /* Fill in set specific header data */
3172 +       set->type->list_header(set, data + *used);
3173 +       *used += set_save->header_size;
3174 +
3175 +       DP("set header filled: %s, used: %d(%lu) %p %p", set->name, *used,
3176 +          (unsigned long)set_save->header_size, data, data + *used);
3177 +       /* Get and ensure set specific members size */
3178 +       set_save->members_size = set->type->list_members_size(set);
3179 +       if (*used + set_save->members_size > len)
3180 +               goto unlock_set;
3181 +
3182 +       /* Fill in set specific members data */
3183 +       set->type->list_members(set, data + *used);
3184 +       *used += set_save->members_size;
3185 +       read_unlock_bh(&set->lock);
3186 +       DP("set members filled: %s, used: %d(%lu) %p %p", set->name, *used,
3187 +          (unsigned long)set_save->members_size, data, data + *used);
3188 +       return 0;
3189 +
3190 +    unlock_set:
3191 +       read_unlock_bh(&set->lock);
3192 +    not_enough_mem:
3193 +       DP("not enough mem, try again");
3194 +       return -EAGAIN;
3195 +}
3196 +
3197 +static inline void
3198 +__set_hash_save_bindings(struct ip_set_hash *set_hash,
3199 +                        ip_set_id_t id,
3200 +                        void *data,
3201 +                        int *used,
3202 +                        int len,
3203 +                        int *res)
3204 +{
3205 +       if (*res == 0
3206 +           && (id == IP_SET_INVALID_ID || set_hash->id == id)) {
3207 +               struct ip_set_hash_save *hash_save = data + *used;
3208 +               /* Ensure bindings size */
3209 +               if (*used + sizeof(struct ip_set_hash_save) > len) {
3210 +                       *res = -ENOMEM;
3211 +                       return;
3212 +               }
3213 +               hash_save->id = set_hash->id;
3214 +               hash_save->ip = set_hash->ip;
3215 +               hash_save->binding = set_hash->binding;
3216 +               *used += sizeof(struct ip_set_hash_save);
3217 +       }
3218 +}
3219 +
3220 +static int ip_set_save_bindings(ip_set_id_t index,
3221 +                               void *data,
3222 +                               int *used,
3223 +                               int len)
3224 +{
3225 +       int res = 0;
3226 +       struct ip_set_save *set_save;
3227 +
3228 +       DP("used %u, len %u", *used, len);
3229 +       /* Get and ensure header size */
3230 +       if (*used + sizeof(struct ip_set_save) > len)
3231 +               return -ENOMEM;
3232 +
3233 +       /* Marker */
3234 +       set_save = data + *used;
3235 +       set_save->index = IP_SET_INVALID_ID;
3236 +       set_save->header_size = 0;
3237 +       set_save->members_size = 0;
3238 +       *used += sizeof(struct ip_set_save);
3239 +
3240 +       DP("marker added used %u, len %u", *used, len);
3241 +       /* Fill in bindings data */
3242 +       if (index != IP_SET_INVALID_ID)
3243 +               /* Sets are identified by id in hash */
3244 +               index = ip_set_list[index]->id;
3245 +       FOREACH_HASH_DO(__set_hash_save_bindings, index, data, used, len, &res);
3246 +
3247 +       return res;     
3248 +}
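
[Editor's note, illustrative only and not part of the patch.] The blob built by ip_set_save_set() and ip_set_save_bindings() above is a sequence of fixed-size struct ip_set_save records, each followed by the type-specific header and the member data, then a marker record whose index is IP_SET_INVALID_ID, then raw struct ip_set_hash_save binding records up to the end of the buffer. The sketch below walks such a blob from userspace; it assumes the patched <linux/netfilter_ipv4/ip_set.h> is installed on the include path and uses only the struct fields referenced in the code above.

#include <stdio.h>
#include <stddef.h>
#include <linux/netfilter_ipv4/ip_set.h>

/* Sketch: print the layout of a SAVE blob as produced by
 * ip_set_save_set()/ip_set_save_bindings(). */
static void walk_save_blob(const char *data, size_t len)
{
	size_t used = 0;

	/* Per-set records: fixed header + type header + member data */
	while (used + sizeof(struct ip_set_save) <= len) {
		const struct ip_set_save *s = (const void *)(data + used);

		used += sizeof(struct ip_set_save);
		if (s->index == IP_SET_INVALID_ID)
			break;		/* marker: binding records follow */
		printf("set index %u: %u header bytes, %u member bytes\n",
		       (unsigned)s->index,
		       (unsigned)s->header_size,
		       (unsigned)s->members_size);
		used += s->header_size + s->members_size;
	}
	/* Binding records run to the end of the buffer */
	while (used + sizeof(struct ip_set_hash_save) <= len) {
		const struct ip_set_hash_save *b = (const void *)(data + used);

		printf("binding: set id %u, ip 0x%08x -> set id %u\n",
		       (unsigned)b->id, (unsigned)b->ip, (unsigned)b->binding);
		used += sizeof(struct ip_set_hash_save);
	}
}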
3249 +
3250 +/*
3251 + * Restore sets
3252 + */
3253 +static int ip_set_restore(void *data,
3254 +                         int len)
3255 +{
3256 +       int res = 0;
3257 +       int line = 0, used = 0, members_size;
3258 +       struct ip_set *set;
3259 +       struct ip_set_hash_save *hash_save;
3260 +       struct ip_set_restore *set_restore;
3261 +       ip_set_id_t index;
3262 +
3263 +       /* Loop to restore sets */
3264 +       while (1) {
3265 +               line++;
3266 +               
3267 +               DP("%d %zu %d", used, sizeof(struct ip_set_restore), len);
3268 +               /* Get and ensure header size */
3269 +               if (used + sizeof(struct ip_set_restore) > len)
3270 +                       return line;
3271 +               set_restore = data + used;
3272 +               used += sizeof(struct ip_set_restore);
3273 +
3274 +               /* Ensure data size */
3275 +               if (used
3276 +                   + set_restore->header_size
3277 +                   + set_restore->members_size > len)
3278 +                       return line;
3279 +
3280 +               /* Check marker */
3281 +               if (set_restore->index == IP_SET_INVALID_ID) {
3282 +                       line--;
3283 +                       goto bindings;
3284 +               }
3285 +               
3286 +               /* Try to create the set */
3287 +               DP("restore %s %s", set_restore->name, set_restore->typename);
3288 +               res = ip_set_create(set_restore->name,
3289 +                                   set_restore->typename,
3290 +                                   set_restore->index,
3291 +                                   data + used,
3292 +                                   set_restore->header_size);
3293 +               
3294 +               if (res != 0)
3295 +                       return line;
3296 +               used += set_restore->header_size;
3297 +
3298 +               index = ip_set_find_byindex(set_restore->index);
3299 +               DP("index %u, restore_index %u", index, set_restore->index);
3300 +               if (index != set_restore->index)
3301 +                       return line;
3302 +               /* Try to restore members data */
3303 +               set = ip_set_list[index];
3304 +               members_size = 0;
3305 +               DP("members_size %lu reqsize %lu",
3306 +                  (unsigned long)set_restore->members_size,
3307 +                  (unsigned long)set->type->reqsize);
3308 +               while (members_size + set->type->reqsize <=
3309 +                      set_restore->members_size) {
3310 +                       line++;
3311 +                       DP("members: %d, line %d", members_size, line);
3312 +                       res = __ip_set_addip(index,
3313 +                                          data + used + members_size,
3314 +                                          set->type->reqsize);
3315 +                       if (!(res == 0 || res == -EEXIST))
3316 +                               return line;
3317 +                       members_size += set->type->reqsize;
3318 +               }
3319 +
3320 +               DP("members_size %lu  %d",
3321 +                  (unsigned long)set_restore->members_size, members_size);
3322 +               if (members_size != set_restore->members_size)
3323 +                       return line++;
3324 +               used += set_restore->members_size;              
3325 +       }
3326 +       
3327 +   bindings:
3328 +       /* Loop to restore bindings */
3329 +       while (used < len) {
3330 +               line++;
3331 +
3332 +               DP("restore binding, line %u", line);           
3333 +               /* Get and ensure size */
3334 +               if (used + sizeof(struct ip_set_hash_save) > len)
3335 +                       return line;
3336 +               hash_save = data + used;
3337 +               used += sizeof(struct ip_set_hash_save);
3338 +               
3339 +               /* hash_save->id is used to store the index */
3340 +               index = ip_set_find_byindex(hash_save->id);
3341 +               DP("restore binding index %u, id %u, %u -> %u",
3342 +                  index, hash_save->id, hash_save->ip, hash_save->binding);            
3343 +               if (index != hash_save->id)
3344 +                       return line;
3345 +               if (ip_set_find_byindex(hash_save->binding) == IP_SET_INVALID_ID) {
3346 +                       DP("corrupt binding set index %u", hash_save->binding);
3347 +                       return line;
3348 +               }
3349 +               set = ip_set_list[hash_save->id];
3350 +               /* Null valued IP means default binding */
3351 +               if (hash_save->ip)
3352 +                       res = ip_set_hash_add(set->id,
3353 +                                             hash_save->ip,
3354 +                                             hash_save->binding);
3355 +               else {
3356 +                       IP_SET_ASSERT(set->binding == IP_SET_INVALID_ID);
3357 +                       write_lock_bh(&ip_set_lock);
3358 +                       set->binding = hash_save->binding;
3359 +                       __ip_set_get(set->binding);
3360 +                       write_unlock_bh(&ip_set_lock);
3361 +                       DP("default binding: %u", set->binding);
3362 +               }
3363 +               if (res != 0)
3364 +                       return line;
3365 +       }
3366 +       if (used != len)
3367 +               return line;
3368 +       
3369 +       return 0;       
3370 +}
3371 +
3372 +static int
3373 +ip_set_sockfn_set(struct sock *sk, int optval, void *user, unsigned int len)
3374 +{
3375 +       void *data;
3376 +       int res = 0;            /* Assume OK */
3377 +       unsigned *op;
3378 +       struct ip_set_req_adt *req_adt;
3379 +       ip_set_id_t index = IP_SET_INVALID_ID;
3380 +       int (*adtfn)(ip_set_id_t index,
3381 +                    const void *data, u_int32_t size);
3382 +       struct fn_table {
3383 +               int (*fn)(ip_set_id_t index,
3384 +                         const void *data, u_int32_t size);
3385 +       } adtfn_table[] =
3386 +       { { ip_set_addip }, { ip_set_delip }, { ip_set_testip},
3387 +         { ip_set_bindip}, { ip_set_unbindip }, { ip_set_testbind },
3388 +       };
3389 +
3390 +       DP("optval=%d, user=%p, len=%d", optval, user, len);
3391 +       if (!capable(CAP_NET_ADMIN))
3392 +               return -EPERM;
3393 +       if (optval != SO_IP_SET)
3394 +               return -EBADF;
3395 +       if (len <= sizeof(unsigned)) {
3396 +               ip_set_printk("short userdata (want >%zu, got %u)",
3397 +                             sizeof(unsigned), len);
3398 +               return -EINVAL;
3399 +       }
3400 +       data = vmalloc(len);
3401 +       if (!data) {
3402 +               DP("out of mem for %u bytes", len);
3403 +               return -ENOMEM;
3404 +       }
3405 +       if (copy_from_user(data, user, len) != 0) {
3406 +               res = -EFAULT;
3407 +               goto done;
3408 +       }
3409 +       if (down_interruptible(&ip_set_app_mutex)) {
3410 +               res = -EINTR;
3411 +               goto done;
3412 +       }
3413 +
3414 +       op = (unsigned *)data;
3415 +       DP("op=%x", *op);
3416 +       
3417 +       if (*op < IP_SET_OP_VERSION) {
3418 +               /* Check the version at the beginning of operations */
3419 +               struct ip_set_req_version *req_version = data;
3420 +               if (req_version->version != IP_SET_PROTOCOL_VERSION) {
3421 +                       res = -EPROTO;
3422 +                       goto done;
3423 +               }
3424 +       }
3425 +
3426 +       switch (*op) {
3427 +       case IP_SET_OP_CREATE:{
3428 +               struct ip_set_req_create *req_create = data;
3429 +               
3430 +               if (len < sizeof(struct ip_set_req_create)) {
3431 +                       ip_set_printk("short CREATE data (want >=%zu, got %u)",
3432 +                                     sizeof(struct ip_set_req_create), len);
3433 +                       res = -EINVAL;
3434 +                       goto done;
3435 +               }
3436 +               req_create->name[IP_SET_MAXNAMELEN - 1] = '\0';
3437 +               req_create->typename[IP_SET_MAXNAMELEN - 1] = '\0';
3438 +               res = ip_set_create(req_create->name,
3439 +                                   req_create->typename,
3440 +                                   IP_SET_INVALID_ID,
3441 +                                   data + sizeof(struct ip_set_req_create),
3442 +                                   len - sizeof(struct ip_set_req_create));
3443 +               goto done;
3444 +       }
3445 +       case IP_SET_OP_DESTROY:{
3446 +               struct ip_set_req_std *req_destroy = data;
3447 +               
3448 +               if (len != sizeof(struct ip_set_req_std)) {
3449 +                       ip_set_printk("invalid DESTROY data (want %zu, got %u)",
3450 +                                     sizeof(struct ip_set_req_std), len);
3451 +                       res = -EINVAL;
3452 +                       goto done;
3453 +               }
3454 +               if (SETNAME_EQ(req_destroy->name, IPSET_TOKEN_ALL)) {
3455 +                       /* Destroy all sets */
3456 +                       index = IP_SET_INVALID_ID;
3457 +               } else {
3458 +                       req_destroy->name[IP_SET_MAXNAMELEN - 1] = '\0';
3459 +                       index = ip_set_find_byname(req_destroy->name);
3460 +
3461 +                       if (index == IP_SET_INVALID_ID) {
3462 +                               res = -ENOENT;
3463 +                               goto done;
3464 +                       }
3465 +               }
3466 +                       
3467 +               res = ip_set_destroy(index);
3468 +               goto done;
3469 +       }
3470 +       case IP_SET_OP_FLUSH:{
3471 +               struct ip_set_req_std *req_flush = data;
3472 +
3473 +               if (len != sizeof(struct ip_set_req_std)) {
3474 +                       ip_set_printk("invalid FLUSH data (want %zu, got %u)",
3475 +                                     sizeof(struct ip_set_req_std), len);
3476 +                       res = -EINVAL;
3477 +                       goto done;
3478 +               }
3479 +               if (SETNAME_EQ(req_flush->name, IPSET_TOKEN_ALL)) {
3480 +                       /* Flush all sets */
3481 +                       index = IP_SET_INVALID_ID;
3482 +               } else {
3483 +                       req_flush->name[IP_SET_MAXNAMELEN - 1] = '\0';
3484 +                       index = ip_set_find_byname(req_flush->name);
3485 +
3486 +                       if (index == IP_SET_INVALID_ID) {
3487 +                               res = -ENOENT;
3488 +                               goto done;
3489 +                       }
3490 +               }
3491 +               res = ip_set_flush(index);
3492 +               goto done;
3493 +       }
3494 +       case IP_SET_OP_RENAME:{
3495 +               struct ip_set_req_create *req_rename = data;
3496 +
3497 +               if (len != sizeof(struct ip_set_req_create)) {
3498 +                       ip_set_printk("invalid RENAME data (want %zu, got %u)",
3499 +                                     sizeof(struct ip_set_req_create), len);
3500 +                       res = -EINVAL;
3501 +                       goto done;
3502 +               }
3503 +
3504 +               req_rename->name[IP_SET_MAXNAMELEN - 1] = '\0';
3505 +               req_rename->typename[IP_SET_MAXNAMELEN - 1] = '\0';
3506 +                       
3507 +               index = ip_set_find_byname(req_rename->name);
3508 +               if (index == IP_SET_INVALID_ID) {
3509 +                       res = -ENOENT;
3510 +                       goto done;
3511 +               }
3512 +               res = ip_set_rename(index, req_rename->typename);
3513 +               goto done;
3514 +       }
3515 +       case IP_SET_OP_SWAP:{
3516 +               struct ip_set_req_create *req_swap = data;
3517 +               ip_set_id_t to_index;
3518 +
3519 +               if (len != sizeof(struct ip_set_req_create)) {
3520 +                       ip_set_printk("invalid SWAP data (want %zu, got %u)",
3521 +                                     sizeof(struct ip_set_req_create), len);
3522 +                       res = -EINVAL;
3523 +                       goto done;
3524 +               }
3525 +
3526 +               req_swap->name[IP_SET_MAXNAMELEN - 1] = '\0';
3527 +               req_swap->typename[IP_SET_MAXNAMELEN - 1] = '\0';
3528 +
3529 +               index = ip_set_find_byname(req_swap->name);
3530 +               if (index == IP_SET_INVALID_ID) {
3531 +                       res = -ENOENT;
3532 +                       goto done;
3533 +               }
3534 +               to_index = ip_set_find_byname(req_swap->typename);
3535 +               if (to_index == IP_SET_INVALID_ID) {
3536 +                       res = -ENOENT;
3537 +                       goto done;
3538 +               }
3539 +               res = ip_set_swap(index, to_index);
3540 +               goto done;
3541 +       }
3542 +       default:
3543 +               break;  /* Set identified by id */
3544 +       }
3545 +       
3546 +       /* Here we may have add/del/test/bind/unbind/test_bind operations */
3547 +       if (*op < IP_SET_OP_ADD_IP || *op > IP_SET_OP_TEST_BIND_SET) {
3548 +               res = -EBADMSG;
3549 +               goto done;
3550 +       }
3551 +       adtfn = adtfn_table[*op - IP_SET_OP_ADD_IP].fn;
3552 +
3553 +       if (len < sizeof(struct ip_set_req_adt)) {
3554 +               ip_set_printk("short data in adt request (want >=%zu, got %u)",
3555 +                             sizeof(struct ip_set_req_adt), len);
3556 +               res = -EINVAL;
3557 +               goto done;
3558 +       }
3559 +       req_adt = data;
3560 +
3561 +       /* -U :all: :all:|:default: uses IP_SET_INVALID_ID */
3562 +       if (!(*op == IP_SET_OP_UNBIND_SET
3563 +             && req_adt->index == IP_SET_INVALID_ID)) {
3564 +               index = ip_set_find_byindex(req_adt->index);
3565 +               if (index == IP_SET_INVALID_ID) {
3566 +                       res = -ENOENT;
3567 +                       goto done;
3568 +               }
3569 +       }
3570 +       res = adtfn(index, data, len);
3571 +
3572 +    done:
3573 +       up(&ip_set_app_mutex);
3574 +       vfree(data);
3575 +       if (res > 0)
3576 +               res = 0;
3577 +       DP("final result %d", res);
3578 +       return res;
3579 +}
3580 +
3581 +static int
3582 +ip_set_sockfn_get(struct sock *sk, int optval, void *user, int *len)
3583 +{
3584 +       int res = 0;
3585 +       unsigned *op;
3586 +       ip_set_id_t index = IP_SET_INVALID_ID;
3587 +       void *data;
3588 +       int copylen = *len;
3589 +
3590 +       DP("optval=%d, user=%p, len=%d", optval, user, *len);
3591 +       if (!capable(CAP_NET_ADMIN))
3592 +               return -EPERM;
3593 +       if (optval != SO_IP_SET)
3594 +               return -EBADF;
3595 +       if (*len < sizeof(unsigned)) {
3596 +               ip_set_printk("short userdata (want >=%zu, got %d)",
3597 +                             sizeof(unsigned), *len);
3598 +               return -EINVAL;
3599 +       }
3600 +       data = vmalloc(*len);
3601 +       if (!data) {
3602 +               DP("out of mem for %d bytes", *len);
3603 +               return -ENOMEM;
3604 +       }
3605 +       if (copy_from_user(data, user, *len) != 0) {
3606 +               res = -EFAULT;
3607 +               goto done;
3608 +       }
3609 +       if (down_interruptible(&ip_set_app_mutex)) {
3610 +               res = -EINTR;
3611 +               goto done;
3612 +       }
3613 +
3614 +       op = (unsigned *) data;
3615 +       DP("op=%x", *op);
3616 +
3617 +       if (*op < IP_SET_OP_VERSION) {
3618 +               /* Check the version at the beginning of operations */
3619 +               struct ip_set_req_version *req_version = data;
3620 +               if (req_version->version != IP_SET_PROTOCOL_VERSION) {
3621 +                       res = -EPROTO;
3622 +                       goto done;
3623 +               }
3624 +       }
3625 +
3626 +       switch (*op) {
3627 +       case IP_SET_OP_VERSION: {
3628 +               struct ip_set_req_version *req_version = data;
3629 +
3630 +               if (*len != sizeof(struct ip_set_req_version)) {
3631 +                       ip_set_printk("invalid VERSION (want %zu, got %d)",
3632 +                                     sizeof(struct ip_set_req_version),
3633 +                                     *len);
3634 +                       res = -EINVAL;
3635 +                       goto done;
3636 +               }
3637 +
3638 +               req_version->version = IP_SET_PROTOCOL_VERSION;
3639 +               res = copy_to_user(user, req_version,
3640 +                                  sizeof(struct ip_set_req_version));
3641 +               goto done;
3642 +       }
3643 +       case IP_SET_OP_GET_BYNAME: {
3644 +               struct ip_set_req_get_set *req_get = data;
3645 +
3646 +               if (*len != sizeof(struct ip_set_req_get_set)) {
3647 +                       ip_set_printk("invalid GET_BYNAME (want %zu, got %d)",
3648 +                                     sizeof(struct ip_set_req_get_set), *len);
3649 +                       res = -EINVAL;
3650 +                       goto done;
3651 +               }
3652 +               req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
3653 +               index = ip_set_find_byname(req_get->set.name);
3654 +               req_get->set.index = index;
3655 +               goto copy;
3656 +       }
3657 +       case IP_SET_OP_GET_BYINDEX: {
3658 +               struct ip_set_req_get_set *req_get = data;
3659 +
3660 +               if (*len != sizeof(struct ip_set_req_get_set)) {
3661 +                       ip_set_printk("invalid GET_BYINDEX (want %zu, got %d)",
3662 +                                     sizeof(struct ip_set_req_get_set), *len);
3663 +                       res = -EINVAL;
3664 +                       goto done;
3665 +               }
3666 +               req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
3667 +               index = ip_set_find_byindex(req_get->set.index);
3668 +               strncpy(req_get->set.name,
3669 +                       index == IP_SET_INVALID_ID ? ""
3670 +                       : ip_set_list[index]->name, IP_SET_MAXNAMELEN);
3671 +               goto copy;
3672 +       }
3673 +       case IP_SET_OP_ADT_GET: {
3674 +               struct ip_set_req_adt_get *req_get = data;
3675 +
3676 +               if (*len != sizeof(struct ip_set_req_adt_get)) {
3677 +                       ip_set_printk("invalid ADT_GET (want %zu, got %d)",
3678 +                                     sizeof(struct ip_set_req_adt_get), *len);
3679 +                       res = -EINVAL;
3680 +                       goto done;
3681 +               }
3682 +               req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
3683 +               index = ip_set_find_byname(req_get->set.name);
3684 +               if (index != IP_SET_INVALID_ID) {
3685 +                       req_get->set.index = index;
3686 +                       strncpy(req_get->typename,
3687 +                               ip_set_list[index]->type->typename,
3688 +                               IP_SET_MAXNAMELEN - 1);
3689 +               } else {
3690 +                       res = -ENOENT;
3691 +                       goto done;
3692 +               }
3693 +               goto copy;
3694 +       }
3695 +       case IP_SET_OP_MAX_SETS: {
3696 +               struct ip_set_req_max_sets *req_max_sets = data;
3697 +               ip_set_id_t i;
3698 +
3699 +               if (*len != sizeof(struct ip_set_req_max_sets)) {
3700 +                       ip_set_printk("invalid MAX_SETS (want %zu, got %d)",
3701 +                                     sizeof(struct ip_set_req_max_sets), *len);
3702 +                       res = -EINVAL;
3703 +                       goto done;
3704 +               }
3705 +
3706 +               if (SETNAME_EQ(req_max_sets->set.name, IPSET_TOKEN_ALL)) {
3707 +                       req_max_sets->set.index = IP_SET_INVALID_ID;
3708 +               } else {
3709 +                       req_max_sets->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
3710 +                       req_max_sets->set.index =
3711 +                               ip_set_find_byname(req_max_sets->set.name);
3712 +                       if (req_max_sets->set.index == IP_SET_INVALID_ID) {
3713 +                               res = -ENOENT;
3714 +                               goto done;
3715 +                       }
3716 +               }
3717 +               req_max_sets->max_sets = ip_set_max;
3718 +               req_max_sets->sets = 0;
3719 +               for (i = 0; i < ip_set_max; i++) {
3720 +                       if (ip_set_list[i] != NULL)
3721 +                               req_max_sets->sets++;
3722 +               }
3723 +               goto copy;
3724 +       }
3725 +       case IP_SET_OP_LIST_SIZE:
3726 +       case IP_SET_OP_SAVE_SIZE: {
3727 +               struct ip_set_req_setnames *req_setnames = data;
3728 +               struct ip_set_name_list *name_list;
3729 +               struct ip_set *set;
3730 +               ip_set_id_t i;
3731 +               int used;
3732 +
3733 +               if (*len < sizeof(struct ip_set_req_setnames)) {
3734 +                       ip_set_printk("short LIST_SIZE (want >=%zu, got %d)",
3735 +                                     sizeof(struct ip_set_req_setnames), *len);
3736 +                       res = -EINVAL;
3737 +                       goto done;
3738 +               }
3739 +
3740 +               req_setnames->size = 0;
3741 +               used = sizeof(struct ip_set_req_setnames);
3742 +               for (i = 0; i < ip_set_max; i++) {
3743 +                       if (ip_set_list[i] == NULL)
3744 +                               continue;
3745 +                       name_list = data + used;
3746 +                       used += sizeof(struct ip_set_name_list);
3747 +                       if (used > copylen) {
3748 +                               res = -EAGAIN;
3749 +                               goto done;
3750 +                       }
3751 +                       set = ip_set_list[i];
3752 +                       /* Fill in index, name, etc. */
3753 +                       name_list->index = i;
3754 +                       name_list->id = set->id;
3755 +                       strncpy(name_list->name,
3756 +                               set->name,
3757 +                               IP_SET_MAXNAMELEN - 1);
3758 +                       strncpy(name_list->typename,
3759 +                               set->type->typename,
3760 +                               IP_SET_MAXNAMELEN - 1);
3761 +                       DP("filled %s of type %s, index %u\n",
3762 +                          name_list->name, name_list->typename,
3763 +                          name_list->index);
3764 +                       if (!(req_setnames->index == IP_SET_INVALID_ID
3765 +                             || req_setnames->index == i))
3766 +                             continue;
3767 +                       /* Update size */
3768 +                       switch (*op) {
3769 +                       case IP_SET_OP_LIST_SIZE: {
3770 +                               req_setnames->size += sizeof(struct ip_set_list)
3771 +                                       + set->type->header_size
3772 +                                       + set->type->list_members_size(set);
3773 +                               /* Sets are identified by id in the hash */
3774 +                               FOREACH_HASH_DO(__set_hash_bindings_size_list,
3775 +                                               set->id, &req_setnames->size);
3776 +                               break;
3777 +                       }
3778 +                       case IP_SET_OP_SAVE_SIZE: {
3779 +                               req_setnames->size += sizeof(struct ip_set_save)
3780 +                                       + set->type->header_size
3781 +                                       + set->type->list_members_size(set);
3782 +                               FOREACH_HASH_DO(__set_hash_bindings_size_save,
3783 +                                               set->id, &req_setnames->size);
3784 +                               break;
3785 +                       }
3786 +                       default:
3787 +                               break;
3788 +                       }
3789 +               }
3790 +               if (copylen != used) {
3791 +                       res = -EAGAIN;
3792 +                       goto done;
3793 +               }
3794 +               goto copy;
3795 +       }
3796 +       case IP_SET_OP_LIST: {
3797 +               struct ip_set_req_list *req_list = data;
3798 +               ip_set_id_t i;
3799 +               int used;
3800 +
3801 +               if (*len < sizeof(struct ip_set_req_list)) {
3802 +                       ip_set_printk("short LIST (want >=%zu, got %d)",
3803 +                                     sizeof(struct ip_set_req_list), *len);
3804 +                       res = -EINVAL;
3805 +                       goto done;
3806 +               }
3807 +               index = req_list->index;
3808 +               if (index != IP_SET_INVALID_ID
3809 +                   && ip_set_find_byindex(index) != index) {
3810 +                       res = -ENOENT;
3811 +                       goto done;
3812 +               }
3813 +               used = 0;
3814 +               if (index == IP_SET_INVALID_ID) {
3815 +                       /* List all sets */
3816 +                       for (i = 0; i < ip_set_max && res == 0; i++) {
3817 +                               if (ip_set_list[i] != NULL)
3818 +                                       res = ip_set_list_set(i, data, &used, *len);
3819 +                       }
3820 +               } else {
3821 +                       /* List an individual set */
3822 +                       res = ip_set_list_set(index, data, &used, *len);
3823 +               }
3824 +               if (res != 0)
3825 +                       goto done;
3826 +               else if (copylen != used) {
3827 +                       res = -EAGAIN;
3828 +                       goto done;
3829 +               }
3830 +               goto copy;
3831 +       }
3832 +       case IP_SET_OP_SAVE: {
3833 +               struct ip_set_req_list *req_save = data;
3834 +               ip_set_id_t i;
3835 +               int used;
3836 +
3837 +               if (*len < sizeof(struct ip_set_req_list)) {
3838 +                       ip_set_printk("short SAVE (want >=%zu, got %d)",
3839 +                                     sizeof(struct ip_set_req_list), *len);
3840 +                       res = -EINVAL;
3841 +                       goto done;
3842 +               }
3843 +               index = req_save->index;
3844 +               if (index != IP_SET_INVALID_ID
3845 +                   && ip_set_find_byindex(index) != index) {
3846 +                       res = -ENOENT;
3847 +                       goto done;
3848 +               }
3849 +
3850 +#define SETLIST(set)   (strcmp(set->type->typename, "setlist") == 0)
3851 +
3852 +               used = 0;
3853 +               if (index == IP_SET_INVALID_ID) {
3854 +                       /* Save all sets: ugly setlist type dependency */
3855 +                       int setlist = 0;
3856 +               setlists:
3857 +                       for (i = 0; i < ip_set_max && res == 0; i++) {
3858 +                               if (ip_set_list[i] != NULL
3859 +                                   && !(setlist ^ SETLIST(ip_set_list[i])))
3860 +                                       res = ip_set_save_set(i, data, &used, *len);
3861 +                       }
3862 +                       if (!setlist) {
3863 +                               setlist = 1;
3864 +                               goto setlists;
3865 +                       }
3866 +               } else {
3867 +                       /* Save an individual set */
3868 +                       res = ip_set_save_set(index, data, &used, *len);
3869 +               }
3870 +               if (res == 0)
3871 +                       res = ip_set_save_bindings(index, data, &used, *len);
3872 +                       
3873 +               if (res != 0)
3874 +                       goto done;
3875 +               else if (copylen != used) {
3876 +                       res = -EAGAIN;
3877 +                       goto done;
3878 +               }
3879 +               goto copy;
3880 +       }
3881 +       case IP_SET_OP_RESTORE: {
3882 +               struct ip_set_req_setnames *req_restore = data;
3883 +               int line;
3884 +
3885 +               if (*len < sizeof(struct ip_set_req_setnames)
3886 +                   || *len != req_restore->size) {
3887 +                       ip_set_printk("invalid RESTORE (want =%lu, got %d)",
3888 +                                     (long unsigned)req_restore->size, *len);
3889 +                       res = -EINVAL;
3890 +                       goto done;
3891 +               }
3892 +               line = ip_set_restore(data + sizeof(struct ip_set_req_setnames),
3893 +                                     req_restore->size - sizeof(struct ip_set_req_setnames));
3894 +               DP("ip_set_restore: %d", line);
3895 +               if (line != 0) {
3896 +                       res = -EAGAIN;
3897 +                       req_restore->size = line;
3898 +                       copylen = sizeof(struct ip_set_req_setnames);
3899 +                       goto copy;
3900 +               }
3901 +               goto done;
3902 +       }
3903 +       default:
3904 +               res = -EBADMSG;
3905 +               goto done;
3906 +       }       /* end of switch(op) */
3907 +
3908 +    copy:
3909 +       DP("set %s, copylen %d", index != IP_SET_INVALID_ID
3910 +                                && ip_set_list[index]
3911 +                    ? ip_set_list[index]->name
3912 +                    : ":all:", copylen);
3913 +       res = copy_to_user(user, data, copylen);
3914 +       
3915 +    done:
3916 +       up(&ip_set_app_mutex);
3917 +       vfree(data);
3918 +       if (res > 0)
3919 +               res = 0;
3920 +       DP("final result %d", res);
3921 +       return res;
3922 +}
3923 +
3924 +static struct nf_sockopt_ops so_set = {
3925 +       .pf             = PF_INET,
3926 +       .set_optmin     = SO_IP_SET,
3927 +       .set_optmax     = SO_IP_SET + 1,
3928 +       .set            = &ip_set_sockfn_set,
3929 +       .get_optmin     = SO_IP_SET,
3930 +       .get_optmax     = SO_IP_SET + 1,
3931 +       .get            = &ip_set_sockfn_get,
3932 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
3933 +       .use            = 0,
3934 +#else
3935 +       .owner          = THIS_MODULE,
3936 +#endif
3937 +};
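
[Editor's note, illustrative only and not part of the patch.] A minimal sketch of how a userspace tool could reach the get-sockopt hook registered above to query the module's protocol version. It assumes the patched <linux/netfilter_ipv4/ip_set.h> is on the include path, that struct ip_set_req_version carries the (op, version) pair read by ip_set_sockfn_get(), and that the caller holds CAP_NET_ADMIN, as the handler enforces.

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/netfilter_ipv4/ip_set.h>

int main(void)
{
	struct ip_set_req_version req;
	socklen_t size = sizeof(req);
	/* Any PF_INET socket works; a raw socket needs CAP_NET_ADMIN anyway. */
	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);

	if (fd < 0)
		return 1;
	memset(&req, 0, sizeof(req));
	req.op = IP_SET_OP_VERSION;
	if (getsockopt(fd, SOL_IP, SO_IP_SET, &req, &size) != 0)
		return 1;
	printf("kernel ipset protocol version: %u\n", req.version);
	return 0;
}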
3938 +
3939 +static int max_sets, hash_size;
3940 +
3941 +module_param(max_sets, int, 0600);
3942 +MODULE_PARM_DESC(max_sets, "maximal number of sets");
3943 +module_param(hash_size, int, 0600);
3944 +MODULE_PARM_DESC(hash_size, "hash size for bindings");
3945 +MODULE_LICENSE("GPL");
3946 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
3947 +MODULE_DESCRIPTION("module implementing core IP set support");
3948 +
3949 +static int __init ip_set_init(void)
3950 +{
3951 +       int res;
3952 +       ip_set_id_t i;
3953 +
3954 +       get_random_bytes(&ip_set_hash_random, 4);
3955 +       if (max_sets)
3956 +               ip_set_max = max_sets;
3957 +       ip_set_list = vmalloc(sizeof(struct ip_set *) * ip_set_max);
3958 +       if (!ip_set_list) {
3959 +               printk(KERN_ERR "Unable to create ip_set_list\n");
3960 +               return -ENOMEM;
3961 +       }
3962 +       memset(ip_set_list, 0, sizeof(struct ip_set *) * ip_set_max);
3963 +       if (hash_size)
3964 +               ip_set_bindings_hash_size = hash_size;
3965 +       ip_set_hash = vmalloc(sizeof(struct list_head) * ip_set_bindings_hash_size);
3966 +       if (!ip_set_hash) {
3967 +               printk(KERN_ERR "Unable to create ip_set_hash\n");
3968 +               vfree(ip_set_list);
3969 +               return -ENOMEM;
3970 +       }
3971 +       for (i = 0; i < ip_set_bindings_hash_size; i++)
3972 +               INIT_LIST_HEAD(&ip_set_hash[i]);
3973 +
3974 +       INIT_LIST_HEAD(&set_type_list);
3975 +
3976 +       res = nf_register_sockopt(&so_set);
3977 +       if (res != 0) {
3978 +               ip_set_printk("SO_SET registry failed: %d", res);
3979 +               vfree(ip_set_list);
3980 +               vfree(ip_set_hash);
3981 +               return res;
3982 +       }
3983 +       
3984 +       return 0;
3985 +}
3986 +
3987 +static void __exit ip_set_fini(void)
3988 +{
3989 +       /* There can't be any existing set or binding */
3990 +       nf_unregister_sockopt(&so_set);
3991 +       vfree(ip_set_list);
3992 +       vfree(ip_set_hash);
3993 +       DP("these are the famous last words");
3994 +}
3995 +
3996 +EXPORT_SYMBOL(ip_set_register_set_type);
3997 +EXPORT_SYMBOL(ip_set_unregister_set_type);
3998 +
3999 +EXPORT_SYMBOL(ip_set_get_byname);
4000 +EXPORT_SYMBOL(ip_set_get_byindex);
4001 +EXPORT_SYMBOL(ip_set_put_byindex);
4002 +EXPORT_SYMBOL(ip_set_id);
4003 +EXPORT_SYMBOL(__ip_set_get_byname);
4004 +EXPORT_SYMBOL(__ip_set_put_byindex);
4005 +
4006 +EXPORT_SYMBOL(ip_set_addip_kernel);
4007 +EXPORT_SYMBOL(ip_set_delip_kernel);
4008 +EXPORT_SYMBOL(ip_set_testip_kernel);
4009 +
4010 +module_init(ip_set_init);
4011 +module_exit(ip_set_fini);
4012 --- /dev/null
4013 +++ b/net/ipv4/netfilter/ip_set_iphash.c
4014 @@ -0,0 +1,166 @@
4015 +/* Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
4016 + *
4017 + * This program is free software; you can redistribute it and/or modify
4018 + * it under the terms of the GNU General Public License version 2 as
4019 + * published by the Free Software Foundation.
4020 + */
4021 +
4022 +/* Kernel module implementing an ip hash set */
4023 +
4024 +#include <linux/module.h>
4025 +#include <linux/moduleparam.h>
4026 +#include <linux/ip.h>
4027 +#include <linux/skbuff.h>
4028 +#include <linux/netfilter_ipv4/ip_set_jhash.h>
4029 +#include <linux/errno.h>
4030 +#include <asm/uaccess.h>
4031 +#include <asm/bitops.h>
4032 +#include <linux/spinlock.h>
4033 +#include <linux/random.h>
4034 +
4035 +#include <net/ip.h>
4036 +
4037 +#include <linux/netfilter_ipv4/ip_set_iphash.h>
4038 +
4039 +static int limit = MAX_RANGE;
4040 +
4041 +static inline __u32
4042 +iphash_id(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
4043 +{
4044 +       struct ip_set_iphash *map = set->data;
4045 +       __u32 id;
4046 +       u_int16_t i;
4047 +       ip_set_ip_t *elem;
4048 +
4049 +       *hash_ip = ip & map->netmask;
4050 +       DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u, %u.%u.%u.%u",
4051 +          set->name, HIPQUAD(ip), HIPQUAD(*hash_ip), HIPQUAD(map->netmask));
4052 +       
4053 +       for (i = 0; i < map->probes; i++) {
4054 +               id = jhash_ip(map, i, *hash_ip) % map->hashsize;
4055 +               DP("hash key: %u", id);
4056 +               elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
4057 +               if (*elem == *hash_ip)
4058 +                       return id;
4059 +               /* No shortcut - there can be deleted entries. */
4060 +       }
4061 +       return UINT_MAX;
4062 +}
4063 +
4064 +static inline int
4065 +iphash_test(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
4066 +{
4067 +       return (ip && iphash_id(set, hash_ip, ip) != UINT_MAX);
4068 +}
4069 +
4070 +#define KADT_CONDITION
4071 +
4072 +UADT(iphash, test)
4073 +KADT(iphash, test, ipaddr)
4074 +
4075 +static inline int
4076 +__iphash_add(struct ip_set_iphash *map, ip_set_ip_t *ip)
4077 +{
4078 +       __u32 probe;
4079 +       u_int16_t i;
4080 +       ip_set_ip_t *elem, *slot = NULL;
4081 +       
4082 +       for (i = 0; i < map->probes; i++) {
4083 +               probe = jhash_ip(map, i, *ip) % map->hashsize;
4084 +               elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
4085 +               if (*elem == *ip)
4086 +                       return -EEXIST;
4087 +               if (!(slot || *elem))
4088 +                       slot = elem;
4089 +               /* There can be deleted entries, must check all slots */
4090 +       }
4091 +       if (slot) {
4092 +               *slot = *ip;
4093 +               map->elements++;
4094 +               return 0;
4095 +       }
4096 +       /* Trigger rehashing */
4097 +       return -EAGAIN;
4098 +}
4099 +
4100 +static inline int
4101 +iphash_add(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
4102 +{
4103 +       struct ip_set_iphash *map = set->data;
4104 +       
4105 +       if (!ip || map->elements >= limit)
4106 +               return -ERANGE;
4107 +
4108 +       *hash_ip = ip & map->netmask;
4109 +       
4110 +       return __iphash_add(map, hash_ip);
4111 +}
4112 +
4113 +UADT(iphash, add)
4114 +KADT(iphash, add, ipaddr)
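
[Editor's note, illustrative only and not part of the patch.] The add path above is a multi-probe open-addressing scheme: each of map->probes independently seeded hashes names one slot, the first empty slot seen along the way is remembered, and -EAGAIN asks the caller to grow the hash and retry. The self-contained toy below shows the same idea; the cheap hash and plain array stand in for the kernel's jhash_ip()/harray helpers.

#include <stdint.h>
#include <stdio.h>

#define PROBES   4
#define HASHSIZE 8

/* Cheap stand-in for jhash_ip(map, i, ip): mix the address with a seed. */
static uint32_t toy_hash(uint32_t ip, uint32_t seed)
{
	uint32_t h = ip ^ seed;

	h ^= h >> 16;
	h *= 0x45d9f3bu;
	h ^= h >> 16;
	return h;
}

/* 0: inserted, 1: already present (-EEXIST above), -1: rehash needed (-EAGAIN) */
static int toy_add(uint32_t *members, const uint32_t *seeds, uint32_t ip)
{
	uint32_t *slot = NULL;
	int i;

	for (i = 0; i < PROBES; i++) {
		uint32_t id = toy_hash(ip, seeds[i]) % HASHSIZE;

		if (members[id] == ip)
			return 1;
		if (!slot && !members[id])
			slot = &members[id];	/* remember first free slot */
	}
	if (!slot)
		return -1;			/* all probed slots taken */
	*slot = ip;
	return 0;
}

int main(void)
{
	uint32_t members[HASHSIZE] = { 0 };
	uint32_t seeds[PROBES] = { 0x12345678, 0x9abcdef0, 0x0badf00d, 0xfeedface };
	uint32_t ip = 0x0a000001;		/* 10.0.0.1 in host byte order */

	printf("first add: %d\n", toy_add(members, seeds, ip));	/* 0 */
	printf("second add: %d\n", toy_add(members, seeds, ip));	/* 1 */
	return 0;
}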
4115 +
4116 +static inline void
4117 +__iphash_retry(struct ip_set_iphash *tmp, struct ip_set_iphash *map)
4118 +{
4119 +       tmp->netmask = map->netmask;
4120 +}
4121 +
4122 +HASH_RETRY(iphash, ip_set_ip_t)
4123 +
4124 +static inline int
4125 +iphash_del(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
4126 +{
4127 +       struct ip_set_iphash *map = set->data;
4128 +       ip_set_ip_t id, *elem;
4129 +
4130 +       if (!ip)
4131 +               return -ERANGE;
4132 +
4133 +       id = iphash_id(set, hash_ip, ip);
4134 +       if (id == UINT_MAX)
4135 +               return -EEXIST;
4136 +               
4137 +       elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
4138 +       *elem = 0;
4139 +       map->elements--;
4140 +
4141 +       return 0;
4142 +}
4143 +
4144 +UADT(iphash, del)
4145 +KADT(iphash, del, ipaddr)
4146 +
4147 +static inline int
4148 +__iphash_create(const struct ip_set_req_iphash_create *req,
4149 +               struct ip_set_iphash *map)
4150 +{
4151 +       map->netmask = req->netmask;
4152 +       
4153 +       return 0;
4154 +}
4155 +
4156 +HASH_CREATE(iphash, ip_set_ip_t)
4157 +HASH_DESTROY(iphash)
4158 +
4159 +HASH_FLUSH(iphash, ip_set_ip_t)
4160 +
4161 +static inline void
4162 +__iphash_list_header(const struct ip_set_iphash *map,
4163 +                    struct ip_set_req_iphash_create *header)
4164 +{    
4165 +       header->netmask = map->netmask;
4166 +}
4167 +
4168 +HASH_LIST_HEADER(iphash)
4169 +HASH_LIST_MEMBERS_SIZE(iphash, ip_set_ip_t)
4170 +HASH_LIST_MEMBERS(iphash, ip_set_ip_t)
4171 +
4172 +IP_SET_RTYPE(iphash, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
4173 +
4174 +MODULE_LICENSE("GPL");
4175 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
4176 +MODULE_DESCRIPTION("iphash type of IP sets");
4177 +module_param(limit, int, 0600);
4178 +MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
4179 +
4180 +REGISTER_MODULE(iphash)
4181 --- /dev/null
4182 +++ b/net/ipv4/netfilter/ip_set_ipmap.c
4183 @@ -0,0 +1,142 @@
4184 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
4185 + *                         Patrick Schaaf <bof@bof.de>
4186 + * Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
4187 + *
4188 + * This program is free software; you can redistribute it and/or modify
4189 + * it under the terms of the GNU General Public License version 2 as
4190 + * published by the Free Software Foundation.
4191 + */
4192 +
4193 +/* Kernel module implementing an IP set type: the single bitmap type */
4194 +
4195 +#include <linux/module.h>
4196 +#include <linux/ip.h>
4197 +#include <linux/skbuff.h>
4198 +#include <linux/errno.h>
4199 +#include <asm/uaccess.h>
4200 +#include <asm/bitops.h>
4201 +#include <linux/spinlock.h>
4202 +
4203 +#include <linux/netfilter_ipv4/ip_set_ipmap.h>
4204 +
4205 +static inline ip_set_ip_t
4206 +ip_to_id(const struct ip_set_ipmap *map, ip_set_ip_t ip)
4207 +{
4208 +       return (ip - map->first_ip)/map->hosts;
4209 +}
4210 +
4211 +static inline int
4212 +ipmap_test(const struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
4213 +{
4214 +       const struct ip_set_ipmap *map = set->data;
4215 +       
4216 +       if (ip < map->first_ip || ip > map->last_ip)
4217 +               return -ERANGE;
4218 +
4219 +       *hash_ip = ip & map->netmask;
4220 +       DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
4221 +          set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
4222 +       return !!test_bit(ip_to_id(map, *hash_ip), map->members);
4223 +}
4224 +
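+/* UADT() expands into the userspace add/del/test entry point (fed from an
+ * ip_set_req_* structure) and KADT() into the kernel-side entry point used
+ * when matching packets. KADT_CONDITION is pasted into the KADT body to
+ * pull extra data (ports, a second address) out of the skb; for plain
+ * IP-only sets it is empty. */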
4225 +#define KADT_CONDITION
4226 +
4227 +UADT(ipmap, test)
4228 +KADT(ipmap, test, ipaddr)
4229 +
4230 +static inline int
4231 +ipmap_add(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
4232 +{
4233 +       struct ip_set_ipmap *map = set->data;
4234 +
4235 +       if (ip < map->first_ip || ip > map->last_ip)
4236 +               return -ERANGE;
4237 +
4238 +       *hash_ip = ip & map->netmask;
4239 +       DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
4240 +       if (test_and_set_bit(ip_to_id(map, *hash_ip), map->members))
4241 +               return -EEXIST;
4242 +
4243 +       return 0;
4244 +}
4245 +
4246 +UADT(ipmap, add)
4247 +KADT(ipmap, add, ipaddr)
4248 +
4249 +static inline int
4250 +ipmap_del(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
4251 +{
4252 +       struct ip_set_ipmap *map = set->data;
4253 +
4254 +       if (ip < map->first_ip || ip > map->last_ip)
4255 +               return -ERANGE;
4256 +
4257 +       *hash_ip = ip & map->netmask;
4258 +       DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
4259 +       if (!test_and_clear_bit(ip_to_id(map, *hash_ip), map->members))
4260 +               return -EEXIST;
4261 +       
4262 +       return 0;
4263 +}
4264 +
4265 +UADT(ipmap, del)
4266 +KADT(ipmap, del, ipaddr)
4267 +
4268 +static inline int
4269 +__ipmap_create(const struct ip_set_req_ipmap_create *req,
4270 +              struct ip_set_ipmap *map)
4271 +{
4272 +       map->netmask = req->netmask;
4273 +
4274 +       if (req->netmask == 0xFFFFFFFF) {
4275 +               map->hosts = 1;
4276 +               map->sizeid = map->last_ip - map->first_ip + 1;
4277 +       } else {
4278 +               unsigned int mask_bits, netmask_bits;
4279 +               ip_set_ip_t mask;
4280 +
4281 +               map->first_ip &= map->netmask;  /* Should we better bark? */
4282 +
4283 +               mask = range_to_mask(map->first_ip, map->last_ip, &mask_bits);
4284 +               netmask_bits = mask_to_bits(map->netmask);
4285 +
4286 +               if ((!mask && (map->first_ip || map->last_ip != 0xFFFFFFFF))
4287 +                   || netmask_bits <= mask_bits)
4288 +                       return -ENOEXEC;
4289 +
4290 +               DP("mask_bits %u, netmask_bits %u",
4291 +                  mask_bits, netmask_bits);
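+               /* hosts  = 2^(32 - netmask_bits) addresses per netmask block,
+                * sizeid = 2^(netmask_bits - mask_bits) such blocks in range */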
4292 +               map->hosts = 2 << (32 - netmask_bits - 1);
4293 +               map->sizeid = 2 << (netmask_bits - mask_bits - 1);
4294 +       }
4295 +       if (map->sizeid > MAX_RANGE + 1) {
4296 +               ip_set_printk("range too big, %d elements (max %d)",
4297 +                              map->sizeid, MAX_RANGE+1);
4298 +               return -ENOEXEC;
4299 +       }
4300 +       DP("hosts %u, sizeid %u", map->hosts, map->sizeid);
4301 +       return bitmap_bytes(0, map->sizeid - 1);
4302 +}
4303 +
4304 +BITMAP_CREATE(ipmap)
4305 +BITMAP_DESTROY(ipmap)
4306 +BITMAP_FLUSH(ipmap)
4307 +
4308 +static inline void
4309 +__ipmap_list_header(const struct ip_set_ipmap *map,
4310 +                   struct ip_set_req_ipmap_create *header)
4311 +{
4312 +       header->netmask = map->netmask;
4313 +}
4314 +
4315 +BITMAP_LIST_HEADER(ipmap)
4316 +BITMAP_LIST_MEMBERS_SIZE(ipmap)
4317 +BITMAP_LIST_MEMBERS(ipmap)
4318 +
4319 +IP_SET_TYPE(ipmap, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
4320 +
4321 +MODULE_LICENSE("GPL");
4322 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
4323 +MODULE_DESCRIPTION("ipmap type of IP sets");
4324 +
4325 +REGISTER_MODULE(ipmap)
4326 --- /dev/null
4327 +++ b/net/ipv4/netfilter/ip_set_ipporthash.c
4328 @@ -0,0 +1,203 @@
4329 +/* Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
4330 + *
4331 + * This program is free software; you can redistribute it and/or modify
4332 + * it under the terms of the GNU General Public License version 2 as
4333 + * published by the Free Software Foundation.
4334 + */
4335 +
4336 +/* Kernel module implementing an ip+port hash set */
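+/* Elements are stored open-addressed: each entry is hashed with
+ * map->probes different jhash seeds and put into the first empty slot;
+ * when all probes collide, -EAGAIN is returned so that the set can be
+ * rehashed (see the HASH_RETRY helper below). */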
4337 +
4338 +#include <linux/module.h>
4339 +#include <linux/moduleparam.h>
4340 +#include <linux/ip.h>
4341 +#include <linux/tcp.h>
4342 +#include <linux/udp.h>
4343 +#include <linux/skbuff.h>
4344 +#include <linux/netfilter_ipv4/ip_set_jhash.h>
4345 +#include <linux/errno.h>
4346 +#include <asm/uaccess.h>
4347 +#include <asm/bitops.h>
4348 +#include <linux/spinlock.h>
4349 +#include <linux/random.h>
4350 +
4351 +#include <net/ip.h>
4352 +
4353 +#include <linux/netfilter_ipv4/ip_set_ipporthash.h>
4354 +#include <linux/netfilter_ipv4/ip_set_getport.h>
4355 +
4356 +static int limit = MAX_RANGE;
4357 +
4358 +static inline __u32
4359 +ipporthash_id(struct ip_set *set, ip_set_ip_t *hash_ip,
4360 +             ip_set_ip_t ip, ip_set_ip_t port)
4361 +{
4362 +       struct ip_set_ipporthash *map = set->data;
4363 +       __u32 id;
4364 +       u_int16_t i;
4365 +       ip_set_ip_t *elem;
4366 +
4367 +       *hash_ip = pack_ip_port(map, ip, port);
4368 +               
4369 +       DP("set: %s, ipport:%u.%u.%u.%u:%u, %u.%u.%u.%u",
4370 +          set->name, HIPQUAD(ip), port, HIPQUAD(*hash_ip));
4371 +       if (!*hash_ip)
4372 +               return UINT_MAX;
4373 +       
4374 +       for (i = 0; i < map->probes; i++) {
4375 +               id = jhash_ip(map, i, *hash_ip) % map->hashsize;
4376 +               DP("hash key: %u", id);
4377 +               elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
4378 +               if (*elem == *hash_ip)
4379 +                       return id;
4380 +               /* No shortcut - there can be deleted entries. */
4381 +       }
4382 +       return UINT_MAX;
4383 +}
4384 +
4385 +static inline int
4386 +ipporthash_test(struct ip_set *set, ip_set_ip_t *hash_ip,
4387 +               ip_set_ip_t ip, ip_set_ip_t port)
4388 +{
4389 +       struct ip_set_ipporthash *map = set->data;
4390 +       
4391 +       if (ip < map->first_ip || ip > map->last_ip)
4392 +               return -ERANGE;
4393 +
4394 +       return (ipporthash_id(set, hash_ip, ip, port) != UINT_MAX);
4395 +}
4396 +
4397 +#define KADT_CONDITION                                         \
4398 +       ip_set_ip_t port;                                       \
4399 +                                                               \
4400 +       if (flags[index+1] == 0)                                \
4401 +               return 0;                                       \
4402 +                                                               \
4403 +       port = get_port(skb, flags[index+1]);                   \
4404 +                                                               \
4405 +       if (port == INVALID_PORT)                               \
4406 +               return 0;
4407 +
4408 +UADT(ipporthash, test, req->port)
4409 +KADT(ipporthash, test, ipaddr, port)
4410 +
4411 +static inline int
4412 +__ipporthash_add(struct ip_set_ipporthash *map, ip_set_ip_t *ip)
4413 +{
4414 +       __u32 probe;
4415 +       u_int16_t i;
4416 +       ip_set_ip_t *elem, *slot = NULL;
4417 +
4418 +       for (i = 0; i < map->probes; i++) {
4419 +               probe = jhash_ip(map, i, *ip) % map->hashsize;
4420 +               elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
4421 +               if (*elem == *ip)
4422 +                       return -EEXIST;
4423 +               if (!(slot || *elem))
4424 +                       slot = elem;
4425 +               /* There can be deleted entries, must check all slots */
4426 +       }
4427 +       if (slot) {
4428 +               *slot = *ip;
4429 +               map->elements++;
4430 +               return 0;
4431 +       }
4432 +       /* Trigger rehashing */
4433 +       return -EAGAIN;
4434 +}
4435 +
4436 +static inline int
4437 +ipporthash_add(struct ip_set *set, ip_set_ip_t *hash_ip,
4438 +              ip_set_ip_t ip, ip_set_ip_t port)
4439 +{
4440 +       struct ip_set_ipporthash *map = set->data;
4441 +       if (map->elements > limit)
4442 +               return -ERANGE;
4443 +       if (ip < map->first_ip || ip > map->last_ip)
4444 +               return -ERANGE;
4445 +
4446 +       *hash_ip = pack_ip_port(map, ip, port);
4447 +
4448 +       if (!*hash_ip)
4449 +               return -ERANGE;
4450 +       
4451 +       return __ipporthash_add(map, hash_ip);
4452 +}
4453 +
4454 +UADT(ipporthash, add, req->port)
4455 +KADT(ipporthash, add, ipaddr, port)
4456 +
4457 +static inline void
4458 +__ipporthash_retry(struct ip_set_ipporthash *tmp,
4459 +                  struct ip_set_ipporthash *map)
4460 +{
4461 +       tmp->first_ip = map->first_ip;
4462 +       tmp->last_ip = map->last_ip;
4463 +}
4464 +
4465 +HASH_RETRY(ipporthash, ip_set_ip_t)
4466 +
4467 +static inline int
4468 +ipporthash_del(struct ip_set *set, ip_set_ip_t *hash_ip,
4469 +              ip_set_ip_t ip, ip_set_ip_t port)
4470 +{
4471 +       struct ip_set_ipporthash *map = set->data;
4472 +       ip_set_ip_t id;
4473 +       ip_set_ip_t *elem;
4474 +
4475 +       if (ip < map->first_ip || ip > map->last_ip)
4476 +               return -ERANGE;
4477 +
4478 +       id = ipporthash_id(set, hash_ip, ip, port);
4479 +
4480 +       if (id == UINT_MAX)
4481 +               return -EEXIST;
4482 +               
4483 +       elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
4484 +       *elem = 0;
4485 +       map->elements--;
4486 +
4487 +       return 0;
4488 +}
4489 +
4490 +UADT(ipporthash, del, req->port)
4491 +KADT(ipporthash, del, ipaddr, port)
4492 +
4493 +static inline int
4494 +__ipporthash_create(const struct ip_set_req_ipporthash_create *req,
4495 +                   struct ip_set_ipporthash *map)
4496 +{
4497 +       if (req->to - req->from > MAX_RANGE) {
4498 +               ip_set_printk("range too big, %d elements (max %d)",
4499 +                             req->to - req->from + 1, MAX_RANGE+1);
4500 +               return -ENOEXEC;
4501 +       }
4502 +       map->first_ip = req->from;
4503 +       map->last_ip = req->to;
4504 +       return 0;
4505 +}
4506 +
4507 +HASH_CREATE(ipporthash, ip_set_ip_t)
4508 +HASH_DESTROY(ipporthash)
4509 +HASH_FLUSH(ipporthash, ip_set_ip_t)
4510 +
4511 +static inline void
4512 +__ipporthash_list_header(const struct ip_set_ipporthash *map,
4513 +                        struct ip_set_req_ipporthash_create *header)
4514 +{
4515 +       header->from = map->first_ip;
4516 +       header->to = map->last_ip;
4517 +}
4518 +
4519 +HASH_LIST_HEADER(ipporthash)
4520 +HASH_LIST_MEMBERS_SIZE(ipporthash, ip_set_ip_t)
4521 +HASH_LIST_MEMBERS(ipporthash, ip_set_ip_t)
4522 +
4523 +IP_SET_RTYPE(ipporthash, IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_DATA_DOUBLE)
4524 +
4525 +MODULE_LICENSE("GPL");
4526 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
4527 +MODULE_DESCRIPTION("ipporthash type of IP sets");
4528 +module_param(limit, int, 0600);
4529 +MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
4530 +
4531 +REGISTER_MODULE(ipporthash)
4532 --- /dev/null
4533 +++ b/net/ipv4/netfilter/ip_set_ipportiphash.c
4534 @@ -0,0 +1,216 @@
4535 +/* Copyright (C) 2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
4536 + *
4537 + * This program is free software; you can redistribute it and/or modify
4538 + * it under the terms of the GNU General Public License version 2 as
4539 + * published by the Free Software Foundation.
4540 + */
4541 +
4542 +/* Kernel module implementing an ip+port+ip hash set */
4543 +
4544 +#include <linux/module.h>
4545 +#include <linux/moduleparam.h>
4546 +#include <linux/ip.h>
4547 +#include <linux/tcp.h>
4548 +#include <linux/udp.h>
4549 +#include <linux/skbuff.h>
4550 +#include <linux/netfilter_ipv4/ip_set_jhash.h>
4551 +#include <linux/errno.h>
4552 +#include <asm/uaccess.h>
4553 +#include <asm/bitops.h>
4554 +#include <linux/spinlock.h>
4555 +#include <linux/random.h>
4556 +
4557 +#include <net/ip.h>
4558 +
4559 +#include <linux/netfilter_ipv4/ip_set_ipportiphash.h>
4560 +#include <linux/netfilter_ipv4/ip_set_getport.h>
4561 +
4562 +static int limit = MAX_RANGE;
4563 +
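+/* Hash the packed ip:port word together with the second IP, seeded with
+ * the i-th random initval of the set. */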
4564 +#define jhash_ip2(map, i, ipport, ip1)         \
4565 +       jhash_2words(ipport, ip1, *(map->initval + i))
4566 +
4567 +static inline __u32
4568 +ipportiphash_id(struct ip_set *set, ip_set_ip_t *hash_ip,
4569 +               ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
4570 +{
4571 +       struct ip_set_ipportiphash *map = set->data;
4572 +       __u32 id;
4573 +       u_int16_t i;
4574 +       struct ipportip *elem;
4575 +
4576 +       *hash_ip = pack_ip_port(map, ip, port);
4577 +       DP("set: %s, ipport:%u.%u.%u.%u:%u, %u.%u.%u.%u",
4578 +          set->name, HIPQUAD(ip), port, HIPQUAD(*hash_ip));
4579 +       if (!(*hash_ip || ip1))
4580 +               return UINT_MAX;
4581 +       
4582 +       for (i = 0; i < map->probes; i++) {
4583 +               id = jhash_ip2(map, i, *hash_ip, ip1) % map->hashsize;
4584 +               DP("hash key: %u", id);
4585 +               elem = HARRAY_ELEM(map->members, struct ipportip *, id);
4586 +               if (elem->ip == *hash_ip && elem->ip1 == ip1)
4587 +                       return id;
4588 +               /* No shortcut - there can be deleted entries. */
4589 +       }
4590 +       return UINT_MAX;
4591 +}
4592 +
4593 +static inline int
4594 +ipportiphash_test(struct ip_set *set, ip_set_ip_t *hash_ip,
4595 +                 ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
4596 +{
4597 +       struct ip_set_ipportiphash *map = set->data;
4598 +       
4599 +       if (ip < map->first_ip || ip > map->last_ip)
4600 +               return -ERANGE;
4601 +
4602 +       return (ipportiphash_id(set, hash_ip, ip, port, ip1) != UINT_MAX);
4603 +}
4604 +
4605 +#define KADT_CONDITION                                         \
4606 +       ip_set_ip_t port, ip1;                                  \
4607 +                                                               \
4608 +       if (flags[index+2] == 0)                                \
4609 +               return 0;                                       \
4610 +                                                               \
4611 +       port = get_port(skb, flags[index+1]);                   \
4612 +       ip1 = ipaddr(skb, flags[index+2]);                      \
4613 +                                                               \
4614 +       if (port == INVALID_PORT)                               \
4615 +               return 0;
4616 +
4617 +UADT(ipportiphash, test, req->port, req->ip1)
4618 +KADT(ipportiphash, test, ipaddr, port, ip1)
4619 +
4620 +static inline int
4621 +__ipportip_add(struct ip_set_ipportiphash *map,
4622 +              ip_set_ip_t hash_ip, ip_set_ip_t ip1)
4623 +{
4624 +       __u32 probe;
4625 +       u_int16_t i;
4626 +       struct ipportip *elem, *slot = NULL;
4627 +
4628 +       for (i = 0; i < map->probes; i++) {
4629 +               probe = jhash_ip2(map, i, hash_ip, ip1) % map->hashsize;
4630 +               elem = HARRAY_ELEM(map->members, struct ipportip *, probe);
4631 +               if (elem->ip == hash_ip && elem->ip1 == ip1)
4632 +                       return -EEXIST;
4633 +               if (!(slot || elem->ip || elem->ip1))
4634 +                       slot = elem;
4635 +               /* There can be deleted entries, must check all slots */
4636 +       }
4637 +       if (slot) {
4638 +               slot->ip = hash_ip;
4639 +               slot->ip1 = ip1;
4640 +               map->elements++;
4641 +               return 0;
4642 +       }
4643 +       /* Trigger rehashing */
4644 +       return -EAGAIN;
4645 +}
4646 +
4647 +static inline int
4648 +__ipportiphash_add(struct ip_set_ipportiphash *map,
4649 +                  struct ipportip *elem)
4650 +{
4651 +       return __ipportip_add(map, elem->ip, elem->ip1);
4652 +}
4653 +
4654 +static inline int
4655 +ipportiphash_add(struct ip_set *set, ip_set_ip_t *hash_ip,
4656 +                ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
4657 +{
4658 +       struct ip_set_ipportiphash *map = set->data;
4659 +       
4660 +       if (map->elements > limit)
4661 +               return -ERANGE;
4662 +       if (ip < map->first_ip || ip > map->last_ip)
4663 +               return -ERANGE;
4664 +
4665 +       *hash_ip = pack_ip_port(map, ip, port);
4666 +       if (!(*hash_ip || ip1))
4667 +               return -ERANGE;
4668 +       
4669 +       return __ipportip_add(map, *hash_ip, ip1);
4670 +}
4671 +
4672 +UADT(ipportiphash, add, req->port, req->ip1)
4673 +KADT(ipportiphash, add, ipaddr, port, ip1)
4674 +
4675 +static inline void
4676 +__ipportiphash_retry(struct ip_set_ipportiphash *tmp,
4677 +                    struct ip_set_ipportiphash *map)
4678 +{
4679 +       tmp->first_ip = map->first_ip;
4680 +       tmp->last_ip = map->last_ip;
4681 +}
4682 +
4683 +HASH_RETRY2(ipportiphash, struct ipportip)
4684 +
4685 +static inline int
4686 +ipportiphash_del(struct ip_set *set, ip_set_ip_t *hash_ip,
4687 +              ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
4688 +{
4689 +       struct ip_set_ipportiphash *map = set->data;
4690 +       ip_set_ip_t id;
4691 +       struct ipportip *elem;
4692 +
4693 +       if (ip < map->first_ip || ip > map->last_ip)
4694 +               return -ERANGE;
4695 +
4696 +       id = ipportiphash_id(set, hash_ip, ip, port, ip1);
4697 +
4698 +       if (id == UINT_MAX)
4699 +               return -EEXIST;
4700 +               
4701 +       elem = HARRAY_ELEM(map->members, struct ipportip *, id);
4702 +       elem->ip = elem->ip1 = 0;
4703 +       map->elements--;
4704 +
4705 +       return 0;
4706 +}
4707 +
4708 +UADT(ipportiphash, del, req->port, req->ip1)
4709 +KADT(ipportiphash, del, ipaddr, port, ip1)
4710 +
4711 +static inline int
4712 +__ipportiphash_create(const struct ip_set_req_ipportiphash_create *req,
4713 +                     struct ip_set_ipportiphash *map)
4714 +{
4715 +       if (req->to - req->from > MAX_RANGE) {
4716 +               ip_set_printk("range too big, %d elements (max %d)",
4717 +                             req->to - req->from + 1, MAX_RANGE+1);
4718 +               return -ENOEXEC;
4719 +       }
4720 +       map->first_ip = req->from;
4721 +       map->last_ip = req->to;
4722 +       return 0;
4723 +}
4724 +
4725 +HASH_CREATE(ipportiphash, struct ipportip)
4726 +HASH_DESTROY(ipportiphash)
4727 +HASH_FLUSH(ipportiphash, struct ipportip)
4728 +
4729 +static inline void
4730 +__ipportiphash_list_header(const struct ip_set_ipportiphash *map,
4731 +                          struct ip_set_req_ipportiphash_create *header)
4732 +{
4733 +       header->from = map->first_ip;
4734 +       header->to = map->last_ip;
4735 +}
4736 +
4737 +HASH_LIST_HEADER(ipportiphash)
4738 +HASH_LIST_MEMBERS_SIZE(ipportiphash, struct ipportip)
4739 +HASH_LIST_MEMBERS_MEMCPY(ipportiphash, struct ipportip)
4740 +
4741 +IP_SET_RTYPE(ipportiphash, IPSET_TYPE_IP | IPSET_TYPE_PORT
4742 +                          | IPSET_TYPE_IP1 | IPSET_DATA_TRIPLE)
4743 +
4744 +MODULE_LICENSE("GPL");
4745 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
4746 +MODULE_DESCRIPTION("ipportiphash type of IP sets");
4747 +module_param(limit, int, 0600);
4748 +MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
4749 +
4750 +REGISTER_MODULE(ipportiphash)
4751 --- /dev/null
4752 +++ b/net/ipv4/netfilter/ip_set_ipportnethash.c
4753 @@ -0,0 +1,304 @@
4754 +/* Copyright (C) 2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
4755 + *
4756 + * This program is free software; you can redistribute it and/or modify
4757 + * it under the terms of the GNU General Public License version 2 as
4758 + * published by the Free Software Foundation.
4759 + */
4760 +
4761 +/* Kernel module implementing an ip+port+net hash set */
4762 +
4763 +#include <linux/module.h>
4764 +#include <linux/moduleparam.h>
4765 +#include <linux/ip.h>
4766 +#include <linux/tcp.h>
4767 +#include <linux/udp.h>
4768 +#include <linux/skbuff.h>
4769 +#include <linux/netfilter_ipv4/ip_set_jhash.h>
4770 +#include <linux/errno.h>
4771 +#include <asm/uaccess.h>
4772 +#include <asm/bitops.h>
4773 +#include <linux/spinlock.h>
4774 +#include <linux/random.h>
4775 +
4776 +#include <net/ip.h>
4777 +
4778 +#include <linux/netfilter_ipv4/ip_set_ipportnethash.h>
4779 +#include <linux/netfilter_ipv4/ip_set_getport.h>
4780 +
4781 +static int limit = MAX_RANGE;
4782 +
4783 +#define jhash_ip2(map, i, ipport, ip1)         \
4784 +       jhash_2words(ipport, ip1, *(map->initval + i))
4785 +
4786 +static inline __u32
4787 +ipportnethash_id_cidr(struct ip_set *set, ip_set_ip_t *hash_ip,
4788 +                     ip_set_ip_t ip, ip_set_ip_t port,
4789 +                     ip_set_ip_t ip1, uint8_t cidr)
4790 +{
4791 +       struct ip_set_ipportnethash *map = set->data;
4792 +       __u32 id;
4793 +       u_int16_t i;
4794 +       struct ipportip *elem;
4795 +
4796 +       *hash_ip = pack_ip_port(map, ip, port);
4797 +       DP("set: %s, ipport:%u.%u.%u.%u:%u, %u.%u.%u.%u",
4798 +          set->name, HIPQUAD(ip), port, HIPQUAD(*hash_ip));
4799 +       ip1 = pack_ip_cidr(ip1, cidr);
4800 +       if (!(*hash_ip || ip1))
4801 +               return UINT_MAX;
4802 +       
4803 +       for (i = 0; i < map->probes; i++) {
4804 +               id = jhash_ip2(map, i, *hash_ip, ip1) % map->hashsize;
4805 +               DP("hash key: %u", id);
4806 +               elem = HARRAY_ELEM(map->members, struct ipportip *, id);
4807 +               if (elem->ip == *hash_ip && elem->ip1 == ip1)
4808 +                       return id;
4809 +               /* No shortcut - there can be deleted entries. */
4810 +       }
4811 +       return UINT_MAX;
4812 +}
4813 +
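+/* Without an explicit prefix length we probe every CIDR currently present
+ * in the set (map->cidr[] lists the prefix lengths in use) until one of
+ * them gives a match. */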
4814 +static inline __u32
4815 +ipportnethash_id(struct ip_set *set, ip_set_ip_t *hash_ip,
4816 +                ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
4817 +{
4818 +       struct ip_set_ipportnethash *map = set->data;
4819 +       __u32 id = UINT_MAX;
4820 +       int i;
4821 +
4822 +       for (i = 0; i < 30 && map->cidr[i]; i++) {
4823 +               id = ipportnethash_id_cidr(set, hash_ip, ip, port, ip1, 
4824 +                                          map->cidr[i]);
4825 +               if (id != UINT_MAX)
4826 +                       break;
4827 +       }
4828 +       return id;
4829 +}
4830 +
4831 +static inline int
4832 +ipportnethash_test_cidr(struct ip_set *set, ip_set_ip_t *hash_ip,
4833 +                       ip_set_ip_t ip, ip_set_ip_t port,
4834 +                       ip_set_ip_t ip1, uint8_t cidr)
4835 +{
4836 +       struct ip_set_ipportnethash *map = set->data;
4837 +       
4838 +       if (ip < map->first_ip || ip > map->last_ip)
4839 +               return -ERANGE;
4840 +
4841 +       return (ipportnethash_id_cidr(set, hash_ip, ip, port, ip1,
4842 +                                     cidr) != UINT_MAX);
4843 +}
4844 +
4845 +static inline int
4846 +ipportnethash_test(struct ip_set *set, ip_set_ip_t *hash_ip,
4847 +                 ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
4848 +{
4849 +       struct ip_set_ipportnethash *map = set->data;
4850 +       
4851 +       if (ip < map->first_ip || ip > map->last_ip)
4852 +               return -ERANGE;
4853 +
4854 +       return (ipportnethash_id(set, hash_ip, ip, port, ip1) != UINT_MAX);
4855 +}
4856 +
4857 +static int
4858 +ipportnethash_utest(struct ip_set *set, const void *data, u_int32_t size,
4859 +                   ip_set_ip_t *hash_ip)
4860 +{
4861 +       const struct ip_set_req_ipportnethash *req = data;
4862 +
4863 +       if (req->cidr <= 0 || req->cidr > 32)
4864 +               return -EINVAL;
4865 +       return (req->cidr == 32 
4866 +               ? ipportnethash_test(set, hash_ip, req->ip, req->port,
4867 +                                    req->ip1)
4868 +               : ipportnethash_test_cidr(set, hash_ip, req->ip, req->port,
4869 +                                         req->ip1, req->cidr));
4870 +}
4871 +
4872 +#define KADT_CONDITION                                         \
4873 +       ip_set_ip_t port, ip1;                                  \
4874 +                                                               \
4875 +       if (flags[index+2] == 0)                                \
4876 +               return 0;                                       \
4877 +                                                               \
4878 +       port = get_port(skb, flags[index+1]);                   \
4879 +       ip1 = ipaddr(skb, flags[index+2]);                      \
4880 +                                                               \
4881 +       if (port == INVALID_PORT)                               \
4882 +               return 0;
4883 +
4884 +KADT(ipportnethash, test, ipaddr, port, ip1)
4885 +
4886 +static inline int
4887 +__ipportnet_add(struct ip_set_ipportnethash *map,
4888 +               ip_set_ip_t hash_ip, ip_set_ip_t ip1)
4889 +{
4890 +       __u32 probe;
4891 +       u_int16_t i;
4892 +       struct ipportip *elem, *slot = NULL;
4893 +
4894 +       for (i = 0; i < map->probes; i++) {
4895 +               probe = jhash_ip2(map, i, hash_ip, ip1) % map->hashsize;
4896 +               elem = HARRAY_ELEM(map->members, struct ipportip *, probe);
4897 +               if (elem->ip == hash_ip && elem->ip1 == ip1)
4898 +                       return -EEXIST;
4899 +               if (!(slot || elem->ip || elem->ip1))
4900 +                       slot = elem;
4901 +               /* There can be deleted entries, must check all slots */
4902 +       }
4903 +       if (slot) {
4904 +               slot->ip = hash_ip;
4905 +               slot->ip1 = ip1;
4906 +               map->elements++;
4907 +               return 0;
4908 +       }
4909 +       /* Trigger rehashing */
4910 +       return -EAGAIN;
4911 +}
4912 +
4913 +static inline int
4914 +__ipportnethash_add(struct ip_set_ipportnethash *map,
4915 +                   struct ipportip *elem)
4916 +{
4917 +       return __ipportnet_add(map, elem->ip, elem->ip1);
4918 +}
4919 +
4920 +static inline int
4921 +ipportnethash_add(struct ip_set *set, ip_set_ip_t *hash_ip,
4922 +                ip_set_ip_t ip, ip_set_ip_t port,
4923 +                ip_set_ip_t ip1, uint8_t cidr)
4924 +{
4925 +       struct ip_set_ipportnethash *map = set->data;
4926 +       struct ipportip;
4927 +       int ret;
4928 +       
4929 +       if (map->elements > limit)
4930 +               return -ERANGE;
4931 +       if (ip < map->first_ip || ip > map->last_ip)
4932 +               return -ERANGE;
4933 +       if (cidr <= 0 || cidr >= 32)
4934 +               return -EINVAL;
4935 +       if (map->nets[cidr-1] == UINT16_MAX)
4936 +               return -ERANGE;
4937 +
4938 +       *hash_ip = pack_ip_port(map, ip, port);
4939 +       ip1 = pack_ip_cidr(ip1, cidr);
4940 +       if (!(*hash_ip || ip1))
4941 +               return -ERANGE;
4942 +       
4943 +       ret =__ipportnet_add(map, *hash_ip, ip1);
4944 +       if (ret == 0) {
4945 +               if (!map->nets[cidr-1]++)
4946 +                       add_cidr_size(map->cidr, cidr);
4947 +               map->elements++;
4948 +       }
4949 +       return ret;
4950 +}
4951 +
4952 +#undef KADT_CONDITION
4953 +#define KADT_CONDITION                                                 \
4954 +       struct ip_set_ipportnethash *map = set->data;                   \
4955 +       uint8_t cidr = map->cidr[0] ? map->cidr[0] : 31;                \
4956 +       ip_set_ip_t port, ip1;                                          \
4957 +                                                                       \
4958 +       if (flags[index+2] == 0)                                        \
4959 +               return 0;                                               \
4960 +                                                                       \
4961 +       port = get_port(skb, flags[index+1]);                           \
4962 +       ip1 = ipaddr(skb, flags[index+2]);                              \
4963 +                                                                       \
4964 +       if (port == INVALID_PORT)                                       \
4965 +               return 0;
4966 +
4967 +UADT(ipportnethash, add, req->port, req->ip1, req->cidr)
4968 +KADT(ipportnethash, add, ipaddr, port, ip1, cidr)
4969 +
4970 +static inline void
4971 +__ipportnethash_retry(struct ip_set_ipportnethash *tmp,
4972 +                     struct ip_set_ipportnethash *map)
4973 +{
4974 +       tmp->first_ip = map->first_ip;
4975 +       tmp->last_ip = map->last_ip;
4976 +       memcpy(tmp->cidr, map->cidr, sizeof(tmp->cidr));
4977 +       memcpy(tmp->nets, map->nets, sizeof(tmp->nets));
4978 +}
4979 +
4980 +HASH_RETRY2(ipportnethash, struct ipportip)
4981 +
4982 +static inline int
4983 +ipportnethash_del(struct ip_set *set, ip_set_ip_t *hash_ip,
4984 +                 ip_set_ip_t ip, ip_set_ip_t port,
4985 +                 ip_set_ip_t ip1, uint8_t cidr)
4986 +{
4987 +       struct ip_set_ipportnethash *map = set->data;
4988 +       ip_set_ip_t id;
4989 +       struct ipportip *elem;
4990 +
4991 +       if (ip < map->first_ip || ip > map->last_ip)
4992 +               return -ERANGE;
4993 +       if (!ip)
4994 +               return -ERANGE;
4995 +       if (cidr <= 0 || cidr >= 32)
4996 +               return -EINVAL; 
4997 +
4998 +       id = ipportnethash_id_cidr(set, hash_ip, ip, port, ip1, cidr);
4999 +
5000 +       if (id == UINT_MAX)
5001 +               return -EEXIST;
5002 +               
5003 +       elem = HARRAY_ELEM(map->members, struct ipportip *, id);
5004 +       elem->ip = elem->ip1 = 0;
5005 +       map->elements--;
5006 +       if (!map->nets[cidr-1]--)
5007 +               del_cidr_size(map->cidr, cidr);
5008 +
5009 +       return 0;
5010 +}
5011 +
5012 +UADT(ipportnethash, del, req->port, req->ip1, req->cidr)
5013 +KADT(ipportnethash, del, ipaddr, port, ip1, cidr)
5014 +
5015 +static inline int
5016 +__ipportnethash_create(const struct ip_set_req_ipportnethash_create *req,
5017 +                      struct ip_set_ipportnethash *map)
5018 +{
5019 +       if (req->to - req->from > MAX_RANGE) {
5020 +               ip_set_printk("range too big, %d elements (max %d)",
5021 +                             req->to - req->from + 1, MAX_RANGE+1);
5022 +               return -ENOEXEC;
5023 +       }
5024 +       map->first_ip = req->from;
5025 +       map->last_ip = req->to;
5026 +       memset(map->cidr, 0, sizeof(map->cidr));
5027 +       memset(map->nets, 0, sizeof(map->nets));
5028 +       return 0;
5029 +}
5030 +
5031 +HASH_CREATE(ipportnethash, struct ipportip)
5032 +HASH_DESTROY(ipportnethash)
5033 +HASH_FLUSH_CIDR(ipportnethash, struct ipportip);
5034 +
5035 +static inline void
5036 +__ipportnethash_list_header(const struct ip_set_ipportnethash *map,
5037 +                           struct ip_set_req_ipportnethash_create *header)
5038 +{
5039 +       header->from = map->first_ip;
5040 +       header->to = map->last_ip;
5041 +}
5042 +
5043 +HASH_LIST_HEADER(ipportnethash)
5044 +
5045 +HASH_LIST_MEMBERS_SIZE(ipportnethash, struct ipportip)
5046 +HASH_LIST_MEMBERS_MEMCPY(ipportnethash, struct ipportip)
5047 +
5048 +IP_SET_RTYPE(ipportnethash, IPSET_TYPE_IP | IPSET_TYPE_PORT
5049 +                           | IPSET_TYPE_IP1 | IPSET_DATA_TRIPLE)
5050 +
5051 +MODULE_LICENSE("GPL");
5052 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
5053 +MODULE_DESCRIPTION("ipportnethash type of IP sets");
5054 +module_param(limit, int, 0600);
5055 +MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
5056 +
5057 +REGISTER_MODULE(ipportnethash)
5058 --- /dev/null
5059 +++ b/net/ipv4/netfilter/ip_set_iptree.c
5060 @@ -0,0 +1,466 @@
5061 +/* Copyright (C) 2005-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
5062 + *
5063 + * This program is free software; you can redistribute it and/or modify
5064 + * it under the terms of the GNU General Public License version 2 as
5065 + * published by the Free Software Foundation.
5066 + */
5067 +
5068 +/* Kernel module implementing an IP set type: the iptree type */
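+/* Addresses live in a 256-ary tree indexed by their four octets:
+ * map->tree[a]->tree[b]->tree[c] leads to a leaf whose expires[d] slot is
+ * nonzero when a.b.c.d is in the set - an absolute jiffies value when
+ * timeouts are used, 1 otherwise. */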
5069 +
5070 +#include <linux/module.h>
5071 +#include <linux/moduleparam.h>
5072 +#include <linux/ip.h>
5073 +#include <linux/skbuff.h>
5074 +#include <linux/slab.h>
5075 +#include <linux/delay.h>
5076 +#include <linux/errno.h>
5077 +#include <asm/uaccess.h>
5078 +#include <asm/bitops.h>
5079 +#include <linux/spinlock.h>
5080 +#include <linux/timer.h>
5081 +
5082 +#include <linux/netfilter_ipv4/ip_set.h>
5083 +#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
5084 +#include <linux/netfilter_ipv4/ip_set_iptree.h>
5085 +
5086 +static int limit = MAX_RANGE;
5087 +
5088 +/* Garbage collection interval in seconds: */
5089 +#define IPTREE_GC_TIME         5*60
5090 +/* Sleep this many milliseconds before trying again
5091 + * to delete the gc timer when destroying/flushing a set */
5092 +#define IPTREE_DESTROY_SLEEP   100
5093 +
5094 +static __KMEM_CACHE_T__ *branch_cachep;
5095 +static __KMEM_CACHE_T__ *leaf_cachep;
5096 +
5097 +
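+/* Split the host byte order IPv4 address pointed to by addrp into its
+ * four octets, a being the most significant one. */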
5098 +#if defined(__LITTLE_ENDIAN)
5099 +#define ABCD(a,b,c,d,addrp) do {               \
5100 +       a = ((unsigned char *)addrp)[3];        \
5101 +       b = ((unsigned char *)addrp)[2];        \
5102 +       c = ((unsigned char *)addrp)[1];        \
5103 +       d = ((unsigned char *)addrp)[0];        \
5104 +} while (0)
5105 +#elif defined(__BIG_ENDIAN)
5106 +#define ABCD(a,b,c,d,addrp) do {               \
5107 +       a = ((unsigned char *)addrp)[0];        \
5108 +       b = ((unsigned char *)addrp)[1];        \
5109 +       c = ((unsigned char *)addrp)[2];        \
5110 +       d = ((unsigned char *)addrp)[3];        \
5111 +} while (0)
5112 +#else
5113 +#error "Please fix asm/byteorder.h"
5114 +#endif /* __LITTLE_ENDIAN */
5115 +
5116 +#define TESTIP_WALK(map, elem, branch) do {    \
5117 +       if ((map)->tree[elem]) {                \
5118 +               branch = (map)->tree[elem];     \
5119 +       } else                                  \
5120 +               return 0;                       \
5121 +} while (0)
5122 +
5123 +static inline int
5124 +iptree_test(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
5125 +{
5126 +       struct ip_set_iptree *map = set->data;
5127 +       struct ip_set_iptreeb *btree;
5128 +       struct ip_set_iptreec *ctree;
5129 +       struct ip_set_iptreed *dtree;
5130 +       unsigned char a,b,c,d;
5131 +
5132 +       if (!ip)
5133 +               return -ERANGE;
5134 +       
5135 +       *hash_ip = ip;
5136 +       ABCD(a, b, c, d, hash_ip);
5137 +       DP("%u %u %u %u timeout %u", a, b, c, d, map->timeout);
5138 +       TESTIP_WALK(map, a, btree);
5139 +       TESTIP_WALK(btree, b, ctree);
5140 +       TESTIP_WALK(ctree, c, dtree);
5141 +       DP("%lu %lu", dtree->expires[d], jiffies);
5142 +       return dtree->expires[d]
5143 +              && (!map->timeout
5144 +                  || time_after(dtree->expires[d], jiffies));
5145 +}
5146 +
5147 +#define KADT_CONDITION
5148 +
5149 +UADT(iptree, test)
5150 +KADT(iptree, test, ipaddr)
5151 +
5152 +#define ADDIP_WALK(map, elem, branch, type, cachep) do {       \
5153 +       if ((map)->tree[elem]) {                                \
5154 +               DP("found %u", elem);                           \
5155 +               branch = (map)->tree[elem];                     \
5156 +       } else {                                                \
5157 +               branch = (type *)                               \
5158 +                       kmem_cache_alloc(cachep, GFP_ATOMIC);   \
5159 +               if (branch == NULL)                             \
5160 +                       return -ENOMEM;                         \
5161 +               memset(branch, 0, sizeof(*branch));             \
5162 +               (map)->tree[elem] = branch;                     \
5163 +               DP("alloc %u", elem);                           \
5164 +       }                                                       \
5165 +} while (0)    
5166 +
5167 +static inline int
5168 +iptree_add(struct ip_set *set, ip_set_ip_t *hash_ip,
5169 +          ip_set_ip_t ip, unsigned int timeout)
5170 +{
5171 +       struct ip_set_iptree *map = set->data;
5172 +       struct ip_set_iptreeb *btree;
5173 +       struct ip_set_iptreec *ctree;
5174 +       struct ip_set_iptreed *dtree;
5175 +       unsigned char a,b,c,d;
5176 +       int ret = 0;
5177 +       
5178 +       if (!ip || map->elements >= limit)
5179 +               /* We could call the garbage collector
5180 +                * but it's probably overkill */
5181 +               return -ERANGE;
5182 +       
5183 +       *hash_ip = ip;
5184 +       ABCD(a, b, c, d, hash_ip);
5185 +       DP("%u %u %u %u timeout %u", a, b, c, d, timeout);
5186 +       ADDIP_WALK(map, a, btree, struct ip_set_iptreeb, branch_cachep);
5187 +       ADDIP_WALK(btree, b, ctree, struct ip_set_iptreec, branch_cachep);
5188 +       ADDIP_WALK(ctree, c, dtree, struct ip_set_iptreed, leaf_cachep);
5189 +       if (dtree->expires[d]
5190 +           && (!map->timeout || time_after(dtree->expires[d], jiffies)))
5191 +               ret = -EEXIST;
5192 +       if (map->timeout && timeout == 0)
5193 +               timeout = map->timeout;
5194 +       dtree->expires[d] = map->timeout ? (timeout * HZ + jiffies) : 1;
5195 +       /* Lottery: the computed expiry is exactly 0, which would mean "not set" */
5196 +       if (dtree->expires[d] == 0)
5197 +               dtree->expires[d] = 1;
5198 +       DP("%u %lu", d, dtree->expires[d]);
5199 +       if (ret == 0)
5200 +               map->elements++;
5201 +       return ret;
5202 +}
5203 +
5204 +UADT(iptree, add, req->timeout)
5205 +KADT(iptree, add, ipaddr, 0)
5206 +
5207 +#define DELIP_WALK(map, elem, branch) do {     \
5208 +       if ((map)->tree[elem]) {                \
5209 +               branch = (map)->tree[elem];     \
5210 +       } else                                  \
5211 +               return -EEXIST;                 \
5212 +} while (0)
5213 +
5214 +static inline int
5215 +iptree_del(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
5216 +{
5217 +       struct ip_set_iptree *map = set->data;
5218 +       struct ip_set_iptreeb *btree;
5219 +       struct ip_set_iptreec *ctree;
5220 +       struct ip_set_iptreed *dtree;
5221 +       unsigned char a,b,c,d;
5222 +       
5223 +       if (!ip)
5224 +               return -ERANGE;
5225 +               
5226 +       *hash_ip = ip;
5227 +       ABCD(a, b, c, d, hash_ip);
5228 +       DELIP_WALK(map, a, btree);
5229 +       DELIP_WALK(btree, b, ctree);
5230 +       DELIP_WALK(ctree, c, dtree);
5231 +
5232 +       if (dtree->expires[d]) {
5233 +               dtree->expires[d] = 0;
5234 +               map->elements--;
5235 +               return 0;
5236 +       }
5237 +       return -EEXIST;
5238 +}
5239 +
5240 +UADT(iptree, del)
5241 +KADT(iptree, del, ipaddr)
5242 +
5243 +#define LOOP_WALK_BEGIN(map, i, branch) \
5244 +       for (i = 0; i < 256; i++) {     \
5245 +               if (!(map)->tree[i])    \
5246 +                       continue;       \
5247 +               branch = (map)->tree[i]
5248 +
5249 +#define LOOP_WALK_END }
5250 +
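+/* Periodic garbage collector: walk the whole tree, clear expired entries,
+ * free leaves and branches that have become empty, then re-arm the timer. */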
5251 +static void
5252 +ip_tree_gc(unsigned long ul_set)
5253 +{
5254 +       struct ip_set *set = (struct ip_set *) ul_set;
5255 +       struct ip_set_iptree *map = set->data;
5256 +       struct ip_set_iptreeb *btree;
5257 +       struct ip_set_iptreec *ctree;
5258 +       struct ip_set_iptreed *dtree;
5259 +       unsigned int a,b,c,d;
5260 +       unsigned char i,j,k;
5261 +
5262 +       i = j = k = 0;
5263 +       DP("gc: %s", set->name);
5264 +       write_lock_bh(&set->lock);
5265 +       LOOP_WALK_BEGIN(map, a, btree);
5266 +       LOOP_WALK_BEGIN(btree, b, ctree);
5267 +       LOOP_WALK_BEGIN(ctree, c, dtree);
5268 +       for (d = 0; d < 256; d++) {
5269 +               if (dtree->expires[d]) {
5270 +                       DP("gc: %u %u %u %u: expires %lu jiffies %lu",
5271 +                           a, b, c, d,
5272 +                           dtree->expires[d], jiffies);
5273 +                       if (map->timeout
5274 +                           && time_before(dtree->expires[d], jiffies)) {
5275 +                               dtree->expires[d] = 0;
5276 +                               map->elements--;
5277 +                       } else
5278 +                               k = 1;
5279 +               }
5280 +       }
5281 +       if (k == 0) {
5282 +               DP("gc: %s: leaf %u %u %u empty",
5283 +                   set->name, a, b, c);
5284 +               kmem_cache_free(leaf_cachep, dtree);
5285 +               ctree->tree[c] = NULL;
5286 +       } else {
5287 +               DP("gc: %s: leaf %u %u %u not empty",
5288 +                   set->name, a, b, c);
5289 +               j = 1;
5290 +               k = 0;
5291 +       }
5292 +       LOOP_WALK_END;
5293 +       if (j == 0) {
5294 +               DP("gc: %s: branch %u %u empty",
5295 +                   set->name, a, b);
5296 +               kmem_cache_free(branch_cachep, ctree);
5297 +               btree->tree[b] = NULL;
5298 +       } else {
5299 +               DP("gc: %s: branch %u %u not empty",
5300 +                   set->name, a, b);
5301 +               i = 1;
5302 +               j = k = 0;
5303 +       }
5304 +       LOOP_WALK_END;
5305 +       if (i == 0) {
5306 +               DP("gc: %s: branch %u empty",
5307 +                   set->name, a);
5308 +               kmem_cache_free(branch_cachep, btree);
5309 +               map->tree[a] = NULL;
5310 +       } else {
5311 +               DP("gc: %s: branch %u not empty",
5312 +                   set->name, a);
5313 +               i = j = k = 0;
5314 +       }
5315 +       LOOP_WALK_END;
5316 +       write_unlock_bh(&set->lock);
5317 +       
5318 +       map->gc.expires = jiffies + map->gc_interval * HZ;
5319 +       add_timer(&map->gc);
5320 +}
5321 +
5322 +static inline void
5323 +init_gc_timer(struct ip_set *set)
5324 +{
5325 +       struct ip_set_iptree *map = set->data;
5326 +
5327 +       /* Even if there is no timeout for the entries,
5328 +        * we still have to call the gc because delete
5329 +        * does not clean up empty branches */
5330 +       map->gc_interval = IPTREE_GC_TIME;
5331 +       init_timer(&map->gc);
5332 +       map->gc.data = (unsigned long) set;
5333 +       map->gc.function = ip_tree_gc;
5334 +       map->gc.expires = jiffies + map->gc_interval * HZ;
5335 +       add_timer(&map->gc);
5336 +}
5337 +
5338 +static int
5339 +iptree_create(struct ip_set *set, const void *data, u_int32_t size)
5340 +{
5341 +       const struct ip_set_req_iptree_create *req = data;
5342 +       struct ip_set_iptree *map;
5343 +
5344 +       if (size != sizeof(struct ip_set_req_iptree_create)) {
5345 +               ip_set_printk("data length wrong (want %zu, have %lu)",
5346 +                             sizeof(struct ip_set_req_iptree_create),
5347 +                             (unsigned long)size);
5348 +               return -EINVAL;
5349 +       }
5350 +
5351 +       map = kmalloc(sizeof(struct ip_set_iptree), GFP_KERNEL);
5352 +       if (!map) {
5353 +               DP("out of memory for %zu bytes",
5354 +                  sizeof(struct ip_set_iptree));
5355 +               return -ENOMEM;
5356 +       }
5357 +       memset(map, 0, sizeof(*map));
5358 +       map->timeout = req->timeout;
5359 +       map->elements = 0;
5360 +       set->data = map;
5361 +
5362 +       init_gc_timer(set);
5363 +
5364 +       return 0;
5365 +}
5366 +
5367 +static inline void
5368 +__flush(struct ip_set_iptree *map)
5369 +{
5370 +       struct ip_set_iptreeb *btree;
5371 +       struct ip_set_iptreec *ctree;
5372 +       struct ip_set_iptreed *dtree;
5373 +       unsigned int a,b,c;
5374 +
5375 +       LOOP_WALK_BEGIN(map, a, btree);
5376 +       LOOP_WALK_BEGIN(btree, b, ctree);
5377 +       LOOP_WALK_BEGIN(ctree, c, dtree);
5378 +       kmem_cache_free(leaf_cachep, dtree);
5379 +       LOOP_WALK_END;
5380 +       kmem_cache_free(branch_cachep, ctree);
5381 +       LOOP_WALK_END;
5382 +       kmem_cache_free(branch_cachep, btree);
5383 +       LOOP_WALK_END;
5384 +       map->elements = 0;
5385 +}
5386 +
5387 +static void
5388 +iptree_destroy(struct ip_set *set)
5389 +{
5390 +       struct ip_set_iptree *map = set->data;
5391 +
5392 +       /* gc might be running */
5393 +       while (!del_timer(&map->gc))
5394 +               msleep(IPTREE_DESTROY_SLEEP);
5395 +       __flush(map);
5396 +       kfree(map);
5397 +       set->data = NULL;
5398 +}
5399 +
5400 +static void
5401 +iptree_flush(struct ip_set *set)
5402 +{
5403 +       struct ip_set_iptree *map = set->data;
5404 +       unsigned int timeout = map->timeout;
5405 +       
5406 +       /* gc might be running */
5407 +       while (!del_timer(&map->gc))
5408 +               msleep(IPTREE_DESTROY_SLEEP);
5409 +       __flush(map);
5410 +       memset(map, 0, sizeof(*map));
5411 +       map->timeout = timeout;
5412 +
5413 +       init_gc_timer(set);
5414 +}
5415 +
5416 +static void
5417 +iptree_list_header(const struct ip_set *set, void *data)
5418 +{
5419 +       const struct ip_set_iptree *map = set->data;
5420 +       struct ip_set_req_iptree_create *header = data;
5421 +
5422 +       header->timeout = map->timeout;
5423 +}
5424 +
5425 +static int
5426 +iptree_list_members_size(const struct ip_set *set)
5427 +{
5428 +       const struct ip_set_iptree *map = set->data;
5429 +       struct ip_set_iptreeb *btree;
5430 +       struct ip_set_iptreec *ctree;
5431 +       struct ip_set_iptreed *dtree;
5432 +       unsigned int a,b,c,d;
5433 +       unsigned int count = 0;
5434 +
5435 +       LOOP_WALK_BEGIN(map, a, btree);
5436 +       LOOP_WALK_BEGIN(btree, b, ctree);
5437 +       LOOP_WALK_BEGIN(ctree, c, dtree);
5438 +       for (d = 0; d < 256; d++) {
5439 +               if (dtree->expires[d]
5440 +                   && (!map->timeout || time_after(dtree->expires[d], jiffies)))
5441 +                       count++;
5442 +       }
5443 +       LOOP_WALK_END;
5444 +       LOOP_WALK_END;
5445 +       LOOP_WALK_END;
5446 +
5447 +       DP("members %u", count);
5448 +       return (count * sizeof(struct ip_set_req_iptree));
5449 +}
5450 +
5451 +static void
5452 +iptree_list_members(const struct ip_set *set, void *data)
5453 +{
5454 +       const struct ip_set_iptree *map = set->data;
5455 +       struct ip_set_iptreeb *btree;
5456 +       struct ip_set_iptreec *ctree;
5457 +       struct ip_set_iptreed *dtree;
5458 +       unsigned int a,b,c,d;
5459 +       size_t offset = 0;
5460 +       struct ip_set_req_iptree *entry;
5461 +
5462 +       LOOP_WALK_BEGIN(map, a, btree);
5463 +       LOOP_WALK_BEGIN(btree, b, ctree);
5464 +       LOOP_WALK_BEGIN(ctree, c, dtree);
5465 +       for (d = 0; d < 256; d++) {
5466 +               if (dtree->expires[d]
5467 +                   && (!map->timeout || time_after(dtree->expires[d], jiffies))) {
5468 +                       entry = data + offset;
5469 +                       entry->ip = ((a << 24) | (b << 16) | (c << 8) | d);
5470 +                       entry->timeout = !map->timeout ? 0
5471 +                               : (dtree->expires[d] - jiffies)/HZ;
5472 +                       offset += sizeof(struct ip_set_req_iptree);
5473 +               }
5474 +       }
5475 +       LOOP_WALK_END;
5476 +       LOOP_WALK_END;
5477 +       LOOP_WALK_END;
5478 +}
5479 +
5480 +IP_SET_TYPE(iptree, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
5481 +
5482 +MODULE_LICENSE("GPL");
5483 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
5484 +MODULE_DESCRIPTION("iptree type of IP sets");
5485 +module_param(limit, int, 0600);
5486 +MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
5487 +
5488 +static int __init ip_set_iptree_init(void)
5489 +{
5490 +       int ret;
5491 +       
5492 +       branch_cachep = KMEM_CACHE_CREATE("ip_set_iptreeb",
5493 +                                         sizeof(struct ip_set_iptreeb));
5494 +       if (!branch_cachep) {
5495 +               printk(KERN_ERR "Unable to create ip_set_iptreeb slab cache\n");
5496 +               ret = -ENOMEM;
5497 +               goto out;
5498 +       }
5499 +       leaf_cachep = KMEM_CACHE_CREATE("ip_set_iptreed",
5500 +                                       sizeof(struct ip_set_iptreed));
5501 +       if (!leaf_cachep) {
5502 +               printk(KERN_ERR "Unable to create ip_set_iptreed slab cache\n");
5503 +               ret = -ENOMEM;
5504 +               goto free_branch;
5505 +       }
5506 +       ret = ip_set_register_set_type(&ip_set_iptree);
5507 +       if (ret == 0)
5508 +               goto out;
5509 +
5510 +       kmem_cache_destroy(leaf_cachep);
5511 +    free_branch:       
5512 +       kmem_cache_destroy(branch_cachep);
5513 +    out:
5514 +       return ret;
5515 +}
5516 +
5517 +static void __exit ip_set_iptree_fini(void)
5518 +{
5519 +       /* FIXME: possible race with ip_set_create() */
5520 +       ip_set_unregister_set_type(&ip_set_iptree);
5521 +       kmem_cache_destroy(leaf_cachep);
5522 +       kmem_cache_destroy(branch_cachep);
5523 +}
5524 +
5525 +module_init(ip_set_iptree_init);
5526 +module_exit(ip_set_iptree_fini);
5527 --- /dev/null
5528 +++ b/net/ipv4/netfilter/ip_set_iptreemap.c
5529 @@ -0,0 +1,708 @@
5530 +/* Copyright (C) 2007 Sven Wegener <sven.wegener@stealer.net>
5531 + *
5532 + * This program is free software; you can redistribute it and/or modify it
5533 + * under the terms of the GNU General Public License version 2 as published by
5534 + * the Free Software Foundation.
5535 + */
5536 +
5537 +/* This module implements the iptreemap ipset type. It uses bitmaps to
5538 + * represent every single IPv4 address as a bit. The bitmaps are managed in a
5539 + * tree structure, where the first three octets of an address are used as an
5540 + * index to find the bitmap and the last octet is used as the bit number.
5541 + */
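+/* For example, 192.168.1.77 is kept as bit number 77 of the bitmap
+ * reached through tree[192] -> tree[168] -> tree[1]. */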
5542 +
5543 +#include <linux/kernel.h>
5544 +#include <linux/module.h>
5545 +#include <linux/ip.h>
5546 +#include <linux/skbuff.h>
5547 +#include <linux/slab.h>
5548 +#include <linux/delay.h>
5549 +#include <linux/errno.h>
5550 +#include <asm/uaccess.h>
5551 +#include <asm/bitops.h>
5552 +#include <linux/spinlock.h>
5553 +#include <linux/timer.h>
5554 +
5555 +#include <linux/netfilter_ipv4/ip_set.h>
5556 +#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
5557 +#include <linux/netfilter_ipv4/ip_set_iptreemap.h>
5558 +
5559 +#define IPTREEMAP_DEFAULT_GC_TIME (5 * 60)
5560 +#define IPTREEMAP_DESTROY_SLEEP (100)
5561 +
5562 +static __KMEM_CACHE_T__ *cachep_b;
5563 +static __KMEM_CACHE_T__ *cachep_c;
5564 +static __KMEM_CACHE_T__ *cachep_d;
5565 +
5566 +static struct ip_set_iptreemap_d *fullbitmap_d;
5567 +static struct ip_set_iptreemap_c *fullbitmap_c;
5568 +static struct ip_set_iptreemap_b *fullbitmap_b;
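+/* Sentinel nodes standing for a completely full subtree: a tree slot
+ * pointing at one of these means "every address below is in the set"
+ * without allocating a private bitmap; deleting from such a subtree first
+ * replaces the sentinel with a writable copy (see DELIP_WALK). */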
5569 +
5570 +#if defined(__LITTLE_ENDIAN)
5571 +#define ABCD(a, b, c, d, addr) \
5572 +       do { \
5573 +               a = ((unsigned char *)addr)[3]; \
5574 +               b = ((unsigned char *)addr)[2]; \
5575 +               c = ((unsigned char *)addr)[1]; \
5576 +               d = ((unsigned char *)addr)[0]; \
5577 +       } while (0)
5578 +#elif defined(__BIG_ENDIAN)
5579 +#define ABCD(a,b,c,d,addrp) do {               \
5580 +       a = ((unsigned char *)addrp)[0];        \
5581 +       b = ((unsigned char *)addrp)[1];        \
5582 +       c = ((unsigned char *)addrp)[2];        \
5583 +       d = ((unsigned char *)addrp)[3];        \
5584 +} while (0)
5585 +#else
5586 +#error "Please fix asm/byteorder.h"
5587 +#endif /* __LITTLE_ENDIAN */
5588 +
5589 +#define TESTIP_WALK(map, elem, branch, full) \
5590 +       do { \
5591 +               branch = (map)->tree[elem]; \
5592 +               if (!branch) \
5593 +                       return 0; \
5594 +               else if (branch == full) \
5595 +                       return 1; \
5596 +       } while (0)
5597 +
5598 +#define ADDIP_WALK(map, elem, branch, type, cachep, full) \
5599 +       do { \
5600 +               branch = (map)->tree[elem]; \
5601 +               if (!branch) { \
5602 +                       branch = (type *) kmem_cache_alloc(cachep, GFP_ATOMIC); \
5603 +                       if (!branch) \
5604 +                               return -ENOMEM; \
5605 +                       memset(branch, 0, sizeof(*branch)); \
5606 +                       (map)->tree[elem] = branch; \
5607 +               } else if (branch == full) { \
5608 +                       return -EEXIST; \
5609 +               } \
5610 +       } while (0)
5611 +
5612 +#define ADDIP_RANGE_LOOP(map, a, a1, a2, hint, branch, full, cachep, free) \
5613 +       for (a = a1; a <= a2; a++) { \
5614 +               branch = (map)->tree[a]; \
5615 +               if (branch != full) { \
5616 +                       if ((a > a1 && a < a2) || (hint)) { \
5617 +                               if (branch) \
5618 +                                       free(branch); \
5619 +                               (map)->tree[a] = full; \
5620 +                               continue; \
5621 +                       } else if (!branch) { \
5622 +                               branch = kmem_cache_alloc(cachep, GFP_ATOMIC); \
5623 +                               if (!branch) \
5624 +                                       return -ENOMEM; \
5625 +                               memset(branch, 0, sizeof(*branch)); \
5626 +                               (map)->tree[a] = branch; \
5627 +                       }
5628 +
5629 +#define ADDIP_RANGE_LOOP_END() \
5630 +               } \
5631 +       }
5632 +
5633 +#define DELIP_WALK(map, elem, branch, cachep, full, flags) \
5634 +       do { \
5635 +               branch = (map)->tree[elem]; \
5636 +               if (!branch) { \
5637 +                       return -EEXIST; \
5638 +               } else if (branch == full) { \
5639 +                       branch = kmem_cache_alloc(cachep, flags); \
5640 +                       if (!branch) \
5641 +                               return -ENOMEM; \
5642 +                       memcpy(branch, full, sizeof(*full)); \
5643 +                       (map)->tree[elem] = branch; \
5644 +               } \
5645 +       } while (0)
5646 +
5647 +#define DELIP_RANGE_LOOP(map, a, a1, a2, hint, branch, full, cachep, free, flags) \
5648 +       for (a = a1; a <= a2; a++) { \
5649 +               branch = (map)->tree[a]; \
5650 +               if (branch) { \
5651 +                       if ((a > a1 && a < a2) || (hint)) { \
5652 +                               if (branch != full) \
5653 +                                       free(branch); \
5654 +                               (map)->tree[a] = NULL; \
5655 +                               continue; \
5656 +                       } else if (branch == full) { \
5657 +                               branch = kmem_cache_alloc(cachep, flags); \
5658 +                               if (!branch) \
5659 +                                       return -ENOMEM; \
5660 +                               memcpy(branch, full, sizeof(*branch)); \
5661 +                               (map)->tree[a] = branch; \
5662 +                       }
5663 +
5664 +#define DELIP_RANGE_LOOP_END() \
5665 +               } \
5666 +       }
5667 +
5668 +#define LOOP_WALK_BEGIN(map, i, branch) \
5669 +       for (i = 0; i < 256; i++) { \
5670 +               branch = (map)->tree[i]; \
5671 +               if (likely(!branch)) \
5672 +                       continue;
5673 +
5674 +#define LOOP_WALK_END() \
5675 +       }
5676 +
5677 +#define LOOP_WALK_BEGIN_GC(map, i, branch, full, cachep, count) \
5678 +       count = -256; \
5679 +       for (i = 0; i < 256; i++) { \
5680 +               branch = (map)->tree[i]; \
5681 +               if (likely(!branch)) \
5682 +                       continue; \
5683 +               count++; \
5684 +               if (branch == full) { \
5685 +                       count++; \
5686 +                       continue; \
5687 +               }
5688 +
5689 +#define LOOP_WALK_END_GC(map, i, branch, full, cachep, count) \
5690 +               if (-256 == count) { \
5691 +                       kmem_cache_free(cachep, branch); \
5692 +                       (map)->tree[i] = NULL; \
5693 +               } else if (256 == count) { \
5694 +                       kmem_cache_free(cachep, branch); \
5695 +                       (map)->tree[i] = full; \
5696 +               } \
5697 +       }
5698 +
5699 +#define LOOP_WALK_BEGIN_COUNT(map, i, branch, inrange, count) \
5700 +       for (i = 0; i < 256; i++) { \
5701 +               if (!(map)->tree[i]) { \
5702 +                       if (inrange) { \
5703 +                               count++; \
5704 +                               inrange = 0; \
5705 +                       } \
5706 +                       continue; \
5707 +               } \
5708 +               branch = (map)->tree[i];
5709 +
5710 +#define LOOP_WALK_END_COUNT() \
5711 +       }
5712 +
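     +/*
     + * Helpers for the range operations below: GETVALUEn yield the start or
     + * end octet of the requested range when the higher octets sit exactly
     + * on the range boundary, and the default r (0 or 255) otherwise. CHECKn
     + * tell whether the subtree at a boundary octet is covered completely by
     + * the range, in which case the range loops substitute the shared full
     + * bitmap.
     + */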
5713 +#define GETVALUE1(a, a1, b1, r) \
5714 +       (a == a1 ? b1 : r)
5715 +
5716 +#define GETVALUE2(a, b, a1, b1, c1, r) \
5717 +       (a == a1 && b == b1 ? c1 : r)
5718 +
5719 +#define GETVALUE3(a, b, c, a1, b1, c1, d1, r) \
5720 +       (a == a1 && b == b1 && c == c1 ? d1 : r)
5721 +
5722 +#define CHECK1(a, a1, a2, b1, b2, c1, c2, d1, d2) \
5723 +       ( \
5724 +               GETVALUE1(a, a1, b1, 0) == 0 \
5725 +               && GETVALUE1(a, a2, b2, 255) == 255 \
5726 +               && c1 == 0 \
5727 +               && c2 == 255 \
5728 +               && d1 == 0 \
5729 +               && d2 == 255 \
5730 +       )
5731 +
5732 +#define CHECK2(a, b, a1, a2, b1, b2, c1, c2, d1, d2) \
5733 +       ( \
5734 +               GETVALUE2(a, b, a1, b1, c1, 0) == 0 \
5735 +               && GETVALUE2(a, b, a2, b2, c2, 255) == 255 \
5736 +               && d1 == 0 \
5737 +               && d2 == 255 \
5738 +       )
5739 +
5740 +#define CHECK3(a, b, c, a1, a2, b1, b2, c1, c2, d1, d2) \
5741 +       ( \
5742 +               GETVALUE3(a, b, c, a1, b1, c1, d1, 0) == 0 \
5743 +               && GETVALUE3(a, b, c, a2, b2, c2, d2, 255) == 255 \
5744 +       )
5745 +
5746 +
5747 +static inline void
5748 +free_d(struct ip_set_iptreemap_d *map)
5749 +{
5750 +       kmem_cache_free(cachep_d, map);
5751 +}
5752 +
5753 +static inline void
5754 +free_c(struct ip_set_iptreemap_c *map)
5755 +{
5756 +       struct ip_set_iptreemap_d *dtree;
5757 +       unsigned int i;
5758 +
5759 +       LOOP_WALK_BEGIN(map, i, dtree) {
5760 +               if (dtree != fullbitmap_d)
5761 +                       free_d(dtree);
5762 +       } LOOP_WALK_END();
5763 +
5764 +       kmem_cache_free(cachep_c, map);
5765 +}
5766 +
5767 +static inline void
5768 +free_b(struct ip_set_iptreemap_b *map)
5769 +{
5770 +       struct ip_set_iptreemap_c *ctree;
5771 +       unsigned int i;
5772 +
5773 +       LOOP_WALK_BEGIN(map, i, ctree) {
5774 +               if (ctree != fullbitmap_c)
5775 +                       free_c(ctree);
5776 +       } LOOP_WALK_END();
5777 +
5778 +       kmem_cache_free(cachep_b, map);
5779 +}
5780 +
5781 +static inline int
5782 +iptreemap_test(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
5783 +{
5784 +       struct ip_set_iptreemap *map = set->data;
5785 +       struct ip_set_iptreemap_b *btree;
5786 +       struct ip_set_iptreemap_c *ctree;
5787 +       struct ip_set_iptreemap_d *dtree;
5788 +       unsigned char a, b, c, d;
5789 +
5790 +       *hash_ip = ip;
5791 +
5792 +       ABCD(a, b, c, d, hash_ip);
5793 +
5794 +       TESTIP_WALK(map, a, btree, fullbitmap_b);
5795 +       TESTIP_WALK(btree, b, ctree, fullbitmap_c);
5796 +       TESTIP_WALK(ctree, c, dtree, fullbitmap_d);
5797 +
5798 +       return !!test_bit(d, (void *) dtree->bitmap);
5799 +}
5800 +
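     +/* KADT_CONDITION is pasted into the KADT() wrapper below; the test
     + * operation needs no extra precondition, so it is left empty here. */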
5801 +#define KADT_CONDITION
5802 +
5803 +UADT(iptreemap, test)
5804 +KADT(iptreemap, test, ipaddr)
5805 +
5806 +static inline int
5807 +__addip_single(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
5808 +{
5809 +       struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5810 +       struct ip_set_iptreemap_b *btree;
5811 +       struct ip_set_iptreemap_c *ctree;
5812 +       struct ip_set_iptreemap_d *dtree;
5813 +       unsigned char a, b, c, d;
5814 +
5815 +       *hash_ip = ip;
5816 +
5817 +       ABCD(a, b, c, d, hash_ip);
5818 +
5819 +       ADDIP_WALK(map, a, btree, struct ip_set_iptreemap_b, cachep_b, fullbitmap_b);
5820 +       ADDIP_WALK(btree, b, ctree, struct ip_set_iptreemap_c, cachep_c, fullbitmap_c);
5821 +       ADDIP_WALK(ctree, c, dtree, struct ip_set_iptreemap_d, cachep_d, fullbitmap_d);
5822 +
5823 +       if (__test_and_set_bit(d, (void *) dtree->bitmap))
5824 +               return -EEXIST;
5825 +
5826 +       __set_bit(b, (void *) btree->dirty);
5827 +
5828 +       return 0;
5829 +}
5830 +
5831 +static inline int
5832 +iptreemap_add(struct ip_set *set, ip_set_ip_t *hash_ip,
5833 +             ip_set_ip_t start, ip_set_ip_t end)
5834 +{
5835 +       struct ip_set_iptreemap *map = set->data;
5836 +       struct ip_set_iptreemap_b *btree;
5837 +       struct ip_set_iptreemap_c *ctree;
5838 +       struct ip_set_iptreemap_d *dtree;
5839 +       unsigned int a, b, c, d;
5840 +       unsigned char a1, b1, c1, d1;
5841 +       unsigned char a2, b2, c2, d2;
5842 +
5843 +       if (start == end)
5844 +               return __addip_single(set, hash_ip, start);
5845 +
5846 +       *hash_ip = start;
5847 +
5848 +       ABCD(a1, b1, c1, d1, &start);
5849 +       ABCD(a2, b2, c2, d2, &end);
5850 +
5851 +       /* This is sooo ugly... */
5852 +       ADDIP_RANGE_LOOP(map, a, a1, a2, CHECK1(a, a1, a2, b1, b2, c1, c2, d1, d2), btree, fullbitmap_b, cachep_b, free_b) {
5853 +               ADDIP_RANGE_LOOP(btree, b, GETVALUE1(a, a1, b1, 0), GETVALUE1(a, a2, b2, 255), CHECK2(a, b, a1, a2, b1, b2, c1, c2, d1, d2), ctree, fullbitmap_c, cachep_c, free_c) {
5854 +                       ADDIP_RANGE_LOOP(ctree, c, GETVALUE2(a, b, a1, b1, c1, 0), GETVALUE2(a, b, a2, b2, c2, 255), CHECK3(a, b, c, a1, a2, b1, b2, c1, c2, d1, d2), dtree, fullbitmap_d, cachep_d, free_d) {
5855 +                               for (d = GETVALUE3(a, b, c, a1, b1, c1, d1, 0); d <= GETVALUE3(a, b, c, a2, b2, c2, d2, 255); d++)
5856 +                                       __set_bit(d, (void *) dtree->bitmap);
5857 +                               __set_bit(b, (void *) btree->dirty);
5858 +                       } ADDIP_RANGE_LOOP_END();
5859 +               } ADDIP_RANGE_LOOP_END();
5860 +       } ADDIP_RANGE_LOOP_END();
5861 +
5862 +       return 0;
5863 +}
5864 +
5865 +UADT0(iptreemap, add, min(req->ip, req->end), max(req->ip, req->end))
5866 +KADT(iptreemap, add, ipaddr, ip)
5867 +
5868 +static inline int
5869 +__delip_single(struct ip_set *set, ip_set_ip_t *hash_ip,
5870 +              ip_set_ip_t ip, gfp_t flags)
5871 +{
5872 +       struct ip_set_iptreemap *map = set->data;
5873 +       struct ip_set_iptreemap_b *btree;
5874 +       struct ip_set_iptreemap_c *ctree;
5875 +       struct ip_set_iptreemap_d *dtree;
5876 +       unsigned char a, b, c, d;
5877 +
5878 +       *hash_ip = ip;
5879 +
5880 +       ABCD(a, b, c, d, hash_ip);
5881 +
5882 +       DELIP_WALK(map, a, btree, cachep_b, fullbitmap_b, flags);
5883 +       DELIP_WALK(btree, b, ctree, cachep_c, fullbitmap_c, flags);
5884 +       DELIP_WALK(ctree, c, dtree, cachep_d, fullbitmap_d, flags);
5885 +
5886 +       if (!__test_and_clear_bit(d, (void *) dtree->bitmap))
5887 +               return -EEXIST;
5888 +
5889 +       __set_bit(b, (void *) btree->dirty);
5890 +
5891 +       return 0;
5892 +}
5893 +
5894 +static inline int
5895 +iptreemap_del(struct ip_set *set, ip_set_ip_t *hash_ip,
5896 +             ip_set_ip_t start, ip_set_ip_t end, gfp_t flags)
5897 +{
5898 +       struct ip_set_iptreemap *map = set->data;
5899 +       struct ip_set_iptreemap_b *btree;
5900 +       struct ip_set_iptreemap_c *ctree;
5901 +       struct ip_set_iptreemap_d *dtree;
5902 +       unsigned int a, b, c, d;
5903 +       unsigned char a1, b1, c1, d1;
5904 +       unsigned char a2, b2, c2, d2;
5905 +
5906 +       if (start == end)
5907 +               return __delip_single(set, hash_ip, start, flags);
5908 +
5909 +       *hash_ip = start;
5910 +
5911 +       ABCD(a1, b1, c1, d1, &start);
5912 +       ABCD(a2, b2, c2, d2, &end);
5913 +
5914 +       /* This is sooo ugly... */
5915 +       DELIP_RANGE_LOOP(map, a, a1, a2, CHECK1(a, a1, a2, b1, b2, c1, c2, d1, d2), btree, fullbitmap_b, cachep_b, free_b, flags) {
5916 +               DELIP_RANGE_LOOP(btree, b, GETVALUE1(a, a1, b1, 0), GETVALUE1(a, a2, b2, 255), CHECK2(a, b, a1, a2, b1, b2, c1, c2, d1, d2), ctree, fullbitmap_c, cachep_c, free_c, flags) {
5917 +                       DELIP_RANGE_LOOP(ctree, c, GETVALUE2(a, b, a1, b1, c1, 0), GETVALUE2(a, b, a2, b2, c2, 255), CHECK3(a, b, c, a1, a2, b1, b2, c1, c2, d1, d2), dtree, fullbitmap_d, cachep_d, free_d, flags) {
5918 +                               for (d = GETVALUE3(a, b, c, a1, b1, c1, d1, 0); d <= GETVALUE3(a, b, c, a2, b2, c2, d2, 255); d++)
5919 +                                       __clear_bit(d, (void *) dtree->bitmap);
5920 +                               __set_bit(b, (void *) btree->dirty);
5921 +                       } DELIP_RANGE_LOOP_END();
5922 +               } DELIP_RANGE_LOOP_END();
5923 +       } DELIP_RANGE_LOOP_END();
5924 +
5925 +       return 0;
5926 +}
5927 +
5928 +UADT0(iptreemap, del, min(req->ip, req->end), max(req->ip, req->end), GFP_KERNEL)
5929 +KADT(iptreemap, del, ipaddr, ip, GFP_ATOMIC)
5930 +
5931 +/* Check the status of the bitmap
5932 + * -1 == all bits cleared
5933 + *  1 == all bits set
5934 + *  0 == anything else
5935 + */
5936 +static inline int
5937 +bitmap_status(struct ip_set_iptreemap_d *dtree)
5938 +{
5939 +       unsigned char first = dtree->bitmap[0];
5940 +       int a;
5941 +
5942 +       for (a = 1; a < 32; a++)
5943 +               if (dtree->bitmap[a] != first)
5944 +                       return 0;
5945 +
5946 +       return (first == 0 ? -1 : (first == 255 ? 1 : 0));
5947 +}
5948 +
5949 +static void
5950 +gc(unsigned long addr)
5951 +{
5952 +       struct ip_set *set = (struct ip_set *) addr;
5953 +       struct ip_set_iptreemap *map = set->data;
5954 +       struct ip_set_iptreemap_b *btree;
5955 +       struct ip_set_iptreemap_c *ctree;
5956 +       struct ip_set_iptreemap_d *dtree;
5957 +       unsigned int a, b, c;
5958 +       int i, j, k;
5959 +
5960 +       write_lock_bh(&set->lock);
5961 +
5962 +       LOOP_WALK_BEGIN_GC(map, a, btree, fullbitmap_b, cachep_b, i) {
5963 +               LOOP_WALK_BEGIN_GC(btree, b, ctree, fullbitmap_c, cachep_c, j) {
5964 +                       if (!__test_and_clear_bit(b, (void *) btree->dirty))
5965 +                               continue;
5966 +                       LOOP_WALK_BEGIN_GC(ctree, c, dtree, fullbitmap_d, cachep_d, k) {
5967 +                               switch (bitmap_status(dtree)) {
5968 +                                       case -1:
5969 +                                               kmem_cache_free(cachep_d, dtree);
5970 +                                               ctree->tree[c] = NULL;
5971 +                                               k--;
5972 +                                       break;
5973 +                                       case 1:
5974 +                                               kmem_cache_free(cachep_d, dtree);
5975 +                                               ctree->tree[c] = fullbitmap_d;
5976 +                                               k++;
5977 +                                       break;
5978 +                               }
5979 +                       } LOOP_WALK_END();
5980 +               } LOOP_WALK_END_GC(btree, b, ctree, fullbitmap_c, cachep_c, k);
5981 +       } LOOP_WALK_END_GC(map, a, btree, fullbitmap_b, cachep_b, j);
5982 +
5983 +       write_unlock_bh(&set->lock);
5984 +
5985 +       map->gc.expires = jiffies + map->gc_interval * HZ;
5986 +       add_timer(&map->gc);
5987 +}
5988 +
5989 +static inline void
5990 +init_gc_timer(struct ip_set *set)
5991 +{
5992 +       struct ip_set_iptreemap *map = set->data;
5993 +
5994 +       init_timer(&map->gc);
5995 +       map->gc.data = (unsigned long) set;
5996 +       map->gc.function = gc;
5997 +       map->gc.expires = jiffies + map->gc_interval * HZ;
5998 +       add_timer(&map->gc);
5999 +}
6000 +
6001 +static int
6002 +iptreemap_create(struct ip_set *set, const void *data, u_int32_t size)
6003 +{
6004 +       const struct ip_set_req_iptreemap_create *req = data;
6005 +       struct ip_set_iptreemap *map;
6006 +
6007 +       map = kzalloc(sizeof(*map), GFP_KERNEL);
6008 +       if (!map)
6009 +               return -ENOMEM;
6010 +
6011 +       map->gc_interval = req->gc_interval ? req->gc_interval : IPTREEMAP_DEFAULT_GC_TIME;
6012 +       set->data = map;
6013 +
6014 +       init_gc_timer(set);
6015 +
6016 +       return 0;
6017 +}
6018 +
6019 +static inline void
6020 +__flush(struct ip_set_iptreemap *map)
6021 +{
6022 +       struct ip_set_iptreemap_b *btree;
6023 +       unsigned int a;
6024 +
6025 +       LOOP_WALK_BEGIN(map, a, btree);
6026 +               if (btree != fullbitmap_b)
6027 +                       free_b(btree);
6028 +       LOOP_WALK_END();
6029 +}
6030 +
6031 +static void
6032 +iptreemap_destroy(struct ip_set *set)
6033 +{
6034 +       struct ip_set_iptreemap *map = set->data;
6035 +
6036 +       while (!del_timer(&map->gc))
6037 +               msleep(IPTREEMAP_DESTROY_SLEEP);
6038 +
6039 +       __flush(map);
6040 +       kfree(map);
6041 +
6042 +       set->data = NULL;
6043 +}
6044 +
6045 +static void
6046 +iptreemap_flush(struct ip_set *set)
6047 +{
6048 +       struct ip_set_iptreemap *map = set->data;
6049 +
6050 +       while (!del_timer(&map->gc))
6051 +               msleep(IPTREEMAP_DESTROY_SLEEP);
6052 +
6053 +       __flush(map);
6054 +
6055 +       memset(map, 0, sizeof(*map));
6056 +
6057 +       init_gc_timer(set);
6058 +}
6059 +
6060 +static void
6061 +iptreemap_list_header(const struct ip_set *set, void *data)
6062 +{
6063 +       struct ip_set_iptreemap *map = set->data;
6064 +       struct ip_set_req_iptreemap_create *header = data;
6065 +
6066 +       header->gc_interval = map->gc_interval;
6067 +}
6068 +
6069 +static int
6070 +iptreemap_list_members_size(const struct ip_set *set)
6071 +{
6072 +       struct ip_set_iptreemap *map = set->data;
6073 +       struct ip_set_iptreemap_b *btree;
6074 +       struct ip_set_iptreemap_c *ctree;
6075 +       struct ip_set_iptreemap_d *dtree;
6076 +       unsigned int a, b, c, d, inrange = 0, count = 0;
6077 +
6078 +       LOOP_WALK_BEGIN_COUNT(map, a, btree, inrange, count) {
6079 +               LOOP_WALK_BEGIN_COUNT(btree, b, ctree, inrange, count) {
6080 +                       LOOP_WALK_BEGIN_COUNT(ctree, c, dtree, inrange, count) {
6081 +                               for (d = 0; d < 256; d++) {
6082 +                                       if (test_bit(d, (void *) dtree->bitmap)) {
6083 +                                               inrange = 1;
6084 +                                       } else if (inrange) {
6085 +                                               count++;
6086 +                                               inrange = 0;
6087 +                                       }
6088 +                               }
6089 +                       } LOOP_WALK_END_COUNT();
6090 +               } LOOP_WALK_END_COUNT();
6091 +       } LOOP_WALK_END_COUNT();
6092 +
6093 +       if (inrange)
6094 +               count++;
6095 +
6096 +       return (count * sizeof(struct ip_set_req_iptreemap));
6097 +}
6098 +
6099 +static inline u_int32_t
6100 +add_member(void *data, size_t offset, ip_set_ip_t start, ip_set_ip_t end)
6101 +{
6102 +       struct ip_set_req_iptreemap *entry = data + offset;
6103 +
6104 +       entry->ip = start;
6105 +       entry->end = end;
6106 +
6107 +       return sizeof(*entry);
6108 +}
6109 +
6110 +static void
6111 +iptreemap_list_members(const struct ip_set *set, void *data)
6112 +{
6113 +       struct ip_set_iptreemap *map = set->data;
6114 +       struct ip_set_iptreemap_b *btree;
6115 +       struct ip_set_iptreemap_c *ctree;
6116 +       struct ip_set_iptreemap_d *dtree;
6117 +       unsigned int a, b, c, d, inrange = 0;
6118 +       size_t offset = 0;
6119 +       ip_set_ip_t start = 0, end = 0, ip;
6120 +
6121 +       LOOP_WALK_BEGIN(map, a, btree) {
6122 +               LOOP_WALK_BEGIN(btree, b, ctree) {
6123 +                       LOOP_WALK_BEGIN(ctree, c, dtree) {
6124 +                               for (d = 0; d < 256; d++) {
6125 +                                       if (test_bit(d, (void *) dtree->bitmap)) {
6126 +                                               ip = ((a << 24) | (b << 16) | (c << 8) | d);
6127 +                                               if (!inrange) {
6128 +                                                       inrange = 1;
6129 +                                                       start = ip;
6130 +                                               } else if (end < ip - 1) {
6131 +                                                       offset += add_member(data, offset, start, end);
6132 +                                                       start = ip;
6133 +                                               }
6134 +                                               end = ip;
6135 +                                       } else if (inrange) {
6136 +                                               offset += add_member(data, offset, start, end);
6137 +                                               inrange = 0;
6138 +                                       }
6139 +                               }
6140 +                       } LOOP_WALK_END();
6141 +               } LOOP_WALK_END();
6142 +       } LOOP_WALK_END();
6143 +
6144 +       if (inrange)
6145 +               add_member(data, offset, start, end);
6146 +}
6147 +
6148 +IP_SET_TYPE(iptreemap, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
6149 +
6150 +MODULE_LICENSE("GPL");
6151 +MODULE_AUTHOR("Sven Wegener <sven.wegener@stealer.net>");
6152 +MODULE_DESCRIPTION("iptreemap type of IP sets");
6153 +
6154 +static int __init ip_set_iptreemap_init(void)
6155 +{
6156 +       int ret = -ENOMEM;
6157 +       int a;
6158 +
6159 +       cachep_b = KMEM_CACHE_CREATE("ip_set_iptreemap_b",
6160 +                                    sizeof(struct ip_set_iptreemap_b));
6161 +       if (!cachep_b) {
6162 +               ip_set_printk("Unable to create ip_set_iptreemap_b slab cache");
6163 +               goto out;
6164 +       }
6165 +
6166 +       cachep_c = KMEM_CACHE_CREATE("ip_set_iptreemap_c",
6167 +                                    sizeof(struct ip_set_iptreemap_c));
6168 +       if (!cachep_c) {
6169 +               ip_set_printk("Unable to create ip_set_iptreemap_c slab cache");
6170 +               goto outb;
6171 +       }
6172 +
6173 +       cachep_d = KMEM_CACHE_CREATE("ip_set_iptreemap_d",
6174 +                                    sizeof(struct ip_set_iptreemap_d));
6175 +       if (!cachep_d) {
6176 +               ip_set_printk("Unable to create ip_set_iptreemap_d slab cache");
6177 +               goto outc;
6178 +       }
6179 +
6180 +       fullbitmap_d = kmem_cache_alloc(cachep_d, GFP_KERNEL);
6181 +       if (!fullbitmap_d)
6182 +               goto outd;
6183 +
6184 +       fullbitmap_c = kmem_cache_alloc(cachep_c, GFP_KERNEL);
6185 +       if (!fullbitmap_c)
6186 +               goto outbitmapd;
6187 +
6188 +       fullbitmap_b = kmem_cache_alloc(cachep_b, GFP_KERNEL);
6189 +       if (!fullbitmap_b)
6190 +               goto outbitmapc;
6191 +
6192 +       ret = ip_set_register_set_type(&ip_set_iptreemap);
6193 +       if (0 > ret)
6194 +               goto outbitmapb;
6195 +
6196 +       /* Now init our global bitmaps */
6197 +       memset(fullbitmap_d->bitmap, 0xff, sizeof(fullbitmap_d->bitmap));
6198 +
6199 +       for (a = 0; a < 256; a++)
6200 +               fullbitmap_c->tree[a] = fullbitmap_d;
6201 +
6202 +       for (a = 0; a < 256; a++)
6203 +               fullbitmap_b->tree[a] = fullbitmap_c;
6204 +       memset(fullbitmap_b->dirty, 0, sizeof(fullbitmap_b->dirty));
6205 +
6206 +       return 0;
6207 +
6208 +outbitmapb:
6209 +       kmem_cache_free(cachep_b, fullbitmap_b);
6210 +outbitmapc:
6211 +       kmem_cache_free(cachep_c, fullbitmap_c);
6212 +outbitmapd:
6213 +       kmem_cache_free(cachep_d, fullbitmap_d);
6214 +outd:
6215 +       kmem_cache_destroy(cachep_d);
6216 +outc:
6217 +       kmem_cache_destroy(cachep_c);
6218 +outb:
6219 +       kmem_cache_destroy(cachep_b);
6220 +out:
6221 +
6222 +       return ret;
6223 +}
6224 +
6225 +static void __exit ip_set_iptreemap_fini(void)
6226 +{
6227 +       ip_set_unregister_set_type(&ip_set_iptreemap);
6228 +       kmem_cache_free(cachep_d, fullbitmap_d);
6229 +       kmem_cache_free(cachep_c, fullbitmap_c);
6230 +       kmem_cache_free(cachep_b, fullbitmap_b);
6231 +       kmem_cache_destroy(cachep_d);
6232 +       kmem_cache_destroy(cachep_c);
6233 +       kmem_cache_destroy(cachep_b);
6234 +}
6235 +
6236 +module_init(ip_set_iptreemap_init);
6237 +module_exit(ip_set_iptreemap_fini);
6238 --- /dev/null
6239 +++ b/net/ipv4/netfilter/ip_set_macipmap.c
6240 @@ -0,0 +1,164 @@
6241 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
6242 + *                         Patrick Schaaf <bof@bof.de>
6243 + *                         Martin Josefsson <gandalf@wlug.westbo.se>
6244 + * Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
6245 + *
6246 + * This program is free software; you can redistribute it and/or modify
6247 + * it under the terms of the GNU General Public License version 2 as
6248 + * published by the Free Software Foundation.
6249 + */
6250 +
6251 +/* Kernel module implementing an IP set type: the macipmap type */
6252 +
6253 +#include <linux/module.h>
6254 +#include <linux/ip.h>
6255 +#include <linux/skbuff.h>
6256 +#include <linux/errno.h>
6257 +#include <asm/uaccess.h>
6258 +#include <asm/bitops.h>
6259 +#include <linux/spinlock.h>
6260 +#include <linux/if_ether.h>
6261 +
6262 +#include <linux/netfilter_ipv4/ip_set_macipmap.h>
6263 +
6264 +static int
6265 +macipmap_utest(struct ip_set *set, const void *data, u_int32_t size,
6266 +              ip_set_ip_t *hash_ip)
6267 +{
6268 +       const struct ip_set_macipmap *map = set->data;
6269 +       const struct ip_set_macip *table = map->members;        
6270 +       const struct ip_set_req_macipmap *req = data;
6271 +
6272 +       if (req->ip < map->first_ip || req->ip > map->last_ip)
6273 +               return -ERANGE;
6274 +
6275 +       *hash_ip = req->ip;
6276 +       DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
6277 +          set->name, HIPQUAD(req->ip), HIPQUAD(*hash_ip));             
6278 +       if (table[req->ip - map->first_ip].match) {
6279 +               return (memcmp(req->ethernet,
6280 +                              &table[req->ip - map->first_ip].ethernet,
6281 +                              ETH_ALEN) == 0);
6282 +       } else {
6283 +               return (map->flags & IPSET_MACIP_MATCHUNSET ? 1 : 0);
6284 +       }
6285 +}
6286 +
6287 +static int
6288 +macipmap_ktest(struct ip_set *set,
6289 +              const struct sk_buff *skb,
6290 +              ip_set_ip_t *hash_ip,
6291 +              const u_int32_t *flags,
6292 +              unsigned char index)
6293 +{
6294 +       const struct ip_set_macipmap *map = set->data;
6295 +       const struct ip_set_macip *table = map->members;
6296 +       ip_set_ip_t ip;
6297 +       
6298 +       ip = ipaddr(skb, flags[index]);
6299 +
6300 +       if (ip < map->first_ip || ip > map->last_ip)
6301 +               return 0;
6302 +
6303 +       *hash_ip = ip;  
6304 +       DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
6305 +          set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));          
6306 +       if (table[ip - map->first_ip].match) {
6307 +               /* Is mac pointer valid?
6308 +                * If so, compare... */
6309 +               return (skb_mac_header(skb) >= skb->head
6310 +                       && (skb_mac_header(skb) + ETH_HLEN) <= skb->data
6311 +                       && (memcmp(eth_hdr(skb)->h_source,
6312 +                                  &table[ip - map->first_ip].ethernet,
6313 +                                  ETH_ALEN) == 0));
6314 +       } else {
6315 +               return (map->flags & IPSET_MACIP_MATCHUNSET ? 1 : 0);
6316 +       }
6317 +}
6318 +
6319 +/* returns 0 on success */
6320 +static inline int
6321 +macipmap_add(struct ip_set *set, ip_set_ip_t *hash_ip,
6322 +            ip_set_ip_t ip, const unsigned char *ethernet)
6323 +{
6324 +       struct ip_set_macipmap *map = set->data;
6325 +       struct ip_set_macip *table = map->members;
6326 +
6327 +       if (ip < map->first_ip || ip > map->last_ip)
6328 +               return -ERANGE;
6329 +       if (table[ip - map->first_ip].match)
6330 +               return -EEXIST;
6331 +
6332 +       *hash_ip = ip;
6333 +       DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
6334 +       memcpy(&table[ip - map->first_ip].ethernet, ethernet, ETH_ALEN);
6335 +       table[ip - map->first_ip].match = IPSET_MACIP_ISSET;
6336 +       return 0;
6337 +}
6338 +
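     +/* Precondition for the kernel-side add below: the source MAC is only
     + * usable when the mac header lies within the skb, completely in front
     + * of the network data. */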
6339 +#define KADT_CONDITION                                         \
6340 +       if (!(skb_mac_header(skb) >= skb->head                  \
6341 +             && (skb_mac_header(skb) + ETH_HLEN) <= skb->data))\
6342 +               return -EINVAL;
6343 +
6344 +UADT(macipmap, add, req->ethernet)
6345 +KADT(macipmap, add, ipaddr, eth_hdr(skb)->h_source)
6346 +
6347 +static inline int
6348 +macipmap_del(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
6349 +{
6350 +       struct ip_set_macipmap *map = set->data;
6351 +       struct ip_set_macip *table = map->members;
6352 +
6353 +       if (ip < map->first_ip || ip > map->last_ip)
6354 +               return -ERANGE;
6355 +       if (!table[ip - map->first_ip].match)
6356 +               return -EEXIST;
6357 +
6358 +       *hash_ip = ip;
6359 +       table[ip - map->first_ip].match = 0;
6360 +       DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
6361 +       return 0;
6362 +}
6363 +
6364 +#undef KADT_CONDITION
6365 +#define KADT_CONDITION
6366 +
6367 +UADT(macipmap, del)
6368 +KADT(macipmap, del, ipaddr)
6369 +
6370 +static inline int
6371 +__macipmap_create(const struct ip_set_req_macipmap_create *req,
6372 +                 struct ip_set_macipmap *map)
6373 +{
6374 +       if (req->to - req->from > MAX_RANGE) {
6375 +               ip_set_printk("range too big, %d elements (max %d)",
6376 +                             req->to - req->from + 1, MAX_RANGE+1);
6377 +               return -ENOEXEC;
6378 +       }
6379 +       map->flags = req->flags;
6380 +       return (req->to - req->from + 1) * sizeof(struct ip_set_macip);
6381 +}
6382 +
6383 +BITMAP_CREATE(macipmap)
6384 +BITMAP_DESTROY(macipmap)
6385 +BITMAP_FLUSH(macipmap)
6386 +
6387 +static inline void
6388 +__macipmap_list_header(const struct ip_set_macipmap *map,
6389 +                      struct ip_set_req_macipmap_create *header)
6390 +{
6391 +       header->flags = map->flags;
6392 +}
6393 +
6394 +BITMAP_LIST_HEADER(macipmap)
6395 +BITMAP_LIST_MEMBERS_SIZE(macipmap)
6396 +BITMAP_LIST_MEMBERS(macipmap)
6397 +
6398 +IP_SET_TYPE(macipmap, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
6399 +
6400 +MODULE_LICENSE("GPL");
6401 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
6402 +MODULE_DESCRIPTION("macipmap type of IP sets");
6403 +
6404 +REGISTER_MODULE(macipmap)
6405 --- /dev/null
6406 +++ b/net/ipv4/netfilter/ip_set_nethash.c
6407 @@ -0,0 +1,225 @@
6408 +/* Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
6409 + *
6410 + * This program is free software; you can redistribute it and/or modify
6411 + * it under the terms of the GNU General Public License version 2 as
6412 + * published by the Free Software Foundation.
6413 + */
6414 +
6415 +/* Kernel module implementing a cidr nethash set */
6416 +
6417 +#include <linux/module.h>
6418 +#include <linux/moduleparam.h>
6419 +#include <linux/ip.h>
6420 +#include <linux/skbuff.h>
6421 +#include <linux/netfilter_ipv4/ip_set_jhash.h>
6422 +#include <linux/errno.h>
6423 +#include <asm/uaccess.h>
6424 +#include <asm/bitops.h>
6425 +#include <linux/spinlock.h>
6426 +#include <linux/random.h>
6427 +
6428 +#include <net/ip.h>
6429 +
6430 +#include <linux/netfilter_ipv4/ip_set_nethash.h>
6431 +
6432 +static int limit = MAX_RANGE;
6433 +
6434 +static inline __u32
6435 +nethash_id_cidr(const struct ip_set_nethash *map,
6436 +               ip_set_ip_t *hash_ip,
6437 +               ip_set_ip_t ip,
6438 +               uint8_t cidr)
6439 +{
6440 +       __u32 id;
6441 +       u_int16_t i;
6442 +       ip_set_ip_t *elem;
6443 +
6444 +       *hash_ip = pack_ip_cidr(ip, cidr);
6445 +       if (!*hash_ip)
6446 +               return MAX_RANGE;
6447 +       
6448 +       for (i = 0; i < map->probes; i++) {
6449 +               id = jhash_ip(map, i, *hash_ip) % map->hashsize;
6450 +               DP("hash key: %u", id);
6451 +               elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
6452 +               if (*elem == *hash_ip)
6453 +                       return id;
6454 +               /* No shortcut - there can be deleted entries. */
6455 +       }
6456 +       return UINT_MAX;
6457 +}
6458 +
6459 +static inline __u32
6460 +nethash_id(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
6461 +{
6462 +       const struct ip_set_nethash *map = set->data;
6463 +       __u32 id = UINT_MAX;
6464 +       int i;
6465 +
6466 +       for (i = 0; i < 30 && map->cidr[i]; i++) {
6467 +               id = nethash_id_cidr(map, hash_ip, ip, map->cidr[i]);
6468 +               if (id != UINT_MAX)
6469 +                       break;
6470 +       }
6471 +       return id;
6472 +}
6473 +
6474 +static inline int
6475 +nethash_test_cidr(struct ip_set *set, ip_set_ip_t *hash_ip,
6476 +                 ip_set_ip_t ip, uint8_t cidr)
6477 +{
6478 +       const struct ip_set_nethash *map = set->data;
6479 +
6480 +       return (nethash_id_cidr(map, hash_ip, ip, cidr) != UINT_MAX);
6481 +}
6482 +
6483 +static inline int
6484 +nethash_test(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
6485 +{
6486 +       return (nethash_id(set, hash_ip, ip) != UINT_MAX);
6487 +}
6488 +
6489 +static int
6490 +nethash_utest(struct ip_set *set, const void *data, u_int32_t size,
6491 +             ip_set_ip_t *hash_ip)
6492 +{
6493 +       const struct ip_set_req_nethash *req = data;
6494 +
6495 +       if (req->cidr <= 0 || req->cidr > 32)
6496 +               return -EINVAL;
6497 +       return (req->cidr == 32 ? nethash_test(set, hash_ip, req->ip)
6498 +               : nethash_test_cidr(set, hash_ip, req->ip, req->cidr));
6499 +}
6500 +
6501 +#define KADT_CONDITION
6502 +
6503 +KADT(nethash, test, ipaddr)
6504 +
6505 +static inline int
6506 +__nethash_add(struct ip_set_nethash *map, ip_set_ip_t *ip)
6507 +{
6508 +       __u32 probe;
6509 +       u_int16_t i;
6510 +       ip_set_ip_t *elem, *slot = NULL;
6511 +       
6512 +       for (i = 0; i < map->probes; i++) {
6513 +               probe = jhash_ip(map, i, *ip) % map->hashsize;
6514 +               elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
6515 +               if (*elem == *ip)
6516 +                       return -EEXIST;
6517 +               if (!(slot || *elem))
6518 +                       slot = elem;
6519 +               /* There can be deleted entries, must check all slots */
6520 +       }
6521 +       if (slot) {
6522 +               *slot = *ip;
6523 +               map->elements++;
6524 +               return 0;
6525 +       }
6526 +       /* Trigger rehashing */
6527 +       return -EAGAIN;
6528 +}
6529 +
6530 +static inline int
6531 +nethash_add(struct ip_set *set, ip_set_ip_t *hash_ip,
6532 +           ip_set_ip_t ip, uint8_t cidr)
6533 +{
6534 +       struct ip_set_nethash *map = set->data;
6535 +       int ret;
6536 +       
6537 +       if (map->elements >= limit || map->nets[cidr-1] == UINT16_MAX)
6538 +               return -ERANGE; 
6539 +       if (cidr <= 0 || cidr >= 32)
6540 +               return -EINVAL;
6541 +
6542 +       *hash_ip = pack_ip_cidr(ip, cidr);
6543 +       DP("%u.%u.%u.%u/%u, %u.%u.%u.%u", HIPQUAD(ip), cidr, HIPQUAD(*hash_ip));
6544 +       if (!*hash_ip)
6545 +               return -ERANGE;
6546 +       
6547 +       ret = __nethash_add(map, hash_ip);
6548 +       if (ret == 0) {
6549 +               if (!map->nets[cidr-1]++)
6550 +                       add_cidr_size(map->cidr, cidr);
6551 +               map->elements++;
6552 +       }
6553 +       
6554 +       return ret;
6555 +}
6556 +
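     +/* Kernel-side add/del carry no prefix length: fall back to the first
     + * prefix length recorded in the set, or /31 if none has been added yet. */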
6557 +#undef KADT_CONDITION
6558 +#define KADT_CONDITION                                                 \
6559 +       struct ip_set_nethash *map = set->data;                         \
6560 +       uint8_t cidr = map->cidr[0] ? map->cidr[0] : 31;
6561 +
6562 +UADT(nethash, add, req->cidr)
6563 +KADT(nethash, add, ipaddr, cidr)
6564 +
6565 +static inline void
6566 +__nethash_retry(struct ip_set_nethash *tmp, struct ip_set_nethash *map)
6567 +{
6568 +       memcpy(tmp->cidr, map->cidr, sizeof(tmp->cidr));
6569 +       memcpy(tmp->nets, map->nets, sizeof(tmp->nets));
6570 +}
6571 +
6572 +HASH_RETRY(nethash, ip_set_ip_t)
6573 +
6574 +static inline int
6575 +nethash_del(struct ip_set *set, ip_set_ip_t *hash_ip,
6576 +           ip_set_ip_t ip, uint8_t cidr)
6577 +{
6578 +       struct ip_set_nethash *map = set->data;
6579 +       ip_set_ip_t id, *elem;
6580 +
6581 +       if (cidr <= 0 || cidr >= 32)
6582 +               return -EINVAL; 
6583 +       
6584 +       id = nethash_id_cidr(map, hash_ip, ip, cidr);
6585 +       if (id == UINT_MAX)
6586 +               return -EEXIST;
6587 +               
6588 +       elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
6589 +       *elem = 0;
6590 +       map->elements--;
6591 +       if (!map->nets[cidr-1]--)
6592 +               del_cidr_size(map->cidr, cidr);
6593 +       return 0;
6594 +}
6595 +
6596 +UADT(nethash, del, req->cidr)
6597 +KADT(nethash, del, ipaddr, cidr)
6598 +
6599 +static inline int
6600 +__nethash_create(const struct ip_set_req_nethash_create *req,
6601 +                struct ip_set_nethash *map)
6602 +{
6603 +       memset(map->cidr, 0, sizeof(map->cidr));
6604 +       memset(map->nets, 0, sizeof(map->nets));
6605 +       
6606 +       return 0;
6607 +}
6608 +
6609 +HASH_CREATE(nethash, ip_set_ip_t)
6610 +HASH_DESTROY(nethash)
6611 +
6612 +HASH_FLUSH_CIDR(nethash, ip_set_ip_t)
6613 +
6614 +static inline void
6615 +__nethash_list_header(const struct ip_set_nethash *map,
6616 +                     struct ip_set_req_nethash_create *header)
6617 +{    
6618 +}
6619 +
6620 +HASH_LIST_HEADER(nethash)
6621 +HASH_LIST_MEMBERS_SIZE(nethash, ip_set_ip_t)
6622 +HASH_LIST_MEMBERS(nethash, ip_set_ip_t)
6623 +
6624 +IP_SET_RTYPE(nethash, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
6625 +
6626 +MODULE_LICENSE("GPL");
6627 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
6628 +MODULE_DESCRIPTION("nethash type of IP sets");
6629 +module_param(limit, int, 0600);
6630 +MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
6631 +
6632 +REGISTER_MODULE(nethash)
6633 --- /dev/null
6634 +++ b/net/ipv4/netfilter/ip_set_portmap.c
6635 @@ -0,0 +1,114 @@
6636 +/* Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
6637 + *
6638 + * This program is free software; you can redistribute it and/or modify
6639 + * it under the terms of the GNU General Public License version 2 as
6640 + * published by the Free Software Foundation.
6641 + */
6642 +
6643 +/* Kernel module implementing a port set type as a bitmap */
6644 +
6645 +#include <linux/module.h>
6646 +#include <linux/ip.h>
6647 +#include <linux/tcp.h>
6648 +#include <linux/udp.h>
6649 +#include <linux/skbuff.h>
6650 +#include <linux/errno.h>
6651 +#include <asm/uaccess.h>
6652 +#include <asm/bitops.h>
6653 +#include <linux/spinlock.h>
6654 +
6655 +#include <net/ip.h>
6656 +
6657 +#include <linux/netfilter_ipv4/ip_set_portmap.h>
6658 +#include <linux/netfilter_ipv4/ip_set_getport.h>
6659 +
6660 +static inline int
6661 +portmap_test(const struct ip_set *set, ip_set_ip_t *hash_port,
6662 +            ip_set_ip_t port)
6663 +{
6664 +       const struct ip_set_portmap *map = set->data;
6665 +
6666 +       if (port < map->first_ip || port > map->last_ip)
6667 +               return -ERANGE;
6668 +               
6669 +       *hash_port = port;
6670 +       DP("set: %s, port:%u, %u", set->name, port, *hash_port);
6671 +       return !!test_bit(port - map->first_ip, map->members);
6672 +}
6673 +
6674 +#define KADT_CONDITION                 \
6675 +       if (ip == INVALID_PORT)         \
6676 +               return 0;       
6677 +
6678 +UADT(portmap, test)
6679 +KADT(portmap, test, get_port)
6680 +
6681 +static inline int
6682 +portmap_add(struct ip_set *set, ip_set_ip_t *hash_port, ip_set_ip_t port)
6683 +{
6684 +       struct ip_set_portmap *map = set->data;
6685 +
6686 +       if (port < map->first_ip || port > map->last_ip)
6687 +               return -ERANGE;
6688 +       if (test_and_set_bit(port - map->first_ip, map->members))
6689 +               return -EEXIST;
6690 +               
6691 +       *hash_port = port;
6692 +       DP("port %u", port);
6693 +       return 0;
6694 +}
6695 +
6696 +UADT(portmap, add)
6697 +KADT(portmap, add, get_port)
6698 +
6699 +static inline int
6700 +portmap_del(struct ip_set *set, ip_set_ip_t *hash_port, ip_set_ip_t port)
6701 +{
6702 +       struct ip_set_portmap *map = set->data;
6703 +
6704 +       if (port < map->first_ip || port > map->last_ip)
6705 +               return -ERANGE;
6706 +       if (!test_and_clear_bit(port - map->first_ip, map->members))
6707 +               return -EEXIST;
6708 +               
6709 +       *hash_port = port;
6710 +       DP("port %u", port);
6711 +       return 0;
6712 +}
6713 +
6714 +UADT(portmap, del)
6715 +KADT(portmap, del, get_port)
6716 +
6717 +static inline int
6718 +__portmap_create(const struct ip_set_req_portmap_create *req,
6719 +                struct ip_set_portmap *map)
6720 +{
6721 +       if (req->to - req->from > MAX_RANGE) {
6722 +               ip_set_printk("range too big, %d elements (max %d)",
6723 +                             req->to - req->from + 1, MAX_RANGE+1);
6724 +               return -ENOEXEC;
6725 +       }
6726 +       return bitmap_bytes(req->from, req->to);
6727 +}
6728 +
6729 +BITMAP_CREATE(portmap)
6730 +BITMAP_DESTROY(portmap)
6731 +BITMAP_FLUSH(portmap)
6732 +
6733 +static inline void
6734 +__portmap_list_header(const struct ip_set_portmap *map,
6735 +                     struct ip_set_req_portmap_create *header)
6736 +{
6737 +}
6738 +
6739 +BITMAP_LIST_HEADER(portmap)
6740 +BITMAP_LIST_MEMBERS_SIZE(portmap)
6741 +BITMAP_LIST_MEMBERS(portmap)
6742 +
6743 +IP_SET_TYPE(portmap, IPSET_TYPE_PORT | IPSET_DATA_SINGLE)
6744 +
6745 +MODULE_LICENSE("GPL");
6746 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
6747 +MODULE_DESCRIPTION("portmap type of IP sets");
6748 +
6749 +REGISTER_MODULE(portmap)
6750 --- /dev/null
6751 +++ b/net/ipv4/netfilter/ip_set_setlist.c
6752 @@ -0,0 +1,330 @@
6753 +/* Copyright (C) 2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
6754 + *
6755 + * This program is free software; you can redistribute it and/or modify
6756 + * it under the terms of the GNU General Public License version 2 as
6757 + * published by the Free Software Foundation.
6758 + */
6759 +
6760 +/* Kernel module implementing an IP set type: the setlist type */
6761 +
6762 +#include <linux/module.h>
6763 +#include <linux/ip.h>
6764 +#include <linux/skbuff.h>
6765 +#include <linux/errno.h>
6766 +
6767 +#include <linux/netfilter_ipv4/ip_set.h>
6768 +#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
6769 +#include <linux/netfilter_ipv4/ip_set_setlist.h>
6770 +
6771 +/* Ordering of the added set (index) and the reference set (ref)
6772 + * in the list:  before ==> index, ref
6773 + *               after  ==> ref, index
6774 + */
6775 +
6776 +static inline int
6777 +next_index_eq(const struct ip_set_setlist *map, int i, ip_set_id_t index)
6778 +{
6779 +       return i < map->size && map->index[i] == index;
6780 +}
6781 +
6782 +static int
6783 +setlist_utest(struct ip_set *set, const void *data, u_int32_t size,
6784 +              ip_set_ip_t *hash_ip)
6785 +{
6786 +       const struct ip_set_setlist *map = set->data;
6787 +       const struct ip_set_req_setlist *req = data;
6788 +       ip_set_id_t index, ref = IP_SET_INVALID_ID;
6789 +       int i, res = 0;
6790 +       struct ip_set *s;
6791 +       
6792 +       if (req->before && req->ref[0] == '\0')
6793 +               return 0;
6794 +
6795 +       index = __ip_set_get_byname(req->name, &s);
6796 +       if (index == IP_SET_INVALID_ID)
6797 +               return 0;
6798 +       if (req->ref[0] != '\0') {
6799 +               ref = __ip_set_get_byname(req->ref, &s);
6800 +               if (ref == IP_SET_INVALID_ID)
6801 +                       goto finish;
6802 +       }
6803 +       for (i = 0; i < map->size
6804 +                   && map->index[i] != IP_SET_INVALID_ID; i++) {
6805 +               if (req->before && map->index[i] == index) {
6806 +                       res = next_index_eq(map, i + 1, ref);
6807 +                       break;
6808 +               } else if (!req->before) {
6809 +                       if ((ref == IP_SET_INVALID_ID
6810 +                            && map->index[i] == index)
6811 +                           || (map->index[i] == ref
6812 +                               && next_index_eq(map, i + 1, index))) {
6813 +                               res = 1;
6814 +                               break;
6815 +                       }
6816 +               }
6817 +       }
6818 +       if (ref != IP_SET_INVALID_ID)
6819 +               __ip_set_put_byindex(ref);
6820 +finish:
6821 +       __ip_set_put_byindex(index);
6822 +       return res;
6823 +}
6824 +
6825 +static int
6826 +setlist_ktest(struct ip_set *set,
6827 +              const struct sk_buff *skb,
6828 +              ip_set_ip_t *hash_ip,
6829 +              const u_int32_t *flags,
6830 +              unsigned char index)
6831 +{
6832 +       struct ip_set_setlist *map = set->data;
6833 +       int i, res = 0;
6834 +       
6835 +       for (i = 0; i < map->size
6836 +                   && map->index[i] != IP_SET_INVALID_ID
6837 +                   && res == 0; i++)
6838 +               res = ip_set_testip_kernel(map->index[i], skb, flags);
6839 +       return res;
6840 +}
6841 +
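     +/* Insert 'index' at slot i, shifting the tail of the list one slot to
     + * the right; fails with -ERANGE when the slot is out of range or the
     + * list is already full. */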
6842 +static inline int
6843 +insert_setlist(struct ip_set_setlist *map, int i, ip_set_id_t index)
6844 +{
6845 +       ip_set_id_t tmp;
6846 +       int j;
6847 +
6848 +       DP("i: %u, last %u\n", i, map->index[map->size - 1]);   
6849 +       if (i >= map->size || map->index[map->size - 1] != IP_SET_INVALID_ID)
6850 +               return -ERANGE;
6851 +       
6852 +       for (j = i; j < map->size
6853 +                   && index != IP_SET_INVALID_ID; j++) {
6854 +               tmp = map->index[j];
6855 +               map->index[j] = index;
6856 +               index = tmp;
6857 +       }
6858 +       return 0;
6859 +}
6860 +
6861 +static int
6862 +setlist_uadd(struct ip_set *set, const void *data, u_int32_t size,
6863 +            ip_set_ip_t *hash_ip)
6864 +{
6865 +       struct ip_set_setlist *map = set->data;
6866 +       const struct ip_set_req_setlist *req = data;
6867 +       ip_set_id_t index, ref = IP_SET_INVALID_ID;
6868 +       int i, res = -ERANGE;
6869 +       struct ip_set *s;
6870 +       
6871 +       if (req->before && req->ref[0] == '\0')
6872 +               return -EINVAL;
6873 +
6874 +       index = __ip_set_get_byname(req->name, &s);
6875 +       if (index == IP_SET_INVALID_ID)
6876 +               return -EEXIST;
6877 +       /* "Loop detection" */
6878 +       if (strcmp(s->type->typename, "setlist") == 0)
6879 +               goto finish;
6880 +
6881 +       if (req->ref[0] != '\0') {
6882 +               ref = __ip_set_get_byname(req->ref, &s);
6883 +               if (ref == IP_SET_INVALID_ID) {
6884 +                       res = -EEXIST;
6885 +                       goto finish;
6886 +               }
6887 +       }
6888 +       for (i = 0; i < map->size; i++) {
6889 +               if (map->index[i] != ref)
6890 +                       continue;
6891 +               if (req->before) 
6892 +                       res = insert_setlist(map, i, index);
6893 +               else
6894 +                       res = insert_setlist(map,
6895 +                               ref == IP_SET_INVALID_ID ? i : i + 1,
6896 +                               index);
6897 +               break;
6898 +       }
6899 +       if (ref != IP_SET_INVALID_ID)
6900 +               __ip_set_put_byindex(ref);
6901 +       /* In case of success, we keep the reference to the set */
6902 +finish:
6903 +       if (res != 0)
6904 +               __ip_set_put_byindex(index);
6905 +       return res;
6906 +}
6907 +
6908 +static int
6909 +setlist_kadd(struct ip_set *set,
6910 +            const struct sk_buff *skb,
6911 +            ip_set_ip_t *hash_ip,
6912 +            const u_int32_t *flags,
6913 +            unsigned char index)
6914 +{
6915 +       struct ip_set_setlist *map = set->data;
6916 +       int i, res = -EINVAL;
6917 +       
6918 +       for (i = 0; i < map->size
6919 +                   && map->index[i] != IP_SET_INVALID_ID
6920 +                   && res != 0; i++)
6921 +               res = ip_set_addip_kernel(map->index[i], skb, flags);
6922 +       return res;
6923 +}
6924 +
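     +/* Remove the entry at slot i by shifting the tail of the list one slot
     + * to the left and invalidating the last slot. */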
6925 +static inline int
6926 +unshift_setlist(struct ip_set_setlist *map, int i)
6927 +{
6928 +       int j;
6929 +       
6930 +       for (j = i; j < map->size - 1; j++)
6931 +               map->index[j] = map->index[j+1];
6932 +       map->index[map->size-1] = IP_SET_INVALID_ID;
6933 +       return 0;
6934 +}
6935 +
6936 +static int
6937 +setlist_udel(struct ip_set *set, const void *data, u_int32_t size,
6938 +            ip_set_ip_t *hash_ip)
6939 +{
6940 +       struct ip_set_setlist *map = set->data;
6941 +       const struct ip_set_req_setlist *req = data;
6942 +       ip_set_id_t index, ref = IP_SET_INVALID_ID;
6943 +       int i, res = -EEXIST;
6944 +       struct ip_set *s;
6945 +       
6946 +       if (req->before && req->ref[0] == '\0')
6947 +               return -EINVAL;
6948 +
6949 +       index = __ip_set_get_byname(req->name, &s);
6950 +       if (index == IP_SET_INVALID_ID)
6951 +               return -EEXIST;
6952 +       if (req->ref[0] != '\0') {
6953 +               ref = __ip_set_get_byname(req->ref, &s);
6954 +               if (ref == IP_SET_INVALID_ID)
6955 +                       goto finish;
6956 +       }
6957 +       for (i = 0; i < map->size
6958 +                   && map->index[i] != IP_SET_INVALID_ID; i++) {
6959 +               if (req->before) {
6960 +                       if (map->index[i] == index
6961 +                           && next_index_eq(map, i + 1, ref)) {
6962 +                               res = unshift_setlist(map, i);
6963 +                               break;
6964 +                       }
6965 +               } else if (ref == IP_SET_INVALID_ID) {
6966 +                       if (map->index[i] == index) {
6967 +                               res = unshift_setlist(map, i);
6968 +                               break;
6969 +                       }
6970 +               } else if (map->index[i] == ref
6971 +                          && next_index_eq(map, i + 1, index)) {
6972 +                       res = unshift_setlist(map, i + 1);
6973 +                       break;
6974 +               }
6975 +       }
6976 +       if (ref != IP_SET_INVALID_ID)
6977 +               __ip_set_put_byindex(ref);
6978 +finish:
6979 +       __ip_set_put_byindex(index);
6980 +       /* In case of success, release the reference to the set */
6981 +       if (res == 0)
6982 +               __ip_set_put_byindex(index);
6983 +       return res;
6984 +}
6985 +
6986 +static int
6987 +setlist_kdel(struct ip_set *set,
6988 +            const struct sk_buff *skb,
6989 +            ip_set_ip_t *hash_ip,
6990 +            const u_int32_t *flags,
6991 +            unsigned char index)
6992 +{
6993 +       struct ip_set_setlist *map = set->data;
6994 +       int i, res = -EINVAL;
6995 +       
6996 +       for (i = 0; i < map->size
6997 +                   && map->index[i] != IP_SET_INVALID_ID
6998 +                   && res != 0; i++)
6999 +               res = ip_set_delip_kernel(map->index[i], skb, flags);
7000 +       return res;
7001 +}
7002 +
7003 +static int
7004 +setlist_create(struct ip_set *set, const void *data, u_int32_t size)
7005 +{
7006 +       struct ip_set_setlist *map;
7007 +       const struct ip_set_req_setlist_create *req = data;
7008 +       int i;
7009 +       
7010 +       map = kmalloc(sizeof(struct ip_set_setlist) +
7011 +                     req->size * sizeof(ip_set_id_t), GFP_KERNEL);
7012 +       if (!map)
7013 +               return -ENOMEM;
7014 +       map->size = req->size;
7015 +       for (i = 0; i < map->size; i++)
7016 +               map->index[i] = IP_SET_INVALID_ID;
7017 +       
7018 +       set->data = map;
7019 +       return 0;
7020 +}                        
7021 +
7022 +static void
7023 +setlist_destroy(struct ip_set *set)
7024 +{
7025 +       struct ip_set_setlist *map = set->data;
7026 +       int i;
7027 +       
7028 +       for (i = 0; i < map->size
7029 +                   && map->index[i] != IP_SET_INVALID_ID; i++)
7030 +               __ip_set_put_byindex(map->index[i]);
7031 +
7032 +       kfree(map);
7033 +       set->data = NULL;
7034 +}
7035 +
7036 +static void
7037 +setlist_flush(struct ip_set *set)
7038 +{
7039 +       struct ip_set_setlist *map = set->data;
7040 +       int i;
7041 +       
7042 +       for (i = 0; i < map->size
7043 +                   && map->index[i] != IP_SET_INVALID_ID; i++) {
7044 +               __ip_set_put_byindex(map->index[i]);
7045 +               map->index[i] = IP_SET_INVALID_ID;
7046 +       }
7047 +}
7048 +
7049 +static void
7050 +setlist_list_header(const struct ip_set *set, void *data)
7051 +{
7052 +       const struct ip_set_setlist *map = set->data;
7053 +       struct ip_set_req_setlist_create *header = data;
7054 +       
7055 +       header->size = map->size;
7056 +}
7057 +
7058 +static int
7059 +setlist_list_members_size(const struct ip_set *set)
7060 +{
7061 +       const struct ip_set_setlist *map = set->data;
7062 +       
7063 +       return map->size * sizeof(ip_set_id_t);
7064 +}
7065 +
7066 +static void
7067 +setlist_list_members(const struct ip_set *set, void *data)
7068 +{
7069 +       struct ip_set_setlist *map = set->data;
7070 +       int i;
7071 +       
7072 +       for (i = 0; i < map->size; i++)
7073 +               *((ip_set_id_t *)data + i) = ip_set_id(map->index[i]);
7074 +}
7075 +
7076 +IP_SET_TYPE(setlist, IPSET_TYPE_SETNAME | IPSET_DATA_SINGLE)
7077 +
7078 +MODULE_LICENSE("GPL");
7079 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
7080 +MODULE_DESCRIPTION("setlist type of IP sets");
7081 +
7082 +REGISTER_MODULE(setlist)
7083 --- /dev/null
7084 +++ b/net/ipv4/netfilter/ipt_set.c
7085 @@ -0,0 +1,238 @@
7086 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
7087 + *                         Patrick Schaaf <bof@bof.de>
7088 + *                         Martin Josefsson <gandalf@wlug.westbo.se>
7089 + * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
7090 + *
7091 + * This program is free software; you can redistribute it and/or modify
7092 + * it under the terms of the GNU General Public License version 2 as
7093 + * published by the Free Software Foundation.
7094 + */
7095 +
7096 +/* Kernel module to match an IP set. */
7097 +
7098 +#include <linux/module.h>
7099 +#include <linux/ip.h>
7100 +#include <linux/skbuff.h>
7101 +#include <linux/version.h>
7102 +
7103 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
7104 +#include <linux/netfilter_ipv4/ip_tables.h>
7105 +#define xt_register_match      ipt_register_match
7106 +#define xt_unregister_match    ipt_unregister_match
7107 +#define xt_match               ipt_match
7108 +#else
7109 +#include <linux/netfilter/x_tables.h>
7110 +#endif
7111 +#include <linux/netfilter_ipv4/ip_set.h>
7112 +#include <linux/netfilter_ipv4/ipt_set.h>
7113 +
7114 +static inline int
7115 +match_set(const struct ipt_set_info *info,
7116 +         const struct sk_buff *skb,
7117 +         int inv)
7118 +{      
7119 +       if (ip_set_testip_kernel(info->index, skb, info->flags))
7120 +               inv = !inv;
7121 +       return inv;
7122 +}
7123 +
7124 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
7125 +static int
7126 +match(const struct sk_buff *skb,
7127 +      const struct net_device *in,
7128 +      const struct net_device *out,
7129 +      const void *matchinfo,
7130 +      int offset,
7131 +      const void *hdr,
7132 +      u_int16_t datalen,
7133 +      int *hotdrop) 
7134 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
7135 +static int
7136 +match(const struct sk_buff *skb,
7137 +      const struct net_device *in,
7138 +      const struct net_device *out,
7139 +      const void *matchinfo,
7140 +      int offset,
7141 +      int *hotdrop) 
7142 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
7143 +static int
7144 +match(const struct sk_buff *skb,
7145 +      const struct net_device *in,
7146 +      const struct net_device *out,
7147 +      const void *matchinfo,
7148 +      int offset,
7149 +      unsigned int protoff,
7150 +      int *hotdrop)
7151 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
7152 +static int
7153 +match(const struct sk_buff *skb,
7154 +      const struct net_device *in,
7155 +      const struct net_device *out,
7156 +      const struct xt_match *match,
7157 +      const void *matchinfo,
7158 +      int offset,
7159 +      unsigned int protoff,
7160 +      int *hotdrop)
7161 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
7162 +static bool
7163 +match(const struct sk_buff *skb,
7164 +      const struct net_device *in,
7165 +      const struct net_device *out,
7166 +      const struct xt_match *match,
7167 +      const void *matchinfo,
7168 +      int offset, 
7169 +      unsigned int protoff, 
7170 +      bool *hotdrop)
7171 +#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
7172 +static bool
7173 +match(const struct sk_buff *skb,
7174 +      const struct xt_match_param *par)
7175 +#endif
7176 +{
7177 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
7178 +       const struct ipt_set_info_match *info = matchinfo;
7179 +#else
7180 +       const struct ipt_set_info_match *info = par->matchinfo;
7181 +#endif
7182 +               
7183 +       return match_set(&info->match_set,
7184 +                        skb,
7185 +                        info->match_set.flags[0] & IPSET_MATCH_INV);
7186 +}
7187 +
7188 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
7189 +static int
7190 +checkentry(const char *tablename,
7191 +          const struct ipt_ip *ip,
7192 +          void *matchinfo,
7193 +          unsigned int matchsize,
7194 +          unsigned int hook_mask)
7195 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
7196 +static int
7197 +checkentry(const char *tablename,
7198 +          const void *inf,
7199 +          void *matchinfo,
7200 +          unsigned int matchsize,
7201 +          unsigned int hook_mask)
7202 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7203 +static int
7204 +checkentry(const char *tablename,
7205 +          const void *inf,
7206 +          const struct xt_match *match,
7207 +          void *matchinfo,
7208 +          unsigned int matchsize,
7209 +          unsigned int hook_mask)
7210 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
7211 +static int
7212 +checkentry(const char *tablename,
7213 +          const void *inf,
7214 +          const struct xt_match *match,
7215 +          void *matchinfo,
7216 +          unsigned int hook_mask)
7217 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
7218 +static bool
7219 +checkentry(const char *tablename,
7220 +          const void *inf,
7221 +          const struct xt_match *match,
7222 +          void *matchinfo,
7223 +          unsigned int hook_mask)
7224 +#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
7225 +static bool
7226 +checkentry(const struct xt_mtchk_param *par)
7227 +#endif
7228 +{
7229 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
7230 +       struct ipt_set_info_match *info = matchinfo;
7231 +#else
7232 +       struct ipt_set_info_match *info = par->matchinfo;
7233 +#endif
7234 +       ip_set_id_t index;
7235 +
7236 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
7237 +       if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
7238 +               ip_set_printk("invalid matchsize %d", matchsize);
7239 +               return 0;
7240 +       }
7241 +#endif
7242 +
7243 +       index = ip_set_get_byindex(info->match_set.index);
7244 +               
7245 +       if (index == IP_SET_INVALID_ID) {
7246 +               ip_set_printk("Cannot find set identified by id %u to match",
7247 +                             info->match_set.index);
7248 +               return 0;       /* error */
7249 +       }
7250 +       if (info->match_set.flags[IP_SET_MAX_BINDINGS] != 0) {
7251 +               ip_set_printk("That's nasty!");
7252 +               return 0;       /* error */
7253 +       }
7254 +
7255 +       return 1;
7256 +}
7257 +
7258 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
7259 +static void destroy(void *matchinfo,
7260 +                   unsigned int matchsize)
7261 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7262 +static void destroy(const struct xt_match *match,
7263 +                   void *matchinfo,
7264 +                   unsigned int matchsize)
7265 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
7266 +static void destroy(const struct xt_match *match,
7267 +                   void *matchinfo)
7268 +#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
7269 +static void destroy(const struct xt_mtdtor_param *par)
7270 +#endif
7271 +{
7272 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
7273 +       struct ipt_set_info_match *info = matchinfo;
7274 +#else
7275 +       struct ipt_set_info_match *info = par->matchinfo;
7276 +#endif
7277 +
7278 +
7279 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
7280 +       if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
7281 +               ip_set_printk("invalid matchsize %d", matchsize);
7282 +               return;
7283 +       }
7284 +#endif
7285 +       ip_set_put_byindex(info->match_set.index);
7286 +}
7287 +
7288 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
7289 +static struct xt_match set_match = {
7290 +       .name           = "set",
7291 +       .match          = &match,
7292 +       .checkentry     = &checkentry,
7293 +       .destroy        = &destroy,
7294 +       .me             = THIS_MODULE
7295 +};
7296 +#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17) */
7297 +static struct xt_match set_match = {
7298 +       .name           = "set",
7299 +       .family         = AF_INET,
7300 +       .match          = &match,
7301 +       .matchsize      = sizeof(struct ipt_set_info_match),
7302 +       .checkentry     = &checkentry,
7303 +       .destroy        = &destroy,
7304 +       .me             = THIS_MODULE
7305 +};
7306 +#endif
7307 +
7308 +MODULE_LICENSE("GPL");
7309 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
7310 +MODULE_DESCRIPTION("iptables IP set match module");
7311 +
7312 +static int __init ipt_ipset_init(void)
7313 +{
7314 +       return xt_register_match(&set_match);
7315 +}
7316 +
7317 +static void __exit ipt_ipset_fini(void)
7318 +{
7319 +       xt_unregister_match(&set_match);
7320 +}
7321 +
7322 +module_init(ipt_ipset_init);
7323 +module_exit(ipt_ipset_fini);
7324 --- /dev/null
7325 +++ b/net/ipv4/netfilter/ipt_SET.c
7326 @@ -0,0 +1,242 @@
7327 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
7328 + *                         Patrick Schaaf <bof@bof.de>
7329 + *                         Martin Josefsson <gandalf@wlug.westbo.se>
7330 + * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
7331 + *
7332 + * This program is free software; you can redistribute it and/or modify
7333 + * it under the terms of the GNU General Public License version 2 as
7334 + * published by the Free Software Foundation.
7335 + */
7336 +
7337 +/* ipt_SET.c - netfilter target to manipulate IP sets */
7338 +
7339 +#include <linux/module.h>
7340 +#include <linux/ip.h>
7341 +#include <linux/skbuff.h>
7342 +#include <linux/version.h>
7343 +
7344 +#include <linux/netfilter_ipv4.h>
7345 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
7346 +#include <linux/netfilter_ipv4/ip_tables.h>
7347 +#define xt_register_target     ipt_register_target
7348 +#define xt_unregister_target   ipt_unregister_target
7349 +#define xt_target              ipt_target
7350 +#define XT_CONTINUE            IPT_CONTINUE
7351 +#else
7352 +#include <linux/netfilter/x_tables.h>
7353 +#endif
7354 +#include <linux/netfilter_ipv4/ipt_set.h>
7355 +
7356 +static unsigned int
7357 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
7358 +target(struct sk_buff **pskb,
7359 +       unsigned int hooknum,
7360 +       const struct net_device *in,
7361 +       const struct net_device *out,
7362 +       const void *targinfo,
7363 +       void *userinfo)
7364 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
7365 +target(struct sk_buff **pskb,
7366 +       const struct net_device *in,
7367 +       const struct net_device *out,
7368 +       unsigned int hooknum,
7369 +       const void *targinfo,
7370 +       void *userinfo)
7371 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7372 +target(struct sk_buff **pskb,
7373 +       const struct net_device *in,
7374 +       const struct net_device *out,
7375 +       unsigned int hooknum,
7376 +       const struct xt_target *target,
7377 +       const void *targinfo,
7378 +       void *userinfo)
7379 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
7380 +target(struct sk_buff **pskb,
7381 +       const struct net_device *in,
7382 +       const struct net_device *out,
7383 +       unsigned int hooknum,
7384 +       const struct xt_target *target,
7385 +       const void *targinfo)
7386 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
7387 +target(struct sk_buff *skb,
7388 +       const struct net_device *in,
7389 +       const struct net_device *out,
7390 +       unsigned int hooknum,
7391 +       const struct xt_target *target,
7392 +       const void *targinfo)
7393 +#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
7394 +target(struct sk_buff *skb,
7395 +       const struct xt_target_param *par)
7396 +#endif
7397 +{
7398 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
7399 +       const struct ipt_set_info_target *info = targinfo;
7400 +#else
7401 +       const struct ipt_set_info_target *info = par->targinfo;
7402 +#endif
7403 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
7404 +       struct sk_buff *skb = *pskb;
7405 +#endif
7406 +
7407 +       
7408 +       if (info->add_set.index != IP_SET_INVALID_ID)
7409 +               ip_set_addip_kernel(info->add_set.index,
7410 +                                   skb,
7411 +                                   info->add_set.flags);
7412 +       if (info->del_set.index != IP_SET_INVALID_ID)
7413 +               ip_set_delip_kernel(info->del_set.index,
7414 +                                   skb,
7415 +                                   info->del_set.flags);
7416 +
7417 +       return XT_CONTINUE;
7418 +}
7419 +
7420 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
7421 +static int
7422 +checkentry(const char *tablename,
7423 +          const struct ipt_entry *e,
7424 +          void *targinfo,
7425 +          unsigned int targinfosize,
7426 +          unsigned int hook_mask)
7427 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
7428 +static int
7429 +checkentry(const char *tablename,
7430 +          const void *e,
7431 +          void *targinfo,
7432 +          unsigned int targinfosize,
7433 +          unsigned int hook_mask)
7434 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7435 +static int
7436 +checkentry(const char *tablename,
7437 +          const void *e,
7438 +          const struct xt_target *target,
7439 +          void *targinfo,
7440 +          unsigned int targinfosize,
7441 +          unsigned int hook_mask)
7442 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
7443 +static int
7444 +checkentry(const char *tablename,
7445 +          const void *e,
7446 +          const struct xt_target *target,
7447 +          void *targinfo,
7448 +          unsigned int hook_mask)
7449 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
7450 +static bool
7451 +checkentry(const char *tablename,
7452 +          const void *e,
7453 +          const struct xt_target *target,
7454 +          void *targinfo,
7455 +          unsigned int hook_mask)
7456 +#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
7457 +static bool
7458 +checkentry(const struct xt_tgchk_param *par)
7459 +#endif
7460 +{
7461 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
7462 +       const struct ipt_set_info_target *info = targinfo;
7463 +#else
7464 +       const struct ipt_set_info_target *info = par->targinfo;
7465 +#endif
7466 +       ip_set_id_t index;
7467 +
7468 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
7469 +       if (targinfosize != IPT_ALIGN(sizeof(*info))) {
7470 +               DP("bad target info size %u", targinfosize);
7471 +               return 0;
7472 +       }
7473 +#endif
7474 +
7475 +       if (info->add_set.index != IP_SET_INVALID_ID) {
7476 +               index = ip_set_get_byindex(info->add_set.index);
7477 +               if (index == IP_SET_INVALID_ID) {
7478 +                       ip_set_printk("cannot find add_set index %u as target",
7479 +                                     info->add_set.index);
7480 +                       return 0;       /* error */
7481 +               }
7482 +       }
7483 +
7484 +       if (info->del_set.index != IP_SET_INVALID_ID) {
7485 +               index = ip_set_get_byindex(info->del_set.index);
7486 +               if (index == IP_SET_INVALID_ID) {
7487 +                       ip_set_printk("cannot find del_set index %u as target",
7488 +                                     info->del_set.index);
7489 +                       return 0;       /* error */
7490 +               }
7491 +       }
7492 +       if (info->add_set.flags[IP_SET_MAX_BINDINGS] != 0
7493 +           || info->del_set.flags[IP_SET_MAX_BINDINGS] != 0) {
7494 +               ip_set_printk("That's nasty!");
7495 +               return 0;       /* error */
7496 +       }
7497 +
7498 +       return 1;
7499 +}
7500 +
7501 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
7502 +static void destroy(void *targetinfo,
7503 +                   unsigned int targetsize)
7504 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7505 +static void destroy(const struct xt_target *target,
7506 +                   void *targetinfo,
7507 +                   unsigned int targetsize)
7508 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
7509 +static void destroy(const struct xt_target *target,
7510 +                   void *targetinfo)
7511 +#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
7512 +static void destroy(const struct xt_tgdtor_param *par)
7513 +#endif
7514 +{
7515 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
7516 +       const struct ipt_set_info_target *info = targetinfo;
7517 +#else
7518 +       const struct ipt_set_info_target *info = par->targinfo;
7519 +#endif
7520 +
7521 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
7522 +       if (targetsize != IPT_ALIGN(sizeof(struct ipt_set_info_target))) {
7523 +               ip_set_printk("invalid targetsize %d", targetsize);
7524 +               return;
7525 +       }
7526 +#endif
7527 +       if (info->add_set.index != IP_SET_INVALID_ID)
7528 +               ip_set_put_byindex(info->add_set.index);
7529 +       if (info->del_set.index != IP_SET_INVALID_ID)
7530 +               ip_set_put_byindex(info->del_set.index);
7531 +}
7532 +
7533 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
7534 +static struct xt_target SET_target = {
7535 +       .name           = "SET",
7536 +       .target         = target,
7537 +       .checkentry     = checkentry,
7538 +       .destroy        = destroy,
7539 +       .me             = THIS_MODULE
7540 +};
7541 +#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17) */
7542 +static struct xt_target SET_target = {
7543 +       .name           = "SET",
7544 +       .family         = AF_INET,
7545 +       .target         = target,
7546 +       .targetsize     = sizeof(struct ipt_set_info_target),
7547 +       .checkentry     = checkentry,
7548 +       .destroy        = destroy,
7549 +       .me             = THIS_MODULE
7550 +};
7551 +#endif
7552 +
7553 +MODULE_LICENSE("GPL");
7554 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
7555 +MODULE_DESCRIPTION("iptables IP set target module");
7556 +
7557 +static int __init ipt_SET_init(void)
7558 +{
7559 +       return xt_register_target(&SET_target);
7560 +}
7561 +
7562 +static void __exit ipt_SET_fini(void)
7563 +{
7564 +       xt_unregister_target(&SET_target);
7565 +}
7566 +
7567 +module_init(ipt_SET_init);
7568 +module_exit(ipt_SET_fini);
7569 --- a/net/ipv4/netfilter/Kconfig
7570 +++ b/net/ipv4/netfilter/Kconfig
7571 @@ -388,5 +388,146 @@ config IP_NF_ARP_MANGLE
7572  
7573  endif # IP_NF_ARPTABLES
7574  
7575 +config IP_NF_SET
7576 +       tristate "IP set support"
7577 +       depends on INET && NETFILTER
7578 +       help
7579 +         This option adds IP set support to the kernel.
7580 +         In order to define and use sets, you need the userspace utility
7581 +         ipset(8).
7582 +
7583 +         To compile it as a module, choose M here.  If unsure, say N.
7584 +
7585 +config IP_NF_SET_MAX
7586 +       int "Maximum number of IP sets"
7587 +       default 256
7588 +       range 2 65534
7589 +       depends on IP_NF_SET
7590 +       help
7591 +         Here you can define the default maximum number of IP sets
7592 +         for the kernel.
7593 +
7594 +         The value can be overridden by the 'max_sets' module
7595 +         parameter of the 'ip_set' module.
7596 +
7597 +config IP_NF_SET_HASHSIZE
7598 +       int "Hash size for bindings of IP sets"
7599 +       default 1024
7600 +       depends on IP_NF_SET
7601 +       help
7602 +         Here you can define the default hash size for bindings
7603 +         of IP sets.
7604 +
7605 +         The value can be overridden by the 'hash_size' module
7606 +         parameter of the 'ip_set' module.
7607 +
7608 +config IP_NF_SET_IPMAP
7609 +       tristate "ipmap set support"
7610 +       depends on IP_NF_SET
7611 +       help
7612 +         This option adds the ipmap set type support.
7613 +
7614 +         To compile it as a module, choose M here.  If unsure, say N.
7615 +
7616 +config IP_NF_SET_MACIPMAP
7617 +       tristate "macipmap set support"
7618 +       depends on IP_NF_SET
7619 +       help
7620 +         This option adds the macipmap set type support.
7621 +
7622 +         To compile it as a module, choose M here.  If unsure, say N.
7623 +
7624 +config IP_NF_SET_PORTMAP
7625 +       tristate "portmap set support"
7626 +       depends on IP_NF_SET
7627 +       help
7628 +         This option adds the portmap set type support.
7629 +
7630 +         To compile it as a module, choose M here.  If unsure, say N.
7631 +
7632 +config IP_NF_SET_IPHASH
7633 +       tristate "iphash set support"
7634 +       depends on IP_NF_SET
7635 +       help
7636 +         This option adds the iphash set type support.
7637 +
7638 +         To compile it as a module, choose M here.  If unsure, say N.
7639 +
7640 +config IP_NF_SET_NETHASH
7641 +       tristate "nethash set support"
7642 +       depends on IP_NF_SET
7643 +       help
7644 +         This option adds the nethash set type support.
7645 +
7646 +         To compile it as a module, choose M here.  If unsure, say N.
7647 +
7648 +config IP_NF_SET_IPPORTHASH
7649 +       tristate "ipporthash set support"
7650 +       depends on IP_NF_SET
7651 +       help
7652 +         This option adds the ipporthash set type support.
7653 +
7654 +         To compile it as a module, choose M here.  If unsure, say N.
7655 +
7656 +config IP_NF_SET_IPPORTIPHASH
7657 +       tristate "ipportiphash set support"
7658 +       depends on IP_NF_SET
7659 +       help
7660 +         This option adds the ipportiphash set type support.
7661 +
7662 +         To compile it as a module, choose M here.  If unsure, say N.
7663 +
7664 +config IP_NF_SET_IPPORTNETHASH
7665 +       tristate "ipportnethash set support"
7666 +       depends on IP_NF_SET
7667 +       help
7668 +         This option adds the ipportnethash set type support.
7669 +
7670 +         To compile it as a module, choose M here.  If unsure, say N.
7671 +
7672 +config IP_NF_SET_IPTREE
7673 +       tristate "iptree set support"
7674 +       depends on IP_NF_SET
7675 +       help
7676 +         This option adds the iptree set type support.
7677 +
7678 +         To compile it as a module, choose M here.  If unsure, say N.
7679 +
7680 +config IP_NF_SET_IPTREEMAP
7681 +       tristate "iptreemap set support"
7682 +       depends on IP_NF_SET
7683 +       help
7684 +         This option adds the iptreemap set type support.
7685 +
7686 +         To compile it as a module, choose M here.  If unsure, say N.
7687 +
7688 +config IP_NF_SET_SETLIST
7689 +       tristate "setlist set support"
7690 +       depends on IP_NF_SET
7691 +       help
7692 +         This option adds the setlist set type support.
7693 +
7694 +         To compile it as a module, choose M here.  If unsure, say N.
7695 +
7696 +config IP_NF_MATCH_SET
7697 +       tristate "set match support"
7698 +       depends on IP_NF_SET
7699 +       help
7700 +         Set matching matches packets against the given IP sets.
7701 +         You need the ipset utility to create and set up the sets.
7702 +
7703 +         To compile it as a module, choose M here.  If unsure, say N.
7704 +
7705 +config IP_NF_TARGET_SET
7706 +       tristate "SET target support"
7707 +       depends on IP_NF_SET
7708 +       help
7709 +         The SET target makes it possible to add/delete entries
7710 +         in IP sets.
7711 +         You need the ipset utility to create and set up the sets.
7712 +
7713 +         To compile it as a module, choose M here.  If unsure, say N.
7714 +
7715 +
7716  endmenu
7717  
7718 --- a/net/ipv4/netfilter/Makefile
7719 +++ b/net/ipv4/netfilter/Makefile
7720 @@ -51,6 +51,7 @@ obj-$(CONFIG_IP_NF_SECURITY) += iptable_
7721  obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o
7722  obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o
7723  obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn.o
7724 +obj-$(CONFIG_IP_NF_MATCH_SET) += ipt_set.o
7725  
7726  # targets
7727  obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
7728 @@ -61,6 +62,21 @@ obj-$(CONFIG_IP_NF_TARGET_NETMAP) += ipt
7729  obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += ipt_REDIRECT.o
7730  obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
7731  obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
7732 +obj-$(CONFIG_IP_NF_TARGET_SET) += ipt_SET.o
7733 +
7734 +# sets
7735 +obj-$(CONFIG_IP_NF_SET) += ip_set.o
7736 +obj-$(CONFIG_IP_NF_SET_IPMAP) += ip_set_ipmap.o
7737 +obj-$(CONFIG_IP_NF_SET_PORTMAP) += ip_set_portmap.o
7738 +obj-$(CONFIG_IP_NF_SET_MACIPMAP) += ip_set_macipmap.o
7739 +obj-$(CONFIG_IP_NF_SET_IPHASH) += ip_set_iphash.o
7740 +obj-$(CONFIG_IP_NF_SET_NETHASH) += ip_set_nethash.o
7741 +obj-$(CONFIG_IP_NF_SET_IPPORTHASH) += ip_set_ipporthash.o
7742 +obj-$(CONFIG_IP_NF_SET_IPPORTIPHASH) += ip_set_ipportiphash.o
7743 +obj-$(CONFIG_IP_NF_SET_IPPORTNETHASH) += ip_set_ipportnethash.o
7744 +obj-$(CONFIG_IP_NF_SET_IPTREE) += ip_set_iptree.o
7745 +obj-$(CONFIG_IP_NF_SET_IPTREEMAP) += ip_set_iptreemap.o
7746 +obj-$(CONFIG_IP_NF_SET_SETLIST) += ip_set_setlist.o
7747  
7748  # generic ARP tables
7749  obj-$(CONFIG_IP_NF_ARPTABLES) += arp_tables.o