1 --- a/include/linux/netfilter_ipv4/ip_tables.h
2 +++ b/include/linux/netfilter_ipv4/ip_tables.h
3 @@ -62,6 +62,7 @@ struct ipt_ip {
4 #define IPT_F_FRAG 0x01 /* Set if rule is a fragment rule */
5 #define IPT_F_GOTO 0x02 /* Set if jump is a goto */
6 #define IPT_F_MASK 0x03 /* All possible flag bits mask. */
7 +#define IPT_F_NO_DEF_MATCH 0x80 /* Internal: no default match rules present */
9 /* Values for "inv" field in struct ipt_ip. */
10 #define IPT_INV_VIA_IN 0x01 /* Invert the sense of IN IFACE. */
11 --- a/net/ipv4/netfilter/ip_tables.c
12 +++ b/net/ipv4/netfilter/ip_tables.c
13 @@ -87,6 +87,9 @@ ip_packet_match(const struct iphdr *ip,
15 #define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg)))
17 + if (ipinfo->flags & IPT_F_NO_DEF_MATCH)
20 if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
22 || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
23 @@ -137,13 +140,35 @@ ip_packet_match(const struct iphdr *ip,
32 -ip_checkentry(const struct ipt_ip *ip)
33 +ip_checkentry(struct ipt_ip *ip)
35 - if (ip->flags & ~IPT_F_MASK) {
+#undef FWINV
+#define FWINV(bool, invflg) ((bool) || (ip->invflags & (invflg)))
38 + if (FWINV(ip->smsk.s_addr, IPT_INV_SRCIP) ||
39 + FWINV(ip->dmsk.s_addr, IPT_INV_DSTIP))
40 + goto has_match_rules;
42 + if (FWINV(!!((const unsigned long *)ip->iniface_mask)[0],
44 + FWINV(!!((const unsigned long *)ip->outiface_mask)[0],
46 + goto has_match_rules;
48 + if (FWINV(ip->proto, IPT_INV_PROTO))
49 + goto has_match_rules;
51 + if (FWINV(ip->flags&IPT_F_FRAG, IPT_INV_FRAG))
52 + goto has_match_rules;
54 + ip->flags |= IPT_F_NO_DEF_MATCH;
57 + if (ip->flags & ~(IPT_F_MASK|IPT_F_NO_DEF_MATCH)) {
 		duprintf("Unknown flag bits set: %08X\n",
-			 ip->flags & ~IPT_F_MASK);
+			 ip->flags & ~(IPT_F_MASK|IPT_F_NO_DEF_MATCH));
61 @@ -153,6 +178,8 @@ ip_checkentry(const struct ipt_ip *ip)
62 ip->invflags & ~IPT_INV_MASK);
70 @@ -200,7 +227,6 @@ unconditional(const struct ipt_ip *ip)
77 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
78 @@ -318,8 +344,28 @@ ipt_do_table(struct sk_buff *skb,
79 struct xt_match_param mtpar;
80 struct xt_target_param tgpar;
82 - /* Initialization */
85 + IP_NF_ASSERT(table->valid_hooks & (1 << hook));
86 + xt_info_rdlock_bh();
87 + private = table->private;
88 + table_base = private->entries[smp_processor_id()];
89 + e = get_entry(table_base, private->hook_entry[hook]);
91 + if (e->target_offset <= sizeof(struct ipt_entry) &&
92 + (e->ip.flags & IPT_F_NO_DEF_MATCH)) {
93 + struct ipt_entry_target *t = ipt_get_target(e);
94 + if (!t->u.kernel.target->target) {
95 + int v = ((struct ipt_standard_target *)t)->verdict;
96 + if ((v < 0) && (v != IPT_RETURN)) {
97 + ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);
98 + xt_info_rdunlock_bh();
99 + return (unsigned)(-v) - 1;
104 + /* Initialization */
105 datalen = skb->len - ip->ihl * 4;
106 indev = in ? in->name : nulldevname;
107 outdev = out ? out->name : nulldevname;
108 @@ -337,13 +383,6 @@ ipt_do_table(struct sk_buff *skb,
109 mtpar.family = tgpar.family = NFPROTO_IPV4;
110 tgpar.hooknum = hook;
112 - IP_NF_ASSERT(table->valid_hooks & (1 << hook));
113 - xt_info_rdlock_bh();
114 - private = table->private;
115 - table_base = private->entries[smp_processor_id()];
117 - e = get_entry(table_base, private->hook_entry[hook]);
119 /* For return from builtin chain */
120 back = get_entry(table_base, private->underflow[hook]);
122 @@ -976,6 +1015,7 @@ copy_entries_to_user(unsigned int total_
124 const struct ipt_entry_match *m;
125 const struct ipt_entry_target *t;
128 e = (struct ipt_entry *)(loc_cpu_entry + off);
129 if (copy_to_user(userptr + off
130 @@ -986,6 +1026,14 @@ copy_entries_to_user(unsigned int total_
134 + flags = e->ip.flags & ~IPT_F_NO_DEF_MATCH;
135 + if (copy_to_user(userptr + off
136 + + offsetof(struct ipt_entry, ip.flags),
137 + &flags, sizeof(flags)) != 0) {
139 + goto free_counters;
142 for (i = sizeof(struct ipt_entry);
143 i < e->target_offset;
144 i += m->u.match_size) {