Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/kaber/nf-next-2.6

Committer: David S. Miller
Date: 2010-08-02 15:07:58 -07:00

45 files changed, 807 insertions(+), 364 deletions(-)

--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c

@@ -283,16 +283,13 @@ unsigned int arpt_do_table(struct sk_buff *skb,
 	arp = arp_hdr(skb);
 	do {
 		const struct arpt_entry_target *t;
-		int hdr_len;
 
 		if (!arp_packet_match(arp, skb->dev, indev, outdev, &e->arp)) {
 			e = arpt_next_entry(e);
 			continue;
 		}
 
-		hdr_len = sizeof(*arp) + (2 * sizeof(struct in_addr)) +
-			(2 * skb->dev->addr_len);
-		ADD_COUNTER(e->counters, hdr_len, 1);
+		ADD_COUNTER(e->counters, arp_hdr_len(skb->dev), 1);
 
 		t = arpt_get_target_c(e);
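
Note: the removed open-coded length and arp_hdr_len() compute the same
value, since sizeof(struct in_addr) == sizeof(u32) == 4; the helper in
include/linux/if_arp.h just centralizes the arithmetic. A minimal
user-space sketch of the equivalence (Ethernet values assumed; not the
kernel code itself):

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int arphdr_size  = 8; /* sizeof(struct arphdr) */
            unsigned int addr_len     = 6; /* dev->addr_len on Ethernet */
            unsigned int in_addr_size = 4; /* sizeof(struct in_addr) */

            /* removed expression: sizeof(*arp) + 2 * sizeof(struct in_addr)
             *                     + 2 * skb->dev->addr_len */
            unsigned int open_coded = arphdr_size + 2 * in_addr_size + 2 * addr_len;

            /* arp_hdr_len(): sizeof(struct arphdr) + (dev->addr_len + sizeof(u32)) * 2 */
            unsigned int helper = arphdr_size + (addr_len + 4) * 2;

            assert(open_coded == helper); /* both 28 bytes on Ethernet */
            printf("ARP header length: %u bytes\n", helper);
            return 0;
    }
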
@@ -713,7 +710,7 @@ static void get_counters(const struct xt_table_info *t,
 	struct arpt_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu;
+	unsigned int curcpu = get_cpu();
 
 	/* Instead of clearing (by a previous call to memset())
 	 * the counters and using adds, we set the counters
@@ -723,14 +720,16 @@ static void get_counters(const struct xt_table_info *t,
 	 * if new softirq were to run and call ipt_do_table
 	 */
 	local_bh_disable();
-	curcpu = smp_processor_id();
-
 	i = 0;
 	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
 		SET_COUNTER(counters[i], iter->counters.bcnt,
 			    iter->counters.pcnt);
 		++i;
 	}
+	local_bh_enable();
+	/* Processing counters from other cpus, we can let bottom half enabled,
+	 * (preemption is disabled)
+	 */
 
 	for_each_possible_cpu(cpu) {
 		if (cpu == curcpu)
@@ -744,7 +743,7 @@ static void get_counters(const struct xt_table_info *t,
 		}
 		xt_info_wrunlock(cpu);
 	}
-	local_bh_enable();
+	put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
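
Note on the get_counters() rework: curcpu is now pinned with get_cpu(),
which disables preemption for the whole walk; bottom halves stay
disabled only while snapshotting the local CPU's entries, and the other
CPUs are read under their own xt_info_wrlock(). A rough user-space
analogy of the locking shape (a mutex per slot stands in for the
per-CPU protection; names are hypothetical, not the kernel API):

    #include <pthread.h>
    #include <stdio.h>

    #define NSLOTS 4

    static unsigned long counters[NSLOTS] = { 5, 10, 15, 20 };
    static pthread_mutex_t slot_lock[NSLOTS] = {
            PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
            PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    };

    static void snapshot(unsigned long *snap, int curcpu)
    {
            /* local slot: short critical section, like the
             * local_bh_disable()/local_bh_enable() pair around
             * the t->entries[curcpu] loop */
            pthread_mutex_lock(&slot_lock[curcpu]);
            snap[curcpu] = counters[curcpu];
            pthread_mutex_unlock(&slot_lock[curcpu]);

            /* remaining slots: each under its own lock, like
             * xt_info_wrlock(cpu)/xt_info_wrunlock(cpu) */
            for (int cpu = 0; cpu < NSLOTS; cpu++) {
                    if (cpu == curcpu)
                            continue;
                    pthread_mutex_lock(&slot_lock[cpu]);
                    snap[cpu] = counters[cpu];
                    pthread_mutex_unlock(&slot_lock[cpu]);
            }
    }

    int main(void)
    {
            unsigned long snap[NSLOTS];

            snapshot(snap, 0);
            for (int i = 0; i < NSLOTS; i++)
                    printf("slot %d: %lu\n", i, snap[i]);
            return 0;
    }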

--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c

@@ -364,7 +364,7 @@ ipt_do_table(struct sk_buff *skb,
 			goto no_match;
 		}
 
-		ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);
+		ADD_COUNTER(e->counters, skb->len, 1);
 
 		t = ipt_get_target(e);
 		IP_NF_ASSERT(t->u.kernel.target);
@@ -884,7 +884,7 @@ get_counters(const struct xt_table_info *t,
 	struct ipt_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu;
+	unsigned int curcpu = get_cpu();
 
 	/* Instead of clearing (by a previous call to memset())
 	 * the counters and using adds, we set the counters
@@ -894,14 +894,16 @@ get_counters(const struct xt_table_info *t,
 	 * if new softirq were to run and call ipt_do_table
 	 */
 	local_bh_disable();
-	curcpu = smp_processor_id();
-
 	i = 0;
 	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
 		SET_COUNTER(counters[i], iter->counters.bcnt,
 			    iter->counters.pcnt);
 		++i;
 	}
+	local_bh_enable();
+	/* Processing counters from other cpus, we can let bottom half enabled,
+	 * (preemption is disabled)
+	 */
 
 	for_each_possible_cpu(cpu) {
 		if (cpu == curcpu)
@@ -915,7 +917,7 @@ get_counters(const struct xt_table_info *t,
 		}
 		xt_info_wrunlock(cpu);
 	}
-	local_bh_enable();
+	put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
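
Note: the ip_tables.c changes mirror the arp_tables.c ones above (same
get_cpu()-based get_counters() rework). The one distinct fix is in
ipt_do_table(): byte accounting now uses skb->len rather than
re-reading the IP header's 16-bit tot_len field, which cannot
represent a large aggregated (GSO) skb's true length.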

--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c

@@ -95,10 +95,11 @@ static void send_reset(struct sk_buff *oldskb, int hook)
 	}
 
 	tcph->rst	= 1;
-	tcph->check	= tcp_v4_check(sizeof(struct tcphdr),
-				       niph->saddr, niph->daddr,
-				       csum_partial(tcph,
-						    sizeof(struct tcphdr), 0));
+	tcph->check	= ~tcp_v4_check(sizeof(struct tcphdr), niph->saddr,
+					niph->daddr, 0);
+	nskb->ip_summed = CHECKSUM_PARTIAL;
+	nskb->csum_start = (unsigned char *)tcph - nskb->head;
+	nskb->csum_offset = offsetof(struct tcphdr, check);
 
 	addr_type = RTN_UNSPEC;
 	if (hook != NF_INET_FORWARD
@@ -115,7 +116,6 @@ static void send_reset(struct sk_buff *oldskb, int hook)
 		goto free_nskb;
 
 	niph->ttl	= dst_metric(skb_dst(nskb), RTAX_HOPLIMIT);
-	nskb->ip_summed = CHECKSUM_NONE;
 
 	/* "Never happens" */
 	if (nskb->len > dst_mtu(skb_dst(nskb)))
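
Note: send_reset() now emits the RST with CHECKSUM_PARTIAL: tcph->check
is seeded with the complemented pseudo-header sum, and csum_start/
csum_offset tell the device (or the software fallback) where to fold in
the rest. A user-space sketch of why the two-step ones'-complement
computation equals a one-shot checksum (RFC 1071 arithmetic; the header
words are arbitrary test data):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t sum16(const uint16_t *p, int n, uint32_t acc)
    {
            while (n--)
                    acc += *p++;
            return acc;
    }

    static uint16_t csum(uint32_t sum) /* fold carries, complement */
    {
            while (sum >> 16)
                    sum = (sum & 0xffff) + (sum >> 16);
            return (uint16_t)~sum;
    }

    int main(void)
    {
            uint16_t pseudo[6] = { 0xc0a8, 0x0001, 0xc0a8, 0x0002, /* addresses */
                                   0x0006, 0x0014 };               /* proto, length */
            uint16_t tcph[10]  = { 0x04d2, 0x0050, 0, 0, 0, 0,
                                   0x5014, 0, 0 /* check */, 0 };

            /* one-shot: checksum over pseudo-header + TCP header */
            uint16_t oneshot = csum(sum16(tcph, 10, sum16(pseudo, 6, 0)));

            /* offload style: seed check like ~tcp_v4_check(len, s, d, 0),
             * then checksum from csum_start and store at csum_offset */
            tcph[8] = (uint16_t)~csum(sum16(pseudo, 6, 0));
            uint16_t offload = csum(sum16(tcph, 10, 0));

            assert(oneshot == offload);
            printf("checksum: 0x%04x\n", offload);
            return 0;
    }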

--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c

@@ -261,14 +261,9 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
 	rcu_read_lock();
 	proto = __nf_nat_proto_find(orig_tuple->dst.protonum);
 
-	/* Change protocol info to have some randomization */
-	if (range->flags & IP_NAT_RANGE_PROTO_RANDOM) {
-		proto->unique_tuple(tuple, range, maniptype, ct);
-		goto out;
-	}
-
 	/* Only bother mapping if it's not already in range and unique */
-	if ((!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) ||
+	if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM) &&
+	    (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) ||
 	     proto->in_range(tuple, maniptype, &range->min, &range->max)) &&
 	    !nf_nat_used_tuple(tuple, ct))
 		goto out;
@@ -440,7 +435,7 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct,
 	if (!skb_make_writable(skb, hdrlen + sizeof(*inside)))
 		return 0;
 
-	inside = (void *)skb->data + ip_hdrlen(skb);
+	inside = (void *)skb->data + hdrlen;
 
 	/* We're actually going to mangle it beyond trivial checksum
 	   adjustment, so make sure the current checksum is correct. */
@@ -470,12 +465,10 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct,
 	/* rcu_read_lock()ed by nf_hook_slow */
 	l4proto = __nf_ct_l4proto_find(PF_INET, inside->ip.protocol);
 
-	if (!nf_ct_get_tuple(skb,
-			     ip_hdrlen(skb) + sizeof(struct icmphdr),
-			     (ip_hdrlen(skb) +
+	if (!nf_ct_get_tuple(skb, hdrlen + sizeof(struct icmphdr),
+			     (hdrlen +
 			      sizeof(struct icmphdr) + inside->ip.ihl * 4),
-			     (u_int16_t)AF_INET,
-			     inside->ip.protocol,
+			     (u_int16_t)AF_INET, inside->ip.protocol,
 			     &inner, l3proto, l4proto))
 		return 0;
@@ -484,15 +477,13 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct,
 	   pass all hooks (locally-generated ICMP).  Consider incoming
 	   packet: PREROUTING (DST manip), routing produces ICMP, goes
 	   through POSTROUTING (which must correct the DST manip). */
-	if (!manip_pkt(inside->ip.protocol, skb,
-		       ip_hdrlen(skb) + sizeof(inside->icmp),
-		       &ct->tuplehash[!dir].tuple,
-		       !manip))
+	if (!manip_pkt(inside->ip.protocol, skb, hdrlen + sizeof(inside->icmp),
+		       &ct->tuplehash[!dir].tuple, !manip))
 		return 0;
 
 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
 		/* Reloading "inside" here since manip_pkt inner. */
-		inside = (void *)skb->data + ip_hdrlen(skb);
+		inside = (void *)skb->data + hdrlen;
 		inside->icmp.checksum = 0;
 		inside->icmp.checksum =
 			csum_fold(skb_checksum(skb, hdrlen,
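
Note: in get_unique_tuple() the early IP_NAT_RANGE_PROTO_RANDOM branch
is folded into the single guard, so a randomized range simply never
takes the keep-the-tuple shortcut and falls through to
proto->unique_tuple(); in the ICMP path, hdrlen caches ip_hdrlen(skb)
instead of recomputing it. A small check that the old and new guards
admit exactly the same cases (stand-in booleans, hypothetical names):

    #include <assert.h>
    #include <stdbool.h>

    static bool keep_old(bool random, bool specified, bool in_range, bool used)
    {
            if (random)             /* old code jumped straight to unique_tuple */
                    return false;
            return (!specified || in_range) && !used;
    }

    static bool keep_new(bool random, bool specified, bool in_range, bool used)
    {
            return !random && (!specified || in_range) && !used;
    }

    int main(void)
    {
            for (int m = 0; m < 16; m++)
                    assert(keep_old(m & 1, m & 2, m & 4, m & 8) ==
                           keep_new(m & 1, m & 2, m & 4, m & 8));
            return 0;
    }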

--- a/net/ipv4/netfilter/nf_nat_proto_common.c
+++ b/net/ipv4/netfilter/nf_nat_proto_common.c

@@ -34,7 +34,7 @@ bool nf_nat_proto_in_range(const struct nf_conntrack_tuple *tuple,
 }
 EXPORT_SYMBOL_GPL(nf_nat_proto_in_range);
 
-bool nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
+void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
 			       const struct nf_nat_range *range,
 			       enum nf_nat_manip_type maniptype,
 			       const struct nf_conn *ct,
@@ -53,7 +53,7 @@ bool nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
 	if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) {
 		/* If it's dst rewrite, can't change port */
 		if (maniptype == IP_NAT_MANIP_DST)
-			return false;
+			return;
 
 		if (ntohs(*portptr) < 1024) {
 			/* Loose convention: >> 512 is credential passing */
@@ -81,15 +81,15 @@ bool nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
 	else
 		off = *rover;
 
-	for (i = 0; i < range_size; i++, off++) {
+	for (i = 0; ; ++off) {
 		*portptr = htons(min + off % range_size);
-		if (nf_nat_used_tuple(tuple, ct))
+		if (++i != range_size && nf_nat_used_tuple(tuple, ct))
 			continue;
 		if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM))
 			*rover = off;
-		return true;
+		return;
 	}
-	return false;
+	return;
 }
 EXPORT_SYMBOL_GPL(nf_nat_proto_unique_tuple);
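
Note: nf_nat_proto_unique_tuple() now returns void; the rewritten loop
probes the same range_size candidates, but on exhaustion it leaves the
last candidate in the tuple instead of reporting failure, and the
bool-to-void conversion cascades through every protocol helper below
(dccp, gre, icmp, sctp, tcp, udp, udplite, unknown). A user-space
sketch of the new search loop (toy predicate, hypothetical names):

    #include <stdbool.h>
    #include <stdio.h>

    static bool used(unsigned int port)
    {
            return port != 1030;    /* toy: everything but 1030 is taken */
    }

    static unsigned int pick_port(unsigned int min, unsigned int range_size,
                                  unsigned int off)
    {
            unsigned int i, port;

            for (i = 0; ; ++off) {
                    port = min + off % range_size;
                    if (++i != range_size && used(port))
                            continue;       /* keep probing until the last slot */
                    return port;            /* free port, or the final attempt */
            }
    }

    int main(void)
    {
            printf("picked %u\n", pick_port(1024, 16, 0)); /* -> 1030 */
            return 0;
    }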

--- a/net/ipv4/netfilter/nf_nat_proto_dccp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_dccp.c

@@ -22,14 +22,14 @@
 
 static u_int16_t dccp_port_rover;
 
-static bool
+static void
 dccp_unique_tuple(struct nf_conntrack_tuple *tuple,
 		  const struct nf_nat_range *range,
 		  enum nf_nat_manip_type maniptype,
 		  const struct nf_conn *ct)
 {
-	return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
-					 &dccp_port_rover);
+	nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
+				  &dccp_port_rover);
 }
 
 static bool

--- a/net/ipv4/netfilter/nf_nat_proto_gre.c
+++ b/net/ipv4/netfilter/nf_nat_proto_gre.c

@@ -37,7 +37,7 @@ MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
 MODULE_DESCRIPTION("Netfilter NAT protocol helper module for GRE");
 
 /* generate unique tuple ... */
-static bool
+static void
 gre_unique_tuple(struct nf_conntrack_tuple *tuple,
 		 const struct nf_nat_range *range,
 		 enum nf_nat_manip_type maniptype,
@@ -50,7 +50,7 @@ gre_unique_tuple(struct nf_conntrack_tuple *tuple,
 	/* If there is no master conntrack we are not PPTP,
 	   do not change tuples */
 	if (!ct->master)
-		return false;
+		return;
 
 	if (maniptype == IP_NAT_MANIP_SRC)
 		keyptr = &tuple->src.u.gre.key;
@@ -68,14 +68,14 @@ gre_unique_tuple(struct nf_conntrack_tuple *tuple,
 
 	pr_debug("min = %u, range_size = %u\n", min, range_size);
 
-	for (i = 0; i < range_size; i++, key++) {
+	for (i = 0; ; ++key) {
 		*keyptr = htons(min + key % range_size);
-		if (!nf_nat_used_tuple(tuple, ct))
-			return true;
+		if (++i == range_size || !nf_nat_used_tuple(tuple, ct))
+			return;
 	}
 
 	pr_debug("%p: no NAT mapping\n", ct);
-	return false;
+	return;
 }
 
 /* manipulate a GRE packet according to maniptype */

--- a/net/ipv4/netfilter/nf_nat_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_icmp.c

@@ -27,7 +27,7 @@ icmp_in_range(const struct nf_conntrack_tuple *tuple,
 	       ntohs(tuple->src.u.icmp.id) <= ntohs(max->icmp.id);
 }
 
-static bool
+static void
 icmp_unique_tuple(struct nf_conntrack_tuple *tuple,
 		  const struct nf_nat_range *range,
 		  enum nf_nat_manip_type maniptype,
@@ -42,13 +42,13 @@ icmp_unique_tuple(struct nf_conntrack_tuple *tuple,
 	if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED))
 		range_size = 0xFFFF;
 
-	for (i = 0; i < range_size; i++, id++) {
+	for (i = 0; ; ++id) {
 		tuple->src.u.icmp.id = htons(ntohs(range->min.icmp.id) +
 					     (id % range_size));
-		if (!nf_nat_used_tuple(tuple, ct))
-			return true;
+		if (++i == range_size || !nf_nat_used_tuple(tuple, ct))
+			return;
 	}
-	return false;
+	return;
 }
 
 static bool

--- a/net/ipv4/netfilter/nf_nat_proto_sctp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_sctp.c

@@ -16,14 +16,14 @@
 
 static u_int16_t nf_sctp_port_rover;
 
-static bool
+static void
 sctp_unique_tuple(struct nf_conntrack_tuple *tuple,
 		  const struct nf_nat_range *range,
 		  enum nf_nat_manip_type maniptype,
 		  const struct nf_conn *ct)
 {
-	return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
-					 &nf_sctp_port_rover);
+	nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
+				  &nf_sctp_port_rover);
 }
 
 static bool

--- a/net/ipv4/netfilter/nf_nat_proto_tcp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_tcp.c

@@ -20,14 +20,13 @@
 
 static u_int16_t tcp_port_rover;
 
-static bool
+static void
 tcp_unique_tuple(struct nf_conntrack_tuple *tuple,
 		 const struct nf_nat_range *range,
 		 enum nf_nat_manip_type maniptype,
 		 const struct nf_conn *ct)
 {
-	return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
-					 &tcp_port_rover);
+	nf_nat_proto_unique_tuple(tuple, range, maniptype, ct, &tcp_port_rover);
 }
 
 static bool

--- a/net/ipv4/netfilter/nf_nat_proto_udp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_udp.c

@@ -19,14 +19,13 @@
 
 static u_int16_t udp_port_rover;
 
-static bool
+static void
 udp_unique_tuple(struct nf_conntrack_tuple *tuple,
 		 const struct nf_nat_range *range,
 		 enum nf_nat_manip_type maniptype,
 		 const struct nf_conn *ct)
 {
-	return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
-					 &udp_port_rover);
+	nf_nat_proto_unique_tuple(tuple, range, maniptype, ct, &udp_port_rover);
 }
 
 static bool

--- a/net/ipv4/netfilter/nf_nat_proto_udplite.c
+++ b/net/ipv4/netfilter/nf_nat_proto_udplite.c

@@ -18,14 +18,14 @@
 
 static u_int16_t udplite_port_rover;
 
-static bool
+static void
 udplite_unique_tuple(struct nf_conntrack_tuple *tuple,
 		     const struct nf_nat_range *range,
 		     enum nf_nat_manip_type maniptype,
 		     const struct nf_conn *ct)
 {
-	return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
-					 &udplite_port_rover);
+	nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
+				  &udplite_port_rover);
 }
 
 static bool

--- a/net/ipv4/netfilter/nf_nat_proto_unknown.c
+++ b/net/ipv4/netfilter/nf_nat_proto_unknown.c

@@ -26,14 +26,14 @@ static bool unknown_in_range(const struct nf_conntrack_tuple *tuple,
 	return true;
 }
 
-static bool unknown_unique_tuple(struct nf_conntrack_tuple *tuple,
+static void unknown_unique_tuple(struct nf_conntrack_tuple *tuple,
 				 const struct nf_nat_range *range,
 				 enum nf_nat_manip_type maniptype,
 				 const struct nf_conn *ct)
 {
 	/* Sorry: we can't help you; if it's not unique, we can't frob
 	   anything. */
-	return false;
+	return;
 }
 
 static bool