Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next
Pablo Neira Ayuso says: ==================== Netfilter/IPVS updates for net-next The following patchset contains Netfilter/IPVS updates for net-next, they are: 1) Count pre-established connections as active in "least connection" schedulers so that pre-established connections avoid overloading backend servers on peak demands, from Michal Kubecek via Simon Horman. 2) Address a race condition when resizing the conntrack table by caching the bucket size when fully iterating over the hashtable in these three possible scenarios: 1) dump via /proc/net/nf_conntrack, 2) unlinking userspace helper and 3) unlinking custom conntrack timeout. From Liping Zhang. 3) Revisit early_drop() path to perform lockless traversal on conntrack eviction under stress, use del_timer() as synchronization point to avoid two CPUs evicting the same entry, from Florian Westphal. 4) Move NAT hlist_head to nf_conn object, this simplifies the existing NAT extension and it doesn't increase size since recent patches to align nf_conn, from Florian. 5) Use rhashtable for the by-source NAT hashtable, also from Florian. 6) Don't allow --physdev-is-out from OUTPUT chain, just like --physdev-out is not either, from Hangbin Liu. 7) Automagically set on nf_conntrack counters if the user tries to match ct bytes/packets from nftables, from Liping Zhang. 8) Remove possible_net_t fields in nf_tables set objects since we just simply pass the net pointer to the backend set type implementations. 9) Fix possible off-by-one in h323, from Toby DiPasquale. 10) early_drop() may be called from the ctnetlink path, so we must hold the rcu read side lock from there too, this amends Florian's patch #3 coming in this batch, from Liping Zhang. 11) Use binary search to validate jump offset in x_tables, this addresses the O(n!) validation that was introduced recently to resolve security issues with unprivileged namespaces, from Florian. 12) Fix reference leak to connlabel in error path of nft_ct, from Zhang. 
13) Three updates for nft_log: Fix log prefix leak in error path. Bail out on loglevel larger than debug in nft_log and set on the new NF_LOG_F_COPY_LEN flag when snaplen is specified. Again from Zhang. 14) Allow to filter rule dumps in nf_tables based on table and chain names. 15) Simplify connlabel to always use 128 bits to store labels and get rid of unused function in xt_connlabel, from Florian. 16) Replace set_expect_timeout() by mod_timer() from the h323 conntrack helper, by Gao Feng. 17) Put back x_tables module reference in nft_compat on error, from Liping Zhang. 18) Add a reference count to the x_tables extensions cache in nft_compat, so we can remove them when unused and avoid a crash if the extension modules are removed, again from Zhang. ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
@@ -299,23 +299,12 @@ static inline bool unconditional(const struct arpt_entry *e)
|
||||
memcmp(&e->arp, &uncond, sizeof(uncond)) == 0;
|
||||
}
|
||||
|
||||
static bool find_jump_target(const struct xt_table_info *t,
|
||||
const struct arpt_entry *target)
|
||||
{
|
||||
struct arpt_entry *iter;
|
||||
|
||||
xt_entry_foreach(iter, t->entries, t->size) {
|
||||
if (iter == target)
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Figures out from what hook each rule can be called: returns 0 if
|
||||
* there are loops. Puts hook bitmask in comefrom.
|
||||
*/
|
||||
static int mark_source_chains(const struct xt_table_info *newinfo,
|
||||
unsigned int valid_hooks, void *entry0)
|
||||
unsigned int valid_hooks, void *entry0,
|
||||
unsigned int *offsets)
|
||||
{
|
||||
unsigned int hook;
|
||||
|
||||
@@ -388,10 +377,11 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
|
||||
XT_STANDARD_TARGET) == 0 &&
|
||||
newpos >= 0) {
|
||||
/* This a jump; chase it. */
|
||||
if (!xt_find_jump_offset(offsets, newpos,
|
||||
newinfo->number))
|
||||
return 0;
|
||||
e = (struct arpt_entry *)
|
||||
(entry0 + newpos);
|
||||
if (!find_jump_target(newinfo, e))
|
||||
return 0;
|
||||
} else {
|
||||
/* ... this is a fallthru */
|
||||
newpos = pos + e->next_offset;
|
||||
@@ -543,6 +533,7 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
|
||||
const struct arpt_replace *repl)
|
||||
{
|
||||
struct arpt_entry *iter;
|
||||
unsigned int *offsets;
|
||||
unsigned int i;
|
||||
int ret = 0;
|
||||
|
||||
@@ -555,6 +546,9 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
|
||||
newinfo->underflow[i] = 0xFFFFFFFF;
|
||||
}
|
||||
|
||||
offsets = xt_alloc_entry_offsets(newinfo->number);
|
||||
if (!offsets)
|
||||
return -ENOMEM;
|
||||
i = 0;
|
||||
|
||||
/* Walk through entries, checking offsets. */
|
||||
@@ -565,17 +559,20 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
|
||||
repl->underflow,
|
||||
repl->valid_hooks);
|
||||
if (ret != 0)
|
||||
break;
|
||||
goto out_free;
|
||||
if (i < repl->num_entries)
|
||||
offsets[i] = (void *)iter - entry0;
|
||||
++i;
|
||||
if (strcmp(arpt_get_target(iter)->u.user.name,
|
||||
XT_ERROR_TARGET) == 0)
|
||||
++newinfo->stacksize;
|
||||
}
|
||||
if (ret != 0)
|
||||
return ret;
|
||||
goto out_free;
|
||||
|
||||
ret = -EINVAL;
|
||||
if (i != repl->num_entries)
|
||||
return -EINVAL;
|
||||
goto out_free;
|
||||
|
||||
/* Check hooks all assigned */
|
||||
for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
|
||||
@@ -583,13 +580,16 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
|
||||
if (!(repl->valid_hooks & (1 << i)))
|
||||
continue;
|
||||
if (newinfo->hook_entry[i] == 0xFFFFFFFF)
|
||||
return -EINVAL;
|
||||
goto out_free;
|
||||
if (newinfo->underflow[i] == 0xFFFFFFFF)
|
||||
return -EINVAL;
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
|
||||
return -ELOOP;
|
||||
if (!mark_source_chains(newinfo, repl->valid_hooks, entry0, offsets)) {
|
||||
ret = -ELOOP;
|
||||
goto out_free;
|
||||
}
|
||||
kvfree(offsets);
|
||||
|
||||
/* Finally, each sanity check must pass */
|
||||
i = 0;
|
||||
@@ -609,6 +609,9 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
|
||||
return ret;
|
||||
}
|
||||
|
||||
return ret;
|
||||
out_free:
|
||||
kvfree(offsets);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@@ -373,23 +373,12 @@ ipt_do_table(struct sk_buff *skb,
|
||||
else return verdict;
|
||||
}
|
||||
|
||||
static bool find_jump_target(const struct xt_table_info *t,
|
||||
const struct ipt_entry *target)
|
||||
{
|
||||
struct ipt_entry *iter;
|
||||
|
||||
xt_entry_foreach(iter, t->entries, t->size) {
|
||||
if (iter == target)
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Figures out from what hook each rule can be called: returns 0 if
|
||||
there are loops. Puts hook bitmask in comefrom. */
|
||||
static int
|
||||
mark_source_chains(const struct xt_table_info *newinfo,
|
||||
unsigned int valid_hooks, void *entry0)
|
||||
unsigned int valid_hooks, void *entry0,
|
||||
unsigned int *offsets)
|
||||
{
|
||||
unsigned int hook;
|
||||
|
||||
@@ -458,10 +447,11 @@ mark_source_chains(const struct xt_table_info *newinfo,
|
||||
XT_STANDARD_TARGET) == 0 &&
|
||||
newpos >= 0) {
|
||||
/* This a jump; chase it. */
|
||||
if (!xt_find_jump_offset(offsets, newpos,
|
||||
newinfo->number))
|
||||
return 0;
|
||||
e = (struct ipt_entry *)
|
||||
(entry0 + newpos);
|
||||
if (!find_jump_target(newinfo, e))
|
||||
return 0;
|
||||
} else {
|
||||
/* ... this is a fallthru */
|
||||
newpos = pos + e->next_offset;
|
||||
@@ -694,6 +684,7 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
|
||||
const struct ipt_replace *repl)
|
||||
{
|
||||
struct ipt_entry *iter;
|
||||
unsigned int *offsets;
|
||||
unsigned int i;
|
||||
int ret = 0;
|
||||
|
||||
@@ -706,6 +697,9 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
|
||||
newinfo->underflow[i] = 0xFFFFFFFF;
|
||||
}
|
||||
|
||||
offsets = xt_alloc_entry_offsets(newinfo->number);
|
||||
if (!offsets)
|
||||
return -ENOMEM;
|
||||
i = 0;
|
||||
/* Walk through entries, checking offsets. */
|
||||
xt_entry_foreach(iter, entry0, newinfo->size) {
|
||||
@@ -715,15 +709,18 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
|
||||
repl->underflow,
|
||||
repl->valid_hooks);
|
||||
if (ret != 0)
|
||||
return ret;
|
||||
goto out_free;
|
||||
if (i < repl->num_entries)
|
||||
offsets[i] = (void *)iter - entry0;
|
||||
++i;
|
||||
if (strcmp(ipt_get_target(iter)->u.user.name,
|
||||
XT_ERROR_TARGET) == 0)
|
||||
++newinfo->stacksize;
|
||||
}
|
||||
|
||||
ret = -EINVAL;
|
||||
if (i != repl->num_entries)
|
||||
return -EINVAL;
|
||||
goto out_free;
|
||||
|
||||
/* Check hooks all assigned */
|
||||
for (i = 0; i < NF_INET_NUMHOOKS; i++) {
|
||||
@@ -731,13 +728,16 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
|
||||
if (!(repl->valid_hooks & (1 << i)))
|
||||
continue;
|
||||
if (newinfo->hook_entry[i] == 0xFFFFFFFF)
|
||||
return -EINVAL;
|
||||
goto out_free;
|
||||
if (newinfo->underflow[i] == 0xFFFFFFFF)
|
||||
return -EINVAL;
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
|
||||
return -ELOOP;
|
||||
if (!mark_source_chains(newinfo, repl->valid_hooks, entry0, offsets)) {
|
||||
ret = -ELOOP;
|
||||
goto out_free;
|
||||
}
|
||||
kvfree(offsets);
|
||||
|
||||
/* Finally, each sanity check must pass */
|
||||
i = 0;
|
||||
@@ -757,6 +757,9 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
|
||||
return ret;
|
||||
}
|
||||
|
||||
return ret;
|
||||
out_free:
|
||||
kvfree(offsets);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@@ -26,6 +26,8 @@
|
||||
|
||||
/* Per-open-file iterator state for the /proc/net/nf_conntrack seq_file
 * dump.  @hash and @htable_size cache the conntrack hashtable pointer
 * and bucket count at the start of the dump so the walk stays
 * consistent if the table is resized while iterating.
 */
struct ct_iter_state {
	struct seq_net_private p;	/* must be first: seq_file net namespace boilerplate */
	struct hlist_nulls_head *hash;	/* cached hashtable base, set in ct_seq_start() */
	unsigned int htable_size;	/* cached bucket count matching @hash */
	unsigned int bucket;		/* current bucket index during the walk */
};
|
||||
|
||||
@@ -35,10 +37,10 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
|
||||
struct hlist_nulls_node *n;
|
||||
|
||||
for (st->bucket = 0;
|
||||
st->bucket < nf_conntrack_htable_size;
|
||||
st->bucket < st->htable_size;
|
||||
st->bucket++) {
|
||||
n = rcu_dereference(
|
||||
hlist_nulls_first_rcu(&nf_conntrack_hash[st->bucket]));
|
||||
hlist_nulls_first_rcu(&st->hash[st->bucket]));
|
||||
if (!is_a_nulls(n))
|
||||
return n;
|
||||
}
|
||||
@@ -53,11 +55,11 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
|
||||
head = rcu_dereference(hlist_nulls_next_rcu(head));
|
||||
while (is_a_nulls(head)) {
|
||||
if (likely(get_nulls_value(head) == st->bucket)) {
|
||||
if (++st->bucket >= nf_conntrack_htable_size)
|
||||
if (++st->bucket >= st->htable_size)
|
||||
return NULL;
|
||||
}
|
||||
head = rcu_dereference(
|
||||
hlist_nulls_first_rcu(&nf_conntrack_hash[st->bucket]));
|
||||
hlist_nulls_first_rcu(&st->hash[st->bucket]));
|
||||
}
|
||||
return head;
|
||||
}
|
||||
@@ -75,7 +77,11 @@ static struct hlist_nulls_node *ct_get_idx(struct seq_file *seq, loff_t pos)
|
||||
static void *ct_seq_start(struct seq_file *seq, loff_t *pos)
|
||||
__acquires(RCU)
|
||||
{
|
||||
struct ct_iter_state *st = seq->private;
|
||||
|
||||
rcu_read_lock();
|
||||
|
||||
nf_conntrack_get_ht(&st->hash, &st->htable_size);
|
||||
return ct_get_idx(seq, *pos);
|
||||
}
|
||||
|
||||
|
Reference in New Issue
Block a user