netfilter: x_tables: pack percpu counter allocations
Instead of allocating each xt_counter individually, allocate 4k chunks
and then serve counter allocation requests from these. This should speed
up rule evaluation by increasing data locality, and it also speeds up
ruleset loading because we reduce the number of calls to the percpu
allocator.

As Eric points out, we can't use PAGE_SIZE as the chunk size: on arches
with a 64k page size the percpu allocator cannot satisfy an allocation
that large.

Suggested-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Florian Westphal <fw@strlen.de>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
commit ae0ac0ed6f
parent f28e15bace
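The allocator side of the change is not part of the hunks below; they only
show the new call signature, xt_percpu_counter_alloc(alloc_state, &e->counters),
and the per-table struct xt_percpu_counter_alloc_state. As a rough sketch of
the mechanism the changelog describes — the block-size constant (here called
XT_PCPU_BLOCK_SIZE), the struct layout, and the alignment trick are assumptions,
not code from these hunks — the helper could look like this:

	/* Sketch only: pack counters into shared 4k percpu blocks.
	 * Names and layout are assumptions based on the call sites below.
	 */
	#define XT_PCPU_BLOCK_SIZE 4096	/* fixed 4k, not PAGE_SIZE (see changelog) */

	struct xt_percpu_counter_alloc_state {
		unsigned int off;		/* next free offset in current block */
		const char __percpu *mem;	/* current block, NULL if none open */
	};

	static bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state,
					    struct xt_counters *counter)
	{
		if (nr_cpu_ids <= 1)
			return true;	/* UP: counters are used in place */

		if (!state->mem) {
			/* Open a new block.  Aligning it to its own size lets
			 * the free path recognize a block start by offset 0.
			 */
			state->mem = __alloc_percpu(XT_PCPU_BLOCK_SIZE,
						    XT_PCPU_BLOCK_SIZE);
			if (!state->mem)
				return false;
		}
		/* Hand out the next slot and advance the offset. */
		counter->pcnt = (__force unsigned long)(state->mem + state->off);
		state->off += sizeof(*counter);

		if (state->off > (XT_PCPU_BLOCK_SIZE - sizeof(*counter))) {
			/* Block exhausted: next request opens a fresh one. */
			state->mem = NULL;
			state->off = 0;
		}
		return true;
	}

Initializing one alloc_state per translate_table() call, as the hunks below do,
means all counters of one ruleset are packed into as few percpu blocks as
possible, which is where the data-locality win during rule evaluation comes from.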
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -411,13 +411,14 @@ static inline int check_target(struct arpt_entry *e, const char *name)
 }
 
 static inline int
-find_check_entry(struct arpt_entry *e, const char *name, unsigned int size)
+find_check_entry(struct arpt_entry *e, const char *name, unsigned int size,
+		 struct xt_percpu_counter_alloc_state *alloc_state)
 {
 	struct xt_entry_target *t;
 	struct xt_target *target;
 	int ret;
 
-	if (!xt_percpu_counter_alloc(&e->counters))
+	if (!xt_percpu_counter_alloc(alloc_state, &e->counters))
 		return -ENOMEM;
 
 	t = arpt_get_target(e);
@@ -525,6 +526,7 @@ static inline void cleanup_entry(struct arpt_entry *e)
 static int translate_table(struct xt_table_info *newinfo, void *entry0,
 			   const struct arpt_replace *repl)
 {
+	struct xt_percpu_counter_alloc_state alloc_state = { 0 };
 	struct arpt_entry *iter;
 	unsigned int *offsets;
 	unsigned int i;
@@ -587,7 +589,8 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
 	/* Finally, each sanity check must pass */
 	i = 0;
 	xt_entry_foreach(iter, entry0, newinfo->size) {
-		ret = find_check_entry(iter, repl->name, repl->size);
+		ret = find_check_entry(iter, repl->name, repl->size,
+				       &alloc_state);
 		if (ret != 0)
 			break;
 		++i;
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -531,7 +531,8 @@ static int check_target(struct ipt_entry *e, struct net *net, const char *name)
 
 static int
 find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
-		 unsigned int size)
+		 unsigned int size,
+		 struct xt_percpu_counter_alloc_state *alloc_state)
 {
 	struct xt_entry_target *t;
 	struct xt_target *target;
@@ -540,7 +541,7 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
 	struct xt_mtchk_param mtpar;
 	struct xt_entry_match *ematch;
 
-	if (!xt_percpu_counter_alloc(&e->counters))
+	if (!xt_percpu_counter_alloc(alloc_state, &e->counters))
 		return -ENOMEM;
 
 	j = 0;
@@ -676,6 +677,7 @@ static int
 translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
 		const struct ipt_replace *repl)
 {
+	struct xt_percpu_counter_alloc_state alloc_state = { 0 };
 	struct ipt_entry *iter;
 	unsigned int *offsets;
 	unsigned int i;
@@ -735,7 +737,8 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
 	/* Finally, each sanity check must pass */
 	i = 0;
 	xt_entry_foreach(iter, entry0, newinfo->size) {
-		ret = find_check_entry(iter, net, repl->name, repl->size);
+		ret = find_check_entry(iter, net, repl->name, repl->size,
+				       &alloc_state);
 		if (ret != 0)
 			break;
 		++i;
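The free side also has to cope with packing: cleanup_entry() (see the second
hunk header above) can no longer hand every counter pointer to the percpu
allocator, since most counters now live inside a shared block. A sketch under
the same assumptions as above — the block-alignment test is how the owner of
a block can be recognized — not code shown in this commit:

	/* Sketch only: with blocks aligned to XT_PCPU_BLOCK_SIZE, exactly the
	 * counter at offset 0 owns the percpu allocation, so only that one
	 * is passed to free_percpu().
	 */
	static void xt_percpu_counter_free(struct xt_counters *counters)
	{
		unsigned long pcnt = counters->pcnt;

		if (nr_cpu_ids > 1 && (pcnt & (XT_PCPU_BLOCK_SIZE - 1)) == 0)
			free_percpu((void __percpu *)pcnt);
	}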