[NET]: use __read_mostly on kmem_cache_t , DEFINE_SNMP_STAT pointers
This patch puts mostly-read-only data in the right section (__read_mostly), to help sharing of these data between CPUs without memory ping-pongs.

On one of my production machines, tcp_statistics was sitting in a heavily modified cache line, so *every* SNMP update had to force a reload.

Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Este commit está contenido en:

cometido por
David S. Miller

padre
29cb9f9c55
commit
ba89966c19
@@ -113,7 +113,7 @@
 #include <linux/mroute.h>
 #endif
 
-DEFINE_SNMP_STAT(struct linux_mib, net_statistics);
+DEFINE_SNMP_STAT(struct linux_mib, net_statistics) __read_mostly;
 
 extern void ip_mc_drop_socket(struct sock *sk);
@@ -45,8 +45,8 @@
 #include "fib_lookup.h"
 
-static kmem_cache_t *fn_hash_kmem;
-static kmem_cache_t *fn_alias_kmem;
+static kmem_cache_t *fn_hash_kmem __read_mostly;
+static kmem_cache_t *fn_alias_kmem __read_mostly;
 
 struct fib_node {
 	struct hlist_node fn_hash;
@@ -166,7 +166,7 @@ static struct tnode *halve(struct trie *t, struct tnode *tn);
 static void tnode_free(struct tnode *tn);
 static void trie_dump_seq(struct seq_file *seq, struct trie *t);
 
-static kmem_cache_t *fn_alias_kmem;
+static kmem_cache_t *fn_alias_kmem __read_mostly;
 static struct trie *trie_local = NULL, *trie_main = NULL;
@@ -114,7 +114,7 @@ struct icmp_bxm {
 /*
  *	Statistics
  */
-DEFINE_SNMP_STAT(struct icmp_mib, icmp_statistics);
+DEFINE_SNMP_STAT(struct icmp_mib, icmp_statistics) __read_mostly;
 
 /* An array of errno for error messages from dest unreach. */
 /* RFC 1122: 3.2.2.1 States that NET_UNREACH, HOST_UNREACH and SR_FAILED MUST be considered 'transient errs'. */
@@ -73,7 +73,7 @@
 /* Exported for inet_getid inline function.  */
 DEFINE_SPINLOCK(inet_peer_idlock);
 
-static kmem_cache_t *peer_cachep;
+static kmem_cache_t *peer_cachep __read_mostly;
 
 #define node_height(x) x->avl_height
 static struct inet_peer peer_fake_node = {
@@ -150,7 +150,7 @@
  *	SNMP management statistics
  */
 
-DEFINE_SNMP_STAT(struct ipstats_mib, ip_statistics);
+DEFINE_SNMP_STAT(struct ipstats_mib, ip_statistics) __read_mostly;
 
 /*
  *	Process Router Attention IP option
@@ -103,7 +103,7 @@ static DEFINE_SPINLOCK(mfc_unres_lock);
    In this case data path is free of exclusive locks at all.
  */
 
-static kmem_cache_t *mrt_cachep;
+static kmem_cache_t *mrt_cachep __read_mostly;
 
 static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local);
 static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert);
@@ -40,7 +40,7 @@
 static struct list_head *ip_vs_conn_tab;
 
 /*  SLAB cache for IPVS connections */
-static kmem_cache_t *ip_vs_conn_cachep;
+static kmem_cache_t *ip_vs_conn_cachep __read_mostly;
 
 /*  counter for current IPVS connections */
 static atomic_t ip_vs_conn_count = ATOMIC_INIT(0);
@@ -70,8 +70,8 @@ static LIST_HEAD(helpers);
 unsigned int ip_conntrack_htable_size = 0;
 int ip_conntrack_max;
 struct list_head *ip_conntrack_hash;
-static kmem_cache_t *ip_conntrack_cachep;
-static kmem_cache_t *ip_conntrack_expect_cachep;
+static kmem_cache_t *ip_conntrack_cachep __read_mostly;
+static kmem_cache_t *ip_conntrack_expect_cachep __read_mostly;
 struct ip_conntrack ip_conntrack_untracked;
 unsigned int ip_ct_log_invalid;
 static LIST_HEAD(unconfirmed);
@@ -94,7 +94,7 @@ struct ipt_hashlimit_htable {
 static DEFINE_SPINLOCK(hashlimit_lock);	/* protects htables list */
 static DECLARE_MUTEX(hlimit_mutex);	/* additional checkentry protection */
 static HLIST_HEAD(hashlimit_htables);
-static kmem_cache_t *hashlimit_cachep;
+static kmem_cache_t *hashlimit_cachep __read_mostly;
 
 static inline int dst_cmp(const struct dsthash_ent *ent, struct dsthash_dst *b)
 {
@@ -269,7 +269,7 @@
 
 int sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
 
-DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics);
+DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics) __read_mostly;
 
 atomic_t tcp_orphan_count = ATOMIC_INIT(0);
@@ -113,7 +113,7 @@
  *	Snmp MIB for the UDP layer
  */
 
-DEFINE_SNMP_STAT(struct udp_mib, udp_statistics);
+DEFINE_SNMP_STAT(struct udp_mib, udp_statistics) __read_mostly;
 
 struct hlist_head udp_hash[UDP_HTABLE_SIZE];
 DEFINE_RWLOCK(udp_hash_lock);
Referencia en una nueva incidencia
Block a user