author    | Eric Dumazet <dada1@cosmosbay.com>          | 2005-08-26 12:05:31 -0700
committer | David S. Miller <davem@sunset.davemloft.net> | 2005-08-29 16:11:18 -0700
commit    | ba89966c1984513f4f2cc0a6c182266be44ddd03
tree      | 6e5766fc5c287708c03e0a162531dfd4785b0703 /net/core
parent    | 29cb9f9c5502f6218cd3ea574efe46a5e55522d2
[NET]: use __read_mostly on kmem_cache_t, DEFINE_SNMP_STAT pointers
This patch puts mostly-read-only data in the right section
(read_mostly) so that this data can be shared between CPUs without
memory ping-pong.
On one of my production machines, tcp_statistics was sitting in a
heavily modified cache line, so *every* SNMP update forced a reload.
Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
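
For context, __read_mostly is (roughly) a section attribute: it moves rarely written globals into a dedicated data section so the linker groups them away from frequently written variables, and they stop sharing cache lines with hot data. Below is a minimal user-space sketch of the same idea; the macro and variable names are illustrative, not the kernel's actual definition or layout.

#include <stdio.h>

/* Illustrative stand-in for the kernel's __read_mostly: place the
 * variable in its own data section so the linker can group it with
 * other read-mostly globals, away from frequently written ones. */
#define my_read_mostly __attribute__((__section__(".data.read_mostly")))

/* Written once at startup, then only read on the fast path. */
static int lookup_table_size my_read_mostly = 1024;

/* Frequently updated counter; without section grouping it could land
 * on the same cache line as lookup_table_size, forcing readers of the
 * "constant" to refetch that line after every counter update. */
static long packets_seen;

int main(void)
{
	int i;

	for (i = 0; i < 10; i++)
		packets_seen++;		/* hot write path */

	printf("table size %d, packets seen %ld\n",
	       lookup_table_size, packets_seen);
	return 0;
}

The attribute only controls placement; reads and writes compile exactly as before, which is why the annotation is cheap to apply to candidates like kmem_cache_t pointers that are set once at boot and then only dereferenced.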
Diffstat (limited to 'net/core')
-rw-r--r-- | net/core/flow.c   | 2
-rw-r--r-- | net/core/skbuff.c | 4
2 files changed, 3 insertions, 3 deletions
diff --git a/net/core/flow.c b/net/core/flow.c
index f289570b15a3..7e95b39de9fd 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -42,7 +42,7 @@ static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
 
 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
 
-static kmem_cache_t *flow_cachep;
+static kmem_cache_t *flow_cachep __read_mostly;
 
 static int flow_lwm, flow_hwm;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index b853a9b29eb6..f80a28785610 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -68,8 +68,8 @@
 #include <asm/uaccess.h>
 #include <asm/system.h>
 
-static kmem_cache_t *skbuff_head_cache;
-static kmem_cache_t *skbuff_fclone_cache;
+static kmem_cache_t *skbuff_head_cache __read_mostly;
+static kmem_cache_t *skbuff_fclone_cache __read_mostly;
 
 struct timeval __read_mostly skb_tv_base;
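
The cost described in the commit message, where every write to a hot counter forces readers of a neighbouring, effectively read-only variable to reload the cache line, is ordinary false sharing, and it can be reproduced in user space. A small sketch of that effect follows; the program, names, and sizes are illustrative, nothing here is kernel code.

/* build: cc -O2 -pthread false_sharing.c */
#include <pthread.h>
#include <stdio.h>
#include <time.h>

/* A hot counter and a read-mostly value packed onto one cache line. */
static struct {
	volatile long counter;     /* updated constantly by the writer thread */
	volatile long table_size;  /* set once, then only read */
} shared = { 0, 1024 };

#define ITERS 50000000L

static void *writer(void *arg)
{
	long i;

	(void)arg;
	for (i = 0; i < ITERS; i++)
		shared.counter++;          /* dirties the shared cache line */
	return NULL;
}

int main(void)
{
	pthread_t t;
	struct timespec start, end;
	long i, sink = 0;

	pthread_create(&t, NULL, writer, NULL);

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (i = 0; i < ITERS; i++)
		sink += shared.table_size; /* may refetch the line on every read */
	clock_gettime(CLOCK_MONOTONIC, &end);

	pthread_join(t, NULL);

	printf("reader loop: %.3f s (sink=%ld)\n",
	       (end.tv_sec - start.tv_sec) +
	       (end.tv_nsec - start.tv_nsec) / 1e9, sink);
	return 0;
}

Padding the struct, or grouping table_size with other read-mostly data as __read_mostly does for kernel globals, keeps the reader's cache line clean and makes the reader loop noticeably faster.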