author      Eric Dumazet <edumazet@google.com>      2015-01-14 15:17:06 -0800
committer   David S. Miller <davem@davemloft.net>   2015-01-15 18:26:16 -0500
commit      5055c371bfd53fd369b895051b541318c2bad495 (patch)
tree        28748ee77408e82a64ae13dbb4bd65f1eff6aa26 /net
parent      0799c2d6f42db2b275b6479035f5b7a30ef4ee39 (diff)
ipv4: per cpu uncached list
RAW sockets with hdrinc suffer from contention on rt_uncached_lock spinlock.

One solution is to use percpu lists, since most routes are destroyed by the cpu that created them.

It is unclear why we even have to put these routes in uncached_list, as all outgoing packets should be freed when a device is dismantled.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Fixes: caacf05e5ad1 ("ipv4: Properly purge netdev references on uncached routes.")
Signed-off-by: David S. Miller <davem@davemloft.net>
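The pattern the patch switches to is simple enough to sketch outside the kernel: keep one lock/list pair per CPU, queue each object on the list belonging to the CPU that created it, and have the object remember which list it sits on so the destructor can take the matching lock even if it runs on another CPU. What follows is a minimal userspace C sketch of that idea, not the patch itself; the names (uncached, entry, add_uncached, del_uncached, NR_CPUS_MAX) and the use of sched_getcpu() with pthread mutexes are illustrative assumptions, standing in for the kernel's DEFINE_PER_CPU_ALIGNED, raw_cpu_ptr() and spin_lock_bh() that appear in the diff below.

    /* Userspace analogy of per-cpu uncached lists: one lock+list per CPU,
     * each entry remembers which list it was queued on. Illustrative only. */
    #define _GNU_SOURCE
    #include <pthread.h>
    #include <sched.h>

    #define NR_CPUS_MAX 64          /* assumed upper bound for this sketch */

    struct uncached_list;

    struct entry {
        struct entry *next, *prev;
        struct uncached_list *list; /* per-CPU list this entry was queued on */
    };

    struct uncached_list {
        pthread_mutex_t lock;
        struct entry head;          /* sentinel of a circular doubly linked list */
    };

    static struct uncached_list uncached[NR_CPUS_MAX];

    /* mirrors the ip_rt_init() hunk: set up every per-CPU list once at start */
    static void init_uncached(void)
    {
        for (int cpu = 0; cpu < NR_CPUS_MAX; cpu++) {
            struct uncached_list *ul = &uncached[cpu];

            pthread_mutex_init(&ul->lock, NULL);
            ul->head.next = ul->head.prev = &ul->head;
        }
    }

    /* mirrors rt_add_uncached_list(): add to the *current* CPU's list */
    static void add_uncached(struct entry *e)
    {
        int cpu = sched_getcpu();
        struct uncached_list *ul = &uncached[cpu < 0 ? 0 : cpu % NR_CPUS_MAX];

        e->list = ul;
        pthread_mutex_lock(&ul->lock);
        e->prev = ul->head.prev;            /* tail insertion */
        e->next = &ul->head;
        ul->head.prev->next = e;
        ul->head.prev = e;
        pthread_mutex_unlock(&ul->lock);
    }

    /* mirrors ipv4_dst_destroy(): lock the list the entry was queued on,
     * which is usually the local one, so cross-CPU contention is rare */
    static void del_uncached(struct entry *e)
    {
        struct uncached_list *ul = e->list;

        pthread_mutex_lock(&ul->lock);
        e->prev->next = e->next;
        e->next->prev = e->prev;
        pthread_mutex_unlock(&ul->lock);
    }

    int main(void)
    {
        struct entry e = { 0 };

        init_uncached();
        add_uncached(&e);
        del_uncached(&e);
        return 0;
    }

The key property, visible in del_uncached(), is that correctness never depends on the deleting CPU matching the adding CPU; splitting the list only makes cross-CPU lock contention rare because, as the changelog notes, most routes are destroyed by the cpu that created them.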
Diffstat (limited to 'net')
-rw-r--r--    net/ipv4/route.c    46
1 file changed, 33 insertions(+), 13 deletions(-)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 6a2155b02602..ce112d0f2698 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1325,14 +1325,22 @@ static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt)
return ret;
}
-static DEFINE_SPINLOCK(rt_uncached_lock);
-static LIST_HEAD(rt_uncached_list);
+struct uncached_list {
+ spinlock_t lock;
+ struct list_head head;
+};
+
+static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);
static void rt_add_uncached_list(struct rtable *rt)
{
- spin_lock_bh(&rt_uncached_lock);
- list_add_tail(&rt->rt_uncached, &rt_uncached_list);
- spin_unlock_bh(&rt_uncached_lock);
+ struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);
+
+ rt->rt_uncached_list = ul;
+
+ spin_lock_bh(&ul->lock);
+ list_add_tail(&rt->rt_uncached, &ul->head);
+ spin_unlock_bh(&ul->lock);
}
static void ipv4_dst_destroy(struct dst_entry *dst)
@@ -1340,27 +1348,32 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
struct rtable *rt = (struct rtable *) dst;
if (!list_empty(&rt->rt_uncached)) {
- spin_lock_bh(&rt_uncached_lock);
+ struct uncached_list *ul = rt->rt_uncached_list;
+
+ spin_lock_bh(&ul->lock);
list_del(&rt->rt_uncached);
- spin_unlock_bh(&rt_uncached_lock);
+ spin_unlock_bh(&ul->lock);
}
}
void rt_flush_dev(struct net_device *dev)
{
- if (!list_empty(&rt_uncached_list)) {
- struct net *net = dev_net(dev);
- struct rtable *rt;
+ struct net *net = dev_net(dev);
+ struct rtable *rt;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
- spin_lock_bh(&rt_uncached_lock);
- list_for_each_entry(rt, &rt_uncached_list, rt_uncached) {
+ spin_lock_bh(&ul->lock);
+ list_for_each_entry(rt, &ul->head, rt_uncached) {
if (rt->dst.dev != dev)
continue;
rt->dst.dev = net->loopback_dev;
dev_hold(rt->dst.dev);
dev_put(dev);
}
- spin_unlock_bh(&rt_uncached_lock);
+ spin_unlock_bh(&ul->lock);
}
}
@@ -2717,6 +2730,7 @@ struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
int __init ip_rt_init(void)
{
int rc = 0;
+ int cpu;
ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
if (!ip_idents)
@@ -2724,6 +2738,12 @@ int __init ip_rt_init(void)
prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
+ for_each_possible_cpu(cpu) {
+ struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
+
+ INIT_LIST_HEAD(&ul->head);
+ spin_lock_init(&ul->lock);
+ }
#ifdef CONFIG_IP_ROUTE_CLASSID
ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
if (!ip_rt_acct)