author     Eric Dumazet <edumazet@google.com>        2022-06-10 20:30:16 -0700
committer  David S. Miller <davem@davemloft.net>     2022-06-13 13:35:25 +0100
commit     219160be496f7f9cd105c5708e37cf22ab4ce0c7 (patch)
tree       cf2ec0a5c25c90def3f59acc871d490b4aa3afba
parent     4066bf4ce3ae3e322fa0c3c6418e45d99ff086b8 (diff)
tcp: sk_forced_mem_schedule() optimization
sk_memory_allocated_add() has three callers, and returns
the current value of @memory_allocated to them.
sk_forced_mem_schedule() is one of them, and ignores
the returned value.
Change sk_memory_allocated_add() to return void.
Change sock_reserve_memory() and __sk_mem_raise_allocated()
to call sk_memory_allocated().
This removes one cache line miss [1] for RPC workloads,
as the first skbs in the TCP write queue and receive queue go
through sk_forced_mem_schedule().
[1] Cache line holding tcp_memory_allocated.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
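
For context, sk_forced_mem_schedule() charges memory unconditionally (it is used
for skbs that must be accepted, such as the first skb queued on the write or
receive queue) and never inspects the global total. The sketch below paraphrases
its fast path to show why the old signature was wasteful there; it is a
simplified rendering based on net/ipv4/tcp_output.c, not the verbatim kernel
source.

/* Simplified sketch, paraphrased from net/ipv4/tcp_output.c; not verbatim. */
void sk_forced_mem_schedule(struct sock *sk, int size)
{
        int delta, amt;

        delta = size - sk->sk_forward_alloc;
        if (delta <= 0)
                return;
        amt = sk_mem_pages(delta);
        sk->sk_forward_alloc += amt << PAGE_SHIFT;

        /* Before this patch the helper returned sk_memory_allocated(sk),
         * i.e. an atomic_long_read() of the shared counter (tcp_memory_allocated
         * for TCP), so every call touched that cache line even though the
         * returned value was discarded here.
         */
        sk_memory_allocated_add(sk, amt);

        if (mem_cgroup_sockets_enabled && sk->sk_memcg)
                mem_cgroup_charge_skmem(sk->sk_memcg, amt, gfp_memcg_charge());
}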
-rw-r--r--   include/net/sock.h   3
-rw-r--r--   net/core/sock.c      9
2 files changed, 7 insertions, 5 deletions
diff --git a/include/net/sock.h b/include/net/sock.h
index 0063e8410a4e..304a5e39d41e 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1412,7 +1412,7 @@ sk_memory_allocated(const struct sock *sk)
 /* 1 MB per cpu, in page units */
 #define SK_MEMORY_PCPU_RESERVE (1 << (20 - PAGE_SHIFT))
 
-static inline long
+static inline void
 sk_memory_allocated_add(struct sock *sk, int amt)
 {
 	int local_reserve;
@@ -1424,7 +1424,6 @@ sk_memory_allocated_add(struct sock *sk, int amt)
 		atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
 	}
 	preempt_enable();
-	return sk_memory_allocated(sk);
 }
 
 static inline void
diff --git a/net/core/sock.c b/net/core/sock.c
index 697d5c8e2f0d..92a0296ccb18 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1019,7 +1019,8 @@ static int sock_reserve_memory(struct sock *sk, int bytes)
 		return -ENOMEM;
 
 	/* pre-charge to forward_alloc */
-	allocated = sk_memory_allocated_add(sk, pages);
+	sk_memory_allocated_add(sk, pages);
+	allocated = sk_memory_allocated(sk);
 	/* If the system goes into memory pressure with this
 	 * precharge, give up and return error.
 	 */
@@ -2906,11 +2907,13 @@ EXPORT_SYMBOL(sk_wait_data);
  */
 int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
 {
-	struct proto *prot = sk->sk_prot;
-	long allocated = sk_memory_allocated_add(sk, amt);
 	bool memcg_charge = mem_cgroup_sockets_enabled && sk->sk_memcg;
+	struct proto *prot = sk->sk_prot;
 	bool charged = true;
+	long allocated;
 
+	sk_memory_allocated_add(sk, amt);
+	allocated = sk_memory_allocated(sk);
 	if (memcg_charge &&
 	    !(charged = mem_cgroup_charge_skmem(sk->sk_memcg, amt,
 						gfp_memcg_charge())))
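
After the patch, the two helpers in include/net/sock.h look roughly like the
sketch below. The pieces visible in the hunks above (the void signature, the
atomic_long_add() of the batched reserve, the removed return) are taken from
the diff; the per-cpu reserve handling and the body of sk_memory_allocated()
outside the hunks are a best-effort paraphrase of the surrounding kernel code,
not verbatim source.

static inline long
sk_memory_allocated(const struct sock *sk)
{
        /* This read is the cache line miss the patch avoids in the
         * sk_forced_mem_schedule() path: for TCP, memory_allocated points
         * at the shared tcp_memory_allocated counter.
         */
        return atomic_long_read(sk->sk_prot->memory_allocated);
}

static inline void
sk_memory_allocated_add(struct sock *sk, int amt)
{
        int local_reserve;

        preempt_disable();
        /* Charge a per-cpu reserve first; the shared atomic is only touched
         * once the local batch reaches SK_MEMORY_PCPU_RESERVE (1 MB per cpu).
         */
        local_reserve = __this_cpu_add_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
        if (local_reserve >= SK_MEMORY_PCPU_RESERVE) {
                __this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
                atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
        }
        preempt_enable();
        /* No return value: callers such as __sk_mem_raise_allocated() and
         * sock_reserve_memory() that still need the global total now call
         * sk_memory_allocated() explicitly, as the net/core/sock.c hunks show.
         */
}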