diff options
author | Pavel Begunkov <asml.silence@gmail.com> | 2022-04-28 11:58:19 +0100 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2022-05-01 12:19:01 +0100 |
commit | 0a8afd9f026a7f6c835be0fed2ab709d4133797f (patch) | |
tree | e64a468c235a58d43c0433f99e8e233e03a211c2 /net/core | |
parent | 052ada096842a910327936b4ed203048906eb2c3 (diff) |
sock: optimise sock_def_write_space barriers
Now we have a separate path for sock_def_write_space() and can go one
step further. When it's called from sock_wfree() we know that there is a
preceding atomic for putting down ->sk_wmem_alloc. We can use it to
replace smp_mb() with a less expensive
smp_mb__after_atomic(). It also removes an extra RCU read lock/unlock as
a small bonus.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core')
-rw-r--r-- | net/core/sock.c | 26 |
1 file changed, 25 insertions(+), 1 deletion(-)
diff --git a/net/core/sock.c b/net/core/sock.c
index ab865b04130b..be20a1af20e5 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -146,6 +146,7 @@ static DEFINE_MUTEX(proto_list_mutex);
 static LIST_HEAD(proto_list);
 
+static void sock_def_write_space_wfree(struct sock *sk);
 static void sock_def_write_space(struct sock *sk);
 
 /**
@@ -2333,7 +2334,7 @@ void sock_wfree(struct sk_buff *skb)
 		    sk->sk_write_space == sock_def_write_space) {
 			rcu_read_lock();
 			free = refcount_sub_and_test(len, &sk->sk_wmem_alloc);
-			sock_def_write_space(sk);
+			sock_def_write_space_wfree(sk);
 			rcu_read_unlock();
 			if (unlikely(free))
 				__sk_free(sk);
@@ -3218,6 +3219,29 @@ static void sock_def_write_space(struct sock *sk)
 	rcu_read_unlock();
 }
 
+/* An optimised version of sock_def_write_space(), should only be called
+ * for SOCK_RCU_FREE sockets under RCU read section and after putting
+ * ->sk_wmem_alloc.
+ */
+static void sock_def_write_space_wfree(struct sock *sk)
+{
+	/* Do not wake up a writer until he can make "significant"
+	 * progress.  --DaveM
+	 */
+	if (sock_writeable(sk)) {
+		struct socket_wq *wq = rcu_dereference(sk->sk_wq);
+
+		/* rely on refcount_sub from sock_wfree() */
+		smp_mb__after_atomic();
+		if (wq && waitqueue_active(&wq->wait))
+			wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
+						EPOLLWRNORM | EPOLLWRBAND);
+
+		/* Should agree with poll, otherwise some programs break */
+		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
+	}
+}
+
 static void sock_def_destruct(struct sock *sk)
 {
 }