path: root/net/ipv4/tcp_ulp.c
author    Daniel Borkmann <daniel@iogearbox.net>    2018-10-13 02:45:56 +0200
committer Alexei Starovoitov <ast@kernel.org>       2018-10-15 12:23:19 -0700
commit    8b9088f806e1ccd10c3d48b3b6d3d5d7855d92c5 (patch)
tree      3fb306de5696a8c855340ce1ca9492a76702529d /net/ipv4/tcp_ulp.c
parent    67e89ac32828a29adc74e5c9bd59bd70943466f0 (diff)
tcp, ulp: enforce sock_owned_by_me upon ulp init and cleanup
Whenever the ULP data on the socket is mangled, enforce that the caller
holds the socket lock; otherwise this may race with the initialization
and cleanup callbacks from the ULP ops, since both mangle internal
socket state.

Joint work with John.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: John Fastabend <john.fastabend@gmail.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Diffstat (limited to 'net/ipv4/tcp_ulp.c')
-rw-r--r--  net/ipv4/tcp_ulp.c  4
1 file changed, 4 insertions(+), 0 deletions(-)
diff --git a/net/ipv4/tcp_ulp.c b/net/ipv4/tcp_ulp.c
index a5995bb2eaca..34e96353f115 100644
--- a/net/ipv4/tcp_ulp.c
+++ b/net/ipv4/tcp_ulp.c
@@ -123,6 +123,8 @@ void tcp_cleanup_ulp(struct sock *sk)
 {
         struct inet_connection_sock *icsk = inet_csk(sk);

+        sock_owned_by_me(sk);
+
         if (!icsk->icsk_ulp_ops)
                 return;

@@ -140,6 +142,7 @@ int tcp_set_ulp(struct sock *sk, const char *name)
         const struct tcp_ulp_ops *ulp_ops;
         int err = 0;

+        sock_owned_by_me(sk);
         if (icsk->icsk_ulp_ops)
                 return -EEXIST;

@@ -168,6 +171,7 @@ int tcp_set_ulp_id(struct sock *sk, int ulp)
         const struct tcp_ulp_ops *ulp_ops;
         int err;

+        sock_owned_by_me(sk);
         if (icsk->icsk_ulp_ops)
                 return -EEXIST;
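
A minimal caller sketch (not part of this patch): sock_owned_by_me() warns under
lockdep when the socket lock is not owned, so any path that attaches or tears down
a ULP is expected to bracket the call with lock_sock()/release_sock(), or already
run with the lock owned. The wrapper name below is hypothetical.

/* Hypothetical caller (illustration only, not from this commit):
 * take ownership of the socket lock so the sock_owned_by_me()
 * check added above does not trigger.
 */
#include <net/sock.h>
#include <net/tcp.h>

static int example_attach_ulp(struct sock *sk, const char *ulp_name)
{
        int err;

        lock_sock(sk);                   /* own the socket lock */
        err = tcp_set_ulp(sk, ulp_name); /* mangles icsk_ulp_ops under the lock */
        release_sock(sk);

        return err;
}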