author		Eric Dumazet <edumazet@google.com>	2015-10-02 11:43:23 -0700
committer	David S. Miller <davem@davemloft.net>	2015-10-03 04:32:36 -0700
commit		fff1f3001cc58b5064a0f1154a7ac09b76f29c44 (patch)
tree		910fe6f27dc9b9b7c6a0a56a9442ee264c9a6667 /net
parent		f6d3125fa3c2f55ddf7cf69365c41089de6cfae6 (diff)
tcp: add a spinlock to protect struct request_sock_queue
struct request_sock_queue fields are currently protected
by the listener 'lock' (which is not a real spinlock).

We need to add a private spinlock instead, so that softirq handlers
creating children do not have to worry about the backlog notion
that the listener 'lock' carries.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
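
The idea, in miniature: give the accept queue its own lock, so the softirq path that appends a freshly created child never touches the listener socket lock (and its backlog machinery). A producer-side sketch, assuming a simplified helper modeled on the kernel's rskq_accept_head/rskq_accept_tail list (an illustration, not the verbatim kernel code):

/* Producer sketch: runs in softirq when a handshake completes.
 * Only the queue-private rskq_lock is taken; the listener socket
 * lock, with its socket-backlog handling, is never involved.
 */
static void accept_queue_add_sketch(struct request_sock_queue *queue,
				    struct request_sock *req)
{
	spin_lock(&queue->rskq_lock);	/* already in BH context */
	req->dl_next = NULL;
	if (queue->rskq_accept_tail)
		queue->rskq_accept_tail->dl_next = req;
	else
		queue->rskq_accept_head = req;
	queue->rskq_accept_tail = req;
	spin_unlock(&queue->rskq_lock);
}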
Diffstat (limited to 'net')
-rw-r--r--	net/core/request_sock.c		 1
-rw-r--r--	net/ipv4/inet_connection_sock.c	21
2 files changed, 8 insertions(+), 14 deletions(-)
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index e22cfa4ed25f..8d9fd31d3d06 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -58,6 +58,7 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
 		return -ENOMEM;
 
 	get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd));
 
+	spin_lock_init(&queue->rskq_lock);
 	spin_lock_init(&queue->syn_wait_lock);
 	spin_lock_init(&queue->fastopenq.lock);
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index e1527882a578..0085612b9e49 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -330,10 +330,9 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
 		if (error)
 			goto out_err;
 	}
-	req = reqsk_queue_remove(queue);
+	req = reqsk_queue_remove(queue, sk);
 	newsk = req->sk;
 
-	sk_acceptq_removed(sk);
 	if (sk->sk_protocol == IPPROTO_TCP &&
 	    tcp_rsk(req)->tfo_listener) {
 		spin_lock_bh(&queue->fastopenq.lock);
@@ -832,11 +831,7 @@ void inet_csk_listen_stop(struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
-	struct request_sock *acc_req;
-	struct request_sock *req;
-
-	/* make all the listen_opt local to us */
-	acc_req = reqsk_queue_yank_acceptq(queue);
+	struct request_sock *next, *req;
 
 	/* Following specs, it would be better either to send FIN
 	 * (and enter FIN-WAIT-1, it is normal close)
@@ -848,11 +843,9 @@ void inet_csk_listen_stop(struct sock *sk)
 	 */
 	reqsk_queue_destroy(queue);
 
-	while ((req = acc_req) != NULL) {
+	while ((req = reqsk_queue_remove(queue, sk)) != NULL) {
 		struct sock *child = req->sk;
 
-		acc_req = req->dl_next;
-
 		local_bh_disable();
 		bh_lock_sock(child);
 		WARN_ON(sock_owned_by_user(child));
@@ -882,18 +875,18 @@ void inet_csk_listen_stop(struct sock *sk)
 		local_bh_enable();
 		sock_put(child);
 
-		sk_acceptq_removed(sk);
 		reqsk_put(req);
 	}
 	if (queue->fastopenq.rskq_rst_head) {
 		/* Free all the reqs queued in rskq_rst_head. */
 		spin_lock_bh(&queue->fastopenq.lock);
-		acc_req = queue->fastopenq.rskq_rst_head;
+		req = queue->fastopenq.rskq_rst_head;
 		queue->fastopenq.rskq_rst_head = NULL;
 		spin_unlock_bh(&queue->fastopenq.lock);
-		while ((req = acc_req) != NULL) {
-			acc_req = req->dl_next;
+		while (req != NULL) {
+			next = req->dl_next;
 			reqsk_put(req);
+			req = next;
 		}
 	}
 	WARN_ON(sk->sk_ack_backlog);
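
The diffstat is limited to 'net', so the matching include/net/request_sock.h change is not shown here. From the call sites above, reqsk_queue_remove() now takes the listener socket, and the backlog accounting (sk_acceptq_removed()) has moved out of the callers; a consumer-side sketch consistent with that, under the new lock (an illustration, not the verbatim header code):

static struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue,
					       struct sock *parent)
{
	struct request_sock *req;

	spin_lock_bh(&queue->rskq_lock);	/* process context: block softirq */
	req = queue->rskq_accept_head;
	if (req) {
		sk_acceptq_removed(parent);	/* accounting done under the lock */
		queue->rskq_accept_head = req->dl_next;
		if (!queue->rskq_accept_head)
			queue->rskq_accept_tail = NULL;
	}
	spin_unlock_bh(&queue->rskq_lock);
	return req;
}

This is also why inet_csk_listen_stop() can simply loop on reqsk_queue_remove(queue, sk): each iteration pops one request and adjusts the backlog atomically, with no need to yank the whole list up front.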