author		Eric Dumazet <edumazet@google.com>	2022-06-08 23:34:11 -0700
committer	Jakub Kicinski <kuba@kernel.org>	2022-06-10 16:21:27 -0700
commit		4890b686f4088c90432149bd6de567e621266fa2 (patch)
tree		aaade033f34396e305b8ccd8cb6f1804205e715b /net/sctp
parent		7c80b038d23e1f4c7fcc311f43f83b8c60e7fb80 (diff)
net: keep sk->sk_forward_alloc as small as possible
Currently, tcp_memory_allocated can hit tcp_mem[] limits quite fast.

Each TCP socket can forward allocate up to 2 MB of memory, even after
the flow became less active.

10,000 sockets can have reserved 20 GB of memory, and we have no
shrinker in place to reclaim that.

Instead of trying to reclaim the extra allocations in some places, just
keep sk->sk_forward_alloc values as small as possible.

This should not impact performance too much now that we have per-cpu
reserves: changes to tcp_memory_allocated should not be too frequent.

For sockets not using SO_RESERVE_MEM:
- idle sockets (no packets in tx/rx queues) have zero forward alloc.
- non-idle sockets have a forward alloc smaller than one page.

Note:
- Removal of SK_RECLAIM_CHUNK and SK_RECLAIM_THRESHOLD is left to MPTCP
  maintainers as a follow-up.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
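As an illustration of the accounting policy described above, the
following is a minimal userspace C model (a sketch only, not kernel
code: memory_allocated, fwd_alloc, mem_charge() and mem_uncharge() are
hypothetical stand-ins for tcp_memory_allocated, sk->sk_forward_alloc
and the kernel's sk_mem_* helpers). Charging draws whole pages from the
global counter on demand; uncharging immediately returns every complete
page, so an idle socket ends up with zero forward alloc and a busy one
never holds a full page:

/* Simplified model of the "keep sk->sk_forward_alloc small" policy.
 * Illustrative only, not kernel code: memory_allocated stands in for
 * tcp_memory_allocated, fwd_alloc for sk->sk_forward_alloc.
 */
#include <stdio.h>

#define PAGE_SIZE 4096

static long memory_allocated;	/* global counter, in bytes */

struct sock_model {
	int fwd_alloc;		/* per-socket forward allocation, bytes */
};

/* Charge @size bytes; draw whole pages from the global pool on demand. */
static void mem_charge(struct sock_model *sk, int size)
{
	while (sk->fwd_alloc < size) {
		sk->fwd_alloc += PAGE_SIZE;
		memory_allocated += PAGE_SIZE;
	}
	sk->fwd_alloc -= size;
}

/* Uncharge @size bytes and immediately give back every complete page,
 * so fwd_alloc always ends up strictly smaller than PAGE_SIZE.
 */
static void mem_uncharge(struct sock_model *sk, int size)
{
	int reclaimable;

	sk->fwd_alloc += size;
	reclaimable = sk->fwd_alloc & ~(PAGE_SIZE - 1);
	if (reclaimable) {
		sk->fwd_alloc -= reclaimable;
		memory_allocated -= reclaimable;
	}
}

int main(void)
{
	struct sock_model sk = { 0 };

	mem_charge(&sk, 3000);	  /* enqueue a packet */
	mem_uncharge(&sk, 3000);  /* dequeue it again */
	/* Idle socket: forward alloc and global counter both back to 0. */
	printf("fwd_alloc=%d memory_allocated=%ld\n",
	       sk.fwd_alloc, memory_allocated);
	return 0;
}

Once the queued data is consumed this prints fwd_alloc=0
memory_allocated=0; the trade-off is that the global counter is touched
more often, which the per-cpu reserves mentioned in the changelog are
there to absorb.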
Diffstat (limited to 'net/sctp')
-rw-r--r--	net/sctp/sm_statefuns.c		2
-rw-r--r--	net/sctp/socket.c		5
-rw-r--r--	net/sctp/stream_interleave.c	2
-rw-r--r--	net/sctp/ulpqueue.c		4
4 files changed, 0 insertions, 13 deletions
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 52edee1322fc..f6ee7f4040c1 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -6590,8 +6590,6 @@ static int sctp_eat_data(const struct sctp_association *asoc,
pr_debug("%s: under pressure, reneging for tsn:%u\n",
__func__, tsn);
deliver = SCTP_CMD_RENEGE;
- } else {
- sk_mem_reclaim(sk);
}
}
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 05174acd981a..171f1a35d205 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1824,9 +1824,6 @@ static int sctp_sendmsg_to_asoc(struct sctp_association *asoc,
if (sctp_wspace(asoc) < (int)msg_len)
sctp_prsctp_prune(asoc, sinfo, msg_len - sctp_wspace(asoc));
- if (sk_under_memory_pressure(sk))
- sk_mem_reclaim(sk);
-
if (sctp_wspace(asoc) <= 0 || !sk_wmem_schedule(sk, msg_len)) {
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
@@ -9195,8 +9192,6 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
 			goto do_error;
 		if (signal_pending(current))
 			goto do_interrupted;
-		if (sk_under_memory_pressure(sk))
-			sk_mem_reclaim(sk);
 		if ((int)msg_len <= sctp_wspace(asoc) &&
 		    sk_wmem_schedule(sk, msg_len))
 			break;
diff --git a/net/sctp/stream_interleave.c b/net/sctp/stream_interleave.c
index 6b13f737ebf2..bb22b71df7a3 100644
--- a/net/sctp/stream_interleave.c
+++ b/net/sctp/stream_interleave.c
@@ -979,8 +979,6 @@ static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
if (freed >= needed && sctp_ulpevent_idata(ulpq, chunk, gfp) <= 0)
sctp_intl_start_pd(ulpq, gfp);
-
- sk_mem_reclaim(asoc->base.sk);
}
static void sctp_intl_stream_abort_pd(struct sctp_ulpq *ulpq, __u16 sid,
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 407fed46931b..0a8510a0c5e6 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -1100,12 +1100,8 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 		else if (retval == 1)
 			sctp_ulpq_reasm_drain(ulpq);
 	}
-
-	sk_mem_reclaim(asoc->base.sk);
 }
 
-
-
 /* Notify the application if an association is aborted and in
  * partial delivery mode. Send up any pending received messages.
  */