diff options
author:    Eric Dumazet <edumazet@google.com>    2022-05-15 21:24:56 -0700
committer: David S. Miller <davem@davemloft.net>    2022-05-16 11:33:59 +0100
commit:    909876500251b3b48480a840bbf9053588254eee (patch)
tree:      d5be1a42e32e55e228aeee48bc252b5fa2db4d8b /net/core/dev.c
parent:    39564c3fdc6684c6726b63e131d2a9f3809811cb (diff)
net: call skb_defer_free_flush() before each napi_poll()
skb_defer_free_flush() can consume cpu cycles,
it seems better to call it in the inner loop:
- Potentially frees page/skb that will be reallocated while hot.
- Account for the cpu cycles in the @time_limit determination.
- Keep softnet_data.defer_count small to reduce chances for
skb_attempt_defer_free() to send an IPI.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core/dev.c')
-rw-r--r-- | net/core/dev.c | 5 |
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 6359f8953269..04fd056f7f74 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6655,6 +6655,8 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
 	for (;;) {
 		struct napi_struct *n;
 
+		skb_defer_free_flush(sd);
+
 		if (list_empty(&list)) {
 			if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
 				goto end;
@@ -6684,8 +6686,7 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
 		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
 
 	net_rps_action_and_irq_enable(sd);
-end:
-	skb_defer_free_flush(sd);
+end:;
 }
 
 struct netdev_adjacent {