author    | Eric Dumazet <edumazet@google.com>    | 2023-04-21 09:43:57 +0000
committer | David S. Miller <davem@davemloft.net> | 2023-04-23 13:35:07 +0100
commit    | 87eff2ec57b6d68d294013d8dd21e839a1175e3a (patch)
tree      | ced867d044fe2843db10784167cbba91399f5131
parent    | a1aaee7f8f79d1b0595e24f8c3caed24630d6cb6 (diff)
net: optimize napi_threaded_poll() vs RPS/RFS
We use napi_threaded_poll() to reduce our dependency on softirqs.

As a follow-up to commit 821eba962d95 ("net: optimize napi_schedule_rps()"),
we can further remove the need to fire NET_RX_SOFTIRQ whenever
RPS/RFS are used.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- | include/linux/netdevice.h |  3
-rw-r--r-- | net/core/dev.c            | 12
2 files changed, 13 insertions, 2 deletions
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index a6a3e9457d6c..08fbd4622ccf 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3194,7 +3194,10 @@ struct softnet_data {
 #ifdef CONFIG_RPS
 	struct softnet_data	*rps_ipi_list;
 #endif
+
 	bool			in_net_rx_action;
+	bool			in_napi_threaded_poll;
+
 #ifdef CONFIG_NET_FLOW_LIMIT
 	struct sd_flow_limit __rcu *flow_limit;
 #endif
diff --git a/net/core/dev.c b/net/core/dev.c
index 7d9ec23f97c6..735096d42c1d 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4603,10 +4603,10 @@ static void napi_schedule_rps(struct softnet_data *sd)
 		sd->rps_ipi_next = mysd->rps_ipi_list;
 		mysd->rps_ipi_list = sd;
 
-		/* If not called from net_rx_action()
+		/* If not called from net_rx_action() or napi_threaded_poll()
 		 * we have to raise NET_RX_SOFTIRQ.
 		 */
-		if (!mysd->in_net_rx_action)
+		if (!mysd->in_net_rx_action && !mysd->in_napi_threaded_poll)
 			__raise_softirq_irqoff(NET_RX_SOFTIRQ);
 		return;
 	}
@@ -6631,11 +6631,19 @@ static int napi_threaded_poll(void *data)
 
 			local_bh_disable();
 			sd = this_cpu_ptr(&softnet_data);
+			sd->in_napi_threaded_poll = true;
 
 			have = netpoll_poll_lock(napi);
 			__napi_poll(napi, &repoll);
 			netpoll_poll_unlock(have);
 
+			sd->in_napi_threaded_poll = false;
+			barrier();
+
+			if (sd_has_rps_ipi_waiting(sd)) {
+				local_irq_disable();
+				net_rps_action_and_irq_enable(sd);
+			}
 			skb_defer_free_flush(sd);
 			local_bh_enable();
 
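For context, here is a minimal user-space sketch (not kernel code) of the idea behind the patch: while the threaded poller is inside its loop, RPS work queued from that loop does not need an extra NET_RX_SOFTIRQ, because the loop flushes pending RPS IPIs itself before re-enabling bottom halves. All names below (pcpu_state, queue_rps_work, raise_rx_softirq, flush_rps_ipis, poll_once) are illustrative stand-ins, not kernel APIs.

```c
/* Simplified analogue of the flag-guarded deferral used by this patch. */
#include <stdbool.h>
#include <stdio.h>

struct pcpu_state {
	bool in_poll_loop;      /* stand-in for sd->in_napi_threaded_poll */
	int  pending_rps_ipis;  /* stand-in for a non-empty sd->rps_ipi_list */
};

static struct pcpu_state cpu0;

static void raise_rx_softirq(void)
{
	/* Stand-in for __raise_softirq_irqoff(NET_RX_SOFTIRQ). */
	printf("raise NET_RX_SOFTIRQ (extra deferred work)\n");
}

static void queue_rps_work(struct pcpu_state *sd)
{
	sd->pending_rps_ipis++;
	/*
	 * Mirrors the check in napi_schedule_rps(): only fall back to the
	 * softirq when no active poll loop will flush the pending IPIs.
	 */
	if (!sd->in_poll_loop)
		raise_rx_softirq();
}

static void flush_rps_ipis(struct pcpu_state *sd)
{
	/* Stand-in for net_rps_action_and_irq_enable(). */
	printf("flushing %d pending RPS IPIs\n", sd->pending_rps_ipis);
	sd->pending_rps_ipis = 0;
}

static void poll_once(struct pcpu_state *sd)
{
	sd->in_poll_loop = true;

	/* __napi_poll() would run here; pretend it queued RPS work. */
	queue_rps_work(sd);

	sd->in_poll_loop = false;
	/* In the kernel, barrier() keeps the compiler from reordering the
	 * store above past the sd_has_rps_ipi_waiting() check below. */
	if (sd->pending_rps_ipis)
		flush_rps_ipis(sd);
}

int main(void)
{
	poll_once(&cpu0);       /* no softirq raised: the poller flushes itself */
	queue_rps_work(&cpu0);  /* outside the loop: softirq is still needed */
	flush_rps_ipis(&cpu0);
	return 0;
}
```

Note that in the real patch both paths run on the same CPU with bottom halves disabled (napi_schedule_rps() is reached from __napi_poll() in the NAPI thread), so a plain per-CPU flag plus a compiler barrier() is sufficient; no atomics or memory barriers are required.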