author | Eric Dumazet <edumazet@google.com> | 2021-11-16 19:29:23 -0800
---|---|---
committer | David S. Miller <davem@davemloft.net> | 2021-11-17 14:56:16 +0000
commit | dab8fe320726b38a6b1dc6a7ca6e386c5f7779e8 (patch) |
tree | e24c4bd0ebd9f84847ff9e3d6a47a3655dc6151c /net/sched |
parent | 5337824f4dc4bb26f38fbbba4ffb425a92803f15 (diff) |
net: do not inline netif_tx_lock()/netif_tx_unlock()
These are not fast paths; there is no point in inlining them.
Also provide netif_freeze_queues()/netif_unfreeze_queues()
so that we can use them from dev_watchdog() in the following patch.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
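For context, a minimal sketch of how a driver might use the now out-of-line netif_tx_lock()/netif_tx_unlock() pair to quiesce all TX queues around a device reconfiguration. `mydrv_reconfigure()` and `mydrv_reset_hw()` are hypothetical helpers for illustration, not part of this patch:

```c
/* Sketch only: quiesce all TX queues around a hardware reconfiguration.
 * netif_tx_lock() takes dev->tx_global_lock and marks every TX queue
 * frozen; netif_tx_unlock() clears the frozen bits and reschedules the
 * queues. mydrv_reset_hw() is a hypothetical driver-specific helper.
 */
static void mydrv_reconfigure(struct net_device *dev)
{
	netif_tx_lock(dev);	/* blocks concurrent ->ndo_start_xmit() */
	mydrv_reset_hw(dev);	/* hypothetical device-specific reset */
	netif_tx_unlock(dev);	/* unfreeze and kick the queues */
}
```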
Diffstat (limited to 'net/sched')
-rw-r--r-- | net/sched/sch_generic.c | 51 |
1 file changed, 51 insertions(+), 0 deletions(-)
```diff
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 02c46041f76e..389e0d8fc68d 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -445,6 +445,57 @@ unsigned long dev_trans_start(struct net_device *dev)
 }
 EXPORT_SYMBOL(dev_trans_start);
 
+static void netif_freeze_queues(struct net_device *dev)
+{
+	unsigned int i;
+	int cpu;
+
+	cpu = smp_processor_id();
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
+		/* We are the only thread of execution doing a
+		 * freeze, but we have to grab the _xmit_lock in
+		 * order to synchronize with threads which are in
+		 * the ->hard_start_xmit() handler and already
+		 * checked the frozen bit.
+		 */
+		__netif_tx_lock(txq, cpu);
+		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
+		__netif_tx_unlock(txq);
+	}
+}
+
+void netif_tx_lock(struct net_device *dev)
+{
+	spin_lock(&dev->tx_global_lock);
+	netif_freeze_queues(dev);
+}
+EXPORT_SYMBOL(netif_tx_lock);
+
+static void netif_unfreeze_queues(struct net_device *dev)
+{
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
+		/* No need to grab the _xmit_lock here. If the
+		 * queue is not stopped for another reason, we
+		 * force a schedule.
+		 */
+		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
+		netif_schedule_queue(txq);
+	}
+}
+
+void netif_tx_unlock(struct net_device *dev)
+{
+	netif_unfreeze_queues(dev);
+	spin_unlock(&dev->tx_global_lock);
+}
+EXPORT_SYMBOL(netif_tx_unlock);
+
 static void dev_watchdog(struct timer_list *t)
 {
 	struct net_device *dev = from_timer(dev, t, watchdog_timer);
```
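The commit message earmarks the new static helpers for dev_watchdog() in a follow-up patch. That patch is not shown here, so the following is only an illustrative sketch of the stated intent: freezing the queues just around the driver's ->ndo_tx_timeout() callback while dev_watchdog() already holds dev->tx_global_lock.

```c
/* Illustrative sketch, not the follow-up patch itself: with
 * dev->tx_global_lock already held by dev_watchdog(), the helpers can
 * freeze TX only while the driver handles the timeout, instead of
 * keeping every queue frozen for the whole watchdog scan.
 */
netif_freeze_queues(dev);                 /* mark all TX queues frozen */
dev->netdev_ops->ndo_tx_timeout(dev, i);  /* i: index of the timed-out queue */
netif_unfreeze_queues(dev);               /* unfreeze and reschedule queues */
```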