| author | David S. Miller <davem@davemloft.net> | 2008-07-08 23:13:53 -0700 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2008-07-08 23:13:53 -0700 |
| commit | c773e847ea8f6812804e40f52399c6921a00eab1 (patch) | |
| tree | 952e0e262cc0b0f2136bc2a62938ae1d186f896a /include/linux/netdevice.h | |
| parent | eb6aafe3f843cb0e939546c03540a3b4911b6964 (diff) | |
netdev: Move _xmit_lock and xmit_lock_owner into netdev_queue.
Accesses are mostly structured such that, when there are multiple TX
queues, the code transformations will be a little simpler.
Signed-off-by: David S. Miller <davem@davemloft.net>
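
Concretely, the lock and its owner id now travel with the queue rather than with the device. Below is a minimal, hedged userspace sketch of that layout, not kernel code: `pthread_mutex_t` stands in for `spinlock_t` and the cpu id is passed explicitly. The field and helper names mirror the patch; everything else is illustrative.

```c
/* Illustrative userspace model of the per-queue lock layout.
 * pthread_mutex_t is a stand-in for the kernel's spinlock_t. */
#include <pthread.h>
#include <stdio.h>

struct netdev_queue {
	pthread_mutex_t _xmit_lock;      /* moved here from net_device */
	int             xmit_lock_owner; /* cpu holding the lock, or -1 */
};

struct net_device {
	struct netdev_queue tx_queue;    /* a single TX queue at this point */
};

/* Lock a specific queue, recording which "cpu" owns it. */
static void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
	pthread_mutex_lock(&txq->_xmit_lock);
	txq->xmit_lock_owner = cpu;
}

static void __netif_tx_unlock(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	pthread_mutex_unlock(&txq->_xmit_lock);
}

int main(void)
{
	struct net_device dev = {
		.tx_queue = { PTHREAD_MUTEX_INITIALIZER, -1 },
	};

	__netif_tx_lock(&dev.tx_queue, 0);   /* "cpu 0" enters the xmit path */
	printf("owner while held: %d\n", dev.tx_queue.xmit_lock_owner);
	__netif_tx_unlock(&dev.tx_queue);
	printf("owner after unlock: %d\n", dev.tx_queue.xmit_lock_owner);
	return 0;
}
```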
Diffstat (limited to 'include/linux/netdevice.h')
| -rw-r--r-- | include/linux/netdevice.h | 62 |

1 file changed, 39 insertions(+), 23 deletions(-)
```diff
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 28aa8e77cee9..c8d5f128858d 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -453,6 +453,8 @@ struct netdev_queue {
 	struct net_device	*dev;
 	struct Qdisc		*qdisc;
 	struct sk_buff		*gso_skb;
+	spinlock_t		_xmit_lock;
+	int			xmit_lock_owner;
 	struct Qdisc		*qdisc_sleeping;
 	struct list_head	qdisc_list;
 	struct netdev_queue	*next_sched;
@@ -639,12 +641,6 @@ struct net_device
 	/*
 	 * One part is mostly used on xmit path (device)
 	 */
-	/* hard_start_xmit synchronizer */
-	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
-	/* cpu id of processor entered to hard_start_xmit or -1,
-	   if nobody entered there.
-	 */
-	int			xmit_lock_owner;
 	void			*priv;	/* pointer to private data	*/
 	int			(*hard_start_xmit) (struct sk_buff *skb,
 						    struct net_device *dev);
@@ -1402,52 +1398,72 @@ static inline void netif_rx_complete(struct net_device *dev,
  *
  *	Get network device transmit lock
  */
-static inline void __netif_tx_lock(struct net_device *dev, int cpu)
+static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
 {
-	spin_lock(&dev->_xmit_lock);
-	dev->xmit_lock_owner = cpu;
+	spin_lock(&txq->_xmit_lock);
+	txq->xmit_lock_owner = cpu;
 }
 
 static inline void netif_tx_lock(struct net_device *dev)
 {
-	__netif_tx_lock(dev, smp_processor_id());
+	__netif_tx_lock(&dev->tx_queue, smp_processor_id());
+}
+
+static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
+{
+	spin_lock_bh(&txq->_xmit_lock);
+	txq->xmit_lock_owner = smp_processor_id();
 }
 
 static inline void netif_tx_lock_bh(struct net_device *dev)
 {
-	spin_lock_bh(&dev->_xmit_lock);
-	dev->xmit_lock_owner = smp_processor_id();
+	__netif_tx_lock_bh(&dev->tx_queue);
 }
 
-static inline int netif_tx_trylock(struct net_device *dev)
+static inline int __netif_tx_trylock(struct netdev_queue *txq)
 {
-	int ok = spin_trylock(&dev->_xmit_lock);
+	int ok = spin_trylock(&txq->_xmit_lock);
 	if (likely(ok))
-		dev->xmit_lock_owner = smp_processor_id();
+		txq->xmit_lock_owner = smp_processor_id();
 	return ok;
 }
 
+static inline int netif_tx_trylock(struct net_device *dev)
+{
+	return __netif_tx_trylock(&dev->tx_queue);
+}
+
+static inline void __netif_tx_unlock(struct netdev_queue *txq)
+{
+	txq->xmit_lock_owner = -1;
+	spin_unlock(&txq->_xmit_lock);
+}
+
 static inline void netif_tx_unlock(struct net_device *dev)
 {
-	dev->xmit_lock_owner = -1;
-	spin_unlock(&dev->_xmit_lock);
+	__netif_tx_unlock(&dev->tx_queue);
+}
+
+static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
+{
+	txq->xmit_lock_owner = -1;
+	spin_unlock_bh(&txq->_xmit_lock);
 }
 
 static inline void netif_tx_unlock_bh(struct net_device *dev)
 {
-	dev->xmit_lock_owner = -1;
-	spin_unlock_bh(&dev->_xmit_lock);
+	__netif_tx_unlock_bh(&dev->tx_queue);
 }
 
-#define HARD_TX_LOCK(dev, cpu) {			\
+#define HARD_TX_LOCK(dev, txq, cpu) {			\
 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
-		__netif_tx_lock(dev, cpu);		\
+		__netif_tx_lock(txq, cpu);		\
 	}						\
 }
 
-#define HARD_TX_UNLOCK(dev) {				\
+#define HARD_TX_UNLOCK(dev, txq) {			\
 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
-		netif_tx_unlock(dev);			\
+		__netif_tx_unlock(txq);			\
 	}						\
 }
```
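
The `xmit_lock_owner` field is what lets the transmit path detect recursion: if `xmit_lock_owner == smp_processor_id()`, the current cpu already holds this queue's lock and taking it again would deadlock. Here is a minimal userspace sketch of the new per-queue trylock/unlock pairing; pthreads and `sched_getcpu()` are stand-ins for spinlocks and `smp_processor_id()`, and the struct is reduced to the two fields this patch moves.

```c
/* Userspace sketch of the per-queue trylock/unlock pairing; the names
 * mirror the patch, but pthreads and sched_getcpu() are stand-ins. */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

struct netdev_queue {
	pthread_mutex_t _xmit_lock;
	int             xmit_lock_owner;
};

static int __netif_tx_trylock(struct netdev_queue *txq)
{
	int ok = (pthread_mutex_trylock(&txq->_xmit_lock) == 0);
	if (ok)
		txq->xmit_lock_owner = sched_getcpu();
	return ok;
}

static void __netif_tx_unlock(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	pthread_mutex_unlock(&txq->_xmit_lock);
}

int main(void)
{
	struct netdev_queue txq = { PTHREAD_MUTEX_INITIALIZER, -1 };

	if (__netif_tx_trylock(&txq)) {
		/* The recursion check the xmit path relies on: the owner
		 * id says whether *this* cpu already holds the lock. */
		if (txq.xmit_lock_owner == sched_getcpu())
			printf("cpu %d holds the queue lock\n",
			       txq.xmit_lock_owner);
		__netif_tx_unlock(&txq);
	}
	return 0;
}
```

Because every helper now takes a `struct netdev_queue *`, later multiqueue patches only need to change how a queue is chosen, not how it is locked; the `net_device`-level wrappers keep existing single-queue callers working through `&dev->tx_queue`.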