author    | Alexander Aring <alex.aring@gmail.com> | 2014-10-26 09:37:02 +0100
committer | Marcel Holtmann <marcel@holtmann.org> | 2014-10-26 17:23:58 +0100
commit    | fe24371d6645b766c59ec664c59d0a9c310ad455 (patch)
tree      | bc7f7553e60e9dcaee1827740a500b5227ba81cd /net/mac802154/tx.c
parent    | 50c6fb9965907732b4f5c45bd3bacf4b4f3463b9 (diff)
mac802154: tx: remove kmalloc in xmit hotpath
This patch removes the kmalloc allocation for the workqueue data and stores
the data in the skb's control block instead. The control block has enough
space and isn't used by any other layer in this case.
Signed-off-by: Alexander Aring <alex.aring@gmail.com>
Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
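The technique relies on skb->cb, the 48-byte scratch area in struct sk_buff that belongs to whichever layer currently owns the skb. A minimal sketch of the pattern follows; struct example_cb and its fields are illustrative only, not the structure added by this patch (that one is struct wpan_xmit_cb in net/mac802154/tx.c):

```c
#include <linux/bug.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>

/* Per-packet state kept directly in the skb control block instead of a
 * separately kzalloc'ed work item (illustrative names, not kernel API).
 */
struct example_cb {
        struct work_struct work;
        u8 page;
        u8 chan;
};

static inline struct example_cb *example_cb(const struct sk_buff *skb)
{
        /* Fail the build if the state ever outgrows the skb->cb area. */
        BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct example_cb));

        return (struct example_cb *)skb->cb;
}
```

Because the per-packet state travels with the skb itself, the kzalloc()/kfree() pair and the NETDEV_TX_BUSY fallback for a failed allocation can be dropped from the hot path, as the diff below shows.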
Diffstat (limited to 'net/mac802154/tx.c')
-rw-r--r-- | net/mac802154/tx.c | 56
1 file changed, 27 insertions, 29 deletions
```diff
diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
index 2eb06c2cf96d..513e760a8557 100644
--- a/net/mac802154/tx.c
+++ b/net/mac802154/tx.c
@@ -30,7 +30,7 @@
 /* IEEE 802.15.4 transceivers can sleep during the xmit session, so process
  * packets through the workqueue.
  */
-struct xmit_work {
+struct wpan_xmit_cb {
        struct sk_buff *skb;
        struct work_struct work;
        struct ieee802154_local *local;
@@ -38,50 +38,54 @@ struct xmit_work {
        u8 page;
 };
 
+static inline struct wpan_xmit_cb *wpan_xmit_cb(const struct sk_buff *skb)
+{
+       BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct wpan_xmit_cb));
+
+       return (struct wpan_xmit_cb *)skb->cb;
+}
+
 static void mac802154_xmit_worker(struct work_struct *work)
 {
-       struct xmit_work *xw = container_of(work, struct xmit_work, work);
+       struct wpan_xmit_cb *cb = container_of(work, struct wpan_xmit_cb, work);
        struct ieee802154_sub_if_data *sdata;
        int res;
 
-       mutex_lock(&xw->local->phy->pib_lock);
-       if (xw->local->phy->current_channel != xw->chan ||
-           xw->local->phy->current_page != xw->page) {
-               res = xw->local->ops->set_channel(&xw->local->hw,
-                                                 xw->page,
-                                                 xw->chan);
+       mutex_lock(&cb->local->phy->pib_lock);
+       if (cb->local->phy->current_channel != cb->chan ||
+           cb->local->phy->current_page != cb->page) {
+               res = cb->local->ops->set_channel(&cb->local->hw, cb->page,
+                                                 cb->chan);
                if (res) {
                        pr_debug("set_channel failed\n");
                        goto out;
                }
 
-               xw->local->phy->current_channel = xw->chan;
-               xw->local->phy->current_page = xw->page;
+               cb->local->phy->current_channel = cb->chan;
+               cb->local->phy->current_page = cb->page;
        }
 
-       res = xw->local->ops->xmit(&xw->local->hw, xw->skb);
+       res = cb->local->ops->xmit(&cb->local->hw, cb->skb);
        if (res)
                pr_debug("transmission failed\n");
 
 out:
-       mutex_unlock(&xw->local->phy->pib_lock);
+       mutex_unlock(&cb->local->phy->pib_lock);
 
        /* Restart the netif queue on each sub_if_data object. */
        rcu_read_lock();
-       list_for_each_entry_rcu(sdata, &xw->local->interfaces, list)
+       list_for_each_entry_rcu(sdata, &cb->local->interfaces, list)
                netif_wake_queue(sdata->dev);
        rcu_read_unlock();
 
-       dev_kfree_skb(xw->skb);
-
-       kfree(xw);
+       dev_kfree_skb(cb->skb);
 }
 
 static netdev_tx_t mac802154_tx(struct ieee802154_local *local,
                                struct sk_buff *skb, u8 page, u8 chan)
 {
-       struct xmit_work *work;
        struct ieee802154_sub_if_data *sdata;
+       struct wpan_xmit_cb *cb = wpan_xmit_cb(skb);
 
        if (!(local->phy->channels_supported[page] & (1 << chan))) {
                WARN_ON(1);
@@ -101,25 +105,19 @@ static netdev_tx_t mac802154_tx(struct ieee802154_local *local,
        if (skb_cow_head(skb, local->hw.extra_tx_headroom))
                goto err_tx;
 
-       work = kzalloc(sizeof(*work), GFP_ATOMIC);
-       if (!work) {
-               kfree_skb(skb);
-               return NETDEV_TX_BUSY;
-       }
-
        /* Stop the netif queue on each sub_if_data object. */
        rcu_read_lock();
        list_for_each_entry_rcu(sdata, &local->interfaces, list)
                netif_stop_queue(sdata->dev);
        rcu_read_unlock();
 
-       INIT_WORK(&work->work, mac802154_xmit_worker);
-       work->skb = skb;
-       work->local = local;
-       work->page = page;
-       work->chan = chan;
+       INIT_WORK(&cb->work, mac802154_xmit_worker);
+       cb->skb = skb;
+       cb->local = local;
+       cb->page = page;
+       cb->chan = chan;
 
-       queue_work(local->workqueue, &work->work);
+       queue_work(local->workqueue, &cb->work);
 
        return NETDEV_TX_OK;
```