author     James Morris <jmorris@namei.org>    2009-03-28 14:57:13 +1100
committer  James Morris <jmorris@namei.org>    2009-03-28 14:57:13 +1100
commit     bb798169d1bb860b07192cf9c75937fadc8610b4 (patch)
tree       fa67f14406a1e79897e6f29e59fed7c02ec31c30 /net/mac80211/tx.c
parent     a106cbfd1f3703402fc2d95d97e7a054102250f0 (diff)
parent     5d80f8e5a9dc9c9a94d4aeaa567e219a808b8a4a (diff)
Merge branch 'master' of ssh://master.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6 into next
Diffstat (limited to 'net/mac80211/tx.c')
-rw-r--r--  net/mac80211/tx.c  677
1 files changed, 421 insertions, 256 deletions
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 37e3d5ef7e3f..3fb04a86444d 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -34,7 +34,7 @@
#define IEEE80211_TX_OK 0
#define IEEE80211_TX_AGAIN 1
-#define IEEE80211_TX_FRAG_AGAIN 2
+#define IEEE80211_TX_PENDING 2
/* misc utils */
@@ -192,7 +192,19 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
return TX_CONTINUE;
if (unlikely(tx->local->sw_scanning) &&
- !ieee80211_is_probe_req(hdr->frame_control))
+ !ieee80211_is_probe_req(hdr->frame_control) &&
+ !ieee80211_is_nullfunc(hdr->frame_control))
+ /*
+ * When software scanning only nullfunc frames (to notify
+ * the sleep state to the AP) and probe requests (for the
+ * active scan) are allowed, all other frames should not be
+ * sent and we should not get here, but if we do
+ * nonetheless, drop them to avoid sending them
+ * off-channel. See the link below and
+ * ieee80211_start_scan() for more.
+ *
+ * http://article.gmane.org/gmane.linux.kernel.wireless.general/30089
+ */
return TX_DROP;
if (tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
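[Editor's note: the hunk above widens the scan-time filter so that, besides probe requests, nullfunc frames (used to tell the AP we are dozing) are also allowed out while a software scan runs; everything else is dropped to avoid off-channel transmissions. A rough userspace sketch of the same frame-type test, with hypothetical names, treating the frame control field as a host-order value for simplicity (the real code uses the __le16 helpers ieee80211_is_probe_req()/ieee80211_is_nullfunc() from linux/ieee80211.h):]

	#include <stdbool.h>
	#include <stdint.h>

	#define FCTL_FTYPE	0x000c
	#define FCTL_STYPE	0x00f0
	#define FTYPE_MGMT	0x0000
	#define FTYPE_DATA	0x0008
	#define STYPE_PROBE_REQ	0x0040
	#define STYPE_NULLFUNC	0x0048

	/* only probe requests and nullfunc frames may go out mid-scan */
	static bool tx_allowed_while_scanning(uint16_t fc)
	{
		uint16_t masked = fc & (FCTL_FTYPE | FCTL_STYPE);

		return masked == (FTYPE_MGMT | STYPE_PROBE_REQ) ||
		       masked == (FTYPE_DATA | STYPE_NULLFUNC);
	}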
@@ -330,6 +342,22 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
return TX_CONTINUE;
}
+static int ieee80211_use_mfp(__le16 fc, struct sta_info *sta,
+ struct sk_buff *skb)
+{
+ if (!ieee80211_is_mgmt(fc))
+ return 0;
+
+ if (sta == NULL || !test_sta_flags(sta, WLAN_STA_MFP))
+ return 0;
+
+ if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *)
+ skb->data))
+ return 0;
+
+ return 1;
+}
+
static ieee80211_tx_result
ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
{
@@ -409,11 +437,17 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
tx->key = NULL;
else if (tx->sta && (key = rcu_dereference(tx->sta->key)))
tx->key = key;
+ else if (ieee80211_is_mgmt(hdr->frame_control) &&
+ (key = rcu_dereference(tx->sdata->default_mgmt_key)))
+ tx->key = key;
else if ((key = rcu_dereference(tx->sdata->default_key)))
tx->key = key;
else if (tx->sdata->drop_unencrypted &&
(tx->skb->protocol != cpu_to_be16(ETH_P_PAE)) &&
- !(info->flags & IEEE80211_TX_CTL_INJECTED)) {
+ !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
+ (!ieee80211_is_robust_mgmt_frame(hdr) ||
+ (ieee80211_is_action(hdr->frame_control) &&
+ tx->sta && test_sta_flags(tx->sta, WLAN_STA_MFP)))) {
I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted);
return TX_DROP;
} else
@@ -428,10 +462,19 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
if (ieee80211_is_auth(hdr->frame_control))
break;
case ALG_TKIP:
- case ALG_CCMP:
if (!ieee80211_is_data_present(hdr->frame_control))
tx->key = NULL;
break;
+ case ALG_CCMP:
+ if (!ieee80211_is_data_present(hdr->frame_control) &&
+ !ieee80211_use_mfp(hdr->frame_control, tx->sta,
+ tx->skb))
+ tx->key = NULL;
+ break;
+ case ALG_AES_CMAC:
+ if (!ieee80211_is_mgmt(hdr->frame_control))
+ tx->key = NULL;
+ break;
}
}
@@ -658,17 +701,62 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
return TX_CONTINUE;
}
+static int ieee80211_fragment(struct ieee80211_local *local,
+ struct sk_buff *skb, int hdrlen,
+ int frag_threshold)
+{
+ struct sk_buff *tail = skb, *tmp;
+ int per_fragm = frag_threshold - hdrlen - FCS_LEN;
+ int pos = hdrlen + per_fragm;
+ int rem = skb->len - hdrlen - per_fragm;
+
+ if (WARN_ON(rem < 0))
+ return -EINVAL;
+
+ while (rem) {
+ int fraglen = per_fragm;
+
+ if (fraglen > rem)
+ fraglen = rem;
+ rem -= fraglen;
+ tmp = dev_alloc_skb(local->tx_headroom +
+ frag_threshold +
+ IEEE80211_ENCRYPT_HEADROOM +
+ IEEE80211_ENCRYPT_TAILROOM);
+ if (!tmp)
+ return -ENOMEM;
+ tail->next = tmp;
+ tail = tmp;
+ skb_reserve(tmp, local->tx_headroom +
+ IEEE80211_ENCRYPT_HEADROOM);
+ /* copy control information */
+ memcpy(tmp->cb, skb->cb, sizeof(tmp->cb));
+ skb_copy_queue_mapping(tmp, skb);
+ tmp->priority = skb->priority;
+ tmp->do_not_encrypt = skb->do_not_encrypt;
+ tmp->dev = skb->dev;
+ tmp->iif = skb->iif;
+
+ /* copy header and data */
+ memcpy(skb_put(tmp, hdrlen), skb->data, hdrlen);
+ memcpy(skb_put(tmp, fraglen), skb->data + pos, fraglen);
+
+ pos += fraglen;
+ }
+
+ skb->len = hdrlen + per_fragm;
+ return 0;
+}
+
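[Editor's note: ieee80211_fragment() above carves the payload into per_fragm-byte chunks, where per_fragm leaves room in each fragment for the 802.11 header and the FCS appended by hardware. A standalone sketch of the same sizing arithmetic, with a hypothetical helper and example values chosen only for illustration:]

	#include <stdio.h>

	#define FCS_LEN 4	/* 802.11 FCS appended per fragment */

	/* how many MPDUs does a frame of skb_len bytes become? */
	static int num_fragments(int skb_len, int hdrlen, int frag_threshold)
	{
		int per_fragm = frag_threshold - hdrlen - FCS_LEN;
		int payload = skb_len - hdrlen;

		if (per_fragm <= 0 || payload <= 0)
			return -1;	/* threshold too small / nothing to split */
		return (payload + per_fragm - 1) / per_fragm;	/* DIV_ROUND_UP */
	}

	int main(void)
	{
		/* 1500-byte payload, 24-byte header, threshold 256:
		 * per_fragm = 228, so 7 fragments */
		printf("%d fragments\n", num_fragments(24 + 1500, 24, 256));
		return 0;
	}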
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
- size_t hdrlen, per_fragm, num_fragm, payload_len, left;
- struct sk_buff **frags, *first, *frag;
- int i;
- u16 seq;
- u8 *pos;
+ struct sk_buff *skb = tx->skb;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (void *)skb->data;
int frag_threshold = tx->local->fragmentation_threshold;
+ int hdrlen;
+ int fragnum;
if (!(tx->flags & IEEE80211_TX_FRAGMENTED))
return TX_CONTINUE;
@@ -681,58 +769,35 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU))
return TX_DROP;
- first = tx->skb;
-
hdrlen = ieee80211_hdrlen(hdr->frame_control);
- payload_len = first->len - hdrlen;
- per_fragm = frag_threshold - hdrlen - FCS_LEN;
- num_fragm = DIV_ROUND_UP(payload_len, per_fragm);
-
- frags = kzalloc(num_fragm * sizeof(struct sk_buff *), GFP_ATOMIC);
- if (!frags)
- goto fail;
-
- hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
- seq = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ;
- pos = first->data + hdrlen + per_fragm;
- left = payload_len - per_fragm;
- for (i = 0; i < num_fragm - 1; i++) {
- struct ieee80211_hdr *fhdr;
- size_t copylen;
-
- if (left <= 0)
- goto fail;
- /* reserve enough extra head and tail room for possible
- * encryption */
- frag = frags[i] =
- dev_alloc_skb(tx->local->tx_headroom +
- frag_threshold +
- IEEE80211_ENCRYPT_HEADROOM +
- IEEE80211_ENCRYPT_TAILROOM);
- if (!frag)
- goto fail;
-
- /* Make sure that all fragments use the same priority so
- * that they end up using the same TX queue */
- frag->priority = first->priority;
+ /* internal error, why is TX_FRAGMENTED set? */
+ if (WARN_ON(skb->len <= frag_threshold))
+ return TX_DROP;
- skb_reserve(frag, tx->local->tx_headroom +
- IEEE80211_ENCRYPT_HEADROOM);
+ /*
+ * Now fragment the frame. This will allocate all the fragments and
+ * chain them (using skb as the first fragment) to skb->next.
+ * During transmission, we will remove the successfully transmitted
+ * fragments from this list. When the low-level driver rejects one
+ * of the fragments then we will simply pretend to accept the skb
+ * but store it away as pending.
+ */
+ if (ieee80211_fragment(tx->local, skb, hdrlen, frag_threshold))
+ return TX_DROP;
- /* copy TX information */
- info = IEEE80211_SKB_CB(frag);
- memcpy(info, first->cb, sizeof(frag->cb));
+ /* update duration/seq/flags of fragments */
+ fragnum = 0;
+ do {
+ int next_len;
+ const __le16 morefrags = cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
- /* copy/fill in 802.11 header */
- fhdr = (struct ieee80211_hdr *) skb_put(frag, hdrlen);
- memcpy(fhdr, first->data, hdrlen);
- fhdr->seq_ctrl = cpu_to_le16(seq | ((i + 1) & IEEE80211_SCTL_FRAG));
+ hdr = (void *)skb->data;
+ info = IEEE80211_SKB_CB(skb);
- if (i == num_fragm - 2) {
- /* clear MOREFRAGS bit for the last fragment */
- fhdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREFRAGS);
- } else {
+ if (skb->next) {
+ hdr->frame_control |= morefrags;
+ next_len = skb->next->len;
/*
* No multi-rate retries for fragmented frames, that
* would completely throw off the NAV at other STAs.
@@ -743,37 +808,16 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
info->control.rates[4].idx = -1;
BUILD_BUG_ON(IEEE80211_TX_MAX_RATES != 5);
info->flags &= ~IEEE80211_TX_CTL_RATE_CTRL_PROBE;
+ } else {
+ hdr->frame_control &= ~morefrags;
+ next_len = 0;
}
-
- /* copy data */
- copylen = left > per_fragm ? per_fragm : left;
- memcpy(skb_put(frag, copylen), pos, copylen);
-
- skb_copy_queue_mapping(frag, first);
-
- frag->do_not_encrypt = first->do_not_encrypt;
- frag->dev = first->dev;
- frag->iif = first->iif;
-
- pos += copylen;
- left -= copylen;
- }
- skb_trim(first, hdrlen + per_fragm);
-
- tx->num_extra_frag = num_fragm - 1;
- tx->extra_frag = frags;
+ hdr->duration_id = ieee80211_duration(tx, 0, next_len);
+ hdr->seq_ctrl |= cpu_to_le16(fragnum & IEEE80211_SCTL_FRAG);
+ fragnum++;
+ } while ((skb = skb->next));
return TX_CONTINUE;
-
- fail:
- if (frags) {
- for (i = 0; i < num_fragm - 1; i++)
- if (frags[i])
- dev_kfree_skb(frags[i]);
- kfree(frags);
- }
- I802_DEBUG_INC(tx->local->tx_handlers_drop_fragment);
- return TX_DROP;
}
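[Editor's note: the rewritten handler no longer keeps an extra_frag array; it walks the skb chain built by ieee80211_fragment() and patches each header in place, setting MOREFRAGS on every fragment except the last and numbering them by position. The pattern, reduced to a minimal sketch with a hypothetical struct:]

	#include <stdbool.h>
	#include <stddef.h>

	struct frag {
		struct frag *next;	/* chained like skb->next above */
		bool more_frags;
		unsigned int fragnum;
	};

	static void mark_fragments(struct frag *f)
	{
		unsigned int fragnum = 0;

		do {
			f->more_frags = (f->next != NULL);
			f->fragnum = fragnum++;
		} while ((f = f->next));
	}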
static ieee80211_tx_result debug_noinline
@@ -789,6 +833,8 @@ ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
return ieee80211_crypto_tkip_encrypt(tx);
case ALG_CCMP:
return ieee80211_crypto_ccmp_encrypt(tx);
+ case ALG_AES_CMAC:
+ return ieee80211_crypto_aes_cmac_encrypt(tx);
}
/* not reached */
@@ -799,27 +845,19 @@ ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx)
{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
- int next_len, i;
- int group_addr = is_multicast_ether_addr(hdr->addr1);
-
- if (!(tx->flags & IEEE80211_TX_FRAGMENTED)) {
- hdr->duration_id = ieee80211_duration(tx, group_addr, 0);
- return TX_CONTINUE;
- }
-
- hdr->duration_id = ieee80211_duration(tx, group_addr,
- tx->extra_frag[0]->len);
+ struct sk_buff *skb = tx->skb;
+ struct ieee80211_hdr *hdr;
+ int next_len;
+ bool group_addr;
- for (i = 0; i < tx->num_extra_frag; i++) {
- if (i + 1 < tx->num_extra_frag)
- next_len = tx->extra_frag[i + 1]->len;
- else
- next_len = 0;
+ do {
+ hdr = (void *) skb->data;
+ next_len = skb->next ? skb->next->len : 0;
+ group_addr = is_multicast_ether_addr(hdr->addr1);
- hdr = (struct ieee80211_hdr *)tx->extra_frag[i]->data;
- hdr->duration_id = ieee80211_duration(tx, 0, next_len);
- }
+ hdr->duration_id =
+ ieee80211_duration(tx, group_addr, next_len);
+ } while ((skb = skb->next));
return TX_CONTINUE;
}
@@ -827,24 +865,20 @@ ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx)
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_stats(struct ieee80211_tx_data *tx)
{
- int i;
+ struct sk_buff *skb = tx->skb;
if (!tx->sta)
return TX_CONTINUE;
tx->sta->tx_packets++;
- tx->sta->tx_fragments++;
- tx->sta->tx_bytes += tx->skb->len;
- if (tx->extra_frag) {
- tx->sta->tx_fragments += tx->num_extra_frag;
- for (i = 0; i < tx->num_extra_frag; i++)
- tx->sta->tx_bytes += tx->extra_frag[i]->len;
- }
+ do {
+ tx->sta->tx_fragments++;
+ tx->sta->tx_bytes += skb->len;
+ } while ((skb = skb->next));
return TX_CONTINUE;
}
-
/* actual transmit path */
/*
@@ -950,9 +984,9 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
struct ieee80211_hdr *hdr;
struct ieee80211_sub_if_data *sdata;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
int hdrlen, tid;
u8 *qc, *state;
+ bool queued = false;
memset(tx, 0, sizeof(*tx));
tx->skb = skb;
@@ -979,17 +1013,53 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
*/
}
+ /*
+ * If this flag is set to true anywhere, and we get here,
+ * we are doing the needed processing, so remove the flag
+ * now.
+ */
+ info->flags &= ~IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+
hdr = (struct ieee80211_hdr *) skb->data;
tx->sta = sta_info_get(local, hdr->addr1);
- if (tx->sta && ieee80211_is_data_qos(hdr->frame_control)) {
+ if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
+ (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) {
+ unsigned long flags;
+ struct tid_ampdu_tx *tid_tx;
+
qc = ieee80211_get_qos_ctl(hdr);
tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
+ spin_lock_irqsave(&tx->sta->lock, flags);
+ /*
+ * XXX: This spinlock could be fairly expensive, but see the
+ * comment in agg-tx.c:ieee80211_agg_tx_operational().
+ * One way to solve this would be to do something RCU-like
+ * for managing the tid_tx struct and using atomic bitops
+ * for the actual state -- by introducing an actual
+ * 'operational' bit that would be possible. It would
+ * require changing ieee80211_agg_tx_operational() to
+ * set that bit, and changing the way tid_tx is managed
+ * everywhere, including races between that bit and
+ * tid_tx going away (tid_tx being added can be easily
+ * committed to memory before the 'operational' bit).
+ */
+ tid_tx = tx->sta->ampdu_mlme.tid_tx[tid];
state = &tx->sta->ampdu_mlme.tid_state_tx[tid];
- if (*state == HT_AGG_STATE_OPERATIONAL)
+ if (*state == HT_AGG_STATE_OPERATIONAL) {
info->flags |= IEEE80211_TX_CTL_AMPDU;
+ } else if (*state != HT_AGG_STATE_IDLE) {
+ /* in progress */
+ queued = true;
+ info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+ __skb_queue_tail(&tid_tx->pending, skb);
+ }
+ spin_unlock_irqrestore(&tx->sta->lock, flags);
+
+ if (unlikely(queued))
+ return TX_QUEUED;
}
if (is_multicast_ether_addr(hdr->addr1)) {
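[Editor's note: the TX-prepare hunk above makes a three-way decision under the station lock. A minimal sketch of that decision with hypothetical enum names, not the mac80211 API: an operational block-ack session marks the frame for A-MPDU, a session still being negotiated parks the frame on the per-TID pending queue (to be reprocessed later via IEEE80211_TX_INTFL_NEED_TXPROCESSING), and an idle TID sends normally.]

	enum agg_state { AGG_IDLE, AGG_IN_PROGRESS, AGG_OPERATIONAL };

	enum tx_verdict { TX_SEND, TX_SEND_AMPDU, TX_DEFER };

	static enum tx_verdict classify_tx(enum agg_state state)
	{
		switch (state) {
		case AGG_OPERATIONAL:
			return TX_SEND_AMPDU;	/* set IEEE80211_TX_CTL_AMPDU */
		case AGG_IN_PROGRESS:
			return TX_DEFER;	/* queue on tid_tx->pending */
		case AGG_IDLE:
		default:
			return TX_SEND;		/* no aggregation for this TID */
		}
	}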
@@ -1040,51 +1110,55 @@ static int ieee80211_tx_prepare(struct ieee80211_local *local,
}
if (unlikely(!dev))
return -ENODEV;
- /* initialises tx with control */
+ /*
+ * initialises tx with control
+ *
+ * return value is safe to ignore here because this function
+ * can only be invoked for multicast frames
+ *
+ * XXX: clean up
+ */
__ieee80211_tx_prepare(tx, skb, dev);
dev_put(dev);
return 0;
}
-static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb,
- struct ieee80211_tx_data *tx)
+static int __ieee80211_tx(struct ieee80211_local *local,
+ struct sk_buff **skbp,
+ struct sta_info *sta)
{
+ struct sk_buff *skb = *skbp, *next;
struct ieee80211_tx_info *info;
- int ret, i;
+ int ret, len;
+ bool fragm = false;
- if (skb) {
- if (netif_subqueue_stopped(local->mdev, skb))
- return IEEE80211_TX_AGAIN;
- info = IEEE80211_SKB_CB(skb);
+ local->mdev->trans_start = jiffies;
- ret = local->ops->tx(local_to_hw(local), skb);
- if (ret)
- return IEEE80211_TX_AGAIN;
- local->mdev->trans_start = jiffies;
- ieee80211_led_tx(local, 1);
- }
- if (tx->extra_frag) {
- for (i = 0; i < tx->num_extra_frag; i++) {
- if (!tx->extra_frag[i])
- continue;
- info = IEEE80211_SKB_CB(tx->extra_frag[i]);
+ while (skb) {
+ if (ieee80211_queue_stopped(&local->hw,
+ skb_get_queue_mapping(skb)))
+ return IEEE80211_TX_PENDING;
+
+ info = IEEE80211_SKB_CB(skb);
+
+ if (fragm)
info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT |
IEEE80211_TX_CTL_FIRST_FRAGMENT);
- if (netif_subqueue_stopped(local->mdev,
- tx->extra_frag[i]))
- return IEEE80211_TX_FRAG_AGAIN;
-
- ret = local->ops->tx(local_to_hw(local),
- tx->extra_frag[i]);
- if (ret)
- return IEEE80211_TX_FRAG_AGAIN;
- local->mdev->trans_start = jiffies;
- ieee80211_led_tx(local, 1);
- tx->extra_frag[i] = NULL;
+
+ next = skb->next;
+ len = skb->len;
+ ret = local->ops->tx(local_to_hw(local), skb);
+ if (WARN_ON(ret != NETDEV_TX_OK && skb->len != len)) {
+ dev_kfree_skb(skb);
+ ret = NETDEV_TX_OK;
}
- kfree(tx->extra_frag);
- tx->extra_frag = NULL;
+ if (ret != NETDEV_TX_OK)
+ return IEEE80211_TX_AGAIN;
+ *skbp = skb = next;
+ ieee80211_led_tx(local, 1);
+ fragm = true;
}
+
return IEEE80211_TX_OK;
}
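[Editor's note: __ieee80211_tx() now transmits the fragment chain in order and advances the caller's head pointer past every accepted fragment, so when the driver refuses one only the not-yet-sent tail remains for requeueing. A sketch of that contract with a hypothetical packet type and driver callback:]

	#include <stdbool.h>

	struct pkt {
		struct pkt *next;
	};

	/* pretend driver hook: returns false when its queues are full */
	static bool driver_tx(struct pkt *p)
	{
		(void)p;
		return true;
	}

	static bool tx_chain(struct pkt **headp)
	{
		struct pkt *p = *headp;

		while (p) {
			if (!driver_tx(p))
				return false;	/* *headp is the first unsent pkt */
			*headp = p = p->next;	/* drop the accepted pkt from the list */
		}
		return true;
	}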
@@ -1096,7 +1170,6 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
{
struct sk_buff *skb = tx->skb;
ieee80211_tx_result res = TX_DROP;
- int i;
#define CALL_TXH(txh) \
res = txh(tx); \
@@ -1120,11 +1193,13 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
txh_done:
if (unlikely(res == TX_DROP)) {
I802_DEBUG_INC(tx->local->tx_handlers_drop);
- dev_kfree_skb(skb);
- for (i = 0; i < tx->num_extra_frag; i++)
- if (tx->extra_frag[i])
- dev_kfree_skb(tx->extra_frag[i]);
- kfree(tx->extra_frag);
+ while (skb) {
+ struct sk_buff *next;
+
+ next = skb->next;
+ dev_kfree_skb(skb);
+ skb = next;
+ }
return -1;
} else if (unlikely(res == TX_QUEUED)) {
I802_DEBUG_INC(tx->local->tx_handlers_queued);
@@ -1134,23 +1209,26 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
return 0;
}
-static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb)
+static void ieee80211_tx(struct net_device *dev, struct sk_buff *skb,
+ bool txpending)
{
struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
struct sta_info *sta;
struct ieee80211_tx_data tx;
ieee80211_tx_result res_prepare;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- int ret, i;
+ struct sk_buff *next;
+ unsigned long flags;
+ int ret, retries;
u16 queue;
queue = skb_get_queue_mapping(skb);
- WARN_ON(test_bit(queue, local->queues_pending));
+ WARN_ON(!txpending && !skb_queue_empty(&local->pending[queue]));
if (unlikely(skb->len < 10)) {
dev_kfree_skb(skb);
- return 0;
+ return;
}
rcu_read_lock();
@@ -1158,10 +1236,13 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb)
/* initialises tx */
res_prepare = __ieee80211_tx_prepare(&tx, skb, dev);
- if (res_prepare == TX_DROP) {
+ if (unlikely(res_prepare == TX_DROP)) {
dev_kfree_skb(skb);
rcu_read_unlock();
- return 0;
+ return;
+ } else if (unlikely(res_prepare == TX_QUEUED)) {
+ rcu_read_unlock();
+ return;
}
sta = tx.sta;
@@ -1171,11 +1252,13 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb)
if (invoke_tx_handlers(&tx))
goto out;
-retry:
- ret = __ieee80211_tx(local, skb, &tx);
- if (ret) {
- struct ieee80211_tx_stored_packet *store;
-
+ retries = 0;
+ retry:
+ ret = __ieee80211_tx(local, &tx.skb, tx.sta);
+ switch (ret) {
+ case IEEE80211_TX_OK:
+ break;
+ case IEEE80211_TX_AGAIN:
/*
* Since there are no fragmented frames on A-MPDU
* queues, there's no reason for a driver to reject
@@ -1183,46 +1266,57 @@ retry:
*/
if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU))
goto drop;
+ /* fall through */
+ case IEEE80211_TX_PENDING:
+ skb = tx.skb;
+
+ spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+
+ if (__netif_subqueue_stopped(local->mdev, queue)) {
+ do {
+ next = skb->next;
+ skb->next = NULL;
+ if (unlikely(txpending))
+ skb_queue_head(&local->pending[queue],
+ skb);
+ else
+ skb_queue_tail(&local->pending[queue],
+ skb);
+ } while ((skb = next));
- store = &local->pending_packet[queue];
+ /*
+ * Make sure nobody will enable the queue on us
+ * (without going through the tasklet) nor disable the
+ * netdev queue underneath the pending handling code.
+ */
+ __set_bit(IEEE80211_QUEUE_STOP_REASON_PENDING,
+ &local->queue_stop_reasons[queue]);
- if (ret == IEEE80211_TX_FRAG_AGAIN)
- skb = NULL;
+ spin_unlock_irqrestore(&local->queue_stop_reason_lock,
+ flags);
+ } else {
+ spin_unlock_irqrestore(&local->queue_stop_reason_lock,
+ flags);
- set_bit(queue, local->queues_pending);
- smp_mb();
- /*
- * When the driver gets out of buffers during sending of
- * fragments and calls ieee80211_stop_queue, the netif
- * subqueue is stopped. There is, however, a small window
- * in which the PENDING bit is not yet set. If a buffer
- * gets available in that window (i.e. driver calls
- * ieee80211_wake_queue), we would end up with ieee80211_tx
- * called with the PENDING bit still set. Prevent this by
- * continuing transmitting here when that situation is
- * possible to have happened.
- */
- if (!__netif_subqueue_stopped(local->mdev, queue)) {
- clear_bit(queue, local->queues_pending);
+ retries++;
+ if (WARN(retries > 10, "tx refused but queue active"))
+ goto drop;
goto retry;
}
- store->skb = skb;
- store->extra_frag = tx.extra_frag;
- store->num_extra_frag = tx.num_extra_frag;
}
out:
rcu_read_unlock();
- return 0;
+ return;
drop:
- if (skb)
- dev_kfree_skb(skb);
- for (i = 0; i < tx.num_extra_frag; i++)
- if (tx.extra_frag[i])
- dev_kfree_skb(tx.extra_frag[i]);
- kfree(tx.extra_frag);
rcu_read_unlock();
- return 0;
+
+ skb = tx.skb;
+ while (skb) {
+ next = skb->next;
+ dev_kfree_skb(skb);
+ skb = next;
+ }
}
/* device xmit handlers */
@@ -1281,7 +1375,6 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
FOUND_SDATA,
UNKNOWN_ADDRESS,
} monitor_iface = NOT_MONITOR;
- int ret;
if (skb->iif)
odev = dev_get_by_index(&init_net, skb->iif);
@@ -1295,7 +1388,20 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
"originating device\n", dev->name);
#endif
dev_kfree_skb(skb);
- return 0;
+ return NETDEV_TX_OK;
+ }
+
+ if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
+ local->hw.conf.dynamic_ps_timeout > 0) {
+ if (local->hw.conf.flags & IEEE80211_CONF_PS) {
+ ieee80211_stop_queues_by_reason(&local->hw,
+ IEEE80211_QUEUE_STOP_REASON_PS);
+ queue_work(local->hw.workqueue,
+ &local->dynamic_ps_disable_work);
+ }
+
+ mod_timer(&local->dynamic_ps_timer, jiffies +
+ msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
}
memset(info, 0, sizeof(*info));
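[Editor's note: the hunk above moves the dynamic power-save handling into the master xmit path: every outgoing frame pushes the inactivity timer dynamic_ps_timeout milliseconds into the future, and if the device is currently in PS the queues are stopped until the disable-PS work has woken it. A plain-C sketch of that re-arm pattern, with made-up names standing in for the timer and work items:]

	#include <stdbool.h>

	struct dyn_ps {
		bool ps_enabled;		/* mirrors IEEE80211_CONF_PS   */
		unsigned long deadline_ms;	/* when to re-enter power save */
	};

	static void note_tx_activity(struct dyn_ps *ps, unsigned long now_ms,
				     unsigned int timeout_ms)
	{
		if (ps->ps_enabled) {
			/* real code: stop queues, schedule dynamic_ps_disable_work */
		}
		/* real code: mod_timer(timer, jiffies + msecs_to_jiffies(timeout)) */
		ps->deadline_ms = now_ms + timeout_ms;
	}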
@@ -1311,7 +1417,7 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
else
if (mesh_nexthop_lookup(skb, osdata)) {
dev_put(odev);
- return 0;
+ return NETDEV_TX_OK;
}
if (memcmp(odev->dev_addr, hdr->addr4, ETH_ALEN) != 0)
IEEE80211_IFSTA_MESH_CTR_INC(&osdata->u.mesh,
@@ -1373,7 +1479,7 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (ieee80211_skb_resize(osdata->local, skb, headroom, may_encrypt)) {
dev_kfree_skb(skb);
dev_put(odev);
- return 0;
+ return NETDEV_TX_OK;
}
if (osdata->vif.type == NL80211_IFTYPE_AP_VLAN)
@@ -1382,20 +1488,42 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
u.ap);
if (likely(monitor_iface != UNKNOWN_ADDRESS))
info->control.vif = &osdata->vif;
- ret = ieee80211_tx(odev, skb);
+
+ ieee80211_tx(odev, skb, false);
dev_put(odev);
- return ret;
+ return NETDEV_TX_OK;
}
int ieee80211_monitor_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
+ struct ieee80211_channel *chan = local->hw.conf.channel;
struct ieee80211_radiotap_header *prthdr =
(struct ieee80211_radiotap_header *)skb->data;
u16 len_rthdr;
+ /*
+ * Frame injection is not allowed if beaconing is not allowed
+ * or if we need radar detection. Beaconing is usually not allowed when
+ * the mode or operation (Adhoc, AP, Mesh) does not support DFS.
+ * Passive scan is also used in world regulatory domains where
+ * your country is not known and as such it should be treated as
+ * NO TX unless the channel is explicitly allowed in which case
+ * your current regulatory domain would not have the passive scan
+ * flag.
+ *
+ * Since AP mode uses monitor interfaces to inject/TX management
+ * frames we can make AP mode the exception to this rule once it
+ * supports radar detection as its implementation can deal with
+ * radar detection by itself. We can do that later by adding a
+ * monitor flag interfaces used for AP support.
+ */
+ if ((chan->flags & (IEEE80211_CHAN_NO_IBSS | IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_PASSIVE_SCAN)))
+ goto fail;
+
/* check for not even having the fixed radiotap header part */
if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
goto fail; /* too short to be possibly valid */
@@ -1479,19 +1607,6 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
goto fail;
}
- if (!(local->hw.flags & IEEE80211_HW_NO_STACK_DYNAMIC_PS) &&
- local->dynamic_ps_timeout > 0) {
- if (local->hw.conf.flags & IEEE80211_CONF_PS) {
- ieee80211_stop_queues_by_reason(&local->hw,
- IEEE80211_QUEUE_STOP_REASON_PS);
- queue_work(local->hw.workqueue,
- &local->dynamic_ps_disable_work);
- }
-
- mod_timer(&local->dynamic_ps_timer, jiffies +
- msecs_to_jiffies(local->dynamic_ps_timeout));
- }
-
nh_pos = skb_network_header(skb) - skb->data;
h_pos = skb_transport_header(skb) - skb->data;
@@ -1572,7 +1687,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
case NL80211_IFTYPE_STATION:
fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
/* BSSID SA DA */
- memcpy(hdr.addr1, sdata->u.sta.bssid, ETH_ALEN);
+ memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN);
memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
memcpy(hdr.addr3, skb->data, ETH_ALEN);
hdrlen = 24;
@@ -1581,7 +1696,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
/* DA SA BSSID */
memcpy(hdr.addr1, skb->data, ETH_ALEN);
memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
- memcpy(hdr.addr3, sdata->u.sta.bssid, ETH_ALEN);
+ memcpy(hdr.addr3, sdata->u.ibss.bssid, ETH_ALEN);
hdrlen = 24;
break;
default:
@@ -1603,8 +1718,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
}
/* receiver and we are QoS enabled, use a QoS type frame */
- if (sta_flags & WLAN_STA_WME &&
- ieee80211_num_regular_queues(&local->hw) >= 4) {
+ if ((sta_flags & WLAN_STA_WME) && local->hw.queues >= 4) {
fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
hdrlen += 2;
}
@@ -1736,19 +1850,58 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
*/
void ieee80211_clear_tx_pending(struct ieee80211_local *local)
{
- int i, j;
- struct ieee80211_tx_stored_packet *store;
+ int i;
- for (i = 0; i < ieee80211_num_regular_queues(&local->hw); i++) {
- if (!test_bit(i, local->queues_pending))
- continue;
- store = &local->pending_packet[i];
- kfree_skb(store->skb);
- for (j = 0; j < store->num_extra_frag; j++)
- kfree_skb(store->extra_frag[j]);
- kfree(store->extra_frag);
- clear_bit(i, local->queues_pending);
+ for (i = 0; i < local->hw.queues; i++)
+ skb_queue_purge(&local->pending[i]);
+}
+
+static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_sub_if_data *sdata;
+ struct sta_info *sta;
+ struct ieee80211_hdr *hdr;
+ struct net_device *dev;
+ int ret;
+ bool result = true;
+
+ /* does interface still exist? */
+ dev = dev_get_by_index(&init_net, skb->iif);
+ if (!dev) {
+ dev_kfree_skb(skb);
+ return true;
+ }
+
+ /* validate info->control.vif against skb->iif */
+ sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ sdata = container_of(sdata->bss,
+ struct ieee80211_sub_if_data,
+ u.ap);
+
+ if (unlikely(info->control.vif && info->control.vif != &sdata->vif)) {
+ dev_kfree_skb(skb);
+ result = true;
+ goto out;
}
+
+ if (info->flags & IEEE80211_TX_INTFL_NEED_TXPROCESSING) {
+ ieee80211_tx(dev, skb, true);
+ } else {
+ hdr = (struct ieee80211_hdr *)skb->data;
+ sta = sta_info_get(local, hdr->addr1);
+
+ ret = __ieee80211_tx(local, &skb, sta);
+ if (ret != IEEE80211_TX_OK)
+ result = false;
+ }
+
+ out:
+ dev_put(dev);
+
+ return result;
}
/*
@@ -1759,40 +1912,53 @@ void ieee80211_tx_pending(unsigned long data)
{
struct ieee80211_local *local = (struct ieee80211_local *)data;
struct net_device *dev = local->mdev;
- struct ieee80211_tx_stored_packet *store;
- struct ieee80211_tx_data tx;
- int i, ret;
+ unsigned long flags;
+ int i;
+ bool next;
+ rcu_read_lock();
netif_tx_lock_bh(dev);
- for (i = 0; i < ieee80211_num_regular_queues(&local->hw); i++) {
- /* Check that this queue is ok */
- if (__netif_subqueue_stopped(local->mdev, i) &&
- !test_bit(i, local->queues_pending_run))
- continue;
- if (!test_bit(i, local->queues_pending)) {
- clear_bit(i, local->queues_pending_run);
- ieee80211_wake_queue(&local->hw, i);
+ for (i = 0; i < local->hw.queues; i++) {
+ /*
+ * If queue is stopped by something other than due to pending
+ * frames, or we have no pending frames, proceed to next queue.
+ */
+ spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+ next = false;
+ if (local->queue_stop_reasons[i] !=
+ BIT(IEEE80211_QUEUE_STOP_REASON_PENDING) ||
+ skb_queue_empty(&local->pending[i]))
+ next = true;
+ spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+
+ if (next)
continue;
- }
- clear_bit(i, local->queues_pending_run);
+ /*
+ * start the queue now to allow processing our packets,
+ * we're under the tx lock here anyway so nothing will
+ * happen as a result of this
+ */
netif_start_subqueue(local->mdev, i);
- store = &local->pending_packet[i];
- tx.extra_frag = store->extra_frag;
- tx.num_extra_frag = store->num_extra_frag;
- tx.flags = 0;
- ret = __ieee80211_tx(local, store->skb, &tx);
- if (ret) {
- if (ret == IEEE80211_TX_FRAG_AGAIN)
- store->skb = NULL;
- } else {
- clear_bit(i, local->queues_pending);
- ieee80211_wake_queue(&local->hw, i);
+ while (!skb_queue_empty(&local->pending[i])) {
+ struct sk_buff *skb = skb_dequeue(&local->pending[i]);
+
+ if (!ieee80211_tx_pending_skb(local, skb)) {
+ skb_queue_head(&local->pending[i], skb);
+ break;
+ }
}
+
+ /* Start regular packet processing again. */
+ if (skb_queue_empty(&local->pending[i]))
+ ieee80211_wake_queue_by_reason(&local->hw, i,
+ IEEE80211_QUEUE_STOP_REASON_PENDING);
}
+
netif_tx_unlock_bh(dev);
+ rcu_read_unlock();
}
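[Editor's note: the pending tasklet above drains each per-queue list in order and, if a frame cannot be sent, puts it back at the head so ordering is preserved and the loop stops until the driver frees up space. A sketch of that drain-and-requeue loop using a hypothetical singly linked queue instead of the kernel's sk_buff_head API:]

	#include <stdbool.h>

	struct node {
		struct node *next;
	};

	/* pretend TX attempt: returns false when the frame must wait */
	static bool try_send(struct node *n)
	{
		(void)n;
		return true;
	}

	/* 'headp' points at the oldest pending frame */
	static void drain(struct node **headp)
	{
		struct node *n;

		while ((n = *headp)) {
			*headp = n->next;		/* like skb_dequeue()      */
			if (!try_send(n)) {
				n->next = *headp;	/* like skb_queue_head():  */
				*headp = n;		/* keep order, retry later */
				break;
			}
		}
	}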
/* functions for drivers to get certain frames */
@@ -1867,7 +2033,6 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
struct ieee80211_tx_info *info;
struct ieee80211_sub_if_data *sdata = NULL;
struct ieee80211_if_ap *ap = NULL;
- struct ieee80211_if_sta *ifsta = NULL;
struct beacon_data *beacon;
struct ieee80211_supported_band *sband;
enum ieee80211_band band = local->hw.conf.channel->band;
@@ -1919,13 +2084,13 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
} else
goto out;
} else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
+ struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
struct ieee80211_hdr *hdr;
- ifsta = &sdata->u.sta;
- if (!ifsta->probe_resp)
+ if (!ifibss->probe_resp)
goto out;
- skb = skb_copy(ifsta->probe_resp, GFP_ATOMIC);
+ skb = skb_copy(ifibss->probe_resp, GFP_ATOMIC);
if (!skb)
goto out;