Diffstat (limited to 'drivers/net/ethernet/intel/i40e/i40e_txrx.c')
-rw-r--r-- | drivers/net/ethernet/intel/i40e/i40e_txrx.c | 119
1 file changed, 95 insertions, 24 deletions
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 2206d2d36f0f..bbf1b1247ac4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -586,6 +586,20 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
 }
 
 /**
+ * i40e_get_head - Retrieve head from head writeback
+ * @tx_ring: tx ring to fetch head of
+ *
+ * Returns value of Tx ring head based on value stored
+ * in head write-back location
+ **/
+static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
+{
+	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
+
+	return le32_to_cpu(*(volatile __le32 *)head);
+}
+
+/**
  * i40e_get_tx_pending - how many tx descriptors not processed
  * @tx_ring: the ring of descriptors
  *
@@ -594,10 +608,16 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
  **/
 static u32 i40e_get_tx_pending(struct i40e_ring *ring)
 {
-	u32 ntu = ((ring->next_to_clean <= ring->next_to_use)
-		   ? ring->next_to_use
-		   : ring->next_to_use + ring->count);
-	return ntu - ring->next_to_clean;
+	u32 head, tail;
+
+	head = i40e_get_head(ring);
+	tail = readl(ring->tail);
+
+	if (head != tail)
+		return (head < tail) ?
+			tail - head : (tail + ring->count - head);
+
+	return 0;
 }
 
 /**
@@ -606,6 +626,8 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring)
  **/
 static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
 {
+	u32 tx_done = tx_ring->stats.packets;
+	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
 	u32 tx_pending = i40e_get_tx_pending(tx_ring);
 	struct i40e_pf *pf = tx_ring->vsi->back;
 	bool ret = false;
@@ -623,41 +645,25 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
 	 * run the check_tx_hang logic with a transmit completion
 	 * pending but without time to complete it yet.
 	 */
-	if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
-	    (tx_pending >= I40E_MIN_DESC_PENDING)) {
+	if ((tx_done_old == tx_done) && tx_pending) {
 		/* make sure it is true for two checks in a row */
 		ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
 				       &tx_ring->state);
-	} else if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
-		   (tx_pending < I40E_MIN_DESC_PENDING) &&
-		   (tx_pending > 0)) {
+	} else if (tx_done_old == tx_done &&
+		   (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
 		if (I40E_DEBUG_FLOW & pf->hw.debug_mask)
 			dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d",
 				 tx_pending, tx_ring->queue_index);
 		pf->tx_sluggish_count++;
 	} else {
 		/* update completed stats and disarm the hang check */
-		tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
+		tx_ring->tx_stats.tx_done_old = tx_done;
 		clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
 	}
 
 	return ret;
 }
 
-/**
- * i40e_get_head - Retrieve head from head writeback
- * @tx_ring: tx ring to fetch head of
- *
- * Returns value of Tx ring head based on value stored
- * in head write-back location
- **/
-static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
-{
-	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
-
-	return le32_to_cpu(*(volatile __le32 *)head);
-}
-
 #define WB_STRIDE 0x3
 
 /**
@@ -2140,6 +2146,67 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 }
 
 /**
+ * i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * @skb: send buffer
+ * @tx_flags: collected send information
+ * @hdr_len: size of the packet header
+ *
+ * Note: Our HW can't scatter-gather more than 8 fragments to build
+ * a packet on the wire and so we need to figure out the cases where we
+ * need to linearize the skb.
+ **/
+static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
+			       const u8 hdr_len)
+{
+	struct skb_frag_struct *frag;
+	bool linearize = false;
+	unsigned int size = 0;
+	u16 num_frags;
+	u16 gso_segs;
+
+	num_frags = skb_shinfo(skb)->nr_frags;
+	gso_segs = skb_shinfo(skb)->gso_segs;
+
+	if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
+		u16 j = 1;
+
+		if (num_frags < (I40E_MAX_BUFFER_TXD))
+			goto linearize_chk_done;
+		/* try the simple math, if we have too many frags per segment */
+		if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
+		    I40E_MAX_BUFFER_TXD) {
+			linearize = true;
+			goto linearize_chk_done;
+		}
+		frag = &skb_shinfo(skb)->frags[0];
+		size = hdr_len;
+		/* we might still have more fragments per segment */
+		do {
+			size += skb_frag_size(frag);
+			frag++; j++;
+			if (j == I40E_MAX_BUFFER_TXD) {
+				if (size < skb_shinfo(skb)->gso_size) {
+					linearize = true;
+					break;
+				}
+				j = 1;
+				size -= skb_shinfo(skb)->gso_size;
+				if (size)
+					j++;
+				size += hdr_len;
+			}
+			num_frags--;
+		} while (num_frags);
+	} else {
+		if (num_frags >= I40E_MAX_BUFFER_TXD)
+			linearize = true;
+	}
+
+linearize_chk_done:
+	return linearize;
+}
+
+/**
  * i40e_tx_map - Build the Tx descriptor
  * @tx_ring: ring to send buffer on
  * @skb: send buffer
@@ -2396,6 +2463,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 	if (tsyn)
 		tx_flags |= I40E_TX_FLAGS_TSYN;
 
+	if (i40e_chk_linearize(skb, tx_flags, hdr_len))
+		if (skb_linearize(skb))
+			goto out_drop;
+
 	skb_tx_timestamp(skb);
 
 	/* always enable CRC insertion offload */
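
For context on the arithmetic above (not part of the patch): i40e_get_head() reads the head value the hardware writes back just past the descriptor array, and the new i40e_get_tx_pending() must allow for the tail index wrapping around the ring, which is why it returns tail + ring->count - head when head > tail. Below is a minimal user-space sketch of that same computation; the helper name pending_descs and the example ring values are hypothetical, not driver code.

/* pending_descs(): illustrative only -- the same wraparound math as the
 * new i40e_get_tx_pending(), lifted into a standalone user-space function.
 * 'head' is the HW write-back value, 'tail' the last queued descriptor,
 * 'count' the ring size.
 */
#include <stdio.h>

static unsigned int pending_descs(unsigned int head, unsigned int tail,
				  unsigned int count)
{
	if (head == tail)
		return 0;		/* HW has caught up, nothing pending */

	return (head < tail) ? tail - head		/* no wrap: plain distance */
			     : tail + count - head;	/* tail wrapped past end of ring */
}

int main(void)
{
	printf("%u\n", pending_descs(10, 42, 512));	/* no wrap -> 32 */
	printf("%u\n", pending_descs(500, 20, 512));	/* wrapped -> 32 */
	return 0;
}

The same kind of back-of-the-envelope check applies to i40e_chk_linearize(): taking the "more than 8 fragments per packet" limit from the kernel-doc as I40E_MAX_BUFFER_TXD = 8, a TSO skb with 16 frags and 2 gso segments gives DIV_ROUND_UP(16 + 2, 2) = 9 > 8, so the skb is linearized before mapping.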