author     Maciej Fijalkowski <maciej.fijalkowski@intel.com>  2023-01-31 21:45:04 +0100
committer  Daniel Borkmann <daniel@iogearbox.net>             2023-02-01 23:30:27 +0100
commit     3246a10752a7120878740f345f3956997777b3d6
tree       9b20eca07c8ac02c9d0dd94a3c9711fd2ff96f3a  /drivers/net/ethernet/intel/ice/ice_txrx.c
parent     2fba7dc5157b6f85dbf1b8e26e63a724db1f3d79
ice: Add support for XDP multi-buffer on Tx side
Similarly to the Rx side in the previous patch, the XDP Tx logic in the
ice driver needs to be adjusted for multi-buffer support, specifically
in the way HW Tx descriptors are produced and cleaned.
Currently, XDP_TX works on strict ring boundaries, meaning it sets the
RS bit (on the producer side) / looks up the DD bit (on the
consumer/cleaning side) every quarter of the ring. This means that if a
multi-buffer frame were to span a ring quarter boundary (say the frame
consists of 4 descriptors and starts at descriptor 62 on a ring sized
to 256 entries), the RS bit would be produced in the middle of the
multi-buffer frame. That is broken behavior, as the bit needs to be set
on the last descriptor of the frame.
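To see the numbers above play out, here is a minimal user-space sketch
(the ring size, quarter spacing, and frame placement follow the example
in this paragraph; none of the names are driver code):

  #include <stdio.h>

  /* Old behavior: the RS bit lands on every quarter boundary of a
   * 256-entry ring, i.e. on descriptors 63, 127, 191, 255 in this
   * model.  A 4-descriptor frame occupying entries 62..65 crosses
   * the boundary at 63, so the RS bit is produced mid-frame instead
   * of on the frame's last descriptor, 65. */
  int main(void)
  {
          unsigned int ring_cnt = 256, quarter = ring_cnt / 4;
          unsigned int first = 62, nr_descs = 4;
          unsigned int last = first + nr_descs - 1;
          unsigned int i;

          for (i = first; i <= last; i++)
                  if ((i + 1) % quarter == 0 && i != last)
                          printf("RS bit on desc %u, but frame ends at %u\n",
                                 i, last);
          return 0;
  }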
To make this work, set the RS bit on the last descriptor from the
batch of frames that the XDP_TX action was used on, and make the first
entry remember the index of the last descriptor with the RS bit set.
This way, the cleaning side can take the index of the descriptor with
the RS bit, look up the DD bit's presence, and clean from the first
entry to the last.
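A self-contained model of that producer/cleaner handshake (struct and
function names are illustrative stand-ins; only rs_idx and the RS/DD
roles mirror the patch):

  #include <stdbool.h>
  #include <stdio.h>

  #define RING_CNT 256

  struct model_desc { bool rs, dd; };         /* stand-in for HW descriptor bits */
  struct model_buf  { unsigned int rs_idx; }; /* mirrors ice_tx_buf::rs_idx */

  static struct model_desc desc[RING_CNT];
  static struct model_buf  buf[RING_CNT];

  /* Producer: RS bit only on the last descriptor of the batch; the
   * first entry remembers where that descriptor is. */
  static void produce_batch(unsigned int first, unsigned int n)
  {
          unsigned int last = (first + n - 1) % RING_CNT;

          desc[last].rs = true;
          buf[first].rs_idx = last;
  }

  /* Cleaner: fetch the stored RS index, check its DD bit, and if HW
   * is done, clean everything from the first entry to that index. */
  static void clean_from(unsigned int first)
  {
          unsigned int i, rs_idx = buf[first].rs_idx;

          if (!desc[rs_idx].dd)
                  return;
          for (i = first; ; i = (i + 1) % RING_CNT) {
                  desc[i].rs = desc[i].dd = false;
                  if (i == rs_idx)
                          break;
          }
  }

  int main(void)
  {
          produce_batch(62, 4);   /* frame spans descriptors 62..65 */
          desc[65].dd = true;     /* HW signals completion on the RS desc */
          clean_from(62);
          printf("descriptors 62..65 cleaned as one unit\n");
          return 0;
  }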
To clean up the code base, introduce a common ice_set_rs_bit() that
returns the index of the descriptor the RS bit was produced on, so that
the standard driver can store it within the proper ice_tx_buf and the
ZC driver can simply ignore the return value.
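In the model above (reusing its desc[] and RING_CNT), the helper's job
reduces to a few lines; this is only a sketch of the shape described
here, not the driver's definition, which operates on real Tx
descriptors:

  /* Mark RS on the last produced descriptor (the one just before
   * next_to_use) and hand its index back to the caller. */
  static unsigned int model_set_rs_bit(unsigned int next_to_use)
  {
          unsigned int rs_idx = next_to_use ? next_to_use - 1
                                            : RING_CNT - 1;

          desc[rs_idx].rs = true;
          return rs_idx;
  }

The standard driver stores the returned index in the first ice_tx_buf
of the batch (tx_buf->rs_idx in the diff below), while the zero-copy
driver can discard it.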
Co-developed-by: Martyna Szapar-Mudlaw <martyna.szapar-mudlaw@linux.intel.com>
Signed-off-by: Martyna Szapar-Mudlaw <martyna.szapar-mudlaw@linux.intel.com>
Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Alexander Lobakin <alexandr.lobakin@intel.com>
Link: https://lore.kernel.org/bpf/20230131204506.219292-12-maciej.fijalkowski@intel.com
Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_txrx.c')
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c | 14 ++++++++++----
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 05028921fadd..fd731229c4de 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -578,7 +578,7 @@ ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
 	case XDP_TX:
 		if (static_branch_unlikely(&ice_xdp_locking_key))
 			spin_lock(&xdp_ring->tx_lock);
-		ret = ice_xmit_xdp_ring(xdp->data, xdp->data_end - xdp->data, xdp_ring);
+		ret = __ice_xmit_xdp_ring(xdp, xdp_ring);
 		if (static_branch_unlikely(&ice_xdp_locking_key))
 			spin_unlock(&xdp_ring->tx_lock);
 		if (ret == ICE_XDP_CONSUMED)
@@ -625,6 +625,7 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 	unsigned int queue_index = smp_processor_id();
 	struct ice_vsi *vsi = np->vsi;
 	struct ice_tx_ring *xdp_ring;
+	struct ice_tx_buf *tx_buf;
 	int nxmit = 0, i;
 
 	if (test_bit(ICE_VSI_DOWN, vsi->state))
@@ -647,16 +648,18 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 		xdp_ring = vsi->xdp_rings[queue_index];
 	}
 
+	tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];
 	for (i = 0; i < n; i++) {
 		struct xdp_frame *xdpf = frames[i];
 		int err;
 
-		err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
+		err = ice_xmit_xdp_ring(xdpf, xdp_ring);
 		if (err != ICE_XDP_TX)
 			break;
 		nxmit++;
 	}
 
+	tx_buf->rs_idx = ice_set_rs_bit(xdp_ring);
 	if (unlikely(flags & XDP_XMIT_FLUSH))
 		ice_xdp_ring_update_tail(xdp_ring);
 
@@ -1136,6 +1139,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
 	u32 cnt = rx_ring->count;
 	u32 cached_ntc = ntc;
 	u32 xdp_xmit = 0;
+	u32 cached_ntu;
 	bool failure;
 	u32 first;
 
@@ -1145,8 +1149,10 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
 #endif
 
 	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
-	if (xdp_prog)
+	if (xdp_prog) {
 		xdp_ring = rx_ring->xdp_ring;
+		cached_ntu = xdp_ring->next_to_use;
+	}
 
 	/* start the loop to process Rx packets bounded by 'budget' */
 	while (likely(total_rx_pkts < (unsigned int)budget)) {
@@ -1295,7 +1301,7 @@ construct_skb:
 	failure = ice_alloc_rx_bufs(rx_ring, ICE_RX_DESC_UNUSED(rx_ring));
 
 	if (xdp_xmit)
-		ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
+		ice_finalize_xdp_rx(xdp_ring, xdp_xmit, cached_ntu);
 
 	if (rx_ring->ring_stats)
 		ice_update_rx_ring_stats(rx_ring, total_rx_pkts,