author    Tirthendu Sarkar <tirthendu.sarkar@intel.com>  2023-03-10 00:26:14 +0530
committer Tony Nguyen <anthony.l.nguyen@intel.com>       2023-03-09 13:11:24 -0800
commit    01aa49e31e1674e22dd9c868ca6b4b945acd621e (patch)
tree      8152577bd717cb9d79e2ed47c7f1cbd30ce37272 /drivers/net/ethernet/intel/i40e/i40e_txrx.c
parent    e9031f2da1aef34b0b4c659ead613c335b46ae92 (diff)
i40e: add xdp_buff to i40e_ring struct
Store the xdp_buff on the Rx ring struct in preparation for XDP
multi-buffer support. This allows fragmented frames to be combined
across separate NAPI cycles, in the same way skb fragments are
currently handled. As a result, the skb pointer on the Rx ring becomes
redundant and will be removed in a later patch. As a consequence,
i40e_trace() now uses the xdp pointer instead of the skb pointer.

Truesize only needs to be calculated for page sizes bigger than 4k, as
it is always half a page for 4k pages. With the xdp_buff on the ring,
the frame size can now be set once during xdp_init_buff() and need not
be repopulated in each NAPI call for 4k pages. Consequently,
i40e_rx_frame_truesize() is now used only for bigger pages.

Signed-off-by: Tirthendu Sarkar <tirthendu.sarkar@intel.com>
Tested-by: Chandan Kumar Rout <chandanx.rout@intel.com> (A Contingent Worker at Intel)
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
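For context, the struct member and its one-time initialization that this
file's changes rely on land in hunks outside i40e_txrx.c. A minimal
sketch of the idea, assuming the xdp_init_buff() call sits in the ring
bring-up path (its exact placement is not shown in this diff):

	/* Sketch only -- not the verbatim hunks for the other files. */
	struct i40e_ring {
		/* ... existing members ... */
		struct xdp_buff xdp;	/* persists across NAPI cycles */
	};

	/* Assumed one-time setup: for 4k pages the frame size is always
	 * half a page, so it need not be recomputed on every NAPI poll. */
	static void example_ring_xdp_init(struct i40e_ring *ring)
	{
		xdp_init_buff(&ring->xdp, i40e_rx_pg_size(ring) / 2,
			      &ring->xdp_rxq);
	}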
Diffstat (limited to 'drivers/net/ethernet/intel/i40e/i40e_txrx.c')
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 33
1 file changed, 13 insertions(+), 20 deletions(-)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 7fa35ff52689..5544c2d43a92 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1619,21 +1619,19 @@ void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
writel(val, rx_ring->tail);
}
+#if (PAGE_SIZE >= 8192)
static unsigned int i40e_rx_frame_truesize(struct i40e_ring *rx_ring,
unsigned int size)
{
unsigned int truesize;
-#if (PAGE_SIZE < 8192)
- truesize = i40e_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
-#else
truesize = rx_ring->rx_offset ?
SKB_DATA_ALIGN(size + rx_ring->rx_offset) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
SKB_DATA_ALIGN(size);
-#endif
return truesize;
}
+#endif
/**
* i40e_alloc_mapped_page - recycle or make a new page
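With 4k pages the truesize is the compile-time constant of half a page,
so the helper above is now compiled only on larger-page systems. A
standalone sketch of the surviving branch's arithmetic, assuming
SKB_DATA_ALIGN() rounds up to a 64-byte cacheline (names here are
illustrative, not driver API):

	#define EXAMPLE_ALIGN(x)	ALIGN((x), 64)

	/* Mirrors the >= 8k-page branch above: buffers with headroom pay
	 * for the aligned payload plus the aligned skb_shared_info tail. */
	static unsigned int example_truesize(unsigned int size,
					     unsigned int rx_offset,
					     unsigned int shinfo_sz)
	{
		return rx_offset ? EXAMPLE_ALIGN(size + rx_offset) +
				   EXAMPLE_ALIGN(shinfo_sz) :
				   EXAMPLE_ALIGN(size);
	}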
@@ -2405,21 +2403,16 @@ static void i40e_inc_ntp(struct i40e_ring *rx_ring)
static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
unsigned int *rx_cleaned)
{
- unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0;
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
unsigned int offset = rx_ring->rx_offset;
+ struct xdp_buff *xdp = &rx_ring->xdp;
struct sk_buff *skb = rx_ring->skb;
unsigned int xdp_xmit = 0;
struct bpf_prog *xdp_prog;
bool failure = false;
- struct xdp_buff xdp;
int xdp_res = 0;
-#if (PAGE_SIZE < 8192)
- frame_sz = i40e_rx_frame_truesize(rx_ring, 0);
-#endif
- xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
-
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
while (likely(total_rx_packets < (unsigned int)budget)) {
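Because the xdp_buff now lives on the ring rather than on the stack of
i40e_clean_rx_irq(), any state it accumulates survives the return from
one poll and is picked up by the next. A hedged pseudo-flow of what
that enables for multi-buffer frames (function and comments are
illustrative only):

	static int example_poll(struct i40e_ring *rx_ring, int budget)
	{
		struct xdp_buff *xdp = &rx_ring->xdp;	/* persistent state */

		while (budget--) {
			/* A multi-buffer frame may exhaust the budget
			 * mid-frame; the fragments gathered so far stay in
			 * *xdp, and the next NAPI poll resumes from here. */
		}
		return 0;
	}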
@@ -2467,7 +2460,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
if (!size)
break;
- i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
+ i40e_trace(clean_rx_irq, rx_ring, rx_desc, xdp);
rx_buffer = i40e_get_rx_buffer(rx_ring, size);
/* retrieve a buffer from the ring */
@@ -2476,19 +2469,19 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
hard_start = page_address(rx_buffer->page) +
rx_buffer->page_offset - offset;
- xdp_prepare_buff(&xdp, hard_start, offset, size, true);
- xdp_buff_clear_frags_flag(&xdp);
+ xdp_prepare_buff(xdp, hard_start, offset, size, true);
+ xdp_buff_clear_frags_flag(xdp);
#if (PAGE_SIZE > 4096)
/* At larger PAGE_SIZE, frame_sz depend on len size */
- xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, size);
+ xdp->frame_sz = i40e_rx_frame_truesize(rx_ring, size);
#endif
- xdp_res = i40e_run_xdp(rx_ring, &xdp, xdp_prog);
+ xdp_res = i40e_run_xdp(rx_ring, xdp, xdp_prog);
}
if (xdp_res) {
if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
xdp_xmit |= xdp_res;
- i40e_rx_buffer_flip(rx_buffer, xdp.frame_sz);
+ i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz);
} else {
rx_buffer->pagecnt_bias++;
}
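On an XDP_TX or XDP_REDIRECT verdict the buffer is flipped to the other
half of its page so the page can be reused; on other verdicts the
pagecnt_bias increment undoes the consumed reference. A hedged
reconstruction, from this era of the driver, of what the flip helper
looks like (the helper itself is not part of this diff):

	static void example_rx_buffer_flip(struct i40e_rx_buffer *rx_buffer,
					   unsigned int truesize)
	{
	#if (PAGE_SIZE < 8192)
		rx_buffer->page_offset ^= truesize;	/* toggle page halves */
	#else
		rx_buffer->page_offset += truesize;
	#endif
	}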
@@ -2497,9 +2490,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
} else if (skb) {
i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
} else if (ring_uses_build_skb(rx_ring)) {
- skb = i40e_build_skb(rx_ring, rx_buffer, &xdp);
+ skb = i40e_build_skb(rx_ring, rx_buffer, xdp);
} else {
- skb = i40e_construct_skb(rx_ring, rx_buffer, &xdp);
+ skb = i40e_construct_skb(rx_ring, rx_buffer, xdp);
}
/* exit if we failed to retrieve a buffer */
@@ -2528,7 +2521,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
/* populate checksum, VLAN, and protocol */
i40e_process_skb_fields(rx_ring, rx_desc, skb);
- i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
+ i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, xdp);
napi_gro_receive(&rx_ring->q_vector->napi, skb);
skb = NULL;