author      Jesper Dangaard Brouer <brouer@redhat.com>    2020-05-14 12:49:17 +0200
committer   Alexei Starovoitov <ast@kernel.org>           2020-05-14 21:21:54 -0700
commit      494f44d54e25dd79af0ed6734c2d6be0aa0b6d94
tree        1af0fc96fdf441274a429dd15c4576444a71a06b /drivers
parent      983e43451830742fa93f83656ccbdcb865ea4259
mvneta: Add XDP frame size to driver
The Marvell mvneta driver uses PAGE_SIZE frames, which makes it
really easy to convert. The driver updates the xdp_buff's rxq pointer,
and now also its frame_sz, once per NAPI call (see the sketch below).
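
A minimal sketch of that per-NAPI setup; it mirrors the last hunk of
the diff below:

	/* In mvneta_rx_swbm(), once per NAPI poll, before the RX loop */
	xdp_buf.rxq = &rxq->xdp_rxq;
	xdp_buf.frame_sz = PAGE_SIZE;	/* this driver uses one whole page per frame */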
This driver takes advantage of the page_pool PP_FLAG_DMA_SYNC_DEV
feature, which can help reduce the number of cache-lines that need to
be flushed when doing DMA sync for_device. Because xdp_adjust_tail can
grow the area accessible to the CPU (which the CPU may have written
into), the maximum sync length has to be determined *after*
bpf_prog_run_xdp() returns, as sketched below.
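
The core of that logic, extracted as a sketch with the same variable
names as mvneta_run_xdp() in the diff below:

	unsigned int len, sync;
	u32 act;

	/* Extent the device may have written, before the BPF program runs */
	len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction;
	act = bpf_prog_run_xdp(prog, xdp);

	/* bpf_xdp_adjust_tail() may have moved data_end; the DMA sync
	 * for_device must cover the largest extent the CPU may have
	 * touched, so recompute after the program ran and take the max.
	 */
	sync = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction;
	sync = max(sync, len);

	/* Later, when the page is recycled, `sync` bounds the sync length */
	page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data), sync, true);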
For the XDP_TX action the driver is smart and does its own DMA sync.
When growing the tail this is still safe, because page_pool has
DMA-mapped the entire page (see the page_pool setup sketch below).
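
For context, the flags the message relies on come from how the driver
creates its page_pool. A rough sketch of that setup (not part of this
diff; field values are from memory of mvneta_create_page_pool() around
this kernel version and should be checked against the tree):

	struct page_pool_params pp_params = {
		.order = 0,
		/* PP_FLAG_DMA_MAP: DMA-map the whole page once up front.
		 * PP_FLAG_DMA_SYNC_DEV: let the pool do the for_device sync
		 * on recycle, bounded by the dma_sync_size argument passed
		 * to page_pool_put_page().
		 */
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = size,
		.nid = NUMA_NO_NODE,
		.dev = pp->dev->dev.parent,
		.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
		.offset = pp->rx_offset_correction,
		.max_len = MVNETA_MAX_RX_BUF_SIZE,
	};

	rxq->page_pool = page_pool_create(&pp_params);

Because PP_FLAG_DMA_MAP maps the full page, a tail grown by the BPF
program still lies inside the mapped region, which is why XDP_TX stays
safe.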
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Lorenzo Bianconi <lorenzo@kernel.org>
Cc: thomas.petazzoni@bootlin.com
Link: https://lore.kernel.org/bpf/158945335786.97035.12714388304493736747.stgit@firesoul
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 25
1 file changed, 15 insertions(+), 10 deletions(-)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index e0e9e56830c0..41d2a0eac5fa 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2148,12 +2148,17 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 	       struct bpf_prog *prog, struct xdp_buff *xdp,
 	       struct mvneta_stats *stats)
 {
-	unsigned int len;
+	unsigned int len, sync;
+	struct page *page;
 	u32 ret, act;
 
 	len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction;
 	act = bpf_prog_run_xdp(prog, xdp);
 
+	/* Due xdp_adjust_tail: DMA sync for_device cover max len CPU touch */
+	sync = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction;
+	sync = max(sync, len);
+
 	switch (act) {
 	case XDP_PASS:
 		stats->xdp_pass++;
@@ -2164,9 +2169,8 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 		err = xdp_do_redirect(pp->dev, xdp, prog);
 		if (unlikely(err)) {
 			ret = MVNETA_XDP_DROPPED;
-			page_pool_put_page(rxq->page_pool,
-					   virt_to_head_page(xdp->data), len,
-					   true);
+			page = virt_to_head_page(xdp->data);
+			page_pool_put_page(rxq->page_pool, page, sync, true);
 		} else {
 			ret = MVNETA_XDP_REDIR;
 			stats->xdp_redirect++;
@@ -2175,10 +2179,10 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 	}
 	case XDP_TX:
 		ret = mvneta_xdp_xmit_back(pp, xdp);
-		if (ret != MVNETA_XDP_TX)
-			page_pool_put_page(rxq->page_pool,
-					   virt_to_head_page(xdp->data), len,
-					   true);
+		if (ret != MVNETA_XDP_TX) {
+			page = virt_to_head_page(xdp->data);
+			page_pool_put_page(rxq->page_pool, page, sync, true);
+		}
 		break;
 	default:
 		bpf_warn_invalid_xdp_action(act);
@@ -2187,8 +2191,8 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 		trace_xdp_exception(pp->dev, prog, act);
 		/* fall through */
 	case XDP_DROP:
-		page_pool_put_page(rxq->page_pool,
-				   virt_to_head_page(xdp->data), len, true);
+		page = virt_to_head_page(xdp->data);
+		page_pool_put_page(rxq->page_pool, page, sync, true);
 		ret = MVNETA_XDP_DROPPED;
 		stats->xdp_drop++;
 		break;
@@ -2320,6 +2324,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 	rcu_read_lock();
 	xdp_prog = READ_ONCE(pp->xdp_prog);
 	xdp_buf.rxq = &rxq->xdp_rxq;
+	xdp_buf.frame_sz = PAGE_SIZE;
 
 	/* Fairness NAPI loop */
 	while (rx_proc < budget && rx_proc < rx_todo) {