path: root/drivers/net/ethernet
author	Alexander Lobakin <aleksander.lobakin@intel.com>	2024-06-20 15:53:45 +0200
committer	Tony Nguyen <anthony.l.nguyen@intel.com>	2024-07-10 10:46:32 -0700
commit	5aaac1aece4e4db9d620791a33f7a4173c660e65
tree	414715face75d3ad9a123940103d2f3425a602d6 /drivers/net/ethernet
parent	4309363f19598999b25a1e55fccf688daa4cc220
libeth: support different types of buffers for Rx
Unlike previous generations, idpf requires more buffer types for optimal
performance. This includes: header buffers, short buffers, and
no-overhead buffers (w/o headroom and tailroom, for TCP zerocopy when
the header split is enabled).
Introduce the libeth Rx buffer type and calculate the page_pool params
accordingly. All the HW-related details, such as buffer alignment, are
still accounted for. For the header buffers, pick 256 bytes, as is done
in most places in the kernel (have you ever seen frames with bigger
headers?).

Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
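For context, the call pattern a consumer driver would use with the new buffer
types looks roughly as below. This is a minimal sketch inferred from the diff
that follows: the struct libeth_fq fields .type, .buf_len and .hsplit, the
LIBETH_FQE_* values and libeth_rx_fq_create() all appear in the new code,
while the function name, the queue pairing and the error handling here are
illustrative assumptions.

/* Sketch only: set up a header queue plus a no-overhead payload queue.
 * Real drivers also fill the remaining libeth_fq fields (ring size etc.).
 */
static int example_hsplit_fq_setup(struct libeth_fq *hdr_fq,
				   struct libeth_fq *pl_fq,
				   struct napi_struct *napi)
{
	int err;

	hdr_fq->type = LIBETH_FQE_HDR;	/* fixed ~256-byte header buffers */
	hdr_fq->hsplit = false;		/* headers keep the stack overhead */
	err = libeth_rx_fq_create(hdr_fq, napi);
	if (err)
		return err;

	pl_fq->type = LIBETH_FQE_MTU;	/* MTU-sized data buffers */
	pl_fq->buf_len = 0;		/* 0: no extra HW descriptor cap */
	pl_fq->hsplit = true;		/* no headroom/tailroom; valid only
					 * because the header queue above
					 * is active
					 */
	return libeth_rx_fq_create(pl_fq, napi);
}

The point of the pairing is visible in libeth_rx_page_pool_params_zc() below:
the no-overhead payload buffers are only valid because the header queue
carries the headroom and tailroom for the stack.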
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--	drivers/net/ethernet/intel/libeth/rx.c	132
1 file changed, 121 insertions(+), 11 deletions(-)
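A quick worked example of the no-overhead sizing done by
libeth_rx_page_pool_params_zc() in the diff below may help. This is a
standalone userspace sketch of the same arithmetic; the 4 KiB page, page
order 0, the 128-byte LIBETH_RX_BUF_STRIDE and the 4096-byte descriptor cap
are assumed values for illustration, not taken from the patch.

#include <stdint.h>
#include <stdio.h>

#define PAGE_LEN	4096u	/* PAGE_SIZE << LIBETH_RX_PAGE_ORDER (assumed) */
#define BUF_STRIDE	128u	/* LIBETH_RX_BUF_STRIDE (assumed) */

static uint32_t roundup_p2(uint32_t x)
{
	uint32_t r = 1;

	while (r < x)
		r <<= 1;
	return r;
}

static uint32_t rounddown_p2(uint32_t x)
{
	uint32_t r = 1;

	while ((r << 1) && (r << 1) <= x)
		r <<= 1;
	return r;
}

static uint32_t clamp_u32(uint32_t v, uint32_t lo, uint32_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/* Mirrors the LIBETH_FQE_MTU branch of libeth_rx_page_pool_params_zc():
 * clamp(roundup_pow_of_two(mtu), LIBETH_RX_BUF_STRIDE,
 *       min(rounddown_pow_of_two(buf_len ?: U32_MAX), pp->max_len))
 */
static uint32_t zc_buf_len(uint32_t mtu, uint32_t cap)
{
	uint32_t max = rounddown_p2(cap ? cap : UINT32_MAX);

	if (max > PAGE_LEN)
		max = PAGE_LEN;
	return clamp_u32(roundup_p2(mtu), BUF_STRIDE, max);
}

int main(void)
{
	/* MTU 1500 -> 2048: two no-overhead buffers per 4 KiB page */
	printf("mtu 1500 -> buf_len %u\n", zc_buf_len(1500, 4096));
	/* MTU == PAGE_SIZE -> one full page per buffer, the zerocopy case */
	printf("mtu 4096 -> buf_len %u\n", zc_buf_len(4096, 4096));
	return 0;
}

With an MTU of 1500 the buffer length rounds up to 2048, i.e. two
zero-headroom buffers per page; with the MTU equal to the page size, one
buffer spans the whole page, which is the zerocopy-friendly case the
kernel-doc mentions.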
diff --git a/drivers/net/ethernet/intel/libeth/rx.c b/drivers/net/ethernet/intel/libeth/rx.c
index 6221b88c34ac..d0b158b6e55b 100644
--- a/drivers/net/ethernet/intel/libeth/rx.c
+++ b/drivers/net/ethernet/intel/libeth/rx.c
@@ -6,7 +6,7 @@
/* Rx buffer management */
/**
- * libeth_rx_hw_len - get the actual buffer size to be passed to HW
+ * libeth_rx_hw_len_mtu - get the actual buffer size to be passed to HW
* @pp: &page_pool_params of the netdev to calculate the size for
* @max_len: maximum buffer size for a single descriptor
*
@@ -14,7 +14,7 @@
* MTU the @dev has, HW required alignment, minimum and maximum allowed values,
* and system's page size.
*/
-static u32 libeth_rx_hw_len(const struct page_pool_params *pp, u32 max_len)
+static u32 libeth_rx_hw_len_mtu(const struct page_pool_params *pp, u32 max_len)
{
u32 len;
@@ -27,6 +27,118 @@ static u32 libeth_rx_hw_len(const struct page_pool_params *pp, u32 max_len)
}
/**
+ * libeth_rx_hw_len_truesize - get the short buffer size to be passed to HW
+ * @pp: &page_pool_params of the netdev to calculate the size for
+ * @max_len: maximum buffer size for a single descriptor
+ * @truesize: desired truesize for the buffers
+ *
+ * Return: HW-writeable length per buffer to pass to the HW, ignoring the
+ * MTU and closest to the passed truesize. Can be used for "short" buffer
+ * queues to fragment pages more efficiently.
+ */
+static u32 libeth_rx_hw_len_truesize(const struct page_pool_params *pp,
+ u32 max_len, u32 truesize)
+{
+ u32 min, len;
+
+ min = SKB_HEAD_ALIGN(pp->offset + LIBETH_RX_BUF_STRIDE);
+ truesize = clamp(roundup_pow_of_two(truesize), roundup_pow_of_two(min),
+ PAGE_SIZE << LIBETH_RX_PAGE_ORDER);
+
+ len = SKB_WITH_OVERHEAD(truesize - pp->offset);
+ len = ALIGN_DOWN(len, LIBETH_RX_BUF_STRIDE) ? : LIBETH_RX_BUF_STRIDE;
+ len = min3(len, ALIGN_DOWN(max_len ? : U32_MAX, LIBETH_RX_BUF_STRIDE),
+ pp->max_len);
+
+ return len;
+}
+
+/**
+ * libeth_rx_page_pool_params - calculate params with the stack overhead
+ * @fq: buffer queue to calculate the size for
+ * @pp: &page_pool_params of the netdev
+ *
+ * Set the PP params to include all needed stack overhead (headroom,
+ * tailroom) and set both the HW buffer length and the truesize for all
+ * types of buffers. For "short" buffers, the truesize never exceeds the
+ * "wanted" one; for the rest, it can be up to the page size.
+ *
+ * Return: true on success, false on invalid input params.
+ */
+static bool libeth_rx_page_pool_params(struct libeth_fq *fq,
+ struct page_pool_params *pp)
+{
+ pp->offset = LIBETH_SKB_HEADROOM;
+ /* HW-writeable / syncable length per one page */
+ pp->max_len = LIBETH_RX_PAGE_LEN(pp->offset);
+
+ /* HW-writeable length per buffer */
+ switch (fq->type) {
+ case LIBETH_FQE_MTU:
+ fq->buf_len = libeth_rx_hw_len_mtu(pp, fq->buf_len);
+ break;
+ case LIBETH_FQE_SHORT:
+ fq->buf_len = libeth_rx_hw_len_truesize(pp, fq->buf_len,
+ fq->truesize);
+ break;
+ case LIBETH_FQE_HDR:
+ fq->buf_len = ALIGN(LIBETH_MAX_HEAD, LIBETH_RX_BUF_STRIDE);
+ break;
+ default:
+ return false;
+ }
+
+ /* Buffer size to allocate */
+ fq->truesize = roundup_pow_of_two(SKB_HEAD_ALIGN(pp->offset +
+ fq->buf_len));
+
+ return true;
+}
+
+/**
+ * libeth_rx_page_pool_params_zc - calculate params without the stack overhead
+ * @fq: buffer queue to calculate the size for
+ * @pp: &page_pool_params of the netdev
+ *
+ * Set the PP params to exclude the stack overhead and set both the buffer
+ * length and the truesize, which are equal for the data buffers. Note that
+ * this requires separate header buffers to always be active and to account
+ * for the overhead.
+ * With the MTU == ``PAGE_SIZE``, this allows the kernel to enable the
+ * zerocopy mode.
+ *
+ * Return: true on success, false on invalid input params.
+ */
+static bool libeth_rx_page_pool_params_zc(struct libeth_fq *fq,
+ struct page_pool_params *pp)
+{
+ u32 mtu, max;
+
+ pp->offset = 0;
+ pp->max_len = PAGE_SIZE << LIBETH_RX_PAGE_ORDER;
+
+ switch (fq->type) {
+ case LIBETH_FQE_MTU:
+ mtu = READ_ONCE(pp->netdev->mtu);
+ break;
+ case LIBETH_FQE_SHORT:
+ mtu = fq->truesize;
+ break;
+ default:
+ return false;
+ }
+
+ mtu = roundup_pow_of_two(mtu);
+ max = min(rounddown_pow_of_two(fq->buf_len ? : U32_MAX),
+ pp->max_len);
+
+ fq->buf_len = clamp(mtu, LIBETH_RX_BUF_STRIDE, max);
+ fq->truesize = fq->buf_len;
+
+ return true;
+}
+
+/**
* libeth_rx_fq_create - create a PP with the default libeth settings
* @fq: buffer queue struct to fill
* @napi: &napi_struct covering this PP (no usage outside its poll loops)
@@ -44,19 +156,17 @@ int libeth_rx_fq_create(struct libeth_fq *fq, struct napi_struct *napi)
.netdev = napi->dev,
.napi = napi,
.dma_dir = DMA_FROM_DEVICE,
- .offset = LIBETH_SKB_HEADROOM,
};
struct libeth_fqe *fqes;
struct page_pool *pool;
+ bool ret;
- /* HW-writeable / syncable length per one page */
- pp.max_len = LIBETH_RX_PAGE_LEN(pp.offset);
-
- /* HW-writeable length per buffer */
- fq->buf_len = libeth_rx_hw_len(&pp, fq->buf_len);
- /* Buffer size to allocate */
- fq->truesize = roundup_pow_of_two(SKB_HEAD_ALIGN(pp.offset +
- fq->buf_len));
+ if (!fq->hsplit)
+ ret = libeth_rx_page_pool_params(fq, &pp);
+ else
+ ret = libeth_rx_page_pool_params_zc(fq, &pp);
+ if (!ret)
+ return -EINVAL;
pool = page_pool_create(&pp);
if (IS_ERR(pool))
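To make the header-buffer sizing in libeth_rx_page_pool_params() concrete:
with LIBETH_MAX_HEAD at the 256 bytes the commit message picks, the resulting
truesize works out to 1 KiB on a typical x86_64 configuration, i.e. four
header buffers per 4 KiB page. Below is a standalone sketch of that
arithmetic; the 64-byte headroom, 64-byte cache line and ~320-byte
skb_shared_info are assumed typical values, not taken from the patch.

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

static unsigned int roundup_p2(unsigned int x)
{
	unsigned int r = 1;

	while (r < x)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned int offset = 64;			/* LIBETH_SKB_HEADROOM (assumed) */
	unsigned int buf_len = ALIGN_UP(256, 128);	/* ALIGN(LIBETH_MAX_HEAD, stride) */
	unsigned int shinfo = ALIGN_UP(320, 64);	/* skb_shared_info (assumed) */
	unsigned int truesize;

	/* Mirrors roundup_pow_of_two(SKB_HEAD_ALIGN(pp->offset + fq->buf_len)),
	 * where SKB_HEAD_ALIGN(X) = SKB_DATA_ALIGN(X) + aligned shinfo size.
	 */
	truesize = roundup_p2(ALIGN_UP(offset + buf_len, 64) + shinfo);

	/* 320 + 320 = 640 -> rounds up to 1024: four buffers per 4 KiB page */
	printf("buf_len=%u truesize=%u per-page=%u\n",
	       buf_len, truesize, 4096 / truesize);
	return 0;
}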