author	Linus Torvalds <torvalds@linux-foundation.org>	2024-05-20 10:23:39 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2024-05-20 10:23:39 -0700
commit	daa121128a2d2ac6006159e2c47676e4fcd21eab (patch)
tree	92f5ebb4ebc9be3535c5c3905ba40ab68cbdf964 /include/net
parent	6e51b4b5bbc07e52b226017936874715629932d1 (diff)
parent	a6016aac5252da9d22a4dc0b98121b0acdf6d2f5 (diff)
Merge tag 'dma-mapping-6.10-2024-05-20' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping updates from Christoph Hellwig:

 - optimize DMA sync calls when they are no-ops (Alexander Lobakin)

 - fix swiotlb padding for untrusted devices (Michael Kelley)

 - add documentation for swiotlb (Michael Kelley)

* tag 'dma-mapping-6.10-2024-05-20' of git://git.infradead.org/users/hch/dma-mapping:
  dma: fix DMA sync for drivers not calling dma_set_mask*()
  xsk: use generic DMA sync shortcut instead of a custom one
  page_pool: check for DMA sync shortcut earlier
  page_pool: don't use driver-set flags field directly
  page_pool: make sure frag API fields don't span between cachelines
  iommu/dma: avoid expensive indirect calls for sync operations
  dma: avoid redundant calls for sync operations
  dma: compile-out DMA sync op calls when not used
  iommu/dma: fix zeroing of bounce buffer padding used by untrusted devices
  swiotlb: remove alloc_size argument to swiotlb_tbl_map_single()
  Documentation/core-api: add swiotlb documentation
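The common thread of the series is that DMA sync calls become cheap to issue unconditionally: the DMA core skips them (or they are compiled out entirely) when they are no-ops, so callers can drop their own "need sync" bookkeeping. A minimal sketch of what that looks like in a receive path, using only standard DMA API calls; the driver-side names (my_rx_buf, my_clean_rx_desc) are hypothetical:

#include <linux/dma-mapping.h>

/* Hedged sketch only: my_rx_buf and my_clean_rx_desc are illustrative.
 * Both syncs below are issued unconditionally; with this series the
 * DMA core turns them into no-ops when the mapping needs no sync.
 */
struct my_rx_buf {
	void *data;
	dma_addr_t dma;
};

static void my_clean_rx_desc(struct device *dev, struct my_rx_buf *buf,
			     unsigned int len)
{
	/* Make the received data visible to the CPU. */
	dma_sync_single_for_cpu(dev, buf->dma, len, DMA_FROM_DEVICE);

	/* ... process buf->data, build an skb, etc. ... */

	/* Hand the buffer back to the device for reuse. */
	dma_sync_single_for_device(dev, buf->dma, len, DMA_FROM_DEVICE);
}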
Diffstat (limited to 'include/net')
-rw-r--r--	include/net/page_pool/types.h	25
-rw-r--r--	include/net/xdp_sock_drv.h	7
-rw-r--r--	include/net/xsk_buff_pool.h	14
3 files changed, 27 insertions(+), 19 deletions(-)
diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
index a6ebed002216..b088d131aeb0 100644
--- a/include/net/page_pool/types.h
+++ b/include/net/page_pool/types.h
@@ -45,7 +45,6 @@ struct pp_alloc_cache {
/**
* struct page_pool_params - page pool parameters
- * @flags: PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV
* @order: 2^order pages on allocation
* @pool_size: size of the ptr_ring
* @nid: NUMA node id to allocate from pages from
@@ -55,10 +54,11 @@ struct pp_alloc_cache {
* @dma_dir: DMA mapping direction
* @max_len: max DMA sync memory size for PP_FLAG_DMA_SYNC_DEV
* @offset: DMA sync address offset for PP_FLAG_DMA_SYNC_DEV
+ * @netdev: corresponding &net_device for Netlink introspection
+ * @flags: PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV, PP_FLAG_SYSTEM_POOL
*/
struct page_pool_params {
struct_group_tagged(page_pool_params_fast, fast,
- unsigned int flags;
unsigned int order;
unsigned int pool_size;
int nid;
@@ -70,6 +70,7 @@ struct page_pool_params {
);
struct_group_tagged(page_pool_params_slow, slow,
struct net_device *netdev;
+ unsigned int flags;
/* private: used by test code only */
void (*init_callback)(struct page *page, void *arg);
void *init_arg;
@@ -130,12 +131,28 @@ struct page_pool {
struct page_pool_params_fast p;
int cpuid;
- bool has_init_callback;
+ u32 pages_state_hold_cnt;
+
+ bool has_init_callback:1; /* slow::init_callback is set */
+ bool dma_map:1; /* Perform DMA mapping */
+ bool dma_sync:1; /* Perform DMA sync */
+#ifdef CONFIG_PAGE_POOL_STATS
+ bool system:1; /* This is a global percpu pool */
+#endif
+ /* The following block must stay within one cacheline. On 32-bit
+ * systems, sizeof(long) == sizeof(int), so that the block size is
+ * ``3 * sizeof(long)``. On 64-bit systems, the actual size is
+ * ``2 * sizeof(long) + sizeof(int)``. The closest pow-2 to both of
+ * them is ``4 * sizeof(long)``, so just use that one for simplicity.
+ * Having it aligned to a cacheline boundary may be excessive and
+ * doesn't bring any good.
+ */
+ __cacheline_group_begin(frag) __aligned(4 * sizeof(long));
long frag_users;
struct page *frag_page;
unsigned int frag_offset;
- u32 pages_state_hold_cnt;
+ __cacheline_group_end(frag);
struct delayed_work release_dw;
void (*disconnect)(void *pool);
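For page_pool users the driver-visible API is unchanged: params->flags is still set at page_pool_create() time, it merely moves into the init-time ("slow") parameter group, and the pool caches the individual capabilities as the dma_map/dma_sync/system bits. A hedged sketch of a pool created with those flags; the device, netdev and pool size below are purely illustrative:

#include <linux/numa.h>
#include <net/page_pool/helpers.h>

/* Hedged sketch only: my_dev, my_netdev and the sizes are illustrative.
 * .flags still carries PP_FLAG_DMA_MAP and PP_FLAG_DMA_SYNC_DEV; it
 * simply lives in the slow group after this change.
 */
static struct page_pool *my_create_rx_pool(struct device *my_dev,
					   struct net_device *my_netdev)
{
	struct page_pool_params pp_params = {
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order		= 0,
		.pool_size	= 256,			/* ring size, illustrative */
		.nid		= NUMA_NO_NODE,
		.dev		= my_dev,
		.dma_dir	= DMA_FROM_DEVICE,
		.max_len	= PAGE_SIZE,
		.offset		= 0,
		.netdev		= my_netdev,		/* for Netlink introspection */
	};

	return page_pool_create(&pp_params);
}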
diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
index c9aec9ab6191..0a5dca2b2b3f 100644
--- a/include/net/xdp_sock_drv.h
+++ b/include/net/xdp_sock_drv.h
@@ -219,13 +219,10 @@ static inline struct xsk_tx_metadata *xsk_buff_get_metadata(struct xsk_buff_pool
return meta;
}
-static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool)
+static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
{
struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
- if (!pool->dma_need_sync)
- return;
-
xp_dma_sync_for_cpu(xskb);
}
@@ -402,7 +399,7 @@ static inline struct xsk_tx_metadata *xsk_buff_get_metadata(struct xsk_buff_pool
return NULL;
}
-static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool)
+static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
{
}
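Drivers converted to the generic shortcut simply drop the pool argument from the Rx-side sync; the sync itself is now unconditional and skipped by the DMA core when not needed. A hedged sketch of an AF_XDP zero-copy Rx completion, where only the xsk_buff_* calls are from the real API:

#include <net/xdp_sock_drv.h>

/* Hedged sketch only: the length source and the XDP-program step are
 * illustrative; xsk_buff_set_size() and xsk_buff_dma_sync_for_cpu()
 * are the real helpers.
 */
static void my_zc_rx_one(struct xdp_buff *xdp, unsigned int len)
{
	xsk_buff_set_size(xdp, len);
	xsk_buff_dma_sync_for_cpu(xdp);		/* was: (xdp, rx_ring->xsk_pool) */

	/* ... run the XDP program on xdp and act on its verdict ... */
}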
diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
index 99dd7376df6a..bacb33f1e3e5 100644
--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h
@@ -43,7 +43,6 @@ struct xsk_dma_map {
refcount_t users;
struct list_head list; /* Protected by the RTNL_LOCK */
u32 dma_pages_cnt;
- bool dma_need_sync;
};
struct xsk_buff_pool {
@@ -82,7 +81,6 @@ struct xsk_buff_pool {
u8 tx_metadata_len; /* inherited from umem */
u8 cached_need_wakeup;
bool uses_need_wakeup;
- bool dma_need_sync;
bool unaligned;
bool tx_sw_csum;
void *addrs;
@@ -155,21 +153,17 @@ static inline dma_addr_t xp_get_frame_dma(struct xdp_buff_xsk *xskb)
return xskb->frame_dma;
}
-void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb);
static inline void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb)
{
- xp_dma_sync_for_cpu_slow(xskb);
+ dma_sync_single_for_cpu(xskb->pool->dev, xskb->dma,
+ xskb->pool->frame_len,
+ DMA_BIDIRECTIONAL);
}
-void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
- size_t size);
static inline void xp_dma_sync_for_device(struct xsk_buff_pool *pool,
dma_addr_t dma, size_t size)
{
- if (!pool->dma_need_sync)
- return;
-
- xp_dma_sync_for_device_slow(pool, dma, size);
+ dma_sync_single_for_device(pool->dev, dma, size, DMA_BIDIRECTIONAL);
}
/* Masks for xdp_umem_page flags.
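The device-side helper follows the same pattern: xp_dma_sync_for_device() now always calls dma_sync_single_for_device() and relies on the DMA core to skip it, so pool->dma_need_sync is gone. Tx paths keep using xsk_buff_raw_dma_sync_for_device() exactly as before; a hedged sketch, with only the xsk_* calls taken from the real API and the descriptor-posting step left as a comment:

#include <net/xdp_sock_drv.h>

/* Hedged sketch only: filling the hardware Tx descriptor is driver
 * specific and not shown; the xsk_* helpers are the real API.
 */
static bool my_xmit_one(struct xsk_buff_pool *pool)
{
	struct xdp_desc desc;
	dma_addr_t dma;

	if (!xsk_tx_peek_desc(pool, &desc))
		return false;

	dma = xsk_buff_raw_get_dma(pool, desc.addr);
	/* Unconditional sync; a no-op when the mapping does not need it. */
	xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);

	/* ... fill the hardware Tx descriptor with dma and desc.len ... */

	return true;
}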