summaryrefslogtreecommitdiff
path: root/include/net/xdp_sock.h
diff options
context:
space:
mode:
authorMagnus Karlsson <magnus.karlsson@intel.com>2018-06-04 14:05:57 +0200
committerDaniel Borkmann <daniel@iogearbox.net>2018-06-05 15:48:34 +0200
commitac98d8aab61baf785eb8f099b36daf34fc76a70e (patch)
treec0fa347892f50786cd516e5eb4396abf69bebb0d /include/net/xdp_sock.h
parente3760c7e50ac6cdf1188fec44938dd7e6e6eef61 (diff)
xsk: wire up Tx zero-copy functions
Here we add the functionality required to support zero-copy Tx, and also expose various zero-copy related functions to the netdevs. Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com> Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Diffstat (limited to 'include/net/xdp_sock.h')
-rw-r--r--include/net/xdp_sock.h9
1 file changed, 9 insertions, 0 deletions
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index d93d3aac3fc9..9fe472f2ac95 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -9,6 +9,7 @@
#include <linux/workqueue.h>
#include <linux/if_xdp.h>
#include <linux/mutex.h>
+#include <linux/spinlock.h>
#include <linux/mm.h>
#include <net/sock.h>
@@ -42,6 +43,8 @@ struct xdp_umem {
struct net_device *dev;
u16 queue_id;
bool zc;
+ spinlock_t xsk_list_lock;
+ struct list_head xsk_list;
};
struct xdp_sock {
@@ -53,6 +56,8 @@ struct xdp_sock {
struct list_head flush_node;
u16 queue_id;
struct xsk_queue *tx ____cacheline_aligned_in_smp;
+ struct list_head list;
+ bool zc;
/* Protects multiple processes in the control path */
struct mutex mutex;
u64 rx_dropped;
@@ -64,8 +69,12 @@ int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
void xsk_flush(struct xdp_sock *xs);
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);
+/* Used from netdev driver */
u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
void xsk_umem_discard_addr(struct xdp_umem *umem);
+void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
+bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len);
+void xsk_umem_consume_tx_done(struct xdp_umem *umem);
#else
static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{