author | Praveen Kaligineedi <pkaligineedi@google.com> | 2023-03-15 16:33:11 -0700
committer | David S. Miller <davem@davemloft.net> | 2023-03-17 08:29:21 +0000
commit | 39a7f4aa3e4a7947614cf1d5c27abba3300adb1e (patch)
tree | 09044310dc36097407d318cd1c6275e919d3ba40 /drivers/net/ethernet/google/gve/gve.h
parent | 75eaae158b1b7d8d5bde2bafc0bcf778423071d3 (diff)
gve: Add XDP REDIRECT support for GQI-QPL format
This patch contains the following changes:
1) Support for XDP REDIRECT action on rx
2) ndo_xdp_xmit callback support (a hook-up sketch follows this list)
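The ndo_xdp_xmit hook is what lets the core stack hand redirected frames back to the driver for transmission. Below is a minimal hook-up sketch: only .ndo_xdp_xmit / gve_xdp_xmit come from this patch; the ops struct name and the other member shown are illustrative, since the driver's real net_device_ops table is not part of this header diff.

```c
#include <linux/netdevice.h>
#include "gve.h"	/* declares gve_tx() and gve_xdp_xmit() */

/* Hook-up sketch only: gve_netdev_ops_sketch is a hypothetical name. */
static const struct net_device_ops gve_netdev_ops_sketch = {
	.ndo_start_xmit	= gve_tx,	/* existing skb transmit path */
	.ndo_xdp_xmit	= gve_xdp_xmit,	/* new: transmit redirected xdp_frames */
	/* ... remaining callbacks elided ... */
};
```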
In the GQI-QPL queue format, the driver needs to allocate a fixed-size memory region, whose size is specified by the vNIC device, for RX/TX and register this memory as a bounce buffer with the vNIC device when a queue is created. The number of pages in the bounce buffer is limited, and the pages need to be made available to the vNIC again by copying the RX data out of them to prevent head-of-line blocking. XDP_REDIRECT packets are therefore immediately copied to a newly allocated page.
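A rough sketch of that copy-then-redirect step is below. It assumes the page_cache, xdp_rxq and xdp_alloc_fails fields added to gve_rx_ring in the diff further down; the helper name and error handling are illustrative, not the driver's exact code, while the xdp_* and page_frag_* calls are standard kernel APIs.

```c
#include <linux/bpf.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <net/xdp.h>
#include "gve.h"

/* Illustrative: copy the received frame out of the registered QPL bounce
 * buffer into a freshly allocated page fragment, then hand the copy to the
 * core XDP redirect machinery.
 */
static int gve_xdp_redirect_sketch(struct net_device *dev,
				   struct gve_rx_ring *rx,
				   struct xdp_buff *orig,
				   struct bpf_prog *xdp_prog)
{
	int len = orig->data_end - orig->data;
	int headroom = XDP_PACKET_HEADROOM;
	struct xdp_buff new;
	int total_len;
	void *frame;
	int err;

	total_len = headroom + SKB_DATA_ALIGN(len) +
		    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	frame = page_frag_alloc(&rx->page_cache, total_len, GFP_ATOMIC);
	if (!frame)
		return -ENOMEM;	/* the driver would bump rx->xdp_alloc_fails */

	xdp_init_buff(&new, total_len, &rx->xdp_rxq);
	xdp_prepare_buff(&new, frame, headroom, len, false);
	memcpy(new.data, orig->data, len);

	err = xdp_do_redirect(dev, &new, xdp_prog);
	if (err)
		page_frag_free(frame);	/* redirect failed; release the copy */

	return err;
}
```

Because the copy leaves the QPL page untouched, that page can be handed back to the device immediately, which is what avoids the head-of-line blocking described above.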
Signed-off-by: Praveen Kaligineedi <pkaligineedi@google.com>
Reviewed-by: Jeroen de Borst <jeroendb@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/google/gve/gve.h')
-rw-r--r-- | drivers/net/ethernet/google/gve/gve.h | 15
1 file changed, 13 insertions, 2 deletions
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 8d5234d4ba67..a3b2aec2c575 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -236,6 +236,7 @@ struct gve_rx_ring {
 	u64 rx_frag_alloc_cnt; /* free-running count of rx page allocations */
 	u64 xdp_tx_errors;
 	u64 xdp_redirect_errors;
+	u64 xdp_alloc_fails;
 	u64 xdp_actions[GVE_XDP_ACTIONS];
 	u32 q_num; /* queue index */
 	u32 ntfy_id; /* notification block index */
@@ -247,6 +248,7 @@ struct gve_rx_ring {
 
 	/* XDP stuff */
 	struct xdp_rxq_info xdp_rxq;
+	struct page_frag_cache page_cache; /* Page cache to allocate XDP frames */
 };
 
 /* A TX desc ring entry */
@@ -267,7 +269,10 @@ struct gve_tx_iovec {
  * ring entry but only used for a pkt_desc not a seg_desc
  */
 struct gve_tx_buffer_state {
-	struct sk_buff *skb; /* skb for this pkt */
+	union {
+		struct sk_buff *skb; /* skb for this pkt */
+		struct xdp_frame *xdp_frame; /* xdp_frame */
+	};
 	struct {
 		u16 size; /* size of xmitted xdp pkt */
 	} xdp;
@@ -385,6 +390,8 @@ struct gve_tx_ring {
 		struct {
 			/* Spinlock for when cleanup in progress */
 			spinlock_t clean_lock;
+			/* Spinlock for XDP tx traffic */
+			spinlock_t xdp_lock;
 		};
 
 		/* DQO fields. */
@@ -462,6 +469,8 @@ struct gve_tx_ring {
 	dma_addr_t q_resources_bus; /* dma address of the queue resources */
 	dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
 	struct u64_stats_sync statss; /* sync stats for 32bit archs */
+	u64 xdp_xmit;
+	u64 xdp_xmit_errors;
 } ____cacheline_aligned;
 
 /* Wraps the info for one irq including the napi struct and the queues
@@ -919,8 +928,10 @@ void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
 		   enum dma_data_direction);
 /* tx handling */
 netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
+int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+		 u32 flags);
 int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
-		     void *data, int len);
+		     void *data, int len, void *frame_p);
 void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
 bool gve_tx_poll(struct gve_notify_block *block, int budget);
 bool gve_xdp_poll(struct gve_notify_block *block, int budget);
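To show how these header additions fit together, here is a sketch, not the driver's actual gve_tx.c code, of what the gve_xdp_xmit() entry point could look like: frames are queued one at a time through gve_xdp_xmit_one(), which now also receives the xdp_frame pointer so the completion path can free it, the new xdp_lock serializes redirected traffic on the tx ring, and xdp_xmit/xdp_xmit_errors count the results. The CPU-to-queue mapping and the flush-as-doorbell step are assumptions, not taken from the patch.

```c
#include <linux/netdevice.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <net/xdp.h>
#include "gve.h"

/* Sketch of an ndo_xdp_xmit implementation built on the declarations above. */
int gve_xdp_xmit_sketch(struct net_device *dev, int n,
			struct xdp_frame **frames, u32 flags)
{
	struct gve_priv *priv = netdev_priv(dev);
	struct gve_tx_ring *tx;
	int i, err = 0;
	u32 qid;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	/* Spread callers across the tx queues; the real XDP queue id mapping
	 * is assumed here.
	 */
	qid = smp_processor_id() % priv->tx_cfg.num_queues;
	tx = &priv->tx[qid];

	spin_lock(&tx->xdp_lock);
	for (i = 0; i < n; i++) {
		err = gve_xdp_xmit_one(priv, tx, frames[i]->data,
				       frames[i]->len, frames[i]);
		if (err)
			break;
	}

	if (flags & XDP_XMIT_FLUSH)
		gve_xdp_tx_flush(priv, qid);	/* ring the tx doorbell */

	spin_unlock(&tx->xdp_lock);

	u64_stats_update_begin(&tx->statss);
	tx->xdp_xmit += n;
	tx->xdp_xmit_errors += n - i;
	u64_stats_update_end(&tx->statss);

	return i;	/* frames accepted for transmit */
}
```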