| author | Daniel Borkmann <daniel@iogearbox.net> | 2019-12-19 16:20:49 +0100 |
|---|---|---|
| committer | Daniel Borkmann <daniel@iogearbox.net> | 2019-12-19 16:20:53 +0100 |
| commit | ca8d0fa7cfb8abc4f623d49df210e0b81b32f626 (patch) | |
| tree | bc5d84b8543985f1b7c03f1ff1a2c6edc0306cdd /net | |
| parent | e47304232b373362228bf233f17bd12b11c9aafc (diff) | |
| parent | c0fdccfd226a1424683d3000d9e08384391210a2 (diff) | |
Merge branch 'bpf-fix-xsk-wakeup'
Maxim Mikityanskiy says:
====================
This series addresses the issue described in the commit message of the
first patch: the lack of synchronization between an XSK wakeup and the
destruction of the resources that the wakeup uses. The idea is similar to
napi_synchronize().
The series contains fixes for the drivers that implement XSK.
v2 incorporates changes suggested by Björn:
1. Call synchronize_rcu in Intel drivers only if the XDP program is
being unloaded.
2. Don't forget rcu_read_lock when wakeup is called from xsk_poll.
3. Use xs->zc as the condition to call ndo_xsk_wakeup.
====================
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
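
The napi_synchronize()-like scheme the cover letter refers to is a classic RCU pattern: the wakeup runs inside an RCU read-side critical section, and the teardown path waits with synchronize_rcu() before freeing anything the wakeup may still be touching. Below is a minimal, driver-agnostic sketch of that pattern; the `example_*` names and the `enabled`/`hw_ring` fields are hypothetical stand-ins, not code from this series.

```c
#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

/* Hypothetical per-queue state standing in for real driver resources. */
struct example_xsk_queue {
	bool enabled;		/* cleared before teardown starts */
	void *hw_ring;		/* resource the wakeup path dereferences */
};

int example_hw_kick(void *hw_ring);	/* hypothetical doorbell write */
void example_free_ring(void *hw_ring);	/* hypothetical free routine */

/* Reader side: the wakeup invoked from the XSK sendmsg()/poll() path. */
static int example_xsk_wakeup(struct example_xsk_queue *q)
{
	int err = -ENXIO;

	rcu_read_lock();
	if (q->enabled)
		err = example_hw_kick(q->hw_ring);
	rcu_read_unlock();

	return err;
}

/* Writer side: destroying the queue, e.g. when XDP is being unloaded. */
static void example_xsk_teardown(struct example_xsk_queue *q)
{
	q->enabled = false;		/* new wakeups bail out early */
	synchronize_rcu();		/* in-flight wakeups finish */
	example_free_ring(q->hw_ring);	/* now safe to free */
	q->hw_ring = NULL;
}
```

The reader side is the shape of the helper added to net/xdp/xsk.c in the diff below; the writer side lives in the individual drivers, whose patches fall outside the net/ diffstat shown here.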
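On the driver side, the first v2 point is about not paying the synchronize_rcu() cost on every ring reconfiguration, but only when the XDP program is actually being removed. A hedged sketch of what that condition can look like in a driver's XDP setup hook follows; the function, the `example_priv` structure, and the exact condition are hypothetical illustrations, not the Intel drivers' actual code.

```c
#include <linux/atomic.h>
#include <linux/bpf.h>
#include <linux/rcupdate.h>

/* Hypothetical driver private state. */
struct example_priv {
	struct bpf_prog *xdp_prog;
};

/* Hypothetical XDP_SETUP_PROG handler (.ndo_bpf). */
static int example_xdp_setup(struct example_priv *priv, struct bpf_prog *prog)
{
	struct bpf_prog *old = xchg(&priv->xdp_prog, prog);

	/* Only when the program goes away are the resources used by
	 * ndo_xsk_wakeup torn down, so only then wait for in-flight
	 * wakeups to finish. */
	if (old && !prog)
		synchronize_rcu();

	if (old)
		bpf_prog_put(old);

	return 0;
}
```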
Diffstat (limited to 'net')
-rw-r--r-- | net/xdp/xsk.c | 22 |
1 file changed, 14 insertions, 8 deletions
```diff
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 956793893c9d..328f661b83b2 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -334,12 +334,21 @@ out:
 }
 EXPORT_SYMBOL(xsk_umem_consume_tx);
 
-static int xsk_zc_xmit(struct xdp_sock *xs)
+static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
 {
 	struct net_device *dev = xs->dev;
+	int err;
+
+	rcu_read_lock();
+	err = dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
+	rcu_read_unlock();
+
+	return err;
+}
 
-	return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id,
-					       XDP_WAKEUP_TX);
+static int xsk_zc_xmit(struct xdp_sock *xs)
+{
+	return xsk_wakeup(xs, XDP_WAKEUP_TX);
 }
 
 static void xsk_destruct_skb(struct sk_buff *skb)
@@ -453,19 +462,16 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
 	__poll_t mask = datagram_poll(file, sock, wait);
 	struct sock *sk = sock->sk;
 	struct xdp_sock *xs = xdp_sk(sk);
-	struct net_device *dev;
 	struct xdp_umem *umem;
 
 	if (unlikely(!xsk_is_bound(xs)))
 		return mask;
 
-	dev = xs->dev;
 	umem = xs->umem;
 
 	if (umem->need_wakeup) {
-		if (dev->netdev_ops->ndo_xsk_wakeup)
-			dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id,
-							umem->need_wakeup);
+		if (xs->zc)
+			xsk_wakeup(xs, umem->need_wakeup);
 		else
 			/* Poll needs to drive Tx also in copy mode */
 			__xsk_sendmsg(sk);
```