author     Parav Pandit <parav@nvidia.com>      2020-10-30 11:38:03 +0200
committer  Jason Gunthorpe <jgg@nvidia.com>     2020-11-02 15:14:56 -0400
commit     683a9c7ed81769b6ecb6229afb00b6cda8e4bcfb
tree       201d25edef148f678a0f7a77415106860d9cd61c /drivers/infiniband
parent     3cea11cd5e3b00d91caf0b4730194039b45c5891
RDMA: Fix software RDMA drivers for dma mapping error
The commit f959dcd6ddfd ("dma-direct: Fix potential NULL pointer
dereference") made dma_mask a mandatory field that must be set up even
for dma_virt_ops based dma devices. The commit in the Fixes tag omitted
setting up the dma_mask on these virtual devices, triggering the trace
below when the two changes were combined during the merge window.

Fix it by setting a full DMA mask (64-bit on 64-bit kernels, 32-bit
otherwise) on the software based RDMA devices.
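All three drivers in the diff below apply the same pattern; a minimal
sketch of it is shown here for orientation (the helper name
soft_ibdev_set_dma_mask is made up for illustration and is not part of
the patch):

#include <linux/dma-mapping.h>

/*
 * Sketch of the fix: software RDMA devices have no DMA hardware of
 * their own, but dma_map_page_attrs() now warns when dev->dma_mask is
 * NULL, so the ib_device's struct device needs a mask set anyway.
 */
static int soft_ibdev_set_dma_mask(struct device *dev)
{
        u64 dma_mask = IS_ENABLED(CONFIG_64BIT) ? DMA_BIT_MASK(64) :
                                                  DMA_BIT_MASK(32);

        /*
         * dma_coerce_mask_and_coherent() points dev->dma_mask at the
         * coherent mask storage and sets both masks; it returns -EIO
         * if the mask cannot be honoured.
         */
        return dma_coerce_mask_and_coherent(dev, dma_mask);
}

In the actual patch, rvt_register_device(), rxe_register_device() and
siw_device_create() check the return value and abort device
registration on failure.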
WARNING: CPU: 1 PID: 8488 at kernel/dma/mapping.c:149 dma_map_page_attrs+0x493/0x700
CPU: 1 PID: 8488 Comm: syz-executor144 Not tainted 5.9.0-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
RIP: 0010:dma_map_page_attrs+0x493/0x700 kernel/dma/mapping.c:149
Call Trace:
dma_map_single_attrs include/linux/dma-mapping.h:279 [inline]
ib_dma_map_single include/rdma/ib_verbs.h:3967 [inline]
ib_mad_post_receive_mads+0x23f/0xd60 drivers/infiniband/core/mad.c:2715
ib_mad_port_start drivers/infiniband/core/mad.c:2862 [inline]
ib_mad_port_open drivers/infiniband/core/mad.c:3016 [inline]
ib_mad_init_device+0x72b/0x1400 drivers/infiniband/core/mad.c:3092
add_client_context+0x405/0x5e0 drivers/infiniband/core/device.c:680
enable_device_and_get+0x1d5/0x3c0 drivers/infiniband/core/device.c:1301
ib_register_device drivers/infiniband/core/device.c:1376 [inline]
ib_register_device+0x7a7/0xa40 drivers/infiniband/core/device.c:1335
rxe_register_device+0x46d/0x570 drivers/infiniband/sw/rxe/rxe_verbs.c:1182
rxe_add+0x12fe/0x16d0 drivers/infiniband/sw/rxe/rxe.c:247
rxe_net_add+0x8c/0xe0 drivers/infiniband/sw/rxe/rxe_net.c:507
rxe_newlink drivers/infiniband/sw/rxe/rxe.c:269 [inline]
rxe_newlink+0xb7/0xe0 drivers/infiniband/sw/rxe/rxe.c:250
nldev_newlink+0x30e/0x540 drivers/infiniband/core/nldev.c:1555
rdma_nl_rcv_msg+0x367/0x690 drivers/infiniband/core/netlink.c:195
rdma_nl_rcv_skb drivers/infiniband/core/netlink.c:239 [inline]
rdma_nl_rcv+0x2f2/0x440 drivers/infiniband/core/netlink.c:259
netlink_unicast_kernel net/netlink/af_netlink.c:1304 [inline]
netlink_unicast+0x533/0x7d0 net/netlink/af_netlink.c:1330
netlink_sendmsg+0x856/0xd90 net/netlink/af_netlink.c:1919
sock_sendmsg_nosec net/socket.c:651 [inline]
sock_sendmsg+0xcf/0x120 net/socket.c:671
____sys_sendmsg+0x6e8/0x810 net/socket.c:2353
___sys_sendmsg+0xf3/0x170 net/socket.c:2407
__sys_sendmsg+0xe5/0x1b0 net/socket.c:2440
do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46
entry_SYSCALL_64_after_hwframe+0x44/0xa9
RIP: 0033:0x443699
Link: https://lore.kernel.org/r/20201030093803.278830-1-parav@nvidia.com
Reported-by: syzbot+34dc2fea3478e659af01@syzkaller.appspotmail.com
Fixes: e0477b34d9d1 ("RDMA: Explicitly pass in the dma_device to ib_register_device")
Signed-off-by: Parav Pandit <parav@nvidia.com>
Tested-by: Guoqing Jiang <guoqing.jiang@cloud.ionos.com>
Tested-by: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Zhu Yanjun <yanjunz@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/sw/rdmavt/vt.c      | 7 +++++--
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_verbs.c  | 6 +++++-
-rw-r--r--  drivers/infiniband/sw/siw/siw_main.c   | 7 +++++--
3 files changed, 15 insertions(+), 5 deletions(-)
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index 52218684ad4a..670a9623b46e 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -524,6 +524,7 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb)
 int rvt_register_device(struct rvt_dev_info *rdi)
 {
         int ret = 0, i;
+        u64 dma_mask;
 
         if (!rdi)
                 return -EINVAL;
@@ -580,8 +581,10 @@ int rvt_register_device(struct rvt_dev_info *rdi)
 
         /* DMA Operations */
         rdi->ibdev.dev.dma_parms = rdi->ibdev.dev.parent->dma_parms;
-        dma_set_coherent_mask(&rdi->ibdev.dev,
-                              rdi->ibdev.dev.parent->coherent_dma_mask);
+        dma_mask = IS_ENABLED(CONFIG_64BIT) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32);
+        ret = dma_coerce_mask_and_coherent(&rdi->ibdev.dev, dma_mask);
+        if (ret)
+                goto bail_wss;
 
         /* Protection Domain */
         spin_lock_init(&rdi->n_pds_lock);
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 1fc022362fbe..f9c832e82552 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -1118,6 +1118,7 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
         int err;
         struct ib_device *dev = &rxe->ib_dev;
         struct crypto_shash *tfm;
+        u64 dma_mask;
 
         strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));
 
@@ -1130,7 +1131,10 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
                             rxe->ndev->dev_addr);
         dev->dev.dma_parms = &rxe->dma_parms;
         dma_set_max_seg_size(&dev->dev, UINT_MAX);
-        dma_set_coherent_mask(&dev->dev, dma_get_required_mask(&dev->dev));
+        dma_mask = IS_ENABLED(CONFIG_64BIT) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32);
+        err = dma_coerce_mask_and_coherent(&dev->dev, dma_mask);
+        if (err)
+                return err;
 
         dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
             | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index ca8bc7296867..181e06c1c43d 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -306,6 +306,7 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
         struct siw_device *sdev = NULL;
         struct ib_device *base_dev;
         struct device *parent = netdev->dev.parent;
+        u64 dma_mask;
         int rv;
 
         if (!parent) {
@@ -384,8 +385,10 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
         base_dev->dev.parent = parent;
         base_dev->dev.dma_parms = &sdev->dma_parms;
         dma_set_max_seg_size(&base_dev->dev, UINT_MAX);
-        dma_set_coherent_mask(&base_dev->dev,
-                              dma_get_required_mask(&base_dev->dev));
+        dma_mask = IS_ENABLED(CONFIG_64BIT) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32);
+        if (dma_coerce_mask_and_coherent(&base_dev->dev, dma_mask))
+                goto error;
+
         base_dev->num_comp_vectors = num_possible_cpus();
 
         xa_init_flags(&sdev->qp_xa, XA_FLAGS_ALLOC1);