author    Jason Gunthorpe <jgg@mellanox.com>    2019-08-28 11:25:37 -0300
committer Jason Gunthorpe <jgg@mellanox.com>    2019-08-28 11:25:37 -0300
commit    a0d8994b305b76fb38448b5b8961fafbe37b7abe (patch)
tree      928f1ffd287b78cc6663543474bd5fbfe9c13b86 /drivers/infiniband
parent    fd1a52f38c23c5d99fc2e6178ac4b74c5cbcf793 (diff)
parent    75e46fc02c975f401e70a53ecd55d475081d13a3 (diff)
Merge branch 'mlx5-odp-dc' into rdma.git for-next
Michael Guralnik says:
====================
The series adds support for on-demand paging for DC transport.
As DC is an mlx5-only transport, the capabilities are exposed to userspace
through DEVX objects and, later on, through mlx5dv_query_device.
====================
Based on the mlx5-next branch from
git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux for
dependencies
* branch 'mlx5-odp-dc':
IB/mlx5: Add page fault handler for DC initiator WQE
IB/mlx5: Remove check of FW capabilities in ODP page fault handling
net/mlx5: Set ODP capabilities for DC transport to max
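As a rough illustration of the userspace side (not part of this kernel series):
mlx5dv_query_device() is the existing rdma-core entry point the cover letter
refers to, but the MLX5DV_CONTEXT_MASK_DC_ODP_CAPS bit and the dc_odp_caps
field used below are assumptions based on the cover letter, not definitions
taken from this merge.

/*
 * Hedged sketch: query the DC ODP capabilities that the cover letter says
 * are reported through mlx5dv_query_device().  The comp_mask bit and the
 * dc_odp_caps field are assumed names, not confirmed by this series.
 */
#include <stdio.h>
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>

int main(void)
{
	struct ibv_device **list = ibv_get_device_list(NULL);
	struct ibv_context *ctx;
	struct mlx5dv_context attrs = {};

	if (!list || !list[0])
		return 1;

	ctx = ibv_open_device(list[0]);
	if (!ctx)
		return 1;

	/* Ask the mlx5 provider for its DC ODP caps (assumed comp_mask bit). */
	attrs.comp_mask = MLX5DV_CONTEXT_MASK_DC_ODP_CAPS;
	if (!mlx5dv_query_device(ctx, &attrs) &&
	    (attrs.comp_mask & MLX5DV_CONTEXT_MASK_DC_ODP_CAPS))
		printf("dc_odp_caps: 0x%x\n", attrs.dc_odp_caps); /* assumed field */
	else
		printf("DC ODP caps not reported by this provider\n");

	ibv_close_device(ctx);
	ibv_free_device_list(list);
	return 0;
}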
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--   drivers/infiniband/hw/mlx5/odp.c | 51
1 file changed, 3 insertions(+), 48 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 817c924e7289..905936423a03 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -980,17 +980,6 @@ static int pagefault_data_segments(struct mlx5_ib_dev *dev,
 	return ret < 0 ? ret : npages;
 }
 
-static const u32 mlx5_ib_odp_opcode_cap[] = {
-	[MLX5_OPCODE_SEND] = IB_ODP_SUPPORT_SEND,
-	[MLX5_OPCODE_SEND_IMM] = IB_ODP_SUPPORT_SEND,
-	[MLX5_OPCODE_SEND_INVAL] = IB_ODP_SUPPORT_SEND,
-	[MLX5_OPCODE_RDMA_WRITE] = IB_ODP_SUPPORT_WRITE,
-	[MLX5_OPCODE_RDMA_WRITE_IMM] = IB_ODP_SUPPORT_WRITE,
-	[MLX5_OPCODE_RDMA_READ] = IB_ODP_SUPPORT_READ,
-	[MLX5_OPCODE_ATOMIC_CS] = IB_ODP_SUPPORT_ATOMIC,
-	[MLX5_OPCODE_ATOMIC_FA] = IB_ODP_SUPPORT_ATOMIC,
-};
-
 /*
  * Parse initiator WQE. Advances the wqe pointer to point at the
  * scatter-gather list, and set wqe_end to the end of the WQE.
@@ -1001,7 +990,6 @@ static int mlx5_ib_mr_initiator_pfault_handler(
 {
 	struct mlx5_wqe_ctrl_seg *ctrl = *wqe;
 	u16 wqe_index = pfault->wqe.wqe_index;
-	u32 transport_caps;
 	struct mlx5_base_av *av;
 	unsigned ds, opcode;
 	u32 qpn = qp->trans_qp.base.mqp.qpn;
@@ -1025,31 +1013,11 @@ static int mlx5_ib_mr_initiator_pfault_handler(
 	opcode = be32_to_cpu(ctrl->opmod_idx_opcode) &
 		 MLX5_WQE_CTRL_OPCODE_MASK;
 
-	switch (qp->ibqp.qp_type) {
-	case IB_QPT_XRC_INI:
+	if (qp->ibqp.qp_type == IB_QPT_XRC_INI)
 		*wqe += sizeof(struct mlx5_wqe_xrc_seg);
-		transport_caps = dev->odp_caps.per_transport_caps.xrc_odp_caps;
-		break;
-	case IB_QPT_RC:
-		transport_caps = dev->odp_caps.per_transport_caps.rc_odp_caps;
-		break;
-	case IB_QPT_UD:
-		transport_caps = dev->odp_caps.per_transport_caps.ud_odp_caps;
-		break;
-	default:
-		mlx5_ib_err(dev, "ODP fault on QP of an unsupported transport 0x%x\n",
-			    qp->ibqp.qp_type);
-		return -EFAULT;
-	}
-
-	if (unlikely(opcode >= ARRAY_SIZE(mlx5_ib_odp_opcode_cap) ||
-		     !(transport_caps & mlx5_ib_odp_opcode_cap[opcode]))) {
-		mlx5_ib_err(dev, "ODP fault on QP of an unsupported opcode 0x%x\n",
-			    opcode);
-		return -EFAULT;
-	}
 
-	if (qp->ibqp.qp_type == IB_QPT_UD) {
+	if (qp->ibqp.qp_type == IB_QPT_UD ||
+	    qp->qp_sub_type == MLX5_IB_QPT_DCI) {
 		av = *wqe;
 		if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV))
 			*wqe += sizeof(struct mlx5_av);
@@ -1112,19 +1080,6 @@ static int mlx5_ib_mr_responder_pfault_handler_rq(struct mlx5_ib_dev *dev,
 		return -EFAULT;
 	}
 
-	switch (qp->ibqp.qp_type) {
-	case IB_QPT_RC:
-		if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
-		      IB_ODP_SUPPORT_RECV))
-			goto invalid_transport_or_opcode;
-		break;
-	default:
-invalid_transport_or_opcode:
-		mlx5_ib_err(dev, "ODP fault on QP of an unsupported transport. transport: 0x%x\n",
-			    qp->ibqp.qp_type);
-		return -EFAULT;
-	}
-
 	*wqe_end = wqe + wqe_size;
 
 	return 0;
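For readers who do not want to trace the hunks above, the sketch below models
the resulting initiator-WQE parsing flow in stand-alone C: the per-transport
and per-opcode capability filtering is gone, and a DCI QP skips the
address-vector segment the same way a UD QP does. All struct layouts and the
EXTENDED_UD_AV flag value are stand-ins, not the real mlx5 definitions.

/*
 * Stand-alone sketch (not kernel code) of the initiator WQE parsing after
 * this merge.  Sizes and the flag value are invented stand-ins.
 */
#include <stddef.h>
#include <stdint.h>

enum qp_type { QPT_RC, QPT_UD, QPT_XRC_INI, QPT_DCI };

struct xrc_seg { uint32_t xrc_srqn; uint32_t rsvd; };      /* stand-in */
struct base_av { uint32_t dqp_dct; uint8_t rest[44]; };    /* stand-in */
struct full_av { struct base_av base; uint8_t grh[48]; };  /* stand-in */

#define EXTENDED_UD_AV 0x80000000u  /* stand-in for MLX5_EXTENDED_UD_AV */

/* Advance past the transport-specific segments at the start of the WQE. */
static void *skip_transport_segments(enum qp_type type, void *wqe)
{
	if (type == QPT_XRC_INI)
		wqe = (char *)wqe + sizeof(struct xrc_seg);

	/* DCI is handled exactly like UD: skip the address vector. */
	if (type == QPT_UD || type == QPT_DCI) {
		struct base_av *av = wqe;

		/* Extended AVs carry a GRH and are therefore larger. */
		if (av->dqp_dct & EXTENDED_UD_AV)
			wqe = (char *)wqe + sizeof(struct full_av);
		else
			wqe = (char *)wqe + sizeof(struct base_av);
	}
	return wqe;
}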