author		Konstantin Taranov <kotaranov@microsoft.com>	2024-03-04 05:52:41 -0800
committer	Leon Romanovsky <leon@kernel.org>	2024-03-07 11:32:33 +0200
commit		2d5c00815778ec4f4e0a84e405e3e157b7815db1
tree		0533a597a3dcca858401573693f940c13b88e95e /drivers/infiniband/hw/mana
parent		e02497fb654689049ba8b46f098f17d5f19e0b3c
RDMA/mana_ib: Use virtual address in dma regions for MRs
Introduce mana_ib_create_dma_region() to create dma regions with iova for MRs. It allows creating MRs with any page offset; previously, only page-aligned addresses worked.

For dma regions that must have a zero dma offset (e.g., for queues), mana_ib_create_zero_offset_dma_region() is added. To get the zero offset, ib_umem_find_best_pgoff() is used with a zero pgoff_bitmask.

Fixes: 0266a177631d ("RDMA/mana_ib: Add a driver for Microsoft Azure Network Adapter")
Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>
Link: https://lore.kernel.org/r/1709560361-26393-3-git-send-email-kotaranov@linux.microsoft.com
Reviewed-by: Zhu Yanjun <yanjun.zhu@linux.dev>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
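[Editor's note] The patch turns on which umem helper chooses the page size. Below is a minimal sketch of the two selection paths, assuming only that PAGE_SZ_BM is the driver's bitmap of hardware-supported page sizes; pick_page_size() is a hypothetical helper for illustration, while both ib_umem_* calls are the kernel APIs actually used in the diff:

	#include <rdma/ib_umem.h>

	/* Hypothetical helper (not in the patch) contrasting the two paths. */
	static unsigned long pick_page_size(struct ib_umem *umem, u64 virt,
					    bool need_zero_offset)
	{
		if (need_zero_offset)
			/* Queues: a zero pgoff_bitmask demands a mapping that
			 * starts at dma offset 0 within the first page. */
			return ib_umem_find_best_pgoff(umem, PAGE_SZ_BM, 0);

		/* MRs: align selection to the caller's iova, so any page
		 * offset of the start address is acceptable. */
		return ib_umem_find_best_pgsz(umem, PAGE_SZ_BM, virt);
	}

Both helpers return 0 when no supported page size fits, which is why the new wrappers in main.c below turn 0 into -EINVAL.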
Diffstat (limited to 'drivers/infiniband/hw/mana')
-rw-r--r--	drivers/infiniband/hw/mana/cq.c		|  4
-rw-r--r--	drivers/infiniband/hw/mana/main.c	| 40
-rw-r--r--	drivers/infiniband/hw/mana/mana_ib.h	|  7
-rw-r--r--	drivers/infiniband/hw/mana/mr.c		|  4
-rw-r--r--	drivers/infiniband/hw/mana/qp.c		|  6
-rw-r--r--	drivers/infiniband/hw/mana/wq.c		|  4
6 files changed, 45 insertions(+), 20 deletions(-)
diff --git a/drivers/infiniband/hw/mana/cq.c b/drivers/infiniband/hw/mana/cq.c
index 83d20c3f048d..4a71e678d09c 100644
--- a/drivers/infiniband/hw/mana/cq.c
+++ b/drivers/infiniband/hw/mana/cq.c
@@ -48,7 +48,7 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
return err;
}
- err = mana_ib_gd_create_dma_region(mdev, cq->umem, &cq->gdma_region);
+ err = mana_ib_create_zero_offset_dma_region(mdev, cq->umem, &cq->gdma_region);
if (err) {
ibdev_dbg(ibdev,
"Failed to create dma region for create cq, %d\n",
@@ -57,7 +57,7 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
}
ibdev_dbg(ibdev,
- "mana_ib_gd_create_dma_region ret %d gdma_region 0x%llx\n",
+ "create_dma_region ret %d gdma_region 0x%llx\n",
err, cq->gdma_region);
/*
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index dd570832d61e..71e33feee61b 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -301,8 +301,8 @@ mana_ib_gd_add_dma_region(struct mana_ib_dev *dev, struct gdma_context *gc,
return 0;
}
-int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
- mana_handle_t *gdma_region)
+static int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
+ mana_handle_t *gdma_region, unsigned long page_sz)
{
struct gdma_dma_region_add_pages_req *add_req = NULL;
size_t num_pages_processed = 0, num_pages_to_handle;
@@ -314,7 +314,6 @@ int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
size_t max_pgs_create_cmd;
struct gdma_context *gc;
size_t num_pages_total;
- unsigned long page_sz;
unsigned int tail = 0;
u64 *page_addr_list;
void *request_buf;
@@ -323,12 +322,6 @@ int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
gc = mdev_to_gc(dev);
hwc = gc->hwc.driver_data;
- /* Hardware requires dma region to align to chosen page size */
- page_sz = ib_umem_find_best_pgsz(umem, PAGE_SZ_BM, 0);
- if (!page_sz) {
- ibdev_dbg(&dev->ib_dev, "failed to find page size.\n");
- return -ENOMEM;
- }
num_pages_total = ib_umem_num_dma_blocks(umem, page_sz);
max_pgs_create_cmd =
@@ -414,6 +407,35 @@ out:
return err;
}
+int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
+ mana_handle_t *gdma_region, u64 virt)
+{
+ unsigned long page_sz;
+
+ page_sz = ib_umem_find_best_pgsz(umem, PAGE_SZ_BM, virt);
+ if (!page_sz) {
+ ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
+ return -EINVAL;
+ }
+
+ return mana_ib_gd_create_dma_region(dev, umem, gdma_region, page_sz);
+}
+
+int mana_ib_create_zero_offset_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
+ mana_handle_t *gdma_region)
+{
+ unsigned long page_sz;
+
+ /* Hardware requires dma region to align to chosen page size */
+ page_sz = ib_umem_find_best_pgoff(umem, PAGE_SZ_BM, 0);
+ if (!page_sz) {
+ ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
+ return -EINVAL;
+ }
+
+ return mana_ib_gd_create_dma_region(dev, umem, gdma_region, page_sz);
+}
+
int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev, u64 gdma_region)
{
struct gdma_context *gc = mdev_to_gc(dev);
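[Editor's note] For reference, ib_umem_find_best_pgoff() is a thin wrapper over ib_umem_find_best_pgsz(); the sketch below is paraphrased from include/rdma/ib_umem.h as of kernels of this vintage, so treat field names as an assumption rather than a quotation:

	/* Paraphrased from include/rdma/ib_umem.h: with pgoff_bitmask == 0
	 * the masked dma address is 0, so find_best_pgsz() only returns
	 * page sizes whose mapping starts at offset 0 of the first page. */
	static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
							    unsigned long pgsz_bitmap,
							    u64 pgoff_bitmask)
	{
		struct scatterlist *sg = umem->sgt_append.sgt.sgl;
		dma_addr_t dma_addr;

		dma_addr = sg_dma_address(sg) + (umem->address & ~PAGE_MASK);
		return ib_umem_find_best_pgsz(umem, pgsz_bitmap,
					      dma_addr & pgoff_bitmask);
	}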
diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
index 6a03ae645c01..f83390eebb7d 100644
--- a/drivers/infiniband/hw/mana/mana_ib.h
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -160,8 +160,11 @@ static inline struct net_device *mana_ib_get_netdev(struct ib_device *ibdev, u32
int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq);
-int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
- mana_handle_t *gdma_region);
+int mana_ib_create_zero_offset_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
+ mana_handle_t *gdma_region);
+
+int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
+ mana_handle_t *gdma_region, u64 virt);
int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev,
mana_handle_t gdma_region);
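[Editor's note] The header now exposes the two entry points separately. A hedged caller-side sketch of the split; mana_ib_map_umem() is a hypothetical name, not part of the patch:

	/* Hypothetical wrapper (not in the patch): queue buffers (CQ, WQ, SQ)
	 * must map at dma offset zero, while MRs carry a user-supplied iova. */
	static int mana_ib_map_umem(struct mana_ib_dev *dev, struct ib_umem *umem,
				    mana_handle_t *region, u64 iova, bool is_queue)
	{
		if (is_queue)
			return mana_ib_create_zero_offset_dma_region(dev, umem,
								     region);
		return mana_ib_create_dma_region(dev, umem, region, iova);
	}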
diff --git a/drivers/infiniband/hw/mana/mr.c b/drivers/infiniband/hw/mana/mr.c
index ee4d4f83467c..b70b13484f09 100644
--- a/drivers/infiniband/hw/mana/mr.c
+++ b/drivers/infiniband/hw/mana/mr.c
@@ -127,7 +127,7 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
goto err_free;
}
- err = mana_ib_gd_create_dma_region(dev, mr->umem, &dma_region_handle);
+ err = mana_ib_create_dma_region(dev, mr->umem, &dma_region_handle, iova);
if (err) {
ibdev_dbg(ibdev, "Failed create dma region for user-mr, %d\n",
err);
@@ -135,7 +135,7 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
}
ibdev_dbg(ibdev,
- "mana_ib_gd_create_dma_region ret %d gdma_region %llx\n", err,
+ "create_dma_region ret %d gdma_region %llx\n", err,
dma_region_handle);
mr_params.pd_handle = pd->pd_handle;
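[Editor's note] To see why mr.c now forwards iova, a small arithmetic sketch with made-up numbers:

	/* Hypothetical values, not from the patch. */
	u64 iova = 0x10000200;            /* user address, 0x200 into a 4 KiB page */
	unsigned long page_sz = SZ_4K;    /* suppose the best fit is 4 KiB */
	u64 pgoff = iova & (page_sz - 1); /* = 0x200, a non-zero page offset */

	/* The old zero-offset path rejected pgoff != 0; with
	 * ib_umem_find_best_pgsz(umem, PAGE_SZ_BM, iova) the chosen page
	 * size only has to agree with that offset. */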
diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
index 5d4c05dcd003..6e7627745c95 100644
--- a/drivers/infiniband/hw/mana/qp.c
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -357,8 +357,8 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
}
qp->sq_umem = umem;
- err = mana_ib_gd_create_dma_region(mdev, qp->sq_umem,
- &qp->sq_gdma_region);
+ err = mana_ib_create_zero_offset_dma_region(mdev, qp->sq_umem,
+ &qp->sq_gdma_region);
if (err) {
ibdev_dbg(&mdev->ib_dev,
"Failed to create dma region for create qp-raw, %d\n",
@@ -367,7 +367,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
}
ibdev_dbg(&mdev->ib_dev,
- "mana_ib_gd_create_dma_region ret %d gdma_region 0x%llx\n",
+ "create_dma_region ret %d gdma_region 0x%llx\n",
err, qp->sq_gdma_region);
/* Create a WQ on the same port handle used by the Ethernet */
diff --git a/drivers/infiniband/hw/mana/wq.c b/drivers/infiniband/hw/mana/wq.c
index 372d361510e0..7c9c69962573 100644
--- a/drivers/infiniband/hw/mana/wq.c
+++ b/drivers/infiniband/hw/mana/wq.c
@@ -46,7 +46,7 @@ struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
wq->wq_buf_size = ucmd.wq_buf_size;
wq->rx_object = INVALID_MANA_HANDLE;
- err = mana_ib_gd_create_dma_region(mdev, wq->umem, &wq->gdma_region);
+ err = mana_ib_create_zero_offset_dma_region(mdev, wq->umem, &wq->gdma_region);
if (err) {
ibdev_dbg(&mdev->ib_dev,
"Failed to create dma region for create wq, %d\n",
@@ -55,7 +55,7 @@ struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
}
ibdev_dbg(&mdev->ib_dev,
- "mana_ib_gd_create_dma_region ret %d gdma_region 0x%llx\n",
+ "create_dma_region ret %d gdma_region 0x%llx\n",
err, wq->gdma_region);
/* WQ ID is returned at wq_create time, doesn't know the value yet */
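[Editor's note] From userspace, the visible change is that MRs over non page-aligned buffers register successfully on mana_ib. A minimal libibverbs sketch, assuming pd is an already-allocated protection domain and reg_unaligned() is a hypothetical helper:

	#include <infiniband/verbs.h>
	#include <stdlib.h>

	/* Hypothetical helper: register 4 KiB starting 512 bytes into an
	 * allocation. Per the commit message, only page-aligned addresses
	 * worked on mana_ib before this patch. */
	static struct ibv_mr *reg_unaligned(struct ibv_pd *pd)
	{
		char *buf = malloc(8192);

		if (!buf)
			return NULL;
		return ibv_reg_mr(pd, buf + 512, 4096, IBV_ACCESS_LOCAL_WRITE);
	}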