From 081ea5195a11c9f1eaa8393be603b75982f91b7d Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Sun, 26 Jan 2020 16:26:47 +0200 Subject: RDMA/cma: Use a helper function to enqueue resolve work items To avoid errors, with attaching ownership of work item and its cm_id refcount which is decremented in work handler, tie them up in single helper function. Also avoid code duplication. Link: https://lore.kernel.org/r/20200126142652.104803-3-leon@kernel.org Signed-off-by: Parav Pandit Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cma.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 72f032160c4b..8f16ebb413c2 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -2687,14 +2687,18 @@ static void cma_init_resolve_route_work(struct cma_work *work, work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; } -static void cma_init_resolve_addr_work(struct cma_work *work, - struct rdma_id_private *id_priv) +static void enqueue_resolve_addr_work(struct cma_work *work, + struct rdma_id_private *id_priv) { + atomic_inc(&id_priv->refcount); + work->id = id_priv; INIT_WORK(&work->work, cma_work_handler); work->old_state = RDMA_CM_ADDR_QUERY; work->new_state = RDMA_CM_ADDR_RESOLVED; work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; + + queue_work(cma_wq, &work->work); } static int cma_resolve_ib_route(struct rdma_id_private *id_priv, @@ -3148,9 +3152,7 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv) rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid); rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid); - atomic_inc(&id_priv->refcount); - cma_init_resolve_addr_work(work, id_priv); - queue_work(cma_wq, &work->work); + enqueue_resolve_addr_work(work, id_priv); return 0; err: kfree(work); @@ -3175,9 +3177,7 @@ static int cma_resolve_ib_addr(struct rdma_id_private *id_priv) rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *) &(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr)); - atomic_inc(&id_priv->refcount); - cma_init_resolve_addr_work(work, id_priv); - queue_work(cma_wq, &work->work); + enqueue_resolve_addr_work(work, id_priv); return 0; err: kfree(work); -- cgit v1.2.3-58-ga151 From cc055dd3a71352759a6c7ecaee612eeaef93ef22 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Sun, 26 Jan 2020 16:26:48 +0200 Subject: RDMA/cma: Use RDMA device port iterator Use RDMA device port iterator to avoid open coding. 
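For illustration only (not part of this patch): rdma_for_each_port() supplies the device's valid port range itself, so the open-coded first/last port arithmetic disappears. A rough sketch of the conversion, where use_port() is a placeholder:

        unsigned int port;

        /* before: open coded, assumes ports are numbered from 1 */
        for (port = 1; port <= ib_dev->phys_port_cnt; ++port)
                use_port(ib_dev, port);

        /* after: the iterator knows the device's first and last port */
        rdma_for_each_port(ib_dev, port)
                use_port(ib_dev, port);
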
Link: https://lore.kernel.org/r/20200126142652.104803-4-leon@kernel.org Signed-off-by: Parav Pandit Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cma.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 8f16ebb413c2..34c62eae08d8 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -728,8 +728,8 @@ static int cma_iw_acquire_dev(struct rdma_id_private *id_priv, struct cma_device *cma_dev; enum ib_gid_type gid_type; int ret = -ENODEV; + unsigned int port; union ib_gid gid; - u8 port; if (dev_addr->dev_type != ARPHRD_INFINIBAND && id_priv->id.ps == RDMA_PS_IPOIB) @@ -753,7 +753,8 @@ static int cma_iw_acquire_dev(struct rdma_id_private *id_priv, } list_for_each_entry(cma_dev, &dev_list, list) { - for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) { + rdma_for_each_port (cma_dev->device, port) { + if (listen_id_priv->cma_dev == cma_dev && listen_id_priv->id.port_num == port) continue; @@ -786,8 +787,8 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv) struct cma_device *cma_dev, *cur_dev; struct sockaddr_ib *addr; union ib_gid gid, sgid, *dgid; + unsigned int p; u16 pkey, index; - u8 p; enum ib_port_state port_state; int i; @@ -798,7 +799,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv) mutex_lock(&lock); list_for_each_entry(cur_dev, &dev_list, list) { - for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) { + rdma_for_each_port (cur_dev->device, p) { if (!rdma_cap_af_ib(cur_dev->device, p)) continue; @@ -3029,9 +3030,9 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv) struct cma_device *cma_dev, *cur_dev; union ib_gid gid; enum ib_port_state port_state; + unsigned int p; u16 pkey; int ret; - u8 p; cma_dev = NULL; mutex_lock(&lock); @@ -3043,7 +3044,7 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv) if (!cma_dev) cma_dev = cur_dev; - for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) { + rdma_for_each_port (cur_dev->device, p) { if (!ib_get_cached_port_state(cur_dev->device, p, &port_state) && port_state == IB_PORT_ACTIVE) { cma_dev = cur_dev; -- cgit v1.2.3-58-ga151 From 5ff8c8fa44c2cb74f3066ec4a531265db69b86c5 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Sun, 26 Jan 2020 16:26:49 +0200 Subject: RDMA/cma: Rename cma_device ref/deref helpers to to get/put Helper functions which increment/decrement reference count of the structure read better when they are named with the get/put suffix. Hence, rename cma_ref/deref_dev() to cma_dev_get/put(). 
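For reference, a condensed sketch of how the existing code pairs these helpers with the device completion (no new code here, just the pattern the rename makes easier to read):

        init_completion(&cma_dev->comp);
        atomic_set(&cma_dev->refcount, 1);      /* initial reference */

        cma_dev_get(cma_dev);                   /* per-user reference */
        ...
        cma_dev_put(cma_dev);

        /* removal path: drop the initial reference, wait for the last user */
        cma_dev_put(cma_dev);
        wait_for_completion(&cma_dev->comp);
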
Link: https://lore.kernel.org/r/20200126142652.104803-5-leon@kernel.org Signed-off-by: Parav Pandit Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cma.c | 22 +++++++++++----------- drivers/infiniband/core/cma_configfs.c | 6 +++--- drivers/infiniband/core/cma_priv.h | 4 ++-- 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 34c62eae08d8..7e16d1b001ff 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -247,11 +247,17 @@ enum { CMA_OPTION_AFONLY, }; -void cma_ref_dev(struct cma_device *cma_dev) +void cma_dev_get(struct cma_device *cma_dev) { atomic_inc(&cma_dev->refcount); } +void cma_dev_put(struct cma_device *cma_dev) +{ + if (atomic_dec_and_test(&cma_dev->refcount)) + complete(&cma_dev->comp); +} + struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter, void *cookie) { @@ -267,7 +273,7 @@ struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter, } if (found_cma_dev) - cma_ref_dev(found_cma_dev); + cma_dev_get(found_cma_dev); mutex_unlock(&lock); return found_cma_dev; } @@ -463,7 +469,7 @@ static int cma_igmp_send(struct net_device *ndev, union ib_gid *mgid, bool join) static void _cma_attach_to_dev(struct rdma_id_private *id_priv, struct cma_device *cma_dev) { - cma_ref_dev(cma_dev); + cma_dev_get(cma_dev); id_priv->cma_dev = cma_dev; id_priv->id.device = cma_dev->device; id_priv->id.route.addr.dev_addr.transport = @@ -484,12 +490,6 @@ static void cma_attach_to_dev(struct rdma_id_private *id_priv, rdma_start_port(cma_dev->device)]; } -void cma_deref_dev(struct cma_device *cma_dev) -{ - if (atomic_dec_and_test(&cma_dev->refcount)) - complete(&cma_dev->comp); -} - static inline void release_mc(struct kref *kref) { struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref); @@ -502,7 +502,7 @@ static void cma_release_dev(struct rdma_id_private *id_priv) { mutex_lock(&lock); list_del(&id_priv->list); - cma_deref_dev(id_priv->cma_dev); + cma_dev_put(id_priv->cma_dev); id_priv->cma_dev = NULL; mutex_unlock(&lock); } @@ -4728,7 +4728,7 @@ static void cma_process_remove(struct cma_device *cma_dev) } mutex_unlock(&lock); - cma_deref_dev(cma_dev); + cma_dev_put(cma_dev); wait_for_completion(&cma_dev->comp); } diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c index 8b0b5ae22e4c..c672a4978bfd 100644 --- a/drivers/infiniband/core/cma_configfs.c +++ b/drivers/infiniband/core/cma_configfs.c @@ -94,7 +94,7 @@ static int cma_configfs_params_get(struct config_item *item, static void cma_configfs_params_put(struct cma_device *cma_dev) { - cma_deref_dev(cma_dev); + cma_dev_put(cma_dev); } static ssize_t default_roce_mode_show(struct config_item *item, @@ -312,12 +312,12 @@ static struct config_group *make_cma_dev(struct config_group *group, configfs_add_default_group(&cma_dev_group->ports_group, &cma_dev_group->device_group); - cma_deref_dev(cma_dev); + cma_dev_put(cma_dev); return &cma_dev_group->device_group; fail: if (cma_dev) - cma_deref_dev(cma_dev); + cma_dev_put(cma_dev); kfree(cma_dev_group); return ERR_PTR(err); } diff --git a/drivers/infiniband/core/cma_priv.h b/drivers/infiniband/core/cma_priv.h index ca7307277518..4e04c442ff86 100644 --- a/drivers/infiniband/core/cma_priv.h +++ b/drivers/infiniband/core/cma_priv.h @@ -111,8 +111,8 @@ static inline void cma_configfs_exit(void) } #endif -void cma_ref_dev(struct cma_device *dev); -void cma_deref_dev(struct 
cma_device *dev); +void cma_dev_get(struct cma_device *dev); +void cma_dev_put(struct cma_device *dev); typedef bool (*cma_device_filter)(struct ib_device *, void *); struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter, void *cookie); -- cgit v1.2.3-58-ga151 From be439912e7c2e3e78ebd087932c165a83bdca6b5 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Sun, 26 Jan 2020 16:26:50 +0200 Subject: RDMA/cma: Use refcount API to reflect refcount Use the refcount variant to capture the reference counting of the cma device structure. Link: https://lore.kernel.org/r/20200126142652.104803-6-leon@kernel.org Signed-off-by: Parav Pandit Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cma.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 7e16d1b001ff..d43f7ce759f2 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -199,7 +199,7 @@ struct cma_device { struct list_head list; struct ib_device *device; struct completion comp; - atomic_t refcount; + refcount_t refcount; struct list_head id_list; enum ib_gid_type *default_gid_type; u8 *default_roce_tos; @@ -249,12 +249,12 @@ enum { void cma_dev_get(struct cma_device *cma_dev) { - atomic_inc(&cma_dev->refcount); + refcount_inc(&cma_dev->refcount); } void cma_dev_put(struct cma_device *cma_dev) { - if (atomic_dec_and_test(&cma_dev->refcount)) + if (refcount_dec_and_test(&cma_dev->refcount)) complete(&cma_dev->comp); } @@ -754,7 +754,6 @@ static int cma_iw_acquire_dev(struct rdma_id_private *id_priv, list_for_each_entry(cma_dev, &dev_list, list) { rdma_for_each_port (cma_dev->device, port) { - if (listen_id_priv->cma_dev == cma_dev && listen_id_priv->id.port_num == port) continue; @@ -4657,7 +4656,7 @@ static void cma_add_one(struct ib_device *device) } init_completion(&cma_dev->comp); - atomic_set(&cma_dev->refcount, 1); + refcount_set(&cma_dev->refcount, 1); INIT_LIST_HEAD(&cma_dev->id_list); ib_set_client_data(device, &cma_client, cma_dev); -- cgit v1.2.3-58-ga151 From e368d23f57f6a08341d35c44255f2d8e7695152b Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Sun, 26 Jan 2020 16:26:51 +0200 Subject: RDMA/cma: Rename cma_device ref/deref helpers to to get/put Helper functions which increment/decrement reference count of a structure read better when they are named with the get/put suffix. Hence, rename cma_ref/deref_id() to cma_id_get/put(). Also use cma_get_id() wrapper to find the balancing put() calls. 
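As an aside (a sketch of the convention, not code added by this patch), the get/put pairing around deferred work now reads as:

        /* producer: pin the id for the lifetime of the work item */
        cma_id_get(id_priv);            /* balanced in cma_work_handler() */
        work->id = id_priv;
        INIT_WORK(&work->work, cma_work_handler);
        queue_work(cma_wq, &work->work);

        /* cma_work_handler() drops the reference with cma_id_put(id_priv) */
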
Link: https://lore.kernel.org/r/20200126142652.104803-7-leon@kernel.org Signed-off-by: Parav Pandit Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cma.c | 42 ++++++++++++++++++++++++------------------ 1 file changed, 24 insertions(+), 18 deletions(-) diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index d43f7ce759f2..605afeed122f 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -840,7 +840,12 @@ found: return 0; } -static void cma_deref_id(struct rdma_id_private *id_priv) +static void cma_id_get(struct rdma_id_private *id_priv) +{ + atomic_inc(&id_priv->refcount); +} + +static void cma_id_put(struct rdma_id_private *id_priv) { if (atomic_dec_and_test(&id_priv->refcount)) complete(&id_priv->comp); @@ -1846,11 +1851,11 @@ void rdma_destroy_id(struct rdma_cm_id *id) } cma_release_port(id_priv); - cma_deref_id(id_priv); + cma_id_put(id_priv); wait_for_completion(&id_priv->comp); if (id_priv->internal_id) - cma_deref_id(id_priv->id.context); + cma_id_put(id_priv->id.context); kfree(id_priv->id.route.path_rec); @@ -2187,7 +2192,7 @@ static int cma_ib_req_handler(struct ib_cm_id *cm_id, * Protect against the user destroying conn_id from another thread * until we're done accessing it. */ - atomic_inc(&conn_id->refcount); + cma_id_get(conn_id); ret = cma_cm_event_handler(conn_id, &event); if (ret) goto err3; @@ -2204,13 +2209,13 @@ static int cma_ib_req_handler(struct ib_cm_id *cm_id, mutex_unlock(&lock); mutex_unlock(&conn_id->handler_mutex); mutex_unlock(&listen_id->handler_mutex); - cma_deref_id(conn_id); + cma_id_put(conn_id); if (net_dev) dev_put(net_dev); return 0; err3: - cma_deref_id(conn_id); + cma_id_put(conn_id); /* Destroy the CM ID by returning a non-zero value. */ conn_id->cm_id.ib = NULL; err2: @@ -2391,7 +2396,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id, * Protect against the user destroying conn_id from another thread * until we're done accessing it. 
*/ - atomic_inc(&conn_id->refcount); + cma_id_get(conn_id); ret = cma_cm_event_handler(conn_id, &event); if (ret) { /* User wants to destroy the CM ID */ @@ -2399,13 +2404,13 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id, cma_exch(conn_id, RDMA_CM_DESTROYING); mutex_unlock(&conn_id->handler_mutex); mutex_unlock(&listen_id->handler_mutex); - cma_deref_id(conn_id); + cma_id_put(conn_id); rdma_destroy_id(&conn_id->id); return ret; } mutex_unlock(&conn_id->handler_mutex); - cma_deref_id(conn_id); + cma_id_put(conn_id); out: mutex_unlock(&listen_id->handler_mutex); @@ -2492,7 +2497,7 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv, _cma_attach_to_dev(dev_id_priv, cma_dev); list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list); - atomic_inc(&id_priv->refcount); + cma_id_get(id_priv); dev_id_priv->internal_id = 1; dev_id_priv->afonly = id_priv->afonly; dev_id_priv->tos_set = id_priv->tos_set; @@ -2647,7 +2652,7 @@ static void cma_work_handler(struct work_struct *_work) } out: mutex_unlock(&id_priv->handler_mutex); - cma_deref_id(id_priv); + cma_id_put(id_priv); if (destroy) rdma_destroy_id(&id_priv->id); kfree(work); @@ -2671,7 +2676,7 @@ static void cma_ndev_work_handler(struct work_struct *_work) out: mutex_unlock(&id_priv->handler_mutex); - cma_deref_id(id_priv); + cma_id_put(id_priv); if (destroy) rdma_destroy_id(&id_priv->id); kfree(work); @@ -2690,7 +2695,8 @@ static void cma_init_resolve_route_work(struct cma_work *work, static void enqueue_resolve_addr_work(struct cma_work *work, struct rdma_id_private *id_priv) { - atomic_inc(&id_priv->refcount); + /* Balances with cma_id_put() in cma_work_handler */ + cma_id_get(id_priv); work->id = id_priv; INIT_WORK(&work->work, cma_work_handler); @@ -2986,7 +2992,7 @@ int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms) if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY)) return -EINVAL; - atomic_inc(&id_priv->refcount); + cma_id_get(id_priv); if (rdma_cap_ib_sa(id->device, id->port_num)) ret = cma_resolve_ib_route(id_priv, timeout_ms); else if (rdma_protocol_roce(id->device, id->port_num)) @@ -3002,7 +3008,7 @@ int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms) return 0; err: cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED); - cma_deref_id(id_priv); + cma_id_put(id_priv); return ret; } EXPORT_SYMBOL(rdma_resolve_route); @@ -4581,7 +4587,7 @@ static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id INIT_WORK(&work->work, cma_ndev_work_handler); work->id = id_priv; work->event.event = RDMA_CM_EVENT_ADDR_CHANGE; - atomic_inc(&id_priv->refcount); + cma_id_get(id_priv); queue_work(cma_wq, &work->work); } @@ -4715,11 +4721,11 @@ static void cma_process_remove(struct cma_device *cma_dev) list_del(&id_priv->listen_list); list_del_init(&id_priv->list); - atomic_inc(&id_priv->refcount); + cma_id_get(id_priv); mutex_unlock(&lock); ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv); - cma_deref_id(id_priv); + cma_id_put(id_priv); if (ret) rdma_destroy_id(&id_priv->id); -- cgit v1.2.3-58-ga151 From 43fb5892cdfaa3bbe170aade07d4a38086636cca Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Sun, 26 Jan 2020 16:26:52 +0200 Subject: RDMA/cma: Use refcount API to reflect refcount Use a refcount_t for atomics being used as a refcount. 
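The conversion is mechanical; refcount_t additionally saturates and warns on overflow/underflow instead of silently wrapping. A sketch of the mapping (mirroring the hunks below):

        refcount_t refcount;                            /* was: atomic_t refcount */

        refcount_set(&id_priv->refcount, 1);            /* was: atomic_set() */
        refcount_inc(&id_priv->refcount);               /* was: atomic_inc() */
        if (refcount_dec_and_test(&id_priv->refcount))  /* was: atomic_dec_and_test() */
                complete(&id_priv->comp);
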
Link: https://lore.kernel.org/r/20200126142652.104803-8-leon@kernel.org Signed-off-by: Parav Pandit Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cma.c | 6 +++--- drivers/infiniband/core/cma_priv.h | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 605afeed122f..5165158a7aaa 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -842,12 +842,12 @@ found: static void cma_id_get(struct rdma_id_private *id_priv) { - atomic_inc(&id_priv->refcount); + refcount_inc(&id_priv->refcount); } static void cma_id_put(struct rdma_id_private *id_priv) { - if (atomic_dec_and_test(&id_priv->refcount)) + if (refcount_dec_and_test(&id_priv->refcount)) complete(&id_priv->comp); } @@ -875,7 +875,7 @@ struct rdma_cm_id *__rdma_create_id(struct net *net, spin_lock_init(&id_priv->lock); mutex_init(&id_priv->qp_mutex); init_completion(&id_priv->comp); - atomic_set(&id_priv->refcount, 1); + refcount_set(&id_priv->refcount, 1); mutex_init(&id_priv->handler_mutex); INIT_LIST_HEAD(&id_priv->listen_list); INIT_LIST_HEAD(&id_priv->mc_list); diff --git a/drivers/infiniband/core/cma_priv.h b/drivers/infiniband/core/cma_priv.h index 4e04c442ff86..5edcf44a9307 100644 --- a/drivers/infiniband/core/cma_priv.h +++ b/drivers/infiniband/core/cma_priv.h @@ -66,7 +66,7 @@ struct rdma_id_private { struct mutex qp_mutex; struct completion comp; - atomic_t refcount; + refcount_t refcount; struct mutex handler_mutex; int backlog; -- cgit v1.2.3-58-ga151 From b14c95bee83537aff423cece4ae6078759bc6f34 Mon Sep 17 00:00:00 2001 From: Lang Cheng Date: Sun, 26 Jan 2020 22:55:04 +0800 Subject: RDMA/hns: Cleanups of magic numbers Some magic numbers are hard to understand, so replace them with macros or add some comments for them. 
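For the reader's convenience, the values behind the macros used below, as implied by the literals they replace (the definitions are assumed to already exist elsewhere in the driver headers and are not part of these hunks):

        #define HNS_ROCE_SGE_IN_WQE     2       /* replaces the bare "2" in sge checks */
        #define HNS_ROCE_SGE_SIZE       16      /* replaces the bare "16" sge size */
        #define PCI_REVISION_ID_HIP08_A 0x20    /* replaces revision check 0x20 */
        #define PCI_REVISION_ID_HIP08_B 0x21    /* replaces revision check 0x21 */
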
Link: https://lore.kernel.org/r/20200126145504.9700-1-liweihang@huawei.com Signed-off-by: Lang Cheng Signed-off-by: Yixian Liu Signed-off-by: Wenpeng Liang Signed-off-by: Yixing Liu Signed-off-by: Weihang Li Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_device.h | 2 +- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 10 +++++----- drivers/infiniband/hw/hns/hns_roce_qp.c | 23 +++++++++++++---------- drivers/infiniband/hw/hns/hns_roce_srq.c | 3 ++- 4 files changed, 21 insertions(+), 17 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index a7c4ff975c28..3b3d6fee1eca 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -881,7 +881,7 @@ struct hns_roce_caps { u32 cqc_timer_ba_pg_sz; u32 cqc_timer_buf_pg_sz; u32 cqc_timer_hop_num; - u32 cqe_ba_pg_sz; + u32 cqe_ba_pg_sz; /* page_size = 4K*(2^cqe_ba_pg_sz) */ u32 cqe_buf_pg_sz; u32 cqe_hop_num; u32 srqwqe_ba_pg_sz; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 12c4cd8e9378..b86687809407 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -1999,7 +1999,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev) return ret; } - if (hr_dev->pci_dev->revision == 0x21) { + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_B) { ret = hns_roce_query_pf_timer_resource(hr_dev); if (ret) { dev_err(hr_dev->dev, @@ -2016,7 +2016,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev) return ret; } - if (hr_dev->pci_dev->revision == 0x21) { + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_B) { ret = hns_roce_set_vf_switch_param(hr_dev, 0); if (ret) { dev_err(hr_dev->dev, @@ -2298,7 +2298,7 @@ static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev) { struct hns_roce_v2_priv *priv = hr_dev->priv; - if (hr_dev->pci_dev->revision == 0x21) + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_B) hns_roce_function_clear(hr_dev); hns_roce_free_link_table(hr_dev, &priv->tpq); @@ -2757,7 +2757,7 @@ static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, int wqe_index) static void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index) { - *hr_cq->set_ci_db = cons_index & 0xffffff; + *hr_cq->set_ci_db = cons_index & V2_CQ_DB_PARAMETER_CONS_IDX_M; } static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn, @@ -4475,7 +4475,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp, roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M, V2_QPC_BYTE_24_HOP_LIMIT_S, 0); - if (hr_dev->pci_dev->revision == 0x21 && is_udp) + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_B && is_udp) roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M, V2_QPC_BYTE_24_TC_S, grh->traffic_class >> 2); else diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 3257ad11be48..e13f16c59115 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -309,7 +309,7 @@ static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev, max_cnt = max(1U, cap->max_recv_sge); hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt); - if (hr_dev->caps.max_rq_sg <= 2) + if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE) hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz); else @@ -370,16 +370,17 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev, hr_qp->sq.wqe_shift = 
ucmd->log_sq_stride; max_cnt = max(1U, cap->max_send_sge); - if (hr_dev->caps.max_sq_sg <= 2) + if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt); else hr_qp->sq.max_gs = max_cnt; - if (hr_qp->sq.max_gs > 2) + if (hr_qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE) hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt * (hr_qp->sq.max_gs - 2)); - if ((hr_qp->sq.max_gs > 2) && (hr_dev->pci_dev->revision == 0x20)) { + if (hr_qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE && + hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_A) { if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) { dev_err(hr_dev->dev, "The extended sge cnt error! sge_cnt=%d\n", @@ -392,7 +393,7 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev, ex_sge_num = hr_qp->sge.sge_cnt; /* Get buf size, SQ and RQ are aligned to page_szie */ - if (hr_dev->caps.max_sq_sg <= 2) { + if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) { hr_qp->buff_size = round_up((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift), PAGE_SIZE) + round_up((hr_qp->sq.wqe_cnt << @@ -528,13 +529,15 @@ static int set_extend_sge_param(struct hns_roce_dev *hr_dev, } /* ud sqwqe's sge use extend sge */ - if (hr_dev->caps.max_sq_sg > 2 && hr_qp->ibqp.qp_type == IB_QPT_GSI) { + if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 && + hr_qp->ibqp.qp_type == IB_QPT_GSI) { hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt * hr_qp->sq.max_gs); hr_qp->sge.sge_shift = 4; } - if ((hr_qp->sq.max_gs > 2) && hr_dev->pci_dev->revision == 0x20) { + if (hr_qp->sq.max_gs > 2 && + hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_A) { if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) { dev_err(dev, "The extended sge cnt error! sge_cnt=%d\n", hr_qp->sge.sge_cnt); @@ -577,7 +580,7 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev, /* Get data_seg numbers */ max_cnt = max(1U, cap->max_send_sge); - if (hr_dev->caps.max_sq_sg <= 2) + if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt); else hr_qp->sq.max_gs = max_cnt; @@ -593,7 +596,7 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev, hr_qp->sq.offset = 0; size = round_up(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift, page_size); - if (hr_dev->caps.max_sq_sg > 2 && hr_qp->sge.sge_cnt) { + if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 && hr_qp->sge.sge_cnt) { hr_qp->sge.sge_cnt = max(page_size/(1 << hr_qp->sge.sge_shift), (u32)hr_qp->sge.sge_cnt); hr_qp->sge.offset = size; @@ -1078,7 +1081,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd, hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port]; /* when hw version is v1, the sqpn is allocated */ - if (hr_dev->caps.max_sq_sg <= 2) + if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) hr_qp->ibqp.qp_num = HNS_ROCE_MAX_PORTS + hr_dev->iboe.phy_port[hr_qp->port]; else diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c index c6d5f06f9cde..5b3dd1a337d4 100644 --- a/drivers/infiniband/hw/hns/hns_roce_srq.c +++ b/drivers/infiniband/hw/hns/hns_roce_srq.c @@ -381,7 +381,8 @@ int hns_roce_create_srq(struct ib_srq *ib_srq, srq->wqe_cnt = roundup_pow_of_two(init_attr->attr.max_wr + 1); srq->max_gs = init_attr->attr.max_sge; - srq_desc_size = roundup_pow_of_two(max(16, 16 * srq->max_gs)); + srq_desc_size = roundup_pow_of_two(max(HNS_ROCE_SGE_SIZE, + HNS_ROCE_SGE_SIZE * srq->max_gs)); srq->wqe_shift = ilog2(srq_desc_size); -- cgit v1.2.3-58-ga151 From d7e2d3432ae7795e5b77a77039e904ed4a769f39 Mon Sep 17 00:00:00 2001 From: Xi Wang Date: Sun, 26 Jan 2020 22:58:35 +0800 Subject: RDMA/hns: 
Optimize eqe buffer allocation flow The eqe has a private multi-hop addressing implementation, but there is already a set of interfaces in the hns driver that can achieve this. So, simplify the eqe buffer allocation process by using the mtr interface and remove large amount of repeated logic. Link: https://lore.kernel.org/r/20200126145835.11368-1-liweihang@huawei.com Signed-off-by: Xi Wang Signed-off-by: Weihang Li Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_device.h | 10 +- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 481 ++++++---------------------- 2 files changed, 108 insertions(+), 383 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index 3b3d6fee1eca..4db818e98af5 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -762,14 +762,8 @@ struct hns_roce_eq { int eqe_ba_pg_sz; int eqe_buf_pg_sz; int hop_num; - u64 *bt_l0; /* Base address table for L0 */ - u64 **bt_l1; /* Base address table for L1 */ - u64 **buf; - dma_addr_t l0_dma; - dma_addr_t *l1_dma; - dma_addr_t *buf_dma; - u32 l0_last_num; /* L0 last chunk num */ - u32 l1_last_num; /* L1 last chunk num */ + struct hns_roce_mtr mtr; + struct hns_roce_buf buf; int eq_max_cnt; int eq_period; int shift; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index b86687809407..29a5ee0cf07e 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -5326,44 +5326,24 @@ static void set_eq_cons_index_v2(struct hns_roce_eq *eq) hns_roce_write64(hr_dev, doorbell, eq->doorbell); } -static struct hns_roce_aeqe *get_aeqe_v2(struct hns_roce_eq *eq, u32 entry) +static inline void *get_eqe_buf(struct hns_roce_eq *eq, unsigned long offset) { u32 buf_chk_sz; - unsigned long off; buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT); - off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE; - - return (struct hns_roce_aeqe *)((char *)(eq->buf_list->buf) + - off % buf_chk_sz); -} - -static struct hns_roce_aeqe *mhop_get_aeqe(struct hns_roce_eq *eq, u32 entry) -{ - u32 buf_chk_sz; - unsigned long off; - - buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT); - - off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE; - - if (eq->hop_num == HNS_ROCE_HOP_NUM_0) - return (struct hns_roce_aeqe *)((u8 *)(eq->bt_l0) + - off % buf_chk_sz); + if (eq->buf.nbufs == 1) + return eq->buf.direct.buf + offset % buf_chk_sz; else - return (struct hns_roce_aeqe *)((u8 *) - (eq->buf[off / buf_chk_sz]) + off % buf_chk_sz); + return eq->buf.page_list[offset / buf_chk_sz].buf + + offset % buf_chk_sz; } static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq) { struct hns_roce_aeqe *aeqe; - if (!eq->hop_num) - aeqe = get_aeqe_v2(eq, eq->cons_index); - else - aeqe = mhop_get_aeqe(eq, eq->cons_index); - + aeqe = get_eqe_buf(eq, (eq->cons_index & (eq->entries - 1)) * + HNS_ROCE_AEQ_ENTRY_SIZE); return (roce_get_bit(aeqe->asyn, HNS_ROCE_V2_AEQ_AEQE_OWNER_S) ^ !!(eq->cons_index & eq->entries)) ? 
aeqe : NULL; } @@ -5456,44 +5436,12 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev, return aeqe_found; } -static struct hns_roce_ceqe *get_ceqe_v2(struct hns_roce_eq *eq, u32 entry) -{ - u32 buf_chk_sz; - unsigned long off; - - buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT); - off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE; - - return (struct hns_roce_ceqe *)((char *)(eq->buf_list->buf) + - off % buf_chk_sz); -} - -static struct hns_roce_ceqe *mhop_get_ceqe(struct hns_roce_eq *eq, u32 entry) -{ - u32 buf_chk_sz; - unsigned long off; - - buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT); - - off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE; - - if (eq->hop_num == HNS_ROCE_HOP_NUM_0) - return (struct hns_roce_ceqe *)((u8 *)(eq->bt_l0) + - off % buf_chk_sz); - else - return (struct hns_roce_ceqe *)((u8 *)(eq->buf[off / - buf_chk_sz]) + off % buf_chk_sz); -} - static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq) { struct hns_roce_ceqe *ceqe; - if (!eq->hop_num) - ceqe = get_ceqe_v2(eq, eq->cons_index); - else - ceqe = mhop_get_ceqe(eq, eq->cons_index); - + ceqe = get_eqe_buf(eq, (eq->cons_index & (eq->entries - 1)) * + HNS_ROCE_CEQ_ENTRY_SIZE); return (!!(roce_get_bit(ceqe->comp, HNS_ROCE_V2_CEQ_CEQE_OWNER_S))) ^ (!!(eq->cons_index & eq->entries)) ? ceqe : NULL; } @@ -5653,90 +5601,11 @@ static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn) dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn); } -static void hns_roce_mhop_free_eq(struct hns_roce_dev *hr_dev, - struct hns_roce_eq *eq) +static void free_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq) { - struct device *dev = hr_dev->dev; - u64 idx; - u64 size; - u32 buf_chk_sz; - u32 bt_chk_sz; - u32 mhop_num; - int eqe_alloc; - int i = 0; - int j = 0; - - mhop_num = hr_dev->caps.eqe_hop_num; - buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT); - bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT); - - if (mhop_num == HNS_ROCE_HOP_NUM_0) { - dma_free_coherent(dev, (unsigned int)(eq->entries * - eq->eqe_size), eq->bt_l0, eq->l0_dma); - return; - } - - dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma); - if (mhop_num == 1) { - for (i = 0; i < eq->l0_last_num; i++) { - if (i == eq->l0_last_num - 1) { - eqe_alloc = i * (buf_chk_sz / eq->eqe_size); - size = (eq->entries - eqe_alloc) * eq->eqe_size; - dma_free_coherent(dev, size, eq->buf[i], - eq->buf_dma[i]); - break; - } - dma_free_coherent(dev, buf_chk_sz, eq->buf[i], - eq->buf_dma[i]); - } - } else if (mhop_num == 2) { - for (i = 0; i < eq->l0_last_num; i++) { - dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i], - eq->l1_dma[i]); - - for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) { - idx = i * (bt_chk_sz / BA_BYTE_LEN) + j; - if ((i == eq->l0_last_num - 1) - && j == eq->l1_last_num - 1) { - eqe_alloc = (buf_chk_sz / eq->eqe_size) - * idx; - size = (eq->entries - eqe_alloc) - * eq->eqe_size; - dma_free_coherent(dev, size, - eq->buf[idx], - eq->buf_dma[idx]); - break; - } - dma_free_coherent(dev, buf_chk_sz, eq->buf[idx], - eq->buf_dma[idx]); - } - } - } - kfree(eq->buf_dma); - kfree(eq->buf); - kfree(eq->l1_dma); - kfree(eq->bt_l1); - eq->buf_dma = NULL; - eq->buf = NULL; - eq->l1_dma = NULL; - eq->bt_l1 = NULL; -} - -static void hns_roce_v2_free_eq(struct hns_roce_dev *hr_dev, - struct hns_roce_eq *eq) -{ - u32 buf_chk_sz; - - buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT); - - if (hr_dev->caps.eqe_hop_num) { - hns_roce_mhop_free_eq(hr_dev, eq); - return; - } - - 
dma_free_coherent(hr_dev->dev, buf_chk_sz, eq->buf_list->buf, - eq->buf_list->map); - kfree(eq->buf_list); + if (!eq->hop_num || eq->hop_num == HNS_ROCE_HOP_NUM_0) + hns_roce_mtr_cleanup(hr_dev, &eq->mtr); + hns_roce_buf_free(hr_dev, eq->buf.size, &eq->buf); } static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev, @@ -5744,6 +5613,8 @@ static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev, void *mb_buf) { struct hns_roce_eq_context *eqc; + u64 ba[MTT_MIN_COUNT] = { 0 }; + int count; eqc = mb_buf; memset(eqc, 0, sizeof(struct hns_roce_eq_context)); @@ -5759,10 +5630,23 @@ static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev, eq->eqe_buf_pg_sz = hr_dev->caps.eqe_buf_pg_sz; eq->shift = ilog2((unsigned int)eq->entries); - if (!eq->hop_num) - eq->eqe_ba = eq->buf_list->map; - else - eq->eqe_ba = eq->l0_dma; + /* if not muti-hop, eqe buffer only use one trunk */ + if (!eq->hop_num || eq->hop_num == HNS_ROCE_HOP_NUM_0) { + eq->eqe_ba = eq->buf.direct.map; + eq->cur_eqe_ba = eq->eqe_ba; + if (eq->buf.npages > 1) + eq->nxt_eqe_ba = eq->eqe_ba + (1 << eq->eqe_buf_pg_sz); + else + eq->nxt_eqe_ba = eq->eqe_ba; + } else { + count = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, ba, + MTT_MIN_COUNT, &eq->eqe_ba); + eq->cur_eqe_ba = ba[0]; + if (count > 1) + eq->nxt_eqe_ba = ba[1]; + else + eq->nxt_eqe_ba = ba[0]; + } /* set eqc state */ roce_set_field(eqc->byte_4, HNS_ROCE_EQC_EQ_ST_M, HNS_ROCE_EQC_EQ_ST_S, @@ -5860,220 +5744,97 @@ static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev, HNS_ROCE_EQC_NXT_EQE_BA_H_S, eq->nxt_eqe_ba >> 44); } -static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev, - struct hns_roce_eq *eq) +static int map_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq, + u32 page_shift) { - struct device *dev = hr_dev->dev; - int eq_alloc_done = 0; - int eq_buf_cnt = 0; - int eqe_alloc; - u32 buf_chk_sz; - u32 bt_chk_sz; - u32 mhop_num; - u64 size; - u64 idx; + struct hns_roce_buf_region region = {}; + dma_addr_t *buf_list = NULL; int ba_num; - int bt_num; - int record_i; - int record_j; - int i = 0; - int j = 0; - - mhop_num = hr_dev->caps.eqe_hop_num; - buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT); - bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT); + int ret; ba_num = DIV_ROUND_UP(PAGE_ALIGN(eq->entries * eq->eqe_size), - buf_chk_sz); - bt_num = DIV_ROUND_UP(ba_num, bt_chk_sz / BA_BYTE_LEN); + 1 << page_shift); + hns_roce_init_buf_region(®ion, hr_dev->caps.eqe_hop_num, 0, ba_num); - if (mhop_num == HNS_ROCE_HOP_NUM_0) { - if (eq->entries > buf_chk_sz / eq->eqe_size) { - dev_err(dev, "eq entries %d is larger than buf_pg_sz!", - eq->entries); - return -EINVAL; - } - eq->bt_l0 = dma_alloc_coherent(dev, eq->entries * eq->eqe_size, - &(eq->l0_dma), GFP_KERNEL); - if (!eq->bt_l0) - return -ENOMEM; - - eq->cur_eqe_ba = eq->l0_dma; - eq->nxt_eqe_ba = 0; + /* alloc a tmp list for storing eq buf address */ + ret = hns_roce_alloc_buf_list(®ion, &buf_list, 1); + if (ret) { + dev_err(hr_dev->dev, "alloc eq buf_list error\n"); + return ret; + } - return 0; + ba_num = hns_roce_get_kmem_bufs(hr_dev, buf_list, region.count, + region.offset, &eq->buf); + if (ba_num != region.count) { + dev_err(hr_dev->dev, "get eqe buf err,expect %d,ret %d.\n", + region.count, ba_num); + ret = -ENOBUFS; + goto done; } - eq->buf_dma = kcalloc(ba_num, sizeof(*eq->buf_dma), GFP_KERNEL); - if (!eq->buf_dma) - return -ENOMEM; - eq->buf = kcalloc(ba_num, sizeof(*eq->buf), GFP_KERNEL); - if (!eq->buf) - goto err_kcalloc_buf; - - if (mhop_num == 2) { - eq->l1_dma = 
kcalloc(bt_num, sizeof(*eq->l1_dma), GFP_KERNEL); - if (!eq->l1_dma) - goto err_kcalloc_l1_dma; - - eq->bt_l1 = kcalloc(bt_num, sizeof(*eq->bt_l1), GFP_KERNEL); - if (!eq->bt_l1) - goto err_kcalloc_bt_l1; - } - - /* alloc L0 BT */ - eq->bt_l0 = dma_alloc_coherent(dev, bt_chk_sz, &eq->l0_dma, GFP_KERNEL); - if (!eq->bt_l0) - goto err_dma_alloc_l0; - - if (mhop_num == 1) { - if (ba_num > (bt_chk_sz / BA_BYTE_LEN)) - dev_err(dev, "ba_num %d is too large for 1 hop\n", - ba_num); - - /* alloc buf */ - for (i = 0; i < bt_chk_sz / BA_BYTE_LEN; i++) { - if (eq_buf_cnt + 1 < ba_num) { - size = buf_chk_sz; - } else { - eqe_alloc = i * (buf_chk_sz / eq->eqe_size); - size = (eq->entries - eqe_alloc) * eq->eqe_size; - } - eq->buf[i] = dma_alloc_coherent(dev, size, - &(eq->buf_dma[i]), - GFP_KERNEL); - if (!eq->buf[i]) - goto err_dma_alloc_buf; + hns_roce_mtr_init(&eq->mtr, PAGE_SHIFT + hr_dev->caps.eqe_ba_pg_sz, + page_shift); + ret = hns_roce_mtr_attach(hr_dev, &eq->mtr, &buf_list, ®ion, 1); + if (ret) + dev_err(hr_dev->dev, "mtr attatch error for eqe\n"); - *(eq->bt_l0 + i) = eq->buf_dma[i]; + goto done; - eq_buf_cnt++; - if (eq_buf_cnt >= ba_num) - break; - } - eq->cur_eqe_ba = eq->buf_dma[0]; - if (ba_num > 1) - eq->nxt_eqe_ba = eq->buf_dma[1]; - - } else if (mhop_num == 2) { - /* alloc L1 BT and buf */ - for (i = 0; i < bt_chk_sz / BA_BYTE_LEN; i++) { - eq->bt_l1[i] = dma_alloc_coherent(dev, bt_chk_sz, - &(eq->l1_dma[i]), - GFP_KERNEL); - if (!eq->bt_l1[i]) - goto err_dma_alloc_l1; - *(eq->bt_l0 + i) = eq->l1_dma[i]; - - for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) { - idx = i * bt_chk_sz / BA_BYTE_LEN + j; - if (eq_buf_cnt + 1 < ba_num) { - size = buf_chk_sz; - } else { - eqe_alloc = (buf_chk_sz / eq->eqe_size) - * idx; - size = (eq->entries - eqe_alloc) - * eq->eqe_size; - } - eq->buf[idx] = dma_alloc_coherent(dev, size, - &(eq->buf_dma[idx]), - GFP_KERNEL); - if (!eq->buf[idx]) - goto err_dma_alloc_buf; - - *(eq->bt_l1[i] + j) = eq->buf_dma[idx]; - - eq_buf_cnt++; - if (eq_buf_cnt >= ba_num) { - eq_alloc_done = 1; - break; - } - } + hns_roce_mtr_cleanup(hr_dev, &eq->mtr); +done: + hns_roce_free_buf_list(&buf_list, 1); - if (eq_alloc_done) - break; - } - eq->cur_eqe_ba = eq->buf_dma[0]; - if (ba_num > 1) - eq->nxt_eqe_ba = eq->buf_dma[1]; - } + return ret; +} - eq->l0_last_num = i + 1; - if (mhop_num == 2) - eq->l1_last_num = j + 1; +static int alloc_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq) +{ + struct hns_roce_buf *buf = &eq->buf; + bool is_mhop = false; + u32 page_shift; + u32 mhop_num; + u32 max_size; + int ret; - return 0; + page_shift = PAGE_SHIFT + hr_dev->caps.eqe_buf_pg_sz; + mhop_num = hr_dev->caps.eqe_hop_num; + if (!mhop_num) { + max_size = 1 << page_shift; + buf->size = max_size; + } else if (mhop_num == HNS_ROCE_HOP_NUM_0) { + max_size = eq->entries * eq->eqe_size; + buf->size = max_size; + } else { + max_size = 1 << page_shift; + buf->size = PAGE_ALIGN(eq->entries * eq->eqe_size); + is_mhop = true; + } -err_dma_alloc_l1: - dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma); - eq->bt_l0 = NULL; - eq->l0_dma = 0; - for (i -= 1; i >= 0; i--) { - dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i], - eq->l1_dma[i]); - - for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) { - idx = i * bt_chk_sz / BA_BYTE_LEN + j; - dma_free_coherent(dev, buf_chk_sz, eq->buf[idx], - eq->buf_dma[idx]); - } + ret = hns_roce_buf_alloc(hr_dev, buf->size, max_size, buf, page_shift); + if (ret) { + dev_err(hr_dev->dev, "alloc eq buf error\n"); + return ret; } - goto err_dma_alloc_l0; - 
-err_dma_alloc_buf: - dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma); - eq->bt_l0 = NULL; - eq->l0_dma = 0; - - if (mhop_num == 1) - for (i -= 1; i >= 0; i--) - dma_free_coherent(dev, buf_chk_sz, eq->buf[i], - eq->buf_dma[i]); - else if (mhop_num == 2) { - record_i = i; - record_j = j; - for (; i >= 0; i--) { - dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i], - eq->l1_dma[i]); - - for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) { - if (i == record_i && j >= record_j) - break; - - idx = i * bt_chk_sz / BA_BYTE_LEN + j; - dma_free_coherent(dev, buf_chk_sz, - eq->buf[idx], - eq->buf_dma[idx]); - } + + if (is_mhop) { + ret = map_eq_buf(hr_dev, eq, page_shift); + if (ret) { + dev_err(hr_dev->dev, "map roce buf error\n"); + goto err_alloc; } } -err_dma_alloc_l0: - kfree(eq->bt_l1); - eq->bt_l1 = NULL; - -err_kcalloc_bt_l1: - kfree(eq->l1_dma); - eq->l1_dma = NULL; - -err_kcalloc_l1_dma: - kfree(eq->buf); - eq->buf = NULL; - -err_kcalloc_buf: - kfree(eq->buf_dma); - eq->buf_dma = NULL; - - return -ENOMEM; + return 0; +err_alloc: + hns_roce_buf_free(hr_dev, buf->size, buf); + return ret; } static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq, unsigned int eq_cmd) { - struct device *dev = hr_dev->dev; struct hns_roce_cmd_mailbox *mailbox; - u32 buf_chk_sz = 0; int ret; /* Allocate mailbox memory */ @@ -6081,38 +5842,17 @@ static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev, if (IS_ERR(mailbox)) return PTR_ERR(mailbox); - if (!hr_dev->caps.eqe_hop_num) { - buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT); - - eq->buf_list = kzalloc(sizeof(struct hns_roce_buf_list), - GFP_KERNEL); - if (!eq->buf_list) { - ret = -ENOMEM; - goto free_cmd_mbox; - } - - eq->buf_list->buf = dma_alloc_coherent(dev, buf_chk_sz, - &(eq->buf_list->map), - GFP_KERNEL); - if (!eq->buf_list->buf) { - ret = -ENOMEM; - goto err_alloc_buf; - } - - } else { - ret = hns_roce_mhop_alloc_eq(hr_dev, eq); - if (ret) { - ret = -ENOMEM; - goto free_cmd_mbox; - } + ret = alloc_eq_buf(hr_dev, eq); + if (ret) { + ret = -ENOMEM; + goto free_cmd_mbox; } - hns_roce_config_eqc(hr_dev, eq, mailbox->buf); ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0, eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS); if (ret) { - dev_err(dev, "[mailbox cmd] create eqc failed.\n"); + dev_err(hr_dev->dev, "[mailbox cmd] create eqc failed.\n"); goto err_cmd_mbox; } @@ -6121,16 +5861,7 @@ static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev, return 0; err_cmd_mbox: - if (!hr_dev->caps.eqe_hop_num) - dma_free_coherent(dev, buf_chk_sz, eq->buf_list->buf, - eq->buf_list->map); - else { - hns_roce_mhop_free_eq(hr_dev, eq); - goto free_cmd_mbox; - } - -err_alloc_buf: - kfree(eq->buf_list); + free_eq_buf(hr_dev, eq); free_cmd_mbox: hns_roce_free_cmd_mailbox(hr_dev, mailbox); @@ -6310,7 +6041,7 @@ err_request_irq_fail: err_create_eq_fail: for (i -= 1; i >= 0; i--) - hns_roce_v2_free_eq(hr_dev, &eq_table->eq[i]); + free_eq_buf(hr_dev, &eq_table->eq[i]); kfree(eq_table->eq); return ret; @@ -6332,7 +6063,7 @@ static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev) for (i = 0; i < eq_num; i++) { hns_roce_v2_destroy_eqc(hr_dev, i); - hns_roce_v2_free_eq(hr_dev, &eq_table->eq[i]); + free_eq_buf(hr_dev, &eq_table->eq[i]); } kfree(eq_table->eq); -- cgit v1.2.3-58-ga151 From 9a4b24108d92ffaa886e37923088bd806b988948 Mon Sep 17 00:00:00 2001 From: Shiraz Saleem Date: Tue, 4 Feb 2020 16:38:40 -0600 Subject: i40iw: Do an RCU lookup in i40iw_add_ipv4_addr The in_dev_for_each_ifa_rtnl() iterator in 
i40iw_add_ipv4_addr requires that the rtnl lock be held. But the rtnl_trylock/unlock scheme in this function does not guarantee it. Replace the rtnl locking with an RCU lookup using in_dev_for_each_ifa_rcu() Fixes: 8e06af711bf2 ("i40iw: add main, hdr, status") Link: https://lore.kernel.org/r/20200204223840.2151-1-shiraz.saleem@intel.com Signed-off-by: Shiraz Saleem Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/i40iw/i40iw_main.c | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c index 238614370927..84e1b52af15e 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_main.c +++ b/drivers/infiniband/hw/i40iw/i40iw_main.c @@ -1212,22 +1212,19 @@ static void i40iw_add_ipv4_addr(struct i40iw_device *iwdev) { struct net_device *dev; struct in_device *idev; - bool got_lock = true; u32 ip_addr; - if (!rtnl_trylock()) - got_lock = false; - - for_each_netdev(&init_net, dev) { + rcu_read_lock(); + for_each_netdev_rcu(&init_net, dev) { if ((((rdma_vlan_dev_vlan_id(dev) < 0xFFFF) && (rdma_vlan_dev_real_dev(dev) == iwdev->netdev)) || - (dev == iwdev->netdev)) && (dev->flags & IFF_UP)) { + (dev == iwdev->netdev)) && (READ_ONCE(dev->flags) & IFF_UP)) { const struct in_ifaddr *ifa; - idev = in_dev_get(dev); + idev = __in_dev_get_rcu(dev); if (!idev) continue; - in_dev_for_each_ifa_rtnl(ifa, idev) { + in_dev_for_each_ifa_rcu(ifa, idev) { i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM, "IP=%pI4, vlan_id=%d, MAC=%pM\n", &ifa->ifa_address, rdma_vlan_dev_vlan_id(dev), dev->dev_addr); @@ -1239,12 +1236,9 @@ static void i40iw_add_ipv4_addr(struct i40iw_device *iwdev) true, I40IW_ARP_ADD); } - - in_dev_put(idev); } } - if (got_lock) - rtnl_unlock(); + rcu_read_unlock(); } /** -- cgit v1.2.3-58-ga151 From beb205dd67aaa4315dedf5c40b47c6e9dee5a469 Mon Sep 17 00:00:00 2001 From: Kamal Heib Date: Wed, 5 Feb 2020 10:13:54 +0200 Subject: RDMA/siw: Fix setting active_mtu attribute Make sure to set the active_mtu attribute to avoid report the following invalid value: $ ibv_devinfo -d siw0 | grep active_mtu active_mtu: invalid MTU (0) Fixes: 303ae1cdfdf7 ("rdma/siw: application interface") Link: https://lore.kernel.org/r/20200205081354.30438-1-kamalheib1@gmail.com Signed-off-by: Kamal Heib Reviewed-by: Gal Pressman Reviewed-by: Bernard Metzler Signed-off-by: Jason Gunthorpe --- drivers/infiniband/sw/siw/siw_verbs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c index 07e30138aaa1..73485d0da907 100644 --- a/drivers/infiniband/sw/siw/siw_verbs.c +++ b/drivers/infiniband/sw/siw/siw_verbs.c @@ -168,12 +168,12 @@ int siw_query_port(struct ib_device *base_dev, u8 port, memset(attr, 0, sizeof(*attr)); - attr->active_mtu = attr->max_mtu; attr->active_speed = 2; attr->active_width = 2; attr->gid_tbl_len = 1; attr->max_msg_sz = -1; attr->max_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu); + attr->active_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu); attr->phys_state = sdev->state == IB_PORT_ACTIVE ? 
IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED; attr->pkey_tbl_len = 1; -- cgit v1.2.3-58-ga151 From ca750d4a9c426854e221ca404fbefeab9800134e Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Wed, 12 Feb 2020 09:26:27 +0200 Subject: RDMA/ucma: Mask QPN to be 24 bits according to IBTA IBTA declares QPN as 24bits, mask input to ensure that kernel doesn't get higher bits and ensure by adding WANR_ONCE() that other CM users do the same. Link: https://lore.kernel.org/r/20200212072635.682689-2-leon@kernel.org Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cm.c | 3 +++ drivers/infiniband/core/ucma.c | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 68cc1b2d6824..33c0d9e7bb66 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -2188,6 +2188,9 @@ int ib_send_cm_rep(struct ib_cm_id *cm_id, cm_id_priv->initiator_depth = param->initiator_depth; cm_id_priv->responder_resources = param->responder_resources; cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg)); + WARN_ONCE(param->qp_num & 0xFF000000, + "IBTA declares QPN to be 24 bits, but it is 0x%X\n", + param->qp_num); cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF); out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index 0274e9b704be..57e68491a2fd 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -1045,7 +1045,7 @@ static void ucma_copy_conn_param(struct rdma_cm_id *id, dst->retry_count = src->retry_count; dst->rnr_retry_count = src->rnr_retry_count; dst->srq = src->srq; - dst->qp_num = src->qp_num; + dst->qp_num = src->qp_num & 0xFFFFFF; dst->qkey = (id->route.addr.src_addr.ss_family == AF_IB) ? src->qkey : 0; } -- cgit v1.2.3-58-ga151 From f03d9fadfe13a78ee28fec320d43f7b37574adcb Mon Sep 17 00:00:00 2001 From: Michael Guralnik Date: Wed, 12 Feb 2020 09:35:59 +0200 Subject: RDMA/core: Add weak ordering dma attr to dma mapping For memory regions registered with IB_ACCESS_RELAXED_ORDERING will be dma mapped with the DMA_ATTR_WEAK_ORDERING. This will allow reads and writes to the mapping to be weakly ordered, such change can enhance performance on some supporting architectures. 
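Distilled to its essence (a sketch; variable names are placeholders, and the same approach applies to any driver-owned scatterlist, not only the umem path changed below):

        unsigned long dma_attr = 0;

        if (access & IB_ACCESS_RELAXED_ORDERING)
                dma_attr |= DMA_ATTR_WEAK_ORDERING;

        nmap = ib_dma_map_sg_attrs(device, sgl, nents,
                                   DMA_BIDIRECTIONAL, dma_attr);
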
Link: https://lore.kernel.org/r/20200212073559.684139-1-leon@kernel.org Signed-off-by: Michael Guralnik Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/umem.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index 06b6125b5ae1..82455a1392f1 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c @@ -197,6 +197,7 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr, unsigned long lock_limit; unsigned long new_pinned; unsigned long cur_base; + unsigned long dma_attr = 0; struct mm_struct *mm; unsigned long npages; int ret; @@ -278,10 +279,12 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr, sg_mark_end(sg); - umem->nmap = ib_dma_map_sg(device, - umem->sg_head.sgl, - umem->sg_nents, - DMA_BIDIRECTIONAL); + if (access & IB_ACCESS_RELAXED_ORDERING) + dma_attr |= DMA_ATTR_WEAK_ORDERING; + + umem->nmap = + ib_dma_map_sg_attrs(device, umem->sg_head.sgl, umem->sg_nents, + DMA_BIDIRECTIONAL, dma_attr); if (!umem->nmap) { ret = -ENOMEM; -- cgit v1.2.3-58-ga151 From ffd541d45726341c1830ff595fd7352b6d1cfbcd Mon Sep 17 00:00:00 2001 From: Yixian Liu Date: Thu, 6 Feb 2020 17:56:44 +0800 Subject: RDMA/hns: Add the workqueue framework for flush cqe handler HiP08 RoCE hardware lacks ability(a known hardware problem) to flush outstanding WQEs if QP state gets into errored mode for some reason. To overcome this hardware problem and as a workaround, when QP is detected to be in errored state during various legs like post send, post receive etc [1], flush needs to be performed from the driver. The earlier patch[1] sent to solve the hardware limitation explained in the cover-letter had a bug in the software flushing leg. It acquired mutex while modifying QP state to errored state and while conveying it to the hardware using the mailbox. This caused leg to sleep while holding spin-lock and caused crash. Suggested Solution: we have proposed to defer the flushing of the QP in the Errored state using the workqueue to get around with the limitation of our hardware. This patch adds the framework of the workqueue and the flush handler function. 
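The follow-on patch in this series wires the framework into the post send/receive and poll paths; for context, a call site looks like this (the flush_flag bit itself is added by that next patch):

        if (qp->state == IB_QPS_ERR)
                if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
                        init_flush_work(hr_dev, qp);
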
[1] https://patchwork.kernel.org/patch/10534271/ Link: https://lore.kernel.org/r/1580983005-13899-2-git-send-email-liuyixian@huawei.com Signed-off-by: Yixian Liu Reviewed-by: Salil Mehta Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_device.h | 20 +++++++++------- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 3 +-- drivers/infiniband/hw/hns/hns_roce_qp.c | 37 +++++++++++++++++++++++++++++ 3 files changed, 49 insertions(+), 11 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index 4db818e98af5..067e3ac6fcfa 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -641,6 +641,15 @@ struct hns_roce_rinl_buf { u32 wqe_cnt; }; +struct hns_roce_work { + struct hns_roce_dev *hr_dev; + struct work_struct work; + u32 qpn; + u32 cqn; + int event_type; + int sub_type; +}; + struct hns_roce_qp { struct ib_qp ibqp; struct hns_roce_buf hr_buf; @@ -684,6 +693,7 @@ struct hns_roce_qp { struct hns_roce_sge sge; u32 next_sge; + struct hns_roce_work flush_work; struct hns_roce_rinl_buf rq_inl_buf; struct list_head node; /* all qps are on a list */ struct list_head rq_node; /* all recv qps are on a list */ @@ -900,15 +910,6 @@ struct hns_roce_caps { u16 default_ceq_arm_st; }; -struct hns_roce_work { - struct hns_roce_dev *hr_dev; - struct work_struct work; - u32 qpn; - u32 cqn; - int event_type; - int sub_type; -}; - struct hns_roce_dfx_hw { int (*query_cqc_info)(struct hns_roce_dev *hr_dev, u32 cqn, int *buffer); @@ -1231,6 +1232,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd, struct ib_udata *udata); int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata); +void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp); void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n); void *get_send_wqe(struct hns_roce_qp *hr_qp, int n); void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n); diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 29a5ee0cf07e..f69541acb085 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -6023,8 +6023,7 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev) goto err_request_irq_fail; } - hr_dev->irq_workq = - create_singlethread_workqueue("hns_roce_irq_workqueue"); + hr_dev->irq_workq = alloc_ordered_workqueue("hns_roce_irq_workq", 0); if (!hr_dev->irq_workq) { dev_err(dev, "Create irq workqueue failed!\n"); ret = -ENOMEM; diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index e13f16c59115..aeb91a75cae5 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -43,6 +43,43 @@ #define SQP_NUM (2 * HNS_ROCE_MAX_PORTS) +static void flush_work_handle(struct work_struct *work) +{ + struct hns_roce_work *flush_work = container_of(work, + struct hns_roce_work, work); + struct hns_roce_qp *hr_qp = container_of(flush_work, + struct hns_roce_qp, flush_work); + struct device *dev = flush_work->hr_dev->dev; + struct ib_qp_attr attr; + int attr_mask; + int ret; + + attr_mask = IB_QP_STATE; + attr.qp_state = IB_QPS_ERR; + + ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL); + if (ret) + dev_err(dev, "Modify QP to error state failed(%d) during CQE flush\n", + ret); + + /* + * make sure we signal QP destroy leg that flush QP was completed + * so that it can 
safely proceed ahead now and destroy QP + */ + if (atomic_dec_and_test(&hr_qp->refcount)) + complete(&hr_qp->free); +} + +void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) +{ + struct hns_roce_work *flush_work = &hr_qp->flush_work; + + flush_work->hr_dev = hr_dev; + INIT_WORK(&flush_work->work, flush_work_handle); + atomic_inc(&hr_qp->refcount); + queue_work(hr_dev->irq_workq, &flush_work->work); +} + void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type) { struct device *dev = hr_dev->dev; -- cgit v1.2.3-58-ga151 From b53742865e9f09cbba4d9daa161760ec23f36aa4 Mon Sep 17 00:00:00 2001 From: Yixian Liu Date: Thu, 6 Feb 2020 17:56:45 +0800 Subject: RDMA/hns: Delayed flush cqe process with workqueue HiP08 RoCE hardware lacks ability(a known hardware problem) to flush outstanding WQEs if QP state gets into errored mode for some reason. To overcome this hardware problem and as a workaround, when QP is detected to be in errored state during various legs like post send, post receive etc[1], flush needs to be performed from the driver. The earlier patch[1] sent to solve the hardware limitation explained in the cover-letter had a bug in the software flushing leg. It acquired mutex while modifying QP state to errored state and while conveying it to the hardware using the mailbox. This caused leg to sleep while holding spin-lock and caused crash. Suggested Solution: we have proposed to defer the flushing of the QP in the Errored state using the workqueue to get around with the limitation of our hardware. This patch specifically adds the calls to the flush handler from where parts of the code like post_send/post_recv etc. when the QP state gets into the errored mode. [1] https://patchwork.kernel.org/patch/10534271/ Link: https://lore.kernel.org/r/1580983005-13899-3-git-send-email-liuyixian@huawei.com Signed-off-by: Yixian Liu Reviewed-by: Salil Mehta Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_device.h | 6 ++ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 99 +++++++++++++++-------------- drivers/infiniband/hw/hns/hns_roce_qp.c | 11 ++-- 3 files changed, 66 insertions(+), 50 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index 067e3ac6fcfa..d0a83926dc8f 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -641,6 +641,10 @@ struct hns_roce_rinl_buf { u32 wqe_cnt; }; +enum { + HNS_ROCE_FLUSH_FLAG = 0, +}; + struct hns_roce_work { struct hns_roce_dev *hr_dev; struct work_struct work; @@ -693,6 +697,8 @@ struct hns_roce_qp { struct hns_roce_sge sge; u32 next_sge; + /* 0: flush needed, 1: unneeded */ + unsigned long flush_flag; struct hns_roce_work flush_work; struct hns_roce_rinl_buf rq_inl_buf; struct list_head node; /* all qps are on a list */ diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index f69541acb085..aa9d1792e0cd 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -220,11 +220,6 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr, return 0; } -static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, - const struct ib_qp_attr *attr, - int attr_mask, enum ib_qp_state cur_state, - enum ib_qp_state new_state); - static int check_send_valid(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) { @@ -261,7 +256,6 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, 
struct hns_roce_wqe_frmr_seg *fseg; struct device *dev = hr_dev->dev; struct hns_roce_v2_db sq_db; - struct ib_qp_attr attr; unsigned int owner_bit; unsigned int sge_idx; unsigned int wqe_idx; @@ -269,7 +263,6 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, int valid_num_sge; void *wqe = NULL; bool loopback; - int attr_mask; u32 tmp_len; u32 hr_op; u8 *smac; @@ -607,18 +600,19 @@ out: qp->next_sge = sge_idx; - if (qp->state == IB_QPS_ERR) { - attr_mask = IB_QP_STATE; - attr.qp_state = IB_QPS_ERR; - - ret = hns_roce_v2_modify_qp(&qp->ibqp, &attr, attr_mask, - qp->state, IB_QPS_ERR); - if (ret) { - spin_unlock_irqrestore(&qp->sq.lock, flags); - *bad_wr = wr; - return ret; - } - } + /* + * Hip08 hardware cannot flush the WQEs in SQ if the QP state + * gets into errored mode. Hence, as a workaround to this + * hardware limitation, driver needs to assist in flushing. But + * the flushing operation uses mailbox to convey the QP state to + * the hardware and which can sleep due to the mutex protection + * around the mailbox calls. Hence, use the deferred flush for + * now. + */ + if (qp->state == IB_QPS_ERR) + if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, + &qp->flush_flag)) + init_flush_work(hr_dev, qp); } spin_unlock_irqrestore(&qp->sq.lock, flags); @@ -646,10 +640,8 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct hns_roce_v2_wqe_data_seg *dseg; struct hns_roce_rinl_sge *sge_list; struct device *dev = hr_dev->dev; - struct ib_qp_attr attr; unsigned long flags; void *wqe = NULL; - int attr_mask; u32 wqe_idx; int nreq; int ret; @@ -719,19 +711,19 @@ out: *hr_qp->rdb.db_record = hr_qp->rq.head & 0xffff; - if (hr_qp->state == IB_QPS_ERR) { - attr_mask = IB_QP_STATE; - attr.qp_state = IB_QPS_ERR; - - ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, &attr, - attr_mask, hr_qp->state, - IB_QPS_ERR); - if (ret) { - spin_unlock_irqrestore(&hr_qp->rq.lock, flags); - *bad_wr = wr; - return ret; - } - } + /* + * Hip08 hardware cannot flush the WQEs in RQ if the QP state + * gets into errored mode. Hence, as a workaround to this + * hardware limitation, driver needs to assist in flushing. But + * the flushing operation uses mailbox to convey the QP state to + * the hardware and which can sleep due to the mutex protection + * around the mailbox calls. Hence, use the deferred flush for + * now. 
+ */ + if (hr_qp->state == IB_QPS_ERR) + if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, + &hr_qp->flush_flag)) + init_flush_work(hr_dev, hr_qp); } spin_unlock_irqrestore(&hr_qp->rq.lock, flags); @@ -3013,13 +3005,11 @@ out: static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq, struct hns_roce_qp **cur_qp, struct ib_wc *wc) { + struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device); struct hns_roce_srq *srq = NULL; - struct hns_roce_dev *hr_dev; struct hns_roce_v2_cqe *cqe; struct hns_roce_qp *hr_qp; struct hns_roce_wq *wq; - struct ib_qp_attr attr; - int attr_mask; int is_send; u16 wqe_ctr; u32 opcode; @@ -3043,7 +3033,6 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq, V2_CQE_BYTE_16_LCL_QPN_S); if (!*cur_qp || (qpn & HNS_ROCE_V2_CQE_QPN_MASK) != (*cur_qp)->qpn) { - hr_dev = to_hr_dev(hr_cq->ib_cq.device); hr_qp = __hns_roce_qp_lookup(hr_dev, qpn); if (unlikely(!hr_qp)) { dev_err(hr_dev->dev, "CQ %06lx with entry for unknown QPN %06x\n", @@ -3053,6 +3042,7 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq, *cur_qp = hr_qp; } + hr_qp = *cur_qp; wc->qp = &(*cur_qp)->ibqp; wc->vendor_err = 0; @@ -3137,14 +3127,24 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq, break; } - /* flush cqe if wc status is error, excluding flush error */ - if ((wc->status != IB_WC_SUCCESS) && - (wc->status != IB_WC_WR_FLUSH_ERR)) { - attr_mask = IB_QP_STATE; - attr.qp_state = IB_QPS_ERR; - return hns_roce_v2_modify_qp(&(*cur_qp)->ibqp, - &attr, attr_mask, - (*cur_qp)->state, IB_QPS_ERR); + /* + * Hip08 hardware cannot flush the WQEs in SQ/RQ if the QP state gets + * into errored mode. Hence, as a workaround to this hardware + * limitation, driver needs to assist in flushing. But the flushing + * operation uses mailbox to convey the QP state to the hardware and + * which can sleep due to the mutex protection around the mailbox calls. + * Hence, use the deferred flush for now. Once wc error detected, the + * flushing operation is needed. 
+ */ + if (wc->status != IB_WC_SUCCESS && + wc->status != IB_WC_WR_FLUSH_ERR) { + dev_err(hr_dev->dev, "error cqe status is: 0x%x\n", + status & HNS_ROCE_V2_CQE_STATUS_MASK); + + if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag)) + init_flush_work(hr_dev, hr_qp); + + return 0; } if (wc->status == IB_WC_WR_FLUSH_ERR) @@ -4735,6 +4735,8 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, struct hns_roce_v2_qp_context *context = ctx; struct hns_roce_v2_qp_context *qpc_mask = ctx + 1; struct device *dev = hr_dev->dev; + unsigned long sq_flag = 0; + unsigned long rq_flag = 0; int ret; /* @@ -4752,6 +4754,9 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, /* When QP state is err, SQ and RQ WQE should be flushed */ if (new_state == IB_QPS_ERR) { + spin_lock_irqsave(&hr_qp->sq.lock, sq_flag); + spin_lock_irqsave(&hr_qp->rq.lock, rq_flag); + hr_qp->state = IB_QPS_ERR; roce_set_field(context->byte_160_sq_ci_pi, V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M, V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, @@ -4769,6 +4774,8 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M, V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0); } + spin_unlock_irqrestore(&hr_qp->rq.lock, rq_flag); + spin_unlock_irqrestore(&hr_qp->sq.lock, sq_flag); } /* Configure the optional fields */ diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index aeb91a75cae5..c52e1b00f30d 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -57,10 +57,12 @@ static void flush_work_handle(struct work_struct *work) attr_mask = IB_QP_STATE; attr.qp_state = IB_QPS_ERR; - ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL); - if (ret) - dev_err(dev, "Modify QP to error state failed(%d) during CQE flush\n", - ret); + if (test_and_clear_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag)) { + ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL); + if (ret) + dev_err(dev, "Modify QP to error state failed(%d) during CQE flush\n", + ret); + } /* * make sure we signal QP destroy leg that flush QP was completed @@ -764,6 +766,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, spin_lock_init(&hr_qp->rq.lock); hr_qp->state = IB_QPS_RESET; + hr_qp->flush_flag = 0; hr_qp->ibqp.qp_type = init_attr->qp_type; -- cgit v1.2.3-58-ga151 From b72bfc965eb5d3475acabb038a1f9f6034c4658d Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Thu, 13 Feb 2020 15:19:11 -0400 Subject: RDMA/core: Get rid of ib_create_qp_user This function accepts a udata but does nothing with it, and is never passed a !NULL udata. Rename it to ib_create_qp which was the only caller and remove the udata. 
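For in-kernel consumers nothing changes at the call sites; a minimal usage sketch (the pd and cq objects are assumed to already exist, and the capability numbers are placeholders rather than values taken from this series):

	struct ib_qp_init_attr init_attr = {
		.send_cq     = cq,
		.recv_cq     = cq,
		.cap         = { .max_send_wr = 16, .max_recv_wr = 16,
				 .max_send_sge = 1, .max_recv_sge = 1 },
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.qp_type     = IB_QPT_RC,
	};
	struct ib_qp *qp;

	/* kernel QPs never carry udata, so they call ib_create_qp() directly */
	qp = ib_create_qp(pd, &init_attr);
	if (IS_ERR(qp))
		return PTR_ERR(qp);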
Link: https://lore.kernel.org/r/20200213191911.GA9898@ziepe.ca Reviewed-by: Max Gurtovoy Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/verbs.c | 22 +++++++++++++++------- include/rdma/ib_verbs.h | 31 ++----------------------------- 2 files changed, 17 insertions(+), 36 deletions(-) diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 3ebae3b65c28..13ff4c8bfe5a 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -1127,8 +1127,7 @@ struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd, EXPORT_SYMBOL(ib_open_qp); static struct ib_qp *create_xrc_qp_user(struct ib_qp *qp, - struct ib_qp_init_attr *qp_init_attr, - struct ib_udata *udata) + struct ib_qp_init_attr *qp_init_attr) { struct ib_qp *real_qp = qp; @@ -1150,9 +1149,18 @@ static struct ib_qp *create_xrc_qp_user(struct ib_qp *qp, return qp; } -struct ib_qp *ib_create_qp_user(struct ib_pd *pd, - struct ib_qp_init_attr *qp_init_attr, - struct ib_udata *udata) +/** + * ib_create_qp - Creates a kernel QP associated with the specified protection + * domain. + * @pd: The protection domain associated with the QP. + * @qp_init_attr: A list of initial attributes required to create the + * QP. If QP creation succeeds, then the attributes are updated to + * the actual capabilities of the created QP. + * + * NOTE: for user qp use ib_create_qp_user with valid udata! + */ +struct ib_qp *ib_create_qp(struct ib_pd *pd, + struct ib_qp_init_attr *qp_init_attr) { struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device; struct ib_qp *qp; @@ -1197,7 +1205,7 @@ struct ib_qp *ib_create_qp_user(struct ib_pd *pd, if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) { struct ib_qp *xrc_qp = - create_xrc_qp_user(qp, qp_init_attr, udata); + create_xrc_qp_user(qp, qp_init_attr); if (IS_ERR(xrc_qp)) { ret = PTR_ERR(xrc_qp); @@ -1253,7 +1261,7 @@ err: return ERR_PTR(ret); } -EXPORT_SYMBOL(ib_create_qp_user); +EXPORT_SYMBOL(ib_create_qp); static const struct { int valid; diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 1f779fad3a1e..5f3a04ead9f5 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -3627,35 +3627,8 @@ static inline int ib_post_srq_recv(struct ib_srq *srq, bad_recv_wr ? : &dummy); } -/** - * ib_create_qp_user - Creates a QP associated with the specified protection - * domain. - * @pd: The protection domain associated with the QP. - * @qp_init_attr: A list of initial attributes required to create the - * QP. If QP creation succeeds, then the attributes are updated to - * the actual capabilities of the created QP. - * @udata: Valid user data or NULL for kernel objects - */ -struct ib_qp *ib_create_qp_user(struct ib_pd *pd, - struct ib_qp_init_attr *qp_init_attr, - struct ib_udata *udata); - -/** - * ib_create_qp - Creates a kernel QP associated with the specified protection - * domain. - * @pd: The protection domain associated with the QP. - * @qp_init_attr: A list of initial attributes required to create the - * QP. If QP creation succeeds, then the attributes are updated to - * the actual capabilities of the created QP. - * @udata: Valid user data or NULL for kernel objects - * - * NOTE: for user qp use ib_create_qp_user with valid udata! 
- */ -static inline struct ib_qp *ib_create_qp(struct ib_pd *pd, - struct ib_qp_init_attr *qp_init_attr) -{ - return ib_create_qp_user(pd, qp_init_attr, NULL); -} +struct ib_qp *ib_create_qp(struct ib_pd *pd, + struct ib_qp_init_attr *qp_init_attr); /** * ib_modify_qp_with_udata - Modifies the attributes for the specified QP. -- cgit v1.2.3-58-ga151 From 167b95ec887029c5fe0fb0f601fb209323f498fb Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Tue, 18 Feb 2020 19:17:00 +0000 Subject: RDMA/ucma: Use refcount_t for the ctx->ref Don't use an atomic as a refcount. Link: https://lore.kernel.org/r/20200218191657.GA29724@ziepe.ca Reviewed-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/ucma.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index 57e68491a2fd..66ad29c672fc 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -85,7 +85,7 @@ struct ucma_file { struct ucma_context { u32 id; struct completion comp; - atomic_t ref; + refcount_t ref; int events_reported; int backlog; @@ -152,7 +152,7 @@ static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id) if (ctx->closing) ctx = ERR_PTR(-EIO); else - atomic_inc(&ctx->ref); + refcount_inc(&ctx->ref); } xa_unlock(&ctx_table); return ctx; @@ -160,7 +160,7 @@ static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id) static void ucma_put_ctx(struct ucma_context *ctx) { - if (atomic_dec_and_test(&ctx->ref)) + if (refcount_dec_and_test(&ctx->ref)) complete(&ctx->comp); } @@ -212,7 +212,7 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file) return NULL; INIT_WORK(&ctx->close_work, ucma_close_id); - atomic_set(&ctx->ref, 1); + refcount_set(&ctx->ref, 1); init_completion(&ctx->comp); INIT_LIST_HEAD(&ctx->mc_list); ctx->file = file; @@ -1502,7 +1502,7 @@ static ssize_t ucma_leave_multicast(struct ucma_file *file, mc = ERR_PTR(-ENOENT); else if (mc->ctx->file != file) mc = ERR_PTR(-EINVAL); - else if (!atomic_inc_not_zero(&mc->ctx->ref)) + else if (!refcount_inc_not_zero(&mc->ctx->ref)) mc = ERR_PTR(-ENXIO); else __xa_erase(&multicast_table, mc->id); -- cgit v1.2.3-58-ga151 From 779820c2e1e9251ddfdce5dd43b0bba30cd22271 Mon Sep 17 00:00:00 2001 From: Selvin Xavier Date: Wed, 19 Feb 2020 02:19:53 -0800 Subject: RDMA/core: Add helper function to retrieve driver gid context from gid attr Adding a helper function to retrieve the driver gid context from the gid attr. Link: https://lore.kernel.org/r/1582107594-5180-2-git-send-email-selvin.xavier@broadcom.com Suggested-by: Jason Gunthorpe Signed-off-by: Selvin Xavier Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cache.c | 17 +++++++++++++++++ include/rdma/ib_cache.h | 1 + 2 files changed, 18 insertions(+) diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index 17bfedd24cc3..9bbdace3c809 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -972,6 +972,23 @@ done: } EXPORT_SYMBOL(rdma_query_gid); +/** + * rdma_read_gid_hw_context - Read the HW GID context from GID attribute + * @attr: Potinter to the GID attribute + * + * rdma_read_gid_hw_context() reads the drivers GID HW context corresponding + * to the SGID attr. Callers are required to already be holding the reference + * to an existing GID entry. 
+ * + * Returns the HW GID context + * + */ +void *rdma_read_gid_hw_context(const struct ib_gid_attr *attr) +{ + return container_of(attr, struct ib_gid_table_entry, attr)->context; +} +EXPORT_SYMBOL(rdma_read_gid_hw_context); + /** * rdma_find_gid - Returns SGID attributes if the matching GID is found. * @device: The device to query. diff --git a/include/rdma/ib_cache.h b/include/rdma/ib_cache.h index 870b5e6c06db..e06d13388ae7 100644 --- a/include/rdma/ib_cache.h +++ b/include/rdma/ib_cache.h @@ -39,6 +39,7 @@ int rdma_query_gid(struct ib_device *device, u8 port_num, int index, union ib_gid *gid); +void *rdma_read_gid_hw_context(const struct ib_gid_attr *attr); const struct ib_gid_attr *rdma_find_gid(struct ib_device *device, const union ib_gid *gid, enum ib_gid_type gid_type, -- cgit v1.2.3-58-ga151 From 0a01623b74d41d91a595cbeb29e1a03648aec087 Mon Sep 17 00:00:00 2001 From: Selvin Xavier Date: Wed, 19 Feb 2020 02:19:54 -0800 Subject: RDMA/bnxt_re: Use rdma_read_gid_hw_context to retrieve HW gid index bnxt_re HW maintains a GID table with only a single entry for the two duplicate GID entries (v1 and v2). Driver needs to map stack gid index to the HW table gid index. Use the new API rdma_read_gid_hw_context () to retrieve the HW GID context to get the HW table index. Link: https://lore.kernel.org/r/1582107594-5180-3-git-send-email-selvin.xavier@broadcom.com Signed-off-by: Selvin Xavier Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/bnxt_re/ib_verbs.c | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 52b6a4d85460..18579e8d630c 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -639,6 +639,7 @@ int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr, const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr); struct bnxt_re_dev *rdev = pd->rdev; const struct ib_gid_attr *sgid_attr; + struct bnxt_re_gid_ctx *ctx; struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah); u8 nw_type; int rc; @@ -654,19 +655,18 @@ int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr, /* Supply the configuration for the HW */ memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw, sizeof(union ib_gid)); - /* - * If RoCE V2 is enabled, stack will have two entries for - * each GID entry. Avoiding this duplicte entry in HW. Dividing - * the GID index by 2 for RoCE V2 + sgid_attr = grh->sgid_attr; + /* Get the HW context of the GID. The reference + * of GID table entry is already taken by the caller. 
*/ - ah->qplib_ah.sgid_index = grh->sgid_index / 2; + ctx = rdma_read_gid_hw_context(sgid_attr); + ah->qplib_ah.sgid_index = ctx->idx; ah->qplib_ah.host_sgid_index = grh->sgid_index; ah->qplib_ah.traffic_class = grh->traffic_class; ah->qplib_ah.flow_label = grh->flow_label; ah->qplib_ah.hop_limit = grh->hop_limit; ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr); - sgid_attr = grh->sgid_attr; /* Get network header type for this GID */ nw_type = rdma_gid_attr_network_type(sgid_attr); ah->qplib_ah.nw_type = bnxt_re_stack_to_dev_nw_type(nw_type); @@ -1593,6 +1593,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, const struct ib_global_route *grh = rdma_ah_read_grh(&qp_attr->ah_attr); const struct ib_gid_attr *sgid_attr; + struct bnxt_re_gid_ctx *ctx; qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID | CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL | @@ -1604,11 +1605,12 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw, sizeof(qp->qplib_qp.ah.dgid.data)); qp->qplib_qp.ah.flow_label = grh->flow_label; - /* If RoCE V2 is enabled, stack will have two entries for - * each GID entry. Avoiding this duplicte entry in HW. Dividing - * the GID index by 2 for RoCE V2 + sgid_attr = grh->sgid_attr; + /* Get the HW context of the GID. The reference + * of GID table entry is already taken by the caller. */ - qp->qplib_qp.ah.sgid_index = grh->sgid_index / 2; + ctx = rdma_read_gid_hw_context(sgid_attr); + qp->qplib_qp.ah.sgid_index = ctx->idx; qp->qplib_qp.ah.host_sgid_index = grh->sgid_index; qp->qplib_qp.ah.hop_limit = grh->hop_limit; qp->qplib_qp.ah.traffic_class = grh->traffic_class; @@ -1616,7 +1618,6 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, ether_addr_copy(qp->qplib_qp.ah.dmac, qp_attr->ah_attr.roce.dmac); - sgid_attr = qp_attr->ah_attr.grh.sgid_attr; rc = rdma_read_gid_l2_fields(sgid_attr, NULL, &qp->qplib_qp.smac[0]); if (rc) -- cgit v1.2.3-58-ga151 From fb3063d31995cc4cf1d47a406bb61d6fb1b1d58d Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Mon, 17 Feb 2020 12:57:14 -0800 Subject: RDMA/rxe: Fix configuration of atomic queue pair attributes From the comment above the definition of the roundup_pow_of_two() macro: The result is undefined when n == 0. Hence only pass positive values to roundup_pow_of_two(). 
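The shift exponent in the report below falls directly out of that undefined case: assuming the usual include/linux/log2.h implementation, the non-constant path is roughly the sketch here, so n == 0 wraps to ULONG_MAX and turns into a 64-bit shift by 64. Guarding the argument before the call, as the hunks further down do, keeps that path from ever being reached.

	/* simplified sketch of the log2.h helper, for illustration only */
	static inline unsigned long __roundup_pow_of_two(unsigned long n)
	{
		/* for n == 0, fls_long(ULONG_MAX) is 64 on 64-bit, so this
		 * becomes 1UL << 64, the undefined shift UBSAN flags */
		return 1UL << fls_long(n - 1);
	}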
This patch fixes the following UBSAN complaint: UBSAN: Undefined behaviour in ./include/linux/log2.h:57:13 shift exponent 64 is too large for 64-bit type 'long unsigned int' Call Trace: dump_stack+0xa5/0xe6 ubsan_epilogue+0x9/0x26 __ubsan_handle_shift_out_of_bounds.cold+0x4c/0xf9 rxe_qp_from_attr.cold+0x37/0x5d [rdma_rxe] rxe_modify_qp+0x59/0x70 [rdma_rxe] _ib_modify_qp+0x5aa/0x7c0 [ib_core] ib_modify_qp+0x3b/0x50 [ib_core] cma_modify_qp_rtr+0x234/0x260 [rdma_cm] __rdma_accept+0x1a7/0x650 [rdma_cm] nvmet_rdma_cm_handler+0x1286/0x14cd [nvmet_rdma] cma_cm_event_handler+0x6b/0x330 [rdma_cm] cma_ib_req_handler+0xe60/0x22d0 [rdma_cm] cm_process_work+0x30/0x140 [ib_cm] cm_req_handler+0x11f4/0x1cd0 [ib_cm] cm_work_handler+0xb8/0x344e [ib_cm] process_one_work+0x569/0xb60 worker_thread+0x7a/0x5d0 kthread+0x1e6/0x210 ret_from_fork+0x24/0x30 Link: https://lore.kernel.org/r/20200217205714.26937-1-bvanassche@acm.org Fixes: 8700e3e7c485 ("Soft RoCE driver") Signed-off-by: Bart Van Assche Reviewed-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/sw/rxe/rxe_qp.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c index ec21f616ac98..6c11c3aeeca6 100644 --- a/drivers/infiniband/sw/rxe/rxe_qp.c +++ b/drivers/infiniband/sw/rxe/rxe_qp.c @@ -590,15 +590,16 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask, int err; if (mask & IB_QP_MAX_QP_RD_ATOMIC) { - int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic); + int max_rd_atomic = attr->max_rd_atomic ? + roundup_pow_of_two(attr->max_rd_atomic) : 0; qp->attr.max_rd_atomic = max_rd_atomic; atomic_set(&qp->req.rd_atomic, max_rd_atomic); } if (mask & IB_QP_MAX_DEST_RD_ATOMIC) { - int max_dest_rd_atomic = - __roundup_pow_of_two(attr->max_dest_rd_atomic); + int max_dest_rd_atomic = attr->max_dest_rd_atomic ? + roundup_pow_of_two(attr->max_dest_rd_atomic) : 0; qp->attr.max_dest_rd_atomic = max_dest_rd_atomic; -- cgit v1.2.3-58-ga151 From 8d8d2b76ac3545cf6c0e9c6aa61700db98d2496a Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 14 Feb 2020 00:33:38 +0000 Subject: RDMA/hns: fix spelling mistake: "attatch" -> "attach" There is a spelling mistake in a dev_err error message. Fix it. Link: https://lore.kernel.org/r/20200214003338.6573-1-colin.king@canonical.com Signed-off-by: Colin Ian King Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index aa9d1792e0cd..66c1cfeba768 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -5783,7 +5783,7 @@ static int map_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq, page_shift); ret = hns_roce_mtr_attach(hr_dev, &eq->mtr, &buf_list, ®ion, 1); if (ret) - dev_err(hr_dev->dev, "mtr attatch error for eqe\n"); + dev_err(hr_dev->dev, "mtr attach error for eqe\n"); goto done; -- cgit v1.2.3-58-ga151 From 52c5e9e7497b728b53a84cbd5873c4b707d10d55 Mon Sep 17 00:00:00 2001 From: Lang Cheng Date: Thu, 20 Feb 2020 09:34:31 +0800 Subject: RDMA/hns: Initialize all fields of doorbells to zero Prevent uninitialized fields when new fields are added, and make code look simpler. 
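The change relies on plain C aggregate initialization; a tiny self-contained sketch (the struct name and fields are hypothetical stand-ins, not the real hns_roce doorbell layout):

	#include <linux/types.h>	/* u32 */

	struct db_sketch {
		u32 u32_4;
		u32 u32_8;
	};

	static void build_db_example(void)
	{
		struct db_sketch db = {};	/* empty initializer zeroes every member */

		/* any field added to db_sketch later also starts at zero here,
		 * with no need to revisit each open-coded clear */
		db.u32_4 = 1;
	}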
Link: https://lore.kernel.org/r/1582162471-50361-1-git-send-email-liweihang@huawei.com Signed-off-by: Lang Cheng Signed-off-by: Weihang Li Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 9 ++------- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 5 +---- 2 files changed, 3 insertions(+), 11 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index c6e66586e533..89dac44b3cef 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -69,7 +69,7 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp, struct hns_roce_wqe_data_seg *dseg = NULL; struct hns_roce_qp *qp = to_hr_qp(ibqp); struct device *dev = &hr_dev->pdev->dev; - struct hns_roce_sq_db sq_db; + struct hns_roce_sq_db sq_db = {}; int ps_opcode = 0, i = 0; unsigned long flags = 0; void *wqe = NULL; @@ -318,8 +318,6 @@ out: /* Memory barrier */ wmb(); - sq_db.u32_4 = 0; - sq_db.u32_8 = 0; roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SQ_HEAD_M, SQ_DOORBELL_U32_4_SQ_HEAD_S, (qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1))); @@ -351,7 +349,7 @@ static int hns_roce_v1_post_recv(struct ib_qp *ibqp, struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct device *dev = &hr_dev->pdev->dev; - struct hns_roce_rq_db rq_db; + struct hns_roce_rq_db rq_db = {}; __le32 doorbell[2] = {0}; unsigned long flags = 0; unsigned int wqe_idx; @@ -418,9 +416,6 @@ out: ROCEE_QP1C_CFG3_0_REG + QP1C_CFGN_OFFSET * hr_qp->phy_port, reg_val); } else { - rq_db.u32_4 = 0; - rq_db.u32_8 = 0; - roce_set_field(rq_db.u32_4, RQ_DOORBELL_U32_4_RQ_HEAD_M, RQ_DOORBELL_U32_4_RQ_HEAD_S, hr_qp->rq.head); diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 66c1cfeba768..dee1cc8ffb42 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -255,7 +255,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct hns_roce_qp *qp = to_hr_qp(ibqp); struct hns_roce_wqe_frmr_seg *fseg; struct device *dev = hr_dev->dev; - struct hns_roce_v2_db sq_db; + struct hns_roce_v2_db sq_db = {}; unsigned int owner_bit; unsigned int sge_idx; unsigned int wqe_idx; @@ -583,9 +583,6 @@ out: /* Memory barrier */ wmb(); - sq_db.byte_4 = 0; - sq_db.parameter = 0; - roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_TAG_M, V2_DB_BYTE_4_TAG_S, qp->doorbell_qpn); roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_CMD_M, -- cgit v1.2.3-58-ga151 From 5b361328ca649534d721e4eae20c96ccbe702ce7 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Wed, 12 Feb 2020 19:04:25 -0600 Subject: RDMA: Replace zero-length array with flexible-array member The current codebase makes use of the zero-length array language extension to the C90 standard, but the preferred mechanism to declare variable-length types such as these ones is a flexible array member[1][2], introduced in C99: struct foo { int stuff; struct boo array[]; }; By making use of the mechanism above, we will get a compiler warning in case the flexible array does not occur last in the structure, which will help us prevent some kind of undefined behavior bugs from being inadvertently introduced[3] to the codebase from now on. Also, notice that, dynamic memory allocations won't be affected by this change: "Flexible array members have incomplete type, and so the sizeof operator may not be applied. 
As a quirk of the original implementation of zero-length arrays, sizeof evaluates to zero."[1] This issue was found with the help of Coccinelle. [1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html [2] https://github.com/KSPP/linux/issues/21 [3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour") Link: https://lore.kernel.org/r/20200213010425.GA13068@embeddedor.com Signed-off-by: Gustavo A. R. Silva Signed-off-by: Jason Gunthorpe # added a few more --- drivers/infiniband/core/cache.c | 2 +- drivers/infiniband/core/cm.c | 4 ++-- drivers/infiniband/core/mad_priv.h | 4 ++-- drivers/infiniband/core/multicast.c | 2 +- drivers/infiniband/core/sa_query.c | 2 +- drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 4 ++-- drivers/infiniband/hw/cxgb4/t4fw_ri_api.h | 8 ++++---- drivers/infiniband/hw/hfi1/mad.c | 4 ++-- drivers/infiniband/hw/hfi1/mad.h | 2 +- drivers/infiniband/hw/hfi1/pio.h | 4 ++-- drivers/infiniband/hw/hfi1/sdma.c | 2 +- drivers/infiniband/hw/hfi1/sdma.h | 4 ++-- drivers/infiniband/hw/hfi1/user_exp_rcv.h | 2 +- drivers/infiniband/hw/i40iw/i40iw_cm.h | 4 ++-- drivers/infiniband/hw/mthca/mthca_memfree.c | 2 +- drivers/infiniband/hw/mthca/mthca_memfree.h | 2 +- drivers/infiniband/hw/usnic/usnic_uiom.h | 2 +- drivers/infiniband/sw/rxe/rxe_queue.h | 2 +- drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h | 4 ++-- drivers/infiniband/ulp/srp/ib_srp.h | 2 +- include/rdma/ib_fmr_pool.h | 2 +- include/rdma/ib_verbs.h | 18 +++++++++--------- include/rdma/opa_vnic.h | 2 +- include/rdma/rdmavt_mr.h | 2 +- include/rdma/rdmavt_qp.h | 2 +- 25 files changed, 44 insertions(+), 44 deletions(-) diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index 9bbdace3c809..717b798cddad 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -46,7 +46,7 @@ struct ib_pkey_cache { int table_len; - u16 table[0]; + u16 table[]; }; struct ib_update_work { diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 33c0d9e7bb66..aec6867f0ed2 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -197,7 +197,7 @@ struct cm_device { struct ib_device *ib_device; u8 ack_delay; int going_down; - struct cm_port *port[0]; + struct cm_port *port[]; }; struct cm_av { @@ -216,7 +216,7 @@ struct cm_work { __be32 local_id; /* Established / timewait */ __be32 remote_id; struct ib_cm_event cm_event; - struct sa_path_rec path[0]; + struct sa_path_rec path[]; }; struct cm_timewait_info { diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h index 956b3a7dfed7..403d8673a2f9 100644 --- a/drivers/infiniband/core/mad_priv.h +++ b/drivers/infiniband/core/mad_priv.h @@ -79,13 +79,13 @@ struct ib_mad_private { struct ib_mad_private_header header; size_t mad_size; struct ib_grh grh; - u8 mad[0]; + u8 mad[]; } __packed; struct ib_rmpp_segment { struct list_head list; u32 num; - u8 data[0]; + u8 data[]; }; struct ib_mad_agent_private { diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c index cd338ddc4a39..9c2d8b7f1af9 100644 --- a/drivers/infiniband/core/multicast.c +++ b/drivers/infiniband/core/multicast.c @@ -71,7 +71,7 @@ struct mcast_device { struct ib_event_handler event_handler; int start_port; int end_port; - struct mcast_port port[0]; + struct mcast_port port[]; }; enum mcast_state { diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index 30d4c126a2db..74e0058fcf9e 100644 --- a/drivers/infiniband/core/sa_query.c +++ 
b/drivers/infiniband/core/sa_query.c @@ -101,7 +101,7 @@ struct ib_sa_port { struct ib_sa_device { int start_port, end_port; struct ib_event_handler event_handler; - struct ib_sa_port port[0]; + struct ib_sa_port port[]; }; struct ib_sa_query { diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index 7d06b0f8d49a..e8e11bd95e42 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -707,7 +707,7 @@ struct mpa_message { u8 flags; u8 revision; __be16 private_data_size; - u8 private_data[0]; + u8 private_data[]; }; struct mpa_v2_conn_params { @@ -719,7 +719,7 @@ struct terminate_message { u8 layer_etype; u8 ecode; __be16 hdrct_rsvd; - u8 len_hdrs[0]; + u8 len_hdrs[]; }; #define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28) diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h index cbdb300a4794..a2f5e29ef226 100644 --- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h +++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h @@ -123,7 +123,7 @@ struct fw_ri_dsgl { __be32 len0; __be64 addr0; #ifndef C99_NOT_SUPPORTED - struct fw_ri_dsge_pair sge[0]; + struct fw_ri_dsge_pair sge[]; #endif }; @@ -139,7 +139,7 @@ struct fw_ri_isgl { __be16 nsge; __be32 r2; #ifndef C99_NOT_SUPPORTED - struct fw_ri_sge sge[0]; + struct fw_ri_sge sge[]; #endif }; @@ -149,7 +149,7 @@ struct fw_ri_immd { __be16 r2; __be32 immdlen; #ifndef C99_NOT_SUPPORTED - __u8 data[0]; + __u8 data[]; #endif }; @@ -321,7 +321,7 @@ struct fw_ri_res_wr { __be32 len16_pkd; __u64 cookie; #ifndef C99_NOT_SUPPORTED - struct fw_ri_res res[0]; + struct fw_ri_res res[]; #endif }; diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c index a51bcd2b4391..7073f237a949 100644 --- a/drivers/infiniband/hw/hfi1/mad.c +++ b/drivers/infiniband/hw/hfi1/mad.c @@ -2381,7 +2381,7 @@ struct opa_port_status_rsp { __be64 port_vl_rcv_bubble; __be64 port_vl_mark_fecn; __be64 port_vl_xmit_discards; - } vls[0]; /* real array size defined by # bits set in vl_select_mask */ + } vls[]; /* real array size defined by # bits set in vl_select_mask */ }; enum counter_selects { @@ -2423,7 +2423,7 @@ struct opa_aggregate { __be16 attr_id; __be16 err_reqlength; /* 1 bit, 8 res, 7 bit */ __be32 attr_mod; - u8 data[0]; + u8 data[]; }; #define MSK_LLI 0x000000f0 diff --git a/drivers/infiniband/hw/hfi1/mad.h b/drivers/infiniband/hw/hfi1/mad.h index 2f48e6953629..889e63d3f2cc 100644 --- a/drivers/infiniband/hw/hfi1/mad.h +++ b/drivers/infiniband/hw/hfi1/mad.h @@ -165,7 +165,7 @@ struct opa_mad_notice_attr { } __packed ntc_2048; }; - u8 class_data[0]; + u8 class_data[]; }; #define IB_VLARB_LOWPRI_0_31 1 diff --git a/drivers/infiniband/hw/hfi1/pio.h b/drivers/infiniband/hw/hfi1/pio.h index c9a58b642bdd..0102262343c0 100644 --- a/drivers/infiniband/hw/hfi1/pio.h +++ b/drivers/infiniband/hw/hfi1/pio.h @@ -243,7 +243,7 @@ struct sc_config_sizes { */ struct pio_map_elem { u32 mask; - struct send_context *ksc[0]; + struct send_context *ksc[]; }; /* @@ -263,7 +263,7 @@ struct pio_vl_map { u32 mask; u8 actual_vls; u8 vls; - struct pio_map_elem *map[0]; + struct pio_map_elem *map[]; }; int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c index a51525647ac8..c93ea021cf49 100644 --- a/drivers/infiniband/hw/hfi1/sdma.c +++ b/drivers/infiniband/hw/hfi1/sdma.c @@ -833,7 +833,7 @@ struct sdma_engine *sdma_select_engine_sc( struct 
sdma_rht_map_elem { u32 mask; u8 ctr; - struct sdma_engine *sde[0]; + struct sdma_engine *sde[]; }; struct sdma_rht_node { diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h index 1e2e40f79cb2..7a851191f987 100644 --- a/drivers/infiniband/hw/hfi1/sdma.h +++ b/drivers/infiniband/hw/hfi1/sdma.h @@ -1002,7 +1002,7 @@ void sdma_engine_interrupt(struct sdma_engine *sde, u64 status); */ struct sdma_map_elem { u32 mask; - struct sdma_engine *sde[0]; + struct sdma_engine *sde[]; }; /** @@ -1024,7 +1024,7 @@ struct sdma_vl_map { u32 mask; u8 actual_vls; u8 vls; - struct sdma_map_elem *map[0]; + struct sdma_map_elem *map[]; }; int sdma_map_init( diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.h b/drivers/infiniband/hw/hfi1/user_exp_rcv.h index 6257eee083a1..332abb446861 100644 --- a/drivers/infiniband/hw/hfi1/user_exp_rcv.h +++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.h @@ -73,7 +73,7 @@ struct tid_rb_node { dma_addr_t dma_addr; bool freed; unsigned int npages; - struct page *pages[0]; + struct page *pages[]; }; static inline int num_user_pages(unsigned long addr, diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.h b/drivers/infiniband/hw/i40iw/i40iw_cm.h index 66dc1ba03389..6e43e4d730f4 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_cm.h +++ b/drivers/infiniband/hw/i40iw/i40iw_cm.h @@ -85,7 +85,7 @@ struct ietf_mpa_v1 { u8 flags; u8 rev; __be16 priv_data_len; - u8 priv_data[0]; + u8 priv_data[]; }; #define ietf_mpa_req_resp_frame ietf_mpa_frame @@ -101,7 +101,7 @@ struct ietf_mpa_v2 { u8 rev; __be16 priv_data_len; struct ietf_rtr_msg rtr_msg; - u8 priv_data[0]; + u8 priv_data[]; }; struct i40iw_cm_node; diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c index 78a48aea3faf..fa808582b08b 100644 --- a/drivers/infiniband/hw/mthca/mthca_memfree.c +++ b/drivers/infiniband/hw/mthca/mthca_memfree.c @@ -58,7 +58,7 @@ struct mthca_user_db_table { u64 uvirt; struct scatterlist mem; int refcount; - } page[0]; + } page[]; }; static void mthca_free_icm_pages(struct mthca_dev *dev, struct mthca_icm_chunk *chunk) diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.h b/drivers/infiniband/hw/mthca/mthca_memfree.h index da9b8f9b884f..f9a2e65e2ff5 100644 --- a/drivers/infiniband/hw/mthca/mthca_memfree.h +++ b/drivers/infiniband/hw/mthca/mthca_memfree.h @@ -68,7 +68,7 @@ struct mthca_icm_table { int lowmem; int coherent; struct mutex mutex; - struct mthca_icm *icm[0]; + struct mthca_icm *icm[]; }; struct mthca_icm_iter { diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.h b/drivers/infiniband/hw/usnic/usnic_uiom.h index 70be49b1ca05..7ec8991ace67 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom.h +++ b/drivers/infiniband/hw/usnic/usnic_uiom.h @@ -77,7 +77,7 @@ struct usnic_uiom_reg { struct usnic_uiom_chunk { struct list_head list; int nents; - struct scatterlist page_list[0]; + struct scatterlist page_list[]; }; struct usnic_uiom_pd *usnic_uiom_alloc_pd(void); diff --git a/drivers/infiniband/sw/rxe/rxe_queue.h b/drivers/infiniband/sw/rxe/rxe_queue.h index acd0a925481c..8ef17d617022 100644 --- a/drivers/infiniband/sw/rxe/rxe_queue.h +++ b/drivers/infiniband/sw/rxe/rxe_queue.h @@ -63,7 +63,7 @@ struct rxe_queue_buf { __u32 pad_2[31]; __u32 consumer_index; __u32 pad_3[31]; - __u8 data[0]; + __u8 data[]; }; struct rxe_queue { diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h index 4480092c68e0..0b3570dc606d 100644 --- 
a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h +++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h @@ -258,7 +258,7 @@ struct opa_veswport_mactable { __be16 offset; __be16 num_entries; __be32 mac_tbl_digest; - struct opa_veswport_mactable_entry tbl_entries[0]; + struct opa_veswport_mactable_entry tbl_entries[]; } __packed; /** @@ -440,7 +440,7 @@ struct opa_veswport_iface_macs { __be16 num_macs_in_msg; __be16 tot_macs_in_lst; __be16 gen_count; - struct opa_vnic_iface_mac_entry entry[0]; + struct opa_vnic_iface_mac_entry entry[]; } __packed; /** diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h index 5359ece561ca..6fabcc2faf1f 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.h +++ b/drivers/infiniband/ulp/srp/ib_srp.h @@ -309,7 +309,7 @@ struct srp_fr_pool { int max_page_list_len; spinlock_t lock; struct list_head free_list; - struct srp_fr_desc desc[0]; + struct srp_fr_desc desc[]; }; /** diff --git a/include/rdma/ib_fmr_pool.h b/include/rdma/ib_fmr_pool.h index f8982e4e9702..2fd9bfb6d648 100644 --- a/include/rdma/ib_fmr_pool.h +++ b/include/rdma/ib_fmr_pool.h @@ -73,7 +73,7 @@ struct ib_pool_fmr { int remap_count; u64 io_virtual_address; int page_list_len; - u64 page_list[0]; + u64 page_list[]; }; struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd, diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 5f3a04ead9f5..bbc5cfb57cd2 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1876,7 +1876,7 @@ struct ib_flow_eth_filter { __be16 ether_type; __be16 vlan_tag; /* Must be last */ - u8 real_sz[0]; + u8 real_sz[]; }; struct ib_flow_spec_eth { @@ -1890,7 +1890,7 @@ struct ib_flow_ib_filter { __be16 dlid; __u8 sl; /* Must be last */ - u8 real_sz[0]; + u8 real_sz[]; }; struct ib_flow_spec_ib { @@ -1915,7 +1915,7 @@ struct ib_flow_ipv4_filter { u8 ttl; u8 flags; /* Must be last */ - u8 real_sz[0]; + u8 real_sz[]; }; struct ib_flow_spec_ipv4 { @@ -1933,7 +1933,7 @@ struct ib_flow_ipv6_filter { u8 traffic_class; u8 hop_limit; /* Must be last */ - u8 real_sz[0]; + u8 real_sz[]; }; struct ib_flow_spec_ipv6 { @@ -1947,7 +1947,7 @@ struct ib_flow_tcp_udp_filter { __be16 dst_port; __be16 src_port; /* Must be last */ - u8 real_sz[0]; + u8 real_sz[]; }; struct ib_flow_spec_tcp_udp { @@ -1959,7 +1959,7 @@ struct ib_flow_spec_tcp_udp { struct ib_flow_tunnel_filter { __be32 tunnel_id; - u8 real_sz[0]; + u8 real_sz[]; }; /* ib_flow_spec_tunnel describes the Vxlan tunnel @@ -1976,7 +1976,7 @@ struct ib_flow_esp_filter { __be32 spi; __be32 seq; /* Must be last */ - u8 real_sz[0]; + u8 real_sz[]; }; struct ib_flow_spec_esp { @@ -1991,7 +1991,7 @@ struct ib_flow_gre_filter { __be16 protocol; __be32 key; /* Must be last */ - u8 real_sz[0]; + u8 real_sz[]; }; struct ib_flow_spec_gre { @@ -2004,7 +2004,7 @@ struct ib_flow_spec_gre { struct ib_flow_mpls_filter { __be32 tag; /* Must be last */ - u8 real_sz[0]; + u8 real_sz[]; }; struct ib_flow_spec_mpls { diff --git a/include/rdma/opa_vnic.h b/include/rdma/opa_vnic.h index 0c07a70bd7f6..e90b149fe92a 100644 --- a/include/rdma/opa_vnic.h +++ b/include/rdma/opa_vnic.h @@ -75,7 +75,7 @@ struct opa_vnic_rdma_netdev { struct rdma_netdev rn; /* keep this first */ /* followed by device private data */ - char *dev_priv[0]; + char *dev_priv[]; }; static inline void *opa_vnic_priv(const struct net_device *dev) diff --git a/include/rdma/rdmavt_mr.h b/include/rdma/rdmavt_mr.h index 72a3856d4057..ce6c888f7fe7 100644 --- a/include/rdma/rdmavt_mr.h +++ b/include/rdma/rdmavt_mr.h @@ -85,7 +85,7 @@ struct 
rvt_mregion { u8 lkey_published; /* in global table */ struct percpu_ref refcount; struct completion comp; /* complete when refcount goes to zero */ - struct rvt_segarray *map[0]; /* the segments */ + struct rvt_segarray *map[]; /* the segments */ }; #define RVT_MAX_LKEY_TABLE_BITS 23 diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h index 0d5c70e2d8ab..5fc10108703a 100644 --- a/include/rdma/rdmavt_qp.h +++ b/include/rdma/rdmavt_qp.h @@ -191,7 +191,7 @@ struct rvt_swqe { u32 ssn; /* send sequence number */ u32 length; /* total length of data in sg_list */ void *priv; /* driver dependent field */ - struct rvt_sge sg_list[0]; + struct rvt_sge sg_list[]; }; /** -- cgit v1.2.3-58-ga151 From 8dae419f9ec730c1984ea7395067a2534780ada1 Mon Sep 17 00:00:00 2001 From: Devesh Sharma Date: Sat, 15 Feb 2020 12:10:58 -0500 Subject: RDMA/bnxt_re: Refactor queue pair creation code Restructuring the bnxt_re_create_qp function. Listing below the major changes: - Monolithic central part of create_qp where attributes are initialized is now enclosed in one function and this new function has few more sub-functions. - Top level qp limit checking code moved to a function. - GSI QP creation and GSI Shadow qp creation code is handled in a sub function. Link: https://lore.kernel.org/r/1581786665-23705-2-git-send-email-devesh.sharma@broadcom.com Signed-off-by: Naresh Kumar PBS Signed-off-by: Selvin Xavier Signed-off-by: Devesh Sharma Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/bnxt_re/bnxt_re.h | 13 +- drivers/infiniband/hw/bnxt_re/ib_verbs.c | 629 ++++++++++++++++++++----------- drivers/infiniband/hw/bnxt_re/main.c | 3 +- 3 files changed, 428 insertions(+), 217 deletions(-) diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h index 725b2350e349..c2805384e832 100644 --- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h +++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h @@ -104,6 +104,14 @@ struct bnxt_re_sqp_entries { struct bnxt_re_qp *qp1_qp; }; +#define BNXT_RE_MAX_GSI_SQP_ENTRIES 1024 +struct bnxt_re_gsi_context { + struct bnxt_re_qp *gsi_qp; + struct bnxt_re_qp *gsi_sqp; + struct bnxt_re_ah *gsi_sah; + struct bnxt_re_sqp_entries *sqp_tbl; +}; + #define BNXT_RE_MIN_MSIX 2 #define BNXT_RE_MAX_MSIX 9 #define BNXT_RE_AEQ_IDX 0 @@ -165,10 +173,7 @@ struct bnxt_re_dev { u16 cosq[2]; /* QP for for handling QP1 packets */ - u32 sqp_id; - struct bnxt_re_qp *qp1_sqp; - struct bnxt_re_ah *sqp_ah; - struct bnxt_re_sqp_entries sqp_tbl[1024]; + struct bnxt_re_gsi_context gsi_ctx; atomic_t nq_alloc_cnt; u32 is_virtfn; u32 num_vfs; diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 18579e8d630c..c37f0b1df475 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -312,7 +312,7 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context) */ if (ctx->idx == 0 && rdma_link_local_addr((struct in6_addr *)gid_to_del) && - ctx->refcnt == 1 && rdev->qp1_sqp) { + ctx->refcnt == 1 && rdev->gsi_ctx.gsi_sqp) { dev_dbg(rdev_to_dev(rdev), "Trying to delete GID0 while QP1 is alive\n"); return -EFAULT; @@ -742,6 +742,49 @@ void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, spin_unlock_irqrestore(&qp->scq->cq_lock, flags); } +static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp) +{ + struct bnxt_re_qp *gsi_sqp; + struct bnxt_re_ah *gsi_sah; + struct bnxt_re_dev *rdev; + int rc = 0; + + rdev = qp->rdev; + gsi_sqp = rdev->gsi_ctx.gsi_sqp; + gsi_sah = rdev->gsi_ctx.gsi_sah; + + /* 
remove from active qp list */ + mutex_lock(&rdev->qp_lock); + list_del(&gsi_sqp->list); + mutex_unlock(&rdev->qp_lock); + atomic_dec(&rdev->qp_count); + + dev_dbg(rdev_to_dev(rdev), "Destroy the shadow AH\n"); + bnxt_qplib_destroy_ah(&rdev->qplib_res, + &gsi_sah->qplib_ah, + true); + bnxt_qplib_clean_qp(&qp->qplib_qp); + + dev_dbg(rdev_to_dev(rdev), "Destroy the shadow QP\n"); + rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp); + if (rc) { + dev_err(rdev_to_dev(rdev), "Destroy Shadow QP failed"); + goto fail; + } + bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp); + + kfree(rdev->gsi_ctx.sqp_tbl); + kfree(gsi_sah); + kfree(gsi_sqp); + rdev->gsi_ctx.gsi_sqp = NULL; + rdev->gsi_ctx.gsi_sah = NULL; + rdev->gsi_ctx.sqp_tbl = NULL; + + return 0; +fail: + return rc; +} + /* Queue Pairs */ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata) { @@ -750,7 +793,13 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata) unsigned int flags; int rc; + mutex_lock(&rdev->qp_lock); + list_del(&qp->list); + mutex_unlock(&rdev->qp_lock); + atomic_dec(&rdev->qp_count); + bnxt_qplib_flush_cqn_wq(&qp->qplib_qp); + rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp); if (rc) { dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP"); @@ -765,40 +814,19 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata) bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp); - if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) { - bnxt_qplib_destroy_ah(&rdev->qplib_res, &rdev->sqp_ah->qplib_ah, - false); - - bnxt_qplib_clean_qp(&qp->qplib_qp); - rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, - &rdev->qp1_sqp->qplib_qp); - if (rc) { - dev_err(rdev_to_dev(rdev), - "Failed to destroy Shadow QP"); - return rc; - } - bnxt_qplib_free_qp_res(&rdev->qplib_res, - &rdev->qp1_sqp->qplib_qp); - mutex_lock(&rdev->qp_lock); - list_del(&rdev->qp1_sqp->list); - atomic_dec(&rdev->qp_count); - mutex_unlock(&rdev->qp_lock); - - kfree(rdev->sqp_ah); - kfree(rdev->qp1_sqp); - rdev->qp1_sqp = NULL; - rdev->sqp_ah = NULL; + if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) { + rc = bnxt_re_destroy_gsi_sqp(qp); + if (rc) + goto sh_fail; } ib_umem_release(qp->rumem); ib_umem_release(qp->sumem); - mutex_lock(&rdev->qp_lock); - list_del(&qp->list); - atomic_dec(&rdev->qp_count); - mutex_unlock(&rdev->qp_lock); kfree(qp); return 0; +sh_fail: + return rc; } static u8 __from_ib_qp_type(enum ib_qp_type type) @@ -967,8 +995,6 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp if (rc) goto fail; - rdev->sqp_id = qp->qplib_qp.id; - spin_lock_init(&qp->sq_lock); INIT_LIST_HEAD(&qp->list); mutex_lock(&rdev->qp_lock); @@ -981,205 +1007,377 @@ fail: return NULL; } -struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, - struct ib_qp_init_attr *qp_init_attr, - struct ib_udata *udata) +static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp, + struct ib_qp_init_attr *init_attr) { - struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); - struct bnxt_re_dev *rdev = pd->rdev; - struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr; - struct bnxt_re_qp *qp; - struct bnxt_re_cq *cq; - struct bnxt_re_srq *srq; - int rc, entries; + struct bnxt_qplib_dev_attr *dev_attr; + struct bnxt_qplib_qp *qplqp; + struct bnxt_re_dev *rdev; + int entries; - if ((qp_init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) || - (qp_init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes) || - (qp_init_attr->cap.max_send_sge > dev_attr->max_qp_sges) || - (qp_init_attr->cap.max_recv_sge > 
dev_attr->max_qp_sges) || - (qp_init_attr->cap.max_inline_data > dev_attr->max_inline_data)) - return ERR_PTR(-EINVAL); + rdev = qp->rdev; + qplqp = &qp->qplib_qp; + dev_attr = &rdev->dev_attr; - qp = kzalloc(sizeof(*qp), GFP_KERNEL); - if (!qp) - return ERR_PTR(-ENOMEM); + if (init_attr->srq) { + struct bnxt_re_srq *srq; - qp->rdev = rdev; - ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr); - qp->qplib_qp.pd = &pd->qplib_pd; - qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp); - qp->qplib_qp.type = __from_ib_qp_type(qp_init_attr->qp_type); + srq = container_of(init_attr->srq, struct bnxt_re_srq, ib_srq); + if (!srq) { + dev_err(rdev_to_dev(rdev), "SRQ not found"); + return -EINVAL; + } + qplqp->srq = &srq->qplib_srq; + qplqp->rq.max_wqe = 0; + } else { + /* Allocate 1 more than what's provided so posting max doesn't + * mean empty. + */ + entries = roundup_pow_of_two(init_attr->cap.max_recv_wr + 1); + qplqp->rq.max_wqe = min_t(u32, entries, + dev_attr->max_qp_wqes + 1); - if (qp_init_attr->qp_type == IB_QPT_GSI && - bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx)) - qp->qplib_qp.type = CMDQ_CREATE_QP_TYPE_GSI; - if (qp->qplib_qp.type == IB_QPT_MAX) { + qplqp->rq.q_full_delta = qplqp->rq.max_wqe - + init_attr->cap.max_recv_wr; + qplqp->rq.max_sge = init_attr->cap.max_recv_sge; + if (qplqp->rq.max_sge > dev_attr->max_qp_sges) + qplqp->rq.max_sge = dev_attr->max_qp_sges; + } + + return 0; +} + +static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp) +{ + struct bnxt_qplib_dev_attr *dev_attr; + struct bnxt_qplib_qp *qplqp; + struct bnxt_re_dev *rdev; + + rdev = qp->rdev; + qplqp = &qp->qplib_qp; + dev_attr = &rdev->dev_attr; + + qplqp->rq.max_sge = dev_attr->max_qp_sges; + if (qplqp->rq.max_sge > dev_attr->max_qp_sges) + qplqp->rq.max_sge = dev_attr->max_qp_sges; +} + +static void bnxt_re_init_sq_attr(struct bnxt_re_qp *qp, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata) +{ + struct bnxt_qplib_dev_attr *dev_attr; + struct bnxt_qplib_qp *qplqp; + struct bnxt_re_dev *rdev; + int entries; + + rdev = qp->rdev; + qplqp = &qp->qplib_qp; + dev_attr = &rdev->dev_attr; + + qplqp->sq.max_sge = init_attr->cap.max_send_sge; + if (qplqp->sq.max_sge > dev_attr->max_qp_sges) + qplqp->sq.max_sge = dev_attr->max_qp_sges; + /* + * Change the SQ depth if user has requested minimum using + * configfs. Only supported for kernel consumers + */ + entries = init_attr->cap.max_send_wr; + /* Allocate 128 + 1 more than what's provided */ + entries = roundup_pow_of_two(entries + BNXT_QPLIB_RESERVED_QP_WRS + 1); + qplqp->sq.max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + + BNXT_QPLIB_RESERVED_QP_WRS + 1); + qplqp->sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1; + /* + * Reserving one slot for Phantom WQE. Application can + * post one extra entry in this case. 
But allowing this to avoid + * unexpected Queue full condition + */ + qplqp->sq.q_full_delta -= 1; +} + +static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp, + struct ib_qp_init_attr *init_attr) +{ + struct bnxt_qplib_dev_attr *dev_attr; + struct bnxt_qplib_qp *qplqp; + struct bnxt_re_dev *rdev; + int entries; + + rdev = qp->rdev; + qplqp = &qp->qplib_qp; + dev_attr = &rdev->dev_attr; + + entries = roundup_pow_of_two(init_attr->cap.max_send_wr + 1); + qplqp->sq.max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1); + qplqp->sq.q_full_delta = qplqp->sq.max_wqe - + init_attr->cap.max_send_wr; + qplqp->sq.max_sge++; /* Need one extra sge to put UD header */ + if (qplqp->sq.max_sge > dev_attr->max_qp_sges) + qplqp->sq.max_sge = dev_attr->max_qp_sges; +} + +static int bnxt_re_init_qp_type(struct bnxt_re_dev *rdev, + struct ib_qp_init_attr *init_attr) +{ + struct bnxt_qplib_chip_ctx *chip_ctx; + int qptype; + + chip_ctx = &rdev->chip_ctx; + + qptype = __from_ib_qp_type(init_attr->qp_type); + if (qptype == IB_QPT_MAX) { dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported", - qp->qplib_qp.type); - rc = -EINVAL; - goto fail; + qptype); + qptype = -EINVAL; + goto out; } - qp->qplib_qp.max_inline_data = qp_init_attr->cap.max_inline_data; - qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type == - IB_SIGNAL_ALL_WR) ? true : false); + if (bnxt_qplib_is_chip_gen_p5(chip_ctx) && + init_attr->qp_type == IB_QPT_GSI) + qptype = CMDQ_CREATE_QP_TYPE_GSI; +out: + return qptype; +} - qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge; - if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges) - qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges; +static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata) +{ + struct bnxt_qplib_dev_attr *dev_attr; + struct bnxt_qplib_qp *qplqp; + struct bnxt_re_dev *rdev; + struct bnxt_re_cq *cq; + int rc = 0, qptype; + + rdev = qp->rdev; + qplqp = &qp->qplib_qp; + dev_attr = &rdev->dev_attr; + + /* Setup misc params */ + ether_addr_copy(qplqp->smac, rdev->netdev->dev_addr); + qplqp->pd = &pd->qplib_pd; + qplqp->qp_handle = (u64)qplqp; + qplqp->max_inline_data = init_attr->cap.max_inline_data; + qplqp->sig_type = ((init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 
+ true : false); + qptype = bnxt_re_init_qp_type(rdev, init_attr); + if (qptype < 0) { + rc = qptype; + goto out; + } + qplqp->type = (u8)qptype; - if (qp_init_attr->send_cq) { - cq = container_of(qp_init_attr->send_cq, struct bnxt_re_cq, - ib_cq); + if (init_attr->qp_type == IB_QPT_RC) { + qplqp->max_rd_atomic = dev_attr->max_qp_rd_atom; + qplqp->max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom; + } + qplqp->mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu)); + qplqp->dpi = &rdev->dpi_privileged; /* Doorbell page */ + if (init_attr->create_flags) + dev_dbg(rdev_to_dev(rdev), + "QP create flags 0x%x not supported", + init_attr->create_flags); + + /* Setup CQs */ + if (init_attr->send_cq) { + cq = container_of(init_attr->send_cq, struct bnxt_re_cq, ib_cq); if (!cq) { dev_err(rdev_to_dev(rdev), "Send CQ not found"); rc = -EINVAL; - goto fail; + goto out; } - qp->qplib_qp.scq = &cq->qplib_cq; + qplqp->scq = &cq->qplib_cq; qp->scq = cq; } - if (qp_init_attr->recv_cq) { - cq = container_of(qp_init_attr->recv_cq, struct bnxt_re_cq, - ib_cq); + if (init_attr->recv_cq) { + cq = container_of(init_attr->recv_cq, struct bnxt_re_cq, ib_cq); if (!cq) { dev_err(rdev_to_dev(rdev), "Receive CQ not found"); rc = -EINVAL; - goto fail; + goto out; } - qp->qplib_qp.rcq = &cq->qplib_cq; + qplqp->rcq = &cq->qplib_cq; qp->rcq = cq; } - if (qp_init_attr->srq) { - srq = container_of(qp_init_attr->srq, struct bnxt_re_srq, - ib_srq); - if (!srq) { - dev_err(rdev_to_dev(rdev), "SRQ not found"); - rc = -EINVAL; - goto fail; - } - qp->qplib_qp.srq = &srq->qplib_srq; - qp->qplib_qp.rq.max_wqe = 0; - } else { - /* Allocate 1 more than what's provided so posting max doesn't - * mean empty - */ - entries = roundup_pow_of_two(qp_init_attr->cap.max_recv_wr + 1); - qp->qplib_qp.rq.max_wqe = min_t(u32, entries, - dev_attr->max_qp_wqes + 1); + /* Setup RQ/SRQ */ + rc = bnxt_re_init_rq_attr(qp, init_attr); + if (rc) + goto out; + if (init_attr->qp_type == IB_QPT_GSI) + bnxt_re_adjust_gsi_rq_attr(qp); - qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe - - qp_init_attr->cap.max_recv_wr; + /* Setup SQ */ + bnxt_re_init_sq_attr(qp, init_attr, udata); + if (init_attr->qp_type == IB_QPT_GSI) + bnxt_re_adjust_gsi_sq_attr(qp, init_attr); + + if (udata) /* This will update DPI and qp_handle */ + rc = bnxt_re_init_user_qp(rdev, pd, qp, udata); +out: + return rc; +} - qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge; - if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges) - qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; +static int bnxt_re_create_shadow_gsi(struct bnxt_re_qp *qp, + struct bnxt_re_pd *pd) +{ + struct bnxt_re_sqp_entries *sqp_tbl = NULL; + struct bnxt_re_dev *rdev; + struct bnxt_re_qp *sqp; + struct bnxt_re_ah *sah; + int rc = 0; + + rdev = qp->rdev; + /* Create a shadow QP to handle the QP1 traffic */ + sqp_tbl = kzalloc(sizeof(*sqp_tbl) * BNXT_RE_MAX_GSI_SQP_ENTRIES, + GFP_KERNEL); + if (!sqp_tbl) + return -ENOMEM; + rdev->gsi_ctx.sqp_tbl = sqp_tbl; + + sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res, &qp->qplib_qp); + if (!sqp) { + rc = -ENODEV; + dev_err(rdev_to_dev(rdev), + "Failed to create Shadow QP for QP1"); + goto out; } + rdev->gsi_ctx.gsi_sqp = sqp; - qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu)); + sqp->rcq = qp->rcq; + sqp->scq = qp->scq; + sah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res, + &qp->qplib_qp); + if (!sah) { + bnxt_qplib_destroy_qp(&rdev->qplib_res, + &sqp->qplib_qp); + rc = -ENODEV; + dev_err(rdev_to_dev(rdev), + "Failed to create AH entry 
for ShadowQP"); + goto out; + } + rdev->gsi_ctx.gsi_sah = sah; - if (qp_init_attr->qp_type == IB_QPT_GSI && - !(bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx))) { - /* Allocate 1 more than what's provided */ - entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1); - qp->qplib_qp.sq.max_wqe = min_t(u32, entries, - dev_attr->max_qp_wqes + 1); - qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe - - qp_init_attr->cap.max_send_wr; - qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; - if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges) - qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; - qp->qplib_qp.sq.max_sge++; - if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges) - qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges; - - qp->qplib_qp.rq_hdr_buf_size = - BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2; - - qp->qplib_qp.sq_hdr_buf_size = - BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2; - qp->qplib_qp.dpi = &rdev->dpi_privileged; - rc = bnxt_qplib_create_qp1(&rdev->qplib_res, &qp->qplib_qp); - if (rc) { - dev_err(rdev_to_dev(rdev), "Failed to create HW QP1"); - goto fail; - } - /* Create a shadow QP to handle the QP1 traffic */ - rdev->qp1_sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res, - &qp->qplib_qp); - if (!rdev->qp1_sqp) { - rc = -EINVAL; - dev_err(rdev_to_dev(rdev), - "Failed to create Shadow QP for QP1"); - goto qp_destroy; - } - rdev->sqp_ah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res, - &qp->qplib_qp); - if (!rdev->sqp_ah) { - bnxt_qplib_destroy_qp(&rdev->qplib_res, - &rdev->qp1_sqp->qplib_qp); - rc = -EINVAL; - dev_err(rdev_to_dev(rdev), - "Failed to create AH entry for ShadowQP"); - goto qp_destroy; - } + return 0; +out: + kfree(sqp_tbl); + return rc; +} - } else { - /* Allocate 128 + 1 more than what's provided */ - entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + - BNXT_QPLIB_RESERVED_QP_WRS + 1); - qp->qplib_qp.sq.max_wqe = min_t(u32, entries, - dev_attr->max_qp_wqes + - BNXT_QPLIB_RESERVED_QP_WRS + 1); - qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1; +static int bnxt_re_create_gsi_qp(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd, + struct ib_qp_init_attr *init_attr) +{ + struct bnxt_qplib_dev_attr *dev_attr; + struct bnxt_re_dev *rdev; + struct bnxt_qplib_qp *qplqp; + int rc = 0; - /* - * Reserving one slot for Phantom WQE. Application can - * post one extra entry in this case. 
But allowing this to avoid - * unexpected Queue full condition - */ + rdev = qp->rdev; + qplqp = &qp->qplib_qp; + dev_attr = &rdev->dev_attr; - qp->qplib_qp.sq.q_full_delta -= 1; + qplqp->rq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2; + qplqp->sq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2; - qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom; - qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom; - if (udata) { - rc = bnxt_re_init_user_qp(rdev, pd, qp, udata); - if (rc) - goto fail; - } else { - qp->qplib_qp.dpi = &rdev->dpi_privileged; - } + rc = bnxt_qplib_create_qp1(&rdev->qplib_res, qplqp); + if (rc) { + dev_err(rdev_to_dev(rdev), "create HW QP1 failed!"); + goto out; + } + + rc = bnxt_re_create_shadow_gsi(qp, pd); +out: + return rc; +} + +static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev, + struct ib_qp_init_attr *init_attr, + struct bnxt_qplib_dev_attr *dev_attr) +{ + bool rc = true; + + if (init_attr->cap.max_send_wr > dev_attr->max_qp_wqes || + init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes || + init_attr->cap.max_send_sge > dev_attr->max_qp_sges || + init_attr->cap.max_recv_sge > dev_attr->max_qp_sges || + init_attr->cap.max_inline_data > dev_attr->max_inline_data) { + dev_err(rdev_to_dev(rdev), + "Create QP failed - max exceeded! 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x", + init_attr->cap.max_send_wr, dev_attr->max_qp_wqes, + init_attr->cap.max_recv_wr, dev_attr->max_qp_wqes, + init_attr->cap.max_send_sge, dev_attr->max_qp_sges, + init_attr->cap.max_recv_sge, dev_attr->max_qp_sges, + init_attr->cap.max_inline_data, + dev_attr->max_inline_data); + rc = false; + } + return rc; +} + +struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, + struct ib_qp_init_attr *qp_init_attr, + struct ib_udata *udata) +{ + struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); + struct bnxt_re_dev *rdev = pd->rdev; + struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr; + struct bnxt_re_qp *qp; + int rc; + + rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr); + if (!rc) { + rc = -EINVAL; + goto exit; + } + qp = kzalloc(sizeof(*qp), GFP_KERNEL); + if (!qp) { + rc = -ENOMEM; + goto exit; + } + qp->rdev = rdev; + rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, udata); + if (rc) + goto fail; + + if (qp_init_attr->qp_type == IB_QPT_GSI && + !(bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx))) { + rc = bnxt_re_create_gsi_qp(qp, pd, qp_init_attr); + if (rc == -ENODEV) + goto qp_destroy; + if (rc) + goto fail; + } else { rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp); if (rc) { dev_err(rdev_to_dev(rdev), "Failed to create HW QP"); goto free_umem; } + if (udata) { + struct bnxt_re_qp_resp resp; + + resp.qpid = qp->qplib_qp.id; + resp.rsvd = 0; + rc = ib_copy_to_udata(udata, &resp, sizeof(resp)); + if (rc) { + dev_err(rdev_to_dev(rdev), "Failed to copy QP udata"); + goto qp_destroy; + } + } } qp->ib_qp.qp_num = qp->qplib_qp.id; + if (qp_init_attr->qp_type == IB_QPT_GSI) + rdev->gsi_ctx.gsi_qp = qp; spin_lock_init(&qp->sq_lock); spin_lock_init(&qp->rq_lock); - - if (udata) { - struct bnxt_re_qp_resp resp; - - resp.qpid = qp->ib_qp.qp_num; - resp.rsvd = 0; - rc = ib_copy_to_udata(udata, &resp, sizeof(resp)); - if (rc) { - dev_err(rdev_to_dev(rdev), "Failed to copy QP udata"); - goto qp_destroy; - } - } INIT_LIST_HEAD(&qp->list); mutex_lock(&rdev->qp_lock); list_add_tail(&qp->list, &rdev->qp_list); - atomic_inc(&rdev->qp_count); mutex_unlock(&rdev->qp_lock); + atomic_inc(&rdev->qp_count); return &qp->ib_qp; qp_destroy: @@ -1189,6 
+1387,7 @@ free_umem: ib_umem_release(qp->sumem); fail: kfree(qp); +exit: return ERR_PTR(rc); } @@ -1487,7 +1686,7 @@ static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp1_qp, int qp_attr_mask) { - struct bnxt_re_qp *qp = rdev->qp1_sqp; + struct bnxt_re_qp *qp = rdev->gsi_ctx.gsi_sqp; int rc = 0; if (qp_attr_mask & IB_QP_STATE) { @@ -1752,7 +1951,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, dev_err(rdev_to_dev(rdev), "Failed to modify HW QP"); return rc; } - if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) + if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask); return rc; } @@ -1996,9 +2195,12 @@ static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp, struct bnxt_qplib_swqe *wqe, int payload_size) { + struct bnxt_re_sqp_entries *sqp_entry; struct bnxt_qplib_sge ref, sge; + struct bnxt_re_dev *rdev; u32 rq_prod_index; - struct bnxt_re_sqp_entries *sqp_entry; + + rdev = qp->rdev; rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp); @@ -2013,7 +2215,7 @@ static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp, ref.lkey = wqe->sg_list[0].lkey; ref.size = wqe->sg_list[0].size; - sqp_entry = &qp->rdev->sqp_tbl[rq_prod_index]; + sqp_entry = &rdev->gsi_ctx.sqp_tbl[rq_prod_index]; /* SGE 1 */ wqe->sg_list[0].addr = sge.addr; @@ -2833,12 +3035,13 @@ static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev, return rc; } -static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp, +static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *gsi_qp, struct bnxt_qplib_cqe *cqe) { - struct bnxt_re_dev *rdev = qp1_qp->rdev; + struct bnxt_re_dev *rdev = gsi_qp->rdev; struct bnxt_re_sqp_entries *sqp_entry = NULL; - struct bnxt_re_qp *qp = rdev->qp1_sqp; + struct bnxt_re_qp *gsi_sqp = rdev->gsi_ctx.gsi_sqp; + struct bnxt_re_ah *gsi_sah; struct ib_send_wr *swr; struct ib_ud_wr udwr; struct ib_recv_wr rwr; @@ -2861,19 +3064,19 @@ static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp, swr = &udwr.wr; tbl_idx = cqe->wr_id; - rq_hdr_buf = qp1_qp->qplib_qp.rq_hdr_buf + - (tbl_idx * qp1_qp->qplib_qp.rq_hdr_buf_size); - rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp1_qp->qplib_qp, + rq_hdr_buf = gsi_qp->qplib_qp.rq_hdr_buf + + (tbl_idx * gsi_qp->qplib_qp.rq_hdr_buf_size); + rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp, tbl_idx); /* Shadow QP header buffer */ - shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp->qplib_qp, + shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp, tbl_idx); - sqp_entry = &rdev->sqp_tbl[tbl_idx]; + sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx]; /* Store this cqe */ memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe)); - sqp_entry->qp1_qp = qp1_qp; + sqp_entry->qp1_qp = gsi_qp; /* Find packet type from the cqe */ @@ -2927,7 +3130,7 @@ static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp, rwr.wr_id = tbl_idx; rwr.next = NULL; - rc = bnxt_re_post_recv_shadow_qp(rdev, qp, &rwr); + rc = bnxt_re_post_recv_shadow_qp(rdev, gsi_sqp, &rwr); if (rc) { dev_err(rdev_to_dev(rdev), "Failed to post Rx buffers to shadow QP"); @@ -2939,13 +3142,13 @@ static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp, swr->wr_id = tbl_idx; swr->opcode = IB_WR_SEND; swr->next = NULL; - - udwr.ah = &rdev->sqp_ah->ib_ah; - udwr.remote_qpn = rdev->qp1_sqp->qplib_qp.id; - udwr.remote_qkey = rdev->qp1_sqp->qplib_qp.qkey; + gsi_sah = rdev->gsi_ctx.gsi_sah; + udwr.ah 
= &gsi_sah->ib_ah; + udwr.remote_qpn = gsi_sqp->qplib_qp.id; + udwr.remote_qkey = gsi_sqp->qplib_qp.qkey; /* post data received in the send queue */ - rc = bnxt_re_post_send_shadow_qp(rdev, qp, swr); + rc = bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr); return 0; } @@ -2999,12 +3202,12 @@ static void bnxt_re_process_res_rc_wc(struct ib_wc *wc, wc->opcode = IB_WC_RECV_RDMA_WITH_IMM; } -static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp, +static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp, struct ib_wc *wc, struct bnxt_qplib_cqe *cqe) { - struct bnxt_re_dev *rdev = qp->rdev; - struct bnxt_re_qp *qp1_qp = NULL; + struct bnxt_re_dev *rdev = gsi_sqp->rdev; + struct bnxt_re_qp *gsi_qp = NULL; struct bnxt_qplib_cqe *orig_cqe = NULL; struct bnxt_re_sqp_entries *sqp_entry = NULL; int nw_type; @@ -3014,13 +3217,13 @@ static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp, tbl_idx = cqe->wr_id; - sqp_entry = &rdev->sqp_tbl[tbl_idx]; - qp1_qp = sqp_entry->qp1_qp; + sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx]; + gsi_qp = sqp_entry->qp1_qp; orig_cqe = &sqp_entry->cqe; wc->wr_id = sqp_entry->wrid; wc->byte_len = orig_cqe->length; - wc->qp = &qp1_qp->ib_qp; + wc->qp = &gsi_qp->ib_qp; wc->ex.imm_data = orig_cqe->immdata; wc->src_qp = orig_cqe->src_qp; @@ -3099,7 +3302,7 @@ static int send_phantom_wqe(struct bnxt_re_qp *qp) int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc) { struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq); - struct bnxt_re_qp *qp; + struct bnxt_re_qp *qp, *sh_qp; struct bnxt_qplib_cqe *cqe; int i, ncqe, budget; struct bnxt_qplib_q *sq; @@ -3163,8 +3366,9 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc) switch (cqe->opcode) { case CQ_BASE_CQE_TYPE_REQ: - if (qp->rdev->qp1_sqp && qp->qplib_qp.id == - qp->rdev->qp1_sqp->qplib_qp.id) { + sh_qp = qp->rdev->gsi_ctx.gsi_sqp; + if (sh_qp && + qp->qplib_qp.id == sh_qp->qplib_qp.id) { /* Handle this completion with * the stored completion */ @@ -3190,7 +3394,7 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc) * stored in the table */ tbl_idx = cqe->wr_id; - sqp_entry = &cq->rdev->sqp_tbl[tbl_idx]; + sqp_entry = &cq->rdev->gsi_ctx.sqp_tbl[tbl_idx]; wc->wr_id = sqp_entry->wrid; bnxt_re_process_res_rawqp1_wc(wc, cqe); break; @@ -3198,8 +3402,9 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc) bnxt_re_process_res_rc_wc(wc, cqe); break; case CQ_BASE_CQE_TYPE_RES_UD: - if (qp->rdev->qp1_sqp && qp->qplib_qp.id == - qp->rdev->qp1_sqp->qplib_qp.id) { + sh_qp = qp->rdev->gsi_ctx.gsi_sqp; + if (sh_qp && + qp->qplib_qp.id == sh_qp->qplib_qp.id) { /* Handle this completion with * the stored completion */ diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 793c97251588..82a5350ff6d7 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -1125,7 +1125,8 @@ static int bnxt_re_query_hwrm_pri2cos(struct bnxt_re_dev *rdev, u8 dir, static bool bnxt_re_is_qp1_or_shadow_qp(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp) { - return (qp->ib_qp.qp_type == IB_QPT_GSI) || (qp == rdev->qp1_sqp); + return (qp->ib_qp.qp_type == IB_QPT_GSI) || + (qp == rdev->gsi_ctx.gsi_sqp); } static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev) -- cgit v1.2.3-58-ga151 From 0cfb329db988804124423b311a2845e56914e3ca Mon Sep 17 00:00:00 2001 From: Devesh Sharma Date: Sat, 15 Feb 2020 12:10:59 -0500 Subject: RDMA/bnxt_re: Replace 
chip context structure with pointer The chip_ctx member in bnxt_re_dev structure is now a pointer to struct bnxt_qplib_chip_ctx. Since the member type has changed there are changes in rest of the code wherever dev->chip_ctx is used. Link: https://lore.kernel.org/r/1581786665-23705-3-git-send-email-devesh.sharma@broadcom.com Signed-off-by: Naresh Kumar PBS Signed-off-by: Selvin Xavier Signed-off-by: Devesh Sharma Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/bnxt_re/bnxt_re.h | 2 +- drivers/infiniband/hw/bnxt_re/ib_verbs.c | 13 ++++++----- drivers/infiniband/hw/bnxt_re/main.c | 40 +++++++++++++++++++++----------- drivers/infiniband/hw/bnxt_re/qplib_fp.c | 2 +- 4 files changed, 36 insertions(+), 21 deletions(-) diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h index c2805384e832..86274f4c8225 100644 --- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h +++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h @@ -133,7 +133,7 @@ struct bnxt_re_dev { #define BNXT_RE_FLAG_ISSUE_ROCE_STATS 29 struct net_device *netdev; unsigned int version, major, minor; - struct bnxt_qplib_chip_ctx chip_ctx; + struct bnxt_qplib_chip_ctx *chip_ctx; struct bnxt_en_dev *en_dev; struct bnxt_msix_entry msix_entries[BNXT_RE_MAX_MSIX]; int num_msix; diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index c37f0b1df475..a17c9cf91118 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -859,7 +859,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd, bytes = (qplib_qp->sq.max_wqe * BNXT_QPLIB_MAX_SQE_ENTRY_SIZE); /* Consider mapping PSN search memory only for RC QPs. */ if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) { - psn_sz = bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx) ? + psn_sz = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ? 
sizeof(struct sq_psn_search_ext) : sizeof(struct sq_psn_search); bytes += (qplib_qp->sq.max_wqe * psn_sz); @@ -1060,6 +1060,7 @@ static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp) qplqp->rq.max_sge = dev_attr->max_qp_sges; if (qplqp->rq.max_sge > dev_attr->max_qp_sges) qplqp->rq.max_sge = dev_attr->max_qp_sges; + qplqp->rq.max_sge = 6; } static void bnxt_re_init_sq_attr(struct bnxt_re_qp *qp, @@ -1123,7 +1124,7 @@ static int bnxt_re_init_qp_type(struct bnxt_re_dev *rdev, struct bnxt_qplib_chip_ctx *chip_ctx; int qptype; - chip_ctx = &rdev->chip_ctx; + chip_ctx = rdev->chip_ctx; qptype = __from_ib_qp_type(init_attr->qp_type); if (qptype == IB_QPT_MAX) { @@ -1343,7 +1344,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, goto fail; if (qp_init_attr->qp_type == IB_QPT_GSI && - !(bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx))) { + !(bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))) { rc = bnxt_re_create_gsi_qp(qp, pd, qp_init_attr); if (rc == -ENODEV) goto qp_destroy; @@ -3820,10 +3821,10 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata) spin_lock_init(&uctx->sh_lock); resp.comp_mask = BNXT_RE_UCNTX_CMASK_HAVE_CCTX; - chip_met_rev_num = rdev->chip_ctx.chip_num; - chip_met_rev_num |= ((u32)rdev->chip_ctx.chip_rev & 0xFF) << + chip_met_rev_num = rdev->chip_ctx->chip_num; + chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_rev & 0xFF) << BNXT_RE_CHIP_ID0_CHIP_REV_SFT; - chip_met_rev_num |= ((u32)rdev->chip_ctx.chip_metal & 0xFF) << + chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_metal & 0xFF) << BNXT_RE_CHIP_ID0_CHIP_MET_SFT; resp.chip_id0 = chip_met_rev_num; /* Future extension of chip info */ diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 82a5350ff6d7..390daebea4c3 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -82,22 +82,35 @@ static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev); static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev) { + struct bnxt_qplib_chip_ctx *chip_ctx; + + if (!rdev->chip_ctx) + return; + chip_ctx = rdev->chip_ctx; + rdev->chip_ctx = NULL; rdev->rcfw.res = NULL; rdev->qplib_res.cctx = NULL; + kfree(chip_ctx); } static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev) { + struct bnxt_qplib_chip_ctx *chip_ctx; struct bnxt_en_dev *en_dev; struct bnxt *bp; en_dev = rdev->en_dev; bp = netdev_priv(en_dev->net); - rdev->chip_ctx.chip_num = bp->chip_num; + chip_ctx = kzalloc(sizeof(*chip_ctx), GFP_KERNEL); + if (!chip_ctx) + return -ENOMEM; + chip_ctx->chip_num = bp->chip_num; + + rdev->chip_ctx = chip_ctx; /* rest members to follow eventually */ - rdev->qplib_res.cctx = &rdev->chip_ctx; + rdev->qplib_res.cctx = rdev->chip_ctx; rdev->rcfw.res = &rdev->qplib_res; return 0; @@ -136,7 +149,7 @@ static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev) ctx->srqc_count = min_t(u32, BNXT_RE_MAX_SRQC_COUNT, attr->max_srq); ctx->cq_count = min_t(u32, BNXT_RE_MAX_CQ_COUNT, attr->max_cq); - if (!bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx)) + if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) rdev->qplib_ctx.tqm_count[i] = rdev->dev_attr.tqm_alloc_reqs[i]; @@ -185,7 +198,7 @@ static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev) memset(&rdev->qplib_ctx.vf_res, 0, sizeof(struct bnxt_qplib_vf_res)); bnxt_re_limit_pf_res(rdev); - num_vfs = bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx) ? + num_vfs = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ? 
BNXT_RE_GEN_P5_MAX_VF : rdev->num_vfs; if (num_vfs) bnxt_re_limit_vf_res(&rdev->qplib_ctx, num_vfs); @@ -208,7 +221,7 @@ static void bnxt_re_sriov_config(void *p, int num_vfs) return; rdev->num_vfs = num_vfs; - if (!bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx)) { + if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) { bnxt_re_set_resource_limits(rdev); bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw, &rdev->qplib_ctx); @@ -916,7 +929,7 @@ static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq, #define BNXT_RE_GEN_P5_VF_NQ_DB 0x4000 static u32 bnxt_re_get_nqdb_offset(struct bnxt_re_dev *rdev, u16 indx) { - return bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx) ? + return bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ? (rdev->is_virtfn ? BNXT_RE_GEN_P5_VF_NQ_DB : BNXT_RE_GEN_P5_PF_NQ_DB) : rdev->msix_entries[indx].db_offset; @@ -967,7 +980,7 @@ static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev) int i; for (i = 0; i < rdev->num_msix - 1; i++) { - type = bnxt_qplib_get_ring_type(&rdev->chip_ctx); + type = bnxt_qplib_get_ring_type(rdev->chip_ctx); bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type); rdev->nq[i].res = NULL; bnxt_qplib_free_nq(&rdev->nq[i]); @@ -1025,7 +1038,7 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev) i, rc); goto free_nq; } - type = bnxt_qplib_get_ring_type(&rdev->chip_ctx); + type = bnxt_qplib_get_ring_type(rdev->chip_ctx); pg_map = rdev->nq[i].hwq.pbl[PBL_LVL_0].pg_map_arr; pages = rdev->nq[i].hwq.pbl[rdev->nq[i].hwq.level].pg_count; rc = bnxt_re_net_ring_alloc(rdev, pg_map, pages, type, @@ -1044,7 +1057,7 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev) return 0; free_nq: for (i = num_vec_created; i >= 0; i--) { - type = bnxt_qplib_get_ring_type(&rdev->chip_ctx); + type = bnxt_qplib_get_ring_type(rdev->chip_ctx); bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type); bnxt_qplib_free_nq(&rdev->nq[i]); } @@ -1324,7 +1337,7 @@ static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev) bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id); bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx); bnxt_qplib_disable_rcfw_channel(&rdev->rcfw); - type = bnxt_qplib_get_ring_type(&rdev->chip_ctx); + type = bnxt_qplib_get_ring_type(rdev->chip_ctx); bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, type); bnxt_qplib_free_rcfw_channel(&rdev->rcfw); } @@ -1405,7 +1418,8 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) pr_err("Failed to allocate RCFW Channel: %#x\n", rc); goto fail; } - type = bnxt_qplib_get_ring_type(&rdev->chip_ctx); + + type = bnxt_qplib_get_ring_type(rdev->chip_ctx); pg_map = rdev->rcfw.creq.pbl[PBL_LVL_0].pg_map_arr; pages = rdev->rcfw.creq.pbl[rdev->rcfw.creq.level].pg_count; ridx = rdev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx; @@ -1434,7 +1448,7 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) bnxt_re_set_resource_limits(rdev); rc = bnxt_qplib_alloc_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx, 0, - bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx)); + bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)); if (rc) { pr_err("Failed to allocate QPLIB context: %#x\n", rc); goto disable_rcfw; @@ -1504,7 +1518,7 @@ free_ctx: disable_rcfw: bnxt_qplib_disable_rcfw_channel(&rdev->rcfw); free_ring: - type = bnxt_qplib_get_ring_type(&rdev->chip_ctx); + type = bnxt_qplib_get_ring_type(rdev->chip_ctx); bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, type); free_rcfw: bnxt_qplib_free_rcfw_channel(&rdev->rcfw); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index 020f70e6865e..ffe861060fbf 
100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -2426,7 +2426,7 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
 	}
 	cqe = *pcqe;
 	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
-	cqe->length = (u32)le16_to_cpu(hwcqe->length);
+	cqe->length = le16_to_cpu(hwcqe->length) & CQ_RES_UD_LENGTH_MASK;
 	cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata);
 	cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
 	cqe->flags = le16_to_cpu(hwcqe->flags);
-- cgit v1.2.3-58-ga151

From 0c4dcd602817502bb3dced7a834a13ef717d65a4 Mon Sep 17 00:00:00 2001
From: Devesh Sharma
Date: Sat, 15 Feb 2020 12:11:00 -0500
Subject: RDMA/bnxt_re: Refactor hardware queue memory allocation

At the top level there are three major data structure additions, viz.
bnxt_qplib_hwq_attr, bnxt_qplib_sg_info and bnxt_qplib_tqm_ctx.

Introduction of the first data structure reduces the argument list of the
bnxt_re_alloc_init_hwq() function. There are changes all over the driver
code to incorporate this new structure. The caller needs to fill the
attribute data structure and pass it to this function.

The second data structure passes the memory region description, viz.
sghead, page_size and page_shift. There are changes all over the driver
code to initialize the bnxt_re_sg_info data structure. The new data
structure helps to reduce the argument list of the __alloc_pbl() function
call.

Until now the TQM ring related members were not collected under any
specific data structure, making them hard to manage. The third data
structure, bnxt_qplib_tqm_ctx, is added to refactor the TQM queue
allocation and initialization.

Link: https://lore.kernel.org/r/1581786665-23705-4-git-send-email-devesh.sharma@broadcom.com
Signed-off-by: Naresh Kumar PBS
Signed-off-by: Selvin Xavier
Signed-off-by: Devesh Sharma
Signed-off-by: Jason Gunthorpe
---
 drivers/infiniband/hw/bnxt_re/ib_verbs.c   |  26 +-
 drivers/infiniband/hw/bnxt_re/main.c       |  20 +-
 drivers/infiniband/hw/bnxt_re/qplib_fp.c   | 169 ++++++-----
 drivers/infiniband/hw/bnxt_re/qplib_fp.h   |   2 +-
 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c |  57 ++--
 drivers/infiniband/hw/bnxt_re/qplib_rcfw.h |   5 +-
 drivers/infiniband/hw/bnxt_re/qplib_res.c  | 470 ++++++++++++++++++-----------
 drivers/infiniband/hw/bnxt_re/qplib_res.h  |  60 ++--
 drivers/infiniband/hw/bnxt_re/qplib_sp.c   |  48 +--
 9 files changed, 525 insertions(+), 332 deletions(-)

diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index a17c9cf91118..4368aacdd482 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -871,9 +871,11 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
 		return PTR_ERR(umem);
 	qp->sumem = umem;
-	qplib_qp->sq.sg_info.sglist = umem->sg_head.sgl;
+	qplib_qp->sq.sg_info.sghead = umem->sg_head.sgl;
 	qplib_qp->sq.sg_info.npages = ib_umem_num_pages(umem);
 	qplib_qp->sq.sg_info.nmap = umem->nmap;
+	qplib_qp->sq.sg_info.pgsize = PAGE_SIZE;
+	qplib_qp->sq.sg_info.pgshft = PAGE_SHIFT;
 	qplib_qp->qp_handle = ureq.qp_handle;

 	if (!qp->qplib_qp.srq) {
@@ -884,9 +886,11 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
 		if (IS_ERR(umem))
 			goto rqfail;
 		qp->rumem = umem;
-		qplib_qp->rq.sg_info.sglist = umem->sg_head.sgl;
+		qplib_qp->rq.sg_info.sghead = umem->sg_head.sgl;
 		qplib_qp->rq.sg_info.npages = ib_umem_num_pages(umem);
 		qplib_qp->rq.sg_info.nmap = umem->nmap;
+		qplib_qp->rq.sg_info.pgsize = PAGE_SIZE;
+		qplib_qp->rq.sg_info.pgshft = PAGE_SHIFT;
 	}
qplib_qp->dpi = &cntx->dpi; @@ -976,6 +980,8 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp qp->qplib_qp.sq.max_sge = 2; /* Q full delta can be 1 since it is internal QP */ qp->qplib_qp.sq.q_full_delta = 1; + qp->qplib_qp.sq.sg_info.pgsize = PAGE_SIZE; + qp->qplib_qp.sq.sg_info.pgshft = PAGE_SHIFT; qp->qplib_qp.scq = qp1_qp->scq; qp->qplib_qp.rcq = qp1_qp->rcq; @@ -984,6 +990,8 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge; /* Q full delta can be 1 since it is internal QP */ qp->qplib_qp.rq.q_full_delta = 1; + qp->qplib_qp.rq.sg_info.pgsize = PAGE_SIZE; + qp->qplib_qp.rq.sg_info.pgshft = PAGE_SHIFT; qp->qplib_qp.mtu = qp1_qp->mtu; @@ -1043,6 +1051,8 @@ static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp, if (qplqp->rq.max_sge > dev_attr->max_qp_sges) qplqp->rq.max_sge = dev_attr->max_qp_sges; } + qplqp->rq.sg_info.pgsize = PAGE_SIZE; + qplqp->rq.sg_info.pgshft = PAGE_SHIFT; return 0; } @@ -1095,6 +1105,8 @@ static void bnxt_re_init_sq_attr(struct bnxt_re_qp *qp, * unexpected Queue full condition */ qplqp->sq.q_full_delta -= 1; + qplqp->sq.sg_info.pgsize = PAGE_SIZE; + qplqp->sq.sg_info.pgshft = PAGE_SHIFT; } static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp, @@ -1511,9 +1523,11 @@ static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev, return PTR_ERR(umem); srq->umem = umem; - qplib_srq->sg_info.sglist = umem->sg_head.sgl; + qplib_srq->sg_info.sghead = umem->sg_head.sgl; qplib_srq->sg_info.npages = ib_umem_num_pages(umem); qplib_srq->sg_info.nmap = umem->nmap; + qplib_srq->sg_info.pgsize = PAGE_SIZE; + qplib_srq->sg_info.pgshft = PAGE_SHIFT; qplib_srq->srq_handle = ureq.srq_handle; qplib_srq->dpi = &cntx->dpi; @@ -2368,7 +2382,7 @@ static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr, wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0]; wqe->frmr.page_list = mr->pages; wqe->frmr.page_list_len = mr->npages; - wqe->frmr.levels = qplib_frpl->hwq.level + 1; + wqe->frmr.levels = qplib_frpl->hwq.level; wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR; /* Need unconditional fence for reg_mr @@ -2742,6 +2756,8 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, if (entries > dev_attr->max_cq_wqes + 1) entries = dev_attr->max_cq_wqes + 1; + cq->qplib_cq.sg_info.pgsize = PAGE_SIZE; + cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT; if (udata) { struct bnxt_re_cq_req req; struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context( @@ -2758,7 +2774,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, rc = PTR_ERR(cq->umem); goto fail; } - cq->qplib_cq.sg_info.sglist = cq->umem->sg_head.sgl; + cq->qplib_cq.sg_info.sghead = cq->umem->sg_head.sgl; cq->qplib_cq.sg_info.npages = ib_umem_num_pages(cq->umem); cq->qplib_cq.sg_info.nmap = cq->umem->nmap; cq->qplib_cq.dpi = &uctx->dpi; diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 390daebea4c3..a966c68dbc59 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -90,6 +90,8 @@ static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev) rdev->chip_ctx = NULL; rdev->rcfw.res = NULL; rdev->qplib_res.cctx = NULL; + rdev->qplib_res.pdev = NULL; + rdev->qplib_res.netdev = NULL; kfree(chip_ctx); } @@ -151,7 +153,7 @@ static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev) ctx->cq_count = min_t(u32, BNXT_RE_MAX_CQ_COUNT, attr->max_cq); if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) - 
rdev->qplib_ctx.tqm_count[i] = + rdev->qplib_ctx.tqm_ctx.qcount[i] = rdev->dev_attr.tqm_alloc_reqs[i]; } @@ -982,8 +984,8 @@ static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev) for (i = 0; i < rdev->num_msix - 1; i++) { type = bnxt_qplib_get_ring_type(rdev->chip_ctx); bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type); - rdev->nq[i].res = NULL; bnxt_qplib_free_nq(&rdev->nq[i]); + rdev->nq[i].res = NULL; } } @@ -1032,7 +1034,7 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev) rdev->nq[i].res = &rdev->qplib_res; rdev->nq[i].hwq.max_elements = BNXT_RE_MAX_CQ_COUNT + BNXT_RE_MAX_SRQC_COUNT + 2; - rc = bnxt_qplib_alloc_nq(rdev->en_dev->pdev, &rdev->nq[i]); + rc = bnxt_qplib_alloc_nq(&rdev->qplib_res, &rdev->nq[i]); if (rc) { dev_err(rdev_to_dev(rdev), "Alloc Failed NQ%d rc:%#x", i, rc); @@ -1056,7 +1058,7 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev) } return 0; free_nq: - for (i = num_vec_created; i >= 0; i--) { + for (i = num_vec_created - 1; i >= 0; i--) { type = bnxt_qplib_get_ring_type(rdev->chip_ctx); bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type); bnxt_qplib_free_nq(&rdev->nq[i]); @@ -1335,7 +1337,7 @@ static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev) dev_warn(rdev_to_dev(rdev), "Failed to deinitialize RCFW: %#x", rc); bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id); - bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx); + bnxt_qplib_free_ctx(&rdev->qplib_res, &rdev->qplib_ctx); bnxt_qplib_disable_rcfw_channel(&rdev->rcfw); type = bnxt_qplib_get_ring_type(rdev->chip_ctx); bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, type); @@ -1411,7 +1413,7 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) /* Establish RCFW Communication Channel to initialize the context * memory for the function and all child VFs */ - rc = bnxt_qplib_alloc_rcfw_channel(rdev->en_dev->pdev, &rdev->rcfw, + rc = bnxt_qplib_alloc_rcfw_channel(&rdev->qplib_res, &rdev->rcfw, &rdev->qplib_ctx, BNXT_RE_MAX_QPC_COUNT); if (rc) { @@ -1432,7 +1434,7 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) } db_offt = bnxt_re_get_nqdb_offset(rdev, BNXT_RE_AEQ_IDX); vid = rdev->msix_entries[BNXT_RE_AEQ_IDX].vector; - rc = bnxt_qplib_enable_rcfw_channel(rdev->en_dev->pdev, &rdev->rcfw, + rc = bnxt_qplib_enable_rcfw_channel(&rdev->rcfw, vid, db_offt, rdev->is_virtfn, &bnxt_re_aeq_handler); if (rc) { @@ -1447,7 +1449,7 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) bnxt_re_set_resource_limits(rdev); - rc = bnxt_qplib_alloc_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx, 0, + rc = bnxt_qplib_alloc_ctx(&rdev->qplib_res, &rdev->qplib_ctx, 0, bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)); if (rc) { pr_err("Failed to allocate QPLIB context: %#x\n", rc); @@ -1514,7 +1516,7 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) free_sctx: bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id); free_ctx: - bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx); + bnxt_qplib_free_ctx(&rdev->qplib_res, &rdev->qplib_ctx); disable_rcfw: bnxt_qplib_disable_rcfw_channel(&rdev->rcfw); free_ring: diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index ffe861060fbf..e7fe86e61fbb 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -464,26 +464,33 @@ fail: void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq) { if (nq->hwq.max_elements) { - bnxt_qplib_free_hwq(nq->pdev, &nq->hwq); + bnxt_qplib_free_hwq(nq->res, &nq->hwq); nq->hwq.max_elements = 0; } } -int 
bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq) +int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq) { - u8 hwq_type; + struct bnxt_qplib_hwq_attr hwq_attr = {}; + struct bnxt_qplib_sg_info sginfo = {}; - nq->pdev = pdev; + nq->pdev = res->pdev; + nq->res = res; if (!nq->hwq.max_elements || nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT) nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT; - hwq_type = bnxt_qplib_get_hwq_type(nq->res); - if (bnxt_qplib_alloc_init_hwq(nq->pdev, &nq->hwq, NULL, - &nq->hwq.max_elements, - BNXT_QPLIB_MAX_NQE_ENTRY_SIZE, 0, - PAGE_SIZE, hwq_type)) - return -ENOMEM; + sginfo.pgsize = PAGE_SIZE; + sginfo.pgshft = PAGE_SHIFT; + hwq_attr.res = res; + hwq_attr.sginfo = &sginfo; + hwq_attr.depth = nq->hwq.max_elements; + hwq_attr.stride = sizeof(struct nq_base); + hwq_attr.type = bnxt_qplib_get_hwq_type(nq->res); + if (bnxt_qplib_alloc_init_hwq(&nq->hwq, &hwq_attr)) { + dev_err(&nq->pdev->dev, "FP NQ allocation failed"); + return -ENOMEM; + } nq->budget = 8; return 0; } @@ -526,24 +533,26 @@ void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res, kfree(srq->swq); if (rc) return; - bnxt_qplib_free_hwq(res->pdev, &srq->hwq); + bnxt_qplib_free_hwq(res, &srq->hwq); } int bnxt_qplib_create_srq(struct bnxt_qplib_res *res, struct bnxt_qplib_srq *srq) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; - struct cmdq_create_srq req; + struct bnxt_qplib_hwq_attr hwq_attr = {}; struct creq_create_srq_resp resp; + struct cmdq_create_srq req; struct bnxt_qplib_pbl *pbl; u16 cmd_flags = 0; int rc, idx; - srq->hwq.max_elements = srq->max_wqe; - rc = bnxt_qplib_alloc_init_hwq(res->pdev, &srq->hwq, &srq->sg_info, - &srq->hwq.max_elements, - BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0, - PAGE_SIZE, HWQ_TYPE_QUEUE); + hwq_attr.res = res; + hwq_attr.sginfo = &srq->sg_info; + hwq_attr.depth = srq->max_wqe; + hwq_attr.stride = BNXT_QPLIB_MAX_RQE_ENTRY_SIZE; + hwq_attr.type = HWQ_TYPE_QUEUE; + rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr); if (rc) goto exit; @@ -602,7 +611,7 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res, return 0; fail: - bnxt_qplib_free_hwq(res->pdev, &srq->hwq); + bnxt_qplib_free_hwq(res, &srq->hwq); kfree(srq->swq); exit: return rc; @@ -721,15 +730,16 @@ done: /* QP */ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) { + struct bnxt_qplib_hwq_attr hwq_attr = {}; struct bnxt_qplib_rcfw *rcfw = res->rcfw; - struct cmdq_create_qp1 req; - struct creq_create_qp1_resp resp; - struct bnxt_qplib_pbl *pbl; struct bnxt_qplib_q *sq = &qp->sq; struct bnxt_qplib_q *rq = &qp->rq; - int rc; + struct creq_create_qp1_resp resp; + struct cmdq_create_qp1 req; + struct bnxt_qplib_pbl *pbl; u16 cmd_flags = 0; u32 qp_flags = 0; + int rc; RCFW_CMD_PREP(req, CREATE_QP1, cmd_flags); @@ -739,11 +749,12 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) req.qp_handle = cpu_to_le64(qp->qp_handle); /* SQ */ - sq->hwq.max_elements = sq->max_wqe; - rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, NULL, - &sq->hwq.max_elements, - BNXT_QPLIB_MAX_SQE_ENTRY_SIZE, 0, - PAGE_SIZE, HWQ_TYPE_QUEUE); + hwq_attr.res = res; + hwq_attr.sginfo = &sq->sg_info; + hwq_attr.depth = sq->max_wqe; + hwq_attr.stride = BNXT_QPLIB_MAX_SQE_ENTRY_SIZE; + hwq_attr.type = HWQ_TYPE_QUEUE; + rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr); if (rc) goto exit; @@ -778,11 +789,12 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) /* RQ */ if (rq->max_wqe) { - rq->hwq.max_elements = 
qp->rq.max_wqe; - rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, NULL, - &rq->hwq.max_elements, - BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0, - PAGE_SIZE, HWQ_TYPE_QUEUE); + hwq_attr.res = res; + hwq_attr.sginfo = &rq->sg_info; + hwq_attr.stride = BNXT_QPLIB_MAX_RQE_ENTRY_SIZE; + hwq_attr.depth = qp->rq.max_wqe; + hwq_attr.type = HWQ_TYPE_QUEUE; + rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr); if (rc) goto fail_sq; @@ -848,10 +860,10 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) fail: bnxt_qplib_free_qp_hdr_buf(res, qp); fail_rq: - bnxt_qplib_free_hwq(res->pdev, &rq->hwq); + bnxt_qplib_free_hwq(res, &rq->hwq); kfree(rq->swq); fail_sq: - bnxt_qplib_free_hwq(res->pdev, &sq->hwq); + bnxt_qplib_free_hwq(res, &sq->hwq); kfree(sq->swq); exit: return rc; @@ -860,7 +872,9 @@ exit: int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; + struct bnxt_qplib_hwq_attr hwq_attr = {}; unsigned long int psn_search, poff = 0; + struct bnxt_qplib_sg_info sginfo = {}; struct sq_psn_search **psn_search_ptr; struct bnxt_qplib_q *sq = &qp->sq; struct bnxt_qplib_q *rq = &qp->rq; @@ -887,12 +901,15 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) sizeof(struct sq_psn_search_ext) : sizeof(struct sq_psn_search); } - sq->hwq.max_elements = sq->max_wqe; - rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, &sq->sg_info, - &sq->hwq.max_elements, - BNXT_QPLIB_MAX_SQE_ENTRY_SIZE, - psn_sz, - PAGE_SIZE, HWQ_TYPE_QUEUE); + + hwq_attr.res = res; + hwq_attr.sginfo = &sq->sg_info; + hwq_attr.stride = BNXT_QPLIB_MAX_SQE_ENTRY_SIZE; + hwq_attr.depth = sq->max_wqe; + hwq_attr.aux_stride = psn_sz; + hwq_attr.aux_depth = hwq_attr.depth; + hwq_attr.type = HWQ_TYPE_QUEUE; + rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr); if (rc) goto exit; @@ -956,12 +973,14 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) /* RQ */ if (rq->max_wqe) { - rq->hwq.max_elements = rq->max_wqe; - rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, - &rq->sg_info, - &rq->hwq.max_elements, - BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0, - PAGE_SIZE, HWQ_TYPE_QUEUE); + hwq_attr.res = res; + hwq_attr.sginfo = &rq->sg_info; + hwq_attr.stride = BNXT_QPLIB_MAX_RQE_ENTRY_SIZE; + hwq_attr.depth = rq->max_wqe; + hwq_attr.aux_stride = 0; + hwq_attr.aux_depth = 0; + hwq_attr.type = HWQ_TYPE_QUEUE; + rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr); if (rc) goto fail_sq; @@ -1029,10 +1048,17 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) req_size = xrrq->max_elements * BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1; req_size &= ~(PAGE_SIZE - 1); - rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL, - &xrrq->max_elements, - BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE, - 0, req_size, HWQ_TYPE_CTX); + sginfo.pgsize = req_size; + sginfo.pgshft = PAGE_SHIFT; + + hwq_attr.res = res; + hwq_attr.sginfo = &sginfo; + hwq_attr.depth = xrrq->max_elements; + hwq_attr.stride = BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE; + hwq_attr.aux_stride = 0; + hwq_attr.aux_depth = 0; + hwq_attr.type = HWQ_TYPE_CTX; + rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr); if (rc) goto fail_buf_free; pbl = &xrrq->pbl[PBL_LVL_0]; @@ -1044,11 +1070,10 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) req_size = xrrq->max_elements * BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1; req_size &= ~(PAGE_SIZE - 1); - - rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL, - &xrrq->max_elements, - 
BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE, - 0, req_size, HWQ_TYPE_CTX); + sginfo.pgsize = req_size; + hwq_attr.depth = xrrq->max_elements; + hwq_attr.stride = BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE; + rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr); if (rc) goto fail_orrq; @@ -1074,17 +1099,17 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) fail: if (qp->irrq.max_elements) - bnxt_qplib_free_hwq(res->pdev, &qp->irrq); + bnxt_qplib_free_hwq(res, &qp->irrq); fail_orrq: if (qp->orrq.max_elements) - bnxt_qplib_free_hwq(res->pdev, &qp->orrq); + bnxt_qplib_free_hwq(res, &qp->orrq); fail_buf_free: bnxt_qplib_free_qp_hdr_buf(res, qp); fail_rq: - bnxt_qplib_free_hwq(res->pdev, &rq->hwq); + bnxt_qplib_free_hwq(res, &rq->hwq); kfree(rq->swq); fail_sq: - bnxt_qplib_free_hwq(res->pdev, &sq->hwq); + bnxt_qplib_free_hwq(res, &sq->hwq); kfree(sq->swq); exit: return rc; @@ -1440,16 +1465,16 @@ void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) { bnxt_qplib_free_qp_hdr_buf(res, qp); - bnxt_qplib_free_hwq(res->pdev, &qp->sq.hwq); + bnxt_qplib_free_hwq(res, &qp->sq.hwq); kfree(qp->sq.swq); - bnxt_qplib_free_hwq(res->pdev, &qp->rq.hwq); + bnxt_qplib_free_hwq(res, &qp->rq.hwq); kfree(qp->rq.swq); if (qp->irrq.max_elements) - bnxt_qplib_free_hwq(res->pdev, &qp->irrq); + bnxt_qplib_free_hwq(res, &qp->irrq); if (qp->orrq.max_elements) - bnxt_qplib_free_hwq(res->pdev, &qp->orrq); + bnxt_qplib_free_hwq(res, &qp->orrq); } @@ -1927,17 +1952,19 @@ static void bnxt_qplib_arm_cq(struct bnxt_qplib_cq *cq, u32 arm_type) int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; - struct cmdq_create_cq req; + struct bnxt_qplib_hwq_attr hwq_attr = {}; struct creq_create_cq_resp resp; + struct cmdq_create_cq req; struct bnxt_qplib_pbl *pbl; u16 cmd_flags = 0; int rc; - cq->hwq.max_elements = cq->max_wqe; - rc = bnxt_qplib_alloc_init_hwq(res->pdev, &cq->hwq, &cq->sg_info, - &cq->hwq.max_elements, - BNXT_QPLIB_MAX_CQE_ENTRY_SIZE, 0, - PAGE_SIZE, HWQ_TYPE_QUEUE); + hwq_attr.res = res; + hwq_attr.depth = cq->max_wqe; + hwq_attr.stride = sizeof(struct cq_base); + hwq_attr.type = HWQ_TYPE_QUEUE; + hwq_attr.sginfo = &cq->sg_info; + rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr); if (rc) goto exit; @@ -1988,7 +2015,7 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq) return 0; fail: - bnxt_qplib_free_hwq(res->pdev, &cq->hwq); + bnxt_qplib_free_hwq(res, &cq->hwq); exit: return rc; } @@ -2008,7 +2035,7 @@ int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq) (void *)&resp, NULL, 0); if (rc) return rc; - bnxt_qplib_free_hwq(res->pdev, &cq->hwq); + bnxt_qplib_free_hwq(res, &cq->hwq); return 0; } diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h index 99e0a13cbefa..d3f080c18b27 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h @@ -550,7 +550,7 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq); void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type); void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq); -int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq); +int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq); void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp); void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp, unsigned 
long *flags); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index 1291b12287a5..fada81c300ba 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c @@ -520,9 +520,10 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, level = ctx->tim_tbl.level; req.tim_pg_size_tim_lvl = (level << CMDQ_INITIALIZE_FW_TIM_LVL_SFT) | __get_pbl_pg_idx(&ctx->tim_tbl.pbl[level]); - level = ctx->tqm_pde_level; - req.tqm_pg_size_tqm_lvl = (level << CMDQ_INITIALIZE_FW_TQM_LVL_SFT) | - __get_pbl_pg_idx(&ctx->tqm_pde.pbl[level]); + level = ctx->tqm_ctx.pde.level; + req.tqm_pg_size_tqm_lvl = + (level << CMDQ_INITIALIZE_FW_TQM_LVL_SFT) | + __get_pbl_pg_idx(&ctx->tqm_ctx.pde.pbl[level]); req.qpc_page_dir = cpu_to_le64(ctx->qpc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]); @@ -535,7 +536,7 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, req.tim_page_dir = cpu_to_le64(ctx->tim_tbl.pbl[PBL_LVL_0].pg_map_arr[0]); req.tqm_page_dir = - cpu_to_le64(ctx->tqm_pde.pbl[PBL_LVL_0].pg_map_arr[0]); + cpu_to_le64(ctx->tqm_ctx.pde.pbl[PBL_LVL_0].pg_map_arr[0]); req.number_of_qp = cpu_to_le32(ctx->qpc_tbl.max_elements); req.number_of_mrw = cpu_to_le32(ctx->mrw_tbl.max_elements); @@ -563,25 +564,32 @@ void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw) { kfree(rcfw->qp_tbl); kfree(rcfw->crsqe_tbl); - bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq); - bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->creq); + bnxt_qplib_free_hwq(rcfw->res, &rcfw->cmdq); + bnxt_qplib_free_hwq(rcfw->res, &rcfw->creq); rcfw->pdev = NULL; } -int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev, +int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res, struct bnxt_qplib_rcfw *rcfw, struct bnxt_qplib_ctx *ctx, int qp_tbl_sz) { - u8 hwq_type; - - rcfw->pdev = pdev; - rcfw->creq.max_elements = BNXT_QPLIB_CREQE_MAX_CNT; - hwq_type = bnxt_qplib_get_hwq_type(rcfw->res); - if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->creq, NULL, - &rcfw->creq.max_elements, - BNXT_QPLIB_CREQE_UNITS, - 0, PAGE_SIZE, hwq_type)) { + struct bnxt_qplib_hwq_attr hwq_attr = {}; + struct bnxt_qplib_sg_info sginfo = {}; + + rcfw->pdev = res->pdev; + rcfw->res = res; + + sginfo.pgsize = PAGE_SIZE; + sginfo.pgshft = PAGE_SHIFT; + + hwq_attr.sginfo = &sginfo; + hwq_attr.res = rcfw->res; + hwq_attr.depth = BNXT_QPLIB_CREQE_MAX_CNT; + hwq_attr.stride = BNXT_QPLIB_CREQE_UNITS; + hwq_attr.type = bnxt_qplib_get_hwq_type(res); + + if (bnxt_qplib_alloc_init_hwq(&rcfw->creq, &hwq_attr)) { dev_err(&rcfw->pdev->dev, "HW channel CREQ allocation failed\n"); goto fail; @@ -591,13 +599,11 @@ int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev, else rcfw->cmdq_depth = BNXT_QPLIB_CMDQE_MAX_CNT_8192; - rcfw->cmdq.max_elements = rcfw->cmdq_depth; - if (bnxt_qplib_alloc_init_hwq - (rcfw->pdev, &rcfw->cmdq, NULL, - &rcfw->cmdq.max_elements, - BNXT_QPLIB_CMDQE_UNITS, 0, - bnxt_qplib_cmdqe_page_size(rcfw->cmdq_depth), - HWQ_TYPE_CTX)) { + sginfo.pgsize = bnxt_qplib_cmdqe_page_size(rcfw->cmdq_depth); + hwq_attr.depth = rcfw->cmdq_depth; + hwq_attr.stride = BNXT_QPLIB_CMDQE_UNITS; + hwq_attr.type = HWQ_TYPE_CTX; + if (bnxt_qplib_alloc_init_hwq(&rcfw->cmdq, &hwq_attr)) { dev_err(&rcfw->pdev->dev, "HW channel CMDQ allocation failed\n"); goto fail; @@ -690,8 +696,7 @@ int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector, return 0; } -int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev, - struct bnxt_qplib_rcfw *rcfw, +int bnxt_qplib_enable_rcfw_channel(struct bnxt_qplib_rcfw 
*rcfw, int msix_vector, int cp_bar_reg_off, int virt_fn, int (*aeq_handler)(struct bnxt_qplib_rcfw *, @@ -699,10 +704,12 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev, { resource_size_t res_base; struct cmdq_init init; + struct pci_dev *pdev; u16 bmap_size; int rc; /* General */ + pdev = rcfw->pdev; rcfw->seq_num = 0; set_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags); bmap_size = BITS_TO_LONGS(rcfw->cmdq_depth) * sizeof(unsigned long); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h index dfeadc192e17..ab1531c7e27f 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h @@ -268,7 +268,7 @@ struct bnxt_qplib_rcfw { }; void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw); -int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev, +int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res, struct bnxt_qplib_rcfw *rcfw, struct bnxt_qplib_ctx *ctx, int qp_tbl_sz); @@ -276,8 +276,7 @@ void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill); void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw); int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector, bool need_init); -int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev, - struct bnxt_qplib_rcfw *rcfw, +int bnxt_qplib_enable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw, int msix_vector, int cp_bar_reg_off, int virt_fn, int (*aeq_handler)(struct bnxt_qplib_rcfw *, diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c index 60ea1b924b67..4346b95963cf 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c @@ -55,9 +55,10 @@ static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev, struct bnxt_qplib_stats *stats); /* PBL */ -static void __free_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl, +static void __free_pbl(struct bnxt_qplib_res *res, struct bnxt_qplib_pbl *pbl, bool is_umem) { + struct pci_dev *pdev = res->pdev; int i; if (!is_umem) { @@ -74,35 +75,57 @@ static void __free_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl, pbl->pg_arr[i] = NULL; } } - kfree(pbl->pg_arr); + vfree(pbl->pg_arr); pbl->pg_arr = NULL; - kfree(pbl->pg_map_arr); + vfree(pbl->pg_map_arr); pbl->pg_map_arr = NULL; pbl->pg_count = 0; pbl->pg_size = 0; } -static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl, - struct scatterlist *sghead, u32 pages, - u32 nmaps, u32 pg_size) +static void bnxt_qplib_fill_user_dma_pages(struct bnxt_qplib_pbl *pbl, + struct bnxt_qplib_sg_info *sginfo) { + struct scatterlist *sghead = sginfo->sghead; struct sg_dma_page_iter sg_iter; + int i = 0; + + for_each_sg_dma_page(sghead, &sg_iter, sginfo->nmap, 0) { + pbl->pg_map_arr[i] = sg_page_iter_dma_address(&sg_iter); + pbl->pg_arr[i] = NULL; + pbl->pg_count++; + i++; + } +} + +static int __alloc_pbl(struct bnxt_qplib_res *res, + struct bnxt_qplib_pbl *pbl, + struct bnxt_qplib_sg_info *sginfo) +{ + struct pci_dev *pdev = res->pdev; + struct scatterlist *sghead; bool is_umem = false; + u32 pages, pg_size; int i; + if (sginfo->nopte) + return 0; + pages = sginfo->npages; + pg_size = sginfo->pgsize; + sghead = sginfo->sghead; /* page ptr arrays */ - pbl->pg_arr = kcalloc(pages, sizeof(void *), GFP_KERNEL); + pbl->pg_arr = vmalloc(pages * sizeof(void *)); if (!pbl->pg_arr) return -ENOMEM; - pbl->pg_map_arr = kcalloc(pages, sizeof(dma_addr_t), GFP_KERNEL); + pbl->pg_map_arr = vmalloc(pages * sizeof(dma_addr_t)); if 
(!pbl->pg_map_arr) { - kfree(pbl->pg_arr); + vfree(pbl->pg_arr); pbl->pg_arr = NULL; return -ENOMEM; } pbl->pg_count = 0; - pbl->pg_size = pg_size; + pbl->pg_size = sginfo->pgsize; if (!sghead) { for (i = 0; i < pages; i++) { @@ -115,25 +138,19 @@ static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl, pbl->pg_count++; } } else { - i = 0; is_umem = true; - for_each_sg_dma_page(sghead, &sg_iter, nmaps, 0) { - pbl->pg_map_arr[i] = sg_page_iter_dma_address(&sg_iter); - pbl->pg_arr[i] = NULL; - pbl->pg_count++; - i++; - } + bnxt_qplib_fill_user_dma_pages(pbl, sginfo); } return 0; - fail: - __free_pbl(pdev, pbl, is_umem); + __free_pbl(res, pbl, is_umem); return -ENOMEM; } /* HWQ */ -void bnxt_qplib_free_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq) +void bnxt_qplib_free_hwq(struct bnxt_qplib_res *res, + struct bnxt_qplib_hwq *hwq) { int i; @@ -144,9 +161,9 @@ void bnxt_qplib_free_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq) for (i = 0; i < hwq->level + 1; i++) { if (i == hwq->level) - __free_pbl(pdev, &hwq->pbl[i], hwq->is_user); + __free_pbl(res, &hwq->pbl[i], hwq->is_user); else - __free_pbl(pdev, &hwq->pbl[i], false); + __free_pbl(res, &hwq->pbl[i], false); } hwq->level = PBL_LVL_MAX; @@ -158,79 +175,113 @@ void bnxt_qplib_free_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq) } /* All HWQs are power of 2 in size */ -int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq, - struct bnxt_qplib_sg_info *sg_info, - u32 *elements, u32 element_size, u32 aux, - u32 pg_size, enum bnxt_qplib_hwq_type hwq_type) + +int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq, + struct bnxt_qplib_hwq_attr *hwq_attr) { - u32 pages, maps, slots, size, aux_pages = 0, aux_size = 0; + u32 npages, aux_slots, pg_size, aux_pages = 0, aux_size = 0; + struct bnxt_qplib_sg_info sginfo = {}; + u32 depth, stride, npbl, npde; dma_addr_t *src_phys_ptr, **dst_virt_ptr; struct scatterlist *sghead = NULL; - int i, rc; - + struct bnxt_qplib_res *res; + struct pci_dev *pdev; + int i, rc, lvl; + + res = hwq_attr->res; + pdev = res->pdev; + sghead = hwq_attr->sginfo->sghead; + pg_size = hwq_attr->sginfo->pgsize; hwq->level = PBL_LVL_MAX; - slots = roundup_pow_of_two(*elements); - if (aux) { - aux_size = roundup_pow_of_two(aux); - aux_pages = (slots * aux_size) / pg_size; - if ((slots * aux_size) % pg_size) + depth = roundup_pow_of_two(hwq_attr->depth); + stride = roundup_pow_of_two(hwq_attr->stride); + if (hwq_attr->aux_depth) { + aux_slots = hwq_attr->aux_depth; + aux_size = roundup_pow_of_two(hwq_attr->aux_stride); + aux_pages = (aux_slots * aux_size) / pg_size; + if ((aux_slots * aux_size) % pg_size) aux_pages++; } - size = roundup_pow_of_two(element_size); - - if (sg_info) - sghead = sg_info->sglist; if (!sghead) { hwq->is_user = false; - pages = (slots * size) / pg_size + aux_pages; - if ((slots * size) % pg_size) - pages++; - if (!pages) + npages = (depth * stride) / pg_size + aux_pages; + if ((depth * stride) % pg_size) + npages++; + if (!npages) return -EINVAL; - maps = 0; + hwq_attr->sginfo->npages = npages; } else { hwq->is_user = true; - pages = sg_info->npages; - maps = sg_info->nmap; + npages = hwq_attr->sginfo->npages; + npages = (npages * PAGE_SIZE) / + BIT_ULL(hwq_attr->sginfo->pgshft); + if ((hwq_attr->sginfo->npages * PAGE_SIZE) % + BIT_ULL(hwq_attr->sginfo->pgshft)) + if (!npages) + npages++; } - /* Alloc the 1st memory block; can be a PDL/PTL/PBL */ - if (sghead && (pages == MAX_PBL_LVL_0_PGS)) - rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_0], sghead, 
- pages, maps, pg_size); - else - rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_0], NULL, - 1, 0, pg_size); - if (rc) - goto fail; - - hwq->level = PBL_LVL_0; + if (npages == MAX_PBL_LVL_0_PGS) { + /* This request is Level 0, map PTE */ + rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], hwq_attr->sginfo); + if (rc) + goto fail; + hwq->level = PBL_LVL_0; + } - if (pages > MAX_PBL_LVL_0_PGS) { - if (pages > MAX_PBL_LVL_1_PGS) { + if (npages > MAX_PBL_LVL_0_PGS) { + if (npages > MAX_PBL_LVL_1_PGS) { + u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ? + 0 : PTU_PTE_VALID; /* 2 levels of indirection */ - rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_1], NULL, - MAX_PBL_LVL_1_PGS_FOR_LVL_2, - 0, pg_size); + npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT; + if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT)) + npbl++; + npde = npbl >> MAX_PDL_LVL_SHIFT; + if (npbl % BIT(MAX_PDL_LVL_SHIFT)) + npde++; + /* Alloc PDE pages */ + sginfo.pgsize = npde * pg_size; + sginfo.npages = 1; + rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo); + + /* Alloc PBL pages */ + sginfo.npages = npbl; + sginfo.pgsize = PAGE_SIZE; + rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1], &sginfo); if (rc) goto fail; - /* Fill in lvl0 PBL */ + /* Fill PDL with PBL page pointers */ dst_virt_ptr = (dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr; src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr; - for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++) - dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] = - src_phys_ptr[i] | PTU_PDE_VALID; - hwq->level = PBL_LVL_1; - - rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_2], sghead, - pages, maps, pg_size); + if (hwq_attr->type == HWQ_TYPE_MR) { + /* For MR it is expected that we supply only 1 contigous + * page i.e only 1 entry in the PDL that will contain + * all the PBLs for the user supplied memory region + */ + for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; + i++) + dst_virt_ptr[0][i] = src_phys_ptr[i] | + flag; + } else { + for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; + i++) + dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] = + src_phys_ptr[i] | + PTU_PDE_VALID; + } + /* Alloc or init PTEs */ + rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_2], + hwq_attr->sginfo); if (rc) goto fail; - - /* Fill in lvl1 PBL */ + hwq->level = PBL_LVL_2; + if (hwq_attr->sginfo->nopte) + goto done; + /* Fill PBLs with PTE pointers */ dst_virt_ptr = (dma_addr_t **)hwq->pbl[PBL_LVL_1].pg_arr; src_phys_ptr = hwq->pbl[PBL_LVL_2].pg_map_arr; @@ -238,7 +289,7 @@ int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq, dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] = src_phys_ptr[i] | PTU_PTE_VALID; } - if (hwq_type == HWQ_TYPE_QUEUE) { + if (hwq_attr->type == HWQ_TYPE_QUEUE) { /* Find the last pg of the size */ i = hwq->pbl[PBL_LVL_2].pg_count; dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |= @@ -248,25 +299,36 @@ int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq, [PTR_IDX(i - 2)] |= PTU_PTE_NEXT_TO_LAST; } - hwq->level = PBL_LVL_2; - } else { - u32 flag = hwq_type == HWQ_TYPE_L2_CMPL ? 0 : - PTU_PTE_VALID; + } else { /* pages < 512 npbl = 1, npde = 0 */ + u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ? 
+ 0 : PTU_PTE_VALID; /* 1 level of indirection */ - rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_1], sghead, - pages, maps, pg_size); + npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT; + if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT)) + npbl++; + sginfo.npages = npbl; + sginfo.pgsize = PAGE_SIZE; + /* Alloc PBL page */ + rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo); if (rc) goto fail; - /* Fill in lvl0 PBL */ + /* Alloc or init PTEs */ + rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1], + hwq_attr->sginfo); + if (rc) + goto fail; + hwq->level = PBL_LVL_1; + if (hwq_attr->sginfo->nopte) + goto done; + /* Fill PBL with PTE pointers */ dst_virt_ptr = (dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr; src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr; - for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++) { + for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++) dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] = src_phys_ptr[i] | flag; - } - if (hwq_type == HWQ_TYPE_QUEUE) { + if (hwq_attr->type == HWQ_TYPE_QUEUE) { /* Find the last pg of the size */ i = hwq->pbl[PBL_LVL_1].pg_count; dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |= @@ -276,42 +338,141 @@ int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq, [PTR_IDX(i - 2)] |= PTU_PTE_NEXT_TO_LAST; } - hwq->level = PBL_LVL_1; } } - hwq->pdev = pdev; - spin_lock_init(&hwq->lock); +done: hwq->prod = 0; hwq->cons = 0; - *elements = hwq->max_elements = slots; - hwq->element_size = size; - + hwq->pdev = pdev; + hwq->depth = hwq_attr->depth; + hwq->max_elements = depth; + hwq->element_size = stride; /* For direct access to the elements */ - hwq->pbl_ptr = hwq->pbl[hwq->level].pg_arr; - hwq->pbl_dma_ptr = hwq->pbl[hwq->level].pg_map_arr; + lvl = hwq->level; + if (hwq_attr->sginfo->nopte && hwq->level) + lvl = hwq->level - 1; + hwq->pbl_ptr = hwq->pbl[lvl].pg_arr; + hwq->pbl_dma_ptr = hwq->pbl[lvl].pg_map_arr; + spin_lock_init(&hwq->lock); return 0; - fail: - bnxt_qplib_free_hwq(pdev, hwq); + bnxt_qplib_free_hwq(res, hwq); return -ENOMEM; } /* Context Tables */ -void bnxt_qplib_free_ctx(struct pci_dev *pdev, +void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res, struct bnxt_qplib_ctx *ctx) { int i; - bnxt_qplib_free_hwq(pdev, &ctx->qpc_tbl); - bnxt_qplib_free_hwq(pdev, &ctx->mrw_tbl); - bnxt_qplib_free_hwq(pdev, &ctx->srqc_tbl); - bnxt_qplib_free_hwq(pdev, &ctx->cq_tbl); - bnxt_qplib_free_hwq(pdev, &ctx->tim_tbl); + bnxt_qplib_free_hwq(res, &ctx->qpc_tbl); + bnxt_qplib_free_hwq(res, &ctx->mrw_tbl); + bnxt_qplib_free_hwq(res, &ctx->srqc_tbl); + bnxt_qplib_free_hwq(res, &ctx->cq_tbl); + bnxt_qplib_free_hwq(res, &ctx->tim_tbl); for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) - bnxt_qplib_free_hwq(pdev, &ctx->tqm_tbl[i]); - bnxt_qplib_free_hwq(pdev, &ctx->tqm_pde); - bnxt_qplib_free_stats_ctx(pdev, &ctx->stats); + bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.qtbl[i]); + /* restore original pde level before destroy */ + ctx->tqm_ctx.pde.level = ctx->tqm_ctx.pde_level; + bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.pde); + bnxt_qplib_free_stats_ctx(res->pdev, &ctx->stats); +} + +static int bnxt_qplib_alloc_tqm_rings(struct bnxt_qplib_res *res, + struct bnxt_qplib_ctx *ctx) +{ + struct bnxt_qplib_hwq_attr hwq_attr = {}; + struct bnxt_qplib_sg_info sginfo = {}; + struct bnxt_qplib_tqm_ctx *tqmctx; + int rc = 0; + int i; + + tqmctx = &ctx->tqm_ctx; + + sginfo.pgsize = PAGE_SIZE; + sginfo.pgshft = PAGE_SHIFT; + hwq_attr.sginfo = &sginfo; + hwq_attr.res = res; + hwq_attr.type = HWQ_TYPE_CTX; + hwq_attr.depth = 512; + hwq_attr.stride = sizeof(u64); + /* Alloc pdl buffer */ + rc = 
bnxt_qplib_alloc_init_hwq(&tqmctx->pde, &hwq_attr); + if (rc) + goto out; + /* Save original pdl level */ + tqmctx->pde_level = tqmctx->pde.level; + + hwq_attr.stride = 1; + for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) { + if (!tqmctx->qcount[i]) + continue; + hwq_attr.depth = ctx->qpc_count * tqmctx->qcount[i]; + rc = bnxt_qplib_alloc_init_hwq(&tqmctx->qtbl[i], &hwq_attr); + if (rc) + goto out; + } +out: + return rc; +} + +static void bnxt_qplib_map_tqm_pgtbl(struct bnxt_qplib_tqm_ctx *ctx) +{ + struct bnxt_qplib_hwq *tbl; + dma_addr_t *dma_ptr; + __le64 **pbl_ptr, *ptr; + int i, j, k; + int fnz_idx = -1; + int pg_count; + + pbl_ptr = (__le64 **)ctx->pde.pbl_ptr; + + for (i = 0, j = 0; i < MAX_TQM_ALLOC_REQ; + i++, j += MAX_TQM_ALLOC_BLK_SIZE) { + tbl = &ctx->qtbl[i]; + if (!tbl->max_elements) + continue; + if (fnz_idx == -1) + fnz_idx = i; /* first non-zero index */ + switch (tbl->level) { + case PBL_LVL_2: + pg_count = tbl->pbl[PBL_LVL_1].pg_count; + for (k = 0; k < pg_count; k++) { + ptr = &pbl_ptr[PTR_PG(j + k)][PTR_IDX(j + k)]; + dma_ptr = &tbl->pbl[PBL_LVL_1].pg_map_arr[k]; + *ptr = cpu_to_le64(*dma_ptr | PTU_PTE_VALID); + } + break; + case PBL_LVL_1: + case PBL_LVL_0: + default: + ptr = &pbl_ptr[PTR_PG(j)][PTR_IDX(j)]; + *ptr = cpu_to_le64(tbl->pbl[PBL_LVL_0].pg_map_arr[0] | + PTU_PTE_VALID); + break; + } + } + if (fnz_idx == -1) + fnz_idx = 0; + /* update pde level as per page table programming */ + ctx->pde.level = (ctx->qtbl[fnz_idx].level == PBL_LVL_2) ? PBL_LVL_2 : + ctx->qtbl[fnz_idx].level + 1; +} + +static int bnxt_qplib_setup_tqm_rings(struct bnxt_qplib_res *res, + struct bnxt_qplib_ctx *ctx) +{ + int rc = 0; + + rc = bnxt_qplib_alloc_tqm_rings(res, ctx); + if (rc) + goto fail; + + bnxt_qplib_map_tqm_pgtbl(&ctx->tqm_ctx); +fail: + return rc; } /* @@ -335,120 +496,72 @@ void bnxt_qplib_free_ctx(struct pci_dev *pdev, * Returns: * 0 if success, else -ERRORS */ -int bnxt_qplib_alloc_ctx(struct pci_dev *pdev, +int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res, struct bnxt_qplib_ctx *ctx, bool virt_fn, bool is_p5) { - int i, j, k, rc = 0; - int fnz_idx = -1; - __le64 **pbl_ptr; + struct bnxt_qplib_hwq_attr hwq_attr = {}; + struct bnxt_qplib_sg_info sginfo = {}; + int rc = 0; if (virt_fn || is_p5) goto stats_alloc; /* QPC Tables */ - ctx->qpc_tbl.max_elements = ctx->qpc_count; - rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->qpc_tbl, NULL, - &ctx->qpc_tbl.max_elements, - BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE, 0, - PAGE_SIZE, HWQ_TYPE_CTX); + sginfo.pgsize = PAGE_SIZE; + sginfo.pgshft = PAGE_SHIFT; + hwq_attr.sginfo = &sginfo; + + hwq_attr.res = res; + hwq_attr.depth = ctx->qpc_count; + hwq_attr.stride = BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE; + hwq_attr.type = HWQ_TYPE_CTX; + rc = bnxt_qplib_alloc_init_hwq(&ctx->qpc_tbl, &hwq_attr); if (rc) goto fail; /* MRW Tables */ - ctx->mrw_tbl.max_elements = ctx->mrw_count; - rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->mrw_tbl, NULL, - &ctx->mrw_tbl.max_elements, - BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE, 0, - PAGE_SIZE, HWQ_TYPE_CTX); + hwq_attr.depth = ctx->mrw_count; + hwq_attr.stride = BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE; + rc = bnxt_qplib_alloc_init_hwq(&ctx->mrw_tbl, &hwq_attr); if (rc) goto fail; /* SRQ Tables */ - ctx->srqc_tbl.max_elements = ctx->srqc_count; - rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->srqc_tbl, NULL, - &ctx->srqc_tbl.max_elements, - BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE, 0, - PAGE_SIZE, HWQ_TYPE_CTX); + hwq_attr.depth = ctx->srqc_count; + hwq_attr.stride = BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE; + rc = bnxt_qplib_alloc_init_hwq(&ctx->srqc_tbl, 
&hwq_attr); if (rc) goto fail; /* CQ Tables */ - ctx->cq_tbl.max_elements = ctx->cq_count; - rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->cq_tbl, NULL, - &ctx->cq_tbl.max_elements, - BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE, 0, - PAGE_SIZE, HWQ_TYPE_CTX); + hwq_attr.depth = ctx->cq_count; + hwq_attr.stride = BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE; + rc = bnxt_qplib_alloc_init_hwq(&ctx->cq_tbl, &hwq_attr); if (rc) goto fail; /* TQM Buffer */ - ctx->tqm_pde.max_elements = 512; - rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_pde, NULL, - &ctx->tqm_pde.max_elements, sizeof(u64), - 0, PAGE_SIZE, HWQ_TYPE_CTX); + rc = bnxt_qplib_setup_tqm_rings(res, ctx); if (rc) goto fail; - - for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) { - if (!ctx->tqm_count[i]) - continue; - ctx->tqm_tbl[i].max_elements = ctx->qpc_count * - ctx->tqm_count[i]; - rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_tbl[i], NULL, - &ctx->tqm_tbl[i].max_elements, 1, - 0, PAGE_SIZE, HWQ_TYPE_CTX); - if (rc) - goto fail; - } - pbl_ptr = (__le64 **)ctx->tqm_pde.pbl_ptr; - for (i = 0, j = 0; i < MAX_TQM_ALLOC_REQ; - i++, j += MAX_TQM_ALLOC_BLK_SIZE) { - if (!ctx->tqm_tbl[i].max_elements) - continue; - if (fnz_idx == -1) - fnz_idx = i; - switch (ctx->tqm_tbl[i].level) { - case PBL_LVL_2: - for (k = 0; k < ctx->tqm_tbl[i].pbl[PBL_LVL_1].pg_count; - k++) - pbl_ptr[PTR_PG(j + k)][PTR_IDX(j + k)] = - cpu_to_le64( - ctx->tqm_tbl[i].pbl[PBL_LVL_1].pg_map_arr[k] - | PTU_PTE_VALID); - break; - case PBL_LVL_1: - case PBL_LVL_0: - default: - pbl_ptr[PTR_PG(j)][PTR_IDX(j)] = cpu_to_le64( - ctx->tqm_tbl[i].pbl[PBL_LVL_0].pg_map_arr[0] | - PTU_PTE_VALID); - break; - } - } - if (fnz_idx == -1) - fnz_idx = 0; - ctx->tqm_pde_level = ctx->tqm_tbl[fnz_idx].level == PBL_LVL_2 ? - PBL_LVL_2 : ctx->tqm_tbl[fnz_idx].level + 1; - /* TIM Buffer */ ctx->tim_tbl.max_elements = ctx->qpc_count * 16; - rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tim_tbl, NULL, - &ctx->tim_tbl.max_elements, 1, - 0, PAGE_SIZE, HWQ_TYPE_CTX); + hwq_attr.depth = ctx->qpc_count * 16; + hwq_attr.stride = 1; + rc = bnxt_qplib_alloc_init_hwq(&ctx->tim_tbl, &hwq_attr); if (rc) goto fail; - stats_alloc: /* Stats */ - rc = bnxt_qplib_alloc_stats_ctx(pdev, &ctx->stats); + rc = bnxt_qplib_alloc_stats_ctx(res->pdev, &ctx->stats); if (rc) goto fail; return 0; fail: - bnxt_qplib_free_ctx(pdev, ctx); + bnxt_qplib_free_ctx(res, ctx); return rc; } @@ -808,9 +921,6 @@ void bnxt_qplib_free_res(struct bnxt_qplib_res *res) bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl); bnxt_qplib_free_pd_tbl(&res->pd_tbl); bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl); - - res->netdev = NULL; - res->pdev = NULL; } int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev, diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h index aaa76d792185..fe8a6dd7aeb1 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h @@ -55,7 +55,8 @@ extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero; enum bnxt_qplib_hwq_type { HWQ_TYPE_CTX, HWQ_TYPE_QUEUE, - HWQ_TYPE_L2_CMPL + HWQ_TYPE_L2_CMPL, + HWQ_TYPE_MR }; #define MAX_PBL_LVL_0_PGS 1 @@ -63,6 +64,7 @@ enum bnxt_qplib_hwq_type { #define MAX_PBL_LVL_1_PGS_SHIFT 9 #define MAX_PBL_LVL_1_PGS_FOR_LVL_2 256 #define MAX_PBL_LVL_2_PGS (256 * 512) +#define MAX_PDL_LVL_SHIFT 9 enum bnxt_qplib_pbl_lvl { PBL_LVL_0, @@ -85,17 +87,37 @@ struct bnxt_qplib_pbl { dma_addr_t *pg_map_arr; }; +struct bnxt_qplib_sg_info { + struct scatterlist *sghead; + u32 nmap; + u32 npages; + u32 pgshft; + u32 pgsize; + bool nopte; +}; 
+ +struct bnxt_qplib_hwq_attr { + struct bnxt_qplib_res *res; + struct bnxt_qplib_sg_info *sginfo; + enum bnxt_qplib_hwq_type type; + u32 depth; + u32 stride; + u32 aux_stride; + u32 aux_depth; +}; + struct bnxt_qplib_hwq { struct pci_dev *pdev; /* lock to protect qplib_hwq */ spinlock_t lock; - struct bnxt_qplib_pbl pbl[PBL_LVL_MAX]; + struct bnxt_qplib_pbl pbl[PBL_LVL_MAX + 1]; enum bnxt_qplib_pbl_lvl level; /* 0, 1, or 2 */ /* ptr for easy access to the PBL entries */ void **pbl_ptr; /* ptr for easy access to the dma_addr */ dma_addr_t *pbl_dma_ptr; u32 max_elements; + u32 depth; u16 element_size; /* Size of each entry */ u32 prod; /* raw */ @@ -159,6 +181,15 @@ struct bnxt_qplib_vf_res { #define BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE 64 #define BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE 128 +#define MAX_TQM_ALLOC_REQ 48 +#define MAX_TQM_ALLOC_BLK_SIZE 8 +struct bnxt_qplib_tqm_ctx { + struct bnxt_qplib_hwq pde; + u8 pde_level; /* Original level */ + struct bnxt_qplib_hwq qtbl[MAX_TQM_ALLOC_REQ]; + u8 qcount[MAX_TQM_ALLOC_REQ]; +}; + struct bnxt_qplib_ctx { u32 qpc_count; struct bnxt_qplib_hwq qpc_tbl; @@ -169,12 +200,7 @@ struct bnxt_qplib_ctx { u32 cq_count; struct bnxt_qplib_hwq cq_tbl; struct bnxt_qplib_hwq tim_tbl; -#define MAX_TQM_ALLOC_REQ 48 -#define MAX_TQM_ALLOC_BLK_SIZE 8 - u8 tqm_count[MAX_TQM_ALLOC_REQ]; - struct bnxt_qplib_hwq tqm_pde; - u32 tqm_pde_level; - struct bnxt_qplib_hwq tqm_tbl[MAX_TQM_ALLOC_REQ]; + struct bnxt_qplib_tqm_ctx tqm_ctx; struct bnxt_qplib_stats stats; struct bnxt_qplib_vf_res vf_res; u64 hwrm_intf_ver; @@ -223,11 +249,6 @@ static inline u8 bnxt_qplib_get_ring_type(struct bnxt_qplib_chip_ctx *cctx) RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL; } -struct bnxt_qplib_sg_info { - struct scatterlist *sglist; - u32 nmap; - u32 npages; -}; #define to_bnxt_qplib(ptr, type, member) \ container_of(ptr, type, member) @@ -235,11 +256,10 @@ struct bnxt_qplib_sg_info { struct bnxt_qplib_pd; struct bnxt_qplib_dev_attr; -void bnxt_qplib_free_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq); -int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq, - struct bnxt_qplib_sg_info *sg_info, u32 *elements, - u32 elements_per_page, u32 aux, u32 pg_size, - enum bnxt_qplib_hwq_type hwq_type); +void bnxt_qplib_free_hwq(struct bnxt_qplib_res *res, + struct bnxt_qplib_hwq *hwq); +int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq, + struct bnxt_qplib_hwq_attr *hwq_attr); void bnxt_qplib_get_guid(u8 *dev_addr, u8 *guid); int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pd_tbl, struct bnxt_qplib_pd *pd); @@ -258,9 +278,9 @@ void bnxt_qplib_free_res(struct bnxt_qplib_res *res); int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev, struct net_device *netdev, struct bnxt_qplib_dev_attr *dev_attr); -void bnxt_qplib_free_ctx(struct pci_dev *pdev, +void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res, struct bnxt_qplib_ctx *ctx); -int bnxt_qplib_alloc_ctx(struct pci_dev *pdev, +int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res, struct bnxt_qplib_ctx *ctx, bool virt_fn, bool is_p5); #endif /* __BNXT_QPLIB_RES_H__ */ diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c index 40296b97d21e..66954ff6a2f2 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c @@ -585,7 +585,7 @@ int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw) /* Free the qplib's MRW memory */ if (mrw->hwq.max_elements) - bnxt_qplib_free_hwq(res->pdev, &mrw->hwq); + 
bnxt_qplib_free_hwq(res, &mrw->hwq); return 0; } @@ -646,7 +646,7 @@ int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw, if (mrw->hwq.max_elements) { mrw->va = 0; mrw->total_size = 0; - bnxt_qplib_free_hwq(res->pdev, &mrw->hwq); + bnxt_qplib_free_hwq(res, &mrw->hwq); } return 0; @@ -656,10 +656,12 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr, u64 *pbl_tbl, int num_pbls, bool block, u32 buf_pg_size) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; - struct cmdq_register_mr req; + struct bnxt_qplib_hwq_attr hwq_attr = {}; + struct bnxt_qplib_sg_info sginfo = {}; struct creq_register_mr_resp resp; - u16 cmd_flags = 0, level; + struct cmdq_register_mr req; int pg_ptrs, pages, i, rc; + u16 cmd_flags = 0, level; dma_addr_t **pbl_ptr; u32 pg_size; @@ -674,20 +676,23 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr, if (pages > MAX_PBL_LVL_1_PGS) { dev_err(&res->pdev->dev, - "SP: Reg MR pages requested (0x%x) exceeded max (0x%x)\n", + "SP: Reg MR: pages requested (0x%x) exceeded max (0x%x)\n", pages, MAX_PBL_LVL_1_PGS); return -ENOMEM; } /* Free the hwq if it already exist, must be a rereg */ if (mr->hwq.max_elements) - bnxt_qplib_free_hwq(res->pdev, &mr->hwq); - - mr->hwq.max_elements = pages; + bnxt_qplib_free_hwq(res, &mr->hwq); /* Use system PAGE_SIZE */ - rc = bnxt_qplib_alloc_init_hwq(res->pdev, &mr->hwq, NULL, - &mr->hwq.max_elements, - PAGE_SIZE, 0, PAGE_SIZE, - HWQ_TYPE_CTX); + hwq_attr.res = res; + hwq_attr.depth = pages; + hwq_attr.stride = PAGE_SIZE; + hwq_attr.type = HWQ_TYPE_MR; + hwq_attr.sginfo = &sginfo; + hwq_attr.sginfo->npages = pages; + hwq_attr.sginfo->pgsize = PAGE_SIZE; + hwq_attr.sginfo->pgshft = PAGE_SHIFT; + rc = bnxt_qplib_alloc_init_hwq(&mr->hwq, &hwq_attr); if (rc) { dev_err(&res->pdev->dev, "SP: Reg MR memory allocation failed\n"); @@ -734,7 +739,7 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr, fail: if (mr->hwq.max_elements) - bnxt_qplib_free_hwq(res->pdev, &mr->hwq); + bnxt_qplib_free_hwq(res, &mr->hwq); return rc; } @@ -742,6 +747,8 @@ int bnxt_qplib_alloc_fast_reg_page_list(struct bnxt_qplib_res *res, struct bnxt_qplib_frpl *frpl, int max_pg_ptrs) { + struct bnxt_qplib_hwq_attr hwq_attr = {}; + struct bnxt_qplib_sg_info sginfo = {}; int pg_ptrs, pages, rc; /* Re-calculate the max to fit the HWQ allocation model */ @@ -753,10 +760,15 @@ int bnxt_qplib_alloc_fast_reg_page_list(struct bnxt_qplib_res *res, if (pages > MAX_PBL_LVL_1_PGS) return -ENOMEM; - frpl->hwq.max_elements = pages; - rc = bnxt_qplib_alloc_init_hwq(res->pdev, &frpl->hwq, NULL, - &frpl->hwq.max_elements, PAGE_SIZE, 0, - PAGE_SIZE, HWQ_TYPE_CTX); + sginfo.pgsize = PAGE_SIZE; + sginfo.nopte = true; + + hwq_attr.res = res; + hwq_attr.depth = pg_ptrs; + hwq_attr.stride = PAGE_SIZE; + hwq_attr.sginfo = &sginfo; + hwq_attr.type = HWQ_TYPE_CTX; + rc = bnxt_qplib_alloc_init_hwq(&frpl->hwq, &hwq_attr); if (!rc) frpl->max_pg_ptrs = pg_ptrs; @@ -766,7 +778,7 @@ int bnxt_qplib_alloc_fast_reg_page_list(struct bnxt_qplib_res *res, int bnxt_qplib_free_fast_reg_page_list(struct bnxt_qplib_res *res, struct bnxt_qplib_frpl *frpl) { - bnxt_qplib_free_hwq(res->pdev, &frpl->hwq); + bnxt_qplib_free_hwq(res, &frpl->hwq); return 0; } -- cgit v1.2.3-58-ga151 From b08fe048a69d8cb4c497b91c965fff754d3369f9 Mon Sep 17 00:00:00 2001 From: Devesh Sharma Date: Sat, 15 Feb 2020 12:11:01 -0500 Subject: RDMA/bnxt_re: Refactor net ring allocation function Introducing a new attribute structure to reduce the long 
list of arguments passed in bnxt_re_net_ring_alloc() function. The caller of bnxt_re_net_ring_alloc should fill in the list of attributes in bnxt_re_ring_attr structure and then pass the pointer to the function. Link: https://lore.kernel.org/r/1581786665-23705-5-git-send-email-devesh.sharma@broadcom.com Signed-off-by: Naresh Kumar PBS Signed-off-by: Selvin Xavier Signed-off-by: Devesh Sharma Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/bnxt_re/bnxt_re.h | 9 +++++ drivers/infiniband/hw/bnxt_re/main.c | 64 ++++++++++++++++++--------------- 2 files changed, 44 insertions(+), 29 deletions(-) diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h index 86274f4c8225..c736e8254e75 100644 --- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h +++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h @@ -89,6 +89,15 @@ #define BNXT_RE_DEFAULT_ACK_DELAY 16 +struct bnxt_re_ring_attr { + dma_addr_t *dma_arr; + int pages; + int type; + u32 depth; + u32 lrid; /* Logical ring id */ + u8 mode; +}; + struct bnxt_re_work { struct work_struct work; unsigned long event; diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index a966c68dbc59..f013bf687384 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -427,9 +427,9 @@ static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, return rc; } -static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr, - int pages, int type, u32 ring_mask, - u32 map_index, u16 *fw_ring_id) +static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, + struct bnxt_re_ring_attr *ring_attr, + u16 *fw_ring_id) { struct bnxt_en_dev *en_dev = rdev->en_dev; struct hwrm_ring_alloc_input req = {0}; @@ -443,18 +443,18 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr, memset(&fw_msg, 0, sizeof(fw_msg)); bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1); req.enables = 0; - req.page_tbl_addr = cpu_to_le64(dma_arr[0]); - if (pages > 1) { + req.page_tbl_addr = cpu_to_le64(ring_attr->dma_arr[0]); + if (ring_attr->pages > 1) { /* Page size is in log2 units */ req.page_size = BNXT_PAGE_SHIFT; req.page_tbl_depth = 1; } req.fbo = 0; /* Association of ring index with doorbell index and MSIX number */ - req.logical_id = cpu_to_le16(map_index); - req.length = cpu_to_le32(ring_mask + 1); - req.ring_type = type; - req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; + req.logical_id = cpu_to_le16(ring_attr->lrid); + req.length = cpu_to_le32(ring_attr->depth + 1); + req.ring_type = ring_attr->type; + req.int_mode = ring_attr->mode; bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp, sizeof(resp), DFLT_HWRM_CMD_TIMEOUT); rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg); @@ -1006,10 +1006,10 @@ static void bnxt_re_free_res(struct bnxt_re_dev *rdev) static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev) { + struct bnxt_re_ring_attr rattr = {}; + struct bnxt_qplib_ctx *qplib_ctx; int num_vec_created = 0; - dma_addr_t *pg_map; int rc = 0, i; - int pages; u8 type; /* Configure and allocate resources for qplib */ @@ -1030,10 +1030,13 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev) if (rc) goto dealloc_res; + qplib_ctx = &rdev->qplib_ctx; for (i = 0; i < rdev->num_msix - 1; i++) { - rdev->nq[i].res = &rdev->qplib_res; - rdev->nq[i].hwq.max_elements = BNXT_RE_MAX_CQ_COUNT + - BNXT_RE_MAX_SRQC_COUNT + 2; + struct bnxt_qplib_nq *nq; + + nq = &rdev->nq[i]; + nq->hwq.max_elements = 
(qplib_ctx->cq_count + + qplib_ctx->srqc_count + 2); rc = bnxt_qplib_alloc_nq(&rdev->qplib_res, &rdev->nq[i]); if (rc) { dev_err(rdev_to_dev(rdev), "Alloc Failed NQ%d rc:%#x", @@ -1041,12 +1044,13 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev) goto free_nq; } type = bnxt_qplib_get_ring_type(rdev->chip_ctx); - pg_map = rdev->nq[i].hwq.pbl[PBL_LVL_0].pg_map_arr; - pages = rdev->nq[i].hwq.pbl[rdev->nq[i].hwq.level].pg_count; - rc = bnxt_re_net_ring_alloc(rdev, pg_map, pages, type, - BNXT_QPLIB_NQE_MAX_CNT - 1, - rdev->msix_entries[i + 1].ring_idx, - &rdev->nq[i].ring_id); + rattr.dma_arr = nq->hwq.pbl[PBL_LVL_0].pg_map_arr; + rattr.pages = nq->hwq.pbl[rdev->nq[i].hwq.level].pg_count; + rattr.type = type; + rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX; + rattr.depth = BNXT_QPLIB_NQE_MAX_CNT - 1; + rattr.lrid = rdev->msix_entries[i + 1].ring_idx; + rc = bnxt_re_net_ring_alloc(rdev, &rattr, &nq->ring_id); if (rc) { dev_err(rdev_to_dev(rdev), "Failed to allocate NQ fw id with rc = 0x%x", @@ -1371,10 +1375,10 @@ static void bnxt_re_worker(struct work_struct *work) static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) { - dma_addr_t *pg_map; - u32 db_offt, ridx; - int pages, vid; + struct bnxt_re_ring_attr rattr; + u32 db_offt; bool locked; + int vid; u8 type; int rc; @@ -1383,6 +1387,7 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) locked = true; /* Registered a new RoCE device instance to netdev */ + memset(&rattr, 0, sizeof(rattr)); rc = bnxt_re_register_netdev(rdev); if (rc) { rtnl_unlock(); @@ -1422,12 +1427,13 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) } type = bnxt_qplib_get_ring_type(rdev->chip_ctx); - pg_map = rdev->rcfw.creq.pbl[PBL_LVL_0].pg_map_arr; - pages = rdev->rcfw.creq.pbl[rdev->rcfw.creq.level].pg_count; - ridx = rdev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx; - rc = bnxt_re_net_ring_alloc(rdev, pg_map, pages, type, - BNXT_QPLIB_CREQE_MAX_CNT - 1, - ridx, &rdev->rcfw.creq_ring_id); + rattr.dma_arr = rdev->rcfw.creq.pbl[PBL_LVL_0].pg_map_arr; + rattr.pages = rdev->rcfw.creq.pbl[rdev->rcfw.creq.level].pg_count; + rattr.type = type; + rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX; + rattr.depth = BNXT_QPLIB_CREQE_MAX_CNT - 1; + rattr.lrid = rdev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx; + rc = bnxt_re_net_ring_alloc(rdev, &rattr, &rdev->rcfw.creq_ring_id); if (rc) { pr_err("Failed to allocate CREQ: %#x\n", rc); goto free_rcfw; -- cgit v1.2.3-58-ga151 From cee0c7bba4869170fd471758053406784eba35a5 Mon Sep 17 00:00:00 2001 From: Devesh Sharma Date: Sat, 15 Feb 2020 12:11:02 -0500 Subject: RDMA/bnxt_re: Refactor command queue management code Refactoring the command queue (rcfw) management code. A new data-structure is introduced to describe the bar register. each object which deals with mmio space should have a descriptor structure. This structure specifically hold DB register information. Thus, slow path creq structure now hold a bar register descriptor. Further cleanup the rcfw structure to introduce the command queue context and command response event queue context structures. Rest of the rcfw related code has been touched to incorporate these three structures. 
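For readers skimming the series, the shape of this split can be seen in a small stand-alone sketch. This is illustrative only, not the driver code: the names mirror bnxt_qplib_reg_desc and the new cmdq/creq context structures, but plain pointers and a byte array stand in for the __iomem mapping and the PCI BAR, and only a few representative fields are shown.

  /* Minimal userspace sketch of the descriptor/context split (not kernel code). */
  #include <stdint.h>
  #include <stdio.h>

  struct reg_desc {            /* stands in for bnxt_qplib_reg_desc */
          uint8_t        bar_id;
          uint64_t       bar_base;
          unsigned char *bar_reg;   /* mapped base; void __iomem * in the driver */
          size_t         len;
  };

  struct cmdq_ctx {            /* stands in for bnxt_qplib_cmdq_ctx */
          struct reg_desc reg;
          unsigned char  *prod;     /* producer register inside the mapping */
          unsigned char  *db;       /* trigger/doorbell register */
          uint32_t        seq_num;
  };

  struct creq_ctx {            /* stands in for bnxt_qplib_creq_ctx */
          struct reg_desc reg;
          unsigned char  *db;
          uint16_t        ring_id;
  };

  struct rcfw {                /* the slimmed-down owner of both contexts */
          struct cmdq_ctx cmdq;
          struct creq_ctx creq;
  };

  int main(void)
  {
          static unsigned char fake_bar[4096];  /* pretend PCI BAR */
          struct rcfw rcfw = { 0 };

          /* map-time code fills the descriptor once ... */
          rcfw.cmdq.reg.bar_id  = 0;
          rcfw.cmdq.reg.bar_reg = fake_bar;
          rcfw.cmdq.reg.len     = sizeof(fake_bar);
          rcfw.cmdq.prod        = fake_bar + 0x8;
          rcfw.cmdq.db          = fake_bar + 0xc;

          /* ... and the send path only dereferences the context it owns */
          printf("cmdq prod register at BAR offset %td\n",
                 rcfw.cmdq.prod - rcfw.cmdq.reg.bar_reg);
          return 0;
  }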
Link: https://lore.kernel.org/r/1581786665-23705-6-git-send-email-devesh.sharma@broadcom.com Signed-off-by: Naresh Kumar PBS Signed-off-by: Selvin Xavier Signed-off-by: Devesh Sharma Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/bnxt_re/main.c | 12 +- drivers/infiniband/hw/bnxt_re/qplib_rcfw.c | 419 +++++++++++++++++------------ drivers/infiniband/hw/bnxt_re/qplib_rcfw.h | 80 +++--- drivers/infiniband/hw/bnxt_re/qplib_res.h | 7 + 4 files changed, 313 insertions(+), 205 deletions(-) diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index f013bf687384..65f106f84c33 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -1344,7 +1344,7 @@ static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev) bnxt_qplib_free_ctx(&rdev->qplib_res, &rdev->qplib_ctx); bnxt_qplib_disable_rcfw_channel(&rdev->rcfw); type = bnxt_qplib_get_ring_type(rdev->chip_ctx); - bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, type); + bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type); bnxt_qplib_free_rcfw_channel(&rdev->rcfw); } if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags)) { @@ -1375,6 +1375,7 @@ static void bnxt_re_worker(struct work_struct *work) static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) { + struct bnxt_qplib_creq_ctx *creq; struct bnxt_re_ring_attr rattr; u32 db_offt; bool locked; @@ -1427,13 +1428,14 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) } type = bnxt_qplib_get_ring_type(rdev->chip_ctx); - rattr.dma_arr = rdev->rcfw.creq.pbl[PBL_LVL_0].pg_map_arr; - rattr.pages = rdev->rcfw.creq.pbl[rdev->rcfw.creq.level].pg_count; + creq = &rdev->rcfw.creq; + rattr.dma_arr = creq->hwq.pbl[PBL_LVL_0].pg_map_arr; + rattr.pages = creq->hwq.pbl[creq->hwq.level].pg_count; rattr.type = type; rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX; rattr.depth = BNXT_QPLIB_CREQE_MAX_CNT - 1; rattr.lrid = rdev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx; - rc = bnxt_re_net_ring_alloc(rdev, &rattr, &rdev->rcfw.creq_ring_id); + rc = bnxt_re_net_ring_alloc(rdev, &rattr, &creq->ring_id); if (rc) { pr_err("Failed to allocate CREQ: %#x\n", rc); goto free_rcfw; @@ -1527,7 +1529,7 @@ disable_rcfw: bnxt_qplib_disable_rcfw_channel(&rdev->rcfw); free_ring: type = bnxt_qplib_get_ring_type(rdev->chip_ctx); - bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, type); + bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type); free_rcfw: bnxt_qplib_free_rcfw_channel(&rdev->rcfw); fail: diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index fada81c300ba..119113ecfb64 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c @@ -55,12 +55,14 @@ static void bnxt_qplib_service_creq(unsigned long data); /* Hardware communication channel */ static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie) { + struct bnxt_qplib_cmdq_ctx *cmdq; u16 cbit; int rc; + cmdq = &rcfw->cmdq; cbit = cookie % rcfw->cmdq_depth; - rc = wait_event_timeout(rcfw->waitq, - !test_bit(cbit, rcfw->cmdq_bitmap), + rc = wait_event_timeout(cmdq->waitq, + !test_bit(cbit, cmdq->cmdq_bitmap), msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS)); return rc ? 
0 : -ETIMEDOUT; }; @@ -68,15 +70,17 @@ static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie) static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie) { u32 count = RCFW_BLOCKED_CMD_WAIT_COUNT; + struct bnxt_qplib_cmdq_ctx *cmdq; u16 cbit; + cmdq = &rcfw->cmdq; cbit = cookie % rcfw->cmdq_depth; - if (!test_bit(cbit, rcfw->cmdq_bitmap)) + if (!test_bit(cbit, cmdq->cmdq_bitmap)) goto done; do { mdelay(1); /* 1m sec */ bnxt_qplib_service_creq((unsigned long)rcfw); - } while (test_bit(cbit, rcfw->cmdq_bitmap) && --count); + } while (test_bit(cbit, cmdq->cmdq_bitmap) && --count); done: return count ? 0 : -ETIMEDOUT; }; @@ -84,56 +88,61 @@ done: static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req, struct creq_base *resp, void *sb, u8 is_block) { - struct bnxt_qplib_cmdqe *cmdqe, **cmdq_ptr; - struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq; + struct bnxt_qplib_cmdq_ctx *cmdq = &rcfw->cmdq; + struct bnxt_qplib_cmdqe *cmdqe, **hwq_ptr; + struct bnxt_qplib_hwq *hwq = &cmdq->hwq; + struct bnxt_qplib_crsqe *crsqe; u32 cmdq_depth = rcfw->cmdq_depth; - struct bnxt_qplib_crsq *crsqe; u32 sw_prod, cmdq_prod; + struct pci_dev *pdev; unsigned long flags; u32 size, opcode; u16 cookie, cbit; + int pg, idx; u8 *preq; + pdev = rcfw->pdev; + opcode = req->opcode; - if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) && + if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &cmdq->flags) && (opcode != CMDQ_BASE_OPCODE_QUERY_FUNC && opcode != CMDQ_BASE_OPCODE_INITIALIZE_FW && opcode != CMDQ_BASE_OPCODE_QUERY_VERSION)) { - dev_err(&rcfw->pdev->dev, + dev_err(&pdev->dev, "RCFW not initialized, reject opcode 0x%x\n", opcode); return -EINVAL; } - if (test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) && + if (test_bit(FIRMWARE_INITIALIZED_FLAG, &cmdq->flags) && opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) { - dev_err(&rcfw->pdev->dev, "RCFW already initialized!\n"); + dev_err(&pdev->dev, "RCFW already initialized!\n"); return -EINVAL; } - if (test_bit(FIRMWARE_TIMED_OUT, &rcfw->flags)) + if (test_bit(FIRMWARE_TIMED_OUT, &cmdq->flags)) return -ETIMEDOUT; /* Cmdq are in 16-byte units, each request can consume 1 or more * cmdqe */ - spin_lock_irqsave(&cmdq->lock, flags); - if (req->cmd_size >= HWQ_FREE_SLOTS(cmdq)) { - dev_err(&rcfw->pdev->dev, "RCFW: CMDQ is full!\n"); - spin_unlock_irqrestore(&cmdq->lock, flags); + spin_lock_irqsave(&hwq->lock, flags); + if (req->cmd_size >= HWQ_FREE_SLOTS(hwq)) { + dev_err(&pdev->dev, "RCFW: CMDQ is full!\n"); + spin_unlock_irqrestore(&hwq->lock, flags); return -EAGAIN; } - cookie = rcfw->seq_num & RCFW_MAX_COOKIE_VALUE; + cookie = cmdq->seq_num & RCFW_MAX_COOKIE_VALUE; cbit = cookie % rcfw->cmdq_depth; if (is_block) cookie |= RCFW_CMD_IS_BLOCKING; - set_bit(cbit, rcfw->cmdq_bitmap); + set_bit(cbit, cmdq->cmdq_bitmap); req->cookie = cpu_to_le16(cookie); crsqe = &rcfw->crsqe_tbl[cbit]; if (crsqe->resp) { - spin_unlock_irqrestore(&cmdq->lock, flags); + spin_unlock_irqrestore(&hwq->lock, flags); return -EBUSY; } @@ -155,15 +164,18 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req, BNXT_QPLIB_CMDQE_UNITS; } - cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr; + hwq_ptr = (struct bnxt_qplib_cmdqe **)hwq->pbl_ptr; preq = (u8 *)req; do { + pg = 0; + idx = 0; + /* Locate the next cmdq slot */ - sw_prod = HWQ_CMP(cmdq->prod, cmdq); - cmdqe = &cmdq_ptr[get_cmdq_pg(sw_prod, cmdq_depth)] + sw_prod = HWQ_CMP(hwq->prod, hwq); + cmdqe = &hwq_ptr[get_cmdq_pg(sw_prod, cmdq_depth)] [get_cmdq_idx(sw_prod, cmdq_depth)]; if (!cmdqe) { - 
dev_err(&rcfw->pdev->dev, + dev_err(&pdev->dev, "RCFW request failed with no cmdqe!\n"); goto done; } @@ -172,31 +184,27 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req, memcpy(cmdqe, preq, min_t(u32, size, sizeof(*cmdqe))); preq += min_t(u32, size, sizeof(*cmdqe)); size -= min_t(u32, size, sizeof(*cmdqe)); - cmdq->prod++; - rcfw->seq_num++; + hwq->prod++; } while (size > 0); + cmdq->seq_num++; - rcfw->seq_num++; - - cmdq_prod = cmdq->prod; - if (test_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags)) { + cmdq_prod = hwq->prod; + if (test_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags)) { /* The very first doorbell write * is required to set this flag * which prompts the FW to reset * its internal pointers */ cmdq_prod |= BIT(FIRMWARE_FIRST_FLAG); - clear_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags); + clear_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags); } /* ring CMDQ DB */ wmb(); - writel(cmdq_prod, rcfw->cmdq_bar_reg_iomem + - rcfw->cmdq_bar_reg_prod_off); - writel(RCFW_CMDQ_TRIG_VAL, rcfw->cmdq_bar_reg_iomem + - rcfw->cmdq_bar_reg_trig_off); + writel(cmdq_prod, cmdq->cmdq_mbox.prod); + writel(RCFW_CMDQ_TRIG_VAL, cmdq->cmdq_mbox.db); done: - spin_unlock_irqrestore(&cmdq->lock, flags); + spin_unlock_irqrestore(&hwq->lock, flags); /* Return the CREQ response pointer */ return 0; } @@ -236,7 +244,7 @@ int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, /* timed out */ dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x timedout (%d)msec\n", cookie, opcode, RCFW_CMD_WAIT_TIME_MS); - set_bit(FIRMWARE_TIMED_OUT, &rcfw->flags); + set_bit(FIRMWARE_TIMED_OUT, &rcfw->cmdq.flags); return rc; } @@ -253,6 +261,8 @@ int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw, struct creq_func_event *func_event) { + int rc; + switch (func_event->event) { case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR: break; @@ -286,37 +296,41 @@ static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw, default: return -EINVAL; } - return 0; + + rc = rcfw->creq.aeq_handler(rcfw, (void *)func_event, NULL); + return rc; } static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, struct creq_qp_event *qp_event) { - struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq; struct creq_qp_error_notification *err_event; - struct bnxt_qplib_crsq *crsqe; - unsigned long flags; + struct bnxt_qplib_hwq *hwq = &rcfw->cmdq.hwq; + struct bnxt_qplib_crsqe *crsqe; struct bnxt_qplib_qp *qp; u16 cbit, blocked = 0; - u16 cookie; + struct pci_dev *pdev; + unsigned long flags; __le16 mcookie; + u16 cookie; + int rc = 0; u32 qp_id; + pdev = rcfw->pdev; switch (qp_event->event) { case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION: err_event = (struct creq_qp_error_notification *)qp_event; qp_id = le32_to_cpu(err_event->xid); qp = rcfw->qp_tbl[qp_id].qp_handle; - dev_dbg(&rcfw->pdev->dev, - "Received QP error notification\n"); - dev_dbg(&rcfw->pdev->dev, + dev_dbg(&pdev->dev, "Received QP error notification\n"); + dev_dbg(&pdev->dev, "qpid 0x%x, req_err=0x%x, resp_err=0x%x\n", qp_id, err_event->req_err_state_reason, err_event->res_err_state_reason); if (!qp) break; bnxt_qplib_mark_qp_error(qp); - rcfw->aeq_handler(rcfw, qp_event, qp); + rc = rcfw->creq.aeq_handler(rcfw, qp_event, qp); break; default: /* @@ -328,7 +342,7 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, * */ - spin_lock_irqsave_nested(&cmdq->lock, flags, + spin_lock_irqsave_nested(&hwq->lock, flags, SINGLE_DEPTH_NESTING); cookie = le16_to_cpu(qp_event->cookie); mcookie = 
qp_event->cookie; @@ -342,23 +356,23 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, crsqe->resp = NULL; } else { if (crsqe->resp && crsqe->resp->cookie) - dev_err(&rcfw->pdev->dev, + dev_err(&pdev->dev, "CMD %s cookie sent=%#x, recd=%#x\n", crsqe->resp ? "mismatch" : "collision", crsqe->resp ? crsqe->resp->cookie : 0, mcookie); } - if (!test_and_clear_bit(cbit, rcfw->cmdq_bitmap)) - dev_warn(&rcfw->pdev->dev, + if (!test_and_clear_bit(cbit, rcfw->cmdq.cmdq_bitmap)) + dev_warn(&pdev->dev, "CMD bit %d was not requested\n", cbit); - cmdq->cons += crsqe->req_size; + hwq->cons += crsqe->req_size; crsqe->req_size = 0; if (!blocked) - wake_up(&rcfw->waitq); - spin_unlock_irqrestore(&cmdq->lock, flags); + wake_up(&rcfw->cmdq.waitq); + spin_unlock_irqrestore(&hwq->lock, flags); } - return 0; + return rc; } /* SP - CREQ Completion handlers */ @@ -366,20 +380,21 @@ static void bnxt_qplib_service_creq(unsigned long data) { struct bnxt_qplib_rcfw *rcfw = (struct bnxt_qplib_rcfw *)data; bool gen_p5 = bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx); - struct bnxt_qplib_hwq *creq = &rcfw->creq; + struct bnxt_qplib_creq_ctx *creq = &rcfw->creq; u32 type, budget = CREQ_ENTRY_POLL_BUDGET; - struct creq_base *creqe, **creq_ptr; + struct bnxt_qplib_hwq *hwq = &creq->hwq; + struct creq_base *creqe, **hwq_ptr; u32 sw_cons, raw_cons; unsigned long flags; /* Service the CREQ until budget is over */ - spin_lock_irqsave(&creq->lock, flags); - raw_cons = creq->cons; + spin_lock_irqsave(&hwq->lock, flags); + raw_cons = hwq->cons; while (budget > 0) { - sw_cons = HWQ_CMP(raw_cons, creq); - creq_ptr = (struct creq_base **)creq->pbl_ptr; - creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)]; - if (!CREQ_CMP_VALID(creqe, raw_cons, creq->max_elements)) + sw_cons = HWQ_CMP(raw_cons, hwq); + hwq_ptr = (struct creq_base **)hwq->pbl_ptr; + creqe = &hwq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)]; + if (!CREQ_CMP_VALID(creqe, raw_cons, hwq->max_elements)) break; /* The valid test of the entry must be done first before * reading any further. 
@@ -391,12 +406,12 @@ static void bnxt_qplib_service_creq(unsigned long data) case CREQ_BASE_TYPE_QP_EVENT: bnxt_qplib_process_qp_event (rcfw, (struct creq_qp_event *)creqe); - rcfw->creq_qp_event_processed++; + creq->stats.creq_qp_event_processed++; break; case CREQ_BASE_TYPE_FUNC_EVENT: if (!bnxt_qplib_process_func_event (rcfw, (struct creq_func_event *)creqe)) - rcfw->creq_func_event_processed++; + creq->stats.creq_func_event_processed++; else dev_warn(&rcfw->pdev->dev, "aeqe:%#x Not handled\n", type); @@ -412,28 +427,31 @@ static void bnxt_qplib_service_creq(unsigned long data) budget--; } - if (creq->cons != raw_cons) { - creq->cons = raw_cons; - bnxt_qplib_ring_creq_db_rearm(rcfw->creq_bar_reg_iomem, - raw_cons, creq->max_elements, - rcfw->creq_ring_id, gen_p5); + if (hwq->cons != raw_cons) { + hwq->cons = raw_cons; + bnxt_qplib_ring_creq_db_rearm(creq->creq_db.db, + raw_cons, hwq->max_elements, + creq->ring_id, gen_p5); } - spin_unlock_irqrestore(&creq->lock, flags); + spin_unlock_irqrestore(&hwq->lock, flags); } static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance) { struct bnxt_qplib_rcfw *rcfw = dev_instance; - struct bnxt_qplib_hwq *creq = &rcfw->creq; + struct bnxt_qplib_creq_ctx *creq; struct creq_base **creq_ptr; + struct bnxt_qplib_hwq *hwq; u32 sw_cons; + creq = &rcfw->creq; + hwq = &creq->hwq; /* Prefetch the CREQ element */ - sw_cons = HWQ_CMP(creq->cons, creq); - creq_ptr = (struct creq_base **)rcfw->creq.pbl_ptr; + sw_cons = HWQ_CMP(hwq->cons, hwq); + creq_ptr = (struct creq_base **)creq->hwq.pbl_ptr; prefetch(&creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)]); - tasklet_schedule(&rcfw->worker); + tasklet_schedule(&creq->creq_tasklet); return IRQ_HANDLED; } @@ -452,7 +470,7 @@ int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw) if (rc) return rc; - clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags); + clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->cmdq.flags); return 0; } @@ -556,16 +574,17 @@ skip_ctx_setup: NULL, 0); if (rc) return rc; - set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags); + set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->cmdq.flags); return 0; } void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw) { + kfree(rcfw->cmdq.cmdq_bitmap); kfree(rcfw->qp_tbl); kfree(rcfw->crsqe_tbl); - bnxt_qplib_free_hwq(rcfw->res, &rcfw->cmdq); - bnxt_qplib_free_hwq(rcfw->res, &rcfw->creq); + bnxt_qplib_free_hwq(rcfw->res, &rcfw->cmdq.hwq); + bnxt_qplib_free_hwq(rcfw->res, &rcfw->creq.hwq); rcfw->pdev = NULL; } @@ -576,8 +595,13 @@ int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res, { struct bnxt_qplib_hwq_attr hwq_attr = {}; struct bnxt_qplib_sg_info sginfo = {}; + struct bnxt_qplib_cmdq_ctx *cmdq; + struct bnxt_qplib_creq_ctx *creq; + u32 bmap_size = 0; rcfw->pdev = res->pdev; + cmdq = &rcfw->cmdq; + creq = &rcfw->creq; rcfw->res = res; sginfo.pgsize = PAGE_SIZE; @@ -589,7 +613,7 @@ int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res, hwq_attr.stride = BNXT_QPLIB_CREQE_UNITS; hwq_attr.type = bnxt_qplib_get_hwq_type(res); - if (bnxt_qplib_alloc_init_hwq(&rcfw->creq, &hwq_attr)) { + if (bnxt_qplib_alloc_init_hwq(&creq->hwq, &hwq_attr)) { dev_err(&rcfw->pdev->dev, "HW channel CREQ allocation failed\n"); goto fail; @@ -603,17 +627,24 @@ int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res, hwq_attr.depth = rcfw->cmdq_depth; hwq_attr.stride = BNXT_QPLIB_CMDQE_UNITS; hwq_attr.type = HWQ_TYPE_CTX; - if (bnxt_qplib_alloc_init_hwq(&rcfw->cmdq, &hwq_attr)) { + if (bnxt_qplib_alloc_init_hwq(&cmdq->hwq, &hwq_attr)) { 
dev_err(&rcfw->pdev->dev, "HW channel CMDQ allocation failed\n"); goto fail; } - rcfw->crsqe_tbl = kcalloc(rcfw->cmdq.max_elements, + rcfw->crsqe_tbl = kcalloc(cmdq->hwq.max_elements, sizeof(*rcfw->crsqe_tbl), GFP_KERNEL); if (!rcfw->crsqe_tbl) goto fail; + bmap_size = BITS_TO_LONGS(rcfw->cmdq_depth) * sizeof(unsigned long); + cmdq->cmdq_bitmap = kzalloc(bmap_size, GFP_KERNEL); + if (!cmdq->cmdq_bitmap) + goto fail; + + cmdq->bmap_size = bmap_size; + rcfw->qp_tbl_size = qp_tbl_sz; rcfw->qp_tbl = kcalloc(qp_tbl_sz, sizeof(struct bnxt_qplib_qp_node), GFP_KERNEL); @@ -630,137 +661,201 @@ fail: void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill) { bool gen_p5 = bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx); + struct bnxt_qplib_creq_ctx *creq; - tasklet_disable(&rcfw->worker); + creq = &rcfw->creq; + tasklet_disable(&creq->creq_tasklet); /* Mask h/w interrupts */ - bnxt_qplib_ring_creq_db(rcfw->creq_bar_reg_iomem, rcfw->creq.cons, - rcfw->creq.max_elements, rcfw->creq_ring_id, + bnxt_qplib_ring_creq_db(creq->creq_db.db, creq->hwq.cons, + creq->hwq.max_elements, creq->ring_id, gen_p5); /* Sync with last running IRQ-handler */ - synchronize_irq(rcfw->vector); + synchronize_irq(creq->msix_vec); if (kill) - tasklet_kill(&rcfw->worker); + tasklet_kill(&creq->creq_tasklet); - if (rcfw->requested) { - free_irq(rcfw->vector, rcfw); - rcfw->requested = false; + if (creq->requested) { + free_irq(creq->msix_vec, rcfw); + creq->requested = false; } } void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw) { + struct bnxt_qplib_creq_ctx *creq; + struct bnxt_qplib_cmdq_ctx *cmdq; unsigned long indx; + creq = &rcfw->creq; + cmdq = &rcfw->cmdq; + /* Make sure the HW channel is stopped! */ bnxt_qplib_rcfw_stop_irq(rcfw, true); - iounmap(rcfw->cmdq_bar_reg_iomem); - iounmap(rcfw->creq_bar_reg_iomem); + iounmap(cmdq->cmdq_mbox.reg.bar_reg); + iounmap(creq->creq_db.reg.bar_reg); - indx = find_first_bit(rcfw->cmdq_bitmap, rcfw->bmap_size); - if (indx != rcfw->bmap_size) + indx = find_first_bit(cmdq->cmdq_bitmap, cmdq->bmap_size); + if (indx != cmdq->bmap_size) dev_err(&rcfw->pdev->dev, "disabling RCFW with pending cmd-bit %lx\n", indx); - kfree(rcfw->cmdq_bitmap); - rcfw->bmap_size = 0; - rcfw->cmdq_bar_reg_iomem = NULL; - rcfw->creq_bar_reg_iomem = NULL; - rcfw->aeq_handler = NULL; - rcfw->vector = 0; + cmdq->cmdq_mbox.reg.bar_reg = NULL; + creq->creq_db.reg.bar_reg = NULL; + creq->aeq_handler = NULL; + creq->msix_vec = 0; } int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector, bool need_init) { bool gen_p5 = bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx); + struct bnxt_qplib_creq_ctx *creq; int rc; - if (rcfw->requested) + creq = &rcfw->creq; + + if (creq->requested) return -EFAULT; - rcfw->vector = msix_vector; + creq->msix_vec = msix_vector; if (need_init) - tasklet_init(&rcfw->worker, + tasklet_init(&creq->creq_tasklet, bnxt_qplib_service_creq, (unsigned long)rcfw); else - tasklet_enable(&rcfw->worker); - rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0, + tasklet_enable(&creq->creq_tasklet); + rc = request_irq(creq->msix_vec, bnxt_qplib_creq_irq, 0, "bnxt_qplib_creq", rcfw); if (rc) return rc; - rcfw->requested = true; - bnxt_qplib_ring_creq_db_rearm(rcfw->creq_bar_reg_iomem, - rcfw->creq.cons, rcfw->creq.max_elements, - rcfw->creq_ring_id, gen_p5); + creq->requested = true; + bnxt_qplib_ring_creq_db_rearm(creq->creq_db.db, + creq->hwq.cons, creq->hwq.max_elements, + creq->ring_id, gen_p5); return 0; } -int bnxt_qplib_enable_rcfw_channel(struct 
bnxt_qplib_rcfw *rcfw, - int msix_vector, - int cp_bar_reg_off, int virt_fn, - int (*aeq_handler)(struct bnxt_qplib_rcfw *, - void *, void *)) +static int bnxt_qplib_map_cmdq_mbox(struct bnxt_qplib_rcfw *rcfw, bool is_vf) { - resource_size_t res_base; - struct cmdq_init init; + struct bnxt_qplib_cmdq_mbox *mbox; + resource_size_t bar_reg; struct pci_dev *pdev; - u16 bmap_size; - int rc; + u16 prod_offt; + int rc = 0; - /* General */ pdev = rcfw->pdev; - rcfw->seq_num = 0; - set_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags); - bmap_size = BITS_TO_LONGS(rcfw->cmdq_depth) * sizeof(unsigned long); - rcfw->cmdq_bitmap = kzalloc(bmap_size, GFP_KERNEL); - if (!rcfw->cmdq_bitmap) - return -ENOMEM; - rcfw->bmap_size = bmap_size; - - /* CMDQ */ - rcfw->cmdq_bar_reg = RCFW_COMM_PCI_BAR_REGION; - res_base = pci_resource_start(pdev, rcfw->cmdq_bar_reg); - if (!res_base) + mbox = &rcfw->cmdq.cmdq_mbox; + + mbox->reg.bar_id = RCFW_COMM_PCI_BAR_REGION; + mbox->reg.len = RCFW_COMM_SIZE; + mbox->reg.bar_base = pci_resource_start(pdev, mbox->reg.bar_id); + if (!mbox->reg.bar_base) { + dev_err(&pdev->dev, + "QPLIB: CMDQ BAR region %d resc start is 0!\n", + mbox->reg.bar_id); return -ENOMEM; + } - rcfw->cmdq_bar_reg_iomem = ioremap(res_base + - RCFW_COMM_BASE_OFFSET, - RCFW_COMM_SIZE); - if (!rcfw->cmdq_bar_reg_iomem) { - dev_err(&rcfw->pdev->dev, "CMDQ BAR region %d mapping failed\n", - rcfw->cmdq_bar_reg); + bar_reg = mbox->reg.bar_base + RCFW_COMM_BASE_OFFSET; + mbox->reg.len = RCFW_COMM_SIZE; + mbox->reg.bar_reg = ioremap(bar_reg, mbox->reg.len); + if (!mbox->reg.bar_reg) { + dev_err(&pdev->dev, + "QPLIB: CMDQ BAR region %d mapping failed\n", + mbox->reg.bar_id); return -ENOMEM; } - rcfw->cmdq_bar_reg_prod_off = virt_fn ? RCFW_VF_COMM_PROD_OFFSET : - RCFW_PF_COMM_PROD_OFFSET; + prod_offt = is_vf ? 
RCFW_VF_COMM_PROD_OFFSET : + RCFW_PF_COMM_PROD_OFFSET; + mbox->prod = (void __iomem *)(mbox->reg.bar_reg + prod_offt); + mbox->db = (void __iomem *)(mbox->reg.bar_reg + RCFW_COMM_TRIG_OFFSET); + return rc; +} - rcfw->cmdq_bar_reg_trig_off = RCFW_COMM_TRIG_OFFSET; +static int bnxt_qplib_map_creq_db(struct bnxt_qplib_rcfw *rcfw, u32 reg_offt) +{ + struct bnxt_qplib_creq_db *creq_db; + resource_size_t bar_reg; + struct pci_dev *pdev; - /* CREQ */ - rcfw->creq_bar_reg = RCFW_COMM_CONS_PCI_BAR_REGION; - res_base = pci_resource_start(pdev, rcfw->creq_bar_reg); - if (!res_base) - dev_err(&rcfw->pdev->dev, - "CREQ BAR region %d resc start is 0!\n", - rcfw->creq_bar_reg); + pdev = rcfw->pdev; + creq_db = &rcfw->creq.creq_db; + + creq_db->reg.bar_id = RCFW_COMM_CONS_PCI_BAR_REGION; + creq_db->reg.bar_base = pci_resource_start(pdev, creq_db->reg.bar_id); + if (!creq_db->reg.bar_id) + dev_err(&pdev->dev, + "QPLIB: CREQ BAR region %d resc start is 0!", + creq_db->reg.bar_id); + + bar_reg = creq_db->reg.bar_base + reg_offt; /* Unconditionally map 8 bytes to support 57500 series */ - rcfw->creq_bar_reg_iomem = ioremap(res_base + cp_bar_reg_off, - 8); - if (!rcfw->creq_bar_reg_iomem) { - dev_err(&rcfw->pdev->dev, "CREQ BAR region %d mapping failed\n", - rcfw->creq_bar_reg); - iounmap(rcfw->cmdq_bar_reg_iomem); - rcfw->cmdq_bar_reg_iomem = NULL; + creq_db->reg.len = 8; + creq_db->reg.bar_reg = ioremap(bar_reg, creq_db->reg.len); + if (!creq_db->reg.bar_reg) { + dev_err(&pdev->dev, + "QPLIB: CREQ BAR region %d mapping failed", + creq_db->reg.bar_id); return -ENOMEM; } - rcfw->creq_qp_event_processed = 0; - rcfw->creq_func_event_processed = 0; + creq_db->db = creq_db->reg.bar_reg; + return 0; +} + +static void bnxt_qplib_start_rcfw(struct bnxt_qplib_rcfw *rcfw) +{ + struct bnxt_qplib_cmdq_ctx *cmdq; + struct bnxt_qplib_creq_ctx *creq; + struct bnxt_qplib_cmdq_mbox *mbox; + struct cmdq_init init = {0}; + + cmdq = &rcfw->cmdq; + creq = &rcfw->creq; + mbox = &cmdq->cmdq_mbox; + + init.cmdq_pbl = cpu_to_le64(cmdq->hwq.pbl[PBL_LVL_0].pg_map_arr[0]); + init.cmdq_size_cmdq_lvl = + cpu_to_le16(((rcfw->cmdq_depth << + CMDQ_INIT_CMDQ_SIZE_SFT) & + CMDQ_INIT_CMDQ_SIZE_MASK) | + ((cmdq->hwq.level << + CMDQ_INIT_CMDQ_LVL_SFT) & + CMDQ_INIT_CMDQ_LVL_MASK)); + init.creq_ring_id = cpu_to_le16(creq->ring_id); + /* Write to the Bono mailbox register */ + __iowrite32_copy(mbox->reg.bar_reg, &init, sizeof(init) / 4); +} - if (aeq_handler) - rcfw->aeq_handler = aeq_handler; - init_waitqueue_head(&rcfw->waitq); +int bnxt_qplib_enable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw, + int msix_vector, + int cp_bar_reg_off, int virt_fn, + aeq_handler_t aeq_handler) +{ + struct bnxt_qplib_cmdq_ctx *cmdq; + struct bnxt_qplib_creq_ctx *creq; + int rc; + + cmdq = &rcfw->cmdq; + creq = &rcfw->creq; + + /* Clear to defaults */ + + cmdq->seq_num = 0; + set_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags); + init_waitqueue_head(&cmdq->waitq); + + creq->stats.creq_qp_event_processed = 0; + creq->stats.creq_func_event_processed = 0; + creq->aeq_handler = aeq_handler; + + rc = bnxt_qplib_map_cmdq_mbox(rcfw, virt_fn); + if (rc) + return rc; + + rc = bnxt_qplib_map_creq_db(rcfw, cp_bar_reg_off); + if (rc) + return rc; rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_vector, true); if (rc) { @@ -770,16 +865,8 @@ int bnxt_qplib_enable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw, return rc; } - init.cmdq_pbl = cpu_to_le64(rcfw->cmdq.pbl[PBL_LVL_0].pg_map_arr[0]); - init.cmdq_size_cmdq_lvl = cpu_to_le16( - ((rcfw->cmdq_depth << CMDQ_INIT_CMDQ_SIZE_SFT) & - 
CMDQ_INIT_CMDQ_SIZE_MASK) | - ((rcfw->cmdq.level << CMDQ_INIT_CMDQ_LVL_SFT) & - CMDQ_INIT_CMDQ_LVL_MASK)); - init.creq_ring_id = cpu_to_le16(rcfw->creq_ring_id); + bnxt_qplib_start_rcfw(rcfw); - /* Write to the Bono mailbox register */ - __iowrite32_copy(rcfw->cmdq_bar_reg_iomem, &init, sizeof(init) / 4); return 0; } diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h index ab1531c7e27f..1aff6d458ac5 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h @@ -206,8 +206,9 @@ static inline void bnxt_qplib_ring_creq_db(void __iomem *db, u32 raw_cons, #define CREQ_ENTRY_POLL_BUDGET 0x100 /* HWQ */ +typedef int (*aeq_handler_t)(struct bnxt_qplib_rcfw *, void *, void *); -struct bnxt_qplib_crsq { +struct bnxt_qplib_crsqe { struct creq_qp_event *resp; u32 req_size; }; @@ -225,41 +226,53 @@ struct bnxt_qplib_qp_node { #define BNXT_QPLIB_OOS_COUNT_MASK 0xFFFFFFFF +#define FIRMWARE_INITIALIZED_FLAG (0) +#define FIRMWARE_FIRST_FLAG (31) +#define FIRMWARE_TIMED_OUT (3) +struct bnxt_qplib_cmdq_mbox { + struct bnxt_qplib_reg_desc reg; + void __iomem *prod; + void __iomem *db; +}; + +struct bnxt_qplib_cmdq_ctx { + struct bnxt_qplib_hwq hwq; + struct bnxt_qplib_cmdq_mbox cmdq_mbox; + wait_queue_head_t waitq; + unsigned long flags; + unsigned long *cmdq_bitmap; + u32 bmap_size; + u32 seq_num; +}; + +struct bnxt_qplib_creq_db { + struct bnxt_qplib_reg_desc reg; + void __iomem *db; +}; + +struct bnxt_qplib_creq_stat { + u64 creq_qp_event_processed; + u64 creq_func_event_processed; +}; + +struct bnxt_qplib_creq_ctx { + struct bnxt_qplib_hwq hwq; + struct bnxt_qplib_creq_db creq_db; + struct bnxt_qplib_creq_stat stats; + struct tasklet_struct creq_tasklet; + aeq_handler_t aeq_handler; + u16 ring_id; + int msix_vec; + bool requested; /*irq handler installed */ +}; + /* RCFW Communication Channels */ struct bnxt_qplib_rcfw { struct pci_dev *pdev; struct bnxt_qplib_res *res; - int vector; - struct tasklet_struct worker; - bool requested; - unsigned long *cmdq_bitmap; - u32 bmap_size; - unsigned long flags; -#define FIRMWARE_INITIALIZED_FLAG 0 -#define FIRMWARE_FIRST_FLAG 31 -#define FIRMWARE_TIMED_OUT 3 - wait_queue_head_t waitq; - int (*aeq_handler)(struct bnxt_qplib_rcfw *, - void *, void *); - u32 seq_num; - - /* Bar region info */ - void __iomem *cmdq_bar_reg_iomem; - u16 cmdq_bar_reg; - u16 cmdq_bar_reg_prod_off; - u16 cmdq_bar_reg_trig_off; - u16 creq_ring_id; - u16 creq_bar_reg; - void __iomem *creq_bar_reg_iomem; - - /* Cmd-Resp and Async Event notification queue */ - struct bnxt_qplib_hwq creq; - u64 creq_qp_event_processed; - u64 creq_func_event_processed; - - /* Actual Cmd and Resp Queues */ - struct bnxt_qplib_hwq cmdq; - struct bnxt_qplib_crsq *crsqe_tbl; + struct bnxt_qplib_cmdq_ctx cmdq; + struct bnxt_qplib_creq_ctx creq; + struct bnxt_qplib_crsqe *crsqe_tbl; int qp_tbl_size; struct bnxt_qplib_qp_node *qp_tbl; u64 oos_prev; @@ -279,8 +292,7 @@ int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector, int bnxt_qplib_enable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw, int msix_vector, int cp_bar_reg_off, int virt_fn, - int (*aeq_handler)(struct bnxt_qplib_rcfw *, - void *aeqe, void *obj)); + aeq_handler_t aeq_handler); struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf( struct bnxt_qplib_rcfw *rcfw, diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h index fe8a6dd7aeb1..5fa278e744eb 100644 --- 
a/drivers/infiniband/hw/bnxt_re/qplib_res.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h @@ -80,6 +80,13 @@ enum bnxt_qplib_pbl_lvl { #define ROCE_PG_SIZE_8M (8 * 1024 * 1024) #define ROCE_PG_SIZE_1G (1024 * 1024 * 1024) +struct bnxt_qplib_reg_desc { + u8 bar_id; + resource_size_t bar_base; + void __iomem *bar_reg; + size_t len; +}; + struct bnxt_qplib_pbl { u32 pg_count; u32 pg_size; -- cgit v1.2.3-58-ga151 From 9555352bacfdfc68a972d5bf8a08849183b9e607 Mon Sep 17 00:00:00 2001 From: Devesh Sharma Date: Sat, 15 Feb 2020 12:11:03 -0500 Subject: RDMA/bnxt_re: Refactor notification queue management code Cleaning up the notification queue data structures and management code. The CQ and SRQ event handlers have been type defined instead of in-place declaration. NQ doorbell register descriptor has been added in base NQ structure. The nq->vector has been renamed to nq->msix_vec. Link: https://lore.kernel.org/r/1581786665-23705-7-git-send-email-devesh.sharma@broadcom.com Signed-off-by: Devesh Sharma Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/bnxt_re/qplib_fp.c | 114 ++++++++++++++++++------------- drivers/infiniband/hw/bnxt_re/qplib_fp.h | 54 +++++++-------- 2 files changed, 94 insertions(+), 74 deletions(-) diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index e7fe86e61fbb..33272e58363c 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -236,16 +236,16 @@ fail: static void bnxt_qplib_service_nq(unsigned long data) { struct bnxt_qplib_nq *nq = (struct bnxt_qplib_nq *)data; + bool gen_p5 = bnxt_qplib_is_chip_gen_p5(nq->res->cctx); struct bnxt_qplib_hwq *hwq = &nq->hwq; struct nq_base *nqe, **nq_ptr; struct bnxt_qplib_cq *cq; int num_cqne_processed = 0; int num_srqne_processed = 0; - u32 sw_cons, raw_cons; - u16 type; int budget = nq->budget; + u32 sw_cons, raw_cons; uintptr_t q_handle; - bool gen_p5 = bnxt_qplib_is_chip_gen_p5(nq->res->cctx); + u16 type; /* Service the NQ until empty */ raw_cons = hwq->cons; @@ -314,7 +314,7 @@ static void bnxt_qplib_service_nq(unsigned long data) } if (hwq->cons != raw_cons) { hwq->cons = raw_cons; - bnxt_qplib_ring_nq_db_rearm(nq->bar_reg_iomem, hwq->cons, + bnxt_qplib_ring_nq_db_rearm(nq->nq_db.db, hwq->cons, hwq->max_elements, nq->ring_id, gen_p5); } @@ -333,7 +333,7 @@ static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance) prefetch(&nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)]); /* Fan out to CPU affinitized kthreads? */ - tasklet_schedule(&nq->worker); + tasklet_schedule(&nq->nq_tasklet); return IRQ_HANDLED; } @@ -341,17 +341,17 @@ static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance) void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill) { bool gen_p5 = bnxt_qplib_is_chip_gen_p5(nq->res->cctx); - tasklet_disable(&nq->worker); + tasklet_disable(&nq->nq_tasklet); /* Mask h/w interrupt */ - bnxt_qplib_ring_nq_db(nq->bar_reg_iomem, nq->hwq.cons, + bnxt_qplib_ring_nq_db(nq->nq_db.db, nq->hwq.cons, nq->hwq.max_elements, nq->ring_id, gen_p5); /* Sync with last running IRQ handler */ - synchronize_irq(nq->vector); + synchronize_irq(nq->msix_vec); if (kill) - tasklet_kill(&nq->worker); + tasklet_kill(&nq->nq_tasklet); if (nq->requested) { - irq_set_affinity_hint(nq->vector, NULL); - free_irq(nq->vector, nq); + irq_set_affinity_hint(nq->msix_vec, NULL); + free_irq(nq->msix_vec, nq); nq->requested = false; } } @@ -364,16 +364,17 @@ void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq) } /* Make sure the HW is stopped! 
*/ - if (nq->requested) - bnxt_qplib_nq_stop_irq(nq, true); + bnxt_qplib_nq_stop_irq(nq, true); - if (nq->bar_reg_iomem) - iounmap(nq->bar_reg_iomem); - nq->bar_reg_iomem = NULL; + if (nq->nq_db.reg.bar_reg) { + iounmap(nq->nq_db.reg.bar_reg); + nq->nq_db.reg.bar_reg = NULL; + nq->nq_db.db = NULL; + } nq->cqn_handler = NULL; nq->srqn_handler = NULL; - nq->vector = 0; + nq->msix_vec = 0; } int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx, @@ -385,68 +386,87 @@ int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx, if (nq->requested) return -EFAULT; - nq->vector = msix_vector; + nq->msix_vec = msix_vector; if (need_init) - tasklet_init(&nq->worker, bnxt_qplib_service_nq, + tasklet_init(&nq->nq_tasklet, bnxt_qplib_service_nq, (unsigned long)nq); else - tasklet_enable(&nq->worker); + tasklet_enable(&nq->nq_tasklet); snprintf(nq->name, sizeof(nq->name), "bnxt_qplib_nq-%d", nq_indx); - rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, nq->name, nq); + rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq); if (rc) return rc; cpumask_clear(&nq->mask); cpumask_set_cpu(nq_indx, &nq->mask); - rc = irq_set_affinity_hint(nq->vector, &nq->mask); + rc = irq_set_affinity_hint(nq->msix_vec, &nq->mask); if (rc) { dev_warn(&nq->pdev->dev, "set affinity failed; vector: %d nq_idx: %d\n", - nq->vector, nq_indx); + nq->msix_vec, nq_indx); } nq->requested = true; - bnxt_qplib_ring_nq_db_rearm(nq->bar_reg_iomem, nq->hwq.cons, + bnxt_qplib_ring_nq_db_rearm(nq->nq_db.db, nq->hwq.cons, nq->hwq.max_elements, nq->ring_id, gen_p5); return rc; } +static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt) +{ + resource_size_t reg_base; + struct bnxt_qplib_nq_db *nq_db; + struct pci_dev *pdev; + int rc = 0; + + pdev = nq->pdev; + nq_db = &nq->nq_db; + + nq_db->reg.bar_id = NQ_CONS_PCI_BAR_REGION; + nq_db->reg.bar_base = pci_resource_start(pdev, nq_db->reg.bar_id); + if (!nq_db->reg.bar_base) { + dev_err(&pdev->dev, "QPLIB: NQ BAR region %d resc start is 0!", + nq_db->reg.bar_id); + rc = -ENOMEM; + goto fail; + } + + reg_base = nq_db->reg.bar_base + reg_offt; + /* Unconditionally map 8 bytes to support 57500 series */ + nq_db->reg.len = 8; + nq_db->reg.bar_reg = ioremap(reg_base, nq_db->reg.len); + if (!nq_db->reg.bar_reg) { + dev_err(&pdev->dev, "QPLIB: NQ BAR region %d mapping failed", + nq_db->reg.bar_id); + rc = -ENOMEM; + goto fail; + } + + nq_db->db = nq_db->reg.bar_reg; +fail: + return rc; +} + int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq, int nq_idx, int msix_vector, int bar_reg_offset, - int (*cqn_handler)(struct bnxt_qplib_nq *nq, - struct bnxt_qplib_cq *), - int (*srqn_handler)(struct bnxt_qplib_nq *nq, - struct bnxt_qplib_srq *, - u8 event)) + cqn_handler_t cqn_handler, + srqn_handler_t srqn_handler) { - resource_size_t nq_base; int rc = -1; - if (cqn_handler) - nq->cqn_handler = cqn_handler; - - if (srqn_handler) - nq->srqn_handler = srqn_handler; + nq->pdev = pdev; + nq->cqn_handler = cqn_handler; + nq->srqn_handler = srqn_handler; /* Have a task to schedule CQ notifiers in post send case */ nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq"); if (!nq->cqn_wq) return -ENOMEM; - nq->bar_reg = NQ_CONS_PCI_BAR_REGION; - nq->bar_reg_off = bar_reg_offset; - nq_base = pci_resource_start(pdev, nq->bar_reg); - if (!nq_base) { - rc = -ENOMEM; - goto fail; - } - /* Unconditionally map 8 bytes to support 57500 series */ - nq->bar_reg_iomem = ioremap(nq_base + nq->bar_reg_off, 8); - if (!nq->bar_reg_iomem) { - rc = -ENOMEM; + rc = 
bnxt_qplib_map_nq_db(nq, bar_reg_offset); + if (rc) goto fail; - } rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true); if (rc) { diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h index d3f080c18b27..765e5d272963 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h @@ -470,29 +470,32 @@ static inline void bnxt_qplib_ring_nq_db(void __iomem *db, u32 raw_cons, writel(NQ_DB_CP_FLAGS | (index & DBC_DBC32_XID_MASK), db); } +struct bnxt_qplib_nq_db { + struct bnxt_qplib_reg_desc reg; + void __iomem *db; +}; + +typedef int (*cqn_handler_t)(struct bnxt_qplib_nq *nq, + struct bnxt_qplib_cq *cq); +typedef int (*srqn_handler_t)(struct bnxt_qplib_nq *nq, + struct bnxt_qplib_srq *srq, u8 event); + struct bnxt_qplib_nq { - struct pci_dev *pdev; - struct bnxt_qplib_res *res; - - int vector; - cpumask_t mask; - int budget; - bool requested; - struct tasklet_struct worker; - struct bnxt_qplib_hwq hwq; - - u16 bar_reg; - u32 bar_reg_off; - u16 ring_id; - void __iomem *bar_reg_iomem; - - int (*cqn_handler)(struct bnxt_qplib_nq *nq, - struct bnxt_qplib_cq *cq); - int (*srqn_handler)(struct bnxt_qplib_nq *nq, - struct bnxt_qplib_srq *srq, - u8 event); - struct workqueue_struct *cqn_wq; - char name[32]; + struct pci_dev *pdev; + struct bnxt_qplib_res *res; + char name[32]; + struct bnxt_qplib_hwq hwq; + struct bnxt_qplib_nq_db nq_db; + u16 ring_id; + int msix_vec; + cpumask_t mask; + struct tasklet_struct nq_tasklet; + bool requested; + int budget; + + cqn_handler_t cqn_handler; + srqn_handler_t srqn_handler; + struct workqueue_struct *cqn_wq; }; struct bnxt_qplib_nq_work { @@ -507,11 +510,8 @@ int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx, int msix_vector, bool need_init); int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq, int nq_idx, int msix_vector, int bar_reg_offset, - int (*cqn_handler)(struct bnxt_qplib_nq *nq, - struct bnxt_qplib_cq *cq), - int (*srqn_handler)(struct bnxt_qplib_nq *nq, - struct bnxt_qplib_srq *srq, - u8 event)); + cqn_handler_t cqn_handler, + srqn_handler_t srq_handler); int bnxt_qplib_create_srq(struct bnxt_qplib_res *res, struct bnxt_qplib_srq *srq); int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res, -- cgit v1.2.3-58-ga151 From 6f53196bc5e7fd3c05337f24977cacb08e0f9753 Mon Sep 17 00:00:00 2001 From: Devesh Sharma Date: Sat, 15 Feb 2020 12:11:04 -0500 Subject: RDMA/bnxt_re: Refactor doorbell management functions Moving all the fast path doorbell functions at one place under qplib_res.h. To pass doorbell record information a new structure bnxt_qplib_db_info has been introduced. Every roce object holds an instance of this structure and doorbell information is initialized during resource creation. When DB is rung only the current queue index is read from hardware ring and rest of the data is taken from pre-initialized dbinfo structure. 
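As a rough illustration of that split between create-time and ring-time data, the stand-alone sketch below caches the static identity of the queue once and reads only the live index when the doorbell is rung. It is illustrative only: names and the bit layout are simplified and the doorbell write is a printf, so it is not the driver's bnxt_qplib_db_info or its register format.

  /* Minimal userspace sketch of the pre-initialized doorbell-info pattern. */
  #include <stdint.h>
  #include <stdio.h>

  struct hwq {                  /* the only state read at ring time */
          uint32_t prod;
          uint32_t depth;
  };

  struct db_info {              /* filled once when the object is created */
          struct hwq *hwq;      /* queue whose index is sampled at ring time */
          uint32_t    xid;      /* cached queue/object id */
          /* the driver additionally caches the mapped doorbell address here */
  };

  /* Everything except the current index comes from the cached info. */
  static void ring_db(const struct db_info *info, uint32_t type)
  {
          uint32_t idx = info->hwq->prod % info->hwq->depth;
          uint64_t val = ((uint64_t)(info->xid | type) << 32) | idx;

          printf("doorbell value: 0x%016llx\n", (unsigned long long)val);
  }

  int main(void)
  {
          struct hwq sq       = { .prod = 7, .depth = 256 };
          struct db_info sqdb = { .hwq = &sq, .xid = 0x12 }; /* create time */

          ring_db(&sqdb, 0x10000000u);  /* arbitrary "type" bits for the demo */
          return 0;
  }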
Link: https://lore.kernel.org/r/1581786665-23705-8-git-send-email-devesh.sharma@broadcom.com Signed-off-by: Devesh Sharma Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/bnxt_re/qplib_fp.c | 143 ++++++++++------------------- drivers/infiniband/hw/bnxt_re/qplib_fp.h | 44 +-------- drivers/infiniband/hw/bnxt_re/qplib_rcfw.c | 21 ++--- drivers/infiniband/hw/bnxt_re/qplib_rcfw.h | 2 +- drivers/infiniband/hw/bnxt_re/qplib_res.h | 78 ++++++++++++++++ 5 files changed, 141 insertions(+), 147 deletions(-) diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index 33272e58363c..2ccf1c3708d1 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -53,9 +53,7 @@ #include "qplib_sp.h" #include "qplib_fp.h" -static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq); static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp); -static void bnxt_qplib_arm_srq(struct bnxt_qplib_srq *srq, u32 arm_type); static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp) { @@ -236,7 +234,6 @@ fail: static void bnxt_qplib_service_nq(unsigned long data) { struct bnxt_qplib_nq *nq = (struct bnxt_qplib_nq *)data; - bool gen_p5 = bnxt_qplib_is_chip_gen_p5(nq->res->cctx); struct bnxt_qplib_hwq *hwq = &nq->hwq; struct nq_base *nqe, **nq_ptr; struct bnxt_qplib_cq *cq; @@ -272,7 +269,8 @@ static void bnxt_qplib_service_nq(unsigned long data) q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high) << 32; cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle; - bnxt_qplib_arm_cq_enable(cq); + bnxt_qplib_armen_db(&cq->dbinfo, + DBC_DBC_TYPE_CQ_ARMENA); spin_lock_bh(&cq->compl_lock); atomic_set(&cq->arm_state, 0); if (!nq->cqn_handler(nq, (cq))) @@ -285,14 +283,16 @@ static void bnxt_qplib_service_nq(unsigned long data) } case NQ_BASE_TYPE_SRQ_EVENT: { + struct bnxt_qplib_srq *srq; struct nq_srq_event *nqsrqe = (struct nq_srq_event *)nqe; q_handle = le32_to_cpu(nqsrqe->srq_handle_low); q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high) << 32; - bnxt_qplib_arm_srq((struct bnxt_qplib_srq *)q_handle, - DBC_DBC_TYPE_SRQ_ARMENA); + srq = (struct bnxt_qplib_srq *)q_handle; + bnxt_qplib_armen_db(&srq->dbinfo, + DBC_DBC_TYPE_SRQ_ARMENA); if (!nq->srqn_handler(nq, (struct bnxt_qplib_srq *)q_handle, nqsrqe->event)) @@ -314,9 +314,7 @@ static void bnxt_qplib_service_nq(unsigned long data) } if (hwq->cons != raw_cons) { hwq->cons = raw_cons; - bnxt_qplib_ring_nq_db_rearm(nq->nq_db.db, hwq->cons, - hwq->max_elements, nq->ring_id, - gen_p5); + bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true); } } @@ -340,11 +338,9 @@ static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance) void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill) { - bool gen_p5 = bnxt_qplib_is_chip_gen_p5(nq->res->cctx); tasklet_disable(&nq->nq_tasklet); /* Mask h/w interrupt */ - bnxt_qplib_ring_nq_db(nq->nq_db.db, nq->hwq.cons, - nq->hwq.max_elements, nq->ring_id, gen_p5); + bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false); /* Sync with last running IRQ handler */ synchronize_irq(nq->msix_vec); if (kill) @@ -369,7 +365,6 @@ void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq) if (nq->nq_db.reg.bar_reg) { iounmap(nq->nq_db.reg.bar_reg); nq->nq_db.reg.bar_reg = NULL; - nq->nq_db.db = NULL; } nq->cqn_handler = NULL; @@ -380,7 +375,6 @@ void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq) int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx, int msix_vector, bool need_init) { - bool gen_p5 = 
bnxt_qplib_is_chip_gen_p5(nq->res->cctx); int rc; if (nq->requested) @@ -407,8 +401,7 @@ int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx, nq->msix_vec, nq_indx); } nq->requested = true; - bnxt_qplib_ring_nq_db_rearm(nq->nq_db.db, nq->hwq.cons, - nq->hwq.max_elements, nq->ring_id, gen_p5); + bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true); return rc; } @@ -443,7 +436,9 @@ static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt) goto fail; } - nq_db->db = nq_db->reg.bar_reg; + nq_db->dbinfo.db = nq_db->reg.bar_reg; + nq_db->dbinfo.hwq = &nq->hwq; + nq_db->dbinfo.xid = nq->ring_id; fail: return rc; } @@ -516,24 +511,6 @@ int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq) } /* SRQ */ -static void bnxt_qplib_arm_srq(struct bnxt_qplib_srq *srq, u32 arm_type) -{ - struct bnxt_qplib_hwq *srq_hwq = &srq->hwq; - void __iomem *db; - u32 sw_prod; - u64 val = 0; - - /* Ring DB */ - sw_prod = (arm_type == DBC_DBC_TYPE_SRQ_ARM) ? - srq->threshold : HWQ_CMP(srq_hwq->prod, srq_hwq); - db = (arm_type == DBC_DBC_TYPE_SRQ_ARMENA) ? srq->dbr_base : - srq->dpi->dbr; - val = ((srq->id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) | arm_type; - val <<= 32; - val |= (sw_prod << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK; - writeq(val, db); -} - void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res, struct bnxt_qplib_srq *srq) { @@ -624,9 +601,12 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res, srq->swq[srq->last_idx].next_idx = -1; srq->id = le32_to_cpu(resp.xid); - srq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem; + srq->dbinfo.hwq = &srq->hwq; + srq->dbinfo.xid = srq->id; + srq->dbinfo.db = srq->dpi->dbr; + srq->dbinfo.priv_db = res->dpi_tbl.dbr_bar_reg_iomem; if (srq->threshold) - bnxt_qplib_arm_srq(srq, DBC_DBC_TYPE_SRQ_ARMENA); + bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA); srq->arm_req = false; return 0; @@ -650,7 +630,7 @@ int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res, srq_hwq->max_elements - sw_cons + sw_prod; if (count > srq->threshold) { srq->arm_req = false; - bnxt_qplib_arm_srq(srq, DBC_DBC_TYPE_SRQ_ARM); + bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold); } else { /* Deferred arming */ srq->arm_req = true; @@ -738,10 +718,10 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq, srq_hwq->max_elements - sw_cons + sw_prod; spin_unlock(&srq_hwq->lock); /* Ring DB */ - bnxt_qplib_arm_srq(srq, DBC_DBC_TYPE_SRQ); + bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ); if (srq->arm_req == true && count > srq->threshold) { srq->arm_req = false; - bnxt_qplib_arm_srq(srq, DBC_DBC_TYPE_SRQ_ARM); + bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold); } done: return rc; @@ -872,6 +852,15 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) qp->id = le32_to_cpu(resp.xid); qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET; + qp->cctx = res->cctx; + sq->dbinfo.hwq = &sq->hwq; + sq->dbinfo.xid = qp->id; + sq->dbinfo.db = qp->dpi->dbr; + if (rq->max_wqe) { + rq->dbinfo.hwq = &rq->hwq; + rq->dbinfo.xid = qp->id; + rq->dbinfo.db = qp->dpi->dbr; + } rcfw->qp_tbl[qp->id].qp_id = qp->id; rcfw->qp_tbl[qp->id].qp_handle = (void *)qp; @@ -1109,9 +1098,17 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) qp->id = le32_to_cpu(resp.xid); qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET; - qp->cctx = res->cctx; INIT_LIST_HEAD(&qp->sq_flush); INIT_LIST_HEAD(&qp->rq_flush); + qp->cctx = res->cctx; + sq->dbinfo.hwq = &sq->hwq; + sq->dbinfo.xid = qp->id; + sq->dbinfo.db 
= qp->dpi->dbr; + if (rq->max_wqe) { + rq->dbinfo.hwq = &rq->hwq; + rq->dbinfo.xid = qp->id; + rq->dbinfo.db = qp->dpi->dbr; + } rcfw->qp_tbl[qp->id].qp_id = qp->id; rcfw->qp_tbl[qp->id].qp_handle = (void *)qp; @@ -1551,16 +1548,8 @@ void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp, void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp) { struct bnxt_qplib_q *sq = &qp->sq; - u32 sw_prod; - u64 val = 0; - val = (((qp->id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) | - DBC_DBC_TYPE_SQ); - val <<= 32; - sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq); - val |= (sw_prod << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK; - /* Flush all the WQE writes to HW */ - writeq(val, qp->dpi->dbr); + bnxt_qplib_ring_prod_db(&sq->dbinfo, DBC_DBC_TYPE_SQ); } int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp, @@ -1852,16 +1841,8 @@ done: void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp) { struct bnxt_qplib_q *rq = &qp->rq; - u32 sw_prod; - u64 val = 0; - val = (((qp->id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) | - DBC_DBC_TYPE_RQ); - val <<= 32; - sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq); - val |= (sw_prod << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK; - /* Flush the writes to HW Rx WQE before the ringing Rx DB */ - writeq(val, qp->dpi->dbr); + bnxt_qplib_ring_prod_db(&rq->dbinfo, DBC_DBC_TYPE_RQ); } int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp, @@ -1941,34 +1922,6 @@ done: } /* CQ */ - -/* Spinlock must be held */ -static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq) -{ - u64 val = 0; - - val = ((cq->id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) | - DBC_DBC_TYPE_CQ_ARMENA; - val <<= 32; - /* Flush memory writes before enabling the CQ */ - writeq(val, cq->dbr_base); -} - -static void bnxt_qplib_arm_cq(struct bnxt_qplib_cq *cq, u32 arm_type) -{ - struct bnxt_qplib_hwq *cq_hwq = &cq->hwq; - u32 sw_cons; - u64 val = 0; - - /* Ring DB */ - val = ((cq->id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) | arm_type; - val <<= 32; - sw_cons = HWQ_CMP(cq_hwq->cons, cq_hwq); - val |= (sw_cons << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK; - /* flush memory writes before arming the CQ */ - writeq(val, cq->dpi->dbr); -} - int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; @@ -2023,7 +1976,6 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq) goto fail; cq->id = le32_to_cpu(resp.xid); - cq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem; cq->period = BNXT_QPLIB_QUEUE_START_PERIOD; init_waitqueue_head(&cq->waitq); INIT_LIST_HEAD(&cq->sqf_head); @@ -2031,7 +1983,13 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq) spin_lock_init(&cq->compl_lock); spin_lock_init(&cq->flush_lock); - bnxt_qplib_arm_cq_enable(cq); + cq->dbinfo.hwq = &cq->hwq; + cq->dbinfo.xid = cq->id; + cq->dbinfo.db = cq->dpi->dbr; + cq->dbinfo.priv_db = res->dpi_tbl.dbr_bar_reg_iomem; + + bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA); + return 0; fail: @@ -2188,8 +2146,7 @@ static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq, sq->send_phantom = true; /* TODO: Only ARM if the previous SQE is ARMALL */ - bnxt_qplib_arm_cq(cq, DBC_DBC_TYPE_CQ_ARMALL); - + bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMALL); rc = -EAGAIN; goto out; } @@ -2859,7 +2816,7 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, } if (cq->hwq.cons != raw_cons) { cq->hwq.cons = raw_cons; - bnxt_qplib_arm_cq(cq, DBC_DBC_TYPE_CQ); + bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ); } exit: return num_cqes - 
budget; @@ -2868,7 +2825,7 @@ exit: void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type) { if (arm_type) - bnxt_qplib_arm_cq(cq, arm_type); + bnxt_qplib_ring_db(&cq->dbinfo, arm_type); /* Using cq->arm_state variable to track whether to issue cq handler */ atomic_set(&cq->arm_state, 1); } diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h index 765e5d272963..9e8d1c5c3f4a 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h @@ -42,7 +42,7 @@ struct bnxt_qplib_srq { struct bnxt_qplib_pd *pd; struct bnxt_qplib_dpi *dpi; - void __iomem *dbr_base; + struct bnxt_qplib_db_info dbinfo; u64 srq_handle; u32 id; u32 max_wqe; @@ -236,6 +236,7 @@ struct bnxt_qplib_swqe { struct bnxt_qplib_q { struct bnxt_qplib_hwq hwq; struct bnxt_qplib_swq *swq; + struct bnxt_qplib_db_info dbinfo; struct bnxt_qplib_sg_info sg_info; u32 max_wqe; u16 q_full_delta; @@ -370,7 +371,7 @@ struct bnxt_qplib_cqe { #define BNXT_QPLIB_QUEUE_START_PERIOD 0x01 struct bnxt_qplib_cq { struct bnxt_qplib_dpi *dpi; - void __iomem *dbr_base; + struct bnxt_qplib_db_info dbinfo; u32 max_wqe; u32 id; u16 count; @@ -433,46 +434,9 @@ struct bnxt_qplib_cq { NQ_DB_IDX_VALID | \ NQ_DB_IRQ_DIS) -static inline void bnxt_qplib_ring_nq_db64(void __iomem *db, u32 index, - u32 xid, bool arm) -{ - u64 val; - - val = xid & DBC_DBC_XID_MASK; - val |= DBC_DBC_PATH_ROCE; - val |= arm ? DBC_DBC_TYPE_NQ_ARM : DBC_DBC_TYPE_NQ; - val <<= 32; - val |= index & DBC_DBC_INDEX_MASK; - writeq(val, db); -} - -static inline void bnxt_qplib_ring_nq_db_rearm(void __iomem *db, u32 raw_cons, - u32 max_elements, u32 xid, - bool gen_p5) -{ - u32 index = raw_cons & (max_elements - 1); - - if (gen_p5) - bnxt_qplib_ring_nq_db64(db, index, xid, true); - else - writel(NQ_DB_CP_FLAGS_REARM | (index & DBC_DBC32_XID_MASK), db); -} - -static inline void bnxt_qplib_ring_nq_db(void __iomem *db, u32 raw_cons, - u32 max_elements, u32 xid, - bool gen_p5) -{ - u32 index = raw_cons & (max_elements - 1); - - if (gen_p5) - bnxt_qplib_ring_nq_db64(db, index, xid, false); - else - writel(NQ_DB_CP_FLAGS | (index & DBC_DBC32_XID_MASK), db); -} - struct bnxt_qplib_nq_db { struct bnxt_qplib_reg_desc reg; - void __iomem *db; + struct bnxt_qplib_db_info dbinfo; }; typedef int (*cqn_handler_t)(struct bnxt_qplib_nq *nq, diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index 119113ecfb64..b0b050e5cd12 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c @@ -379,7 +379,6 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, static void bnxt_qplib_service_creq(unsigned long data) { struct bnxt_qplib_rcfw *rcfw = (struct bnxt_qplib_rcfw *)data; - bool gen_p5 = bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx); struct bnxt_qplib_creq_ctx *creq = &rcfw->creq; u32 type, budget = CREQ_ENTRY_POLL_BUDGET; struct bnxt_qplib_hwq *hwq = &creq->hwq; @@ -429,9 +428,8 @@ static void bnxt_qplib_service_creq(unsigned long data) if (hwq->cons != raw_cons) { hwq->cons = raw_cons; - bnxt_qplib_ring_creq_db_rearm(creq->creq_db.db, - raw_cons, hwq->max_elements, - creq->ring_id, gen_p5); + bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo, + rcfw->res->cctx, true); } spin_unlock_irqrestore(&hwq->lock, flags); } @@ -660,15 +658,12 @@ fail: void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill) { - bool gen_p5 = bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx); struct bnxt_qplib_creq_ctx *creq; creq 
= &rcfw->creq; tasklet_disable(&creq->creq_tasklet); /* Mask h/w interrupts */ - bnxt_qplib_ring_creq_db(creq->creq_db.db, creq->hwq.cons, - creq->hwq.max_elements, creq->ring_id, - gen_p5); + bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo, rcfw->res->cctx, false); /* Sync with last running IRQ-handler */ synchronize_irq(creq->msix_vec); if (kill) @@ -708,7 +703,6 @@ void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw) int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector, bool need_init) { - bool gen_p5 = bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx); struct bnxt_qplib_creq_ctx *creq; int rc; @@ -728,9 +722,8 @@ int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector, if (rc) return rc; creq->requested = true; - bnxt_qplib_ring_creq_db_rearm(creq->creq_db.db, - creq->hwq.cons, creq->hwq.max_elements, - creq->ring_id, gen_p5); + + bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo, rcfw->res->cctx, true); return 0; } @@ -799,7 +792,9 @@ static int bnxt_qplib_map_creq_db(struct bnxt_qplib_rcfw *rcfw, u32 reg_offt) creq_db->reg.bar_id); return -ENOMEM; } - creq_db->db = creq_db->reg.bar_reg; + creq_db->dbinfo.db = creq_db->reg.bar_reg; + creq_db->dbinfo.hwq = &rcfw->creq.hwq; + creq_db->dbinfo.xid = rcfw->creq.ring_id; return 0; } diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h index 1aff6d458ac5..411fce3493b6 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h @@ -247,7 +247,7 @@ struct bnxt_qplib_cmdq_ctx { struct bnxt_qplib_creq_db { struct bnxt_qplib_reg_desc reg; - void __iomem *db; + struct bnxt_qplib_db_info dbinfo; }; struct bnxt_qplib_creq_stat { diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h index 5fa278e744eb..95b645dbbc2d 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h @@ -133,6 +133,13 @@ struct bnxt_qplib_hwq { u8 is_user; }; +struct bnxt_qplib_db_info { + void __iomem *db; + void __iomem *priv_db; + struct bnxt_qplib_hwq *hwq; + u32 xid; +}; + /* Tables */ struct bnxt_qplib_pd_tbl { unsigned long *tbl; @@ -290,4 +297,75 @@ void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res, int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res, struct bnxt_qplib_ctx *ctx, bool virt_fn, bool is_p5); + +static inline void bnxt_qplib_ring_db32(struct bnxt_qplib_db_info *info, + bool arm) +{ + u32 key; + + key = info->hwq->cons & (info->hwq->max_elements - 1); + key |= (CMPL_DOORBELL_IDX_VALID | + (CMPL_DOORBELL_KEY_CMPL & CMPL_DOORBELL_KEY_MASK)); + if (!arm) + key |= CMPL_DOORBELL_MASK; + writel(key, info->db); +} + +static inline void bnxt_qplib_ring_db(struct bnxt_qplib_db_info *info, + u32 type) +{ + u64 key = 0; + + key = (info->xid & DBC_DBC_XID_MASK) | DBC_DBC_PATH_ROCE | type; + key <<= 32; + key |= (info->hwq->cons & (info->hwq->max_elements - 1)) & + DBC_DBC_INDEX_MASK; + writeq(key, info->db); +} + +static inline void bnxt_qplib_ring_prod_db(struct bnxt_qplib_db_info *info, + u32 type) +{ + u64 key = 0; + + key = (info->xid & DBC_DBC_XID_MASK) | DBC_DBC_PATH_ROCE | type; + key <<= 32; + key |= (info->hwq->prod & (info->hwq->max_elements - 1)) & + DBC_DBC_INDEX_MASK; + writeq(key, info->db); +} + +static inline void bnxt_qplib_armen_db(struct bnxt_qplib_db_info *info, + u32 type) +{ + u64 key = 0; + + key = (info->xid & DBC_DBC_XID_MASK) | DBC_DBC_PATH_ROCE | type; + key <<= 32; + writeq(key, info->priv_db); +} + +static inline void 
bnxt_qplib_srq_arm_db(struct bnxt_qplib_db_info *info, + u32 th) +{ + u64 key = 0; + + key = (info->xid & DBC_DBC_XID_MASK) | DBC_DBC_PATH_ROCE | th; + key <<= 32; + key |= th & DBC_DBC_INDEX_MASK; + writeq(key, info->priv_db); +} + +static inline void bnxt_qplib_ring_nq_db(struct bnxt_qplib_db_info *info, + struct bnxt_qplib_chip_ctx *cctx, + bool arm) +{ + u32 type; + + type = arm ? DBC_DBC_TYPE_NQ_ARM : DBC_DBC_TYPE_NQ; + if (bnxt_qplib_is_chip_gen_p5(cctx)) + bnxt_qplib_ring_db(info, type); + else + bnxt_qplib_ring_db32(info, arm); +} #endif /* __BNXT_QPLIB_RES_H__ */ -- cgit v1.2.3-58-ga151 From 6ccad8483b28666bdafbb52fe94e64f41e7836be Mon Sep 17 00:00:00 2001 From: Devesh Sharma Date: Sat, 15 Feb 2020 12:11:05 -0500 Subject: RDMA/bnxt_re: use ibdev based message printing functions Replacing the dev_err/dbg/warn with ibdev_err/dbg/warn. In the IB device provider driver these functions are recommended to use. Currently qplib layer function calls has not been replaced due to unavailability of ib_device pointer at that layer. Link: https://lore.kernel.org/r/1581786665-23705-9-git-send-email-devesh.sharma@broadcom.com Signed-off-by: Devesh Sharma Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/bnxt_re/ib_verbs.c | 280 +++++++++++++++---------------- drivers/infiniband/hw/bnxt_re/main.c | 131 ++++++++------- 2 files changed, 208 insertions(+), 203 deletions(-) diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 4368aacdd482..ad3e524187e3 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -313,8 +313,8 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context) if (ctx->idx == 0 && rdma_link_local_addr((struct in6_addr *)gid_to_del) && ctx->refcnt == 1 && rdev->gsi_ctx.gsi_sqp) { - dev_dbg(rdev_to_dev(rdev), - "Trying to delete GID0 while QP1 is alive\n"); + ibdev_dbg(&rdev->ibdev, + "Trying to delete GID0 while QP1 is alive\n"); return -EFAULT; } ctx->refcnt--; @@ -322,8 +322,8 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context) rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, vlan_id, true); if (rc) { - dev_err(rdev_to_dev(rdev), - "Failed to remove GID: %#x", rc); + ibdev_err(&rdev->ibdev, + "Failed to remove GID: %#x", rc); } else { ctx_tbl = sgid_tbl->ctx; ctx_tbl[ctx->idx] = NULL; @@ -360,7 +360,7 @@ int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context) } if (rc < 0) { - dev_err(rdev_to_dev(rdev), "Failed to add GID: %#x", rc); + ibdev_err(&rdev->ibdev, "Failed to add GID: %#x", rc); return rc; } @@ -423,12 +423,12 @@ static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp) wqe.bind.r_key = fence->bind_rkey; fence->bind_rkey = ib_inc_rkey(fence->bind_rkey); - dev_dbg(rdev_to_dev(qp->rdev), - "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n", + ibdev_dbg(&qp->rdev->ibdev, + "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n", wqe.bind.r_key, qp->qplib_qp.id, pd); rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe); if (rc) { - dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n"); + ibdev_err(&qp->rdev->ibdev, "Failed to bind fence-WQE\n"); return rc; } bnxt_qplib_post_send_db(&qp->qplib_qp); @@ -479,7 +479,7 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd) DMA_BIDIRECTIONAL); rc = dma_mapping_error(dev, dma_addr); if (rc) { - dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n"); + ibdev_err(&rdev->ibdev, "Failed to dma-map fence-MR-mem\n"); rc = -EIO; fence->dma_addr = 0; goto fail; @@ -499,7 +499,7 
@@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd) mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags); rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr); if (rc) { - dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n"); + ibdev_err(&rdev->ibdev, "Failed to alloc fence-HW-MR\n"); goto fail; } @@ -511,7 +511,7 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd) rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl, BNXT_RE_FENCE_PBL_SIZE, false, PAGE_SIZE); if (rc) { - dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n"); + ibdev_err(&rdev->ibdev, "Failed to register fence-MR\n"); goto fail; } mr->ib_mr.rkey = mr->qplib_mr.rkey; @@ -519,8 +519,8 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd) /* Create a fence MW only for kernel consumers */ mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL); if (IS_ERR(mw)) { - dev_err(rdev_to_dev(rdev), - "Failed to create fence-MW for PD: %p\n", pd); + ibdev_err(&rdev->ibdev, + "Failed to create fence-MW for PD: %p\n", pd); rc = PTR_ERR(mw); goto fail; } @@ -558,7 +558,7 @@ int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) pd->rdev = rdev; if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) { - dev_err(rdev_to_dev(rdev), "Failed to allocate HW PD"); + ibdev_err(&rdev->ibdev, "Failed to allocate HW PD"); rc = -ENOMEM; goto fail; } @@ -585,16 +585,16 @@ int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) rc = ib_copy_to_udata(udata, &resp, sizeof(resp)); if (rc) { - dev_err(rdev_to_dev(rdev), - "Failed to copy user response\n"); + ibdev_err(&rdev->ibdev, + "Failed to copy user response\n"); goto dbfail; } } if (!udata) if (bnxt_re_create_fence_mr(pd)) - dev_warn(rdev_to_dev(rdev), - "Failed to create Fence-MR\n"); + ibdev_warn(&rdev->ibdev, + "Failed to create Fence-MR\n"); return 0; dbfail: bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl, @@ -645,7 +645,7 @@ int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr, int rc; if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) { - dev_err(rdev_to_dev(rdev), "Failed to alloc AH: GRH not set"); + ibdev_err(&rdev->ibdev, "Failed to alloc AH: GRH not set"); return -EINVAL; } @@ -675,7 +675,7 @@ int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr, rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, !(flags & RDMA_CREATE_AH_SLEEPABLE)); if (rc) { - dev_err(rdev_to_dev(rdev), "Failed to allocate HW AH"); + ibdev_err(&rdev->ibdev, "Failed to allocate HW AH"); return rc; } @@ -759,16 +759,16 @@ static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp) mutex_unlock(&rdev->qp_lock); atomic_dec(&rdev->qp_count); - dev_dbg(rdev_to_dev(rdev), "Destroy the shadow AH\n"); + ibdev_dbg(&rdev->ibdev, "Destroy the shadow AH\n"); bnxt_qplib_destroy_ah(&rdev->qplib_res, &gsi_sah->qplib_ah, true); bnxt_qplib_clean_qp(&qp->qplib_qp); - dev_dbg(rdev_to_dev(rdev), "Destroy the shadow QP\n"); + ibdev_dbg(&rdev->ibdev, "Destroy the shadow QP\n"); rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp); if (rc) { - dev_err(rdev_to_dev(rdev), "Destroy Shadow QP failed"); + ibdev_err(&rdev->ibdev, "Destroy Shadow QP failed"); goto fail; } bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp); @@ -802,7 +802,7 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata) rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp); if (rc) { - dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP"); + ibdev_err(&rdev->ibdev, "Failed to destroy HW 
QP"); return rc; } @@ -938,8 +938,8 @@ static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, false); if (rc) { - dev_err(rdev_to_dev(rdev), - "Failed to allocate HW AH for Shadow QP"); + ibdev_err(&rdev->ibdev, + "Failed to allocate HW AH for Shadow QP"); goto fail; } @@ -1032,7 +1032,7 @@ static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp, srq = container_of(init_attr->srq, struct bnxt_re_srq, ib_srq); if (!srq) { - dev_err(rdev_to_dev(rdev), "SRQ not found"); + ibdev_err(&rdev->ibdev, "SRQ not found"); return -EINVAL; } qplqp->srq = &srq->qplib_srq; @@ -1140,8 +1140,7 @@ static int bnxt_re_init_qp_type(struct bnxt_re_dev *rdev, qptype = __from_ib_qp_type(init_attr->qp_type); if (qptype == IB_QPT_MAX) { - dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported", - qptype); + ibdev_err(&rdev->ibdev, "QP type 0x%x not supported", qptype); qptype = -EINVAL; goto out; } @@ -1188,15 +1187,15 @@ static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd, qplqp->mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu)); qplqp->dpi = &rdev->dpi_privileged; /* Doorbell page */ if (init_attr->create_flags) - dev_dbg(rdev_to_dev(rdev), - "QP create flags 0x%x not supported", - init_attr->create_flags); + ibdev_dbg(&rdev->ibdev, + "QP create flags 0x%x not supported", + init_attr->create_flags); /* Setup CQs */ if (init_attr->send_cq) { cq = container_of(init_attr->send_cq, struct bnxt_re_cq, ib_cq); if (!cq) { - dev_err(rdev_to_dev(rdev), "Send CQ not found"); + ibdev_err(&rdev->ibdev, "Send CQ not found"); rc = -EINVAL; goto out; } @@ -1207,7 +1206,7 @@ static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd, if (init_attr->recv_cq) { cq = container_of(init_attr->recv_cq, struct bnxt_re_cq, ib_cq); if (!cq) { - dev_err(rdev_to_dev(rdev), "Receive CQ not found"); + ibdev_err(&rdev->ibdev, "Receive CQ not found"); rc = -EINVAL; goto out; } @@ -1253,8 +1252,7 @@ static int bnxt_re_create_shadow_gsi(struct bnxt_re_qp *qp, sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res, &qp->qplib_qp); if (!sqp) { rc = -ENODEV; - dev_err(rdev_to_dev(rdev), - "Failed to create Shadow QP for QP1"); + ibdev_err(&rdev->ibdev, "Failed to create Shadow QP for QP1"); goto out; } rdev->gsi_ctx.gsi_sqp = sqp; @@ -1267,8 +1265,8 @@ static int bnxt_re_create_shadow_gsi(struct bnxt_re_qp *qp, bnxt_qplib_destroy_qp(&rdev->qplib_res, &sqp->qplib_qp); rc = -ENODEV; - dev_err(rdev_to_dev(rdev), - "Failed to create AH entry for ShadowQP"); + ibdev_err(&rdev->ibdev, + "Failed to create AH entry for ShadowQP"); goto out; } rdev->gsi_ctx.gsi_sah = sah; @@ -1296,7 +1294,7 @@ static int bnxt_re_create_gsi_qp(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd, rc = bnxt_qplib_create_qp1(&rdev->qplib_res, qplqp); if (rc) { - dev_err(rdev_to_dev(rdev), "create HW QP1 failed!"); + ibdev_err(&rdev->ibdev, "create HW QP1 failed!"); goto out; } @@ -1316,14 +1314,14 @@ static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev, init_attr->cap.max_send_sge > dev_attr->max_qp_sges || init_attr->cap.max_recv_sge > dev_attr->max_qp_sges || init_attr->cap.max_inline_data > dev_attr->max_inline_data) { - dev_err(rdev_to_dev(rdev), - "Create QP failed - max exceeded! 
0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x", - init_attr->cap.max_send_wr, dev_attr->max_qp_wqes, - init_attr->cap.max_recv_wr, dev_attr->max_qp_wqes, - init_attr->cap.max_send_sge, dev_attr->max_qp_sges, - init_attr->cap.max_recv_sge, dev_attr->max_qp_sges, - init_attr->cap.max_inline_data, - dev_attr->max_inline_data); + ibdev_err(&rdev->ibdev, + "Create QP failed - max exceeded! 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x", + init_attr->cap.max_send_wr, dev_attr->max_qp_wqes, + init_attr->cap.max_recv_wr, dev_attr->max_qp_wqes, + init_attr->cap.max_send_sge, dev_attr->max_qp_sges, + init_attr->cap.max_recv_sge, dev_attr->max_qp_sges, + init_attr->cap.max_inline_data, + dev_attr->max_inline_data); rc = false; } return rc; @@ -1365,7 +1363,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, } else { rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp); if (rc) { - dev_err(rdev_to_dev(rdev), "Failed to create HW QP"); + ibdev_err(&rdev->ibdev, "Failed to create HW QP"); goto free_umem; } if (udata) { @@ -1375,7 +1373,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, resp.rsvd = 0; rc = ib_copy_to_udata(udata, &resp, sizeof(resp)); if (rc) { - dev_err(rdev_to_dev(rdev), "Failed to copy QP udata"); + ibdev_err(&rdev->ibdev, "Failed to copy QP udata"); goto qp_destroy; } } @@ -1548,7 +1546,7 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq, int rc, entries; if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) { - dev_err(rdev_to_dev(rdev), "Create CQ failed - max exceeded"); + ibdev_err(&rdev->ibdev, "Create CQ failed - max exceeded"); rc = -EINVAL; goto exit; } @@ -1583,7 +1581,7 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq, rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq); if (rc) { - dev_err(rdev_to_dev(rdev), "Create HW SRQ failed!"); + ibdev_err(&rdev->ibdev, "Create HW SRQ failed!"); goto fail; } @@ -1593,7 +1591,7 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq, resp.srqid = srq->qplib_srq.id; rc = ib_copy_to_udata(udata, &resp, sizeof(resp)); if (rc) { - dev_err(rdev_to_dev(rdev), "SRQ copy to udata failed!"); + ibdev_err(&rdev->ibdev, "SRQ copy to udata failed!"); bnxt_qplib_destroy_srq(&rdev->qplib_res, &srq->qplib_srq); goto fail; @@ -1632,7 +1630,7 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr, srq->qplib_srq.threshold = srq_attr->srq_limit; rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq); if (rc) { - dev_err(rdev_to_dev(rdev), "Modify HW SRQ failed!"); + ibdev_err(&rdev->ibdev, "Modify HW SRQ failed!"); return rc; } /* On success, update the shadow */ @@ -1640,8 +1638,8 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr, /* No need to Build and send response back to udata */ break; default: - dev_err(rdev_to_dev(rdev), - "Unsupported srq_attr_mask 0x%x", srq_attr_mask); + ibdev_err(&rdev->ibdev, + "Unsupported srq_attr_mask 0x%x", srq_attr_mask); return -EINVAL; } return 0; @@ -1659,7 +1657,7 @@ int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr) tsrq.qplib_srq.id = srq->qplib_srq.id; rc = bnxt_qplib_query_srq(&rdev->qplib_res, &tsrq.qplib_srq); if (rc) { - dev_err(rdev_to_dev(rdev), "Query HW SRQ failed!"); + ibdev_err(&rdev->ibdev, "Query HW SRQ failed!"); return rc; } srq_attr->max_wr = srq->qplib_srq.max_wqe; @@ -1725,8 +1723,7 @@ static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev, rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp); if (rc) - dev_err(rdev_to_dev(rdev), - "Failed to modify Shadow QP for QP1"); + 
ibdev_err(&rdev->ibdev, "Failed to modify Shadow QP for QP1"); return rc; } @@ -1747,15 +1744,15 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, new_qp_state = qp_attr->qp_state; if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state, ib_qp->qp_type, qp_attr_mask)) { - dev_err(rdev_to_dev(rdev), - "Invalid attribute mask: %#x specified ", - qp_attr_mask); - dev_err(rdev_to_dev(rdev), - "for qpn: %#x type: %#x", - ib_qp->qp_num, ib_qp->qp_type); - dev_err(rdev_to_dev(rdev), - "curr_qp_state=0x%x, new_qp_state=0x%x\n", - curr_qp_state, new_qp_state); + ibdev_err(&rdev->ibdev, + "Invalid attribute mask: %#x specified ", + qp_attr_mask); + ibdev_err(&rdev->ibdev, + "for qpn: %#x type: %#x", + ib_qp->qp_num, ib_qp->qp_type); + ibdev_err(&rdev->ibdev, + "curr_qp_state=0x%x, new_qp_state=0x%x\n", + curr_qp_state, new_qp_state); return -EINVAL; } qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE; @@ -1763,18 +1760,16 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, if (!qp->sumem && qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) { - dev_dbg(rdev_to_dev(rdev), - "Move QP = %p to flush list\n", - qp); + ibdev_dbg(&rdev->ibdev, + "Move QP = %p to flush list\n", qp); flags = bnxt_re_lock_cqs(qp); bnxt_qplib_add_flush_qp(&qp->qplib_qp); bnxt_re_unlock_cqs(qp, flags); } if (!qp->sumem && qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) { - dev_dbg(rdev_to_dev(rdev), - "Move QP = %p out of flush list\n", - qp); + ibdev_dbg(&rdev->ibdev, + "Move QP = %p out of flush list\n", qp); flags = bnxt_re_lock_cqs(qp); bnxt_qplib_clean_qp(&qp->qplib_qp); bnxt_re_unlock_cqs(qp, flags); @@ -1905,10 +1900,10 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { if (qp_attr->max_dest_rd_atomic > dev_attr->max_qp_init_rd_atom) { - dev_err(rdev_to_dev(rdev), - "max_dest_rd_atomic requested%d is > dev_max%d", - qp_attr->max_dest_rd_atomic, - dev_attr->max_qp_init_rd_atom); + ibdev_err(&rdev->ibdev, + "max_dest_rd_atomic requested%d is > dev_max%d", + qp_attr->max_dest_rd_atomic, + dev_attr->max_qp_init_rd_atom); return -EINVAL; } @@ -1929,8 +1924,8 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) || (qp_attr->cap.max_inline_data >= dev_attr->max_inline_data)) { - dev_err(rdev_to_dev(rdev), - "Create QP failed - max exceeded"); + ibdev_err(&rdev->ibdev, + "Create QP failed - max exceeded"); return -EINVAL; } entries = roundup_pow_of_two(qp_attr->cap.max_send_wr); @@ -1963,7 +1958,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, } rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp); if (rc) { - dev_err(rdev_to_dev(rdev), "Failed to modify HW QP"); + ibdev_err(&rdev->ibdev, "Failed to modify HW QP"); return rc; } if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) @@ -1988,7 +1983,7 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp); if (rc) { - dev_err(rdev_to_dev(rdev), "Failed to query HW QP"); + ibdev_err(&rdev->ibdev, "Failed to query HW QP"); goto out; } qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state); @@ -2193,7 +2188,7 @@ static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp, wqe->num_sge++; } else { - dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!"); + ibdev_err(&qp->rdev->ibdev, "QP1 buffer is empty!"); rc = -ENOMEM; } return rc; @@ -2429,8 +2424,8 @@ static int 
bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev, if ((sge_len + wqe->inline_len) > BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) { - dev_err(rdev_to_dev(rdev), - "Inline data size requested > supported value"); + ibdev_err(&rdev->ibdev, + "Inline data size requested > supported value"); return -EINVAL; } sge_len = wr->sg_list[i].length; @@ -2490,8 +2485,8 @@ static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev, /* Common */ wqe.num_sge = wr->num_sge; if (wr->num_sge > qp->qplib_qp.sq.max_sge) { - dev_err(rdev_to_dev(rdev), - "Limit exceeded for Send SGEs"); + ibdev_err(&rdev->ibdev, + "Limit exceeded for Send SGEs"); rc = -EINVAL; goto bad; } @@ -2510,9 +2505,9 @@ static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev, rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe); bad: if (rc) { - dev_err(rdev_to_dev(rdev), - "Post send failed opcode = %#x rc = %d", - wr->opcode, rc); + ibdev_err(&rdev->ibdev, + "Post send failed opcode = %#x rc = %d", + wr->opcode, rc); break; } wr = wr->next; @@ -2539,8 +2534,8 @@ int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr, /* Common */ wqe.num_sge = wr->num_sge; if (wr->num_sge > qp->qplib_qp.sq.max_sge) { - dev_err(rdev_to_dev(qp->rdev), - "Limit exceeded for Send SGEs"); + ibdev_err(&qp->rdev->ibdev, + "Limit exceeded for Send SGEs"); rc = -EINVAL; goto bad; } @@ -2585,8 +2580,8 @@ int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr, rc = bnxt_re_build_atomic_wqe(wr, &wqe); break; case IB_WR_RDMA_READ_WITH_INV: - dev_err(rdev_to_dev(qp->rdev), - "RDMA Read with Invalidate is not supported"); + ibdev_err(&qp->rdev->ibdev, + "RDMA Read with Invalidate is not supported"); rc = -EINVAL; goto bad; case IB_WR_LOCAL_INV: @@ -2597,8 +2592,8 @@ int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr, break; default: /* Unsupported WRs */ - dev_err(rdev_to_dev(qp->rdev), - "WR (%#x) is not supported", wr->opcode); + ibdev_err(&qp->rdev->ibdev, + "WR (%#x) is not supported", wr->opcode); rc = -EINVAL; goto bad; } @@ -2606,9 +2601,9 @@ int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr, rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe); bad: if (rc) { - dev_err(rdev_to_dev(qp->rdev), - "post_send failed op:%#x qps = %#x rc = %d\n", - wr->opcode, qp->qplib_qp.state, rc); + ibdev_err(&qp->rdev->ibdev, + "post_send failed op:%#x qps = %#x rc = %d\n", + wr->opcode, qp->qplib_qp.state, rc); *bad_wr = wr; break; } @@ -2636,8 +2631,8 @@ static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev, /* Common */ wqe.num_sge = wr->num_sge; if (wr->num_sge > qp->qplib_qp.rq.max_sge) { - dev_err(rdev_to_dev(rdev), - "Limit exceeded for Receive SGEs"); + ibdev_err(&rdev->ibdev, + "Limit exceeded for Receive SGEs"); rc = -EINVAL; break; } @@ -2673,8 +2668,8 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr, /* Common */ wqe.num_sge = wr->num_sge; if (wr->num_sge > qp->qplib_qp.rq.max_sge) { - dev_err(rdev_to_dev(qp->rdev), - "Limit exceeded for Receive SGEs"); + ibdev_err(&qp->rdev->ibdev, + "Limit exceeded for Receive SGEs"); rc = -EINVAL; *bad_wr = wr; break; @@ -2745,7 +2740,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, /* Validate CQ fields */ if (cqe < 1 || cqe > dev_attr->max_cq_wqes) { - dev_err(rdev_to_dev(rdev), "Failed to create CQ -max exceeded"); + ibdev_err(&rdev->ibdev, "Failed to create CQ -max exceeded"); return -EINVAL; } @@ -2801,7 +2796,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, rc 
= bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq); if (rc) { - dev_err(rdev_to_dev(rdev), "Failed to create HW CQ"); + ibdev_err(&rdev->ibdev, "Failed to create HW CQ"); goto fail; } @@ -2821,7 +2816,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, resp.rsvd = 0; rc = ib_copy_to_udata(udata, &resp, sizeof(resp)); if (rc) { - dev_err(rdev_to_dev(rdev), "Failed to copy CQ udata"); + ibdev_err(&rdev->ibdev, "Failed to copy CQ udata"); bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq); goto c2fail; } @@ -3100,7 +3095,7 @@ static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *gsi_qp, pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags, cqe->raweth_qp1_flags2); if (pkt_type < 0) { - dev_err(rdev_to_dev(rdev), "Invalid packet\n"); + ibdev_err(&rdev->ibdev, "Invalid packet\n"); return -EINVAL; } @@ -3149,8 +3144,8 @@ static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *gsi_qp, rc = bnxt_re_post_recv_shadow_qp(rdev, gsi_sqp, &rwr); if (rc) { - dev_err(rdev_to_dev(rdev), - "Failed to post Rx buffers to shadow QP"); + ibdev_err(&rdev->ibdev, + "Failed to post Rx buffers to shadow QP"); return -ENOMEM; } @@ -3305,11 +3300,11 @@ static int send_phantom_wqe(struct bnxt_re_qp *qp) rc = bnxt_re_bind_fence_mw(lib_qp); if (!rc) { lib_qp->sq.phantom_wqe_cnt++; - dev_dbg(&lib_qp->sq.hwq.pdev->dev, - "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n", - lib_qp->id, lib_qp->sq.hwq.prod, - HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq), - lib_qp->sq.phantom_wqe_cnt); + ibdev_dbg(&qp->rdev->ibdev, + "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n", + lib_qp->id, lib_qp->sq.hwq.prod, + HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq), + lib_qp->sq.phantom_wqe_cnt); } spin_unlock_irqrestore(&qp->sq_lock, flags); @@ -3332,7 +3327,7 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc) budget = min_t(u32, num_entries, cq->max_cql); num_entries = budget; if (!cq->cql) { - dev_err(rdev_to_dev(cq->rdev), "POLL CQ : no CQL to use"); + ibdev_err(&cq->rdev->ibdev, "POLL CQ : no CQL to use"); goto exit; } cqe = &cq->cql[0]; @@ -3345,8 +3340,8 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc) qp = container_of(lib_qp, struct bnxt_re_qp, qplib_qp); if (send_phantom_wqe(qp) == -ENOMEM) - dev_err(rdev_to_dev(cq->rdev), - "Phantom failed! Scheduled to send again\n"); + ibdev_err(&cq->rdev->ibdev, + "Phantom failed! 
Scheduled to send again\n"); else sq->send_phantom = false; } @@ -3370,8 +3365,7 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc) (unsigned long)(cqe->qp_handle), struct bnxt_re_qp, qplib_qp); if (!qp) { - dev_err(rdev_to_dev(cq->rdev), - "POLL CQ : bad QP handle"); + ibdev_err(&cq->rdev->ibdev, "POLL CQ : bad QP handle"); continue; } wc->qp = &qp->ib_qp; @@ -3436,9 +3430,9 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc) bnxt_re_process_res_ud_wc(qp, wc, cqe); break; default: - dev_err(rdev_to_dev(cq->rdev), - "POLL CQ : type 0x%x not handled", - cqe->opcode); + ibdev_err(&cq->rdev->ibdev, + "POLL CQ : type 0x%x not handled", + cqe->opcode); continue; } wc++; @@ -3531,7 +3525,7 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata) rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); if (rc) { - dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc); + ibdev_err(&rdev->ibdev, "Dereg MR failed: %#x\n", rc); return rc; } @@ -3578,7 +3572,7 @@ struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type, int rc; if (type != IB_MR_TYPE_MEM_REG) { - dev_dbg(rdev_to_dev(rdev), "MR type 0x%x not supported", type); + ibdev_dbg(&rdev->ibdev, "MR type 0x%x not supported", type); return ERR_PTR(-EINVAL); } if (max_num_sg > MAX_PBL_LVL_1_PGS) @@ -3608,8 +3602,8 @@ struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type, rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res, &mr->qplib_frpl, max_num_sg); if (rc) { - dev_err(rdev_to_dev(rdev), - "Failed to allocate HW FR page list"); + ibdev_err(&rdev->ibdev, + "Failed to allocate HW FR page list"); goto fail_mr; } @@ -3644,7 +3638,7 @@ struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type, CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B); rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw); if (rc) { - dev_err(rdev_to_dev(rdev), "Allocate MW failed!"); + ibdev_err(&rdev->ibdev, "Allocate MW failed!"); goto fail; } mw->ib_mw.rkey = mw->qplib_mw.rkey; @@ -3665,7 +3659,7 @@ int bnxt_re_dealloc_mw(struct ib_mw *ib_mw) rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw); if (rc) { - dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc); + ibdev_err(&rdev->ibdev, "Free MW failed: %#x\n", rc); return rc; } @@ -3717,8 +3711,8 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length, int umem_pgs, page_shift, rc; if (length > BNXT_RE_MAX_MR_SIZE) { - dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%lld\n", - length, BNXT_RE_MAX_MR_SIZE); + ibdev_err(&rdev->ibdev, "MR Size: %lld > Max supported:%lld\n", + length, BNXT_RE_MAX_MR_SIZE); return ERR_PTR(-ENOMEM); } @@ -3733,7 +3727,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length, rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr); if (rc) { - dev_err(rdev_to_dev(rdev), "Failed to allocate MR"); + ibdev_err(&rdev->ibdev, "Failed to allocate MR"); goto free_mr; } /* The fixed portion of the rkey is the same as the lkey */ @@ -3741,7 +3735,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length, umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags); if (IS_ERR(umem)) { - dev_err(rdev_to_dev(rdev), "Failed to get umem"); + ibdev_err(&rdev->ibdev, "Failed to get umem"); rc = -EFAULT; goto free_mrw; } @@ -3750,7 +3744,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length, mr->qplib_mr.va = virt_addr; umem_pgs = ib_umem_page_count(umem); if 
(!umem_pgs) { - dev_err(rdev_to_dev(rdev), "umem is invalid!"); + ibdev_err(&rdev->ibdev, "umem is invalid!"); rc = -EINVAL; goto free_umem; } @@ -3767,15 +3761,15 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length, virt_addr)); if (!bnxt_re_page_size_ok(page_shift)) { - dev_err(rdev_to_dev(rdev), "umem page size unsupported!"); + ibdev_err(&rdev->ibdev, "umem page size unsupported!"); rc = -EFAULT; goto fail; } if (page_shift == BNXT_RE_PAGE_SHIFT_4K && length > BNXT_RE_MAX_MR_SIZE_LOW) { - dev_err(rdev_to_dev(rdev), "Requested MR Sz:%llu Max sup:%llu", - length, (u64)BNXT_RE_MAX_MR_SIZE_LOW); + ibdev_err(&rdev->ibdev, "Requested MR Sz:%llu Max sup:%llu", + length, (u64)BNXT_RE_MAX_MR_SIZE_LOW); rc = -EINVAL; goto fail; } @@ -3785,7 +3779,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length, rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl, umem_pgs, false, 1 << page_shift); if (rc) { - dev_err(rdev_to_dev(rdev), "Failed to register user MR"); + ibdev_err(&rdev->ibdev, "Failed to register user MR"); goto fail; } @@ -3818,12 +3812,11 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata) u32 chip_met_rev_num = 0; int rc; - dev_dbg(rdev_to_dev(rdev), "ABI version requested %u", - ibdev->ops.uverbs_abi_ver); + ibdev_dbg(ibdev, "ABI version requested %u", ibdev->ops.uverbs_abi_ver); if (ibdev->ops.uverbs_abi_ver != BNXT_RE_ABI_VERSION) { - dev_dbg(rdev_to_dev(rdev), " is different from the device %d ", - BNXT_RE_ABI_VERSION); + ibdev_dbg(ibdev, " is different from the device %d ", + BNXT_RE_ABI_VERSION); return -EPERM; } @@ -3855,7 +3848,7 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata) rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp))); if (rc) { - dev_err(rdev_to_dev(rdev), "Failed to copy user context"); + ibdev_err(ibdev, "Failed to copy user context"); rc = -EFAULT; goto cfail; } @@ -3905,15 +3898,14 @@ int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma) vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, PAGE_SIZE, vma->vm_page_prot)) { - dev_err(rdev_to_dev(rdev), "Failed to map DPI"); + ibdev_err(&rdev->ibdev, "Failed to map DPI"); return -EAGAIN; } } else { pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT; if (remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE, vma->vm_page_prot)) { - dev_err(rdev_to_dev(rdev), - "Failed to map shared page"); + ibdev_err(&rdev->ibdev, "Failed to map shared page"); return -EAGAIN; } } diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 65f106f84c33..b5128cce8e21 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -269,7 +269,7 @@ static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent) * to f/w will timeout and that will set the * timeout bit. 
*/ - dev_err(rdev_to_dev(rdev), "Failed to re-start IRQs\n"); + ibdev_err(&rdev->ibdev, "Failed to re-start IRQs\n"); return; } @@ -286,8 +286,8 @@ static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent) rc = bnxt_qplib_nq_start_irq(nq, indx - 1, msix_ent[indx].vector, false); if (rc) - dev_warn(rdev_to_dev(rdev), - "Failed to reinit NQ index %d\n", indx - 1); + ibdev_warn(&rdev->ibdev, "Failed to reinit NQ index %d\n", + indx - 1); } } @@ -373,9 +373,9 @@ static int bnxt_re_request_msix(struct bnxt_re_dev *rdev) goto done; } if (num_msix_got != num_msix_want) { - dev_warn(rdev_to_dev(rdev), - "Requested %d MSI-X vectors, got %d\n", - num_msix_want, num_msix_got); + ibdev_warn(&rdev->ibdev, + "Requested %d MSI-X vectors, got %d\n", + num_msix_want, num_msix_got); } rdev->num_msix = num_msix_got; done: @@ -422,8 +422,8 @@ static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, sizeof(resp), DFLT_HWRM_CMD_TIMEOUT); rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg); if (rc) - dev_err(rdev_to_dev(rdev), - "Failed to free HW ring:%d :%#x", req.ring_id, rc); + ibdev_err(&rdev->ibdev, "Failed to free HW ring:%d :%#x", + req.ring_id, rc); return rc; } @@ -483,8 +483,8 @@ static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev, sizeof(req), DFLT_HWRM_CMD_TIMEOUT); rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg); if (rc) - dev_err(rdev_to_dev(rdev), - "Failed to free HW stats context %#x", rc); + ibdev_err(&rdev->ibdev, "Failed to free HW stats context %#x", + rc); return rc; } @@ -757,8 +757,8 @@ static struct bnxt_re_dev *bnxt_re_dev_add(struct net_device *netdev, /* Allocate bnxt_re_dev instance here */ rdev = ib_alloc_device(bnxt_re_dev, ibdev); if (!rdev) { - dev_err(NULL, "%s: bnxt_re_dev allocation failure!", - ROCE_DRV_MODULE_NAME); + ibdev_err(NULL, "%s: bnxt_re_dev allocation failure!", + ROCE_DRV_MODULE_NAME); return NULL; } /* Default values */ @@ -887,8 +887,8 @@ static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq, int rc = 0; if (!srq) { - dev_err(NULL, "%s: SRQ is NULL, SRQN not handled", - ROCE_DRV_MODULE_NAME); + ibdev_err(NULL, "%s: SRQ is NULL, SRQN not handled", + ROCE_DRV_MODULE_NAME); rc = -EINVAL; goto done; } @@ -915,8 +915,8 @@ static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq, qplib_cq); if (!cq) { - dev_err(NULL, "%s: CQ is NULL, CQN not handled", - ROCE_DRV_MODULE_NAME); + ibdev_err(NULL, "%s: CQ is NULL, CQN not handled", + ROCE_DRV_MODULE_NAME); return -EINVAL; } if (cq->ib_cq.comp_handler) { @@ -963,8 +963,8 @@ static int bnxt_re_init_res(struct bnxt_re_dev *rdev) db_offt, &bnxt_re_cqn_handler, &bnxt_re_srqn_handler); if (rc) { - dev_err(rdev_to_dev(rdev), - "Failed to enable NQ with rc = 0x%x", rc); + ibdev_err(&rdev->ibdev, + "Failed to enable NQ with rc = 0x%x", rc); goto fail; } num_vec_enabled++; @@ -1039,8 +1039,8 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev) qplib_ctx->srqc_count + 2); rc = bnxt_qplib_alloc_nq(&rdev->qplib_res, &rdev->nq[i]); if (rc) { - dev_err(rdev_to_dev(rdev), "Alloc Failed NQ%d rc:%#x", - i, rc); + ibdev_err(&rdev->ibdev, "Alloc Failed NQ%d rc:%#x", + i, rc); goto free_nq; } type = bnxt_qplib_get_ring_type(rdev->chip_ctx); @@ -1052,9 +1052,9 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev) rattr.lrid = rdev->msix_entries[i + 1].ring_idx; rc = bnxt_re_net_ring_alloc(rdev, &rattr, &nq->ring_id); if (rc) { - dev_err(rdev_to_dev(rdev), - "Failed to allocate NQ fw id with rc = 0x%x", - rc); + ibdev_err(&rdev->ibdev, + "Failed to allocate NQ fw id 
with rc = 0x%x", + rc); bnxt_qplib_free_nq(&rdev->nq[i]); goto free_nq; } @@ -1128,10 +1128,10 @@ static int bnxt_re_query_hwrm_pri2cos(struct bnxt_re_dev *rdev, u8 dir, return rc; if (resp.queue_cfg_info) { - dev_warn(rdev_to_dev(rdev), - "Asymmetric cos queue configuration detected"); - dev_warn(rdev_to_dev(rdev), - " on device, QoS may not be fully functional\n"); + ibdev_warn(&rdev->ibdev, + "Asymmetric cos queue configuration detected"); + ibdev_warn(&rdev->ibdev, + " on device, QoS may not be fully functional\n"); } qcfgmap = &resp.pri0_cos_queue_id; tmp_map = (u8 *)cid_map; @@ -1184,7 +1184,7 @@ static int bnxt_re_update_gid(struct bnxt_re_dev *rdev) return 0; if (!sgid_tbl) { - dev_err(rdev_to_dev(rdev), "QPLIB: SGID table not allocated"); + ibdev_err(&rdev->ibdev, "QPLIB: SGID table not allocated"); return -EINVAL; } @@ -1261,7 +1261,7 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev) /* Get cosq id for this priority */ rc = bnxt_re_query_hwrm_pri2cos(rdev, 0, &cid_map); if (rc) { - dev_warn(rdev_to_dev(rdev), "no cos for p_mask %x\n", prio_map); + ibdev_warn(&rdev->ibdev, "no cos for p_mask %x\n", prio_map); return rc; } /* Parse CoS IDs for app priority */ @@ -1270,8 +1270,8 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev) /* Config BONO. */ rc = bnxt_qplib_map_tc2cos(&rdev->qplib_res, rdev->cosq); if (rc) { - dev_warn(rdev_to_dev(rdev), "no tc for cos{%x, %x}\n", - rdev->cosq[0], rdev->cosq[1]); + ibdev_warn(&rdev->ibdev, "no tc for cos{%x, %x}\n", + rdev->cosq[0], rdev->cosq[1]); return rc; } @@ -1306,8 +1306,8 @@ static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev) sizeof(resp), DFLT_HWRM_CMD_TIMEOUT); rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg); if (rc) { - dev_err(rdev_to_dev(rdev), - "Failed to query HW version, rc = 0x%x", rc); + ibdev_err(&rdev->ibdev, "Failed to query HW version, rc = 0x%x", + rc); return; } rdev->qplib_ctx.hwrm_intf_ver = @@ -1338,8 +1338,8 @@ static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev) if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) { rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw); if (rc) - dev_warn(rdev_to_dev(rdev), - "Failed to deinitialize RCFW: %#x", rc); + ibdev_warn(&rdev->ibdev, + "Failed to deinitialize RCFW: %#x", rc); bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id); bnxt_qplib_free_ctx(&rdev->qplib_res, &rdev->qplib_ctx); bnxt_qplib_disable_rcfw_channel(&rdev->rcfw); @@ -1350,16 +1350,16 @@ static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev) if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags)) { rc = bnxt_re_free_msix(rdev); if (rc) - dev_warn(rdev_to_dev(rdev), - "Failed to free MSI-X vectors: %#x", rc); + ibdev_warn(&rdev->ibdev, + "Failed to free MSI-X vectors: %#x", rc); } bnxt_re_destroy_chip_ctx(rdev); if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) { rc = bnxt_re_unregister_netdev(rdev); if (rc) - dev_warn(rdev_to_dev(rdev), - "Failed to unregister with netdev: %#x", rc); + ibdev_warn(&rdev->ibdev, + "Failed to unregister with netdev: %#x", rc); } } @@ -1392,14 +1392,15 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) rc = bnxt_re_register_netdev(rdev); if (rc) { rtnl_unlock(); - pr_err("Failed to register with netedev: %#x\n", rc); + ibdev_err(&rdev->ibdev, + "Failed to register with netedev: %#x\n", rc); return -EINVAL; } set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags); rc = bnxt_re_setup_chip_ctx(rdev); if (rc) { - dev_err(rdev_to_dev(rdev), "Failed to get chip context\n"); + 
ibdev_err(&rdev->ibdev, "Failed to get chip context\n"); return -EINVAL; } @@ -1408,7 +1409,8 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) rc = bnxt_re_request_msix(rdev); if (rc) { - pr_err("Failed to get MSI-X vectors: %#x\n", rc); + ibdev_err(&rdev->ibdev, + "Failed to get MSI-X vectors: %#x\n", rc); rc = -EINVAL; goto fail; } @@ -1423,7 +1425,8 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) &rdev->qplib_ctx, BNXT_RE_MAX_QPC_COUNT); if (rc) { - pr_err("Failed to allocate RCFW Channel: %#x\n", rc); + ibdev_err(&rdev->ibdev, + "Failed to allocate RCFW Channel: %#x\n", rc); goto fail; } @@ -1437,7 +1440,7 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) rattr.lrid = rdev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx; rc = bnxt_re_net_ring_alloc(rdev, &rattr, &creq->ring_id); if (rc) { - pr_err("Failed to allocate CREQ: %#x\n", rc); + ibdev_err(&rdev->ibdev, "Failed to allocate CREQ: %#x\n", rc); goto free_rcfw; } db_offt = bnxt_re_get_nqdb_offset(rdev, BNXT_RE_AEQ_IDX); @@ -1446,7 +1449,8 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) vid, db_offt, rdev->is_virtfn, &bnxt_re_aeq_handler); if (rc) { - pr_err("Failed to enable RCFW channel: %#x\n", rc); + ibdev_err(&rdev->ibdev, "Failed to enable RCFW channel: %#x\n", + rc); goto free_ring; } @@ -1460,21 +1464,24 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) rc = bnxt_qplib_alloc_ctx(&rdev->qplib_res, &rdev->qplib_ctx, 0, bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)); if (rc) { - pr_err("Failed to allocate QPLIB context: %#x\n", rc); + ibdev_err(&rdev->ibdev, + "Failed to allocate QPLIB context: %#x\n", rc); goto disable_rcfw; } rc = bnxt_re_net_stats_ctx_alloc(rdev, rdev->qplib_ctx.stats.dma_map, &rdev->qplib_ctx.stats.fw_id); if (rc) { - pr_err("Failed to allocate stats context: %#x\n", rc); + ibdev_err(&rdev->ibdev, + "Failed to allocate stats context: %#x\n", rc); goto free_ctx; } rc = bnxt_qplib_init_rcfw(&rdev->rcfw, &rdev->qplib_ctx, rdev->is_virtfn); if (rc) { - pr_err("Failed to initialize RCFW: %#x\n", rc); + ibdev_err(&rdev->ibdev, + "Failed to initialize RCFW: %#x\n", rc); goto free_sctx; } set_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags); @@ -1482,13 +1489,15 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) /* Resources based on the 'new' device caps */ rc = bnxt_re_alloc_res(rdev); if (rc) { - pr_err("Failed to allocate resources: %#x\n", rc); + ibdev_err(&rdev->ibdev, + "Failed to allocate resources: %#x\n", rc); goto fail; } set_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags); rc = bnxt_re_init_res(rdev); if (rc) { - pr_err("Failed to initialize resources: %#x\n", rc); + ibdev_err(&rdev->ibdev, + "Failed to initialize resources: %#x\n", rc); goto fail; } @@ -1497,7 +1506,8 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) if (!rdev->is_virtfn) { rc = bnxt_re_setup_qos(rdev); if (rc) - pr_info("RoCE priority not yet configured\n"); + ibdev_info(&rdev->ibdev, + "RoCE priority not yet configured\n"); INIT_DELAYED_WORK(&rdev->worker, bnxt_re_worker); set_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags); @@ -1510,11 +1520,12 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) /* Register ib dev */ rc = bnxt_re_register_ib(rdev); if (rc) { - pr_err("Failed to register with IB: %#x\n", rc); + ibdev_err(&rdev->ibdev, + "Failed to register with IB: %#x\n", rc); goto fail; } set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags); - dev_info(rdev_to_dev(rdev), "Device registered successfully"); + ibdev_info(&rdev->ibdev, "Device registered successfully"); ib_get_eth_speed(&rdev->ibdev, 1, 
&rdev->active_speed, &rdev->active_width); set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags); @@ -1563,7 +1574,8 @@ static int bnxt_re_dev_reg(struct bnxt_re_dev **rdev, struct net_device *netdev) en_dev = bnxt_re_dev_probe(netdev); if (IS_ERR(en_dev)) { if (en_dev != ERR_PTR(-ENODEV)) - pr_err("%s: Failed to probe\n", ROCE_DRV_MODULE_NAME); + ibdev_err(&(*rdev)->ibdev, "%s: Failed to probe\n", + ROCE_DRV_MODULE_NAME); rc = PTR_ERR(en_dev); goto exit; } @@ -1600,8 +1612,8 @@ static void bnxt_re_task(struct work_struct *work) case NETDEV_REGISTER: rc = bnxt_re_ib_reg(rdev); if (rc) { - dev_err(rdev_to_dev(rdev), - "Failed to register with IB: %#x", rc); + ibdev_err(&rdev->ibdev, + "Failed to register with IB: %#x", rc); bnxt_re_remove_one(rdev); bnxt_re_dev_unreg(rdev); goto exit; @@ -1678,8 +1690,9 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier, if (rc == -ENODEV) break; if (rc) { - pr_err("Failed to register with the device %s: %#x\n", - real_dev->name, rc); + ibdev_err(&rdev->ibdev, + "Failed to register with the device %s: %#x\n", + real_dev->name, rc); break; } bnxt_re_init_one(rdev); @@ -1764,7 +1777,7 @@ static void __exit bnxt_re_mod_exit(void) * cleanup is done before PF cleanup */ list_for_each_entry_safe_reverse(rdev, next, &to_be_deleted, list) { - dev_info(rdev_to_dev(rdev), "Unregistering Device"); + ibdev_info(&rdev->ibdev, "Unregistering Device"); /* * Flush out any scheduled tasks before destroying the * resources -- cgit v1.2.3-58-ga151 From 65a166201552113f9e1e8d1bb4a55a1eb70cb19c Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Wed, 26 Feb 2020 10:17:24 -0400 Subject: RDMA/bnxt_re: Using vmalloc requires including vmalloc.h Add it Fixes: 0c4dcd602817 ("RDMA/bnxt_re: Refactor hardware queue memory allocation") Signed-off-by: Stephen Rothwell Reviewed-by: Devesh Sharma Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/bnxt_re/qplib_res.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c index 4346b95963cf..fc5909c7f2e0 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c @@ -44,6 +44,7 @@ #include #include #include +#include #include "roce_hsi.h" #include "qplib_res.h" #include "qplib_sp.h" -- cgit v1.2.3-58-ga151 From 25baba217cdfc1dfc75a9f427f4551b9a90e372b Mon Sep 17 00:00:00 2001 From: Kamal Heib Date: Tue, 18 Feb 2020 11:59:11 +0200 Subject: RDMA/siw: Fix setting active_{speed, width} attributes Make sure to set the active_{speed, width} attributes to avoid reporting the same values regardless of the underlying device. 
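For illustration only (not part of this patch): once siw reports the netdev-derived values, they can be observed from user space with libibverbs. The sketch below is an assumption-laden example, not driver code; it assumes libibverbs is installed, that the siw device is the first one returned, and that it is built with "gcc siw_port.c -o siw_port -libverbs".

#include <stdio.h>
#include <infiniband/verbs.h>

int main(void)
{
	int num;
	struct ibv_device **list = ibv_get_device_list(&num);

	if (!list || num < 1)
		return 1;

	struct ibv_context *ctx = ibv_open_device(list[0]);
	struct ibv_port_attr attr;

	/* siw exposes one port per attached netdev, numbered 1 */
	if (ctx && !ibv_query_port(ctx, 1, &attr))
		printf("active_speed=%u active_width=%u\n",
		       (unsigned int)attr.active_speed,
		       (unsigned int)attr.active_width);

	if (ctx)
		ibv_close_device(ctx);
	ibv_free_device_list(list);
	return 0;
}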
Fixes: 303ae1cdfdf7 ("rdma/siw: application interface") Link: https://lore.kernel.org/r/20200218095911.26614-1-kamalheib1@gmail.com Signed-off-by: Kamal Heib Tested-by: Bernard Metzler Reviewed-by: Bernard Metzler Signed-off-by: Jason Gunthorpe --- drivers/infiniband/sw/siw/siw_verbs.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c index 73485d0da907..d5390d498c61 100644 --- a/drivers/infiniband/sw/siw/siw_verbs.c +++ b/drivers/infiniband/sw/siw/siw_verbs.c @@ -165,11 +165,12 @@ int siw_query_port(struct ib_device *base_dev, u8 port, struct ib_port_attr *attr) { struct siw_device *sdev = to_siw_dev(base_dev); + int rv; memset(attr, 0, sizeof(*attr)); - attr->active_speed = 2; - attr->active_width = 2; + rv = ib_get_eth_speed(base_dev, port, &attr->active_speed, + &attr->active_width); attr->gid_tbl_len = 1; attr->max_msg_sz = -1; attr->max_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu); @@ -192,7 +193,7 @@ int siw_query_port(struct ib_device *base_dev, u8 port, * attr->subnet_timeout = 0; * attr->init_type_repy = 0; */ - return 0; + return rv; } int siw_get_port_immutable(struct ib_device *base_dev, u8 port, -- cgit v1.2.3-58-ga151 From 7c11910783a1ea17e88777552ef146cace607b3c Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Tue, 18 Feb 2020 15:45:38 -0400 Subject: RDMA/ucma: Put a lock around every call to the rdma_cm layer The rdma_cm must be used single threaded. This appears to be a bug in the design, as it does have lots of locking that seems like it should allow concurrency. However, when it is all said and done every single place that uses the cma_exch() scheme is broken, and all the unlocked reads from the ucma of the cm_id data are wrong too. syzkaller has been finding endless bugs related to this. Fixing this in any elegant way is some enormous amount of work. Take a very big hammer and put a mutex around everything to do with the ucma_context at the top of every syscall. 
Fixes: 75216638572f ("RDMA/cma: Export rdma cm interface to userspace") Link: https://lore.kernel.org/r/20200218210432.GA31966@ziepe.ca Reported-by: syzbot+adb15cf8c2798e4e0db4@syzkaller.appspotmail.com Reported-by: syzbot+e5579222b6a3edd96522@syzkaller.appspotmail.com Reported-by: syzbot+4b628fcc748474003457@syzkaller.appspotmail.com Reported-by: syzbot+29ee8f76017ce6cf03da@syzkaller.appspotmail.com Reported-by: syzbot+6956235342b7317ec564@syzkaller.appspotmail.com Reported-by: syzbot+b358909d8d01556b790b@syzkaller.appspotmail.com Reported-by: syzbot+6b46b135602a3f3ac99e@syzkaller.appspotmail.com Reported-by: syzbot+8458d13b13562abf6b77@syzkaller.appspotmail.com Reported-by: syzbot+bd034f3fdc0402e942ed@syzkaller.appspotmail.com Reported-by: syzbot+c92378b32760a4eef756@syzkaller.appspotmail.com Reported-by: syzbot+68b44a1597636e0b342c@syzkaller.appspotmail.com Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/ucma.c | 49 ++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 47 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index 66ad29c672fc..16b6cf57fa85 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -91,6 +91,7 @@ struct ucma_context { struct ucma_file *file; struct rdma_cm_id *cm_id; + struct mutex mutex; u64 uid; struct list_head list; @@ -216,6 +217,7 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file) init_completion(&ctx->comp); INIT_LIST_HEAD(&ctx->mc_list); ctx->file = file; + mutex_init(&ctx->mutex); if (xa_alloc(&ctx_table, &ctx->id, ctx, xa_limit_32b, GFP_KERNEL)) goto error; @@ -589,6 +591,7 @@ static int ucma_free_ctx(struct ucma_context *ctx) } events_reported = ctx->events_reported; + mutex_destroy(&ctx->mutex); kfree(ctx); return events_reported; } @@ -658,7 +661,10 @@ static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf, if (IS_ERR(ctx)) return PTR_ERR(ctx); + mutex_lock(&ctx->mutex); ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr); + mutex_unlock(&ctx->mutex); + ucma_put_ctx(ctx); return ret; } @@ -681,7 +687,9 @@ static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf, if (IS_ERR(ctx)) return PTR_ERR(ctx); + mutex_lock(&ctx->mutex); ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr); + mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } @@ -705,8 +713,10 @@ static ssize_t ucma_resolve_ip(struct ucma_file *file, if (IS_ERR(ctx)) return PTR_ERR(ctx); + mutex_lock(&ctx->mutex); ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr, (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms); + mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } @@ -731,8 +741,10 @@ static ssize_t ucma_resolve_addr(struct ucma_file *file, if (IS_ERR(ctx)) return PTR_ERR(ctx); + mutex_lock(&ctx->mutex); ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr, (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms); + mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } @@ -752,7 +764,9 @@ static ssize_t ucma_resolve_route(struct ucma_file *file, if (IS_ERR(ctx)) return PTR_ERR(ctx); + mutex_lock(&ctx->mutex); ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms); + mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } @@ -841,6 +855,7 @@ static ssize_t ucma_query_route(struct ucma_file *file, if (IS_ERR(ctx)) return PTR_ERR(ctx); + mutex_lock(&ctx->mutex); memset(&resp, 0, sizeof resp); addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr; 
memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ? @@ -864,6 +879,7 @@ static ssize_t ucma_query_route(struct ucma_file *file, ucma_copy_iw_route(&resp, &ctx->cm_id->route); out: + mutex_unlock(&ctx->mutex); if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) ret = -EFAULT; @@ -1014,6 +1030,7 @@ static ssize_t ucma_query(struct ucma_file *file, if (IS_ERR(ctx)) return PTR_ERR(ctx); + mutex_lock(&ctx->mutex); switch (cmd.option) { case RDMA_USER_CM_QUERY_ADDR: ret = ucma_query_addr(ctx, response, out_len); @@ -1028,6 +1045,7 @@ static ssize_t ucma_query(struct ucma_file *file, ret = -ENOSYS; break; } + mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; @@ -1068,7 +1086,9 @@ static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf, return PTR_ERR(ctx); ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param); + mutex_lock(&ctx->mutex); ret = rdma_connect(ctx->cm_id, &conn_param); + mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } @@ -1089,7 +1109,9 @@ static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf, ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ? cmd.backlog : max_backlog; + mutex_lock(&ctx->mutex); ret = rdma_listen(ctx->cm_id, ctx->backlog); + mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } @@ -1112,13 +1134,17 @@ static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf, if (cmd.conn_param.valid) { ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param); mutex_lock(&file->mut); + mutex_lock(&ctx->mutex); ret = __rdma_accept(ctx->cm_id, &conn_param, NULL); + mutex_unlock(&ctx->mutex); if (!ret) ctx->uid = cmd.uid; mutex_unlock(&file->mut); - } else + } else { + mutex_lock(&ctx->mutex); ret = __rdma_accept(ctx->cm_id, NULL, NULL); - + mutex_unlock(&ctx->mutex); + } ucma_put_ctx(ctx); return ret; } @@ -1137,7 +1163,9 @@ static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf, if (IS_ERR(ctx)) return PTR_ERR(ctx); + mutex_lock(&ctx->mutex); ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len); + mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } @@ -1156,7 +1184,9 @@ static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf, if (IS_ERR(ctx)) return PTR_ERR(ctx); + mutex_lock(&ctx->mutex); ret = rdma_disconnect(ctx->cm_id); + mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } @@ -1187,7 +1217,9 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file, resp.qp_attr_mask = 0; memset(&qp_attr, 0, sizeof qp_attr); qp_attr.qp_state = cmd.qp_state; + mutex_lock(&ctx->mutex); ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask); + mutex_unlock(&ctx->mutex); if (ret) goto out; @@ -1273,9 +1305,13 @@ static int ucma_set_ib_path(struct ucma_context *ctx, struct sa_path_rec opa; sa_convert_path_ib_to_opa(&opa, &sa_path); + mutex_lock(&ctx->mutex); ret = rdma_set_ib_path(ctx->cm_id, &opa); + mutex_unlock(&ctx->mutex); } else { + mutex_lock(&ctx->mutex); ret = rdma_set_ib_path(ctx->cm_id, &sa_path); + mutex_unlock(&ctx->mutex); } if (ret) return ret; @@ -1308,7 +1344,9 @@ static int ucma_set_option_level(struct ucma_context *ctx, int level, switch (level) { case RDMA_OPTION_ID: + mutex_lock(&ctx->mutex); ret = ucma_set_option_id(ctx, optname, optval, optlen); + mutex_unlock(&ctx->mutex); break; case RDMA_OPTION_IB: ret = ucma_set_option_ib(ctx, optname, optval, optlen); @@ -1368,8 +1406,10 @@ static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf, if 
(IS_ERR(ctx)) return PTR_ERR(ctx); + mutex_lock(&ctx->mutex); if (ctx->cm_id->device) ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event); + mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; @@ -1412,8 +1452,10 @@ static ssize_t ucma_process_join(struct ucma_file *file, mc->join_state = join_state; mc->uid = cmd->uid; memcpy(&mc->addr, addr, cmd->addr_size); + mutex_lock(&ctx->mutex); ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr, join_state, mc); + mutex_unlock(&ctx->mutex); if (ret) goto err2; @@ -1513,7 +1555,10 @@ static ssize_t ucma_leave_multicast(struct ucma_file *file, goto out; } + mutex_lock(&mc->ctx->mutex); rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr); + mutex_unlock(&mc->ctx->mutex); + mutex_lock(&mc->ctx->file->mut); ucma_cleanup_mc_events(mc); list_del(&mc->list); -- cgit v1.2.3-58-ga151 From 968707207141aba5b32871b300dcc601da3afe5c Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Thu, 20 Feb 2020 09:12:38 +0200 Subject: RDMA/ipoib: Don't set constant driver version There is no need to set driver version in in-tree kernel code. Link: https://lore.kernel.org/r/20200220071239.231800-2-leon@kernel.org Signed-off-by: Leon Romanovsky Reviewed-by: Dennis Dalessandro Signed-off-by: Jason Gunthorpe --- drivers/infiniband/ulp/ipoib/ipoib.h | 2 -- drivers/infiniband/ulp/ipoib/ipoib_ethtool.c | 3 --- drivers/infiniband/ulp/ipoib/ipoib_main.c | 4 ---- 3 files changed, 9 deletions(-) diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index 2aa3457a30ce..e188a95984b5 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -838,6 +838,4 @@ extern int ipoib_debug_level; #define IPOIB_QPN(ha) (be32_to_cpup((__be32 *) ha) & 0xffffff) -extern const char ipoib_driver_version[]; - #endif /* _IPOIB_H */ diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c index 63e4f9d15fd9..a47097d4577c 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c @@ -68,9 +68,6 @@ static void ipoib_get_drvinfo(struct net_device *netdev, strlcpy(drvinfo->bus_info, dev_name(priv->ca->dev.parent), sizeof(drvinfo->bus_info)); - strlcpy(drvinfo->version, ipoib_driver_version, - sizeof(drvinfo->version)); - strlcpy(drvinfo->driver, "ib_ipoib", sizeof(drvinfo->driver)); } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 4a0d3a9e72e1..81b8227214f1 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -52,10 +52,6 @@ #include #include -#define DRV_VERSION "1.0.0" - -const char ipoib_driver_version[] = DRV_VERSION; - MODULE_AUTHOR("Roland Dreier"); MODULE_DESCRIPTION("IP-over-InfiniBand net driver"); MODULE_LICENSE("Dual BSD/GPL"); -- cgit v1.2.3-58-ga151 From 699d9e7542825464acb4e08212bca1b6d5a82593 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Thu, 20 Feb 2020 09:12:39 +0200 Subject: RDMA/opa_vnic: Delete driver version The default version provided by "ethtool -i" it the correct way to identify Driver version. There is no need to overwrite it. 
Link: https://lore.kernel.org/r/20200220071239.231800-3-leon@kernel.org Signed-off-by: Leon Romanovsky Reviewed-by: Dennis Dalessandro Signed-off-by: Jason Gunthorpe --- drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c | 2 -- drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h | 1 - drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c | 5 ----- 3 files changed, 8 deletions(-) diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c index 8ad7da989a0e..42d557dff19d 100644 --- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c +++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c @@ -125,8 +125,6 @@ static void vnic_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { strlcpy(drvinfo->driver, opa_vnic_driver_name, sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, opa_vnic_driver_version, - sizeof(drvinfo->version)); strlcpy(drvinfo->bus_info, dev_name(netdev->dev.parent), sizeof(drvinfo->bus_info)); } diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h b/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h index 6dbc08e1a6a6..dd942dd642bd 100644 --- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h +++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h @@ -292,7 +292,6 @@ struct opa_vnic_mac_tbl_node { hlist_for_each_entry(obj, &name[bkt], member) extern char opa_vnic_driver_name[]; -extern const char opa_vnic_driver_version[]; struct opa_vnic_adapter *opa_vnic_add_netdev(struct ib_device *ibdev, u8 port_num, u8 vport_num); diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c index be5befd92d16..6e8d650c17c7 100644 --- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c +++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c @@ -59,9 +59,7 @@ #include "opa_vnic_internal.h" -#define DRV_VERSION "1.0" char opa_vnic_driver_name[] = "opa_vnic"; -const char opa_vnic_driver_version[] = DRV_VERSION; /* * The trap service level is kept in bits 3 to 7 in the trap_sl_rsvd @@ -1041,9 +1039,6 @@ static int __init opa_vnic_init(void) { int rc; - pr_info("OPA Virtual Network Driver - v%s\n", - opa_vnic_driver_version); - rc = ib_register_client(&opa_vnic_client); if (rc) pr_err("VNIC driver register failed %d\n", rc); -- cgit v1.2.3-58-ga151 From dfaf2854b02e5c45bf806c7a1d8b4706b87ddc8e Mon Sep 17 00:00:00 2001 From: Lang Cheng Date: Sat, 22 Feb 2020 17:17:19 +0800 Subject: RDMA/hns: Treat revision HIP08_A as a special case Set revisions that equal to or higher than HIP08_B as default to maintain backward compatibility. 
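Put differently, the rule being applied is that capabilities introduced with HIP08_B are assumed to remain available on any later revision, so only the original HIP08_A revision keeps special handling. A one-line illustrative sketch of the gate (the flags set here are just an example drawn from the hunk below):

	/* ">=" lets future revisions inherit HIP08_B features by default;
	 * "==" would silently drop them on newer silicon. */
	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP08_B)
		caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC | HNS_ROCE_CAP_FLAG_MW;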
Link: https://lore.kernel.org/r/1582363039-10714-1-git-send-email-liweihang@huawei.com Signed-off-by: Lang Cheng Signed-off-by: Weihang Li Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index dee1cc8ffb42..593bf8d986db 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -1680,7 +1680,7 @@ static void set_default_caps(struct hns_roce_dev *hr_dev) caps->max_srq_wrs = HNS_ROCE_V2_MAX_SRQ_WR; caps->max_srq_sges = HNS_ROCE_V2_MAX_SRQ_SGE; - if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_B) { + if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP08_B) { caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC | HNS_ROCE_CAP_FLAG_MW | HNS_ROCE_CAP_FLAG_SRQ | HNS_ROCE_CAP_FLAG_FRMR | HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL; @@ -1928,7 +1928,7 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev) caps->srqc_bt_num, &caps->srqc_buf_pg_sz, &caps->srqc_ba_pg_sz, HEM_TYPE_SRQC); - if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_B) { + if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP08_B) { caps->sccc_hop_num = ctx_hop_num; caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0; caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0; @@ -1988,7 +1988,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev) return ret; } - if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_B) { + if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP08_B) { ret = hns_roce_query_pf_timer_resource(hr_dev); if (ret) { dev_err(hr_dev->dev, @@ -1996,16 +1996,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev) ret); return ret; } - } - - ret = hns_roce_alloc_vf_resource(hr_dev); - if (ret) { - dev_err(hr_dev->dev, "Allocate vf resource fail, ret = %d.\n", - ret); - return ret; - } - if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_B) { ret = hns_roce_set_vf_switch_param(hr_dev, 0); if (ret) { dev_err(hr_dev->dev, @@ -2015,6 +2006,13 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev) } } + ret = hns_roce_alloc_vf_resource(hr_dev); + if (ret) { + dev_err(hr_dev->dev, "Allocate vf resource fail, ret = %d.\n", + ret); + return ret; + } + hr_dev->vendor_part_id = hr_dev->pci_dev->device; hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid); @@ -2287,7 +2285,7 @@ static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev) { struct hns_roce_v2_priv *priv = hr_dev->priv; - if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_B) + if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP08_B) hns_roce_function_clear(hr_dev); hns_roce_free_link_table(hr_dev, &priv->tpq); @@ -4472,7 +4470,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp, roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M, V2_QPC_BYTE_24_HOP_LIMIT_S, 0); - if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_B && is_udp) + if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP08_B && is_udp) roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M, V2_QPC_BYTE_24_TC_S, grh->traffic_class >> 2); else -- cgit v1.2.3-58-ga151 From 0fc99566f6eeb8c1d7e1795710b95c03a987124d Mon Sep 17 00:00:00 2001 From: Yixian Liu Date: Sat, 22 Feb 2020 18:25:57 +0800 Subject: RDMA/hns: Use flush framework for the case in aeq As now we already have flush framework, using it instead of current flush process for qp error in asynchronized interrupt (aeq). 
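For clarity, the pattern applied in hns_roce_qp_event() by the hunk below, condensed into an illustrative fragment (hip08 only, i.e. hw_rev != HNS_ROCE_HW_VER1):

	/* On a fatal work-queue AEQ event, mark the QP errored and
	 * schedule the shared flush work exactly once; the work item
	 * performs the (sleeping) modify-QP instead of the AEQ path. */
	if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 &&
	    (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR ||
	     event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR ||
	     event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR)) {
		qp->state = IB_QPS_ERR;
		if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
			init_flush_work(hr_dev, qp);
	}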
Link: https://lore.kernel.org/r/1582367158-27030-2-git-send-email-liuyixian@huawei.com Signed-off-by: Yixian Liu Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 36 ------------------------------ drivers/infiniband/hw/hns/hns_roce_qp.c | 9 ++++++++ 2 files changed, 9 insertions(+), 36 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 593bf8d986db..2b372ee55fb5 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -5184,39 +5184,6 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) return ret; } -static void hns_roce_set_qps_to_err(struct hns_roce_dev *hr_dev, u32 qpn) -{ - struct hns_roce_qp *hr_qp; - struct ib_qp_attr attr; - int attr_mask; - int ret; - - hr_qp = __hns_roce_qp_lookup(hr_dev, qpn); - if (!hr_qp) { - dev_warn(hr_dev->dev, "no hr_qp can be found!\n"); - return; - } - - if (hr_qp->ibqp.uobject) { - if (hr_qp->sdb_en == 1) { - hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr); - if (hr_qp->rdb_en == 1) - hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr); - } else { - dev_warn(hr_dev->dev, "flush cqe is unsupported in userspace!\n"); - return; - } - } - - attr_mask = IB_QP_STATE; - attr.qp_state = IB_QPS_ERR; - ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, &attr, attr_mask, - hr_qp->state, IB_QPS_ERR); - if (ret) - dev_err(hr_dev->dev, "failed to modify qp %d to err state.\n", - qpn); -} - static void hns_roce_irq_work_handle(struct work_struct *work) { struct hns_roce_work *irq_work = @@ -5240,17 +5207,14 @@ static void hns_roce_irq_work_handle(struct work_struct *work) case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR: dev_err(dev, "Local work queue 0x%x catas error, sub_type:%d\n", qpn, irq_work->sub_type); - hns_roce_set_qps_to_err(irq_work->hr_dev, qpn); break; case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR: dev_err(dev, "Invalid request local work queue 0x%x error.\n", qpn); - hns_roce_set_qps_to_err(irq_work->hr_dev, qpn); break; case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR: dev_err(dev, "Local access violation work queue 0x%x error, sub_type:%d\n", qpn, irq_work->sub_type); - hns_roce_set_qps_to_err(irq_work->hr_dev, qpn); break; case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH: dev_warn(dev, "SRQ limit reach.\n"); diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index c52e1b00f30d..6c3f0f737963 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -98,6 +98,15 @@ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type) return; } + if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 && + (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR || + event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR || + event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR)) { + qp->state = IB_QPS_ERR; + if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag)) + init_flush_work(hr_dev, qp); + } + qp->event(qp, (enum hns_roce_event)event_type); if (atomic_dec_and_test(&qp->refcount)) -- cgit v1.2.3-58-ga151 From 75c994e6943c2aa887871eb6e6cea6c073b1cfd3 Mon Sep 17 00:00:00 2001 From: Yixian Liu Date: Sat, 22 Feb 2020 18:25:58 +0800 Subject: RDMA/hns: Stop doorbell update while qp state error There are two paths to update qp producer index into hardware now, one path is doorbell in post verbs (send and recv), the another is mailbox in modify qp verb which is called by flush process. 
This will lead the hardware to be broken to correctly generate flush cqe. With stopping doorbell update and holding qp spinlock in modify qp during flush process, the problem can be solved. Fixes: 0425e3e6e0c7 ("RDMA/hns: Support flush cqe for hip08 in kernel space") Link: https://lore.kernel.org/r/1582367158-27030-3-git-send-email-liuyixian@huawei.com Signed-off-by: Yixian Liu Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 76 ++++++++++++++++-------------- 1 file changed, 41 insertions(+), 35 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 2b372ee55fb5..b19dedeba066 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -244,6 +244,38 @@ static int check_send_valid(struct hns_roce_dev *hr_dev, return 0; } +static inline void update_sq_db(struct hns_roce_dev *hr_dev, + struct hns_roce_qp *qp) +{ + /* + * Hip08 hardware cannot flush the WQEs in SQ if the QP state + * gets into errored mode. Hence, as a workaround to this + * hardware limitation, driver needs to assist in flushing. But + * the flushing operation uses mailbox to convey the QP state to + * the hardware and which can sleep due to the mutex protection + * around the mailbox calls. Hence, use the deferred flush for + * now. + */ + if (qp->state == IB_QPS_ERR) { + if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag)) + init_flush_work(hr_dev, qp); + } else { + struct hns_roce_v2_db sq_db = {}; + + roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_TAG_M, + V2_DB_BYTE_4_TAG_S, qp->doorbell_qpn); + roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_CMD_M, + V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_SQ_DB); + roce_set_field(sq_db.parameter, V2_DB_PARAMETER_IDX_M, + V2_DB_PARAMETER_IDX_S, + qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1)); + roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M, + V2_DB_PARAMETER_SL_S, qp->sl); + + hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg_l); + } +} + static int hns_roce_v2_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, const struct ib_send_wr **bad_wr) @@ -255,7 +287,6 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct hns_roce_qp *qp = to_hr_qp(ibqp); struct hns_roce_wqe_frmr_seg *fseg; struct device *dev = hr_dev->dev; - struct hns_roce_v2_db sq_db = {}; unsigned int owner_bit; unsigned int sge_idx; unsigned int wqe_idx; @@ -580,36 +611,10 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, out: if (likely(nreq)) { qp->sq.head += nreq; + qp->next_sge = sge_idx; /* Memory barrier */ wmb(); - - roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_TAG_M, - V2_DB_BYTE_4_TAG_S, qp->doorbell_qpn); - roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_CMD_M, - V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_SQ_DB); - roce_set_field(sq_db.parameter, V2_DB_PARAMETER_IDX_M, - V2_DB_PARAMETER_IDX_S, - qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1)); - roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M, - V2_DB_PARAMETER_SL_S, qp->sl); - - hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg_l); - - qp->next_sge = sge_idx; - - /* - * Hip08 hardware cannot flush the WQEs in SQ if the QP state - * gets into errored mode. Hence, as a workaround to this - * hardware limitation, driver needs to assist in flushing. But - * the flushing operation uses mailbox to convey the QP state to - * the hardware and which can sleep due to the mutex protection - * around the mailbox calls. Hence, use the deferred flush for - * now. 
- */ - if (qp->state == IB_QPS_ERR) - if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, - &qp->flush_flag)) - init_flush_work(hr_dev, qp); + update_sq_db(hr_dev, qp); } spin_unlock_irqrestore(&qp->sq.lock, flags); @@ -706,8 +711,6 @@ out: /* Memory barrier */ wmb(); - *hr_qp->rdb.db_record = hr_qp->rq.head & 0xffff; - /* * Hip08 hardware cannot flush the WQEs in RQ if the QP state * gets into errored mode. Hence, as a workaround to this @@ -717,10 +720,13 @@ out: * around the mailbox calls. Hence, use the deferred flush for * now. */ - if (hr_qp->state == IB_QPS_ERR) + if (hr_qp->state == IB_QPS_ERR) { if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag)) init_flush_work(hr_dev, hr_qp); + } else { + *hr_qp->rdb.db_record = hr_qp->rq.head & 0xffff; + } } spin_unlock_irqrestore(&hr_qp->rq.lock, flags); @@ -4750,7 +4756,6 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, /* When QP state is err, SQ and RQ WQE should be flushed */ if (new_state == IB_QPS_ERR) { spin_lock_irqsave(&hr_qp->sq.lock, sq_flag); - spin_lock_irqsave(&hr_qp->rq.lock, rq_flag); hr_qp->state = IB_QPS_ERR; roce_set_field(context->byte_160_sq_ci_pi, V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M, @@ -4759,8 +4764,10 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, roce_set_field(qpc_mask->byte_160_sq_ci_pi, V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M, V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0); + spin_unlock_irqrestore(&hr_qp->sq.lock, sq_flag); if (!ibqp->srq) { + spin_lock_irqsave(&hr_qp->rq.lock, rq_flag); roce_set_field(context->byte_84_rq_ci_pi, V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M, V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, @@ -4768,9 +4775,8 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, roce_set_field(qpc_mask->byte_84_rq_ci_pi, V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M, V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0); + spin_unlock_irqrestore(&hr_qp->rq.lock, rq_flag); } - spin_unlock_irqrestore(&hr_qp->rq.lock, rq_flag); - spin_unlock_irqrestore(&hr_qp->sq.lock, sq_flag); } /* Configure the optional fields */ -- cgit v1.2.3-58-ga151 From e365b26c6b66fe2bc800399912f7cbc875edf693 Mon Sep 17 00:00:00 2001 From: Xi Wang Date: Mon, 24 Feb 2020 14:37:32 +0800 Subject: RDMA/hns: Optimize qp destroy flow Wrap the duplicate code in hip08 and hip06 qp destruction process as hns_roce_qp_destroy() to simply the qp destroy flow. 
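The intended call shape after the refactor is that each hardware backend keeps only its own teardown and then hands off to the shared helper. A simplified sketch (the _sketch function is hypothetical; hw-specific steps are elided):

	static int hns_roce_vX_destroy_qp_sketch(struct ib_qp *ibqp,
						 struct ib_udata *udata)
	{
		struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
		struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);

		/* hw-specific part: move QPC to reset, clean CQEs, ... */

		/* common part: refcount wait, QPC/QPN release, buffers,
		 * and the final kfree(hr_qp) */
		hns_roce_qp_destroy(hr_dev, hr_qp, udata);
		return 0;
	}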
Link: https://lore.kernel.org/r/1582526258-13825-2-git-send-email-liweihang@huawei.com Signed-off-by: Xi Wang Signed-off-by: Weihang Li Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_device.h | 5 ++-- drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 19 ++----------- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 39 +-------------------------- drivers/infiniband/hw/hns/hns_roce_qp.c | 41 +++++++++++++++++++++++++++++ 4 files changed, 46 insertions(+), 58 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index d0a83926dc8f..f7335c9ff9d3 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -1250,9 +1250,8 @@ void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq); void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp); -void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp); -void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn, - int cnt); +void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, + struct ib_udata *udata); __be32 send_ieth(const struct ib_send_wr *wr); int to_hr_qp_type(int qp_type); diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 89dac44b3cef..c05a905c518e 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -3618,26 +3618,11 @@ int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) if (send_cq && send_cq != recv_cq) __hns_roce_v1_cq_clean(send_cq, hr_qp->qpn, NULL); } - hns_roce_unlock_cqs(send_cq, recv_cq); - hns_roce_qp_remove(hr_dev, hr_qp); - hns_roce_qp_free(hr_dev, hr_qp); - - /* RC QP, release QPN */ - if (hr_qp->ibqp.qp_type == IB_QPT_RC) - hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1); - - hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt); - - ib_umem_release(hr_qp->umem); - if (!udata) { - kfree(hr_qp->sq.wrid); - kfree(hr_qp->rq.wrid); + hns_roce_unlock_cqs(send_cq, recv_cq); - hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf); - } + hns_roce_qp_destroy(hr_dev, hr_qp, udata); - kfree(hr_qp); return 0; } diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index b19dedeba066..96ccef0a3097 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -5040,43 +5040,6 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev, hns_roce_unlock_cqs(send_cq, recv_cq); spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags); - hns_roce_qp_free(hr_dev, hr_qp); - - /* Not special_QP, free their QPN */ - if ((hr_qp->ibqp.qp_type == IB_QPT_RC) || - (hr_qp->ibqp.qp_type == IB_QPT_UC) || - (hr_qp->ibqp.qp_type == IB_QPT_UD)) - hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1); - - hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr); - - if (udata) { - struct hns_roce_ucontext *context = - rdma_udata_to_drv_context( - udata, - struct hns_roce_ucontext, - ibucontext); - - if (hr_qp->sq.wqe_cnt && (hr_qp->sdb_en == 1)) - hns_roce_db_unmap_user(context, &hr_qp->sdb); - - if (hr_qp->rq.wqe_cnt && (hr_qp->rdb_en == 1)) - hns_roce_db_unmap_user(context, &hr_qp->rdb); - } else { - kfree(hr_qp->sq.wrid); - kfree(hr_qp->rq.wrid); - hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf); - if (hr_qp->rq.wqe_cnt) - hns_roce_free_db(hr_dev, &hr_qp->rdb); 
- } - ib_umem_release(hr_qp->umem); - - if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) && - hr_qp->rq.wqe_cnt) { - kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list); - kfree(hr_qp->rq_inl_buf.wqe_list); - } - return ret; } @@ -5091,7 +5054,7 @@ static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) ibdev_err(&hr_dev->ib_dev, "Destroy qp 0x%06lx failed(%d)\n", hr_qp->qpn, ret); - kfree(hr_qp); + hns_roce_qp_destroy(hr_dev, hr_qp, udata); return 0; } diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 6c3f0f737963..da25b1d7b5d7 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -1087,6 +1087,47 @@ err_out: return ret; } +void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, + struct ib_udata *udata) +{ + hns_roce_qp_free(hr_dev, hr_qp); + + /* Not special_QP, free their QPN */ + if (hr_qp->ibqp.qp_type != IB_QPT_GSI) + hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1); + + hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr); + + if (udata) { + struct hns_roce_ucontext *context = + rdma_udata_to_drv_context( + udata, + struct hns_roce_ucontext, + ibucontext); + + if (hr_qp->sq.wqe_cnt && (hr_qp->sdb_en == 1)) + hns_roce_db_unmap_user(context, &hr_qp->sdb); + + if (hr_qp->rq.wqe_cnt && (hr_qp->rdb_en == 1)) + hns_roce_db_unmap_user(context, &hr_qp->rdb); + } else { + kfree(hr_qp->sq.wrid); + kfree(hr_qp->rq.wrid); + hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf); + if (hr_qp->rq.wqe_cnt) + hns_roce_free_db(hr_dev, &hr_qp->rdb); + } + ib_umem_release(hr_qp->umem); + + if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) && + hr_qp->rq.wqe_cnt) { + kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list); + kfree(hr_qp->rq_inl_buf.wqe_list); + } + + kfree(hr_qp); +} + struct ib_qp *hns_roce_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *init_attr, struct ib_udata *udata) -- cgit v1.2.3-58-ga151 From b71961d1daa040c4979760e50606f34e24642764 Mon Sep 17 00:00:00 2001 From: Xi Wang Date: Mon, 24 Feb 2020 14:37:33 +0800 Subject: RDMA/hns: Optimize qp context create and destroy flow Rename the qp context related functions and adjusts the code location to distinguish between the qp context and the entire qp. 
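After the rename, the create path separates allocating the context resources from publishing the QP for lookup, with the unwind done in reverse. An illustrative sketch of the ordering (the _sketch wrapper is hypothetical; the callers' other error handling is elided):

	static int create_qp_ctx_sketch(struct hns_roce_dev *hr_dev,
					struct hns_roce_qp *hr_qp,
					struct ib_qp_init_attr *init_attr)
	{
		int ret;

		ret = alloc_qpc(hr_dev, hr_qp);	/* QPC/IRRL/TRRL/SCCC tables */
		if (ret)
			return ret;

		/* xarray store plus the device/CQ software lists */
		ret = hns_roce_qp_store(hr_dev, hr_qp, init_attr);
		if (ret)
			free_qpc(hr_dev, hr_qp);	/* unwind in reverse */

		return ret;
	}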
Link: https://lore.kernel.org/r/1582526258-13825-3-git-send-email-liweihang@huawei.com Signed-off-by: Xi Wang Signed-off-by: Weihang Li Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 4 - drivers/infiniband/hw/hns/hns_roce_qp.c | 166 ++++++++++++++--------------- 2 files changed, 81 insertions(+), 89 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 96ccef0a3097..82021fa46d9d 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -5019,10 +5019,6 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev, spin_lock_irqsave(&hr_dev->qp_list_lock, flags); hns_roce_lock_cqs(send_cq, recv_cq); - list_del(&hr_qp->node); - list_del(&hr_qp->sq_node); - list_del(&hr_qp->rq_node); - if (!udata) { if (recv_cq) __hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn, diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index da25b1d7b5d7..396356a11b1b 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -187,50 +187,75 @@ enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state) } } -static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn, - struct hns_roce_qp *hr_qp) +static void add_qp_to_list(struct hns_roce_dev *hr_dev, + struct hns_roce_qp *hr_qp, + struct ib_cq *send_cq, struct ib_cq *recv_cq) +{ + struct hns_roce_cq *hr_send_cq, *hr_recv_cq; + unsigned long flags; + + hr_send_cq = send_cq ? to_hr_cq(send_cq) : NULL; + hr_recv_cq = recv_cq ? to_hr_cq(recv_cq) : NULL; + + spin_lock_irqsave(&hr_dev->qp_list_lock, flags); + hns_roce_lock_cqs(hr_send_cq, hr_recv_cq); + + list_add_tail(&hr_qp->node, &hr_dev->qp_list); + if (hr_send_cq) + list_add_tail(&hr_qp->sq_node, &hr_send_cq->sq_list); + if (hr_recv_cq) + list_add_tail(&hr_qp->rq_node, &hr_recv_cq->rq_list); + + hns_roce_unlock_cqs(hr_send_cq, hr_recv_cq); + spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags); +} + +static int hns_roce_qp_store(struct hns_roce_dev *hr_dev, + struct hns_roce_qp *hr_qp, + struct ib_qp_init_attr *init_attr) { struct xarray *xa = &hr_dev->qp_table_xa; int ret; - if (!qpn) + if (!hr_qp->qpn) return -EINVAL; - hr_qp->qpn = qpn; - atomic_set(&hr_qp->refcount, 1); - init_completion(&hr_qp->free); - - ret = xa_err(xa_store_irq(xa, hr_qp->qpn & (hr_dev->caps.num_qps - 1), - hr_qp, GFP_KERNEL)); + ret = xa_err(xa_store_irq(xa, hr_qp->qpn, hr_qp, GFP_KERNEL)); if (ret) - dev_err(hr_dev->dev, "QPC xa_store failed\n"); + dev_err(hr_dev->dev, "Failed to xa store for QPC\n"); + else + /* add QP to device's QP list for softwc */ + add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq, + init_attr->recv_cq); return ret; } -static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn, - struct hns_roce_qp *hr_qp) +static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) { struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; struct device *dev = hr_dev->dev; int ret; - if (!qpn) + if (!hr_qp->qpn) return -EINVAL; - hr_qp->qpn = qpn; + /* In v1 engine, GSI QP context is saved in the RoCE hw's register */ + if (hr_qp->ibqp.qp_type == IB_QPT_GSI && + hr_dev->hw_rev == HNS_ROCE_HW_VER1) + return 0; /* Alloc memory for QPC */ ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn); if (ret) { - dev_err(dev, "QPC table get failed\n"); + dev_err(dev, "Failed to get QPC table\n"); goto err_out; } /* Alloc memory for IRRL */ 
ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn); if (ret) { - dev_err(dev, "IRRL table get failed\n"); + dev_err(dev, "Failed to get IRRL table\n"); goto err_put_qp; } @@ -239,7 +264,7 @@ static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn, ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table, hr_qp->qpn); if (ret) { - dev_err(dev, "TRRL table get failed\n"); + dev_err(dev, "Failed to get TRRL table\n"); goto err_put_irrl; } } @@ -249,22 +274,13 @@ static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn, ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table, hr_qp->qpn); if (ret) { - dev_err(dev, "SCC CTX table get failed\n"); + dev_err(dev, "Failed to get SCC CTX table\n"); goto err_put_trrl; } } - ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp); - if (ret) - goto err_put_sccc; - return 0; -err_put_sccc: - if (hr_dev->caps.sccc_entry_sz) - hns_roce_table_put(hr_dev, &qp_table->sccc_table, - hr_qp->qpn); - err_put_trrl: if (hr_dev->caps.trrl_entry_sz) hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn); @@ -284,25 +300,27 @@ void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) struct xarray *xa = &hr_dev->qp_table_xa; unsigned long flags; + list_del(&hr_qp->node); + list_del(&hr_qp->sq_node); + list_del(&hr_qp->rq_node); + xa_lock_irqsave(xa, flags); __xa_erase(xa, hr_qp->qpn & (hr_dev->caps.num_qps - 1)); xa_unlock_irqrestore(xa, flags); } -void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) +static void free_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) { struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; - if (atomic_dec_and_test(&hr_qp->refcount)) - complete(&hr_qp->free); - wait_for_completion(&hr_qp->free); + /* In v1 engine, GSI QP context is saved in the RoCE hw's register */ + if (hr_qp->ibqp.qp_type == IB_QPT_GSI && + hr_dev->hw_rev == HNS_ROCE_HW_VER1) + return; - if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) { - if (hr_dev->caps.trrl_entry_sz) - hns_roce_table_put(hr_dev, &qp_table->trrl_table, - hr_qp->qpn); - hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn); - } + if (hr_dev->caps.trrl_entry_sz) + hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn); + hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn); } void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn, @@ -728,29 +746,6 @@ static void free_rq_inline_buf(struct hns_roce_qp *hr_qp) kfree(hr_qp->rq_inl_buf.wqe_list); } -static void add_qp_to_list(struct hns_roce_dev *hr_dev, - struct hns_roce_qp *hr_qp, - struct ib_cq *send_cq, struct ib_cq *recv_cq) -{ - struct hns_roce_cq *hr_send_cq, *hr_recv_cq; - unsigned long flags; - - hr_send_cq = send_cq ? to_hr_cq(send_cq) : NULL; - hr_recv_cq = recv_cq ? 
to_hr_cq(recv_cq) : NULL; - - spin_lock_irqsave(&hr_dev->qp_list_lock, flags); - hns_roce_lock_cqs(hr_send_cq, hr_recv_cq); - - list_add_tail(&hr_qp->node, &hr_dev->qp_list); - if (hr_send_cq) - list_add_tail(&hr_qp->sq_node, &hr_send_cq->sq_list); - if (hr_recv_cq) - list_add_tail(&hr_qp->rq_node, &hr_recv_cq->rq_list); - - hns_roce_unlock_cqs(hr_send_cq, hr_recv_cq); - spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags); -} - static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, struct ib_pd *ib_pd, struct ib_qp_init_attr *init_attr, @@ -975,6 +970,8 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, } } + hr_qp->qpn = qpn; + hr_qp->wqe_bt_pg_shift = calc_wqe_bt_page_shift(hr_dev, hr_qp->regions, hr_qp->region_cnt); hns_roce_mtr_init(&hr_qp->mtr, PAGE_SHIFT + hr_qp->wqe_bt_pg_shift, @@ -986,20 +983,16 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, goto err_mtr; } - if (init_attr->qp_type == IB_QPT_GSI && - hr_dev->hw_rev == HNS_ROCE_HW_VER1) { - /* In v1 engine, GSI QP context in RoCE engine's register */ - ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp); - if (ret) { - dev_err(dev, "hns_roce_qp_alloc failed!\n"); - goto err_qpn; - } - } else { - ret = hns_roce_qp_alloc(hr_dev, qpn, hr_qp); - if (ret) { - dev_err(dev, "hns_roce_qp_alloc failed!\n"); - goto err_qpn; - } + ret = alloc_qpc(hr_dev, hr_qp); + if (ret) { + ibdev_err(&hr_dev->ib_dev, "Failed to alloc QP context\n"); + goto err_qpn; + } + + ret = hns_roce_qp_store(hr_dev, hr_qp, init_attr); + if (ret) { + ibdev_err(&hr_dev->ib_dev, "Failed to store QP\n"); + goto err_qpc; } if (sqpn) @@ -1011,29 +1004,28 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, ret = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp))); if (ret) - goto err_qp; + goto err_store; } if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) { ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp); if (ret) - goto err_qp; + goto err_store; } hr_qp->event = hns_roce_ib_qp_event; - - add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq, init_attr->recv_cq); + atomic_set(&hr_qp->refcount, 1); + init_completion(&hr_qp->free); hns_roce_free_buf_list(buf_list, hr_qp->region_cnt); return 0; -err_qp: - if (init_attr->qp_type == IB_QPT_GSI && - hr_dev->hw_rev == HNS_ROCE_HW_VER1) - hns_roce_qp_remove(hr_dev, hr_qp); - else - hns_roce_qp_free(hr_dev, hr_qp); +err_store: + hns_roce_qp_remove(hr_dev, hr_qp); + +err_qpc: + free_qpc(hr_dev, hr_qp); err_qpn: if (!sqpn) @@ -1090,7 +1082,11 @@ err_out: void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, struct ib_udata *udata) { - hns_roce_qp_free(hr_dev, hr_qp); + if (atomic_dec_and_test(&hr_qp->refcount)) + complete(&hr_qp->free); + wait_for_completion(&hr_qp->free); + + free_qpc(hr_dev, hr_qp); /* Not special_QP, free their QPN */ if (hr_qp->ibqp.qp_type != IB_QPT_GSI) -- cgit v1.2.3-58-ga151 From df83a66e1b2e1194cd6cd744f09f06448e0b8b3c Mon Sep 17 00:00:00 2001 From: Xi Wang Date: Mon, 24 Feb 2020 14:37:34 +0800 Subject: RDMA/hns: Optimize qp number assign flow Encapsulate the code associated with the qp number assignment into alloc_qpn() and free_qpn(). 
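For illustration, how the two helpers pair up across a QP's lifetime (the _sketch wrapper is hypothetical):

	static int qpn_lifetime_sketch(struct hns_roce_dev *hr_dev,
				       struct hns_roce_qp *hr_qp)
	{
		int ret;

		/* GSI QPs get a fixed number (per-port on hip06, 1 on hip08);
		 * others draw one from the bitmap; doorbell_qpn is set here too */
		ret = alloc_qpn(hr_dev, hr_qp);
		if (ret)
			return ret;

		/* ... QP in use ... */

		free_qpn(hr_dev, hr_qp);	/* no-op for GSI/reserved QPNs */
		return 0;
	}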
Link: https://lore.kernel.org/r/1582526258-13825-4-git-send-email-liweihang@huawei.com Signed-off-by: Xi Wang Signed-off-by: Weihang Li Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_qp.c | 91 ++++++++++++++++----------------- 1 file changed, 44 insertions(+), 47 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 396356a11b1b..8ec0ea96badf 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -156,15 +156,34 @@ static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp, } } -static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt, - int align, unsigned long *base) +static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) { - struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; + unsigned long num = 0; + int ret; + + if (hr_qp->ibqp.qp_type == IB_QPT_GSI) { + /* when hw version is v1, the sqpn is allocated */ + if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) + num = HNS_ROCE_MAX_PORTS + + hr_dev->iboe.phy_port[hr_qp->port]; + else + num = 1; + + hr_qp->doorbell_qpn = 1; + } else { + ret = hns_roce_bitmap_alloc_range(&hr_dev->qp_table.bitmap, + 1, 1, &num); + if (ret) { + ibdev_err(&hr_dev->ib_dev, "Failed to alloc bitmap\n"); + return -ENOMEM; + } + + hr_qp->doorbell_qpn = (u32)num; + } + + hr_qp->qpn = num; - return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, - base) ? - -ENOMEM : - 0; + return 0; } enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state) @@ -323,15 +342,17 @@ static void free_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn); } -void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn, - int cnt) +static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) { struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; - if (base_qpn < hr_dev->caps.reserved_qps) + if (hr_qp->ibqp.qp_type == IB_QPT_GSI) + return; + + if (hr_qp->qpn < hr_dev->caps.reserved_qps) return; - hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt, BITMAP_RR); + hns_roce_bitmap_free_range(&qp_table->bitmap, hr_qp->qpn, 1, BITMAP_RR); } static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev, @@ -749,7 +770,7 @@ static void free_rq_inline_buf(struct hns_roce_qp *hr_qp) static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, struct ib_pd *ib_pd, struct ib_qp_init_attr *init_attr, - struct ib_udata *udata, unsigned long sqpn, + struct ib_udata *udata, struct hns_roce_qp *hr_qp) { dma_addr_t *buf_list[ARRAY_SIZE(hr_qp->regions)] = { NULL }; @@ -759,7 +780,6 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context( udata, struct hns_roce_ucontext, ibucontext); struct hns_roce_buf_region *r; - unsigned long qpn = 0; u32 page_shift; int buf_count; int ret; @@ -959,19 +979,6 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, } } - if (sqpn) { - qpn = sqpn; - } else { - /* Get QPN */ - ret = hns_roce_reserve_range_qp(hr_dev, 1, 1, &qpn); - if (ret) { - dev_err(dev, "hns_roce_reserve_range_qp alloc qpn error\n"); - goto err_wrid; - } - } - - hr_qp->qpn = qpn; - hr_qp->wqe_bt_pg_shift = calc_wqe_bt_page_shift(hr_dev, hr_qp->regions, hr_qp->region_cnt); hns_roce_mtr_init(&hr_qp->mtr, PAGE_SHIFT + hr_qp->wqe_bt_pg_shift, @@ -980,6 +987,12 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, 
hr_qp->regions, hr_qp->region_cnt); if (ret) { dev_err(dev, "mtr attach error for create qp\n"); + goto err_wrid; + } + + ret = alloc_qpn(hr_dev, hr_qp); + if (ret) { + ibdev_err(&hr_dev->ib_dev, "Failed to alloc QPN\n"); goto err_mtr; } @@ -995,11 +1008,6 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, goto err_qpc; } - if (sqpn) - hr_qp->doorbell_qpn = 1; - else - hr_qp->doorbell_qpn = (u32)hr_qp->qpn; - if (udata) { ret = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp))); @@ -1013,6 +1021,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, goto err_store; } + hr_qp->ibqp.qp_num = hr_qp->qpn; hr_qp->event = hns_roce_ib_qp_event; atomic_set(&hr_qp->refcount, 1); init_completion(&hr_qp->free); @@ -1028,8 +1037,7 @@ err_qpc: free_qpc(hr_dev, hr_qp); err_qpn: - if (!sqpn) - hns_roce_release_range_qp(hr_dev, qpn, 1); + free_qpn(hr_dev, hr_qp); err_mtr: hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr); @@ -1088,9 +1096,7 @@ void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, free_qpc(hr_dev, hr_qp); - /* Not special_QP, free their QPN */ - if (hr_qp->ibqp.qp_type != IB_QPT_GSI) - hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1); + free_qpn(hr_dev, hr_qp); hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr); @@ -1139,7 +1145,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd, if (!hr_qp) return ERR_PTR(-ENOMEM); - ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, 0, + ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, hr_qp); if (ret) { ibdev_err(ibdev, "Create QP 0x%06lx failed(%d)\n", @@ -1148,8 +1154,6 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd, return ERR_PTR(ret); } - hr_qp->ibqp.qp_num = hr_qp->qpn; - break; } case IB_QPT_GSI: { @@ -1166,15 +1170,8 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd, hr_qp->port = init_attr->port_num - 1; hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port]; - /* when hw version is v1, the sqpn is allocated */ - if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) - hr_qp->ibqp.qp_num = HNS_ROCE_MAX_PORTS + - hr_dev->iboe.phy_port[hr_qp->port]; - else - hr_qp->ibqp.qp_num = 1; - ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, - hr_qp->ibqp.qp_num, hr_qp); + hr_qp); if (ret) { ibdev_err(ibdev, "Create GSI QP failed!\n"); kfree(hr_qp); -- cgit v1.2.3-58-ga151 From 24c22112b9c2084a362c6fe7d7ddb2bbef5b4a2e Mon Sep 17 00:00:00 2001 From: Xi Wang Date: Mon, 24 Feb 2020 14:37:35 +0800 Subject: RDMA/hns: Optimize qp buffer allocation flow Encapsulate qp buffer allocation related code into 3 functions: alloc_qp_buf(), map_wqe_buf() and free_qp_buf(). 
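For illustration, how the three helpers relate (the _sketch wrapper and its user_addr parameter are assumptions; in the real create path the user buffer address would come from ucmd.buf_addr):

	static int qp_buf_lifetime_sketch(struct hns_roce_dev *hr_dev,
					  struct hns_roce_qp *hr_qp,
					  struct ib_qp_init_attr *init_attr,
					  struct ib_udata *udata,
					  unsigned long user_addr)
	{
		int ret;

		/* chooses umem (user) or hns_roce_buf (kernel), then calls
		 * map_wqe_buf() to split the WQE regions and attach them
		 * to the mtr */
		ret = alloc_qp_buf(hr_dev, hr_qp, init_attr, udata, user_addr);
		if (ret)
			return ret;

		/* ... QP in use ... */

		free_qp_buf(hr_dev, hr_qp);	/* mtr cleanup + umem/buf release */
		return 0;
	}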
Link: https://lore.kernel.org/r/1582526258-13825-5-git-send-email-liweihang@huawei.com Signed-off-by: Xi Wang Signed-off-by: Weihang Li Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_device.h | 1 - drivers/infiniband/hw/hns/hns_roce_qp.c | 266 +++++++++++++++------------- 2 files changed, 144 insertions(+), 123 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index f7335c9ff9d3..d7dcf6ebc526 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -673,7 +673,6 @@ struct hns_roce_qp { /* this define must less than HNS_ROCE_MAX_BT_REGION */ #define HNS_ROCE_WQE_REGION_MAX 3 struct hns_roce_buf_region regions[HNS_ROCE_WQE_REGION_MAX]; - int region_cnt; int wqe_bt_pg_shift; u32 buff_size; diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 8ec0ea96badf..fea77f00340b 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -767,23 +767,147 @@ static void free_rq_inline_buf(struct hns_roce_qp *hr_qp) kfree(hr_qp->rq_inl_buf.wqe_list); } +static int map_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, + u32 page_shift, bool is_user) +{ + dma_addr_t *buf_list[ARRAY_SIZE(hr_qp->regions)] = { NULL }; + struct ib_device *ibdev = &hr_dev->ib_dev; + struct hns_roce_buf_region *r; + int region_count; + int buf_count; + int ret; + int i; + + region_count = split_wqe_buf_region(hr_dev, hr_qp, hr_qp->regions, + ARRAY_SIZE(hr_qp->regions), page_shift); + + /* alloc a tmp list to store WQE buffers address */ + ret = hns_roce_alloc_buf_list(hr_qp->regions, buf_list, region_count); + if (ret) { + ibdev_err(ibdev, "Failed to alloc WQE buffer list\n"); + return ret; + } + + for (i = 0; i < region_count; i++) { + r = &hr_qp->regions[i]; + if (is_user) + buf_count = hns_roce_get_umem_bufs(hr_dev, buf_list[i], + r->count, r->offset, hr_qp->umem, + page_shift); + else + buf_count = hns_roce_get_kmem_bufs(hr_dev, buf_list[i], + r->count, r->offset, &hr_qp->hr_buf); + + if (buf_count != r->count) { + ibdev_err(ibdev, "Failed to get %s WQE buf, expect %d = %d.\n", + is_user ? 
"user" : "kernel", + r->count, buf_count); + ret = -ENOBUFS; + goto done; + } + } + + hr_qp->wqe_bt_pg_shift = calc_wqe_bt_page_shift(hr_dev, hr_qp->regions, + region_count); + hns_roce_mtr_init(&hr_qp->mtr, PAGE_SHIFT + hr_qp->wqe_bt_pg_shift, + page_shift); + ret = hns_roce_mtr_attach(hr_dev, &hr_qp->mtr, buf_list, hr_qp->regions, + region_count); + if (ret) + ibdev_err(ibdev, "Failed to attatch WQE's mtr\n"); + + goto done; + + hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr); +done: + hns_roce_free_buf_list(buf_list, region_count); + + return ret; +} + +static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata, unsigned long addr) +{ + u32 page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz; + struct ib_device *ibdev = &hr_dev->ib_dev; + bool is_rq_buf_inline; + int ret; + + is_rq_buf_inline = (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) && + hns_roce_qp_has_rq(init_attr); + if (is_rq_buf_inline) { + ret = alloc_rq_inline_buf(hr_qp, init_attr); + if (ret) { + ibdev_err(ibdev, "Failed to alloc inline RQ buffer\n"); + return ret; + } + } + + if (udata) { + hr_qp->umem = ib_umem_get(udata, addr, hr_qp->buff_size, 0); + if (IS_ERR(hr_qp->umem)) { + ret = PTR_ERR(hr_qp->umem); + goto err_inline; + } + } else { + ret = hns_roce_buf_alloc(hr_dev, hr_qp->buff_size, + (1 << page_shift) * 2, + &hr_qp->hr_buf, page_shift); + if (ret) + goto err_inline; + } + + ret = map_wqe_buf(hr_dev, hr_qp, page_shift, udata); + if (ret) + goto err_alloc; + + return 0; + +err_inline: + if (is_rq_buf_inline) + free_rq_inline_buf(hr_qp); + +err_alloc: + if (udata) { + ib_umem_release(hr_qp->umem); + hr_qp->umem = NULL; + } else { + hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf); + } + + ibdev_err(ibdev, "Failed to alloc WQE buffer, ret %d.\n", ret); + + return ret; +} + +static void free_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) +{ + hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr); + if (hr_qp->umem) { + ib_umem_release(hr_qp->umem); + hr_qp->umem = NULL; + } + + if (hr_qp->hr_buf.nbufs > 0) + hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf); + + if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) && + hr_qp->rq.wqe_cnt) + free_rq_inline_buf(hr_qp); +} static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, struct ib_pd *ib_pd, struct ib_qp_init_attr *init_attr, struct ib_udata *udata, struct hns_roce_qp *hr_qp) { - dma_addr_t *buf_list[ARRAY_SIZE(hr_qp->regions)] = { NULL }; struct device *dev = hr_dev->dev; struct hns_roce_ib_create_qp ucmd; struct hns_roce_ib_create_qp_resp resp = {}; struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context( udata, struct hns_roce_ucontext, ibucontext); - struct hns_roce_buf_region *r; - u32 page_shift; - int buf_count; int ret; - int i; mutex_init(&hr_qp->mutex); spin_lock_init(&hr_qp->sq.lock); @@ -806,59 +930,18 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, goto err_out; } - if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) && - hns_roce_qp_has_rq(init_attr)) { - ret = alloc_rq_inline_buf(hr_qp, init_attr); - if (ret) { - dev_err(dev, "allocate receive inline buffer failed\n"); - goto err_out; - } - } - - page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz; if (udata) { if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { dev_err(dev, "ib_copy_from_udata error for create qp\n"); ret = -EFAULT; - goto err_alloc_rq_inline_buf; + goto err_out; } ret = hns_roce_set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, 
&ucmd); if (ret) { dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n"); - goto err_alloc_rq_inline_buf; - } - - hr_qp->umem = ib_umem_get(ib_pd->device, ucmd.buf_addr, - hr_qp->buff_size, 0); - if (IS_ERR(hr_qp->umem)) { - dev_err(dev, "ib_umem_get error for create qp\n"); - ret = PTR_ERR(hr_qp->umem); - goto err_alloc_rq_inline_buf; - } - hr_qp->region_cnt = split_wqe_buf_region(hr_dev, hr_qp, - hr_qp->regions, ARRAY_SIZE(hr_qp->regions), - page_shift); - ret = hns_roce_alloc_buf_list(hr_qp->regions, buf_list, - hr_qp->region_cnt); - if (ret) { - dev_err(dev, "alloc buf_list error for create qp\n"); - goto err_alloc_list; - } - - for (i = 0; i < hr_qp->region_cnt; i++) { - r = &hr_qp->regions[i]; - buf_count = hns_roce_get_umem_bufs(hr_dev, - buf_list[i], r->count, r->offset, - hr_qp->umem, page_shift); - if (buf_count != r->count) { - dev_err(dev, - "get umem buf err, expect %d,ret %d.\n", - r->count, buf_count); - ret = -ENOBUFS; - goto err_get_bufs; - } + goto err_out; } if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) && @@ -869,7 +952,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, &hr_qp->sdb); if (ret) { dev_err(dev, "sq record doorbell map failed!\n"); - goto err_get_bufs; + goto err_out; } /* indicate kernel supports sq record db */ @@ -896,13 +979,13 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) { dev_err(dev, "init_attr->create_flags error!\n"); ret = -EINVAL; - goto err_alloc_rq_inline_buf; + goto err_out; } if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) { dev_err(dev, "init_attr->create_flags error!\n"); ret = -EINVAL; - goto err_alloc_rq_inline_buf; + goto err_out; } /* Set SQ size */ @@ -910,7 +993,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, hr_qp); if (ret) { dev_err(dev, "hns_roce_set_kernel_sq_size error!\n"); - goto err_alloc_rq_inline_buf; + goto err_out; } /* QP doorbell register address */ @@ -924,49 +1007,17 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0); if (ret) { dev_err(dev, "rq record doorbell alloc failed!\n"); - goto err_alloc_rq_inline_buf; + goto err_out; } *hr_qp->rdb.db_record = 0; hr_qp->rdb_en = 1; } - /* Allocate QP buf */ - if (hns_roce_buf_alloc(hr_dev, hr_qp->buff_size, - (1 << page_shift) * 2, - &hr_qp->hr_buf, page_shift)) { - dev_err(dev, "hns_roce_buf_alloc error!\n"); - ret = -ENOMEM; - goto err_db; - } - hr_qp->region_cnt = split_wqe_buf_region(hr_dev, hr_qp, - hr_qp->regions, ARRAY_SIZE(hr_qp->regions), - page_shift); - ret = hns_roce_alloc_buf_list(hr_qp->regions, buf_list, - hr_qp->region_cnt); - if (ret) { - dev_err(dev, "alloc buf_list error for create qp!\n"); - goto err_alloc_list; - } - - for (i = 0; i < hr_qp->region_cnt; i++) { - r = &hr_qp->regions[i]; - buf_count = hns_roce_get_kmem_bufs(hr_dev, - buf_list[i], r->count, r->offset, - &hr_qp->hr_buf); - if (buf_count != r->count) { - dev_err(dev, - "get kmem buf err, expect %d,ret %d.\n", - r->count, buf_count); - ret = -ENOBUFS; - goto err_get_bufs; - } - } - hr_qp->sq.wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), GFP_KERNEL); if (ZERO_OR_NULL_PTR(hr_qp->sq.wrid)) { ret = -ENOMEM; - goto err_get_bufs; + goto err_db; } if (hr_qp->rq.wqe_cnt) { @@ -979,21 +1030,16 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, } } - hr_qp->wqe_bt_pg_shift = calc_wqe_bt_page_shift(hr_dev, hr_qp->regions, - hr_qp->region_cnt); - hns_roce_mtr_init(&hr_qp->mtr, PAGE_SHIFT + 
hr_qp->wqe_bt_pg_shift, - page_shift); - ret = hns_roce_mtr_attach(hr_dev, &hr_qp->mtr, buf_list, - hr_qp->regions, hr_qp->region_cnt); + ret = alloc_qp_buf(hr_dev, hr_qp, init_attr, udata, ucmd.buf_addr); if (ret) { - dev_err(dev, "mtr attach error for create qp\n"); - goto err_wrid; + ibdev_err(&hr_dev->ib_dev, "Failed to alloc QP buffer\n"); + goto err_db; } ret = alloc_qpn(hr_dev, hr_qp); if (ret) { ibdev_err(&hr_dev->ib_dev, "Failed to alloc QPN\n"); - goto err_mtr; + goto err_buf; } ret = alloc_qpc(hr_dev, hr_qp); @@ -1026,8 +1072,6 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, atomic_set(&hr_qp->refcount, 1); init_completion(&hr_qp->free); - hns_roce_free_buf_list(buf_list, hr_qp->region_cnt); - return 0; err_store: @@ -1039,10 +1083,9 @@ err_qpc: err_qpn: free_qpn(hr_dev, hr_qp); -err_mtr: - hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr); +err_buf: + free_qp_buf(hr_dev, hr_qp); -err_wrid: if (udata) { if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && (udata->outlen >= sizeof(resp)) && @@ -1065,24 +1108,11 @@ err_sq_wrid: if (!udata) kfree(hr_qp->sq.wrid); -err_get_bufs: - hns_roce_free_buf_list(buf_list, hr_qp->region_cnt); - -err_alloc_list: - if (!hr_qp->umem) - hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf); - ib_umem_release(hr_qp->umem); - err_db: if (!udata && hns_roce_qp_has_rq(init_attr) && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)) hns_roce_free_db(hr_dev, &hr_qp->rdb); -err_alloc_rq_inline_buf: - if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) && - hns_roce_qp_has_rq(init_attr)) - free_rq_inline_buf(hr_qp); - err_out: return ret; } @@ -1098,7 +1128,7 @@ void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, free_qpn(hr_dev, hr_qp); - hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr); + free_qp_buf(hr_dev, hr_qp); if (udata) { struct hns_roce_ucontext *context = @@ -1115,17 +1145,9 @@ void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, } else { kfree(hr_qp->sq.wrid); kfree(hr_qp->rq.wrid); - hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf); if (hr_qp->rq.wqe_cnt) hns_roce_free_db(hr_dev, &hr_qp->rdb); } - ib_umem_release(hr_qp->umem); - - if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) && - hr_qp->rq.wqe_cnt) { - kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list); - kfree(hr_qp->rq_inl_buf.wqe_list); - } kfree(hr_qp); } -- cgit v1.2.3-58-ga151 From ae85bf92effc1e4d4f5b3a3a291d2440a7200e25 Mon Sep 17 00:00:00 2001 From: Xi Wang Date: Mon, 24 Feb 2020 14:37:36 +0800 Subject: RDMA/hns: Optimize qp param setup flow Encapsulate the qp param setup related code into set_qp_param(). 
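
The consolidation gives the caller a single validation call and a single error path, with the presence of udata selecting between the user and kernel rules. A minimal standalone sketch of that shape, using invented names (demo_set_qp_param() is not the driver's API):

#include <linux/errno.h>
#include <linux/types.h>

/* Illustrative only: one entry point for QP parameter validation;
 * a non-NULL udata means the QP is being created for user space. */
static int demo_set_qp_param(u32 max_recv_wr, u32 hw_max_wr,
                             const void *udata)
{
        /* check shared by both paths */
        if (max_recv_wr > hw_max_wr)
                return -EINVAL;

        if (udata)      /* user QP: sizes come from the copied ucmd */
                return 0;

        /* kernel QP: apply the kernel-only SQ sizing rules here */
        return 0;
}

In the patch itself, set_qp_param() copies the ucmd from user space and then dispatches to set_user_sq_size() or set_kernel_sq_size() accordingly.
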
Link: https://lore.kernel.org/r/1582526258-13825-6-git-send-email-liweihang@huawei.com Signed-off-by: Xi Wang Signed-off-by: Weihang Li Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_qp.c | 136 +++++++++++++++++--------------- 1 file changed, 72 insertions(+), 64 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index fea77f00340b..51c6318fcc42 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -355,18 +355,18 @@ static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) hns_roce_bitmap_free_range(&qp_table->bitmap, hr_qp->qpn, 1, BITMAP_RR); } -static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev, +static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap, bool is_user, int has_rq, struct hns_roce_qp *hr_qp) { - struct device *dev = hr_dev->dev; + struct ib_device *ibdev = &hr_dev->ib_dev; u32 max_cnt; /* Check the validity of QP support capacity */ if (cap->max_recv_wr > hr_dev->caps.max_wqes || cap->max_recv_sge > hr_dev->caps.max_rq_sg) { - dev_err(dev, "RQ WR or sge error!max_recv_wr=%d max_recv_sge=%d\n", - cap->max_recv_wr, cap->max_recv_sge); + ibdev_err(ibdev, "Failed to check max recv WR %d and SGE %d\n", + cap->max_recv_wr, cap->max_recv_sge); return -EINVAL; } @@ -378,7 +378,7 @@ static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev, cap->max_recv_sge = 0; } else { if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge)) { - dev_err(dev, "user space no need config max_recv_wr max_recv_sge\n"); + ibdev_err(ibdev, "Failed to check user max recv WR and SGE\n"); return -EINVAL; } @@ -390,7 +390,7 @@ static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev, hr_qp->rq.wqe_cnt = roundup_pow_of_two(max_cnt); if ((u32)hr_qp->rq.wqe_cnt > hr_dev->caps.max_wqes) { - dev_err(dev, "while setting rq size, rq.wqe_cnt too large\n"); + ibdev_err(ibdev, "Failed to check RQ WQE count limit\n"); return -EINVAL; } @@ -421,12 +421,12 @@ static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev, /* Sanity check SQ size before proceeding */ if (ucmd->log_sq_stride > max_sq_stride || ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) { - ibdev_err(&hr_dev->ib_dev, "check SQ size error!\n"); + ibdev_err(&hr_dev->ib_dev, "Failed to check SQ stride size\n"); return -EINVAL; } if (cap->max_send_sge > hr_dev->caps.max_sq_sg) { - ibdev_err(&hr_dev->ib_dev, "SQ sge error! 
max_send_sge=%d\n", + ibdev_err(&hr_dev->ib_dev, "Failed to check SQ SGE size %d\n", cap->max_send_sge); return -EINVAL; } @@ -434,10 +434,9 @@ static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev, return 0; } -static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev, - struct ib_qp_cap *cap, - struct hns_roce_qp *hr_qp, - struct hns_roce_ib_create_qp *ucmd) +static int set_user_sq_size(struct hns_roce_dev *hr_dev, + struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp, + struct hns_roce_ib_create_qp *ucmd) { u32 ex_sge_num; u32 page_size; @@ -450,7 +449,7 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev, ret = check_sq_size_with_integrity(hr_dev, cap, ucmd); if (ret) { - ibdev_err(&hr_dev->ib_dev, "Sanity check sq size failed\n"); + ibdev_err(&hr_dev->ib_dev, "Failed to check user SQ size limit\n"); return ret; } @@ -469,9 +468,9 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev, if (hr_qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE && hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_A) { if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) { - dev_err(hr_dev->dev, - "The extended sge cnt error! sge_cnt=%d\n", - hr_qp->sge.sge_cnt); + ibdev_err(&hr_dev->ib_dev, + "Failed to check extended SGE size limit %d\n", + hr_qp->sge.sge_cnt); return -EINVAL; } } @@ -635,9 +634,8 @@ static int set_extend_sge_param(struct hns_roce_dev *hr_dev, return 0; } -static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev, - struct ib_qp_cap *cap, - struct hns_roce_qp *hr_qp) +static int set_kernel_sq_size(struct hns_roce_dev *hr_dev, + struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp) { struct device *dev = hr_dev->dev; u32 page_size; @@ -896,6 +894,58 @@ static void free_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) hr_qp->rq.wqe_cnt) free_rq_inline_buf(hr_qp); } + +static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata, + struct hns_roce_ib_create_qp *ucmd) +{ + struct ib_device *ibdev = &hr_dev->ib_dev; + int ret; + + hr_qp->ibqp.qp_type = init_attr->qp_type; + + if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) + hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR; + else + hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR; + + ret = set_rq_size(hr_dev, &init_attr->cap, udata, + hns_roce_qp_has_rq(init_attr), hr_qp); + if (ret) { + ibdev_err(ibdev, "Failed to set user RQ size\n"); + return ret; + } + + if (udata) { + if (ib_copy_from_udata(ucmd, udata, sizeof(*ucmd))) { + ibdev_err(ibdev, "Failed to copy QP ucmd\n"); + return -EFAULT; + } + + ret = set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ucmd); + if (ret) + ibdev_err(ibdev, "Failed to set user SQ size\n"); + } else { + if (init_attr->create_flags & + IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) { + ibdev_err(ibdev, "Failed to check multicast loopback\n"); + return -EINVAL; + } + + if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) { + ibdev_err(ibdev, "Failed to check ipoib ud lso\n"); + return -EINVAL; + } + + ret = set_kernel_sq_size(hr_dev, &init_attr->cap, hr_qp); + if (ret) + ibdev_err(ibdev, "Failed to set kernel SQ size\n"); + } + + return ret; +} + static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, struct ib_pd *ib_pd, struct ib_qp_init_attr *init_attr, @@ -916,34 +966,13 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, hr_qp->state = IB_QPS_RESET; hr_qp->flush_flag = 0; - hr_qp->ibqp.qp_type = init_attr->qp_type; - - if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) - 
hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR; - else - hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR; - - ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, udata, - hns_roce_qp_has_rq(init_attr), hr_qp); + ret = set_qp_param(hr_dev, hr_qp, init_attr, udata, &ucmd); if (ret) { - dev_err(dev, "hns_roce_set_rq_size failed\n"); - goto err_out; + ibdev_err(&hr_dev->ib_dev, "Failed to set QP param\n"); + return ret; } if (udata) { - if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { - dev_err(dev, "ib_copy_from_udata error for create qp\n"); - ret = -EFAULT; - goto err_out; - } - - ret = hns_roce_set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, - &ucmd); - if (ret) { - dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n"); - goto err_out; - } - if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) && (udata->inlen >= sizeof(ucmd)) && (udata->outlen >= sizeof(resp)) && @@ -975,27 +1004,6 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, hr_qp->rdb_en = 1; } } else { - if (init_attr->create_flags & - IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) { - dev_err(dev, "init_attr->create_flags error!\n"); - ret = -EINVAL; - goto err_out; - } - - if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) { - dev_err(dev, "init_attr->create_flags error!\n"); - ret = -EINVAL; - goto err_out; - } - - /* Set SQ size */ - ret = hns_roce_set_kernel_sq_size(hr_dev, &init_attr->cap, - hr_qp); - if (ret) { - dev_err(dev, "hns_roce_set_kernel_sq_size error!\n"); - goto err_out; - } - /* QP doorbell register address */ hr_qp->sq.db_reg_l = hr_dev->reg_base + hr_dev->sdb_offset + DB_REG_OFFSET * hr_dev->priv_uar.index; -- cgit v1.2.3-58-ga151 From b37c413997100d1ad5627bf38f0ebfeee170f305 Mon Sep 17 00:00:00 2001 From: Xi Wang Date: Mon, 24 Feb 2020 14:37:37 +0800 Subject: RDMA/hns: Optimize kernel qp wrid allocation flow Encapsulate the kernel qp wrid allocation related code into 2 functions: alloc_kernel_wrid() and free_kernel_wrid(). 
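
One detail worth noting in alloc_kernel_wrid() below: the kcalloc() result is tested with ZERO_OR_NULL_PTR() rather than a plain NULL check, because kcalloc() with a zero element count returns ZERO_SIZE_PTR, which is non-NULL but must not be used. A small standalone sketch of that check (the helper name is illustrative, not the driver's API):

#include <linux/slab.h>
#include <linux/types.h>

/* Illustrative helper: allocate a wrid array and treat both NULL and
 * ZERO_SIZE_PTR (returned for a zero wqe_cnt) as failure. */
static u64 *demo_alloc_wrid(u32 wqe_cnt)
{
        u64 *wrid = kcalloc(wqe_cnt, sizeof(*wrid), GFP_KERNEL);

        if (ZERO_OR_NULL_PTR(wrid))
                return NULL;

        return wrid;
}
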
Link: https://lore.kernel.org/r/1582526258-13825-7-git-send-email-liweihang@huawei.com Signed-off-by: Xi Wang Signed-off-by: Weihang Li Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_qp.c | 72 ++++++++++++++++++++------------- 1 file changed, 45 insertions(+), 27 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 51c6318fcc42..a27c3dad8aae 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -895,6 +895,45 @@ static void free_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) free_rq_inline_buf(hr_qp); } +static int alloc_kernel_wrid(struct hns_roce_dev *hr_dev, + struct hns_roce_qp *hr_qp) +{ + struct ib_device *ibdev = &hr_dev->ib_dev; + u64 *sq_wrid = NULL; + u64 *rq_wrid = NULL; + int ret; + + sq_wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), GFP_KERNEL); + if (ZERO_OR_NULL_PTR(sq_wrid)) { + ibdev_err(ibdev, "Failed to alloc SQ wrid\n"); + return -ENOMEM; + } + + if (hr_qp->rq.wqe_cnt) { + rq_wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64), GFP_KERNEL); + if (ZERO_OR_NULL_PTR(rq_wrid)) { + ibdev_err(ibdev, "Failed to alloc RQ wrid\n"); + ret = -ENOMEM; + goto err_sq; + } + } + + hr_qp->sq.wrid = sq_wrid; + hr_qp->rq.wrid = rq_wrid; + return 0; +err_sq: + kfree(sq_wrid); + + return ret; +} + +static void free_kernel_wrid(struct hns_roce_dev *hr_dev, + struct hns_roce_qp *hr_qp) +{ + kfree(hr_qp->rq.wrid); + kfree(hr_qp->sq.wrid); +} + static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, struct ib_qp_init_attr *init_attr, struct ib_udata *udata, @@ -1021,21 +1060,11 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, hr_qp->rdb_en = 1; } - hr_qp->sq.wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), - GFP_KERNEL); - if (ZERO_OR_NULL_PTR(hr_qp->sq.wrid)) { - ret = -ENOMEM; + ret = alloc_kernel_wrid(hr_dev, hr_qp); + if (ret) { + ibdev_err(&hr_dev->ib_dev, "Failed to alloc wrid\n"); goto err_db; } - - if (hr_qp->rq.wqe_cnt) { - hr_qp->rq.wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64), - GFP_KERNEL); - if (ZERO_OR_NULL_PTR(hr_qp->rq.wrid)) { - ret = -ENOMEM; - goto err_sq_wrid; - } - } } ret = alloc_qp_buf(hr_dev, hr_qp, init_attr, udata, ucmd.buf_addr); @@ -1084,24 +1113,20 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, err_store: hns_roce_qp_remove(hr_dev, hr_qp); - err_qpc: free_qpc(hr_dev, hr_qp); - err_qpn: free_qpn(hr_dev, hr_qp); - err_buf: free_qp_buf(hr_dev, hr_qp); + free_kernel_wrid(hr_dev, hr_qp); + if (udata) { if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && (udata->outlen >= sizeof(resp)) && hns_roce_qp_has_rq(init_attr)) hns_roce_db_unmap_user(uctx, &hr_qp->rdb); - } else { - if (hr_qp->rq.wqe_cnt) - kfree(hr_qp->rq.wrid); } err_sq_dbmap: @@ -1112,10 +1137,6 @@ err_sq_dbmap: hns_roce_qp_has_sq(init_attr)) hns_roce_db_unmap_user(uctx, &hr_qp->sdb); -err_sq_wrid: - if (!udata) - kfree(hr_qp->sq.wrid); - err_db: if (!udata && hns_roce_qp_has_rq(init_attr) && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)) @@ -1133,10 +1154,9 @@ void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, wait_for_completion(&hr_qp->free); free_qpc(hr_dev, hr_qp); - free_qpn(hr_dev, hr_qp); - free_qp_buf(hr_dev, hr_qp); + free_kernel_wrid(hr_dev, hr_qp); if (udata) { struct hns_roce_ucontext *context = @@ -1151,8 +1171,6 @@ void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, if (hr_qp->rq.wqe_cnt && (hr_qp->rdb_en == 1)) 
hns_roce_db_unmap_user(context, &hr_qp->rdb); } else { - kfree(hr_qp->sq.wrid); - kfree(hr_qp->rq.wrid); if (hr_qp->rq.wqe_cnt) hns_roce_free_db(hr_dev, &hr_qp->rdb); } -- cgit v1.2.3-58-ga151 From cfec045b822a1c3f8f47604520d3194729d9d2af Mon Sep 17 00:00:00 2001 From: Xi Wang Date: Mon, 24 Feb 2020 14:37:38 +0800 Subject: RDMA/hns: Optimize qp doorbell allocation flow Encapsulate the kernel qp doorbell allocation related code into 2 functions: alloc_qp_db() and free_qp_db(). Link: https://lore.kernel.org/r/1582526258-13825-8-git-send-email-liweihang@huawei.com Signed-off-by: Xi Wang Signed-off-by: Weihang Li Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_qp.c | 234 ++++++++++++++++++-------------- 1 file changed, 132 insertions(+), 102 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index a27c3dad8aae..2a7535534ea8 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -843,7 +843,7 @@ static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, } if (udata) { - hr_qp->umem = ib_umem_get(udata, addr, hr_qp->buff_size, 0); + hr_qp->umem = ib_umem_get(ibdev, addr, hr_qp->buff_size, 0); if (IS_ERR(hr_qp->umem)) { ret = PTR_ERR(hr_qp->umem); goto err_inline; @@ -895,6 +895,114 @@ static void free_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) free_rq_inline_buf(hr_qp); } +static inline bool user_qp_has_sdb(struct hns_roce_dev *hr_dev, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata, + struct hns_roce_ib_create_qp_resp *resp, + struct hns_roce_ib_create_qp *ucmd) +{ + return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) && + udata->outlen >= offsetofend(typeof(*resp), cap_flags) && + hns_roce_qp_has_sq(init_attr) && + udata->inlen >= offsetofend(typeof(*ucmd), sdb_addr)); +} + +static inline bool user_qp_has_rdb(struct hns_roce_dev *hr_dev, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata, + struct hns_roce_ib_create_qp_resp *resp) +{ + return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && + udata->outlen >= offsetofend(typeof(*resp), cap_flags) && + hns_roce_qp_has_rq(init_attr)); +} + +static inline bool kernel_qp_has_rdb(struct hns_roce_dev *hr_dev, + struct ib_qp_init_attr *init_attr) +{ + return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && + hns_roce_qp_has_rq(init_attr)); +} + +static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata, + struct hns_roce_ib_create_qp *ucmd, + struct hns_roce_ib_create_qp_resp *resp) +{ + struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context( + udata, struct hns_roce_ucontext, ibucontext); + struct ib_device *ibdev = &hr_dev->ib_dev; + int ret; + + if (udata) { + if (user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd)) { + ret = hns_roce_db_map_user(uctx, udata, ucmd->sdb_addr, + &hr_qp->sdb); + if (ret) { + ibdev_err(ibdev, + "Failed to map user SQ doorbell\n"); + goto err_out; + } + hr_qp->sdb_en = 1; + resp->cap_flags |= HNS_ROCE_SUPPORT_SQ_RECORD_DB; + } + + if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) { + ret = hns_roce_db_map_user(uctx, udata, ucmd->db_addr, + &hr_qp->rdb); + if (ret) { + ibdev_err(ibdev, + "Failed to map user RQ doorbell\n"); + goto err_sdb; + } + hr_qp->rdb_en = 1; + resp->cap_flags |= HNS_ROCE_SUPPORT_RQ_RECORD_DB; + } + } else { + /* QP doorbell register address */ + hr_qp->sq.db_reg_l = hr_dev->reg_base + 
hr_dev->sdb_offset + + DB_REG_OFFSET * hr_dev->priv_uar.index; + hr_qp->rq.db_reg_l = hr_dev->reg_base + hr_dev->odb_offset + + DB_REG_OFFSET * hr_dev->priv_uar.index; + + if (kernel_qp_has_rdb(hr_dev, init_attr)) { + ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0); + if (ret) { + ibdev_err(ibdev, + "Failed to alloc kernel RQ doorbell\n"); + goto err_out; + } + *hr_qp->rdb.db_record = 0; + hr_qp->rdb_en = 1; + } + } + + return 0; +err_sdb: + if (udata && hr_qp->sdb_en) + hns_roce_db_unmap_user(uctx, &hr_qp->sdb); +err_out: + return ret; +} + +static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, + struct ib_udata *udata) +{ + struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context( + udata, struct hns_roce_ucontext, ibucontext); + + if (udata) { + if (hr_qp->rdb_en) + hns_roce_db_unmap_user(uctx, &hr_qp->rdb); + if (hr_qp->sdb_en) + hns_roce_db_unmap_user(uctx, &hr_qp->sdb); + } else { + if (hr_qp->rdb_en) + hns_roce_free_db(hr_dev, &hr_qp->rdb); + } +} + static int alloc_kernel_wrid(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) { @@ -991,11 +1099,9 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, struct ib_udata *udata, struct hns_roce_qp *hr_qp) { - struct device *dev = hr_dev->dev; - struct hns_roce_ib_create_qp ucmd; struct hns_roce_ib_create_qp_resp resp = {}; - struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context( - udata, struct hns_roce_ucontext, ibucontext); + struct ib_device *ibdev = &hr_dev->ib_dev; + struct hns_roce_ib_create_qp ucmd; int ret; mutex_init(&hr_qp->mutex); @@ -1007,95 +1113,55 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, ret = set_qp_param(hr_dev, hr_qp, init_attr, udata, &ucmd); if (ret) { - ibdev_err(&hr_dev->ib_dev, "Failed to set QP param\n"); + ibdev_err(ibdev, "Failed to set QP param\n"); return ret; } - if (udata) { - if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) && - (udata->inlen >= sizeof(ucmd)) && - (udata->outlen >= sizeof(resp)) && - hns_roce_qp_has_sq(init_attr)) { - ret = hns_roce_db_map_user(uctx, udata, ucmd.sdb_addr, - &hr_qp->sdb); - if (ret) { - dev_err(dev, "sq record doorbell map failed!\n"); - goto err_out; - } - - /* indicate kernel supports sq record db */ - resp.cap_flags |= HNS_ROCE_SUPPORT_SQ_RECORD_DB; - hr_qp->sdb_en = 1; - } - - if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && - (udata->outlen >= sizeof(resp)) && - hns_roce_qp_has_rq(init_attr)) { - ret = hns_roce_db_map_user(uctx, udata, ucmd.db_addr, - &hr_qp->rdb); - if (ret) { - dev_err(dev, "rq record doorbell map failed!\n"); - goto err_sq_dbmap; - } - - /* indicate kernel supports rq record db */ - resp.cap_flags |= HNS_ROCE_SUPPORT_RQ_RECORD_DB; - hr_qp->rdb_en = 1; - } - } else { - /* QP doorbell register address */ - hr_qp->sq.db_reg_l = hr_dev->reg_base + hr_dev->sdb_offset + - DB_REG_OFFSET * hr_dev->priv_uar.index; - hr_qp->rq.db_reg_l = hr_dev->reg_base + hr_dev->odb_offset + - DB_REG_OFFSET * hr_dev->priv_uar.index; - - if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && - hns_roce_qp_has_rq(init_attr)) { - ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0); - if (ret) { - dev_err(dev, "rq record doorbell alloc failed!\n"); - goto err_out; - } - *hr_qp->rdb.db_record = 0; - hr_qp->rdb_en = 1; - } - + if (!udata) { ret = alloc_kernel_wrid(hr_dev, hr_qp); if (ret) { - ibdev_err(&hr_dev->ib_dev, "Failed to alloc wrid\n"); - goto err_db; + ibdev_err(ibdev, "Failed to alloc wrid\n"); + return ret; } } + ret = alloc_qp_db(hr_dev, hr_qp, init_attr, udata, &ucmd, 
&resp); + if (ret) { + ibdev_err(ibdev, "Failed to alloc QP doorbell\n"); + goto err_wrid; + } + ret = alloc_qp_buf(hr_dev, hr_qp, init_attr, udata, ucmd.buf_addr); if (ret) { - ibdev_err(&hr_dev->ib_dev, "Failed to alloc QP buffer\n"); + ibdev_err(ibdev, "Failed to alloc QP buffer\n"); goto err_db; } ret = alloc_qpn(hr_dev, hr_qp); if (ret) { - ibdev_err(&hr_dev->ib_dev, "Failed to alloc QPN\n"); + ibdev_err(ibdev, "Failed to alloc QPN\n"); goto err_buf; } ret = alloc_qpc(hr_dev, hr_qp); if (ret) { - ibdev_err(&hr_dev->ib_dev, "Failed to alloc QP context\n"); + ibdev_err(ibdev, "Failed to alloc QP context\n"); goto err_qpn; } ret = hns_roce_qp_store(hr_dev, hr_qp, init_attr); if (ret) { - ibdev_err(&hr_dev->ib_dev, "Failed to store QP\n"); + ibdev_err(ibdev, "Failed to store QP\n"); goto err_qpc; } if (udata) { ret = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp))); - if (ret) + if (ret) { + ibdev_err(ibdev, "copy qp resp failed!\n"); goto err_store; + } } if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) { @@ -1119,30 +1185,10 @@ err_qpn: free_qpn(hr_dev, hr_qp); err_buf: free_qp_buf(hr_dev, hr_qp); - - free_kernel_wrid(hr_dev, hr_qp); - - if (udata) { - if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && - (udata->outlen >= sizeof(resp)) && - hns_roce_qp_has_rq(init_attr)) - hns_roce_db_unmap_user(uctx, &hr_qp->rdb); - } - -err_sq_dbmap: - if (udata) - if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) && - (udata->inlen >= sizeof(ucmd)) && - (udata->outlen >= sizeof(resp)) && - hns_roce_qp_has_sq(init_attr)) - hns_roce_db_unmap_user(uctx, &hr_qp->sdb); - err_db: - if (!udata && hns_roce_qp_has_rq(init_attr) && - (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)) - hns_roce_free_db(hr_dev, &hr_qp->rdb); - -err_out: + free_qp_db(hr_dev, hr_qp, udata); +err_wrid: + free_kernel_wrid(hr_dev, hr_qp); return ret; } @@ -1157,23 +1203,7 @@ void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, free_qpn(hr_dev, hr_qp); free_qp_buf(hr_dev, hr_qp); free_kernel_wrid(hr_dev, hr_qp); - - if (udata) { - struct hns_roce_ucontext *context = - rdma_udata_to_drv_context( - udata, - struct hns_roce_ucontext, - ibucontext); - - if (hr_qp->sq.wqe_cnt && (hr_qp->sdb_en == 1)) - hns_roce_db_unmap_user(context, &hr_qp->sdb); - - if (hr_qp->rq.wqe_cnt && (hr_qp->rdb_en == 1)) - hns_roce_db_unmap_user(context, &hr_qp->rdb); - } else { - if (hr_qp->rq.wqe_cnt) - hns_roce_free_db(hr_dev, &hr_qp->rdb); - } + free_qp_db(hr_dev, hr_qp, udata); kfree(hr_qp); } -- cgit v1.2.3-58-ga151 From 88d033077b2c7ca556a96b255bfa0ec502881b10 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Tue, 25 Feb 2020 13:40:08 +0200 Subject: RDMA/efa: Unified getters/setters for device structs bitmask access Use unified macros for device structs access instead of open coding the shifts and masks over and over again. 
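
The EFA_GET()/EFA_SET() macros that replace the open-coded shifts are defined later in this patch (efa_common_defs.h) on top of the kernel's FIELD_GET()/FIELD_PREP() helpers from <linux/bitfield.h>. A minimal standalone sketch of the same pattern against an invented register layout (all DEMO_* names are illustrative, not part of the driver):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Invented register layout, for illustration only */
#define DEMO_CAPS_DEPTH_MASK            GENMASK(15, 0)
#define DEMO_CAPS_ENTRY_SIZE_MASK       GENMASK(31, 16)

/* Same shape as the EFA_GET()/EFA_SET() helpers added by this patch */
#define DEMO_GET(ptr, field) FIELD_GET(field##_MASK, *(ptr))
#define DEMO_SET(ptr, field, value)                             \
        ({                                                      \
                typeof(ptr) _ptr = ptr;                         \
                *_ptr = (*_ptr & ~(field##_MASK)) |             \
                        FIELD_PREP(field##_MASK, value);        \
        })

static u32 demo(void)
{
        u32 caps = 0;

        /* replaces: caps  = depth & 0xffff;
         *           caps |= (entry_size << 16) & 0xffff0000; */
        DEMO_SET(&caps, DEMO_CAPS_DEPTH, 128);
        DEMO_SET(&caps, DEMO_CAPS_ENTRY_SIZE, 64);

        /* reading back replaces the "(caps & mask) >> shift" pattern */
        return DEMO_GET(&caps, DEMO_CAPS_DEPTH);
}

Because FIELD_GET()/FIELD_PREP() derive the shift from the mask at compile time, the separate *_SHIFT constants become redundant, which is why the patch deletes them from the register headers.
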
Link: https://lore.kernel.org/r/20200225114010.21790-2-galpress@amazon.com Reviewed-by: Firas JahJah Reviewed-by: Yossi Leybovich Signed-off-by: Gal Pressman Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/efa/efa_admin_cmds_defs.h | 7 +- drivers/infiniband/hw/efa/efa_admin_defs.h | 4 +- drivers/infiniband/hw/efa/efa_com.c | 154 +++++++++++------------- drivers/infiniband/hw/efa/efa_com_cmd.c | 29 ++--- drivers/infiniband/hw/efa/efa_common_defs.h | 13 +- drivers/infiniband/hw/efa/efa_regs_defs.h | 22 +--- 6 files changed, 101 insertions(+), 128 deletions(-) diff --git a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h index 74b787a90660..96b104ab5415 100644 --- a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h +++ b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */ /* - * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved. + * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved. */ #ifndef _EFA_ADMIN_CMDS_H_ @@ -801,21 +801,16 @@ struct efa_admin_mmio_req_read_less_resp { /* create_qp_cmd */ #define EFA_ADMIN_CREATE_QP_CMD_SQ_VIRT_MASK BIT(0) -#define EFA_ADMIN_CREATE_QP_CMD_RQ_VIRT_SHIFT 1 #define EFA_ADMIN_CREATE_QP_CMD_RQ_VIRT_MASK BIT(1) /* reg_mr_cmd */ #define EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT_MASK GENMASK(4, 0) -#define EFA_ADMIN_REG_MR_CMD_MEM_ADDR_PHY_MODE_EN_SHIFT 7 #define EFA_ADMIN_REG_MR_CMD_MEM_ADDR_PHY_MODE_EN_MASK BIT(7) #define EFA_ADMIN_REG_MR_CMD_LOCAL_WRITE_ENABLE_MASK BIT(0) -#define EFA_ADMIN_REG_MR_CMD_REMOTE_READ_ENABLE_SHIFT 2 #define EFA_ADMIN_REG_MR_CMD_REMOTE_READ_ENABLE_MASK BIT(2) /* create_cq_cmd */ -#define EFA_ADMIN_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT 5 #define EFA_ADMIN_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK BIT(5) -#define EFA_ADMIN_CREATE_CQ_CMD_VIRT_SHIFT 6 #define EFA_ADMIN_CREATE_CQ_CMD_VIRT_MASK BIT(6) #define EFA_ADMIN_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0) diff --git a/drivers/infiniband/hw/efa/efa_admin_defs.h b/drivers/infiniband/hw/efa/efa_admin_defs.h index c8e0c8b905be..29d53ed63b3e 100644 --- a/drivers/infiniband/hw/efa/efa_admin_defs.h +++ b/drivers/infiniband/hw/efa/efa_admin_defs.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */ /* - * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved. + * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved. */ #ifndef _EFA_ADMIN_H_ @@ -121,9 +121,7 @@ struct efa_admin_aenq_entry { /* aq_common_desc */ #define EFA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0) #define EFA_ADMIN_AQ_COMMON_DESC_PHASE_MASK BIT(0) -#define EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT 1 #define EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK BIT(1) -#define EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT 2 #define EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK BIT(2) /* acq_common_desc */ diff --git a/drivers/infiniband/hw/efa/efa_com.c b/drivers/infiniband/hw/efa/efa_com.c index 0778f4f7dccd..9e6399bfcf6d 100644 --- a/drivers/infiniband/hw/efa/efa_com.c +++ b/drivers/infiniband/hw/efa/efa_com.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause /* - * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved. + * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved. 
*/ #include "efa_com.h" @@ -16,21 +16,10 @@ #define EFA_ASYNC_QUEUE_DEPTH 16 #define EFA_ADMIN_QUEUE_DEPTH 32 -#define MIN_EFA_VER\ - ((EFA_ADMIN_API_VERSION_MAJOR << EFA_REGS_VERSION_MAJOR_VERSION_SHIFT) | \ - (EFA_ADMIN_API_VERSION_MINOR & EFA_REGS_VERSION_MINOR_VERSION_MASK)) - #define EFA_CTRL_MAJOR 0 #define EFA_CTRL_MINOR 0 #define EFA_CTRL_SUB_MINOR 1 -#define MIN_EFA_CTRL_VER \ - (((EFA_CTRL_MAJOR) << \ - (EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \ - ((EFA_CTRL_MINOR) << \ - (EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \ - (EFA_CTRL_SUB_MINOR)) - #define EFA_DMA_ADDR_TO_UINT32_LOW(x) ((u32)((u64)(x))) #define EFA_DMA_ADDR_TO_UINT32_HIGH(x) ((u32)(((u64)(x)) >> 32)) @@ -84,7 +73,7 @@ static u32 efa_com_reg_read32(struct efa_com_dev *edev, u16 offset) struct efa_com_mmio_read *mmio_read = &edev->mmio_read; struct efa_admin_mmio_req_read_less_resp *read_resp; unsigned long exp_time; - u32 mmio_read_reg; + u32 mmio_read_reg = 0; u32 err; read_resp = mmio_read->read_resp; @@ -94,10 +83,9 @@ static u32 efa_com_reg_read32(struct efa_com_dev *edev, u16 offset) /* trash DMA req_id to identify when hardware is done */ read_resp->req_id = mmio_read->seq_num + 0x9aL; - mmio_read_reg = (offset << EFA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) & - EFA_REGS_MMIO_REG_READ_REG_OFF_MASK; - mmio_read_reg |= mmio_read->seq_num & - EFA_REGS_MMIO_REG_READ_REQ_ID_MASK; + EFA_SET(&mmio_read_reg, EFA_REGS_MMIO_REG_READ_REG_OFF, offset); + EFA_SET(&mmio_read_reg, EFA_REGS_MMIO_REG_READ_REQ_ID, + mmio_read->seq_num); writel(mmio_read_reg, edev->reg_bar + EFA_REGS_MMIO_REG_READ_OFF); @@ -137,9 +125,9 @@ static int efa_com_admin_init_sq(struct efa_com_dev *edev) struct efa_com_admin_queue *aq = &edev->aq; struct efa_com_admin_sq *sq = &aq->sq; u16 size = aq->depth * sizeof(*sq->entries); + u32 aq_caps = 0; u32 addr_high; u32 addr_low; - u32 aq_caps; sq->entries = dma_alloc_coherent(aq->dmadev, size, &sq->dma_addr, GFP_KERNEL); @@ -160,10 +148,9 @@ static int efa_com_admin_init_sq(struct efa_com_dev *edev) writel(addr_low, edev->reg_bar + EFA_REGS_AQ_BASE_LO_OFF); writel(addr_high, edev->reg_bar + EFA_REGS_AQ_BASE_HI_OFF); - aq_caps = aq->depth & EFA_REGS_AQ_CAPS_AQ_DEPTH_MASK; - aq_caps |= (sizeof(struct efa_admin_aq_entry) << - EFA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) & - EFA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK; + EFA_SET(&aq_caps, EFA_REGS_AQ_CAPS_AQ_DEPTH, aq->depth); + EFA_SET(&aq_caps, EFA_REGS_AQ_CAPS_AQ_ENTRY_SIZE, + sizeof(struct efa_admin_aq_entry)); writel(aq_caps, edev->reg_bar + EFA_REGS_AQ_CAPS_OFF); @@ -175,9 +162,9 @@ static int efa_com_admin_init_cq(struct efa_com_dev *edev) struct efa_com_admin_queue *aq = &edev->aq; struct efa_com_admin_cq *cq = &aq->cq; u16 size = aq->depth * sizeof(*cq->entries); + u32 acq_caps = 0; u32 addr_high; u32 addr_low; - u32 acq_caps; cq->entries = dma_alloc_coherent(aq->dmadev, size, &cq->dma_addr, GFP_KERNEL); @@ -195,13 +182,11 @@ static int efa_com_admin_init_cq(struct efa_com_dev *edev) writel(addr_low, edev->reg_bar + EFA_REGS_ACQ_BASE_LO_OFF); writel(addr_high, edev->reg_bar + EFA_REGS_ACQ_BASE_HI_OFF); - acq_caps = aq->depth & EFA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK; - acq_caps |= (sizeof(struct efa_admin_acq_entry) << - EFA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) & - EFA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK; - acq_caps |= (aq->msix_vector_idx << - EFA_REGS_ACQ_CAPS_ACQ_MSIX_VECTOR_SHIFT) & - EFA_REGS_ACQ_CAPS_ACQ_MSIX_VECTOR_MASK; + EFA_SET(&acq_caps, EFA_REGS_ACQ_CAPS_ACQ_DEPTH, aq->depth); + EFA_SET(&acq_caps, EFA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE, + sizeof(struct 
efa_admin_acq_entry)); + EFA_SET(&acq_caps, EFA_REGS_ACQ_CAPS_ACQ_MSIX_VECTOR, + aq->msix_vector_idx); writel(acq_caps, edev->reg_bar + EFA_REGS_ACQ_CAPS_OFF); @@ -212,7 +197,8 @@ static int efa_com_admin_init_aenq(struct efa_com_dev *edev, struct efa_aenq_handlers *aenq_handlers) { struct efa_com_aenq *aenq = &edev->aenq; - u32 addr_low, addr_high, aenq_caps; + u32 addr_low, addr_high; + u32 aenq_caps = 0; u16 size; if (!aenq_handlers) { @@ -237,13 +223,11 @@ static int efa_com_admin_init_aenq(struct efa_com_dev *edev, writel(addr_low, edev->reg_bar + EFA_REGS_AENQ_BASE_LO_OFF); writel(addr_high, edev->reg_bar + EFA_REGS_AENQ_BASE_HI_OFF); - aenq_caps = aenq->depth & EFA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK; - aenq_caps |= (sizeof(struct efa_admin_aenq_entry) << - EFA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) & - EFA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK; - aenq_caps |= (aenq->msix_vector_idx - << EFA_REGS_AENQ_CAPS_AENQ_MSIX_VECTOR_SHIFT) & - EFA_REGS_AENQ_CAPS_AENQ_MSIX_VECTOR_MASK; + EFA_SET(&aenq_caps, EFA_REGS_AENQ_CAPS_AENQ_DEPTH, aenq->depth); + EFA_SET(&aenq_caps, EFA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE, + sizeof(struct efa_admin_aenq_entry)); + EFA_SET(&aenq_caps, EFA_REGS_AENQ_CAPS_AENQ_MSIX_VECTOR, + aenq->msix_vector_idx); writel(aenq_caps, edev->reg_bar + EFA_REGS_AENQ_CAPS_OFF); /* @@ -280,8 +264,8 @@ static void efa_com_dealloc_ctx_id(struct efa_com_admin_queue *aq, static inline void efa_com_put_comp_ctx(struct efa_com_admin_queue *aq, struct efa_comp_ctx *comp_ctx) { - u16 cmd_id = comp_ctx->user_cqe->acq_common_descriptor.command & - EFA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK; + u16 cmd_id = EFA_GET(&comp_ctx->user_cqe->acq_common_descriptor.command, + EFA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID); u16 ctx_id = cmd_id & (aq->depth - 1); ibdev_dbg(aq->efa_dev, "Put completion command_id %#x\n", cmd_id); @@ -335,8 +319,8 @@ static struct efa_comp_ctx *__efa_com_submit_admin_cmd(struct efa_com_admin_queu cmd_id &= EFA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK; cmd->aq_common_descriptor.command_id = cmd_id; - cmd->aq_common_descriptor.flags |= aq->sq.phase & - EFA_ADMIN_AQ_COMMON_DESC_PHASE_MASK; + EFA_SET(&cmd->aq_common_descriptor.flags, + EFA_ADMIN_AQ_COMMON_DESC_PHASE, aq->sq.phase); comp_ctx = efa_com_get_comp_ctx(aq, cmd_id, true); if (!comp_ctx) { @@ -427,8 +411,8 @@ static void efa_com_handle_single_admin_completion(struct efa_com_admin_queue *a struct efa_comp_ctx *comp_ctx; u16 cmd_id; - cmd_id = cqe->acq_common_descriptor.command & - EFA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK; + cmd_id = EFA_GET(&cqe->acq_common_descriptor.command, + EFA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID); comp_ctx = efa_com_get_comp_ctx(aq, cmd_id, false); if (!comp_ctx) { @@ -743,7 +727,7 @@ int efa_com_admin_init(struct efa_com_dev *edev, int err; dev_sts = efa_com_reg_read32(edev, EFA_REGS_DEV_STS_OFF); - if (!(dev_sts & EFA_REGS_DEV_STS_READY_MASK)) { + if (!EFA_GET(&dev_sts, EFA_REGS_DEV_STS_READY)) { ibdev_err(edev->efa_dev, "Device isn't ready, abort com init %#x\n", dev_sts); return -ENODEV; @@ -778,8 +762,7 @@ int efa_com_admin_init(struct efa_com_dev *edev, goto err_destroy_cq; cap = efa_com_reg_read32(edev, EFA_REGS_CAPS_OFF); - timeout = (cap & EFA_REGS_CAPS_ADMIN_CMD_TO_MASK) >> - EFA_REGS_CAPS_ADMIN_CMD_TO_SHIFT; + timeout = EFA_GET(&cap, EFA_REGS_CAPS_ADMIN_CMD_TO); if (timeout) /* the resolution of timeout reg is 100ms */ aq->completion_timeout = timeout * 100000; @@ -940,7 +923,9 @@ void efa_com_mmio_reg_read_destroy(struct efa_com_dev *edev) int efa_com_validate_version(struct efa_com_dev *edev) { + u32 
min_ctrl_ver = 0; u32 ctrl_ver_masked; + u32 min_ver = 0; u32 ctrl_ver; u32 ver; @@ -953,33 +938,42 @@ int efa_com_validate_version(struct efa_com_dev *edev) EFA_REGS_CONTROLLER_VERSION_OFF); ibdev_dbg(edev->efa_dev, "efa device version: %d.%d\n", - (ver & EFA_REGS_VERSION_MAJOR_VERSION_MASK) >> - EFA_REGS_VERSION_MAJOR_VERSION_SHIFT, - ver & EFA_REGS_VERSION_MINOR_VERSION_MASK); - - if (ver < MIN_EFA_VER) { + EFA_GET(&ver, EFA_REGS_VERSION_MAJOR_VERSION), + EFA_GET(&ver, EFA_REGS_VERSION_MINOR_VERSION)); + + EFA_SET(&min_ver, EFA_REGS_VERSION_MAJOR_VERSION, + EFA_ADMIN_API_VERSION_MAJOR); + EFA_SET(&min_ver, EFA_REGS_VERSION_MINOR_VERSION, + EFA_ADMIN_API_VERSION_MINOR); + if (ver < min_ver) { ibdev_err(edev->efa_dev, "EFA version is lower than the minimal version the driver supports\n"); return -EOPNOTSUPP; } - ibdev_dbg(edev->efa_dev, - "efa controller version: %d.%d.%d implementation version %d\n", - (ctrl_ver & EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >> - EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT, - (ctrl_ver & EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >> - EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT, - (ctrl_ver & EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK), - (ctrl_ver & EFA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >> - EFA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT); + ibdev_dbg( + edev->efa_dev, + "efa controller version: %d.%d.%d implementation version %d\n", + EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION), + EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION), + EFA_GET(&ctrl_ver, + EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION), + EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_IMPL_ID)); ctrl_ver_masked = - (ctrl_ver & EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) | - (ctrl_ver & EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) | - (ctrl_ver & EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK); - + EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION) | + EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION) | + EFA_GET(&ctrl_ver, + EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION); + + EFA_SET(&min_ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION, + EFA_CTRL_MAJOR); + EFA_SET(&min_ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION, + EFA_CTRL_MINOR); + EFA_SET(&min_ctrl_ver, EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION, + EFA_CTRL_SUB_MINOR); /* Validate the ctrl version without the implementation ID */ - if (ctrl_ver_masked < MIN_EFA_CTRL_VER) { + if (ctrl_ver_masked < min_ctrl_ver) { ibdev_err(edev->efa_dev, "EFA ctrl version is lower than the minimal ctrl version the driver supports\n"); return -EOPNOTSUPP; @@ -1002,8 +996,7 @@ int efa_com_get_dma_width(struct efa_com_dev *edev) u32 caps = efa_com_reg_read32(edev, EFA_REGS_CAPS_OFF); int width; - width = (caps & EFA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >> - EFA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT; + width = EFA_GET(&caps, EFA_REGS_CAPS_DMA_ADDR_WIDTH); ibdev_dbg(edev->efa_dev, "DMA width: %d\n", width); @@ -1017,16 +1010,14 @@ int efa_com_get_dma_width(struct efa_com_dev *edev) return width; } -static int wait_for_reset_state(struct efa_com_dev *edev, u32 timeout, - u16 exp_state) +static int wait_for_reset_state(struct efa_com_dev *edev, u32 timeout, int on) { u32 val, i; for (i = 0; i < timeout; i++) { val = efa_com_reg_read32(edev, EFA_REGS_DEV_STS_OFF); - if ((val & EFA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) == - exp_state) + if (EFA_GET(&val, EFA_REGS_DEV_STS_RESET_IN_PROGRESS) == on) return 0; ibdev_dbg(edev->efa_dev, "Reset indication val %d\n", val); @@ -1046,36 
+1037,34 @@ static int wait_for_reset_state(struct efa_com_dev *edev, u32 timeout, int efa_com_dev_reset(struct efa_com_dev *edev, enum efa_regs_reset_reason_types reset_reason) { - u32 stat, timeout, cap, reset_val; + u32 stat, timeout, cap; + u32 reset_val = 0; int err; stat = efa_com_reg_read32(edev, EFA_REGS_DEV_STS_OFF); cap = efa_com_reg_read32(edev, EFA_REGS_CAPS_OFF); - if (!(stat & EFA_REGS_DEV_STS_READY_MASK)) { + if (!EFA_GET(&stat, EFA_REGS_DEV_STS_READY)) { ibdev_err(edev->efa_dev, "Device isn't ready, can't reset device\n"); return -EINVAL; } - timeout = (cap & EFA_REGS_CAPS_RESET_TIMEOUT_MASK) >> - EFA_REGS_CAPS_RESET_TIMEOUT_SHIFT; + timeout = EFA_GET(&cap, EFA_REGS_CAPS_RESET_TIMEOUT); if (!timeout) { ibdev_err(edev->efa_dev, "Invalid timeout value\n"); return -EINVAL; } /* start reset */ - reset_val = EFA_REGS_DEV_CTL_DEV_RESET_MASK; - reset_val |= (reset_reason << EFA_REGS_DEV_CTL_RESET_REASON_SHIFT) & - EFA_REGS_DEV_CTL_RESET_REASON_MASK; + EFA_SET(&reset_val, EFA_REGS_DEV_CTL_DEV_RESET, 1); + EFA_SET(&reset_val, EFA_REGS_DEV_CTL_RESET_REASON, reset_reason); writel(reset_val, edev->reg_bar + EFA_REGS_DEV_CTL_OFF); /* reset clears the mmio readless address, restore it */ efa_com_mmio_reg_read_resp_addr_init(edev); - err = wait_for_reset_state(edev, timeout, - EFA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK); + err = wait_for_reset_state(edev, timeout, 1); if (err) { ibdev_err(edev->efa_dev, "Reset indication didn't turn on\n"); return err; @@ -1089,8 +1078,7 @@ int efa_com_dev_reset(struct efa_com_dev *edev, return err; } - timeout = (cap & EFA_REGS_CAPS_ADMIN_CMD_TO_MASK) >> - EFA_REGS_CAPS_ADMIN_CMD_TO_SHIFT; + timeout = EFA_GET(&cap, EFA_REGS_CAPS_ADMIN_CMD_TO); if (timeout) /* the resolution of timeout reg is 100ms */ edev->aq.completion_timeout = timeout * 100000; diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.c b/drivers/infiniband/hw/efa/efa_com_cmd.c index e20bd84a1014..eea5574a62e8 100644 --- a/drivers/infiniband/hw/efa/efa_com_cmd.c +++ b/drivers/infiniband/hw/efa/efa_com_cmd.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause /* - * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved. + * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved. 
*/ #include "efa_com.h" @@ -161,8 +161,9 @@ int efa_com_create_cq(struct efa_com_dev *edev, int err; create_cmd.aq_common_desc.opcode = EFA_ADMIN_CREATE_CQ; - create_cmd.cq_caps_2 = (params->entry_size_in_bytes / 4) & - EFA_ADMIN_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK; + EFA_SET(&create_cmd.cq_caps_2, + EFA_ADMIN_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS, + params->entry_size_in_bytes / 4); create_cmd.cq_depth = params->cq_depth; create_cmd.num_sub_cqs = params->num_sub_cqs; create_cmd.uar = params->uarn; @@ -227,8 +228,8 @@ int efa_com_register_mr(struct efa_com_dev *edev, mr_cmd.aq_common_desc.opcode = EFA_ADMIN_REG_MR; mr_cmd.pd = params->pd; mr_cmd.mr_length = params->mr_length_in_bytes; - mr_cmd.flags |= params->page_shift & - EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT_MASK; + EFA_SET(&mr_cmd.flags, EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT, + params->page_shift); mr_cmd.iova = params->iova; mr_cmd.permissions = params->permissions; @@ -242,11 +243,11 @@ int efa_com_register_mr(struct efa_com_dev *edev, params->pbl.pbl.address.mem_addr_low; mr_cmd.pbl.pbl.address.mem_addr_high = params->pbl.pbl.address.mem_addr_high; - mr_cmd.aq_common_desc.flags |= - EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK; + EFA_SET(&mr_cmd.aq_common_desc.flags, + EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA, 1); if (params->indirect) - mr_cmd.aq_common_desc.flags |= - EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; + EFA_SET(&mr_cmd.aq_common_desc.flags, + EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT, 1); } err = efa_com_cmd_exec(aq, @@ -386,9 +387,8 @@ static int efa_com_get_feature_ex(struct efa_com_dev *edev, get_cmd.aq_common_descriptor.opcode = EFA_ADMIN_GET_FEATURE; if (control_buff_size) - get_cmd.aq_common_descriptor.flags = - EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; - + EFA_SET(&get_cmd.aq_common_descriptor.flags, + EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT, 1); efa_com_set_dma_addr(control_buf_dma_addr, &get_cmd.control_buffer.address.mem_addr_high, @@ -538,8 +538,9 @@ static int efa_com_set_feature_ex(struct efa_com_dev *edev, set_cmd->aq_common_descriptor.opcode = EFA_ADMIN_SET_FEATURE; if (control_buff_size) { - set_cmd->aq_common_descriptor.flags = - EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; + set_cmd->aq_common_descriptor.flags = 0; + EFA_SET(&set_cmd->aq_common_descriptor.flags, + EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT, 1); efa_com_set_dma_addr(control_buf_dma_addr, &set_cmd->control_buffer.address.mem_addr_high, &set_cmd->control_buffer.address.mem_addr_low); diff --git a/drivers/infiniband/hw/efa/efa_common_defs.h b/drivers/infiniband/hw/efa/efa_common_defs.h index c559ec08898e..90af1c82c9c6 100644 --- a/drivers/infiniband/hw/efa/efa_common_defs.h +++ b/drivers/infiniband/hw/efa/efa_common_defs.h @@ -1,14 +1,25 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */ /* - * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved. + * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved. 
*/ #ifndef _EFA_COMMON_H_ #define _EFA_COMMON_H_ +#include + #define EFA_COMMON_SPEC_VERSION_MAJOR 2 #define EFA_COMMON_SPEC_VERSION_MINOR 0 +#define EFA_GET(ptr, mask) FIELD_GET(mask##_MASK, *(ptr)) + +#define EFA_SET(ptr, mask, value) \ + ({ \ + typeof(ptr) _ptr = ptr; \ + *_ptr = (*_ptr & ~(mask##_MASK)) | \ + FIELD_PREP(mask##_MASK, value); \ + }) + struct efa_common_mem_addr { u32 mem_addr_low; diff --git a/drivers/infiniband/hw/efa/efa_regs_defs.h b/drivers/infiniband/hw/efa/efa_regs_defs.h index bb9cad3d6a15..322a2c0d4ef9 100644 --- a/drivers/infiniband/hw/efa/efa_regs_defs.h +++ b/drivers/infiniband/hw/efa/efa_regs_defs.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */ /* - * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved. + * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved. */ #ifndef _EFA_REGS_H_ @@ -45,69 +45,49 @@ enum efa_regs_reset_reason_types { /* version register */ #define EFA_REGS_VERSION_MINOR_VERSION_MASK 0xff -#define EFA_REGS_VERSION_MAJOR_VERSION_SHIFT 8 #define EFA_REGS_VERSION_MAJOR_VERSION_MASK 0xff00 /* controller_version register */ #define EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK 0xff -#define EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT 8 #define EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK 0xff00 -#define EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT 16 #define EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK 0xff0000 -#define EFA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT 24 #define EFA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK 0xff000000 /* caps register */ #define EFA_REGS_CAPS_CONTIGUOUS_QUEUE_REQUIRED_MASK 0x1 -#define EFA_REGS_CAPS_RESET_TIMEOUT_SHIFT 1 #define EFA_REGS_CAPS_RESET_TIMEOUT_MASK 0x3e -#define EFA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT 8 #define EFA_REGS_CAPS_DMA_ADDR_WIDTH_MASK 0xff00 -#define EFA_REGS_CAPS_ADMIN_CMD_TO_SHIFT 16 #define EFA_REGS_CAPS_ADMIN_CMD_TO_MASK 0xf0000 /* aq_caps register */ #define EFA_REGS_AQ_CAPS_AQ_DEPTH_MASK 0xffff -#define EFA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT 16 #define EFA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK 0xffff0000 /* acq_caps register */ #define EFA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK 0xffff -#define EFA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT 16 #define EFA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK 0xff0000 -#define EFA_REGS_ACQ_CAPS_ACQ_MSIX_VECTOR_SHIFT 24 #define EFA_REGS_ACQ_CAPS_ACQ_MSIX_VECTOR_MASK 0xff000000 /* aenq_caps register */ #define EFA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK 0xffff -#define EFA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT 16 #define EFA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK 0xff0000 -#define EFA_REGS_AENQ_CAPS_AENQ_MSIX_VECTOR_SHIFT 24 #define EFA_REGS_AENQ_CAPS_AENQ_MSIX_VECTOR_MASK 0xff000000 /* dev_ctl register */ #define EFA_REGS_DEV_CTL_DEV_RESET_MASK 0x1 -#define EFA_REGS_DEV_CTL_AQ_RESTART_SHIFT 1 #define EFA_REGS_DEV_CTL_AQ_RESTART_MASK 0x2 -#define EFA_REGS_DEV_CTL_RESET_REASON_SHIFT 28 #define EFA_REGS_DEV_CTL_RESET_REASON_MASK 0xf0000000 /* dev_sts register */ #define EFA_REGS_DEV_STS_READY_MASK 0x1 -#define EFA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_SHIFT 1 #define EFA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_MASK 0x2 -#define EFA_REGS_DEV_STS_AQ_RESTART_FINISHED_SHIFT 2 #define EFA_REGS_DEV_STS_AQ_RESTART_FINISHED_MASK 0x4 -#define EFA_REGS_DEV_STS_RESET_IN_PROGRESS_SHIFT 3 #define EFA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK 0x8 -#define EFA_REGS_DEV_STS_RESET_FINISHED_SHIFT 4 #define EFA_REGS_DEV_STS_RESET_FINISHED_MASK 0x10 -#define EFA_REGS_DEV_STS_FATAL_ERROR_SHIFT 5 #define EFA_REGS_DEV_STS_FATAL_ERROR_MASK 0x20 /* mmio_reg_read 
register */ #define EFA_REGS_MMIO_REG_READ_REQ_ID_MASK 0xffff -#define EFA_REGS_MMIO_REG_READ_REG_OFF_SHIFT 16 #define EFA_REGS_MMIO_REG_READ_REG_OFF_MASK 0xffff0000 #endif /* _EFA_REGS_H_ */ -- cgit v1.2.3-58-ga151 From 56a7a721dd54fc8cead3d0eeeec4336db24b00fa Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Tue, 25 Feb 2020 13:40:09 +0200 Subject: RDMA/efa: Properly document the interrupt mask register The fact that the LSB in the register is the enable bit should not be an implicit assumption between the driver and the device, properly document that in the register definition. Link: https://lore.kernel.org/r/20200225114010.21790-3-galpress@amazon.com Reviewed-by: Firas JahJah Reviewed-by: Yossi Leybovich Signed-off-by: Gal Pressman Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/efa/efa_com.c | 4 +--- drivers/infiniband/hw/efa/efa_regs_defs.h | 3 +++ 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/infiniband/hw/efa/efa_com.c b/drivers/infiniband/hw/efa/efa_com.c index 9e6399bfcf6d..7fce69f5568f 100644 --- a/drivers/infiniband/hw/efa/efa_com.c +++ b/drivers/infiniband/hw/efa/efa_com.c @@ -23,8 +23,6 @@ #define EFA_DMA_ADDR_TO_UINT32_LOW(x) ((u32)((u64)(x))) #define EFA_DMA_ADDR_TO_UINT32_HIGH(x) ((u32)(((u64)(x)) >> 32)) -#define EFA_REGS_ADMIN_INTR_MASK 1 - enum efa_cmd_status { EFA_CMD_SUBMITTED, EFA_CMD_COMPLETED, @@ -689,7 +687,7 @@ void efa_com_set_admin_polling_mode(struct efa_com_dev *edev, bool polling) u32 mask_value = 0; if (polling) - mask_value = EFA_REGS_ADMIN_INTR_MASK; + EFA_SET(&mask_value, EFA_REGS_INTR_MASK_EN, 1); writel(mask_value, edev->reg_bar + EFA_REGS_INTR_MASK_OFF); if (polling) diff --git a/drivers/infiniband/hw/efa/efa_regs_defs.h b/drivers/infiniband/hw/efa/efa_regs_defs.h index 322a2c0d4ef9..4017982fe13b 100644 --- a/drivers/infiniband/hw/efa/efa_regs_defs.h +++ b/drivers/infiniband/hw/efa/efa_regs_defs.h @@ -73,6 +73,9 @@ enum efa_regs_reset_reason_types { #define EFA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK 0xff0000 #define EFA_REGS_AENQ_CAPS_AENQ_MSIX_VECTOR_MASK 0xff000000 +/* intr_mask register */ +#define EFA_REGS_INTR_MASK_EN_MASK 0x1 + /* dev_ctl register */ #define EFA_REGS_DEV_CTL_DEV_RESET_MASK 0x1 #define EFA_REGS_DEV_CTL_AQ_RESTART_MASK 0x2 -- cgit v1.2.3-58-ga151 From ff6629f88c529b07d9704c656c64dae76910e3e9 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Tue, 25 Feb 2020 13:40:10 +0200 Subject: RDMA/efa: Do not delay freeing of DMA pages When destroying a DMA mmapped object, there is no need to artificially delay the freeing of the pages to the mmap entry removal. Since the vma keeps a reference count on these pages, free_pages_exact can be called on the destroy verb as it won't really free the pages until the reference count is cleared (in case the user hasn't called munmap yet). Remove the special handling of DMA pages and call free_pages_exact on destroy_qp/cq. The mmap entry removal is moved to the beginning of the destroy flows, so the driver can safely free the pages. 
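
Since the vma holds its own reference on each mapped page, the unmap and free steps can be folded into one helper and called directly from the destroy verbs, which is what efa_free_mapped() in the diff below does. A minimal sketch of that helper, assuming an illustrative name and no driver-specific types:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Illustrative combined "unmap + free" helper, mirroring the shape of
 * efa_free_mapped() in the patch. */
static void demo_free_mapped(struct device *dma_dev, void *cpu_addr,
                             dma_addr_t dma_addr, size_t size,
                             enum dma_data_direction dir)
{
        dma_unmap_single(dma_dev, dma_addr, size, dir);
        /*
         * Safe even if user space still has the buffer mmapped: the vma
         * keeps a reference on the mapped pages, so the memory is only
         * really released once the last munmap() drops those references.
         */
        free_pages_exact(cpu_addr, size);
}
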
Link: https://lore.kernel.org/r/20200225114010.21790-4-galpress@amazon.com Reviewed-by: Firas JahJah Reviewed-by: Yossi Leybovich Signed-off-by: Gal Pressman Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/efa/efa_verbs.c | 44 +++++++++++++++++------------------ 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c index ec5545870554..bf3120f140f7 100644 --- a/drivers/infiniband/hw/efa/efa_verbs.c +++ b/drivers/infiniband/hw/efa/efa_verbs.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* - * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved. + * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved. */ #include @@ -169,6 +169,14 @@ static void *efa_zalloc_mapped(struct efa_dev *dev, dma_addr_t *dma_addr, return addr; } +static void efa_free_mapped(struct efa_dev *dev, void *cpu_addr, + dma_addr_t dma_addr, + size_t size, enum dma_data_direction dir) +{ + dma_unmap_single(&dev->pdev->dev, dma_addr, size, dir); + free_pages_exact(cpu_addr, size); +} + int efa_query_device(struct ib_device *ibdev, struct ib_device_attr *props, struct ib_udata *udata) @@ -402,6 +410,9 @@ int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) int err; ibdev_dbg(&dev->ibdev, "Destroy qp[%u]\n", ibqp->qp_num); + + efa_qp_user_mmap_entries_remove(qp); + err = efa_destroy_qp_handle(dev, qp->qp_handle); if (err) return err; @@ -411,11 +422,10 @@ int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) "qp->cpu_addr[0x%p] freed: size[%lu], dma[%pad]\n", qp->rq_cpu_addr, qp->rq_size, &qp->rq_dma_addr); - dma_unmap_single(&dev->pdev->dev, qp->rq_dma_addr, qp->rq_size, - DMA_TO_DEVICE); + efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr, + qp->rq_size, DMA_TO_DEVICE); } - efa_qp_user_mmap_entries_remove(qp); kfree(qp); return 0; } @@ -720,13 +730,9 @@ err_remove_mmap_entries: err_destroy_qp: efa_destroy_qp_handle(dev, create_qp_resp.qp_handle); err_free_mapped: - if (qp->rq_size) { - dma_unmap_single(&dev->pdev->dev, qp->rq_dma_addr, qp->rq_size, - DMA_TO_DEVICE); - - if (!qp->rq_mmap_entry) - free_pages_exact(qp->rq_cpu_addr, qp->rq_size); - } + if (qp->rq_size) + efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr, + qp->rq_size, DMA_TO_DEVICE); err_free_qp: kfree(qp); err_out: @@ -845,10 +851,10 @@ void efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) "Destroy cq[%d] virt[0x%p] freed: size[%lu], dma[%pad]\n", cq->cq_idx, cq->cpu_addr, cq->size, &cq->dma_addr); - efa_destroy_cq_idx(dev, cq->cq_idx); - dma_unmap_single(&dev->pdev->dev, cq->dma_addr, cq->size, - DMA_FROM_DEVICE); rdma_user_mmap_entry_remove(cq->mmap_entry); + efa_destroy_cq_idx(dev, cq->cq_idx); + efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size, + DMA_FROM_DEVICE); } static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq, @@ -985,10 +991,8 @@ err_remove_mmap: err_destroy_cq: efa_destroy_cq_idx(dev, cq->cq_idx); err_free_mapped: - dma_unmap_single(&dev->pdev->dev, cq->dma_addr, cq->size, - DMA_FROM_DEVICE); - if (!cq->mmap_entry) - free_pages_exact(cq->cpu_addr, cq->size); + efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size, + DMA_FROM_DEVICE); err_out: atomic64_inc(&dev->stats.sw_stats.create_cq_err); @@ -1550,10 +1554,6 @@ void efa_mmap_free(struct rdma_user_mmap_entry *rdma_entry) { struct efa_user_mmap_entry *entry = to_emmap(rdma_entry); - /* DMA mapping is already gone, now free the pages */ - if (entry->mmap_flag == 
EFA_MMAP_DMA_PAGE) - free_pages_exact(phys_to_virt(entry->address), - entry->rdma_entry.npages * PAGE_SIZE); kfree(entry); } -- cgit v1.2.3-58-ga151 From c2b777a9592395bf68f17bcfa76813eb507a001c Mon Sep 17 00:00:00 2001 From: Selvin Xavier Date: Wed, 26 Feb 2020 07:45:31 -0800 Subject: RDMA/bnxt_re: Refactor device add/remove functionalities - bnxt_re_ib_reg() handles two main functionalities - initializing the device and registering with the IB stack. Split it into 2 functions i.e. bnxt_re_dev_init() and bnxt_re_ib_init() to account for the same thereby improve modularity. Do the same for bnxt_re_ib_unreg()i.e. split into two functions - bnxt_re_dev_uninit() and bnxt_re_ib_uninit(). - Simplify the code by combining the different steps to add and remove the device into two functions. - Report correct netdev link state during device register Link: https://lore.kernel.org/r/1582731932-26574-2-git-send-email-selvin.xavier@broadcom.com Signed-off-by: Selvin Xavier Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/bnxt_re/main.c | 139 +++++++++++++++++++++-------------- 1 file changed, 82 insertions(+), 57 deletions(-) diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index b5128cce8e21..5f8fd74f5629 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -78,7 +78,8 @@ static struct list_head bnxt_re_dev_list = LIST_HEAD_INIT(bnxt_re_dev_list); /* Mutex to protect the list of bnxt_re devices added */ static DEFINE_MUTEX(bnxt_re_dev_lock); static struct workqueue_struct *bnxt_re_wq; -static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev); +static void bnxt_re_remove_device(struct bnxt_re_dev *rdev); +static void bnxt_re_ib_uninit(struct bnxt_re_dev *rdev); static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev) { @@ -237,7 +238,9 @@ static void bnxt_re_shutdown(void *p) if (!rdev) return; - bnxt_re_ib_unreg(rdev); + bnxt_re_ib_uninit(rdev); + ASSERT_RTNL(); + bnxt_re_remove_device(rdev); } static void bnxt_re_stop_irq(void *handle) @@ -1317,7 +1320,41 @@ static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev) le16_to_cpu(resp.hwrm_intf_patch); } -static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev) +static void bnxt_re_ib_uninit(struct bnxt_re_dev *rdev) +{ + /* Cleanup ib dev */ + if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) { + ib_unregister_device(&rdev->ibdev); + clear_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags); + } +} + +int bnxt_re_ib_init(struct bnxt_re_dev *rdev) +{ + int rc = 0; + u32 event; + + /* Register ib dev */ + rc = bnxt_re_register_ib(rdev); + if (rc) { + pr_err("Failed to register with IB: %#x\n", rc); + return rc; + } + set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags); + dev_info(rdev_to_dev(rdev), "Device registered successfully"); + ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed, + &rdev->active_width); + set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags); + + event = netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev) ? 
+ IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR; + + bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, event); + + return rc; +} + +static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev) { u8 type; int rc; @@ -1373,20 +1410,15 @@ static void bnxt_re_worker(struct work_struct *work) schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000)); } -static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) +static int bnxt_re_dev_init(struct bnxt_re_dev *rdev) { struct bnxt_qplib_creq_ctx *creq; struct bnxt_re_ring_attr rattr; u32 db_offt; - bool locked; int vid; u8 type; int rc; - /* Acquire rtnl lock through out this function */ - rtnl_lock(); - locked = true; - /* Registered a new RoCE device instance to netdev */ memset(&rattr, 0, sizeof(rattr)); rc = bnxt_re_register_netdev(rdev); @@ -1514,23 +1546,6 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000)); } - rtnl_unlock(); - locked = false; - - /* Register ib dev */ - rc = bnxt_re_register_ib(rdev); - if (rc) { - ibdev_err(&rdev->ibdev, - "Failed to register with IB: %#x\n", rc); - goto fail; - } - set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags); - ibdev_info(&rdev->ibdev, "Device registered successfully"); - ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed, - &rdev->active_width); - set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags); - bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_PORT_ACTIVE); - return 0; free_sctx: bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id); @@ -1544,10 +1559,7 @@ free_ring: free_rcfw: bnxt_qplib_free_rcfw_channel(&rdev->rcfw); fail: - if (!locked) - rtnl_lock(); - bnxt_re_ib_unreg(rdev); - rtnl_unlock(); + bnxt_re_dev_uninit(rdev); return rc; } @@ -1589,9 +1601,35 @@ exit: return rc; } -static void bnxt_re_remove_one(struct bnxt_re_dev *rdev) +static void bnxt_re_remove_device(struct bnxt_re_dev *rdev) { + bnxt_re_dev_uninit(rdev); pci_dev_put(rdev->en_dev->pdev); + bnxt_re_dev_unreg(rdev); +} + +static int bnxt_re_add_device(struct bnxt_re_dev **rdev, + struct net_device *netdev) +{ + int rc; + + rc = bnxt_re_dev_reg(rdev, netdev); + if (rc == -ENODEV) + return rc; + if (rc) { + pr_err("Failed to register with the device %s: %#x\n", + netdev->name, rc); + return rc; + } + + pci_dev_get((*rdev)->en_dev->pdev); + rc = bnxt_re_dev_init(*rdev); + if (rc) { + pci_dev_put((*rdev)->en_dev->pdev); + bnxt_re_dev_unreg(*rdev); + } + + return rc; } /* Handle all deferred netevents tasks */ @@ -1606,16 +1644,17 @@ static void bnxt_re_task(struct work_struct *work) if (re_work->event != NETDEV_REGISTER && !test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) - return; + goto done; switch (re_work->event) { case NETDEV_REGISTER: - rc = bnxt_re_ib_reg(rdev); + rc = bnxt_re_ib_init(rdev); if (rc) { ibdev_err(&rdev->ibdev, "Failed to register with IB: %#x", rc); - bnxt_re_remove_one(rdev); - bnxt_re_dev_unreg(rdev); + rtnl_lock(); + bnxt_re_remove_device(rdev); + rtnl_unlock(); goto exit; } break; @@ -1638,17 +1677,13 @@ static void bnxt_re_task(struct work_struct *work) default: break; } +done: smp_mb__before_atomic(); atomic_dec(&rdev->sched_count); exit: kfree(re_work); } -static void bnxt_re_init_one(struct bnxt_re_dev *rdev) -{ - pci_dev_get(rdev->en_dev->pdev); -} - /* * "Notifier chain callback can be invoked for the same chain from * different CPUs at the same time". 
@@ -1686,17 +1721,9 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier, case NETDEV_REGISTER: if (rdev) break; - rc = bnxt_re_dev_reg(&rdev, real_dev); - if (rc == -ENODEV) - break; - if (rc) { - ibdev_err(&rdev->ibdev, - "Failed to register with the device %s: %#x\n", - real_dev->name, rc); - break; - } - bnxt_re_init_one(rdev); - sch_work = true; + rc = bnxt_re_add_device(&rdev, real_dev); + if (!rc) + sch_work = true; break; case NETDEV_UNREGISTER: @@ -1705,9 +1732,8 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier, */ if (atomic_read(&rdev->sched_count) > 0) goto exit; - bnxt_re_ib_unreg(rdev); - bnxt_re_remove_one(rdev); - bnxt_re_dev_unreg(rdev); + bnxt_re_ib_uninit(rdev); + bnxt_re_remove_device(rdev); break; default: @@ -1784,12 +1810,11 @@ static void __exit bnxt_re_mod_exit(void) */ flush_workqueue(bnxt_re_wq); bnxt_re_dev_stop(rdev); + bnxt_re_ib_uninit(rdev); /* Acquire the rtnl_lock as the L2 resources are freed here */ rtnl_lock(); - bnxt_re_ib_unreg(rdev); + bnxt_re_remove_device(rdev); rtnl_unlock(); - bnxt_re_remove_one(rdev); - bnxt_re_dev_unreg(rdev); } unregister_netdevice_notifier(&bnxt_re_netdev_notifier); if (bnxt_re_wq) -- cgit v1.2.3-58-ga151 From 66832705c4d01e52df78570e72a9392a1271d2e9 Mon Sep 17 00:00:00 2001 From: Selvin Xavier Date: Wed, 26 Feb 2020 07:45:32 -0800 Subject: RDMA/bnxt_re: Use driver_unregister and unregistration API Using the new unregister APIs provided by the core. Provide the dealloc_driver hook for the core to callback at the time of device un-registration. bnxt_re VF resources are created by the corresponding PF driver. During ib_unregister_driver, PF might get removed before VF and this could cause failure when VFs are removed. Driver is explicitly queuing the removal of VF devices before calling ib_unregister_driver. 
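A short sketch of the core unregistration contract being adopted here, as I read the ib_core API (illustration, not text from the patch):

	/* from contexts that may hold rtnl_lock (netdev notifier, shutdown): */
	ib_unregister_device_queued(&rdev->ibdev);  /* asynchronous, runs from the core's workqueue */

	/* later, called by the core while tearing the device down: */
	static void bnxt_re_dealloc_driver(struct ib_device *ib_dev)
	{
		/* the driver releases its HW/L2 resources here; the core then frees
		 * the ib_device itself, so the driver no longer calls
		 * ib_dealloc_device() on its own.
		 */
	}

	/* ib_unregister_driver(RDMA_DRIVER_BNXT_RE) at module exit unregisters every
	 * remaining device of this driver; VF devices are queued for unregister first
	 * so they are torn down before their PF.
	 */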
Link: https://lore.kernel.org/r/1582731932-26574-3-git-send-email-selvin.xavier@broadcom.com Signed-off-by: Selvin Xavier Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/bnxt_re/main.c | 106 ++++++++++++++--------------------- 1 file changed, 42 insertions(+), 64 deletions(-) diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 5f8fd74f5629..415693f8015a 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -79,7 +79,8 @@ static struct list_head bnxt_re_dev_list = LIST_HEAD_INIT(bnxt_re_dev_list); static DEFINE_MUTEX(bnxt_re_dev_lock); static struct workqueue_struct *bnxt_re_wq; static void bnxt_re_remove_device(struct bnxt_re_dev *rdev); -static void bnxt_re_ib_uninit(struct bnxt_re_dev *rdev); +static void bnxt_re_dealloc_driver(struct ib_device *ib_dev); +static void bnxt_re_stop_irq(void *handle); static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev) { @@ -237,10 +238,10 @@ static void bnxt_re_shutdown(void *p) if (!rdev) return; - - bnxt_re_ib_uninit(rdev); ASSERT_RTNL(); - bnxt_re_remove_device(rdev); + /* Release the MSIx vectors before queuing unregister */ + bnxt_re_stop_irq(rdev); + ib_unregister_device_queued(&rdev->ibdev); } static void bnxt_re_stop_irq(void *handle) @@ -542,17 +543,12 @@ static bool is_bnxt_re_dev(struct net_device *netdev) static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev) { - struct bnxt_re_dev *rdev; + struct ib_device *ibdev = + ib_device_get_by_netdev(netdev, RDMA_DRIVER_BNXT_RE); + if (!ibdev) + return NULL; - rcu_read_lock(); - list_for_each_entry_rcu(rdev, &bnxt_re_dev_list, list) { - if (rdev->netdev == netdev) { - rcu_read_unlock(); - return rdev; - } - } - rcu_read_unlock(); - return NULL; + return container_of(ibdev, struct bnxt_re_dev, ibdev); } static void bnxt_re_dev_unprobe(struct net_device *netdev, @@ -626,11 +622,6 @@ static const struct attribute_group bnxt_re_dev_attr_group = { .attrs = bnxt_re_attributes, }; -static void bnxt_re_unregister_ib(struct bnxt_re_dev *rdev) -{ - ib_unregister_device(&rdev->ibdev); -} - static const struct ib_device_ops bnxt_re_dev_ops = { .owner = THIS_MODULE, .driver_id = RDMA_DRIVER_BNXT_RE, @@ -645,6 +636,7 @@ static const struct ib_device_ops bnxt_re_dev_ops = { .create_cq = bnxt_re_create_cq, .create_qp = bnxt_re_create_qp, .create_srq = bnxt_re_create_srq, + .dealloc_driver = bnxt_re_dealloc_driver, .dealloc_pd = bnxt_re_dealloc_pd, .dealloc_ucontext = bnxt_re_dealloc_ucontext, .del_gid = bnxt_re_del_gid, @@ -741,15 +733,11 @@ static void bnxt_re_dev_remove(struct bnxt_re_dev *rdev) { dev_put(rdev->netdev); rdev->netdev = NULL; - mutex_lock(&bnxt_re_dev_lock); list_del_rcu(&rdev->list); mutex_unlock(&bnxt_re_dev_lock); synchronize_rcu(); - - ib_dealloc_device(&rdev->ibdev); - /* rdev is gone */ } static struct bnxt_re_dev *bnxt_re_dev_add(struct net_device *netdev, @@ -1320,15 +1308,6 @@ static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev) le16_to_cpu(resp.hwrm_intf_patch); } -static void bnxt_re_ib_uninit(struct bnxt_re_dev *rdev) -{ - /* Cleanup ib dev */ - if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) { - ib_unregister_device(&rdev->ibdev); - clear_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags); - } -} - int bnxt_re_ib_init(struct bnxt_re_dev *rdev) { int rc = 0; @@ -1359,10 +1338,6 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev) u8 type; int rc; - if (test_and_clear_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) { - /* Cleanup ib 
dev */ - bnxt_re_unregister_ib(rdev); - } if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags)) cancel_delayed_work_sync(&rdev->worker); @@ -1632,6 +1607,19 @@ static int bnxt_re_add_device(struct bnxt_re_dev **rdev, return rc; } +static void bnxt_re_dealloc_driver(struct ib_device *ib_dev) +{ + struct bnxt_re_dev *rdev = + container_of(ib_dev, struct bnxt_re_dev, ibdev); + + clear_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags); + dev_info(rdev_to_dev(rdev), "Unregistering Device"); + + rtnl_lock(); + bnxt_re_remove_device(rdev); + rtnl_unlock(); +} + /* Handle all deferred netevents tasks */ static void bnxt_re_task(struct work_struct *work) { @@ -1706,6 +1694,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier, struct bnxt_re_dev *rdev; int rc = 0; bool sch_work = false; + bool release = true; real_dev = rdma_vlan_dev_real_dev(netdev); if (!real_dev) @@ -1713,7 +1702,8 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier, rdev = bnxt_re_from_netdev(real_dev); if (!rdev && event != NETDEV_REGISTER) - goto exit; + return NOTIFY_OK; + if (real_dev != netdev) goto exit; @@ -1724,6 +1714,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier, rc = bnxt_re_add_device(&rdev, real_dev); if (!rc) sch_work = true; + release = false; break; case NETDEV_UNREGISTER: @@ -1732,8 +1723,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier, */ if (atomic_read(&rdev->sched_count) > 0) goto exit; - bnxt_re_ib_uninit(rdev); - bnxt_re_remove_device(rdev); + ib_unregister_device_queued(&rdev->ibdev); break; default: @@ -1755,6 +1745,8 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier, } exit: + if (rdev && release) + ib_device_put(&rdev->ibdev); return NOTIFY_DONE; } @@ -1790,35 +1782,21 @@ err_netdev: static void __exit bnxt_re_mod_exit(void) { - struct bnxt_re_dev *rdev, *next; - LIST_HEAD(to_be_deleted); + struct bnxt_re_dev *rdev; - mutex_lock(&bnxt_re_dev_lock); - /* Free all adapter allocated resources */ - if (!list_empty(&bnxt_re_dev_list)) - list_splice_init(&bnxt_re_dev_list, &to_be_deleted); - mutex_unlock(&bnxt_re_dev_lock); - /* - * Cleanup the devices in reverse order so that the VF device - * cleanup is done before PF cleanup - */ - list_for_each_entry_safe_reverse(rdev, next, &to_be_deleted, list) { - ibdev_info(&rdev->ibdev, "Unregistering Device"); - /* - * Flush out any scheduled tasks before destroying the - * resources - */ - flush_workqueue(bnxt_re_wq); - bnxt_re_dev_stop(rdev); - bnxt_re_ib_uninit(rdev); - /* Acquire the rtnl_lock as the L2 resources are freed here */ - rtnl_lock(); - bnxt_re_remove_device(rdev); - rtnl_unlock(); - } unregister_netdevice_notifier(&bnxt_re_netdev_notifier); if (bnxt_re_wq) destroy_workqueue(bnxt_re_wq); + list_for_each_entry(rdev, &bnxt_re_dev_list, list) { + /* VF device removal should be called before the removal + * of PF device. Queue VFs unregister first, so that VFs + * shall be removed before the PF during the call of + * ib_unregister_driver. 
+ */ + if (rdev->is_virtfn) + ib_unregister_device(&rdev->ibdev); + } + ib_unregister_driver(RDMA_DRIVER_BNXT_RE); } module_init(bnxt_re_mod_init); -- cgit v1.2.3-58-ga151 From 6be2067d1e31477354b08d1ac85ff9ec6dec898e Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Thu, 27 Feb 2020 06:42:09 +0000 Subject: RDMA/bnxt_re: Remove set but not used variable 'pg_size' Fixes gcc '-Wunused-but-set-variable' warning: drivers/infiniband/hw/bnxt_re/qplib_res.c: In function '__alloc_pbl': drivers/infiniband/hw/bnxt_re/qplib_res.c:109:13: warning: variable 'pg_size' set but not used [-Wunused-but-set-variable] commit 0c4dcd602817 ("RDMA/bnxt_re: Refactor hardware queue memory allocation") involved this, but not used, so remove it. Link: https://lore.kernel.org/r/20200227064209.87893-1-yuehaibing@huawei.com Reported-by: Hulk Robot Signed-off-by: YueHaibing Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/bnxt_re/qplib_res.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c index fc5909c7f2e0..cab1adf1fed9 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c @@ -106,13 +106,12 @@ static int __alloc_pbl(struct bnxt_qplib_res *res, struct pci_dev *pdev = res->pdev; struct scatterlist *sghead; bool is_umem = false; - u32 pages, pg_size; + u32 pages; int i; if (sginfo->nopte) return 0; pages = sginfo->npages; - pg_size = sginfo->pgsize; sghead = sginfo->sghead; /* page ptr arrays */ pbl->pg_arr = vmalloc(pages * sizeof(void *)); -- cgit v1.2.3-58-ga151 From a0b404a98e274b5fc0cfb7c108d99127d482e5ff Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Thu, 27 Feb 2020 06:45:42 +0000 Subject: RDMA/bnxt_re: Remove set but not used variable 'dev_attr' Fixes gcc '-Wunused-but-set-variable' warning: drivers/infiniband/hw/bnxt_re/ib_verbs.c: In function 'bnxt_re_create_gsi_qp': drivers/infiniband/hw/bnxt_re/ib_verbs.c:1283:30: warning: variable 'dev_attr' set but not used [-Wunused-but-set-variable] commit 8dae419f9ec7 ("RDMA/bnxt_re: Refactor queue pair creation code") involved this, but not used, so remove it. 
Link: https://lore.kernel.org/r/20200227064542.91205-1-yuehaibing@huawei.com Reported-by: Hulk Robot Signed-off-by: YueHaibing Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/bnxt_re/ib_verbs.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index ad3e524187e3..7e74efd15d6d 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -1280,14 +1280,12 @@ out: static int bnxt_re_create_gsi_qp(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd, struct ib_qp_init_attr *init_attr) { - struct bnxt_qplib_dev_attr *dev_attr; struct bnxt_re_dev *rdev; struct bnxt_qplib_qp *qplqp; int rc = 0; rdev = qp->rdev; qplqp = &qp->qplib_qp; - dev_attr = &rdev->dev_attr; qplqp->rq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2; qplqp->sq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2; -- cgit v1.2.3-58-ga151 From 75d03665081e00881e76eaa3a7635c9202a82600 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Thu, 27 Feb 2020 06:49:00 +0000 Subject: RDMA/bnxt_re: Remove set but not used variables 'pg' and 'idx' Fixes gcc '-Wunused-but-set-variable' warning: drivers/infiniband/hw/bnxt_re/qplib_rcfw.c: In function '__send_message': drivers/infiniband/hw/bnxt_re/qplib_rcfw.c:101:10: warning: variable 'idx' set but not used [-Wunused-but-set-variable] drivers/infiniband/hw/bnxt_re/qplib_rcfw.c:101:6: warning: variable 'pg' set but not used [-Wunused-but-set-variable] commit cee0c7bba486 ("RDMA/bnxt_re: Refactor command queue management code") involved this, but not used. Link: https://lore.kernel.org/r/20200227064900.92255-1-yuehaibing@huawei.com Signed-off-by: YueHaibing Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/bnxt_re/qplib_rcfw.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index b0b050e5cd12..f01e864bb611 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c @@ -98,7 +98,6 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req, unsigned long flags; u32 size, opcode; u16 cookie, cbit; - int pg, idx; u8 *preq; pdev = rcfw->pdev; @@ -167,9 +166,6 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req, hwq_ptr = (struct bnxt_qplib_cmdqe **)hwq->pbl_ptr; preq = (u8 *)req; do { - pg = 0; - idx = 0; - /* Locate the next cmdq slot */ sw_prod = HWQ_CMP(hwq->prod, hwq); cmdqe = &hwq_ptr[get_cmdq_pg(sw_prod, cmdq_depth)] -- cgit v1.2.3-58-ga151 From 5e29d1443c46b6ca70a4c940a67e8c09f05dcb7e Mon Sep 17 00:00:00 2001 From: Michael Guralnik Date: Thu, 27 Feb 2020 13:38:34 +0200 Subject: RDMA/mlx5: Prevent UMR usage with RO only when we have RO caps Relaxed ordering is not supported in UMR so we are disabling UMR usage when user passes relaxed ordering access flag. Enable using UMR when user requested relaxed ordering but there are no relaxed ordering capabilities. This will prevent user from unnecessarily registering a new mkey. 
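The resulting behaviour of the check in mlx5_ib_can_use_umr(), summarized from the hunk below (illustration only):

	RELAXED_ORDERING requested | device has RO read/write caps | UMR usable?
	---------------------------+-------------------------------+------------------------------
	no                         | either                        | yes (other checks permitting)
	yes                        | no                            | yes (new with this patch)
	yes                        | yes                           | no (UMR cannot set RO)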
Fixes: d6de0bb1850f ("RDMA/mlx5: Set relaxed ordering when requested") Link: https://lore.kernel.org/r/20200227113834.94233-1-leon@kernel.org Signed-off-by: Michael Guralnik Reviewed-by: Maor Gottlieb Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx5/mlx5_ib.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index d9bffcc93587..f21d446249b8 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -1532,7 +1532,9 @@ static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev, MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled)) return false; - if (access_flags & IB_ACCESS_RELAXED_ORDERING) + if (access_flags & IB_ACCESS_RELAXED_ORDERING && + (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) || + MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))) return false; return true; -- cgit v1.2.3-58-ga151 From bb8865f435d81223596f1abd6dec0b12ed122af0 Mon Sep 17 00:00:00 2001 From: Kamal Heib Date: Thu, 30 Jan 2020 10:20:49 +0200 Subject: RDMA/providers: Fix return value when QP type isn't supported The proper return code is "-EOPNOTSUPP" when the requested QP type is not supported by the provider. Link: https://lore.kernel.org/r/20200130082049.463-1-kamalheib1@gmail.com Signed-off-by: Kamal Heib Reviewed-by: Dennis Dalessandro Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/bnxt_re/ib_verbs.c | 2 +- drivers/infiniband/hw/cxgb4/qp.c | 2 +- drivers/infiniband/hw/hns/hns_roce_qp.c | 2 +- drivers/infiniband/hw/i40iw/i40iw_verbs.c | 2 +- drivers/infiniband/hw/mlx4/qp.c | 2 +- drivers/infiniband/hw/mlx5/qp.c | 2 +- drivers/infiniband/hw/mthca/mthca_provider.c | 2 +- drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 2 +- drivers/infiniband/hw/qedr/verbs.c | 2 +- drivers/infiniband/hw/usnic/usnic_ib_verbs.c | 2 +- drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c | 2 +- drivers/infiniband/sw/rdmavt/qp.c | 2 +- drivers/infiniband/sw/siw/siw_verbs.c | 2 +- 13 files changed, 13 insertions(+), 13 deletions(-) diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 7e74efd15d6d..47b0b50b71e7 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -1141,7 +1141,7 @@ static int bnxt_re_init_qp_type(struct bnxt_re_dev *rdev, qptype = __from_ib_qp_type(init_attr->qp_type); if (qptype == IB_QPT_MAX) { ibdev_err(&rdev->ibdev, "QP type 0x%x not supported", qptype); - qptype = -EINVAL; + qptype = -EOPNOTSUPP; goto out; } diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index bbcac539777a..708216d82852 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -2127,7 +2127,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, pr_debug("ib_pd %p\n", pd); if (attrs->qp_type != IB_QPT_RC) - return ERR_PTR(-EINVAL); + return ERR_PTR(-EOPNOTSUPP); php = to_c4iw_pd(pd); rhp = php->rhp; diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 2a7535534ea8..7bec0ceb160b 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -1261,7 +1261,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd, default:{ ibdev_err(ibdev, "not support QP type %d\n", init_attr->qp_type); - return ERR_PTR(-EINVAL); + return ERR_PTR(-EOPNOTSUPP); } } diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c 
b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index c335de91508f..fa1292932b88 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c @@ -617,7 +617,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd, iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp; if (init_attr->qp_type != IB_QPT_RC) { - err_code = -EINVAL; + err_code = -EOPNOTSUPP; goto error; } if (iwdev->push_mode) diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 26425dd2d960..2f9f78912267 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -1636,7 +1636,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd, } default: /* Don't support raw QPs */ - return ERR_PTR(-EINVAL); + return ERR_PTR(-EOPNOTSUPP); } return &qp->ibqp; diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index a4f8e7030787..a597c9043b1d 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -2789,7 +2789,7 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, mlx5_ib_dbg(dev, "unsupported qp type %d\n", init_attr->qp_type); /* Don't support raw QPs */ - return ERR_PTR(-EINVAL); + return ERR_PTR(-EOPNOTSUPP); } if (verbs_init_attr->qp_type == IB_QPT_DRIVER) diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index ac19d57803b5..69a3e4f62fb1 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c @@ -561,7 +561,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd, } default: /* Don't support raw QPs */ - return ERR_PTR(-ENOSYS); + return ERR_PTR(-EOPNOTSUPP); } if (err) { diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index d47ea675734b..10e343894595 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -1111,7 +1111,7 @@ static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev, (attrs->qp_type != IB_QPT_UD)) { pr_err("%s(%d) unsupported qp type=0x%x requested\n", __func__, dev->id, attrs->qp_type); - return -EINVAL; + return -EOPNOTSUPP; } /* Skip the check for QP1 to support CM size of 128 */ if ((attrs->qp_type != IB_QPT_GSI) && diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 484b555150e0..a5bd3adaf90a 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -1186,7 +1186,7 @@ static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev, DP_DEBUG(dev, QEDR_MSG_QP, "create qp: unsupported qp type=0x%x requested\n", attrs->qp_type); - return -EINVAL; + return -EOPNOTSUPP; } if (attrs->cap.max_send_wr > qattr->max_sqe) { diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c index 556b8e44a51c..71f82339446c 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c @@ -504,7 +504,7 @@ struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd, if (init_attr->qp_type != IB_QPT_UD) { usnic_err("%s asked to make a non-UD QP: %d\n", dev_name(&us_ibdev->ib_dev.dev), init_attr->qp_type); - return ERR_PTR(-EINVAL); + return ERR_PTR(-EOPNOTSUPP); } trans_spec = cmd.spec; diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c index 9de1281f9a3b..afcc2abcf55c 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c +++ 
b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c @@ -217,7 +217,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd, init_attr->qp_type != IB_QPT_GSI) { dev_warn(&dev->pdev->dev, "queuepair type %d not supported\n", init_attr->qp_type); - return ERR_PTR(-EINVAL); + return ERR_PTR(-EOPNOTSUPP); } if (is_srq && !dev->dsr->caps.max_srq) { diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index 3cdf75d0c7a4..762d4dc11c41 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -1196,7 +1196,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd, default: /* Don't support raw QPs */ - return ERR_PTR(-EINVAL); + return ERR_PTR(-EOPNOTSUPP); } init_attr->cap.max_inline_data = 0; diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c index d5390d498c61..aeb842bc7a1e 100644 --- a/drivers/infiniband/sw/siw/siw_verbs.c +++ b/drivers/infiniband/sw/siw/siw_verbs.c @@ -323,7 +323,7 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd, } if (attrs->qp_type != IB_QPT_RC) { siw_dbg(base_dev, "only RC QP's supported\n"); - rv = -EINVAL; + rv = -EOPNOTSUPP; goto err_out; } if ((attrs->cap.max_send_wr > SIW_MAX_QP_WR) || -- cgit v1.2.3-58-ga151 From 91b74bf5310b22ac3286d8e9b5354b77f41af178 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 17 Feb 2020 10:36:29 +0300 Subject: IB/mlx5: Optimize u64 division on 32-bit arches Commit f164be8c0366 ("IB/mlx5: Extend caps stage to handle VAR capabilities") introduced a straight "/" division of the u64 variable "bar_size". This was fixed with commit 685eff513183 ("IB/mlx5: Use div64_u64 for num_var_hw_entries calculation"). However, div64_u64() is redundant here as mlx5_var_table::stride_size is of type u32. Make the actual code way more optimized on 32-bit kernels using div_u64() and fix 80 chars break-through by the way. Fixes: 685eff513183 ("IB/mlx5: Use div64_u64 for num_var_hw_entries calculation") Link: https://lore.kernel.org/r/20200217073629.8051-1-alobakin@dlink.ru Signed-off-by: Alexander Lobakin Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx5/main.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index e4bcfa81b70a..026391e4ceb4 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -6545,7 +6545,8 @@ static int mlx5_ib_init_var_table(struct mlx5_ib_dev *dev) doorbell_bar_offset); bar_size = (1ULL << log_doorbell_bar_size) * 4096; var_table->stride_size = 1ULL << log_doorbell_stride; - var_table->num_var_hw_entries = div64_u64(bar_size, var_table->stride_size); + var_table->num_var_hw_entries = div_u64(bar_size, + var_table->stride_size); mutex_init(&var_table->bitmap_lock); var_table->bitmap = bitmap_zalloc(var_table->num_var_hw_entries, GFP_KERNEL); -- cgit v1.2.3-58-ga151 From 9e3aaf6883b3b47d9f14579c1a03c911dd17cf10 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Thu, 27 Feb 2020 14:52:46 +0200 Subject: IB/mlx5: Add np_min_time_between_cnps and rp_max_rate debug params Add two debugfs parameters described below. np_min_time_between_cnps - Minimum time between sending CNPs from the port. Unit = microseconds. Default = 0 (no min wait time; generated based on incoming ECN marked packets). rp_max_rate - Maximum rate at which reaction point node can transmit. Once this limit is reached, RP is no longer rate limited. 
Unit = Mbits/sec Default = 0 (full speed) Link: https://lore.kernel.org/r/20200227125246.99472-1-leon@kernel.org Signed-off-by: Parav Pandit Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx5/cong.c | 20 ++++++++++++++++++++ drivers/infiniband/hw/mlx5/mlx5_ib.h | 2 ++ 2 files changed, 22 insertions(+) diff --git a/drivers/infiniband/hw/mlx5/cong.c b/drivers/infiniband/hw/mlx5/cong.c index 8ba439fabf7f..de4da92b81a6 100644 --- a/drivers/infiniband/hw/mlx5/cong.c +++ b/drivers/infiniband/hw/mlx5/cong.c @@ -47,6 +47,7 @@ static const char * const mlx5_ib_dbg_cc_name[] = { "rp_byte_reset", "rp_threshold", "rp_ai_rate", + "rp_max_rate", "rp_hai_rate", "rp_min_dec_fac", "rp_min_rate", @@ -56,6 +57,7 @@ static const char * const mlx5_ib_dbg_cc_name[] = { "rp_rate_reduce_monitor_period", "rp_initial_alpha_value", "rp_gd", + "np_min_time_between_cnps", "np_cnp_dscp", "np_cnp_prio_mode", "np_cnp_prio", @@ -66,6 +68,7 @@ static const char * const mlx5_ib_dbg_cc_name[] = { #define MLX5_IB_RP_TIME_RESET_ATTR BIT(3) #define MLX5_IB_RP_BYTE_RESET_ATTR BIT(4) #define MLX5_IB_RP_THRESHOLD_ATTR BIT(5) +#define MLX5_IB_RP_MAX_RATE_ATTR BIT(6) #define MLX5_IB_RP_AI_RATE_ATTR BIT(7) #define MLX5_IB_RP_HAI_RATE_ATTR BIT(8) #define MLX5_IB_RP_MIN_DEC_FAC_ATTR BIT(9) @@ -77,6 +80,7 @@ static const char * const mlx5_ib_dbg_cc_name[] = { #define MLX5_IB_RP_INITIAL_ALPHA_VALUE_ATTR BIT(15) #define MLX5_IB_RP_GD_ATTR BIT(16) +#define MLX5_IB_NP_MIN_TIME_BETWEEN_CNPS_ATTR BIT(2) #define MLX5_IB_NP_CNP_DSCP_ATTR BIT(3) #define MLX5_IB_NP_CNP_PRIO_MODE_ATTR BIT(4) @@ -111,6 +115,9 @@ static u32 mlx5_get_cc_param_val(void *field, int offset) case MLX5_IB_DBG_CC_RP_AI_RATE: return MLX5_GET(cong_control_r_roce_ecn_rp, field, rpg_ai_rate); + case MLX5_IB_DBG_CC_RP_MAX_RATE: + return MLX5_GET(cong_control_r_roce_ecn_rp, field, + rpg_max_rate); case MLX5_IB_DBG_CC_RP_HAI_RATE: return MLX5_GET(cong_control_r_roce_ecn_rp, field, rpg_hai_rate); @@ -138,6 +145,9 @@ static u32 mlx5_get_cc_param_val(void *field, int offset) case MLX5_IB_DBG_CC_RP_GD: return MLX5_GET(cong_control_r_roce_ecn_rp, field, rpg_gd); + case MLX5_IB_DBG_CC_NP_MIN_TIME_BETWEEN_CNPS: + return MLX5_GET(cong_control_r_roce_ecn_np, field, + min_time_between_cnps); case MLX5_IB_DBG_CC_NP_CNP_DSCP: return MLX5_GET(cong_control_r_roce_ecn_np, field, cnp_dscp); @@ -186,6 +196,11 @@ static void mlx5_ib_set_cc_param_mask_val(void *field, int offset, MLX5_SET(cong_control_r_roce_ecn_rp, field, rpg_ai_rate, var); break; + case MLX5_IB_DBG_CC_RP_MAX_RATE: + *attr_mask |= MLX5_IB_RP_MAX_RATE_ATTR; + MLX5_SET(cong_control_r_roce_ecn_rp, field, + rpg_max_rate, var); + break; case MLX5_IB_DBG_CC_RP_HAI_RATE: *attr_mask |= MLX5_IB_RP_HAI_RATE_ATTR; MLX5_SET(cong_control_r_roce_ecn_rp, field, @@ -231,6 +246,11 @@ static void mlx5_ib_set_cc_param_mask_val(void *field, int offset, MLX5_SET(cong_control_r_roce_ecn_rp, field, rpg_gd, var); break; + case MLX5_IB_DBG_CC_NP_MIN_TIME_BETWEEN_CNPS: + *attr_mask |= MLX5_IB_NP_MIN_TIME_BETWEEN_CNPS_ATTR; + MLX5_SET(cong_control_r_roce_ecn_np, field, + min_time_between_cnps, var); + break; case MLX5_IB_DBG_CC_NP_CNP_DSCP: *attr_mask |= MLX5_IB_NP_CNP_DSCP_ATTR; MLX5_SET(cong_control_r_roce_ecn_np, field, cnp_dscp, var); diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index f21d446249b8..3976071a5dc9 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -792,6 +792,7 @@ enum mlx5_ib_dbg_cc_types { 
MLX5_IB_DBG_CC_RP_BYTE_RESET, MLX5_IB_DBG_CC_RP_THRESHOLD, MLX5_IB_DBG_CC_RP_AI_RATE, + MLX5_IB_DBG_CC_RP_MAX_RATE, MLX5_IB_DBG_CC_RP_HAI_RATE, MLX5_IB_DBG_CC_RP_MIN_DEC_FAC, MLX5_IB_DBG_CC_RP_MIN_RATE, @@ -801,6 +802,7 @@ enum mlx5_ib_dbg_cc_types { MLX5_IB_DBG_CC_RP_RATE_REDUCE_MONITOR_PERIOD, MLX5_IB_DBG_CC_RP_INITIAL_ALPHA_VALUE, MLX5_IB_DBG_CC_RP_GD, + MLX5_IB_DBG_CC_NP_MIN_TIME_BETWEEN_CNPS, MLX5_IB_DBG_CC_NP_CNP_DSCP, MLX5_IB_DBG_CC_NP_CNP_PRIO_MODE, MLX5_IB_DBG_CC_NP_CNP_PRIO, -- cgit v1.2.3-58-ga151 From 79db784e794b6e7b7fb9b1dd464a34e4c0c039af Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Thu, 27 Feb 2020 14:54:07 +0200 Subject: IB/mlx5: Fix missing congestion control debugfs on rep rdma device Cited commit missed to include low level congestion control related debugfs stage initialization. This resulted in missing debugfs entries for cc_params of a RDMA device. Add them back. Fixes: b5ca15ad7e61 ("IB/mlx5: Add proper representors support") Link: https://lore.kernel.org/r/20200227125407.99803-1-leon@kernel.org Signed-off-by: Parav Pandit Reviewed-by: Mark Bloch Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx5/main.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 026391e4ceb4..709ef3f57a06 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -7078,6 +7078,9 @@ const struct mlx5_ib_profile raw_eth_profile = { STAGE_CREATE(MLX5_IB_STAGE_COUNTERS, mlx5_ib_stage_counters_init, mlx5_ib_stage_counters_cleanup), + STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS, + mlx5_ib_stage_cong_debugfs_init, + mlx5_ib_stage_cong_debugfs_cleanup), STAGE_CREATE(MLX5_IB_STAGE_UAR, mlx5_ib_stage_uar_init, mlx5_ib_stage_uar_cleanup), -- cgit v1.2.3-58-ga151 From 33fb27fd54465c74cbffba6315b2f043e90cec4c Mon Sep 17 00:00:00 2001 From: Bernard Metzler Date: Fri, 28 Feb 2020 18:35:34 +0100 Subject: RDMA/siw: Fix passive connection establishment Holding the rtnl_lock while iterating a devices interface address list potentially causes deadlocks with the cma_netdev_callback. While this was implemented to limit the scope of a wildcard listen to addresses of the current device only, a better solution limits the scope of the socket to the device. This completely avoiding locking, and also results in significant code simplification. Fixes: c421651fa229 ("RDMA/siw: Add missing rtnl_lock around access to ifa") Link: https://lore.kernel.org/r/20200228173534.26815-1-bmt@zurich.ibm.com Reported-by: syzbot+55de90ab5f44172b0c90@syzkaller.appspotmail.com Suggested-by: Jason Gunthorpe Signed-off-by: Bernard Metzler Signed-off-by: Jason Gunthorpe --- drivers/infiniband/sw/siw/siw_cm.c | 137 +++++++++---------------------------- 1 file changed, 31 insertions(+), 106 deletions(-) diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c index c5651a96b196..559e5fd3bad8 100644 --- a/drivers/infiniband/sw/siw/siw_cm.c +++ b/drivers/infiniband/sw/siw/siw_cm.c @@ -1769,14 +1769,23 @@ int siw_reject(struct iw_cm_id *id, const void *pdata, u8 pd_len) return 0; } -static int siw_listen_address(struct iw_cm_id *id, int backlog, - struct sockaddr *laddr, int addr_family) +/* + * siw_create_listen - Create resources for a listener's IWCM ID @id + * + * Starts listen on the socket address id->local_addr. 
+ * + */ +int siw_create_listen(struct iw_cm_id *id, int backlog) { struct socket *s; struct siw_cep *cep = NULL; struct siw_device *sdev = to_siw_dev(id->device); + int addr_family = id->local_addr.ss_family; int rv = 0, s_val; + if (addr_family != AF_INET && addr_family != AF_INET6) + return -EAFNOSUPPORT; + rv = sock_create(addr_family, SOCK_STREAM, IPPROTO_TCP, &s); if (rv < 0) return rv; @@ -1791,9 +1800,25 @@ static int siw_listen_address(struct iw_cm_id *id, int backlog, siw_dbg(id->device, "setsockopt error: %d\n", rv); goto error; } - rv = s->ops->bind(s, laddr, addr_family == AF_INET ? - sizeof(struct sockaddr_in) : - sizeof(struct sockaddr_in6)); + if (addr_family == AF_INET) { + struct sockaddr_in *laddr = &to_sockaddr_in(id->local_addr); + + /* For wildcard addr, limit binding to current device only */ + if (ipv4_is_zeronet(laddr->sin_addr.s_addr)) + s->sk->sk_bound_dev_if = sdev->netdev->ifindex; + + rv = s->ops->bind(s, (struct sockaddr *)laddr, + sizeof(struct sockaddr_in)); + } else { + struct sockaddr_in6 *laddr = &to_sockaddr_in6(id->local_addr); + + /* For wildcard addr, limit binding to current device only */ + if (ipv6_addr_any(&laddr->sin6_addr)) + s->sk->sk_bound_dev_if = sdev->netdev->ifindex; + + rv = s->ops->bind(s, (struct sockaddr *)laddr, + sizeof(struct sockaddr_in6)); + } if (rv) { siw_dbg(id->device, "socket bind error: %d\n", rv); goto error; @@ -1852,7 +1877,7 @@ static int siw_listen_address(struct iw_cm_id *id, int backlog, list_add_tail(&cep->listenq, (struct list_head *)id->provider_data); cep->state = SIW_EPSTATE_LISTENING; - siw_dbg(id->device, "Listen at laddr %pISp\n", laddr); + siw_dbg(id->device, "Listen at laddr %pISp\n", &id->local_addr); return 0; @@ -1910,106 +1935,6 @@ static void siw_drop_listeners(struct iw_cm_id *id) } } -/* - * siw_create_listen - Create resources for a listener's IWCM ID @id - * - * Listens on the socket address id->local_addr. - * - * If the listener's @id provides a specific local IP address, at most one - * listening socket is created and associated with @id. - * - * If the listener's @id provides the wildcard (zero) local IP address, - * a separate listen is performed for each local IP address of the device - * by creating a listening socket and binding to that local IP address. - * - */ -int siw_create_listen(struct iw_cm_id *id, int backlog) -{ - struct net_device *dev = to_siw_dev(id->device)->netdev; - int rv = 0, listeners = 0; - - siw_dbg(id->device, "backlog %d\n", backlog); - - /* - * For each attached address of the interface, create a - * listening socket, if id->local_addr is the wildcard - * IP address or matches the IP address. 
- */ - if (id->local_addr.ss_family == AF_INET) { - struct in_device *in_dev = in_dev_get(dev); - struct sockaddr_in s_laddr; - const struct in_ifaddr *ifa; - - if (!in_dev) { - rv = -ENODEV; - goto out; - } - memcpy(&s_laddr, &id->local_addr, sizeof(s_laddr)); - - siw_dbg(id->device, "laddr %pISp\n", &s_laddr); - - rtnl_lock(); - in_dev_for_each_ifa_rtnl(ifa, in_dev) { - if (ipv4_is_zeronet(s_laddr.sin_addr.s_addr) || - s_laddr.sin_addr.s_addr == ifa->ifa_address) { - s_laddr.sin_addr.s_addr = ifa->ifa_address; - - rv = siw_listen_address(id, backlog, - (struct sockaddr *)&s_laddr, - AF_INET); - if (!rv) - listeners++; - } - } - rtnl_unlock(); - in_dev_put(in_dev); - } else if (id->local_addr.ss_family == AF_INET6) { - struct inet6_dev *in6_dev = in6_dev_get(dev); - struct inet6_ifaddr *ifp; - struct sockaddr_in6 *s_laddr = &to_sockaddr_in6(id->local_addr); - - if (!in6_dev) { - rv = -ENODEV; - goto out; - } - siw_dbg(id->device, "laddr %pISp\n", &s_laddr); - - rtnl_lock(); - list_for_each_entry(ifp, &in6_dev->addr_list, if_list) { - if (ifp->flags & (IFA_F_TENTATIVE | IFA_F_DEPRECATED)) - continue; - if (ipv6_addr_any(&s_laddr->sin6_addr) || - ipv6_addr_equal(&s_laddr->sin6_addr, &ifp->addr)) { - struct sockaddr_in6 bind_addr = { - .sin6_family = AF_INET6, - .sin6_port = s_laddr->sin6_port, - .sin6_flowinfo = 0, - .sin6_addr = ifp->addr, - .sin6_scope_id = dev->ifindex }; - - rv = siw_listen_address(id, backlog, - (struct sockaddr *)&bind_addr, - AF_INET6); - if (!rv) - listeners++; - } - } - rtnl_unlock(); - in6_dev_put(in6_dev); - } else { - rv = -EAFNOSUPPORT; - } -out: - if (listeners) - rv = 0; - else if (!rv) - rv = -EINVAL; - - siw_dbg(id->device, "%s\n", rv ? "FAIL" : "OK"); - - return rv; -} - int siw_destroy_listen(struct iw_cm_id *id) { if (!id->provider_data) { -- cgit v1.2.3-58-ga151 From 0aeb3622ea6f14f36232a33f1d08c2ff02f4048b Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 4 Mar 2020 09:10:45 +0100 Subject: RDMA/hns: fix spelling mistake "attatch" -> "attach" There is a spelling mistake in an error message. Fix it. Link: https://lore.kernel.org/r/20200304081045.81164-1-colin.king@canonical.com Signed-off-by: Colin Ian King Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_qp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 7bec0ceb160b..5a28d62008e4 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -812,7 +812,7 @@ static int map_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, ret = hns_roce_mtr_attach(hr_dev, &hr_qp->mtr, buf_list, hr_qp->regions, region_count); if (ret) - ibdev_err(ibdev, "Failed to attatch WQE's mtr\n"); + ibdev_err(ibdev, "Failed to attach WQE's mtr\n"); goto done; -- cgit v1.2.3-58-ga151 From 30f2fe40c72bfbdde7bc066cb862bd05014be9f1 Mon Sep 17 00:00:00 2001 From: Yishai Hadas Date: Wed, 19 Feb 2020 21:05:18 +0200 Subject: IB/mlx5: Introduce UAPIs to manage packet pacing Introduce packet pacing uobject and its alloc and destroy methods. This uobject holds mlx5 packet pacing context according to the device specification and enables managing packet pacing device entries that are needed by DEVX applications. 
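Rough usage flow implied by the description (the userspace side is my assumption, not part of this patch): a DEVX application calls the MLX5_IB_METHOD_PP_OBJ_ALLOC ioctl method with a set_pp_rate_limit_context blob in MLX5_IB_ATTR_PP_OBJ_ALLOC_CTX, optionally requesting a dedicated entry via MLX5_IB_UAPI_PP_ALLOC_FLAGS_DEDICATED_INDEX, and receives back a rate-limit index in MLX5_IB_ATTR_PP_OBJ_ALLOC_INDEX that it can plug into DEVX objects it creates (for example, a send queue's packet pacing rate limit index field). Destroying the uobject with MLX5_IB_METHOD_PP_OBJ_DESTROY releases the entry through mlx5_rl_remove_rate_raw().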
Link: https://lore.kernel.org/r/20200219190518.200912-3-leon@kernel.org Signed-off-by: Yishai Hadas Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx5/Makefile | 1 + drivers/infiniband/hw/mlx5/main.c | 1 + drivers/infiniband/hw/mlx5/mlx5_ib.h | 6 ++ drivers/infiniband/hw/mlx5/qos.c | 136 ++++++++++++++++++++++++++++++ include/uapi/rdma/mlx5_user_ioctl_cmds.h | 17 ++++ include/uapi/rdma/mlx5_user_ioctl_verbs.h | 4 + 6 files changed, 165 insertions(+) create mode 100644 drivers/infiniband/hw/mlx5/qos.c diff --git a/drivers/infiniband/hw/mlx5/Makefile b/drivers/infiniband/hw/mlx5/Makefile index d0a043ccbe58..2a334800f109 100644 --- a/drivers/infiniband/hw/mlx5/Makefile +++ b/drivers/infiniband/hw/mlx5/Makefile @@ -8,3 +8,4 @@ mlx5_ib-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += odp.o mlx5_ib-$(CONFIG_MLX5_ESWITCH) += ib_rep.o mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += devx.o mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += flow.o +mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += qos.o diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index e4bcfa81b70a..7fc5ce9bdf3d 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -6251,6 +6251,7 @@ ADD_UVERBS_ATTRIBUTES_SIMPLE( static const struct uapi_definition mlx5_ib_defs[] = { UAPI_DEF_CHAIN(mlx5_ib_devx_defs), UAPI_DEF_CHAIN(mlx5_ib_flow_defs), + UAPI_DEF_CHAIN(mlx5_ib_qos_defs), UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION, &mlx5_ib_flow_action), diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index d9bffcc93587..09ce80febec7 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -203,6 +203,11 @@ struct mlx5_ib_flow_matcher { u8 match_criteria_enable; }; +struct mlx5_ib_pp { + u16 index; + struct mlx5_core_dev *mdev; +}; + struct mlx5_ib_flow_db { struct mlx5_ib_flow_prio prios[MLX5_IB_NUM_FLOW_FT]; struct mlx5_ib_flow_prio egress_prios[MLX5_IB_NUM_FLOW_FT]; @@ -1381,6 +1386,7 @@ int mlx5_ib_fill_stat_entry(struct sk_buff *msg, extern const struct uapi_definition mlx5_ib_devx_defs[]; extern const struct uapi_definition mlx5_ib_flow_defs[]; +extern const struct uapi_definition mlx5_ib_qos_defs[]; #if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS) int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user); diff --git a/drivers/infiniband/hw/mlx5/qos.c b/drivers/infiniband/hw/mlx5/qos.c new file mode 100644 index 000000000000..f822b06e7c9e --- /dev/null +++ b/drivers/infiniband/hw/mlx5/qos.c @@ -0,0 +1,136 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* + * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include "mlx5_ib.h" + +#define UVERBS_MODULE_NAME mlx5_ib +#include + +static bool pp_is_supported(struct ib_device *device) +{ + struct mlx5_ib_dev *dev = to_mdev(device); + + return (MLX5_CAP_GEN(dev->mdev, qos) && + MLX5_CAP_QOS(dev->mdev, packet_pacing) && + MLX5_CAP_QOS(dev->mdev, packet_pacing_uid)); +} + +static int UVERBS_HANDLER(MLX5_IB_METHOD_PP_OBJ_ALLOC)( + struct uverbs_attr_bundle *attrs) +{ + u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)] = {}; + struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, + MLX5_IB_ATTR_PP_OBJ_ALLOC_HANDLE); + struct mlx5_ib_dev *dev; + struct mlx5_ib_ucontext *c; + struct mlx5_ib_pp *pp_entry; + void *in_ctx; + u16 uid; + int inlen; + u32 flags; + int err; + + c = to_mucontext(ib_uverbs_get_ucontext(attrs)); + if (IS_ERR(c)) + return PTR_ERR(c); + + /* The allocated entry can be used only by a DEVX context */ + if (!c->devx_uid) + return -EINVAL; + + dev = to_mdev(c->ibucontext.device); + pp_entry = kzalloc(sizeof(*pp_entry), GFP_KERNEL); + if (IS_ERR(pp_entry)) + return PTR_ERR(pp_entry); + + in_ctx = uverbs_attr_get_alloced_ptr(attrs, + MLX5_IB_ATTR_PP_OBJ_ALLOC_CTX); + inlen = uverbs_attr_get_len(attrs, + MLX5_IB_ATTR_PP_OBJ_ALLOC_CTX); + memcpy(rl_raw, in_ctx, inlen); + err = uverbs_get_flags32(&flags, attrs, + MLX5_IB_ATTR_PP_OBJ_ALLOC_FLAGS, + MLX5_IB_UAPI_PP_ALLOC_FLAGS_DEDICATED_INDEX); + if (err) + goto err; + + uid = (flags & MLX5_IB_UAPI_PP_ALLOC_FLAGS_DEDICATED_INDEX) ? + c->devx_uid : MLX5_SHARED_RESOURCE_UID; + + err = mlx5_rl_add_rate_raw(dev->mdev, rl_raw, uid, + (flags & MLX5_IB_UAPI_PP_ALLOC_FLAGS_DEDICATED_INDEX), + &pp_entry->index); + if (err) + goto err; + + err = uverbs_copy_to(attrs, MLX5_IB_ATTR_PP_OBJ_ALLOC_INDEX, + &pp_entry->index, sizeof(pp_entry->index)); + if (err) + goto clean; + + pp_entry->mdev = dev->mdev; + uobj->object = pp_entry; + return 0; + +clean: + mlx5_rl_remove_rate_raw(dev->mdev, pp_entry->index); +err: + kfree(pp_entry); + return err; +} + +static int pp_obj_cleanup(struct ib_uobject *uobject, + enum rdma_remove_reason why, + struct uverbs_attr_bundle *attrs) +{ + struct mlx5_ib_pp *pp_entry = uobject->object; + + mlx5_rl_remove_rate_raw(pp_entry->mdev, pp_entry->index); + kfree(pp_entry); + return 0; +} + +DECLARE_UVERBS_NAMED_METHOD( + MLX5_IB_METHOD_PP_OBJ_ALLOC, + UVERBS_ATTR_IDR(MLX5_IB_ATTR_PP_OBJ_ALLOC_HANDLE, + MLX5_IB_OBJECT_PP, + UVERBS_ACCESS_NEW, + UA_MANDATORY), + UVERBS_ATTR_PTR_IN( + MLX5_IB_ATTR_PP_OBJ_ALLOC_CTX, + UVERBS_ATTR_SIZE(1, + MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)), + UA_MANDATORY, + UA_ALLOC_AND_COPY), + UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_PP_OBJ_ALLOC_FLAGS, + enum mlx5_ib_uapi_pp_alloc_flags, + UA_MANDATORY), + UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_PP_OBJ_ALLOC_INDEX, + UVERBS_ATTR_TYPE(u16), + UA_MANDATORY)); + +DECLARE_UVERBS_NAMED_METHOD_DESTROY( + MLX5_IB_METHOD_PP_OBJ_DESTROY, + UVERBS_ATTR_IDR(MLX5_IB_ATTR_PP_OBJ_DESTROY_HANDLE, + MLX5_IB_OBJECT_PP, + UVERBS_ACCESS_DESTROY, + UA_MANDATORY)); + +DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_PP, + UVERBS_TYPE_ALLOC_IDR(pp_obj_cleanup), + &UVERBS_METHOD(MLX5_IB_METHOD_PP_OBJ_ALLOC), + &UVERBS_METHOD(MLX5_IB_METHOD_PP_OBJ_DESTROY)); + + +const struct uapi_definition mlx5_ib_qos_defs[] = { + UAPI_DEF_CHAIN_OBJ_TREE_NAMED( + MLX5_IB_OBJECT_PP, + UAPI_DEF_IS_OBJ_SUPPORTED(pp_is_supported)), + {}, +}; diff --git a/include/uapi/rdma/mlx5_user_ioctl_cmds.h b/include/uapi/rdma/mlx5_user_ioctl_cmds.h index afe7da6f2b8e..8f4a417fc70a 100644 --- 
a/include/uapi/rdma/mlx5_user_ioctl_cmds.h +++ b/include/uapi/rdma/mlx5_user_ioctl_cmds.h @@ -143,6 +143,22 @@ enum mlx5_ib_devx_umem_dereg_attrs { MLX5_IB_ATTR_DEVX_UMEM_DEREG_HANDLE = (1U << UVERBS_ID_NS_SHIFT), }; +enum mlx5_ib_pp_obj_methods { + MLX5_IB_METHOD_PP_OBJ_ALLOC = (1U << UVERBS_ID_NS_SHIFT), + MLX5_IB_METHOD_PP_OBJ_DESTROY, +}; + +enum mlx5_ib_pp_alloc_attrs { + MLX5_IB_ATTR_PP_OBJ_ALLOC_HANDLE = (1U << UVERBS_ID_NS_SHIFT), + MLX5_IB_ATTR_PP_OBJ_ALLOC_CTX, + MLX5_IB_ATTR_PP_OBJ_ALLOC_FLAGS, + MLX5_IB_ATTR_PP_OBJ_ALLOC_INDEX, +}; + +enum mlx5_ib_pp_obj_destroy_attrs { + MLX5_IB_ATTR_PP_OBJ_DESTROY_HANDLE = (1U << UVERBS_ID_NS_SHIFT), +}; + enum mlx5_ib_devx_umem_methods { MLX5_IB_METHOD_DEVX_UMEM_REG = (1U << UVERBS_ID_NS_SHIFT), MLX5_IB_METHOD_DEVX_UMEM_DEREG, @@ -173,6 +189,7 @@ enum mlx5_ib_objects { MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD, MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD, MLX5_IB_OBJECT_VAR, + MLX5_IB_OBJECT_PP, }; enum mlx5_ib_flow_matcher_create_attrs { diff --git a/include/uapi/rdma/mlx5_user_ioctl_verbs.h b/include/uapi/rdma/mlx5_user_ioctl_verbs.h index 88b6ca70c2fe..b4641a7865f7 100644 --- a/include/uapi/rdma/mlx5_user_ioctl_verbs.h +++ b/include/uapi/rdma/mlx5_user_ioctl_verbs.h @@ -73,5 +73,9 @@ struct mlx5_ib_uapi_devx_async_event_hdr { __u8 out_data[]; }; +enum mlx5_ib_uapi_pp_alloc_flags { + MLX5_IB_UAPI_PP_ALLOC_FLAGS_DEDICATED_INDEX = 1 << 0, +}; + #endif -- cgit v1.2.3-58-ga151 From 679824148364773c8390abbf122bd7d9825e81eb Mon Sep 17 00:00:00 2001 From: Max Gurtovoy Date: Thu, 20 Feb 2020 12:08:19 +0200 Subject: RDMA/rw: map P2P memory correctly for signature operations Since RDMA rw API support operations with P2P memory sg list, make sure to map/unmap the scatter list for signature operation correctly. Link: https://lore.kernel.org/r/20200220100819.41860-2-maxg@mellanox.com Signed-off-by: Max Gurtovoy Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/rw.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c index 06e5b6787443..557efbf29197 100644 --- a/drivers/infiniband/core/rw.c +++ b/drivers/infiniband/core/rw.c @@ -391,13 +391,13 @@ int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, return -EINVAL; } - ret = ib_dma_map_sg(dev, sg, sg_cnt, dir); + ret = rdma_rw_map_sg(dev, sg, sg_cnt, dir); if (!ret) return -ENOMEM; sg_cnt = ret; if (prot_sg_cnt) { - ret = ib_dma_map_sg(dev, prot_sg, prot_sg_cnt, dir); + ret = rdma_rw_map_sg(dev, prot_sg, prot_sg_cnt, dir); if (!ret) { ret = -ENOMEM; goto out_unmap_sg; @@ -466,9 +466,9 @@ out_free_ctx: kfree(ctx->reg); out_unmap_prot_sg: if (prot_sg_cnt) - ib_dma_unmap_sg(dev, prot_sg, prot_sg_cnt, dir); + rdma_rw_unmap_sg(dev, prot_sg, prot_sg_cnt, dir); out_unmap_sg: - ib_dma_unmap_sg(dev, sg, sg_cnt, dir); + rdma_rw_unmap_sg(dev, sg, sg_cnt, dir); return ret; } EXPORT_SYMBOL(rdma_rw_ctx_signature_init); @@ -628,9 +628,9 @@ void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp, ib_mr_pool_put(qp, &qp->sig_mrs, ctx->reg->mr); kfree(ctx->reg); - ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir); if (prot_sg_cnt) - ib_dma_unmap_sg(qp->pd->device, prot_sg, prot_sg_cnt, dir); + rdma_rw_unmap_sg(qp->pd->device, prot_sg, prot_sg_cnt, dir); + rdma_rw_unmap_sg(qp->pd->device, sg, sg_cnt, dir); } EXPORT_SYMBOL(rdma_rw_ctx_destroy_signature); -- cgit v1.2.3-58-ga151 From 32ac9e4399b12d3e54d312a0e0e30ed5cd19bd4e Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Thu, 27 Feb 2020 16:36:51 -0400 
Subject: RDMA/cma: Teach lockdep about the order of rtnl and lock This lock ordering only happens when bonding is enabled and a certain bonding related event fires. However, since it can happen this is a global restriction on lock ordering. Teach lockdep about the order directly and unconditionally so bugs here are found quickly. See https://syzkaller.appspot.com/bug?extid=55de90ab5f44172b0c90 Link: https://lore.kernel.org/r/20200227203651.GA27185@ziepe.ca Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cma.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 468814e8c504..4df75ab4ee9d 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -4796,6 +4796,19 @@ static int __init cma_init(void) { int ret; + /* + * There is a rare lock ordering dependency in cma_netdev_callback() + * that only happens when bonding is enabled. Teach lockdep that rtnl + * must never be nested under lock so it can find these without having + * to test with bonding. + */ + if (IS_ENABLED(CONFIG_LOCKDEP)) { + rtnl_lock(); + mutex_lock(&lock); + mutex_unlock(&lock); + rtnl_unlock(); + } + cma_wq = alloc_ordered_workqueue("rdma_cm", WQ_MEM_RECLAIM); if (!cma_wq) return -ENOMEM; -- cgit v1.2.3-58-ga151 From 24a5b0ce714210c69a8870d4a11ab6a8cff650c6 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sun, 8 Mar 2020 07:54:42 +0100 Subject: RDMA/bnxt_re: Remove a redundant 'memset' 'wqe' is already zeroed at the top of the 'while' loop, just a few lines below, and is not used outside of the loop. So there is no need to zero it again, or for the variable to be declared outside the loop. Link: https://lore.kernel.org/r/20200308065442.5415-1-christophe.jaillet@wanadoo.fr Signed-off-by: Christophe JAILLET Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/bnxt_re/ib_verbs.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 47b0b50b71e7..95f6d493d1b9 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -2470,15 +2470,12 @@ static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp, const struct ib_send_wr *wr) { - struct bnxt_qplib_swqe wqe; int rc = 0, payload_sz = 0; unsigned long flags; spin_lock_irqsave(&qp->sq_lock, flags); - memset(&wqe, 0, sizeof(wqe)); while (wr) { - /* House keeping */ - memset(&wqe, 0, sizeof(wqe)); + struct bnxt_qplib_swqe wqe = {}; /* Common */ wqe.num_sge = wr->num_sge; -- cgit v1.2.3-58-ga151 From 2d870c5bd0acd42eced3860c3911ed6005654a2d Mon Sep 17 00:00:00 2001 From: Zhu Yanjun Date: Tue, 10 Mar 2020 11:16:56 +0200 Subject: RDMA/core: Remove the duplicate header file The header file rdma_core.h is duplicate, so let's remove it. 
Fixes: 622db5b6439a ("RDMA/core: Add trace points to follow MR allocation") Link: https://lore.kernel.org/r/20200310091656.249696-1-leon@kernel.org Signed-off-by: Zhu Yanjun Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/verbs.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 610ce9a6c6b9..56a71337112c 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -54,8 +54,6 @@ #include "core_priv.h" #include -#include - static int ib_resolve_eth_dmac(struct ib_device *device, struct rdma_ah_attr *ah_attr); -- cgit v1.2.3-58-ga151 From 0897f301bc285409ab6453839fa6a43d9ceb26e0 Mon Sep 17 00:00:00 2001 From: Erez Shitrit Date: Tue, 10 Mar 2020 09:57:06 +0200 Subject: RDMA/mlx5: Remove duplicate definitions of SW_ICM macros Those macros are already defined in include/linux/mlx5/driver.h, so delete their duplicate variants. Link: https://lore.kernel.org/r/20200310075706.238592-1-leon@kernel.org Signed-off-by: Ariel Levkovich Signed-off-by: Yevgeny Kliteynik Signed-off-by: Erez Shitrit Reviewed-by: Alex Vesker Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx5/mlx5_ib.h | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 798366bf6dd5..2e42258e6fce 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -128,10 +128,6 @@ enum mlx5_ib_mmap_type { MLX5_IB_MMAP_TYPE_VAR = 2, }; -#define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) \ - (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity)) -#define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)) - struct mlx5_ib_ucontext { struct ib_ucontext ibucontext; struct list_head db_page_list; -- cgit v1.2.3-58-ga151 From 41e684ef3f37ce6e5eac3fb5b9c7c1853f4b0447 Mon Sep 17 00:00:00 2001 From: Alex Vesker Date: Thu, 5 Mar 2020 14:38:41 +0200 Subject: IB/mlx5: Replace tunnel mpls capability bits for tunnel_offloads Until now the flex parser capability was used in ib_query_device() to indicate tunnel_offloads_caps support for mpls_over_gre/mpls_over_udp. Newer devices and firmware will have configurations with the flexparser but without mpls support. Testing for the flex parser capability was a mistake, the tunnel_stateless capability was intended for detecting mpls and was introduced at the same time as the flex parser capability. Otherwise userspace will be incorrectly informed that a future device supports MPLS when it does not. 
Link: https://lore.kernel.org/r/20200305123841.196086-1-leon@kernel.org Cc: # 4.17 Fixes: e818e255a58d ("IB/mlx5: Expose MPLS related tunneling offloads") Signed-off-by: Alex Vesker Reviewed-by: Ariel Levkovich Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx5/main.c | 6 ++---- include/linux/mlx5/mlx5_ifc.h | 6 +++++- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index eba7604eaa76..9c3993c7e9a1 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -1192,12 +1192,10 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) resp.tunnel_offloads_caps |= MLX5_IB_TUNNELED_OFFLOADS_GRE; - if (MLX5_CAP_GEN(mdev, flex_parser_protocols) & - MLX5_FLEX_PROTO_CW_MPLS_GRE) + if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_gre)) resp.tunnel_offloads_caps |= MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE; - if (MLX5_CAP_GEN(mdev, flex_parser_protocols) & - MLX5_FLEX_PROTO_CW_MPLS_UDP) + if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_udp)) resp.tunnel_offloads_caps |= MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP; } diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index f9bcbe653fda..f3a7189f9d6d 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -877,7 +877,11 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits { u8 swp_csum[0x1]; u8 swp_lso[0x1]; u8 cqe_checksum_full[0x1]; - u8 reserved_at_24[0x5]; + u8 tunnel_stateless_geneve_tx[0x1]; + u8 tunnel_stateless_mpls_over_udp[0x1]; + u8 tunnel_stateless_mpls_over_gre[0x1]; + u8 tunnel_stateless_vxlan_gpe[0x1]; + u8 tunnel_stateless_ipv4_over_vxlan[0x1]; u8 tunnel_stateless_ip_over_ip[0x1]; u8 reserved_at_2a[0x6]; u8 max_vxlan_udp_ports[0x8]; -- cgit v1.2.3-58-ga151 From 282e79c1c61a119a8b41bf0c1d82a0ac7cc76488 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Tue, 10 Mar 2020 11:14:29 +0200 Subject: RDMA/mlx4: Delete duplicated offsetofend implementation Convert mlx4 to use in-kernel offsetofend() instead of its duplicated implementation. 
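To see that the conversion is purely mechanical, here is a minimal standalone sketch (plain userspace C, not driver code; the struct and field names are invented) comparing the open-coded field_avail() being deleted with offsetofend(); both answer the same question, namely whether the caller's output buffer is long enough to cover a given field:

    #include <stddef.h>
    #include <stdio.h>

    /* Equivalent to the in-kernel offsetofend() helper. */
    #define offsetofend(TYPE, MEMBER) \
            (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

    /* The open-coded variant this series removes from the drivers. */
    #define field_avail(type, fld, sz) \
            (offsetof(type, fld) + sizeof(((type *)0)->fld) <= (sz))

    /* Hypothetical extensible response; new fields get appended over time. */
    struct query_resp {
            unsigned int comp_mask;
            unsigned int rss_caps;   /* added later in the ABI */
            unsigned int tso_caps;   /* added even later */
    };

    int main(void)
    {
            size_t outlen = offsetofend(struct query_resp, rss_caps);

            /* Both forms agree: rss_caps fits, tso_caps does not. */
            printf("rss_caps: %d %d\n",
                   field_avail(struct query_resp, rss_caps, outlen),
                   offsetofend(struct query_resp, rss_caps) <= outlen);
            printf("tso_caps: %d %d\n",
                   field_avail(struct query_resp, tso_caps, outlen),
                   offsetofend(struct query_resp, tso_caps) <= outlen);
            return 0;
    }
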
Link: https://lore.kernel.org/r/20200310091438.248429-3-leon@kernel.org Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx4/main.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 2f5d9b181848..a66518a5c938 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -434,9 +434,6 @@ int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev, return real_index; } -#define field_avail(type, fld, sz) (offsetof(type, fld) + \ - sizeof(((type *)0)->fld) <= (sz)) - static int mlx4_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props, struct ib_udata *uhw) @@ -447,7 +444,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, int err; int have_ib_ports; struct mlx4_uverbs_ex_query_device cmd; - struct mlx4_uverbs_ex_query_device_resp resp = {.comp_mask = 0}; + struct mlx4_uverbs_ex_query_device_resp resp = {}; struct mlx4_clock_params clock_params; if (uhw->inlen) { @@ -602,7 +599,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, sizeof(struct mlx4_wqe_data_seg); } - if (field_avail(typeof(resp), rss_caps, uhw->outlen)) { + if (offsetofend(typeof(resp), rss_caps) <= uhw->outlen) { if (props->rss_caps.supported_qpts) { resp.rss_caps.rx_hash_function = MLX4_IB_RX_HASH_FUNC_TOEPLITZ; @@ -626,7 +623,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, sizeof(resp.rss_caps); } - if (field_avail(typeof(resp), tso_caps, uhw->outlen)) { + if (offsetofend(typeof(resp), tso_caps) <= uhw->outlen) { if (dev->dev->caps.max_gso_sz && ((mlx4_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) || -- cgit v1.2.3-58-ga151 From a762d460a06abc8d462ac513ba57dc3c31dd8c73 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Tue, 10 Mar 2020 11:14:31 +0200 Subject: RDMA/mlx5: Use offsetofend() instead of duplicated variant Convert mlx5 driver to use offsetofend() instead of its duplicated variant. 
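The converted checks all sit in extensible-response paths, where a field is filled in, and accounted into response_length, only when the caller's outlen covers it. A simplified standalone sketch of that pattern (invented struct and values, userspace C, not the driver code):

    #include <stddef.h>
    #include <string.h>
    #include <stdio.h>

    #define offsetofend(TYPE, MEMBER) \
            (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

    /* Invented layout; response_length reports how many bytes are valid. */
    struct resp {
            unsigned int response_length;
            unsigned int cqe_version;           /* ABI v2 */
            unsigned int clock_info_versions;   /* ABI v3 */
    };

    static void fill_resp(struct resp *r, size_t outlen)
    {
            memset(r, 0, sizeof(*r));
            r->response_length = offsetofend(struct resp, response_length);

            if (offsetofend(struct resp, cqe_version) <= outlen) {
                    r->cqe_version = 1;
                    r->response_length += sizeof(r->cqe_version);
            }
            if (offsetofend(struct resp, clock_info_versions) <= outlen) {
                    r->clock_info_versions = 1;
                    r->response_length += sizeof(r->clock_info_versions);
            }
    }

    int main(void)
    {
            struct resp r;

            /* An old caller whose buffer ends right after cqe_version. */
            fill_resp(&r, offsetofend(struct resp, cqe_version));
            printf("valid bytes: %u of %zu\n", r.response_length, sizeof(r));
            return 0;
    }
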
Link: https://lore.kernel.org/r/20200310091438.248429-5-leon@kernel.org Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx5/main.c | 42 ++++++++++++++++++------------------ drivers/infiniband/hw/mlx5/mlx5_ib.h | 16 ++++++-------- 2 files changed, 27 insertions(+), 31 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 9c3993c7e9a1..de5275a34da3 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -898,7 +898,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, props->raw_packet_caps |= IB_RAW_PACKET_CAP_CVLAN_STRIPPING; - if (field_avail(typeof(resp), tso_caps, uhw_outlen)) { + if (offsetofend(typeof(resp), tso_caps) <= uhw_outlen) { max_tso = MLX5_CAP_ETH(mdev, max_lso_cap); if (max_tso) { resp.tso_caps.max_tso = 1 << max_tso; @@ -908,7 +908,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, } } - if (field_avail(typeof(resp), rss_caps, uhw_outlen)) { + if (offsetofend(typeof(resp), rss_caps) <= uhw_outlen) { resp.rss_caps.rx_hash_function = MLX5_RX_HASH_FUNC_TOEPLITZ; resp.rss_caps.rx_hash_fields_mask = @@ -928,9 +928,9 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, resp.response_length += sizeof(resp.rss_caps); } } else { - if (field_avail(typeof(resp), tso_caps, uhw_outlen)) + if (offsetofend(typeof(resp), tso_caps) <= uhw_outlen) resp.response_length += sizeof(resp.tso_caps); - if (field_avail(typeof(resp), rss_caps, uhw_outlen)) + if (offsetofend(typeof(resp), rss_caps) <= uhw_outlen) resp.response_length += sizeof(resp.rss_caps); } @@ -1072,7 +1072,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, MLX5_MAX_CQ_PERIOD; } - if (field_avail(typeof(resp), cqe_comp_caps, uhw_outlen)) { + if (offsetofend(typeof(resp), cqe_comp_caps) <= uhw_outlen) { resp.response_length += sizeof(resp.cqe_comp_caps); if (MLX5_CAP_GEN(dev->mdev, cqe_compression)) { @@ -1090,7 +1090,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, } } - if (field_avail(typeof(resp), packet_pacing_caps, uhw_outlen) && + if (offsetofend(typeof(resp), packet_pacing_caps) <= uhw_outlen && raw_support) { if (MLX5_CAP_QOS(mdev, packet_pacing) && MLX5_CAP_GEN(mdev, qos)) { @@ -1108,8 +1108,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, resp.response_length += sizeof(resp.packet_pacing_caps); } - if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes, - uhw_outlen)) { + if (offsetofend(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes) <= + uhw_outlen) { if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe)) resp.mlx5_ib_support_multi_pkt_send_wqes = MLX5_IB_ALLOW_MPW; @@ -1122,7 +1122,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes); } - if (field_avail(typeof(resp), flags, uhw_outlen)) { + if (offsetofend(typeof(resp), flags) <= uhw_outlen) { resp.response_length += sizeof(resp.flags); if (MLX5_CAP_GEN(mdev, cqe_compression_128)) @@ -1138,7 +1138,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT; } - if (field_avail(typeof(resp), sw_parsing_caps, uhw_outlen)) { + if (offsetofend(typeof(resp), sw_parsing_caps) <= uhw_outlen) { resp.response_length += sizeof(resp.sw_parsing_caps); if (MLX5_CAP_ETH(mdev, swp)) { resp.sw_parsing_caps.sw_parsing_offloads |= @@ -1158,7 +1158,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, } } - if (field_avail(typeof(resp), striding_rq_caps, uhw_outlen) && + if 
(offsetofend(typeof(resp), striding_rq_caps) <= uhw_outlen && raw_support) { resp.response_length += sizeof(resp.striding_rq_caps); if (MLX5_CAP_GEN(mdev, striding_rq)) { @@ -1181,7 +1181,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, } } - if (field_avail(typeof(resp), tunnel_offloads_caps, uhw_outlen)) { + if (offsetofend(typeof(resp), tunnel_offloads_caps) <= uhw_outlen) { resp.response_length += sizeof(resp.tunnel_offloads_caps); if (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan)) resp.tunnel_offloads_caps |= @@ -1899,16 +1899,16 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx, resp.tot_bfregs = req.total_num_bfregs; resp.num_ports = dev->num_ports; - if (field_avail(typeof(resp), cqe_version, udata->outlen)) + if (offsetofend(typeof(resp), cqe_version) <= udata->outlen) resp.response_length += sizeof(resp.cqe_version); - if (field_avail(typeof(resp), cmds_supp_uhw, udata->outlen)) { + if (offsetofend(typeof(resp), cmds_supp_uhw) <= udata->outlen) { resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE | MLX5_USER_CMDS_SUPP_UHW_CREATE_AH; resp.response_length += sizeof(resp.cmds_supp_uhw); } - if (field_avail(typeof(resp), eth_min_inline, udata->outlen)) { + if (offsetofend(typeof(resp), eth_min_inline) <= udata->outlen) { if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) { mlx5_query_min_inline(dev->mdev, &resp.eth_min_inline); resp.eth_min_inline++; @@ -1916,7 +1916,7 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx, resp.response_length += sizeof(resp.eth_min_inline); } - if (field_avail(typeof(resp), clock_info_versions, udata->outlen)) { + if (offsetofend(typeof(resp), clock_info_versions) <= udata->outlen) { if (mdev->clock_info) resp.clock_info_versions = BIT(MLX5_IB_CLOCK_INFO_V1); resp.response_length += sizeof(resp.clock_info_versions); @@ -1928,7 +1928,7 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx, * pretend we don't support reading the HCA's core clock. This is also * forced by mmap function. 
*/ - if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) { + if (offsetofend(typeof(resp), hca_core_clock_offset) <= udata->outlen) { if (PAGE_SIZE <= 4096) { resp.comp_mask |= MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET; @@ -1938,18 +1938,18 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx, resp.response_length += sizeof(resp.hca_core_clock_offset); } - if (field_avail(typeof(resp), log_uar_size, udata->outlen)) + if (offsetofend(typeof(resp), log_uar_size) <= udata->outlen) resp.response_length += sizeof(resp.log_uar_size); - if (field_avail(typeof(resp), num_uars_per_page, udata->outlen)) + if (offsetofend(typeof(resp), num_uars_per_page) <= udata->outlen) resp.response_length += sizeof(resp.num_uars_per_page); - if (field_avail(typeof(resp), num_dyn_bfregs, udata->outlen)) { + if (offsetofend(typeof(resp), num_dyn_bfregs) <= udata->outlen) { resp.num_dyn_bfregs = bfregi->num_dyn_bfregs; resp.response_length += sizeof(resp.num_dyn_bfregs); } - if (field_avail(typeof(resp), dump_fill_mkey, udata->outlen)) { + if (offsetofend(typeof(resp), dump_fill_mkey) <= udata->outlen) { if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) { resp.dump_fill_mkey = dump_fill_mkey; resp.comp_mask |= diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 2e42258e6fce..4b7d0dfabea2 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -64,8 +64,6 @@ dev_warn(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \ __LINE__, current->pid, ##arg) -#define field_avail(type, fld, sz) (offsetof(type, fld) + \ - sizeof(((type *)0)->fld) <= (sz)) #define MLX5_IB_DEFAULT_UIDX 0xffffff #define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index) @@ -1475,12 +1473,11 @@ static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext, { u8 cqe_version = ucontext->cqe_version; - if (field_avail(struct mlx5_ib_create_qp, uidx, inlen) && - !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX)) + if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version && + (ucmd->uidx == MLX5_IB_DEFAULT_UIDX)) return 0; - if (!!(field_avail(struct mlx5_ib_create_qp, uidx, inlen) != - !!cqe_version)) + if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version) return -EINVAL; return verify_assign_uidx(cqe_version, ucmd->uidx, user_index); @@ -1493,12 +1490,11 @@ static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext, { u8 cqe_version = ucontext->cqe_version; - if (field_avail(struct mlx5_ib_create_srq, uidx, inlen) && - !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX)) + if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version && + (ucmd->uidx == MLX5_IB_DEFAULT_UIDX)) return 0; - if (!!(field_avail(struct mlx5_ib_create_srq, uidx, inlen) != - !!cqe_version)) + if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version) return -EINVAL; return verify_assign_uidx(cqe_version, ucmd->uidx, user_index); -- cgit v1.2.3-58-ga151 From a4f994a05926c96845ad40fb661d566a4eacf2b9 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Tue, 10 Mar 2020 11:14:32 +0200 Subject: RDMA/cm: Delete not implemented CM peer to peer communication Peer to peer support was never implemented, so delete it to make code less clutter. 
Link: https://lore.kernel.org/r/20200310091438.248429-6-leon@kernel.org Signed-off-by: Leon Romanovsky Reviewed-by: Mark Zhang Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cm.c | 7 ------- include/rdma/ib_cm.h | 1 - 2 files changed, 8 deletions(-) diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 5c58619b2399..21ede1c08eea 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -261,7 +261,6 @@ struct cm_id_private { __be16 pkey; u8 private_data_len; u8 max_cm_retries; - u8 peer_to_peer; u8 responder_resources; u8 initiator_depth; u8 retry_count; @@ -1381,10 +1380,6 @@ static void cm_format_req(struct cm_req_msg *req_msg, static int cm_validate_req_param(struct ib_cm_req_param *param) { - /* peer-to-peer not supported */ - if (param->peer_to_peer) - return -EINVAL; - if (!param->primary_path) return -EINVAL; @@ -2437,8 +2432,6 @@ static int cm_rep_handler(struct cm_work *work) cm_ack_timeout(cm_id_priv->target_ack_delay, cm_id_priv->alt_av.timeout - 1); - /* todo: handle peer_to_peer */ - ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); ret = atomic_inc_and_test(&cm_id_priv->work_count); if (!ret) diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h index 8ec482e391aa..058cfbc2b37f 100644 --- a/include/rdma/ib_cm.h +++ b/include/rdma/ib_cm.h @@ -360,7 +360,6 @@ struct ib_cm_req_param { u32 starting_psn; const void *private_data; u8 private_data_len; - u8 peer_to_peer; u8 responder_resources; u8 initiator_depth; u8 remote_cm_response_timeout; -- cgit v1.2.3-58-ga151 From f743ff3b37dfc8e8cf4a32d4254cddd5f0f1add8 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Tue, 10 Mar 2020 10:22:29 +0200 Subject: RDMA/mlx5: Replace spinlock protected write with atomic var mkey variant calculation was spinlock protected to make it atomic, replace that with one atomic variable. 
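The reason a single atomic is enough: only the low 8 bits of the counter are written into the mkey (mkey_7_0), unsigned increments wrap harmlessly, and the lock existed solely to make the read-and-increment pair atomic. A standalone sketch of the same idea using C11 atomics (userspace, not the driver code):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /*
     * Before: spin_lock(); key = counter++; spin_unlock();
     * After:  one atomic increment; truncation to u8 makes wrap-around harmless.
     */
    static atomic_uint mkey_var;

    static uint8_t next_mkey_variant(void)
    {
            /* atomic_fetch_add() returns the old value, so +1 mimics inc_return. */
            return (uint8_t)(atomic_fetch_add(&mkey_var, 1) + 1);
    }

    int main(void)
    {
            for (int i = 0; i < 4; i++)
                    printf("variant %u\n", (unsigned int)next_mkey_variant());
            return 0;
    }
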
Link: https://lore.kernel.org/r/20200310082238.239865-4-leon@kernel.org Signed-off-by: Saeed Mahameed Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx5/main.c | 2 +- drivers/infiniband/hw/mlx5/mlx5_ib.h | 5 +---- drivers/infiniband/hw/mlx5/mr.c | 6 +----- 3 files changed, 3 insertions(+), 10 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index fce863621414..e4f8bee486c2 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -6390,7 +6390,7 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev) spin_lock_init(&dev->reset_flow_resource_lock); xa_init(&dev->odp_mkeys); xa_init(&dev->sig_mrs); - spin_lock_init(&dev->mkey_lock); + atomic_set(&dev->mkey_var, 0); spin_lock_init(&dev->dm.lock); dev->dm.dev = mdev; diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 89a050e516a8..3445402b23cc 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -993,10 +993,7 @@ struct mlx5_ib_dev { */ struct mlx5_ib_resources devr; - /* protect mkey key part */ - spinlock_t mkey_lock; - u8 mkey_key; - + atomic_t mkey_var; struct mlx5_mr_cache cache; struct timer_list delay_timer; /* Prevents soft lock on massive reg MRs */ diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 8508af500972..a1e6ab9b0bed 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -54,12 +54,8 @@ static void assign_mkey_variant(struct mlx5_ib_dev *dev, struct mlx5_core_mkey *mkey, u32 *in) { + u8 key = atomic_inc_return(&dev->mkey_var); void *mkc; - u8 key; - - spin_lock_irq(&dev->mkey_lock); - key = dev->mkey_key++; - spin_unlock_irq(&dev->mkey_lock); mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); MLX5_SET(mkc, mkc, mkey_7_0, key); -- cgit v1.2.3-58-ga151 From 7c8691a396bd2084c7abc02d7aa34dade597814d Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Tue, 10 Mar 2020 10:22:31 +0200 Subject: RDMA/mlx5: Rename the tracking variables for the MR cache The old names do not clearly indicate the intent. Link: https://lore.kernel.org/r/20200310082238.239865-6-leon@kernel.org Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx5/mlx5_ib.h | 19 ++++++++++--- drivers/infiniband/hw/mlx5/mr.c | 54 ++++++++++++++++++------------------ 2 files changed, 42 insertions(+), 31 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 3445402b23cc..731b0f7bbe5b 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -699,15 +699,26 @@ struct mlx5_cache_ent { u32 access_mode; u32 page; - u32 size; - u32 cur; + /* + * - available_mrs is the length of list head, ie the number of MRs + * available for immediate allocation. + * - total_mrs is available_mrs plus all in use MRs that could be + * returned to the cache. + * - limit is the low water mark for available_mrs, 2* limit is the + * upper water mark. 
+ * - pending is the number of MRs currently being created + */ + u32 total_mrs; + u32 available_mrs; + u32 limit; + u32 pending; + + /* Statistics */ u32 miss; - u32 limit; struct mlx5_ib_dev *dev; struct work_struct work; struct delayed_work dwork; - int pending; struct completion compl; }; diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index a1e6ab9b0bed..9f5afa24896d 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -144,8 +144,8 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context) spin_lock_irqsave(&ent->lock, flags); list_add_tail(&mr->list, &ent->head); - ent->cur++; - ent->size++; + ent->available_mrs++; + ent->total_mrs++; spin_unlock_irqrestore(&ent->lock, flags); if (!completion_done(&ent->compl)) @@ -231,8 +231,8 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num) } mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); list_move(&mr->list, &del_list); - ent->cur--; - ent->size--; + ent->available_mrs--; + ent->total_mrs--; spin_unlock_irq(&ent->lock); mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey); } @@ -265,16 +265,16 @@ static ssize_t size_write(struct file *filp, const char __user *buf, if (var < ent->limit) return -EINVAL; - if (var > ent->size) { + if (var > ent->total_mrs) { do { - err = add_keys(dev, c, var - ent->size); + err = add_keys(dev, c, var - ent->total_mrs); if (err && err != -EAGAIN) return err; usleep_range(3000, 5000); } while (err); - } else if (var < ent->size) { - remove_keys(dev, c, ent->size - var); + } else if (var < ent->total_mrs) { + remove_keys(dev, c, ent->total_mrs - var); } return count; @@ -287,7 +287,7 @@ static ssize_t size_read(struct file *filp, char __user *buf, size_t count, char lbuf[20]; int err; - err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size); + err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->total_mrs); if (err < 0) return err; @@ -320,13 +320,13 @@ static ssize_t limit_write(struct file *filp, const char __user *buf, if (sscanf(lbuf, "%u", &var) != 1) return -EINVAL; - if (var > ent->size) + if (var > ent->total_mrs) return -EINVAL; ent->limit = var; - if (ent->cur < ent->limit) { - err = add_keys(dev, c, 2 * ent->limit - ent->cur); + if (ent->available_mrs < ent->limit) { + err = add_keys(dev, c, 2 * ent->limit - ent->available_mrs); if (err) return err; } @@ -360,7 +360,7 @@ static int someone_adding(struct mlx5_mr_cache *cache) int i; for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { - if (cache->ent[i].cur < cache->ent[i].limit) + if (cache->ent[i].available_mrs < cache->ent[i].limit) return 1; } @@ -378,9 +378,9 @@ static void __cache_work_func(struct mlx5_cache_ent *ent) return; ent = &dev->cache.ent[i]; - if (ent->cur < 2 * ent->limit && !dev->fill_delay) { + if (ent->available_mrs < 2 * ent->limit && !dev->fill_delay) { err = add_keys(dev, i, 1); - if (ent->cur < 2 * ent->limit) { + if (ent->available_mrs < 2 * ent->limit) { if (err == -EAGAIN) { mlx5_ib_dbg(dev, "returned eagain, order %d\n", i + 2); @@ -395,7 +395,7 @@ static void __cache_work_func(struct mlx5_cache_ent *ent) queue_work(cache->wq, &ent->work); } } - } else if (ent->cur > 2 * ent->limit) { + } else if (ent->available_mrs > 2 * ent->limit) { /* * The remove_keys() logic is performed as garbage collection * task. 
Such task is intended to be run when no other active @@ -411,7 +411,7 @@ static void __cache_work_func(struct mlx5_cache_ent *ent) if (!need_resched() && !someone_adding(cache) && time_after(jiffies, cache->last_add + 300 * HZ)) { remove_keys(dev, i, 1); - if (ent->cur > ent->limit) + if (ent->available_mrs > ent->limit) queue_work(cache->wq, &ent->work); } else { queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ); @@ -462,9 +462,9 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry) mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); list_del(&mr->list); - ent->cur--; + ent->available_mrs--; spin_unlock_irq(&ent->lock); - if (ent->cur < ent->limit) + if (ent->available_mrs < ent->limit) queue_work(cache->wq, &ent->work); return mr; } @@ -497,9 +497,9 @@ static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order) mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); list_del(&mr->list); - ent->cur--; + ent->available_mrs--; spin_unlock_irq(&ent->lock); - if (ent->cur < ent->limit) + if (ent->available_mrs < ent->limit) queue_work(cache->wq, &ent->work); break; } @@ -531,7 +531,7 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) mr->allocated_from_cache = false; destroy_mkey(dev, mr); ent = &cache->ent[c]; - if (ent->cur < ent->limit) + if (ent->available_mrs < ent->limit) queue_work(cache->wq, &ent->work); return; } @@ -539,8 +539,8 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) ent = &cache->ent[c]; spin_lock_irq(&ent->lock); list_add_tail(&mr->list, &ent->head); - ent->cur++; - if (ent->cur > 2 * ent->limit) + ent->available_mrs++; + if (ent->available_mrs > 2 * ent->limit) shrink = 1; spin_unlock_irq(&ent->lock); @@ -565,8 +565,8 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c) } mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); list_move(&mr->list, &del_list); - ent->cur--; - ent->size--; + ent->available_mrs--; + ent->total_mrs--; spin_unlock_irq(&ent->lock); mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey); } @@ -604,7 +604,7 @@ static void mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev) dir = debugfs_create_dir(ent->name, cache->root); debugfs_create_file("size", 0600, dir, ent, &size_fops); debugfs_create_file("limit", 0600, dir, ent, &limit_fops); - debugfs_create_u32("cur", 0400, dir, &ent->cur); + debugfs_create_u32("cur", 0400, dir, &ent->available_mrs); debugfs_create_u32("miss", 0600, dir, &ent->miss); } } -- cgit v1.2.3-58-ga151 From b91e1751fbcee7692e45308e74d8816c43802ede Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Tue, 10 Mar 2020 10:22:32 +0200 Subject: RDMA/mlx5: Simplify how the MR cache bucket is located There are many bad APIs here that are accepting a cache bucket index instead of a bucket pointer. Many of the callers already have a bucket pointer, so this results in a lot of confusing uses of order2idx(). Pass the struct mlx5_cache_ent into add_keys(), remove_keys(), and alloc_cached_mr(). Once the MR is in the cache, store the cache bucket pointer directly in the MR, replacing the 'bool allocated_from cache'. In the end there is only one place that needs to form index from order, alloc_mr_from_cache(). Increase the safety of this function by disallowing it from accessing cache entries in the ODP special area. 
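The shape of the change, reduced to its essentials: one function maps an order to a bucket (or rejects it), and from then on the MR carries the bucket pointer instead of an index that callers keep recomputing. A standalone sketch with simplified types (the array size and orders below are invented, not the real cache geometry):

    #include <stdio.h>

    #define NUM_STD_ENTRIES 16   /* stand-in, not the real cache size */

    struct cache_ent { unsigned int order; };
    struct cache { struct cache_ent ent[NUM_STD_ENTRIES]; };

    /* The single place that turns an order into a bucket; NULL if too big. */
    static struct cache_ent *ent_from_order(struct cache *c, unsigned int order)
    {
            if (order < c->ent[0].order)
                    return &c->ent[0];
            if (order - c->ent[0].order >= NUM_STD_ENTRIES)
                    return NULL;
            return &c->ent[order - c->ent[0].order];
    }

    /* The MR remembers its bucket; no index math needed at free time. */
    struct mr { struct cache_ent *cache_ent; };

    int main(void)
    {
            struct cache c;
            unsigned int i;

            for (i = 0; i < NUM_STD_ENTRIES; i++)
                    c.ent[i].order = i + 2;   /* smallest bucket: order 2 */

            struct mr m = { .cache_ent = ent_from_order(&c, 5) };

            printf("order 5 lands in bucket of order %u\n", m.cache_ent->order);
            printf("order 40: %s\n",
                   ent_from_order(&c, 40) ? "cached" : "not cached");
            return 0;
    }
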
Link: https://lore.kernel.org/r/20200310082238.239865-7-leon@kernel.org Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx5/mlx5_ib.h | 7 +- drivers/infiniband/hw/mlx5/mr.c | 160 +++++++++++++++-------------------- drivers/infiniband/hw/mlx5/odp.c | 2 +- 3 files changed, 71 insertions(+), 98 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 731b0f7bbe5b..7208946d2787 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -617,8 +617,8 @@ struct mlx5_ib_mr { struct ib_umem *umem; struct mlx5_shared_mr_info *smr_info; struct list_head list; - int order; - bool allocated_from_cache; + unsigned int order; + struct mlx5_cache_ent *cache_ent; int npages; struct mlx5_ib_dev *dev; u32 out[MLX5_ST_SZ_DW(create_mkey_out)]; @@ -1274,7 +1274,8 @@ int mlx5_ib_get_cqe_size(struct ib_cq *ibcq); int mlx5_mr_cache_init(struct mlx5_ib_dev *dev); int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev); -struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry); +struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, + unsigned int entry); void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); int mlx5_mr_cache_invalidate(struct mlx5_ib_mr *mr); diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 9f5afa24896d..55e31f6effda 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -99,16 +99,6 @@ static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) return mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey); } -static int order2idx(struct mlx5_ib_dev *dev, int order) -{ - struct mlx5_mr_cache *cache = &dev->cache; - - if (order < cache->ent[0].order) - return 0; - else - return order - cache->ent[0].order; -} - static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length) { return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >= @@ -120,9 +110,7 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context) struct mlx5_ib_mr *mr = container_of(context, struct mlx5_ib_mr, cb_work); struct mlx5_ib_dev *dev = mr->dev; - struct mlx5_mr_cache *cache = &dev->cache; - int c = order2idx(dev, mr->order); - struct mlx5_cache_ent *ent = &cache->ent[c]; + struct mlx5_cache_ent *ent = mr->cache_ent; unsigned long flags; spin_lock_irqsave(&ent->lock, flags); @@ -140,7 +128,7 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context) mr->mmkey.key |= mlx5_idx_to_mkey( MLX5_GET(create_mkey_out, mr->out, mkey_index)); - cache->last_add = jiffies; + dev->cache.last_add = jiffies; spin_lock_irqsave(&ent->lock, flags); list_add_tail(&mr->list, &ent->head); @@ -152,10 +140,8 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context) complete(&ent->compl); } -static int add_keys(struct mlx5_ib_dev *dev, int c, int num) +static int add_keys(struct mlx5_cache_ent *ent, int num) { - struct mlx5_mr_cache *cache = &dev->cache; - struct mlx5_cache_ent *ent = &cache->ent[c]; int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); struct mlx5_ib_mr *mr; void *mkc; @@ -180,8 +166,8 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num) break; } mr->order = ent->order; - mr->allocated_from_cache = true; - mr->dev = dev; + mr->cache_ent = ent; + mr->dev = ent->dev; MLX5_SET(mkc, mkc, free, 1); MLX5_SET(mkc, mkc, umr_en, 1); @@ -196,15 +182,15 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num) 
spin_lock_irq(&ent->lock); ent->pending++; spin_unlock_irq(&ent->lock); - err = mlx5_ib_create_mkey_cb(dev, &mr->mmkey, - &dev->async_ctx, in, inlen, - mr->out, sizeof(mr->out), - &mr->cb_work); + err = mlx5_ib_create_mkey_cb(ent->dev, &mr->mmkey, + &ent->dev->async_ctx, in, inlen, + mr->out, sizeof(mr->out), + &mr->cb_work); if (err) { spin_lock_irq(&ent->lock); ent->pending--; spin_unlock_irq(&ent->lock); - mlx5_ib_warn(dev, "create mkey failed %d\n", err); + mlx5_ib_warn(ent->dev, "create mkey failed %d\n", err); kfree(mr); break; } @@ -214,10 +200,8 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num) return err; } -static void remove_keys(struct mlx5_ib_dev *dev, int c, int num) +static void remove_keys(struct mlx5_cache_ent *ent, int num) { - struct mlx5_mr_cache *cache = &dev->cache; - struct mlx5_cache_ent *ent = &cache->ent[c]; struct mlx5_ib_mr *tmp_mr; struct mlx5_ib_mr *mr; LIST_HEAD(del_list); @@ -234,7 +218,7 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num) ent->available_mrs--; ent->total_mrs--; spin_unlock_irq(&ent->lock); - mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey); + mlx5_core_destroy_mkey(ent->dev->mdev, &mr->mmkey); } list_for_each_entry_safe(mr, tmp_mr, &del_list, list) { @@ -247,18 +231,14 @@ static ssize_t size_write(struct file *filp, const char __user *buf, size_t count, loff_t *pos) { struct mlx5_cache_ent *ent = filp->private_data; - struct mlx5_ib_dev *dev = ent->dev; char lbuf[20] = {0}; u32 var; int err; - int c; count = min(count, sizeof(lbuf) - 1); if (copy_from_user(lbuf, buf, count)) return -EFAULT; - c = order2idx(dev, ent->order); - if (sscanf(lbuf, "%u", &var) != 1) return -EINVAL; @@ -267,14 +247,14 @@ static ssize_t size_write(struct file *filp, const char __user *buf, if (var > ent->total_mrs) { do { - err = add_keys(dev, c, var - ent->total_mrs); + err = add_keys(ent, var - ent->total_mrs); if (err && err != -EAGAIN) return err; usleep_range(3000, 5000); } while (err); } else if (var < ent->total_mrs) { - remove_keys(dev, c, ent->total_mrs - var); + remove_keys(ent, ent->total_mrs - var); } return count; @@ -305,18 +285,14 @@ static ssize_t limit_write(struct file *filp, const char __user *buf, size_t count, loff_t *pos) { struct mlx5_cache_ent *ent = filp->private_data; - struct mlx5_ib_dev *dev = ent->dev; char lbuf[20] = {0}; u32 var; int err; - int c; count = min(count, sizeof(lbuf) - 1); if (copy_from_user(lbuf, buf, count)) return -EFAULT; - c = order2idx(dev, ent->order); - if (sscanf(lbuf, "%u", &var) != 1) return -EINVAL; @@ -326,7 +302,7 @@ static ssize_t limit_write(struct file *filp, const char __user *buf, ent->limit = var; if (ent->available_mrs < ent->limit) { - err = add_keys(dev, c, 2 * ent->limit - ent->available_mrs); + err = add_keys(ent, 2 * ent->limit - ent->available_mrs); if (err) return err; } @@ -371,24 +347,22 @@ static void __cache_work_func(struct mlx5_cache_ent *ent) { struct mlx5_ib_dev *dev = ent->dev; struct mlx5_mr_cache *cache = &dev->cache; - int i = order2idx(dev, ent->order); int err; if (cache->stopped) return; - ent = &dev->cache.ent[i]; if (ent->available_mrs < 2 * ent->limit && !dev->fill_delay) { - err = add_keys(dev, i, 1); + err = add_keys(ent, 1); if (ent->available_mrs < 2 * ent->limit) { if (err == -EAGAIN) { mlx5_ib_dbg(dev, "returned eagain, order %d\n", - i + 2); + ent->order); queue_delayed_work(cache->wq, &ent->dwork, msecs_to_jiffies(3)); } else if (err) { mlx5_ib_warn(dev, "command failed order %d, err %d\n", - i + 2, err); + ent->order, err); 
queue_delayed_work(cache->wq, &ent->dwork, msecs_to_jiffies(1000)); } else { @@ -410,7 +384,7 @@ static void __cache_work_func(struct mlx5_cache_ent *ent) */ if (!need_resched() && !someone_adding(cache) && time_after(jiffies, cache->last_add + 300 * HZ)) { - remove_keys(dev, i, 1); + remove_keys(ent, 1); if (ent->available_mrs > ent->limit) queue_work(cache->wq, &ent->work); } else { @@ -435,17 +409,18 @@ static void cache_work_func(struct work_struct *work) __cache_work_func(ent); } -struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry) +/* Allocate a special entry from the cache */ +struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, + unsigned int entry) { struct mlx5_mr_cache *cache = &dev->cache; struct mlx5_cache_ent *ent; struct mlx5_ib_mr *mr; int err; - if (entry < 0 || entry >= MAX_MR_CACHE_ENTRIES) { - mlx5_ib_err(dev, "cache entry %d is out of range\n", entry); + if (WARN_ON(entry <= MR_CACHE_LAST_STD_ENTRY || + entry >= ARRAY_SIZE(cache->ent))) return ERR_PTR(-EINVAL); - } ent = &cache->ent[entry]; while (1) { @@ -453,7 +428,7 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry) if (list_empty(&ent->head)) { spin_unlock_irq(&ent->lock); - err = add_keys(dev, entry, 1); + err = add_keys(ent, 1); if (err && err != -EAGAIN) return ERR_PTR(err); @@ -471,26 +446,16 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry) } } -static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order) +static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_cache_ent *req_ent) { - struct mlx5_mr_cache *cache = &dev->cache; + struct mlx5_ib_dev *dev = req_ent->dev; struct mlx5_ib_mr *mr = NULL; - struct mlx5_cache_ent *ent; - int last_umr_cache_entry; - int c; - int i; - - c = order2idx(dev, order); - last_umr_cache_entry = order2idx(dev, mr_cache_max_order(dev)); - if (c < 0 || c > last_umr_cache_entry) { - mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c); - return NULL; - } + struct mlx5_cache_ent *ent = req_ent; - for (i = c; i <= last_umr_cache_entry; i++) { - ent = &cache->ent[i]; - - mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i); + /* Try larger MR pools from the cache to satisfy the allocation */ + for (; ent != &dev->cache.ent[MR_CACHE_LAST_STD_ENTRY + 1]; ent++) { + mlx5_ib_dbg(dev, "order %u, cache index %zu\n", ent->order, + ent - dev->cache.ent); spin_lock_irq(&ent->lock); if (!list_empty(&ent->head)) { @@ -500,43 +465,36 @@ static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order) ent->available_mrs--; spin_unlock_irq(&ent->lock); if (ent->available_mrs < ent->limit) - queue_work(cache->wq, &ent->work); + queue_work(dev->cache.wq, &ent->work); break; } spin_unlock_irq(&ent->lock); - queue_work(cache->wq, &ent->work); + queue_work(dev->cache.wq, &ent->work); } if (!mr) - cache->ent[c].miss++; + req_ent->miss++; return mr; } void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) { - struct mlx5_mr_cache *cache = &dev->cache; - struct mlx5_cache_ent *ent; + struct mlx5_cache_ent *ent = mr->cache_ent; int shrink = 0; - int c; - if (!mr->allocated_from_cache) + if (!ent) return; - c = order2idx(dev, mr->order); - WARN_ON(c < 0 || c >= MAX_MR_CACHE_ENTRIES); - if (mlx5_mr_cache_invalidate(mr)) { - mr->allocated_from_cache = false; + mr->cache_ent = NULL; destroy_mkey(dev, mr); - ent = &cache->ent[c]; if (ent->available_mrs < ent->limit) - queue_work(cache->wq, &ent->work); + queue_work(dev->cache.wq, &ent->work); return; } - ent = 
&cache->ent[c]; spin_lock_irq(&ent->lock); list_add_tail(&mr->list, &ent->head); ent->available_mrs++; @@ -545,7 +503,7 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) spin_unlock_irq(&ent->lock); if (shrink) - queue_work(cache->wq, &ent->work); + queue_work(dev->cache.wq, &ent->work); } static void clean_keys(struct mlx5_ib_dev *dev, int c) @@ -872,22 +830,38 @@ static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev, return err; } -static struct mlx5_ib_mr *alloc_mr_from_cache( - struct ib_pd *pd, struct ib_umem *umem, - u64 virt_addr, u64 len, int npages, - int page_shift, int order, int access_flags) +static struct mlx5_cache_ent *mr_cache_ent_from_order(struct mlx5_ib_dev *dev, + unsigned int order) +{ + struct mlx5_mr_cache *cache = &dev->cache; + + if (order < cache->ent[0].order) + return &cache->ent[0]; + order = order - cache->ent[0].order; + if (order > MR_CACHE_LAST_STD_ENTRY) + return NULL; + return &cache->ent[order]; +} + +static struct mlx5_ib_mr * +alloc_mr_from_cache(struct ib_pd *pd, struct ib_umem *umem, u64 virt_addr, + u64 len, int npages, int page_shift, unsigned int order, + int access_flags) { struct mlx5_ib_dev *dev = to_mdev(pd->device); + struct mlx5_cache_ent *ent = mr_cache_ent_from_order(dev, order); struct mlx5_ib_mr *mr; int err = 0; int i; + if (!ent) + return ERR_PTR(-E2BIG); for (i = 0; i < 1; i++) { - mr = alloc_cached_mr(dev, order); + mr = alloc_cached_mr(ent); if (mr) break; - err = add_keys(dev, order2idx(dev, order), 1); + err = add_keys(ent, 1); if (err && err != -EAGAIN) { mlx5_ib_warn(dev, "add_keys failed, err %d\n", err); break; @@ -1470,7 +1444,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, /* * UMR can't be used - MKey needs to be replaced. */ - if (mr->allocated_from_cache) + if (mr->cache_ent) err = mlx5_mr_cache_invalidate(mr); else err = destroy_mkey(dev, mr); @@ -1486,7 +1460,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, goto err; } - mr->allocated_from_cache = false; + mr->cache_ent = NULL; } else { /* * Send a UMR WQE @@ -1573,8 +1547,6 @@ mlx5_free_priv_descs(struct mlx5_ib_mr *mr) static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) { - int allocated_from_cache = mr->allocated_from_cache; - if (mr->sig) { if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_memory.psv_idx)) @@ -1589,7 +1561,7 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) mr->sig = NULL; } - if (!allocated_from_cache) { + if (!mr->cache_ent) { destroy_mkey(dev, mr); mlx5_free_priv_descs(mr); } @@ -1606,7 +1578,7 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) else clean_mr(dev, mr); - if (mr->allocated_from_cache) + if (mr->cache_ent) mlx5_mr_cache_free(dev, mr); else kfree(mr); diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index 4216814ba871..224f480fc441 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -197,7 +197,7 @@ static void dma_fence_odp_mr(struct mlx5_ib_mr *mr) odp->private = NULL; mutex_unlock(&odp->umem_mutex); - if (!mr->allocated_from_cache) { + if (!mr->cache_ent) { mlx5_core_destroy_mkey(mr->dev->mdev, &mr->mmkey); WARN_ON(mr->descs); } -- cgit v1.2.3-58-ga151 From 1769c4c575489be28891c98f1e3f0a4252ca750a Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Tue, 10 Mar 2020 10:22:33 +0200 Subject: RDMA/mlx5: Always remove MRs from the cache before destroying them The cache bucket tracks the total number of MRs that exists, both 
inside and outside of the cache. Removing a MR from the cache (by setting cache_ent to NULL) without updating total_mrs will cause the tracking to leak and be inflated. Further fix the rereg_mr path to always destroy the MR. reg_create will always overwrite all the MR data in mlx5_ib_mr, so the MR must be completely destroyed, in all cases, before this function can be called. Detach the MR from the cache and unconditionally destroy it to avoid leaking HW mkeys. Fixes: afd1417404fb ("IB/mlx5: Use direct mkey destroy command upon UMR unreg failure") Fixes: 56e11d628c5d ("IB/mlx5: Added support for re-registration of MRs") Link: https://lore.kernel.org/r/20200310082238.239865-8-leon@kernel.org Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx5/mr.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 55e31f6effda..9b980ef326b4 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -479,6 +479,16 @@ static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_cache_ent *req_ent) return mr; } +static void detach_mr_from_cache(struct mlx5_ib_mr *mr) +{ + struct mlx5_cache_ent *ent = mr->cache_ent; + + mr->cache_ent = NULL; + spin_lock_irq(&ent->lock); + ent->total_mrs--; + spin_unlock_irq(&ent->lock); +} + void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) { struct mlx5_cache_ent *ent = mr->cache_ent; @@ -488,7 +498,7 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) return; if (mlx5_mr_cache_invalidate(mr)) { - mr->cache_ent = NULL; + detach_mr_from_cache(mr); destroy_mkey(dev, mr); if (ent->available_mrs < ent->limit) queue_work(dev->cache.wq, &ent->work); @@ -1445,9 +1455,8 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, * UMR can't be used - MKey needs to be replaced. */ if (mr->cache_ent) - err = mlx5_mr_cache_invalidate(mr); - else - err = destroy_mkey(dev, mr); + detach_mr_from_cache(mr); + err = destroy_mkey(dev, mr); if (err) goto err; @@ -1459,8 +1468,6 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, mr = to_mmr(ib_mr); goto err; } - - mr->cache_ent = NULL; } else { /* * Send a UMR WQE -- cgit v1.2.3-58-ga151 From a1d8854aae4ee19df6161a276a99d3c9c2abc4f3 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Tue, 10 Mar 2020 10:22:34 +0200 Subject: RDMA/mlx5: Fix MR cache size and limit debugfs The size_write function is supposed to adjust the total_mr's to match the user's request, but lacks locking and safety checking. total_mrs can only be adjusted by at most available_mrs. mrs already assigned to users cannot be revoked. Ensure that the user provides a target value within the range of available_mrs and within the high/low water mark. limit_write has confusing and wrong sanity checking, and doesn't have the ability to deallocate on limit reduction. Since both functions use the same algorithm to adjust the available_mrs, consolidate it into one function and write it correctly. Fix the locking and by holding the spinlock for all accesses to ent->X. Always fail if the user provides a malformed string. 
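The arithmetic the new size_write() enforces, in isolation: the user asks for a new total, but only cached MRs can be created or destroyed, so the request is translated into a target for available_mrs and rejected if it would require revoking in-use MRs or would leave the bucket outside its water marks. A standalone sketch (userspace C, invented numbers):

    #include <stdio.h>

    /*
     * Translate a requested total MR count into a target for the cached
     * (available) MRs. In-use MRs (total - available) cannot be revoked,
     * and the result must stay within the low/high water marks.
     * Returns -1 for an invalid request.
     */
    static int compute_available_target(unsigned int req_total,
                                        unsigned int total_mrs,
                                        unsigned int available_mrs,
                                        unsigned int limit)
    {
            unsigned int in_use = total_mrs - available_mrs;

            if (req_total < in_use)
                    return -1;   /* would require revoking in-use MRs */

            unsigned int target = req_total - in_use;

            if (target < limit || target > 2 * limit)
                    return -1;   /* outside the low/high water marks */
            return (int)target;
    }

    int main(void)
    {
            /* 10 MRs exist, 4 cached, 6 in use, low water mark (limit) is 4. */
            printf("request 12 -> %d\n", compute_available_target(12, 10, 4, 4));
            printf("request  5 -> %d\n", compute_available_target(5, 10, 4, 4));
            return 0;
    }
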
Fixes: e126ba97dba9 ("mlx5: Add driver for Mellanox Connect-IB adapters") Link: https://lore.kernel.org/r/20200310082238.239865-9-leon@kernel.org Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx5/mr.c | 152 +++++++++++++++++++++++----------------- 1 file changed, 88 insertions(+), 64 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 9b980ef326b4..091e24c58e2c 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -140,7 +140,7 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context) complete(&ent->compl); } -static int add_keys(struct mlx5_cache_ent *ent, int num) +static int add_keys(struct mlx5_cache_ent *ent, unsigned int num) { int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); struct mlx5_ib_mr *mr; @@ -200,30 +200,54 @@ static int add_keys(struct mlx5_cache_ent *ent, int num) return err; } -static void remove_keys(struct mlx5_cache_ent *ent, int num) +static void remove_cache_mr(struct mlx5_cache_ent *ent) { - struct mlx5_ib_mr *tmp_mr; struct mlx5_ib_mr *mr; - LIST_HEAD(del_list); - int i; - for (i = 0; i < num; i++) { - spin_lock_irq(&ent->lock); - if (list_empty(&ent->head)) { - spin_unlock_irq(&ent->lock); - break; - } - mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); - list_move(&mr->list, &del_list); - ent->available_mrs--; - ent->total_mrs--; + spin_lock_irq(&ent->lock); + if (list_empty(&ent->head)) { spin_unlock_irq(&ent->lock); - mlx5_core_destroy_mkey(ent->dev->mdev, &mr->mmkey); + return; } + mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); + list_del(&mr->list); + ent->available_mrs--; + ent->total_mrs--; + spin_unlock_irq(&ent->lock); + mlx5_core_destroy_mkey(ent->dev->mdev, &mr->mmkey); + kfree(mr); +} - list_for_each_entry_safe(mr, tmp_mr, &del_list, list) { - list_del(&mr->list); - kfree(mr); +static int resize_available_mrs(struct mlx5_cache_ent *ent, unsigned int target, + bool limit_fill) +{ + int err; + + lockdep_assert_held(&ent->lock); + + while (true) { + if (limit_fill) + target = ent->limit * 2; + if (target == ent->available_mrs + ent->pending) + return 0; + if (target > ent->available_mrs + ent->pending) { + u32 todo = target - (ent->available_mrs + ent->pending); + + spin_unlock_irq(&ent->lock); + err = add_keys(ent, todo); + if (err == -EAGAIN) + usleep_range(3000, 5000); + spin_lock_irq(&ent->lock); + if (err) { + if (err != -EAGAIN) + return err; + } else + return 0; + } else { + spin_unlock_irq(&ent->lock); + remove_cache_mr(ent); + spin_lock_irq(&ent->lock); + } } } @@ -231,33 +255,38 @@ static ssize_t size_write(struct file *filp, const char __user *buf, size_t count, loff_t *pos) { struct mlx5_cache_ent *ent = filp->private_data; - char lbuf[20] = {0}; - u32 var; + u32 target; int err; - count = min(count, sizeof(lbuf) - 1); - if (copy_from_user(lbuf, buf, count)) - return -EFAULT; - - if (sscanf(lbuf, "%u", &var) != 1) - return -EINVAL; - - if (var < ent->limit) - return -EINVAL; - - if (var > ent->total_mrs) { - do { - err = add_keys(ent, var - ent->total_mrs); - if (err && err != -EAGAIN) - return err; + err = kstrtou32_from_user(buf, count, 0, &target); + if (err) + return err; - usleep_range(3000, 5000); - } while (err); - } else if (var < ent->total_mrs) { - remove_keys(ent, ent->total_mrs - var); + /* + * Target is the new value of total_mrs the user requests, however we + * cannot free MRs that are in use. Compute the target value for + * available_mrs. 
+ */ + spin_lock_irq(&ent->lock); + if (target < ent->total_mrs - ent->available_mrs) { + err = -EINVAL; + goto err_unlock; + } + target = target - (ent->total_mrs - ent->available_mrs); + if (target < ent->limit || target > ent->limit*2) { + err = -EINVAL; + goto err_unlock; } + err = resize_available_mrs(ent, target, false); + if (err) + goto err_unlock; + spin_unlock_irq(&ent->lock); return count; + +err_unlock: + spin_unlock_irq(&ent->lock); + return err; } static ssize_t size_read(struct file *filp, char __user *buf, size_t count, @@ -285,28 +314,23 @@ static ssize_t limit_write(struct file *filp, const char __user *buf, size_t count, loff_t *pos) { struct mlx5_cache_ent *ent = filp->private_data; - char lbuf[20] = {0}; u32 var; int err; - count = min(count, sizeof(lbuf) - 1); - if (copy_from_user(lbuf, buf, count)) - return -EFAULT; - - if (sscanf(lbuf, "%u", &var) != 1) - return -EINVAL; - - if (var > ent->total_mrs) - return -EINVAL; + err = kstrtou32_from_user(buf, count, 0, &var); + if (err) + return err; + /* + * Upon set we immediately fill the cache to high water mark implied by + * the limit. + */ + spin_lock_irq(&ent->lock); ent->limit = var; - - if (ent->available_mrs < ent->limit) { - err = add_keys(ent, 2 * ent->limit - ent->available_mrs); - if (err) - return err; - } - + err = resize_available_mrs(ent, 0, true); + spin_unlock_irq(&ent->lock); + if (err) + return err; return count; } @@ -371,20 +395,20 @@ static void __cache_work_func(struct mlx5_cache_ent *ent) } } else if (ent->available_mrs > 2 * ent->limit) { /* - * The remove_keys() logic is performed as garbage collection - * task. Such task is intended to be run when no other active - * processes are running. + * The remove_cache_mr() logic is performed as garbage + * collection task. Such task is intended to be run when no + * other active processes are running. * * The need_resched() will return TRUE if there are user tasks * to be activated in near future. * - * In such case, we don't execute remove_keys() and postpone - * the garbage collection work to try to run in next cycle, - * in order to free CPU resources to other tasks. + * In such case, we don't execute remove_cache_mr() and postpone + * the garbage collection work to try to run in next cycle, in + * order to free CPU resources to other tasks. */ if (!need_resched() && !someone_adding(cache) && time_after(jiffies, cache->last_add + 300 * HZ)) { - remove_keys(ent, 1); + remove_cache_mr(ent); if (ent->available_mrs > ent->limit) queue_work(cache->wq, &ent->work); } else { -- cgit v1.2.3-58-ga151 From ad2d3ef46d2a88f2906d8d0cc6b912199ec3f1d6 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Tue, 10 Mar 2020 10:22:35 +0200 Subject: RDMA/mlx5: Lock access to ent->available_mrs/limit when doing queue_work Accesses to these members needs to be locked. There is no reason not to hold a spinlock while calling queue_work(), so move the tests into a helper and always call it under lock. The helper should be called when available_mrs is adjusted. 
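Reduced to its essentials, the helper is just a water-mark test that must run under the same lock protecting the counters it reads; callers adjust available_mrs and then test while still holding that lock. A standalone sketch with a pthread mutex standing in for the bucket spinlock (invented types, not driver code):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct bucket {
            pthread_mutex_t lock;
            unsigned int available_mrs;
            unsigned int limit;       /* low water mark; high mark is 2 * limit */
            bool work_queued;         /* stand-in for queue_work() */
    };

    /* Must be called with b->lock held, mirroring queue_adjust_cache_locked(). */
    static void queue_adjust_locked(struct bucket *b)
    {
            if (b->available_mrs < b->limit || b->available_mrs > 2 * b->limit)
                    b->work_queued = true;   /* real code: queue_work(...) */
    }

    static void put_back_mr(struct bucket *b)
    {
            pthread_mutex_lock(&b->lock);
            b->available_mrs++;              /* adjust ... */
            queue_adjust_locked(b);          /* ... and test under the same lock */
            pthread_mutex_unlock(&b->lock);
    }

    int main(void)
    {
            struct bucket b = {
                    .lock = PTHREAD_MUTEX_INITIALIZER,
                    .available_mrs = 8,
                    .limit = 4,
                    .work_queued = false,
            };

            put_back_mr(&b);                 /* 9 > 2 * 4, so work is scheduled */
            printf("work queued: %d\n", b.work_queued);
            return 0;
    }
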
Link: https://lore.kernel.org/r/20200310082238.239865-10-leon@kernel.org Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx5/mr.c | 40 +++++++++++++++++++++++++--------------- 1 file changed, 25 insertions(+), 15 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 091e24c58e2c..b46039d86b98 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -134,6 +134,10 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context) list_add_tail(&mr->list, &ent->head); ent->available_mrs++; ent->total_mrs++; + /* + * Creating is always done in response to some demand, so do not call + * queue_adjust_cache_locked(). + */ spin_unlock_irqrestore(&ent->lock, flags); if (!completion_done(&ent->compl)) @@ -367,6 +371,20 @@ static int someone_adding(struct mlx5_mr_cache *cache) return 0; } +/* + * Check if the bucket is outside the high/low water mark and schedule an async + * update. The cache refill has hysteresis, once the low water mark is hit it is + * refilled up to the high mark. + */ +static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent) +{ + lockdep_assert_held(&ent->lock); + + if (ent->available_mrs < ent->limit || + ent->available_mrs > 2 * ent->limit) + queue_work(ent->dev->cache.wq, &ent->work); +} + static void __cache_work_func(struct mlx5_cache_ent *ent) { struct mlx5_ib_dev *dev = ent->dev; @@ -462,9 +480,8 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, list); list_del(&mr->list); ent->available_mrs--; + queue_adjust_cache_locked(ent); spin_unlock_irq(&ent->lock); - if (ent->available_mrs < ent->limit) - queue_work(cache->wq, &ent->work); return mr; } } @@ -487,14 +504,12 @@ static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_cache_ent *req_ent) list); list_del(&mr->list); ent->available_mrs--; + queue_adjust_cache_locked(ent); spin_unlock_irq(&ent->lock); - if (ent->available_mrs < ent->limit) - queue_work(dev->cache.wq, &ent->work); break; } + queue_adjust_cache_locked(ent); spin_unlock_irq(&ent->lock); - - queue_work(dev->cache.wq, &ent->work); } if (!mr) @@ -516,7 +531,6 @@ static void detach_mr_from_cache(struct mlx5_ib_mr *mr) void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) { struct mlx5_cache_ent *ent = mr->cache_ent; - int shrink = 0; if (!ent) return; @@ -524,20 +538,14 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) if (mlx5_mr_cache_invalidate(mr)) { detach_mr_from_cache(mr); destroy_mkey(dev, mr); - if (ent->available_mrs < ent->limit) - queue_work(dev->cache.wq, &ent->work); return; } spin_lock_irq(&ent->lock); list_add_tail(&mr->list, &ent->head); ent->available_mrs++; - if (ent->available_mrs > 2 * ent->limit) - shrink = 1; + queue_adjust_cache_locked(ent); spin_unlock_irq(&ent->lock); - - if (shrink) - queue_work(dev->cache.wq, &ent->work); } static void clean_keys(struct mlx5_ib_dev *dev, int c) @@ -653,7 +661,9 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev) ent->limit = dev->mdev->profile->mr_cache[i].limit; else ent->limit = 0; - queue_work(cache->wq, &ent->work); + spin_lock_irq(&ent->lock); + queue_adjust_cache_locked(ent); + spin_unlock_irq(&ent->lock); } mlx5_mr_cache_debugfs_init(dev); -- cgit v1.2.3-58-ga151 From b9358bdbc713cb64b8701bcbd450edc155d761a1 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Tue, 10 Mar 2020 10:22:36 +0200 Subject: RDMA/mlx5: Fix locking in MR cache work queue All of the members of mlx5_cache_ent must be accessed 
while holding the spinlock, add the missing spinlock in the __cache_work_func(). Using cache->stopped and flush_workqueue() is an inherently racy way to shutdown self-scheduling work on a queue. Replace it with ent->disabled under lock, and always check disabled before queuing any new work. Use cancel_work_sync() to shutdown the queue. Use READ_ONCE/WRITE_ONCE for dev->last_add to manage concurrency as coherency is less important here. Split fill_delay from the bitfield. C bitfield updates are not atomic and this is just a mess. Use READ_ONCE/WRITE_ONCE, but this could also use test_bit()/set_bit(). Link: https://lore.kernel.org/r/20200310082238.239865-11-leon@kernel.org Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx5/mlx5_ib.h | 5 +- drivers/infiniband/hw/mlx5/mr.c | 121 ++++++++++++++++++++++------------- 2 files changed, 80 insertions(+), 46 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 7208946d2787..a22932ffb9c8 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -699,6 +699,8 @@ struct mlx5_cache_ent { u32 access_mode; u32 page; + u8 disabled:1; + /* * - available_mrs is the length of list head, ie the number of MRs * available for immediate allocation. @@ -725,7 +727,6 @@ struct mlx5_cache_ent { struct mlx5_mr_cache { struct workqueue_struct *wq; struct mlx5_cache_ent ent[MAX_MR_CACHE_ENTRIES]; - int stopped; struct dentry *root; unsigned long last_add; }; @@ -995,10 +996,10 @@ struct mlx5_ib_dev { */ struct mutex cap_mask_mutex; u8 ib_active:1; - u8 fill_delay:1; u8 is_rep:1; u8 lag_active:1; u8 wc_support:1; + u8 fill_delay; struct umr_common umrc; /* sync used page count stats */ diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index b46039d86b98..424ce3de3865 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -113,13 +113,13 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context) struct mlx5_cache_ent *ent = mr->cache_ent; unsigned long flags; - spin_lock_irqsave(&ent->lock, flags); - ent->pending--; - spin_unlock_irqrestore(&ent->lock, flags); if (status) { mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status); kfree(mr); - dev->fill_delay = 1; + spin_lock_irqsave(&ent->lock, flags); + ent->pending--; + WRITE_ONCE(dev->fill_delay, 1); + spin_unlock_irqrestore(&ent->lock, flags); mod_timer(&dev->delay_timer, jiffies + HZ); return; } @@ -128,12 +128,13 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context) mr->mmkey.key |= mlx5_idx_to_mkey( MLX5_GET(create_mkey_out, mr->out, mkey_index)); - dev->cache.last_add = jiffies; + WRITE_ONCE(dev->cache.last_add, jiffies); spin_lock_irqsave(&ent->lock, flags); list_add_tail(&mr->list, &ent->head); ent->available_mrs++; ent->total_mrs++; + ent->pending--; /* * Creating is always done in response to some demand, so do not call * queue_adjust_cache_locked(). 
@@ -159,11 +160,6 @@ static int add_keys(struct mlx5_cache_ent *ent, unsigned int num) mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); for (i = 0; i < num; i++) { - if (ent->pending >= MAX_PENDING_REG_MR) { - err = -EAGAIN; - break; - } - mr = kzalloc(sizeof(*mr), GFP_KERNEL); if (!mr) { err = -ENOMEM; @@ -184,6 +180,12 @@ static int add_keys(struct mlx5_cache_ent *ent, unsigned int num) MLX5_SET(mkc, mkc, log_page_size, ent->page); spin_lock_irq(&ent->lock); + if (ent->pending >= MAX_PENDING_REG_MR) { + err = -EAGAIN; + spin_unlock_irq(&ent->lock); + kfree(mr); + break; + } ent->pending++; spin_unlock_irq(&ent->lock); err = mlx5_ib_create_mkey_cb(ent->dev, &mr->mmkey, @@ -204,15 +206,13 @@ static int add_keys(struct mlx5_cache_ent *ent, unsigned int num) return err; } -static void remove_cache_mr(struct mlx5_cache_ent *ent) +static void remove_cache_mr_locked(struct mlx5_cache_ent *ent) { struct mlx5_ib_mr *mr; - spin_lock_irq(&ent->lock); - if (list_empty(&ent->head)) { - spin_unlock_irq(&ent->lock); + lockdep_assert_held(&ent->lock); + if (list_empty(&ent->head)) return; - } mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); list_del(&mr->list); ent->available_mrs--; @@ -220,6 +220,7 @@ static void remove_cache_mr(struct mlx5_cache_ent *ent) spin_unlock_irq(&ent->lock); mlx5_core_destroy_mkey(ent->dev->mdev, &mr->mmkey); kfree(mr); + spin_lock_irq(&ent->lock); } static int resize_available_mrs(struct mlx5_cache_ent *ent, unsigned int target, @@ -248,9 +249,7 @@ static int resize_available_mrs(struct mlx5_cache_ent *ent, unsigned int target, } else return 0; } else { - spin_unlock_irq(&ent->lock); - remove_cache_mr(ent); - spin_lock_irq(&ent->lock); + remove_cache_mr_locked(ent); } } } @@ -359,16 +358,21 @@ static const struct file_operations limit_fops = { .read = limit_read, }; -static int someone_adding(struct mlx5_mr_cache *cache) +static bool someone_adding(struct mlx5_mr_cache *cache) { - int i; + unsigned int i; for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { - if (cache->ent[i].available_mrs < cache->ent[i].limit) - return 1; - } + struct mlx5_cache_ent *ent = &cache->ent[i]; + bool ret; - return 0; + spin_lock_irq(&ent->lock); + ret = ent->available_mrs < ent->limit; + spin_unlock_irq(&ent->lock); + if (ret) + return true; + } + return false; } /* @@ -380,6 +384,8 @@ static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent) { lockdep_assert_held(&ent->lock); + if (ent->disabled) + return; if (ent->available_mrs < ent->limit || ent->available_mrs > 2 * ent->limit) queue_work(ent->dev->cache.wq, &ent->work); @@ -391,27 +397,42 @@ static void __cache_work_func(struct mlx5_cache_ent *ent) struct mlx5_mr_cache *cache = &dev->cache; int err; - if (cache->stopped) - return; + spin_lock_irq(&ent->lock); + if (ent->disabled) + goto out; - if (ent->available_mrs < 2 * ent->limit && !dev->fill_delay) { + if (ent->available_mrs + ent->pending < 2 * ent->limit && + !READ_ONCE(dev->fill_delay)) { + spin_unlock_irq(&ent->lock); err = add_keys(ent, 1); - if (ent->available_mrs < 2 * ent->limit) { + + spin_lock_irq(&ent->lock); + if (ent->disabled) + goto out; + if (err) { if (err == -EAGAIN) { mlx5_ib_dbg(dev, "returned eagain, order %d\n", ent->order); queue_delayed_work(cache->wq, &ent->dwork, msecs_to_jiffies(3)); - } else if (err) { - mlx5_ib_warn(dev, "command failed order %d, err %d\n", - ent->order, err); + } else { + mlx5_ib_warn( + dev, + "command failed order %d, err %d\n", + ent->order, err); queue_delayed_work(cache->wq, &ent->dwork, 
msecs_to_jiffies(1000)); - } else { - queue_work(cache->wq, &ent->work); } } + /* + * Once we start populating due to hitting a low water mark + * continue until we pass the high water mark. + */ + if (ent->available_mrs + ent->pending < 2 * ent->limit) + queue_work(cache->wq, &ent->work); } else if (ent->available_mrs > 2 * ent->limit) { + bool need_delay; + /* * The remove_cache_mr() logic is performed as garbage * collection task. Such task is intended to be run when no @@ -424,15 +445,20 @@ static void __cache_work_func(struct mlx5_cache_ent *ent) * the garbage collection work to try to run in next cycle, in * order to free CPU resources to other tasks. */ - if (!need_resched() && !someone_adding(cache) && - time_after(jiffies, cache->last_add + 300 * HZ)) { - remove_cache_mr(ent); - if (ent->available_mrs > ent->limit) - queue_work(cache->wq, &ent->work); - } else { + spin_unlock_irq(&ent->lock); + need_delay = need_resched() || someone_adding(cache) || + time_after(jiffies, + READ_ONCE(cache->last_add) + 300 * HZ); + spin_lock_irq(&ent->lock); + if (ent->disabled) + goto out; + if (need_delay) queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ); - } + remove_cache_mr_locked(ent); + queue_adjust_cache_locked(ent); } +out: + spin_unlock_irq(&ent->lock); } static void delayed_cache_work_func(struct work_struct *work) @@ -613,7 +639,7 @@ static void delay_time_func(struct timer_list *t) { struct mlx5_ib_dev *dev = from_timer(dev, t, delay_timer); - dev->fill_delay = 0; + WRITE_ONCE(dev->fill_delay, 0); } int mlx5_mr_cache_init(struct mlx5_ib_dev *dev) @@ -673,13 +699,20 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev) int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev) { - int i; + unsigned int i; if (!dev->cache.wq) return 0; - dev->cache.stopped = 1; - flush_workqueue(dev->cache.wq); + for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { + struct mlx5_cache_ent *ent = &dev->cache.ent[i]; + + spin_lock_irq(&ent->lock); + ent->disabled = true; + spin_unlock_irq(&ent->lock); + cancel_work_sync(&ent->work); + cancel_delayed_work_sync(&ent->dwork); + } mlx5_mr_cache_debugfs_cleanup(dev); mlx5_cmd_cleanup_async_ctx(&dev->async_ctx); -- cgit v1.2.3-58-ga151 From 1c78a21a0c6f8df98a2281c1c0232f9c85e1c44d Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Tue, 10 Mar 2020 10:22:37 +0200 Subject: RDMA/mlx5: Revise how the hysteresis scheme works for cache filling Currently if the work queue is running then it is in 'hysteresis' mode and will fill until the cache reaches the high water mark. This implicit state is very tricky and doesn't interact with pending very well. Instead of self re-scheduling the work queue after the add_keys() has started to create the new MR, have the queue scheduled from reg_mr_callback() only after the requested MR has been added. This avoids the bad design of an in-rush of queue'd work doing back to back add_keys() until EAGAIN then sleeping. The add_keys() will be paced one at a time as they complete, slowly filling up the cache. Also, fix pending to be only manipulated under lock. 
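The watermark behaviour described here can be sketched independently of the driver. Below is a minimal, hypothetical C illustration of the same hysteresis idea: filling starts once the pool drops below the low water mark (limit) and, once started, continues until available plus pending reaches the high water mark (2 * limit). The structure and names are illustrative only, not the mlx5 code.

#include <stdbool.h>

/* Hypothetical pool state; loosely mirrors available_mrs/pending/limit
 * but is not the mlx5 structure itself. */
struct pool {
	unsigned int available;   /* ready entries */
	unsigned int pending;     /* entries being created asynchronously */
	unsigned int limit;       /* low water mark */
	bool fill_to_high_water;  /* currently in the filling phase */
};

/* Decide whether more entries should be scheduled for creation.
 * Filling begins below 'limit' and, once begun, continues until
 * available + pending reaches 2 * limit (the high water mark). */
static bool should_fill(struct pool *p)
{
	if (p->available < p->limit)
		p->fill_to_high_water = true;
	else if (p->available + p->pending >= 2 * p->limit)
		p->fill_to_high_water = false;

	return p->fill_to_high_water &&
	       p->available + p->pending < 2 * p->limit;
}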
Link: https://lore.kernel.org/r/20200310082238.239865-12-leon@kernel.org Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx5/mlx5_ib.h | 1 + drivers/infiniband/hw/mlx5/mr.c | 41 +++++++++++++++++++++++------------- 2 files changed, 27 insertions(+), 15 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index a22932ffb9c8..1216575292a7 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -700,6 +700,7 @@ struct mlx5_cache_ent { u32 page; u8 disabled:1; + u8 fill_to_high_water:1; /* * - available_mrs is the length of list head, ie the number of MRs diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 424ce3de3865..afacaf8981fa 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -86,6 +86,7 @@ mlx5_ib_create_mkey_cb(struct mlx5_ib_dev *dev, static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); static int mr_cache_max_order(struct mlx5_ib_dev *dev); +static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent); static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev) { @@ -134,11 +135,9 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context) list_add_tail(&mr->list, &ent->head); ent->available_mrs++; ent->total_mrs++; + /* If we are doing fill_to_high_water then keep going. */ + queue_adjust_cache_locked(ent); ent->pending--; - /* - * Creating is always done in response to some demand, so do not call - * queue_adjust_cache_locked(). - */ spin_unlock_irqrestore(&ent->lock, flags); if (!completion_done(&ent->compl)) @@ -384,11 +383,29 @@ static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent) { lockdep_assert_held(&ent->lock); - if (ent->disabled) + if (ent->disabled || READ_ONCE(ent->dev->fill_delay)) return; - if (ent->available_mrs < ent->limit || - ent->available_mrs > 2 * ent->limit) + if (ent->available_mrs < ent->limit) { + ent->fill_to_high_water = true; + queue_work(ent->dev->cache.wq, &ent->work); + } else if (ent->fill_to_high_water && + ent->available_mrs + ent->pending < 2 * ent->limit) { + /* + * Once we start populating due to hitting a low water mark + * continue until we pass the high water mark. + */ queue_work(ent->dev->cache.wq, &ent->work); + } else if (ent->available_mrs == 2 * ent->limit) { + ent->fill_to_high_water = false; + } else if (ent->available_mrs > 2 * ent->limit) { + /* Queue deletion of excess entries */ + ent->fill_to_high_water = false; + if (ent->pending) + queue_delayed_work(ent->dev->cache.wq, &ent->dwork, + msecs_to_jiffies(1000)); + else + queue_work(ent->dev->cache.wq, &ent->work); + } } static void __cache_work_func(struct mlx5_cache_ent *ent) @@ -401,11 +418,11 @@ static void __cache_work_func(struct mlx5_cache_ent *ent) if (ent->disabled) goto out; - if (ent->available_mrs + ent->pending < 2 * ent->limit && + if (ent->fill_to_high_water && + ent->available_mrs + ent->pending < 2 * ent->limit && !READ_ONCE(dev->fill_delay)) { spin_unlock_irq(&ent->lock); err = add_keys(ent, 1); - spin_lock_irq(&ent->lock); if (ent->disabled) goto out; @@ -424,12 +441,6 @@ static void __cache_work_func(struct mlx5_cache_ent *ent) msecs_to_jiffies(1000)); } } - /* - * Once we start populating due to hitting a low water mark - * continue until we pass the high water mark. 
- */ - if (ent->available_mrs + ent->pending < 2 * ent->limit) - queue_work(cache->wq, &ent->work); } else if (ent->available_mrs > 2 * ent->limit) { bool need_delay; -- cgit v1.2.3-58-ga151 From aad719dcf379f1413dcb168413a53fea66e2ef90 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Tue, 10 Mar 2020 10:22:38 +0200 Subject: RDMA/mlx5: Allow MRs to be created in the cache synchronously If the cache is completely out of MRs, and we are running in cache mode, then directly, and synchronously, create an MR that is compatible with the cache bucket using a sleeping mailbox command. This ensures that the thread that is waiting for the MR absolutely will get one. When a MR allocated in this way becomes freed then it is compatible with the cache bucket and will be recycled back into it. Deletes the very buggy ent->compl scheme to create a synchronous MR allocation. Link: https://lore.kernel.org/r/20200310082238.239865-13-leon@kernel.org Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx5/mlx5_ib.h | 1 - drivers/infiniband/hw/mlx5/mr.c | 147 +++++++++++++++++++++-------------- 2 files changed, 87 insertions(+), 61 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 1216575292a7..a5da2d5cf659 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -722,7 +722,6 @@ struct mlx5_cache_ent { struct mlx5_ib_dev *dev; struct work_struct work; struct delayed_work dwork; - struct completion compl; }; struct mlx5_mr_cache { diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index afacaf8981fa..a401931189b7 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -139,14 +139,34 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context) queue_adjust_cache_locked(ent); ent->pending--; spin_unlock_irqrestore(&ent->lock, flags); +} + +static struct mlx5_ib_mr *alloc_cache_mr(struct mlx5_cache_ent *ent, void *mkc) +{ + struct mlx5_ib_mr *mr; + + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) + return NULL; + mr->order = ent->order; + mr->cache_ent = ent; + mr->dev = ent->dev; + + MLX5_SET(mkc, mkc, free, 1); + MLX5_SET(mkc, mkc, umr_en, 1); + MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3); + MLX5_SET(mkc, mkc, access_mode_4_2, (ent->access_mode >> 2) & 0x7); - if (!completion_done(&ent->compl)) - complete(&ent->compl); + MLX5_SET(mkc, mkc, qpn, 0xffffff); + MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt); + MLX5_SET(mkc, mkc, log_page_size, ent->page); + return mr; } +/* Asynchronously schedule new MRs to be populated in the cache. 
*/ static int add_keys(struct mlx5_cache_ent *ent, unsigned int num) { - int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); + size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in); struct mlx5_ib_mr *mr; void *mkc; u32 *in; @@ -159,25 +179,11 @@ static int add_keys(struct mlx5_cache_ent *ent, unsigned int num) mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); for (i = 0; i < num; i++) { - mr = kzalloc(sizeof(*mr), GFP_KERNEL); + mr = alloc_cache_mr(ent, mkc); if (!mr) { err = -ENOMEM; break; } - mr->order = ent->order; - mr->cache_ent = ent; - mr->dev = ent->dev; - - MLX5_SET(mkc, mkc, free, 1); - MLX5_SET(mkc, mkc, umr_en, 1); - MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3); - MLX5_SET(mkc, mkc, access_mode_4_2, - (ent->access_mode >> 2) & 0x7); - - MLX5_SET(mkc, mkc, qpn, 0xffffff); - MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt); - MLX5_SET(mkc, mkc, log_page_size, ent->page); - spin_lock_irq(&ent->lock); if (ent->pending >= MAX_PENDING_REG_MR) { err = -EAGAIN; @@ -205,6 +211,44 @@ static int add_keys(struct mlx5_cache_ent *ent, unsigned int num) return err; } +/* Synchronously create a MR in the cache */ +static struct mlx5_ib_mr *create_cache_mr(struct mlx5_cache_ent *ent) +{ + size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in); + struct mlx5_ib_mr *mr; + void *mkc; + u32 *in; + int err; + + in = kzalloc(inlen, GFP_KERNEL); + if (!in) + return ERR_PTR(-ENOMEM); + mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); + + mr = alloc_cache_mr(ent, mkc); + if (!mr) { + err = -ENOMEM; + goto free_in; + } + + err = mlx5_core_create_mkey(ent->dev->mdev, &mr->mmkey, in, inlen); + if (err) + goto free_mr; + + mr->mmkey.type = MLX5_MKEY_MR; + WRITE_ONCE(ent->dev->cache.last_add, jiffies); + spin_lock_irq(&ent->lock); + ent->total_mrs++; + spin_unlock_irq(&ent->lock); + kfree(in); + return mr; +free_mr: + kfree(mr); +free_in: + kfree(in); + return ERR_PTR(err); +} + static void remove_cache_mr_locked(struct mlx5_cache_ent *ent) { struct mlx5_ib_mr *mr; @@ -427,12 +471,12 @@ static void __cache_work_func(struct mlx5_cache_ent *ent) if (ent->disabled) goto out; if (err) { - if (err == -EAGAIN) { - mlx5_ib_dbg(dev, "returned eagain, order %d\n", - ent->order); - queue_delayed_work(cache->wq, &ent->dwork, - msecs_to_jiffies(3)); - } else { + /* + * EAGAIN only happens if pending is positive, so we + * will be rescheduled from reg_mr_callback(). The only + * failure path here is ENOMEM. 
+ */ + if (err != -EAGAIN) { mlx5_ib_warn( dev, "command failed order %d, err %d\n", @@ -495,36 +539,30 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, struct mlx5_mr_cache *cache = &dev->cache; struct mlx5_cache_ent *ent; struct mlx5_ib_mr *mr; - int err; if (WARN_ON(entry <= MR_CACHE_LAST_STD_ENTRY || entry >= ARRAY_SIZE(cache->ent))) return ERR_PTR(-EINVAL); ent = &cache->ent[entry]; - while (1) { - spin_lock_irq(&ent->lock); - if (list_empty(&ent->head)) { - spin_unlock_irq(&ent->lock); - - err = add_keys(ent, 1); - if (err && err != -EAGAIN) - return ERR_PTR(err); - - wait_for_completion(&ent->compl); - } else { - mr = list_first_entry(&ent->head, struct mlx5_ib_mr, - list); - list_del(&mr->list); - ent->available_mrs--; - queue_adjust_cache_locked(ent); - spin_unlock_irq(&ent->lock); + spin_lock_irq(&ent->lock); + if (list_empty(&ent->head)) { + spin_unlock_irq(&ent->lock); + mr = create_cache_mr(ent); + if (IS_ERR(mr)) return mr; - } + } else { + mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); + list_del(&mr->list); + ent->available_mrs--; + queue_adjust_cache_locked(ent); + spin_unlock_irq(&ent->lock); } + return mr; } -static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_cache_ent *req_ent) +/* Return a MR already available in the cache */ +static struct mlx5_ib_mr *get_cache_mr(struct mlx5_cache_ent *req_ent) { struct mlx5_ib_dev *dev = req_ent->dev; struct mlx5_ib_mr *mr = NULL; @@ -676,7 +714,6 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev) ent->dev = dev; ent->limit = 0; - init_completion(&ent->compl); INIT_WORK(&ent->work, cache_work_func); INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func); @@ -939,26 +976,16 @@ alloc_mr_from_cache(struct ib_pd *pd, struct ib_umem *umem, u64 virt_addr, struct mlx5_ib_dev *dev = to_mdev(pd->device); struct mlx5_cache_ent *ent = mr_cache_ent_from_order(dev, order); struct mlx5_ib_mr *mr; - int err = 0; - int i; if (!ent) return ERR_PTR(-E2BIG); - for (i = 0; i < 1; i++) { - mr = alloc_cached_mr(ent); - if (mr) - break; - - err = add_keys(ent, 1); - if (err && err != -EAGAIN) { - mlx5_ib_warn(dev, "add_keys failed, err %d\n", err); - break; - } + mr = get_cache_mr(ent); + if (!mr) { + mr = create_cache_mr(ent); + if (IS_ERR(mr)) + return mr; } - if (!mr) - return ERR_PTR(-EAGAIN); - mr->ibmr.pd = pd; mr->umem = umem; mr->access_flags = access_flags; -- cgit v1.2.3-58-ga151 From 9e57a9aa694cfc28f9549d2ce587309fdac15bfb Mon Sep 17 00:00:00 2001 From: Weihang Li Date: Tue, 10 Mar 2020 21:06:09 +0800 Subject: RDMA/hns: Fix wrong judgments of udata->outlen These judgments were used to keep the compatibility with older versions of userspace that don't have the field named "cap_flags" in structure hns_roce_ib_create_cq_resp. But it will be wrong to compare outlen with the size of resp if another new field were added in resp. oulen should be compared with the end offset of cap_flags in resp. 
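A small self-contained C sketch of why the end offset of cap_flags is the right thing to compare against, rather than sizeof(*resp); the response layout below is hypothetical and the macro is equivalent to the kernel's offsetofend() helper.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Equivalent to the kernel's offsetofend(): end offset of a member. */
#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

/* Hypothetical response layout: cap_flags existed first, a new field
 * was appended in a later kernel. */
struct create_cq_resp {
	uint32_t cqn;
	uint32_t cap_flags;
	uint64_t new_field_added_later;
};

int main(void)
{
	/* Old userspace only knows the struct up to cap_flags. */
	size_t old_outlen = offsetofend(struct create_cq_resp, cap_flags);

	/* Comparing against sizeof(resp) wrongly rejects old userspace
	 * once new_field_added_later exists; comparing against the end
	 * offset of cap_flags keeps the check stable. */
	printf("outlen >= sizeof(resp)?           %d\n",
	       old_outlen >= sizeof(struct create_cq_resp));
	printf("outlen >= offsetofend(cap_flags)? %d\n",
	       old_outlen >= offsetofend(struct create_cq_resp, cap_flags));
	return 0;
}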
Fixes: 4f8f0d5e33dd ("RDMA/hns: Package the flow of creating cq") Link: https://lore.kernel.org/r/1583845569-47257-1-git-send-email-liweihang@huawei.com Signed-off-by: Weihang Li Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_cq.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c index 5ffe4c996ed3..5bfb52ffd590 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cq.c +++ b/drivers/infiniband/hw/hns/hns_roce_cq.c @@ -257,8 +257,8 @@ static int create_user_cq(struct hns_roce_dev *hr_dev, return ret; } - if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && - (udata->outlen >= sizeof(*resp))) { + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB && + udata->outlen >= offsetofend(typeof(*resp), cap_flags)) { ret = hns_roce_db_map_user(context, udata, ucmd.db_addr, &hr_cq->db); if (ret) { @@ -321,8 +321,8 @@ static void destroy_user_cq(struct hns_roce_dev *hr_dev, struct hns_roce_ucontext *context = rdma_udata_to_drv_context( udata, struct hns_roce_ucontext, ibucontext); - if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && - (udata->outlen >= sizeof(*resp))) + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB && + udata->outlen >= offsetofend(typeof(*resp), cap_flags)) hns_roce_db_unmap_user(context, &hr_cq->db); hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt); -- cgit v1.2.3-58-ga151 From e8dc4e885c459343970b25acd9320fe9ee5492e7 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Tue, 10 Mar 2020 11:25:31 +0200 Subject: RDMA/cm: Fix ordering of xa_alloc_cyclic() in ib_create_cm_id() xa_alloc_cyclic() is a SMP release to be paired with some later acquire during xa_load() as part of cm_acquire_id(). As such, xa_alloc_cyclic() must be done after the cm_id is fully initialized, in particular, it absolutely must be after the refcount_set(), otherwise the refcount_inc() in cm_acquire_id() may not see the set. As there are several cases where a reader will be able to use the id.local_id after cm_acquire_id in the IB_CM_IDLE state there needs to be an unfortunate split into a NULL allocate and a finalizing xa_store. 
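The ordering requirement above is the standard publish-after-initialize (release/acquire) pattern. A minimal userspace analogy with C11 atomics, standing in for the xa_store()/xa_load() pairing and the refcount; it is a sketch, not the xarray API.

#include <stdatomic.h>
#include <stddef.h>

struct obj {
	int refcount;	/* stands in for cm_id_priv->refcount */
	int state;
};

/* Table slot that readers look up concurrently. */
static _Atomic(struct obj *) slot;

/* Writer: all initialization must happen before the release store,
 * otherwise a reader doing an acquire load may see a published pointer
 * whose refcount has not been set yet. */
void publish(struct obj *o)
{
	o->refcount = 1;
	o->state = 0;
	atomic_store_explicit(&slot, o, memory_order_release);
}

/* Reader: the acquire load pairs with the release store above, so every
 * write done before publish() is visible once the pointer is seen. */
struct obj *lookup(void)
{
	return atomic_load_explicit(&slot, memory_order_acquire);
}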
Fixes: a977049dacde ("[PATCH] IB: Add the kernel CM implementation") Link: https://lore.kernel.org/r/20200310092545.251365-2-leon@kernel.org Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cm.c | 27 +++++++++++---------------- 1 file changed, 11 insertions(+), 16 deletions(-) diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 21ede1c08eea..db627fa3cd39 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -571,18 +571,6 @@ static int cm_init_av_by_path(struct sa_path_rec *path, return 0; } -static int cm_alloc_id(struct cm_id_private *cm_id_priv) -{ - int err; - u32 id; - - err = xa_alloc_cyclic_irq(&cm.local_id_table, &id, cm_id_priv, - xa_limit_32b, &cm.local_id_next, GFP_KERNEL); - - cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand; - return err; -} - static u32 cm_local_id(__be32 local_id) { return (__force u32) (local_id ^ cm.random_id_operand); @@ -824,6 +812,7 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device, void *context) { struct cm_id_private *cm_id_priv; + u32 id; int ret; cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL); @@ -835,9 +824,6 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device, cm_id_priv->id.cm_handler = cm_handler; cm_id_priv->id.context = context; cm_id_priv->id.remote_cm_qpn = 1; - ret = cm_alloc_id(cm_id_priv); - if (ret) - goto error; spin_lock_init(&cm_id_priv->lock); init_completion(&cm_id_priv->comp); @@ -846,11 +832,20 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device, INIT_LIST_HEAD(&cm_id_priv->altr_list); atomic_set(&cm_id_priv->work_count, -1); refcount_set(&cm_id_priv->refcount, 1); + + ret = xa_alloc_cyclic_irq(&cm.local_id_table, &id, NULL, xa_limit_32b, + &cm.local_id_next, GFP_KERNEL); + if (ret) + goto error; + cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand; + xa_store_irq(&cm.local_id_table, cm_local_id(cm_id_priv->id.local_id), + cm_id_priv, GFP_KERNEL); + return &cm_id_priv->id; error: kfree(cm_id_priv); - return ERR_PTR(-ENOMEM); + return ERR_PTR(ret); } EXPORT_SYMBOL(ib_create_cm_id); -- cgit v1.2.3-58-ga151 From ca21cb7fb127fcba013acc66f9ebe2e3a9ae1e49 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Tue, 10 Mar 2020 11:25:32 +0200 Subject: RDMA/cm: Fix checking for allowed duplicate listens The test here typod the cm_id_priv to use, it used the one that was freshly allocated. By definition the allocated one has the matching cm_handler and zero context, so the condition was always true. Instead check that the existing listening ID is compatible with the proposed handler so that it can be shared, as was originally intended. 
Fixes: 067b171b8679 ("IB/cm: Share listening CM IDs") Link: https://lore.kernel.org/r/20200310092545.251365-3-leon@kernel.org Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cm.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index db627fa3cd39..ba35cad951e6 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -1181,7 +1181,8 @@ struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device, /* Find an existing ID */ cm_id_priv = cm_find_listen(device, service_id); if (cm_id_priv) { - if (cm_id->cm_handler != cm_handler || cm_id->context) { + if (cm_id_priv->id.cm_handler != cm_handler || + cm_id_priv->id.context) { /* Sharing an ib_cm_id with different handlers is not * supported */ spin_unlock_irqrestore(&cm.lock, flags); -- cgit v1.2.3-58-ga151 From bede86a39d9dc3387ac00dcb8e1ac221676b2f25 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Tue, 10 Mar 2020 11:25:33 +0200 Subject: RDMA/cm: Remove a race freeing timewait_info When creating a cm_id during REQ the id immediately becomes visible to the other MAD handlers, and shortly after the state is moved to IB_CM_REQ_RCVD This allows cm_rej_handler() to run concurrently and free the work: CPU 0 CPU1 cm_req_handler() ib_create_cm_id() cm_match_req() id_priv->state = IB_CM_REQ_RCVD cm_rej_handler() cm_acquire_id() spin_lock(&id_priv->lock) switch (id_priv->state) case IB_CM_REQ_RCVD: cm_reset_to_idle() kfree(id_priv->timewait_info); goto destroy destroy: kfree(id_priv->timewait_info); id_priv->timewait_info = NULL Causing a double free or worse. Do not free the timewait_info without also holding the id_priv->lock. Simplify this entire flow by making the free unconditional during cm_destroy_id() and removing the confusing special case error unwind during creation of the timewait_info. This also fixes a leak of the timewait if cm_destroy_id() is called in IB_CM_ESTABLISHED with an XRC TGT QP. The state machine will be left in ESTABLISHED while it needed to transition through IB_CM_TIMEWAIT to release the timewait pointer. Also fix a leak of the timewait_info if the caller mis-uses the API and does ib_send_cm_reqs(). 
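The rule the fix applies can be stated generically: a pointer that concurrent handlers may act on under a lock must also be freed and cleared under that same lock. A short illustrative sketch with a hypothetical structure, not the cm.c code:

#include <linux/spinlock.h>
#include <linux/slab.h>

/* Hypothetical container, not the real cm_id_private. */
struct some_priv {
	spinlock_t lock;
	void *timewait_info;
};

static void free_timewait(struct some_priv *priv)
{
	spin_lock_irq(&priv->lock);
	/* Free and clear under the same lock the handlers take: they
	 * either see a valid pointer or NULL, never a dangling one, and
	 * a second call here is harmlessly a no-op. */
	kfree(priv->timewait_info);
	priv->timewait_info = NULL;
	spin_unlock_irq(&priv->lock);
}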
Fixes: a977049dacde ("[PATCH] IB: Add the kernel CM implementation") Link: https://lore.kernel.org/r/20200310092545.251365-4-leon@kernel.org Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cm.c | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index ba35cad951e6..0685cb2a88c0 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -1054,14 +1054,22 @@ retest: break; } - spin_lock_irq(&cm.lock); + spin_lock_irq(&cm_id_priv->lock); + spin_lock(&cm.lock); + /* Required for cleanup paths related cm_req_handler() */ + if (cm_id_priv->timewait_info) { + cm_cleanup_timewait(cm_id_priv->timewait_info); + kfree(cm_id_priv->timewait_info); + cm_id_priv->timewait_info = NULL; + } if (!list_empty(&cm_id_priv->altr_list) && (!cm_id_priv->altr_send_port_not_ready)) list_del(&cm_id_priv->altr_list); if (!list_empty(&cm_id_priv->prim_list) && (!cm_id_priv->prim_send_port_not_ready)) list_del(&cm_id_priv->prim_list); - spin_unlock_irq(&cm.lock); + spin_unlock(&cm.lock); + spin_unlock_irq(&cm_id_priv->lock); cm_free_id(cm_id->local_id); cm_deref_id(cm_id_priv); @@ -1410,7 +1418,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id, /* Verify that we're not in timewait. */ cm_id_priv = container_of(cm_id, struct cm_id_private, id); spin_lock_irqsave(&cm_id_priv->lock, flags); - if (cm_id->state != IB_CM_IDLE) { + if (cm_id->state != IB_CM_IDLE || WARN_ON(cm_id_priv->timewait_info)) { spin_unlock_irqrestore(&cm_id_priv->lock, flags); ret = -EINVAL; goto out; @@ -1428,12 +1436,12 @@ int ib_send_cm_req(struct ib_cm_id *cm_id, param->ppath_sgid_attr, &cm_id_priv->av, cm_id_priv); if (ret) - goto error1; + goto out; if (param->alternate_path) { ret = cm_init_av_by_path(param->alternate_path, NULL, &cm_id_priv->alt_av, cm_id_priv); if (ret) - goto error1; + goto out; } cm_id->service_id = param->service_id; cm_id->service_mask = ~cpu_to_be64(0); @@ -1451,7 +1459,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id, ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg); if (ret) - goto error1; + goto out; req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad; cm_format_req(req_msg, cm_id_priv, param); @@ -1474,7 +1482,6 @@ int ib_send_cm_req(struct ib_cm_id *cm_id, return 0; error2: cm_free_msg(cm_id_priv->msg); -error1: kfree(cm_id_priv->timewait_info); out: return ret; } EXPORT_SYMBOL(ib_send_cm_req); @@ -2003,7 +2010,7 @@ static int cm_req_handler(struct cm_work *work) pr_debug("%s: local_id %d, no listen_cm_id_priv\n", __func__, be32_to_cpu(cm_id->local_id)); ret = -EINVAL; - goto free_timeinfo; + goto destroy; } cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler; @@ -2093,8 +2100,6 @@ static int cm_req_handler(struct cm_work *work) rejected: refcount_dec(&cm_id_priv->refcount); cm_deref_id(listen_cm_id_priv); -free_timeinfo: - kfree(cm_id_priv->timewait_info); destroy: ib_destroy_cm_id(cm_id); return ret; -- cgit v1.2.3-58-ga151 From 2305d6864aa54c1887563d9096e4b746bc776d04 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Tue, 10 Mar 2020 11:25:34 +0200 Subject: RDMA/cm: Make the destroy_id flow more robust Too much of the destruction is very carefully sensitive to the state and various other things. Move more code to the unconditional path and add several WARN_ONs to check consistency. 
Link: https://lore.kernel.org/r/20200310092545.251365-5-leon@kernel.org Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cm.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 0685cb2a88c0..cad3c58e660c 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -825,6 +825,8 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device, cm_id_priv->id.context = context; cm_id_priv->id.remote_cm_qpn = 1; + RB_CLEAR_NODE(&cm_id_priv->service_node); + RB_CLEAR_NODE(&cm_id_priv->sidr_id_node); spin_lock_init(&cm_id_priv->lock); init_completion(&cm_id_priv->comp); INIT_LIST_HEAD(&cm_id_priv->work_list); @@ -982,11 +984,13 @@ retest: spin_lock_irq(&cm.lock); if (--cm_id_priv->listen_sharecount > 0) { /* The id is still shared. */ + WARN_ON(refcount_read(&cm_id_priv->refcount) == 1); cm_deref_id(cm_id_priv); spin_unlock_irq(&cm.lock); return; } rb_erase(&cm_id_priv->service_node, &cm.listen_service_table); + RB_CLEAR_NODE(&cm_id_priv->service_node); spin_unlock_irq(&cm.lock); break; case IB_CM_SIDR_REQ_SENT: @@ -997,11 +1001,6 @@ retest: case IB_CM_SIDR_REQ_RCVD: spin_unlock_irq(&cm_id_priv->lock); cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT); - spin_lock_irq(&cm.lock); - if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) - rb_erase(&cm_id_priv->sidr_id_node, - &cm.remote_sidr_table); - spin_unlock_irq(&cm.lock); break; case IB_CM_REQ_SENT: case IB_CM_MRA_REQ_RCVD: @@ -1068,6 +1067,10 @@ retest: if (!list_empty(&cm_id_priv->prim_list) && (!cm_id_priv->prim_send_port_not_ready)) list_del(&cm_id_priv->prim_list); + WARN_ON(cm_id_priv->listen_sharecount); + WARN_ON(!RB_EMPTY_NODE(&cm_id_priv->service_node)); + if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) + rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table); spin_unlock(&cm.lock); spin_unlock_irq(&cm_id_priv->lock); -- cgit v1.2.3-58-ga151 From 98f67156a80f37db70ec64787020b1f9bc8aea8c Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Tue, 10 Mar 2020 11:25:35 +0200 Subject: RDMA/cm: Simplify establishing a listen cm_id Any manipulation of cm_id->state must be done under the cm_id_priv->lock, the two routines that added listens did not follow this rule, because they never participate in any concurrent access around the state. However, since this exception makes the code hard to understand, simplify the flow so that it can be fully locked: - Move manipulation of listen_sharecount into cm_insert_listen() so it is trivially under the cm.lock without having to expose the cm.lock to the caller. - Push the cm.lock down into cm_insert_listen() and have the function increment the reference count before returning an existing pointer. - Split ib_cm_listen() into an cm_init_listen() and do not call ib_cm_listen() from ib_cm_insert_listen() - Make both ib_cm_listen() and ib_cm_insert_listen() directly call cm_insert_listen() under their cm_id_priv->lock which does both a collision detect and, if needed, the insert (atomically) - Enclose all state manipulation within the cm_id_priv->lock, notice this set can be done safely after cm_insert_listen() as no reader is allowed to read the state without holding the lock. - Do not set the listen cm_id in the xarray, as it is never correct to look it up. This makes the concurrency simpler to understand. Many needless error unwinds are removed in the process. 
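The key shape here is that collision detection and insertion form a single critical section inside cm_insert_listen(), so callers never do a separate "check then insert". A generic userspace sketch of that shape, using a plain mutex-protected list rather than the rbtree; names are illustrative.

#include <pthread.h>

struct entry {
	struct entry *next;
	unsigned long long service_id;
	int sharecount;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *table;

/* Atomically either share an existing entry (bumping its sharecount) or
 * insert the new one; returns whichever entry the caller ends up using.
 * Doing both under one lock hold removes the check-then-insert race. */
struct entry *insert_or_share(struct entry *new_entry)
{
	struct entry *cur;

	pthread_mutex_lock(&table_lock);
	for (cur = table; cur; cur = cur->next) {
		if (cur->service_id == new_entry->service_id) {
			cur->sharecount++;
			pthread_mutex_unlock(&table_lock);
			return cur;	/* caller discards new_entry */
		}
	}
	new_entry->sharecount = 1;
	new_entry->next = table;
	table = new_entry;
	pthread_mutex_unlock(&table_lock);
	return new_entry;
}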
Link: https://lore.kernel.org/r/20200310092545.251365-6-leon@kernel.org Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cm.c | 199 +++++++++++++++++++++++++------------------ 1 file changed, 116 insertions(+), 83 deletions(-) diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index cad3c58e660c..d2783d5dd4dc 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -620,22 +620,44 @@ static int be64_gt(__be64 a, __be64 b) return (__force u64) a > (__force u64) b; } -static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv) +/* + * Inserts a new cm_id_priv into the listen_service_table. Returns cm_id_priv + * if the new ID was inserted, NULL if it could not be inserted due to a + * collision, or the existing cm_id_priv ready for shared usage. + */ +static struct cm_id_private *cm_insert_listen(struct cm_id_private *cm_id_priv, + ib_cm_handler shared_handler) { struct rb_node **link = &cm.listen_service_table.rb_node; struct rb_node *parent = NULL; struct cm_id_private *cur_cm_id_priv; __be64 service_id = cm_id_priv->id.service_id; __be64 service_mask = cm_id_priv->id.service_mask; + unsigned long flags; + spin_lock_irqsave(&cm.lock, flags); while (*link) { parent = *link; cur_cm_id_priv = rb_entry(parent, struct cm_id_private, service_node); if ((cur_cm_id_priv->id.service_mask & service_id) == (service_mask & cur_cm_id_priv->id.service_id) && - (cm_id_priv->id.device == cur_cm_id_priv->id.device)) + (cm_id_priv->id.device == cur_cm_id_priv->id.device)) { + /* + * Sharing an ib_cm_id with different handlers is not + * supported + */ + if (cur_cm_id_priv->id.cm_handler != shared_handler || + cur_cm_id_priv->id.context || + WARN_ON(!cur_cm_id_priv->id.cm_handler)) { + spin_unlock_irqrestore(&cm.lock, flags); + return NULL; + } + refcount_inc(&cur_cm_id_priv->refcount); + cur_cm_id_priv->listen_sharecount++; + spin_unlock_irqrestore(&cm.lock, flags); return cur_cm_id_priv; + } if (cm_id_priv->id.device < cur_cm_id_priv->id.device) link = &(*link)->rb_left; @@ -648,9 +670,11 @@ static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv) else link = &(*link)->rb_right; } + cm_id_priv->listen_sharecount++; rb_link_node(&cm_id_priv->service_node, parent, link); rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table); - return NULL; + spin_unlock_irqrestore(&cm.lock, flags); + return cm_id_priv; } static struct cm_id_private * cm_find_listen(struct ib_device *device, @@ -807,9 +831,9 @@ static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv, ib_send_cm_sidr_rep(&cm_id_priv->id, ¶m); } -struct ib_cm_id *ib_create_cm_id(struct ib_device *device, - ib_cm_handler cm_handler, - void *context) +static struct cm_id_private *cm_alloc_id_priv(struct ib_device *device, + ib_cm_handler cm_handler, + void *context) { struct cm_id_private *cm_id_priv; u32 id; @@ -840,15 +864,37 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device, if (ret) goto error; cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand; - xa_store_irq(&cm.local_id_table, cm_local_id(cm_id_priv->id.local_id), - cm_id_priv, GFP_KERNEL); - return &cm_id_priv->id; + return cm_id_priv; error: kfree(cm_id_priv); return ERR_PTR(ret); } + +/* + * Make the ID visible to the MAD handlers and other threads that use the + * xarray. 
+ */ +static void cm_finalize_id(struct cm_id_private *cm_id_priv) +{ + xa_store_irq(&cm.local_id_table, cm_local_id(cm_id_priv->id.local_id), + cm_id_priv, GFP_KERNEL); +} + +struct ib_cm_id *ib_create_cm_id(struct ib_device *device, + ib_cm_handler cm_handler, + void *context) +{ + struct cm_id_private *cm_id_priv; + + cm_id_priv = cm_alloc_id_priv(device, cm_handler, context); + if (IS_ERR(cm_id_priv)) + return ERR_CAST(cm_id_priv); + + cm_finalize_id(cm_id_priv); + return &cm_id_priv->id; +} EXPORT_SYMBOL(ib_create_cm_id); static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv) @@ -1092,8 +1138,27 @@ void ib_destroy_cm_id(struct ib_cm_id *cm_id) } EXPORT_SYMBOL(ib_destroy_cm_id); +static int cm_init_listen(struct cm_id_private *cm_id_priv, __be64 service_id, + __be64 service_mask) +{ + service_mask = service_mask ? service_mask : ~cpu_to_be64(0); + service_id &= service_mask; + if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID && + (service_id != IB_CM_ASSIGN_SERVICE_ID)) + return -EINVAL; + + if (service_id == IB_CM_ASSIGN_SERVICE_ID) { + cm_id_priv->id.service_id = cpu_to_be64(cm.listen_service_id++); + cm_id_priv->id.service_mask = ~cpu_to_be64(0); + } else { + cm_id_priv->id.service_id = service_id; + cm_id_priv->id.service_mask = service_mask; + } + return 0; +} + /** - * __ib_cm_listen - Initiates listening on the specified service ID for + * ib_cm_listen - Initiates listening on the specified service ID for * connection and service ID resolution requests. * @cm_id: Connection identifier associated with the listen request. * @service_id: Service identifier matched against incoming connection @@ -1105,51 +1170,33 @@ EXPORT_SYMBOL(ib_destroy_cm_id); * exactly. This parameter is ignored if %service_id is set to * IB_CM_ASSIGN_SERVICE_ID. */ -static int __ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, - __be64 service_mask) +int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask) { - struct cm_id_private *cm_id_priv, *cur_cm_id_priv; - int ret = 0; - - service_mask = service_mask ? 
service_mask : ~cpu_to_be64(0); - service_id &= service_mask; - if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID && - (service_id != IB_CM_ASSIGN_SERVICE_ID)) - return -EINVAL; - - cm_id_priv = container_of(cm_id, struct cm_id_private, id); - if (cm_id->state != IB_CM_IDLE) - return -EINVAL; - - cm_id->state = IB_CM_LISTEN; - ++cm_id_priv->listen_sharecount; + struct cm_id_private *cm_id_priv = + container_of(cm_id, struct cm_id_private, id); + unsigned long flags; + int ret; - if (service_id == IB_CM_ASSIGN_SERVICE_ID) { - cm_id->service_id = cpu_to_be64(cm.listen_service_id++); - cm_id->service_mask = ~cpu_to_be64(0); - } else { - cm_id->service_id = service_id; - cm_id->service_mask = service_mask; + spin_lock_irqsave(&cm_id_priv->lock, flags); + if (cm_id_priv->id.state != IB_CM_IDLE) { + ret = -EINVAL; + goto out; } - cur_cm_id_priv = cm_insert_listen(cm_id_priv); - if (cur_cm_id_priv) { - cm_id->state = IB_CM_IDLE; - --cm_id_priv->listen_sharecount; + ret = cm_init_listen(cm_id_priv, service_id, service_mask); + if (ret) + goto out; + + if (!cm_insert_listen(cm_id_priv, NULL)) { ret = -EBUSY; + goto out; } - return ret; -} -int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask) -{ - unsigned long flags; - int ret; - - spin_lock_irqsave(&cm.lock, flags); - ret = __ib_cm_listen(cm_id, service_id, service_mask); - spin_unlock_irqrestore(&cm.lock, flags); + cm_id_priv->id.state = IB_CM_LISTEN; + ret = 0; +out: + spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } EXPORT_SYMBOL(ib_cm_listen); @@ -1174,52 +1221,38 @@ struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device, ib_cm_handler cm_handler, __be64 service_id) { + struct cm_id_private *listen_id_priv; struct cm_id_private *cm_id_priv; - struct ib_cm_id *cm_id; - unsigned long flags; int err = 0; /* Create an ID in advance, since the creation may sleep */ - cm_id = ib_create_cm_id(device, cm_handler, NULL); - if (IS_ERR(cm_id)) - return cm_id; - - spin_lock_irqsave(&cm.lock, flags); + cm_id_priv = cm_alloc_id_priv(device, cm_handler, NULL); + if (IS_ERR(cm_id_priv)) + return ERR_CAST(cm_id_priv); - if (service_id == IB_CM_ASSIGN_SERVICE_ID) - goto new_id; + err = cm_init_listen(cm_id_priv, service_id, 0); + if (err) + return ERR_PTR(err); - /* Find an existing ID */ - cm_id_priv = cm_find_listen(device, service_id); - if (cm_id_priv) { - if (cm_id_priv->id.cm_handler != cm_handler || - cm_id_priv->id.context) { - /* Sharing an ib_cm_id with different handlers is not - * supported */ - spin_unlock_irqrestore(&cm.lock, flags); - ib_destroy_cm_id(cm_id); + spin_lock_irq(&cm_id_priv->lock); + listen_id_priv = cm_insert_listen(cm_id_priv, cm_handler); + if (listen_id_priv != cm_id_priv) { + spin_unlock_irq(&cm_id_priv->lock); + ib_destroy_cm_id(&cm_id_priv->id); + if (!listen_id_priv) return ERR_PTR(-EINVAL); - } - refcount_inc(&cm_id_priv->refcount); - ++cm_id_priv->listen_sharecount; - spin_unlock_irqrestore(&cm.lock, flags); - - ib_destroy_cm_id(cm_id); - cm_id = &cm_id_priv->id; - return cm_id; + return &listen_id_priv->id; } + cm_id_priv->id.state = IB_CM_LISTEN; + spin_unlock_irq(&cm_id_priv->lock); -new_id: - /* Use newly created ID */ - err = __ib_cm_listen(cm_id, service_id, 0); - - spin_unlock_irqrestore(&cm.lock, flags); + /* + * A listen ID does not need to be in the xarray since it does not + * receive mads, is not placed in the remote_id or remote_qpn rbtree, + * and does not enter timewait. 
+ */ - if (err) { - ib_destroy_cm_id(cm_id); - return ERR_PTR(err); - } - return cm_id; + return &cm_id_priv->id; } EXPORT_SYMBOL(ib_cm_insert_listen); -- cgit v1.2.3-58-ga151 From 153a2e432e3d323b633155633429dcaf71ea7c53 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Tue, 10 Mar 2020 11:25:36 +0200 Subject: RDMA/cm: Read id.state under lock when doing pr_debug() The lock should not be dropped before doing the pr_debug() print as it is accessing data protected by the lock, such as id.state. Fixes: 119bf81793ea ("IB/cm: Add debug prints to ib_cm") Link: https://lore.kernel.org/r/20200310092545.251365-7-leon@kernel.org Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cm.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index d2783d5dd4dc..79ad3d64837e 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -2394,13 +2394,13 @@ static int cm_rep_handler(struct cm_work *work) case IB_CM_MRA_REQ_RCVD: break; default: - spin_unlock_irq(&cm_id_priv->lock); ret = -EINVAL; pr_debug( "%s: cm_id_priv->id.state: %d, local_comm_id %d, remote_comm_id %d\n", __func__, cm_id_priv->id.state, IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg), IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)); + spin_unlock_irq(&cm_id_priv->lock); goto error; } @@ -2666,10 +2666,10 @@ int ib_send_cm_drep(struct ib_cm_id *cm_id, cm_id_priv = container_of(cm_id, struct cm_id_private, id); spin_lock_irqsave(&cm_id_priv->lock, flags); if (cm_id->state != IB_CM_DREQ_RCVD) { - spin_unlock_irqrestore(&cm_id_priv->lock, flags); - kfree(data); pr_debug("%s: local_id %d, cm_idcm_id->state(%d) != IB_CM_DREQ_RCVD\n", __func__, be32_to_cpu(cm_id->local_id), cm_id->state); + spin_unlock_irqrestore(&cm_id_priv->lock, flags); + kfree(data); return -EINVAL; } @@ -3005,10 +3005,10 @@ static int cm_rej_handler(struct cm_work *work) } /* fall through */ default: - spin_unlock_irq(&cm_id_priv->lock); pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n", __func__, be32_to_cpu(cm_id_priv->id.local_id), cm_id_priv->id.state); + spin_unlock_irq(&cm_id_priv->lock); ret = -EINVAL; goto out; } -- cgit v1.2.3-58-ga151 From 083bfdbfd5ee2d008d256975d962ef75ddbbc353 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Tue, 10 Mar 2020 11:25:37 +0200 Subject: RDMA/cm: Make it clear that there is no concurrency in cm_sidr_req_handler() ib_create_cm_id() immediately places the id in the xarray, so it is visible to network traffic. The state is initially set to IB_CM_IDLE and all the MAD handlers will test this state under lock and refuse to advance from IDLE, so adding to the xarray is harmless. Further, the set to IB_CM_SIDR_REQ_RCVD also excludes all MAD handlers. However, the local_id isn't even used for SIDR mode, and there will be no input MADs related to the newly created ID. So, make the whole flow simpler so it can be understood: - Do not put the SIDR cm_id in the xarray. This directly shows that there is no concurrency - Delete the confusing work_count and pending_list manipulations. This mechanism is only used by MAD handlers and timewait, neither of which apply to SIDR. - Add a few comments and rename 'cur_cm_id_priv' to 'listen_cm_id_priv' - Move other loose sets up to immediately after cm_id creation so that the cm_id is fully configured right away. This fixes an oversight where the service_id will not be returned back on a IB_SIDR_UNSUPPORTED reject. 
Link: https://lore.kernel.org/r/20200310092545.251365-8-leon@kernel.org Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cm.c | 64 +++++++++++++++++++++++++------------------- 1 file changed, 37 insertions(+), 27 deletions(-) diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 79ad3d64837e..36e7ce522d04 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -3535,20 +3535,27 @@ static void cm_format_sidr_req_event(struct cm_work *work, static int cm_sidr_req_handler(struct cm_work *work) { - struct ib_cm_id *cm_id; - struct cm_id_private *cm_id_priv, *cur_cm_id_priv; + struct cm_id_private *cm_id_priv, *listen_cm_id_priv; struct cm_sidr_req_msg *sidr_req_msg; struct ib_wc *wc; int ret; - cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL); - if (IS_ERR(cm_id)) - return PTR_ERR(cm_id); - cm_id_priv = container_of(cm_id, struct cm_id_private, id); + cm_id_priv = + cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL); + if (IS_ERR(cm_id_priv)) + return PTR_ERR(cm_id_priv); /* Record SGID/SLID and request ID for lookup. */ sidr_req_msg = (struct cm_sidr_req_msg *) work->mad_recv_wc->recv_buf.mad; + + cm_id_priv->id.remote_id = + cpu_to_be32(IBA_GET(CM_SIDR_REQ_REQUESTID, sidr_req_msg)); + cm_id_priv->id.service_id = + cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg)); + cm_id_priv->id.service_mask = ~cpu_to_be64(0); + cm_id_priv->tid = sidr_req_msg->hdr.tid; + wc = work->mad_recv_wc->wc; cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid); cm_id_priv->av.dgid.global.interface_id = 0; @@ -3558,41 +3565,44 @@ static int cm_sidr_req_handler(struct cm_work *work) if (ret) goto out; - cm_id_priv->id.remote_id = - cpu_to_be32(IBA_GET(CM_SIDR_REQ_REQUESTID, sidr_req_msg)); - cm_id_priv->tid = sidr_req_msg->hdr.tid; - atomic_inc(&cm_id_priv->work_count); - spin_lock_irq(&cm.lock); - cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv); - if (cur_cm_id_priv) { + listen_cm_id_priv = cm_insert_remote_sidr(cm_id_priv); + if (listen_cm_id_priv) { spin_unlock_irq(&cm.lock); atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_SIDR_REQ_COUNTER]); goto out; /* Duplicate message. */ } cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD; - cur_cm_id_priv = cm_find_listen( - cm_id->device, - cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg))); - if (!cur_cm_id_priv) { + listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device, + cm_id_priv->id.service_id); + if (!listen_cm_id_priv) { spin_unlock_irq(&cm.lock); cm_reject_sidr_req(cm_id_priv, IB_SIDR_UNSUPPORTED); goto out; /* No match. */ } - refcount_inc(&cur_cm_id_priv->refcount); - refcount_inc(&cm_id_priv->refcount); + refcount_inc(&listen_cm_id_priv->refcount); spin_unlock_irq(&cm.lock); - cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler; - cm_id_priv->id.context = cur_cm_id_priv->id.context; - cm_id_priv->id.service_id = - cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg)); - cm_id_priv->id.service_mask = ~cpu_to_be64(0); + cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler; + cm_id_priv->id.context = listen_cm_id_priv->id.context; - cm_format_sidr_req_event(work, cm_id_priv, &cur_cm_id_priv->id); - cm_process_work(cm_id_priv, work); - cm_deref_id(cur_cm_id_priv); + /* + * A SIDR ID does not need to be in the xarray since it does not receive + * mads, is not placed in the remote_id or remote_qpn rbtree, and does + * not enter timewait. 
+ */ + + cm_format_sidr_req_event(work, cm_id_priv, &listen_cm_id_priv->id); + ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event); + cm_free_work(work); + /* + * A pointer to the listen_cm_id is held in the event, so this deref + * must be after the event is delivered above. + */ + cm_deref_id(listen_cm_id_priv); + if (ret) + cm_destroy_id(&cm_id_priv->id, ret); return 0; out: ib_destroy_cm_id(&cm_id_priv->id); -- cgit v1.2.3-58-ga151 From c206f8bad15d30f1e35821c21a2fb146e4668ebf Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Tue, 10 Mar 2020 11:25:38 +0200 Subject: RDMA/cm: Make it clearer how concurrency works in cm_req_handler() ib_crate_cm_id() immediately places the id in the xarray, and publishes it into the remote_id and remote_qpn rbtrees. This makes it visible to other threads before it is fully set up. It appears the thinking here was that the states IB_CM_IDLE and IB_CM_REQ_RCVD do not allow any MAD handler or lookup in the remote_id and remote_qpn rbtrees to advance. However, cm_rej_handler() does take an action on IB_CM_REQ_RCVD, which is not really expected by the design. Make the whole thing clearer: - Keep the new cm_id out of the xarray until it is completely set up. This directly prevents MAD handlers and all rbtree lookups from seeing the pointer. - Move all the trivial setup right to the top so it is obviously done before any concurrency begins - Move the mutation of the cm_id_priv out of cm_match_id() and into the caller so the state transition is obvious - Place the manipulation of the work_list at the end, under lock, after the cm_id is placed in the xarray. The work_count cannot change on an ID outside the xarray. - Add some comments Link: https://lore.kernel.org/r/20200310092545.251365-9-leon@kernel.org Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cm.c | 99 +++++++++++++++++++++++++------------------- 1 file changed, 57 insertions(+), 42 deletions(-) diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 36e7ce522d04..3f125044d35e 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -1958,14 +1958,10 @@ static struct cm_id_private * cm_match_req(struct cm_work *work, cm_issue_rej(work->port, work->mad_recv_wc, IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ, NULL, 0); - goto out; + return NULL; } refcount_inc(&listen_cm_id_priv->refcount); - refcount_inc(&cm_id_priv->refcount); - cm_id_priv->id.state = IB_CM_REQ_RCVD; - atomic_inc(&cm_id_priv->work_count); spin_unlock_irq(&cm.lock); -out: return listen_cm_id_priv; } @@ -2007,7 +2003,6 @@ static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc) static int cm_req_handler(struct cm_work *work) { - struct ib_cm_id *cm_id; struct cm_id_private *cm_id_priv, *listen_cm_id_priv; struct cm_req_msg *req_msg; const struct ib_global_route *grh; @@ -2016,13 +2011,33 @@ static int cm_req_handler(struct cm_work *work) req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad; - cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL); - if (IS_ERR(cm_id)) - return PTR_ERR(cm_id); + cm_id_priv = + cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL); + if (IS_ERR(cm_id_priv)) + return PTR_ERR(cm_id_priv); - cm_id_priv = container_of(cm_id, struct cm_id_private, id); cm_id_priv->id.remote_id = cpu_to_be32(IBA_GET(CM_REQ_LOCAL_COMM_ID, req_msg)); + cm_id_priv->id.service_id = + cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg)); + cm_id_priv->id.service_mask = 
~cpu_to_be64(0); + cm_id_priv->tid = req_msg->hdr.tid; + cm_id_priv->timeout_ms = cm_convert_to_ms( + IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg)); + cm_id_priv->max_cm_retries = IBA_GET(CM_REQ_MAX_CM_RETRIES, req_msg); + cm_id_priv->remote_qpn = + cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg)); + cm_id_priv->initiator_depth = + IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg); + cm_id_priv->responder_resources = + IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg); + cm_id_priv->path_mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg); + cm_id_priv->pkey = cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg)); + cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg)); + cm_id_priv->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg); + cm_id_priv->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg); + cm_id_priv->qp_type = cm_req_get_qp_type(req_msg); + ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc, work->mad_recv_wc->recv_buf.grh, &cm_id_priv->av); @@ -2034,27 +2049,26 @@ static int cm_req_handler(struct cm_work *work) ret = PTR_ERR(cm_id_priv->timewait_info); goto destroy; } - cm_id_priv->timewait_info->work.remote_id = - cpu_to_be32(IBA_GET(CM_REQ_LOCAL_COMM_ID, req_msg)); + cm_id_priv->timewait_info->work.remote_id = cm_id_priv->id.remote_id; cm_id_priv->timewait_info->remote_ca_guid = cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg)); - cm_id_priv->timewait_info->remote_qpn = - cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg)); + cm_id_priv->timewait_info->remote_qpn = cm_id_priv->remote_qpn; + + /* + * Note that the ID pointer is not in the xarray at this point, + * so this set is only visible to the local thread. + */ + cm_id_priv->id.state = IB_CM_REQ_RCVD; listen_cm_id_priv = cm_match_req(work, cm_id_priv); if (!listen_cm_id_priv) { pr_debug("%s: local_id %d, no listen_cm_id_priv\n", __func__, - be32_to_cpu(cm_id->local_id)); + be32_to_cpu(cm_id_priv->id.local_id)); + cm_id_priv->id.state = IB_CM_IDLE; ret = -EINVAL; goto destroy; } - cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler; - cm_id_priv->id.context = listen_cm_id_priv->id.context; - cm_id_priv->id.service_id = - cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg)); - cm_id_priv->id.service_mask = ~cpu_to_be64(0); - cm_process_routed_req(req_msg, work->mad_recv_wc->wc); memset(&work->path[0], 0, sizeof(work->path[0])); @@ -2092,10 +2106,10 @@ static int cm_req_handler(struct cm_work *work) work->port->port_num, 0, &work->path[0].sgid); if (err) - ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID, + ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_GID, NULL, 0, NULL, 0); else - ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID, + ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_GID, &work->path[0].sgid, sizeof(work->path[0].sgid), NULL, 0); @@ -2105,39 +2119,40 @@ static int cm_req_handler(struct cm_work *work) ret = cm_init_av_by_path(&work->path[1], NULL, &cm_id_priv->alt_av, cm_id_priv); if (ret) { - ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID, + ib_send_cm_rej(&cm_id_priv->id, + IB_CM_REJ_INVALID_ALT_GID, &work->path[0].sgid, sizeof(work->path[0].sgid), NULL, 0); goto rejected; } } - cm_id_priv->tid = req_msg->hdr.tid; - cm_id_priv->timeout_ms = cm_convert_to_ms( - IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg)); - cm_id_priv->max_cm_retries = IBA_GET(CM_REQ_MAX_CM_RETRIES, req_msg); - cm_id_priv->remote_qpn = - cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg)); - cm_id_priv->initiator_depth = - IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg); - cm_id_priv->responder_resources = - 
IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg); - cm_id_priv->path_mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg); - cm_id_priv->pkey = cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg)); - cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg)); - cm_id_priv->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg); - cm_id_priv->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg); - cm_id_priv->qp_type = cm_req_get_qp_type(req_msg); + cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler; + cm_id_priv->id.context = listen_cm_id_priv->id.context; cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id); + + /* Now MAD handlers can see the new ID */ + spin_lock_irq(&cm_id_priv->lock); + cm_finalize_id(cm_id_priv); + + /* Refcount belongs to the event, pairs with cm_process_work() */ + refcount_inc(&cm_id_priv->refcount); + atomic_inc(&cm_id_priv->work_count); + spin_unlock_irq(&cm_id_priv->lock); cm_process_work(cm_id_priv, work); + /* + * Since this ID was just created and was not made visible to other MAD + * handlers until the cm_finalize_id() above we know that the + * cm_process_work() will deliver the event and the listen_cm_id + * embedded in the event can be derefed here. + */ cm_deref_id(listen_cm_id_priv); return 0; rejected: - refcount_dec(&cm_id_priv->refcount); cm_deref_id(listen_cm_id_priv); destroy: - ib_destroy_cm_id(cm_id); + ib_destroy_cm_id(&cm_id_priv->id); return ret; } -- cgit v1.2.3-58-ga151 From d1de9a88074b66482443f0cd91618d7b51a7c9b6 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Tue, 10 Mar 2020 11:25:39 +0200 Subject: RDMA/cm: Add missing locking around id.state in cm_dup_req_handler All accesses to id.state must be done under the spinlock. Fixes: a977049dacde ("[PATCH] IB: Add the kernel CM implementation") Link: https://lore.kernel.org/r/20200310092545.251365-10-leon@kernel.org Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cm.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 3f125044d35e..3ce735527938 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -1872,8 +1872,12 @@ static void cm_dup_req_handler(struct cm_work *work, counter[CM_REQ_COUNTER]); /* Quick state check to discard duplicate REQs. */ - if (cm_id_priv->id.state == IB_CM_REQ_RCVD) + spin_lock_irq(&cm_id_priv->lock); + if (cm_id_priv->id.state == IB_CM_REQ_RCVD) { + spin_unlock_irq(&cm_id_priv->lock); return; + } + spin_unlock_irq(&cm_id_priv->lock); ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg); if (ret) -- cgit v1.2.3-58-ga151 From 00777a68ae98931d2d10f0aa8bfa60734cc09bfd Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Tue, 10 Mar 2020 11:25:40 +0200 Subject: RDMA/cm: Add some lockdep assertions for cm_id_priv->lock These functions all touch state, so must be called under the lock. Inspection shows this is currently true. 
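For reference, the annotation pattern being added is a lockdep assertion at the top of each helper that mutates locked state; a minimal sketch with an illustrative structure and helper name:

#include <linux/lockdep.h>
#include <linux/spinlock.h>

/* Hypothetical state container. */
struct example_priv {
	spinlock_t lock;
	int state;
};

/* The assertion documents the locking contract and, with lockdep
 * enabled, turns a caller that forgot the lock into a loud warning
 * instead of a silent race. */
static void example_set_state(struct example_priv *priv, int state)
{
	lockdep_assert_held(&priv->lock);
	priv->state = state;
}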
Link: https://lore.kernel.org/r/20200310092545.251365-11-leon@kernel.org Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cm.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 3ce735527938..cc3e90e77eec 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -974,6 +974,8 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv) unsigned long flags; struct cm_device *cm_dev; + lockdep_assert_held(&cm_id_priv->lock); + cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client); if (!cm_dev) return; @@ -1005,6 +1007,8 @@ static void cm_reset_to_idle(struct cm_id_private *cm_id_priv) { unsigned long flags; + lockdep_assert_held(&cm_id_priv->lock); + cm_id_priv->id.state = IB_CM_IDLE; if (cm_id_priv->timewait_info) { spin_lock_irqsave(&cm.lock, flags); @@ -1823,6 +1827,8 @@ static void cm_format_rej(struct cm_rej_msg *rej_msg, const void *private_data, u8 private_data_len) { + lockdep_assert_held(&cm_id_priv->lock); + cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid); IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg, be32_to_cpu(cm_id_priv->id.remote_id)); -- cgit v1.2.3-58-ga151 From e029fdc0684785ffdeca758a08bd860bae8c344d Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Tue, 10 Mar 2020 11:25:41 +0200 Subject: RDMA/cm: Allow ib_send_cm_dreq() to be done under lock The first thing ib_send_cm_dreq() does is obtain the lock, so use the usual unlocked wrapper, locked actor pattern here. This avoids a sketchy lock/unlock sequence (which could allow state to change) during cm_destroy_id(). Link: https://lore.kernel.org/r/20200310092545.251365-12-leon@kernel.org Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cm.c | 54 ++++++++++++++++++++++++++++---------------- 1 file changed, 34 insertions(+), 20 deletions(-) diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index cc3e90e77eec..00bbfa244afb 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -80,8 +80,11 @@ const char *__attribute_const__ ibcm_reject_msg(int reason) } EXPORT_SYMBOL(ibcm_reject_msg); +struct cm_id_private; static void cm_add_one(struct ib_device *device); static void cm_remove_one(struct ib_device *device, void *client_data); +static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv, + const void *private_data, u8 private_data_len); static struct ib_client cm_client = { .name = "cm", @@ -1084,10 +1087,12 @@ retest: NULL, 0, NULL, 0); break; case IB_CM_ESTABLISHED: - spin_unlock_irq(&cm_id_priv->lock); - if (cm_id_priv->qp_type == IB_QPT_XRC_TGT) + if (cm_id_priv->qp_type == IB_QPT_XRC_TGT) { + spin_unlock_irq(&cm_id_priv->lock); break; - ib_send_cm_dreq(cm_id, NULL, 0); + } + cm_send_dreq_locked(cm_id_priv, NULL, 0); + spin_unlock_irq(&cm_id_priv->lock); goto retest; case IB_CM_DREQ_SENT: ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); @@ -2604,35 +2609,32 @@ static void cm_format_dreq(struct cm_dreq_msg *dreq_msg, private_data_len); } -int ib_send_cm_dreq(struct ib_cm_id *cm_id, - const void *private_data, - u8 private_data_len) +static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv, + const void *private_data, u8 private_data_len) { - struct cm_id_private *cm_id_priv; struct ib_mad_send_buf *msg; - unsigned long flags; int ret; + lockdep_assert_held(&cm_id_priv->lock); + if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE) return 
-EINVAL; - cm_id_priv = container_of(cm_id, struct cm_id_private, id); - spin_lock_irqsave(&cm_id_priv->lock, flags); - if (cm_id->state != IB_CM_ESTABLISHED) { + if (cm_id_priv->id.state != IB_CM_ESTABLISHED) { pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__, - be32_to_cpu(cm_id->local_id), cm_id->state); - ret = -EINVAL; - goto out; + be32_to_cpu(cm_id_priv->id.local_id), + cm_id_priv->id.state); + return -EINVAL; } - if (cm_id->lap_state == IB_CM_LAP_SENT || - cm_id->lap_state == IB_CM_MRA_LAP_RCVD) + if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT || + cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD) ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); ret = cm_alloc_msg(cm_id_priv, &msg); if (ret) { cm_enter_timewait(cm_id_priv); - goto out; + return ret; } cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv, @@ -2643,14 +2645,26 @@ int ib_send_cm_dreq(struct ib_cm_id *cm_id, ret = ib_post_send_mad(msg, NULL); if (ret) { cm_enter_timewait(cm_id_priv); - spin_unlock_irqrestore(&cm_id_priv->lock, flags); cm_free_msg(msg); return ret; } - cm_id->state = IB_CM_DREQ_SENT; + cm_id_priv->id.state = IB_CM_DREQ_SENT; cm_id_priv->msg = msg; -out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); + return 0; +} + +int ib_send_cm_dreq(struct ib_cm_id *cm_id, const void *private_data, + u8 private_data_len) +{ + struct cm_id_private *cm_id_priv = + container_of(cm_id, struct cm_id_private, id); + unsigned long flags; + int ret; + + spin_lock_irqsave(&cm_id_priv->lock, flags); + ret = cm_send_dreq_locked(cm_id_priv, private_data, private_data_len); + spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } EXPORT_SYMBOL(ib_send_cm_dreq); -- cgit v1.2.3-58-ga151 From 87cabf3e09fff9960743801a046bb2676427f00a Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Tue, 10 Mar 2020 11:25:42 +0200 Subject: RDMA/cm: Allow ib_send_cm_drep() to be done under lock The first thing ib_send_cm_drep() does is obtain the lock, so use the usual unlocked wrapper, locked actor pattern here. This avoids a sketchy lock/unlock sequence (which could allow state to change) during cm_destroy_id(). 
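The "unlocked wrapper, locked actor" pattern used by these two patches can be sketched outside the kernel. The following is a minimal illustration only, with a pthread mutex standing in for the cm_id_priv spinlock and every name invented for the example; it is not taken from the driver:

#include <pthread.h>
#include <stdio.h>

enum conn_state { ESTABLISHED, DREQ_SENT, IDLE };

struct conn {
	pthread_mutex_t lock;
	enum conn_state state;
};

/* Locked actor: the caller must already hold conn->lock. */
static int send_dreq_locked(struct conn *c)
{
	if (c->state != ESTABLISHED)
		return -1;            /* invalid state, nothing sent */
	c->state = DREQ_SENT;         /* state change happens under the lock */
	return 0;
}

/* Unlocked wrapper: public entry point, takes and releases the lock. */
int send_dreq(struct conn *c)
{
	int ret;

	pthread_mutex_lock(&c->lock);
	ret = send_dreq_locked(c);
	pthread_mutex_unlock(&c->lock);
	return ret;
}

int main(void)
{
	struct conn c = { PTHREAD_MUTEX_INITIALIZER, ESTABLISHED };

	/* External callers use the wrapper... */
	printf("wrapper: %d\n", send_dreq(&c));

	/* ...while a teardown path that already holds the lock calls the
	 * actor directly, with no unlock/relock window for the state to
	 * change underneath it. */
	pthread_mutex_lock(&c.lock);
	c.state = ESTABLISHED;
	printf("actor:   %d\n", send_dreq_locked(&c));
	pthread_mutex_unlock(&c.lock);
	return 0;
}

The point, as the commit messages note, is that a caller which already holds the lock (such as the destroy path) can invoke the locked actor directly instead of dropping and retaking the lock around the public API.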
Link: https://lore.kernel.org/r/20200310092545.251365-13-leon@kernel.org Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cm.c | 55 ++++++++++++++++++++++++++------------------ 1 file changed, 33 insertions(+), 22 deletions(-) diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 00bbfa244afb..8ed25341edd7 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -85,6 +85,8 @@ static void cm_add_one(struct ib_device *device); static void cm_remove_one(struct ib_device *device, void *client_data); static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv, const void *private_data, u8 private_data_len); +static int cm_send_drep_locked(struct cm_id_private *cm_id_priv, + void *private_data, u8 private_data_len); static struct ib_client cm_client = { .name = "cm", @@ -1100,8 +1102,8 @@ retest: spin_unlock_irq(&cm_id_priv->lock); break; case IB_CM_DREQ_RCVD: + cm_send_drep_locked(cm_id_priv, NULL, 0); spin_unlock_irq(&cm_id_priv->lock); - ib_send_cm_drep(cm_id, NULL, 0); break; default: spin_unlock_irq(&cm_id_priv->lock); @@ -2685,51 +2687,60 @@ static void cm_format_drep(struct cm_drep_msg *drep_msg, private_data_len); } -int ib_send_cm_drep(struct ib_cm_id *cm_id, - const void *private_data, - u8 private_data_len) +static int cm_send_drep_locked(struct cm_id_private *cm_id_priv, + void *private_data, u8 private_data_len) { - struct cm_id_private *cm_id_priv; struct ib_mad_send_buf *msg; - unsigned long flags; - void *data; int ret; + lockdep_assert_held(&cm_id_priv->lock); + if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE) return -EINVAL; - data = cm_copy_private_data(private_data, private_data_len); - if (IS_ERR(data)) - return PTR_ERR(data); - - cm_id_priv = container_of(cm_id, struct cm_id_private, id); - spin_lock_irqsave(&cm_id_priv->lock, flags); - if (cm_id->state != IB_CM_DREQ_RCVD) { - pr_debug("%s: local_id %d, cm_idcm_id->state(%d) != IB_CM_DREQ_RCVD\n", - __func__, be32_to_cpu(cm_id->local_id), cm_id->state); - spin_unlock_irqrestore(&cm_id_priv->lock, flags); - kfree(data); + if (cm_id_priv->id.state != IB_CM_DREQ_RCVD) { + pr_debug( + "%s: local_id %d, cm_idcm_id->state(%d) != IB_CM_DREQ_RCVD\n", + __func__, be32_to_cpu(cm_id_priv->id.local_id), + cm_id_priv->id.state); + kfree(private_data); return -EINVAL; } - cm_set_private_data(cm_id_priv, data, private_data_len); + cm_set_private_data(cm_id_priv, private_data, private_data_len); cm_enter_timewait(cm_id_priv); ret = cm_alloc_msg(cm_id_priv, &msg); if (ret) - goto out; + return ret; cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv, private_data, private_data_len); ret = ib_post_send_mad(msg, NULL); if (ret) { - spin_unlock_irqrestore(&cm_id_priv->lock, flags); cm_free_msg(msg); return ret; } + return 0; +} -out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); +int ib_send_cm_drep(struct ib_cm_id *cm_id, const void *private_data, + u8 private_data_len) +{ + struct cm_id_private *cm_id_priv = + container_of(cm_id, struct cm_id_private, id); + unsigned long flags; + void *data; + int ret; + + data = cm_copy_private_data(private_data, private_data_len); + if (IS_ERR(data)) + return PTR_ERR(data); + + spin_lock_irqsave(&cm_id_priv->lock, flags); + ret = cm_send_drep_locked(cm_id_priv, data, private_data_len); + spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } EXPORT_SYMBOL(ib_send_cm_drep); -- cgit v1.2.3-58-ga151 From 81ddb41f876d488416e8aa5a740a4dbd01a83705 Mon Sep 17 00:00:00 2001 From: 
Jason Gunthorpe Date: Tue, 10 Mar 2020 11:25:43 +0200 Subject: RDMA/cm: Allow ib_send_cm_rej() to be done under lock The first thing ib_send_cm_rej() does is obtain the lock, so use the usual unlocked wrapper, locked actor pattern here. This avoids a sketchy lock/unlock sequence (which could allow state to change) during cm_destroy_id(). While here simplify some of the logic in the implementation. Link: https://lore.kernel.org/r/20200310092545.251365-14-leon@kernel.org Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cm.c | 92 +++++++++++++++++++++++++------------------- 1 file changed, 52 insertions(+), 40 deletions(-) diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 8ed25341edd7..651e7c391762 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -87,6 +87,10 @@ static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv, const void *private_data, u8 private_data_len); static int cm_send_drep_locked(struct cm_id_private *cm_id_priv, void *private_data, u8 private_data_len); +static int cm_send_rej_locked(struct cm_id_private *cm_id_priv, + enum ib_cm_rej_reason reason, void *ari, + u8 ari_length, const void *private_data, + u8 private_data_len); static struct ib_client cm_client = { .name = "cm", @@ -1060,11 +1064,11 @@ retest: case IB_CM_REQ_SENT: case IB_CM_MRA_REQ_RCVD: ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); + cm_send_rej_locked(cm_id_priv, IB_CM_REJ_TIMEOUT, + &cm_id_priv->id.device->node_guid, + sizeof(cm_id_priv->id.device->node_guid), + NULL, 0); spin_unlock_irq(&cm_id_priv->lock); - ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT, - &cm_id_priv->id.device->node_guid, - sizeof cm_id_priv->id.device->node_guid, - NULL, 0); break; case IB_CM_REQ_RCVD: if (err == -ENOMEM) { @@ -1072,9 +1076,10 @@ retest: cm_reset_to_idle(cm_id_priv); spin_unlock_irq(&cm_id_priv->lock); } else { + cm_send_rej_locked(cm_id_priv, + IB_CM_REJ_CONSUMER_DEFINED, NULL, 0, + NULL, 0); spin_unlock_irq(&cm_id_priv->lock); - ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, - NULL, 0, NULL, 0); } break; case IB_CM_REP_SENT: @@ -1084,9 +1089,9 @@ retest: case IB_CM_MRA_REQ_SENT: case IB_CM_REP_RCVD: case IB_CM_MRA_REP_SENT: + cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL, + 0, NULL, 0); spin_unlock_irq(&cm_id_priv->lock); - ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, - NULL, 0, NULL, 0); break; case IB_CM_ESTABLISHED: if (cm_id_priv->qp_type == IB_QPT_XRC_TGT) { @@ -2899,65 +2904,72 @@ out: return -EINVAL; } -int ib_send_cm_rej(struct ib_cm_id *cm_id, - enum ib_cm_rej_reason reason, - void *ari, - u8 ari_length, - const void *private_data, - u8 private_data_len) +static int cm_send_rej_locked(struct cm_id_private *cm_id_priv, + enum ib_cm_rej_reason reason, void *ari, + u8 ari_length, const void *private_data, + u8 private_data_len) { - struct cm_id_private *cm_id_priv; struct ib_mad_send_buf *msg; - unsigned long flags; int ret; + lockdep_assert_held(&cm_id_priv->lock); + if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) || (ari && ari_length > IB_CM_REJ_ARI_LENGTH)) return -EINVAL; - cm_id_priv = container_of(cm_id, struct cm_id_private, id); - - spin_lock_irqsave(&cm_id_priv->lock, flags); - switch (cm_id->state) { + switch (cm_id_priv->id.state) { case IB_CM_REQ_SENT: case IB_CM_MRA_REQ_RCVD: case IB_CM_REQ_RCVD: case IB_CM_MRA_REQ_SENT: case IB_CM_REP_RCVD: case IB_CM_MRA_REP_SENT: - ret = cm_alloc_msg(cm_id_priv, &msg); - if (!ret) - 
cm_format_rej((struct cm_rej_msg *) msg->mad, - cm_id_priv, reason, ari, ari_length, - private_data, private_data_len); - cm_reset_to_idle(cm_id_priv); + ret = cm_alloc_msg(cm_id_priv, &msg); + if (ret) + return ret; + cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason, + ari, ari_length, private_data, private_data_len); break; case IB_CM_REP_SENT: case IB_CM_MRA_REP_RCVD: - ret = cm_alloc_msg(cm_id_priv, &msg); - if (!ret) - cm_format_rej((struct cm_rej_msg *) msg->mad, - cm_id_priv, reason, ari, ari_length, - private_data, private_data_len); - cm_enter_timewait(cm_id_priv); + ret = cm_alloc_msg(cm_id_priv, &msg); + if (ret) + return ret; + cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason, + ari, ari_length, private_data, private_data_len); break; default: pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__, - be32_to_cpu(cm_id_priv->id.local_id), cm_id->state); - ret = -EINVAL; - goto out; + be32_to_cpu(cm_id_priv->id.local_id), + cm_id_priv->id.state); + return -EINVAL; } - if (ret) - goto out; - ret = ib_post_send_mad(msg, NULL); - if (ret) + if (ret) { cm_free_msg(msg); + return ret; + } -out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); + return 0; +} + +int ib_send_cm_rej(struct ib_cm_id *cm_id, enum ib_cm_rej_reason reason, + void *ari, u8 ari_length, const void *private_data, + u8 private_data_len) +{ + struct cm_id_private *cm_id_priv = + container_of(cm_id, struct cm_id_private, id); + unsigned long flags; + int ret; + + spin_lock_irqsave(&cm_id_priv->lock, flags); + ret = cm_send_rej_locked(cm_id_priv, reason, ari, ari_length, + private_data, private_data_len); + spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } EXPORT_SYMBOL(ib_send_cm_rej); -- cgit v1.2.3-58-ga151 From 6a8824a74bc9dccb2cae5caa993d2ec09f4694f2 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Tue, 10 Mar 2020 11:25:44 +0200 Subject: RDMA/cm: Allow ib_send_cm_sidr_rep() to be done under lock The first thing ib_send_cm_sidr_rep() does is obtain the lock, so use the usual unlocked wrapper, locked actor pattern here. Get rid of the cm_reject_sidr_req() wrapper so each call site can call the locked or unlocked version as required. This avoids a sketchy lock/unlock sequence (which could allow state to change) during cm_destroy_id(). 
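One detail worth calling out: with the wrapper gone, the call sites in the diff below build the reply parameters in place as a C99 compound literal. A standalone sketch of that idiom, using made-up stand-ins for the ib_cm types:

#include <stdio.h>

/* Invented stand-ins for the real ib_cm types, for illustration only. */
enum sidr_status { SIDR_ACCEPT, SIDR_REJECT, SIDR_UNSUPPORTED };

struct sidr_rep_param {
	enum sidr_status status;
	const void *info;
	unsigned char info_length;
};

static int send_sidr_rep(const struct sidr_rep_param *param)
{
	printf("status=%d info_length=%d\n", param->status, param->info_length);
	return 0;
}

int main(void)
{
	/* A one-off reject no longer needs a helper that memsets a local
	 * struct; the compound literal zero-initializes the unnamed fields
	 * and sets only the one that matters. */
	send_sidr_rep(&(struct sidr_rep_param){ .status = SIDR_REJECT });
	send_sidr_rep(&(struct sidr_rep_param){ .status = SIDR_UNSUPPORTED });
	return 0;
}

Unnamed fields of the compound literal are zero-initialized, so only the status needs to be spelled out at each call site.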
Link: https://lore.kernel.org/r/20200310092545.251365-15-leon@kernel.org Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cm.c | 58 +++++++++++++++++++++----------------------- 1 file changed, 28 insertions(+), 30 deletions(-) diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 651e7c391762..9505b9ef4330 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -83,6 +83,8 @@ EXPORT_SYMBOL(ibcm_reject_msg); struct cm_id_private; static void cm_add_one(struct ib_device *device); static void cm_remove_one(struct ib_device *device, void *client_data); +static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv, + struct ib_cm_sidr_rep_param *param); static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv, const void *private_data, u8 private_data_len); static int cm_send_drep_locked(struct cm_id_private *cm_id_priv, @@ -830,16 +832,6 @@ static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private return NULL; } -static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv, - enum ib_cm_sidr_status status) -{ - struct ib_cm_sidr_rep_param param; - - memset(¶m, 0, sizeof param); - param.status = status; - ib_send_cm_sidr_rep(&cm_id_priv->id, ¶m); -} - static struct cm_id_private *cm_alloc_id_priv(struct ib_device *device, ib_cm_handler cm_handler, void *context) @@ -1058,8 +1050,10 @@ retest: spin_unlock_irq(&cm_id_priv->lock); break; case IB_CM_SIDR_REQ_RCVD: + cm_send_sidr_rep_locked(cm_id_priv, + &(struct ib_cm_sidr_rep_param){ + .status = IB_SIDR_REJECT }); spin_unlock_irq(&cm_id_priv->lock); - cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT); break; case IB_CM_REQ_SENT: case IB_CM_MRA_REQ_RCVD: @@ -3640,7 +3634,9 @@ static int cm_sidr_req_handler(struct cm_work *work) cm_id_priv->id.service_id); if (!listen_cm_id_priv) { spin_unlock_irq(&cm.lock); - cm_reject_sidr_req(cm_id_priv, IB_SIDR_UNSUPPORTED); + ib_send_cm_sidr_rep(&cm_id_priv->id, + &(struct ib_cm_sidr_rep_param){ + .status = IB_SIDR_UNSUPPORTED }); goto out; /* No match. 
*/ } refcount_inc(&listen_cm_id_priv->refcount); @@ -3694,50 +3690,52 @@ static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg, param->private_data, param->private_data_len); } -int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id, - struct ib_cm_sidr_rep_param *param) +static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv, + struct ib_cm_sidr_rep_param *param) { - struct cm_id_private *cm_id_priv; struct ib_mad_send_buf *msg; - unsigned long flags; int ret; + lockdep_assert_held(&cm_id_priv->lock); + if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) || (param->private_data && param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE)) return -EINVAL; - cm_id_priv = container_of(cm_id, struct cm_id_private, id); - spin_lock_irqsave(&cm_id_priv->lock, flags); - if (cm_id->state != IB_CM_SIDR_REQ_RCVD) { - ret = -EINVAL; - goto error; - } + if (cm_id_priv->id.state != IB_CM_SIDR_REQ_RCVD) + return -EINVAL; ret = cm_alloc_msg(cm_id_priv, &msg); if (ret) - goto error; + return ret; cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv, param); ret = ib_post_send_mad(msg, NULL); if (ret) { - spin_unlock_irqrestore(&cm_id_priv->lock, flags); cm_free_msg(msg); return ret; } - cm_id->state = IB_CM_IDLE; - spin_unlock_irqrestore(&cm_id_priv->lock, flags); - - spin_lock_irqsave(&cm.lock, flags); + cm_id_priv->id.state = IB_CM_IDLE; if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) { rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table); RB_CLEAR_NODE(&cm_id_priv->sidr_id_node); } - spin_unlock_irqrestore(&cm.lock, flags); return 0; +} -error: spin_unlock_irqrestore(&cm_id_priv->lock, flags); +int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id, + struct ib_cm_sidr_rep_param *param) +{ + struct cm_id_private *cm_id_priv = + container_of(cm_id, struct cm_id_private, id); + unsigned long flags; + int ret; + + spin_lock_irqsave(&cm_id_priv->lock, flags); + ret = cm_send_sidr_rep_locked(cm_id_priv, param); + spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } EXPORT_SYMBOL(ib_send_cm_sidr_rep); -- cgit v1.2.3-58-ga151 From 67b3c8dceac6644322d5707687449f6dcbdff417 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Tue, 10 Mar 2020 11:25:45 +0200 Subject: RDMA/cm: Make sure the cm_id is in the IB_CM_IDLE state in destroy The first switch statement in cm_destroy_id() tries to move the ID to either IB_CM_IDLE or IB_CM_TIMEWAIT. Both states will block concurrent MAD handlers from progressing. Previous patches removed the unreliable lock/unlock sequences in this flow; this patch removes the extra locking steps and adds the missing parts to guarantee that destroy reaches IB_CM_IDLE. There is no point in leaving the ID in the IB_CM_TIMEWAIT state when the memory is about to be kfreed. Rework things to hold the lock across all the state transitions and directly assert when done that it ended up in IB_CM_IDLE as expected. This was accompanied by a careful audit of all the state transitions here, which generally did end up in IDLE on their success and non-racy paths.
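A compressed sketch of the resulting shape, drive every transition under a single lock hold and assert the terminal state at the end, is shown below; the states and helpers are invented and far simpler than the real IB CM state machine:

#include <assert.h>
#include <pthread.h>

enum state { IDLE, ESTABLISHED, DREQ_SENT, TIMEWAIT };

struct id {
	pthread_mutex_t lock;
	enum state state;
};

/* Each helper performs one legal transition while the lock is held. */
static void send_dreq_locked(struct id *id)      { id->state = DREQ_SENT; }
static void enter_timewait_locked(struct id *id) { id->state = TIMEWAIT; }

static void destroy_id(struct id *id)
{
	pthread_mutex_lock(&id->lock);
retest:
	switch (id->state) {
	case ESTABLISHED:
		send_dreq_locked(id);
		goto retest;          /* re-evaluate the new state, lock still held */
	case DREQ_SENT:
		enter_timewait_locked(id);
		goto retest;
	case TIMEWAIT:
		id->state = IDLE;     /* about to be freed, park it in IDLE */
		break;
	case IDLE:
		break;
	}
	assert(id->state == IDLE);    /* plays the role of the patch's WARN_ON() */
	pthread_mutex_unlock(&id->lock);
	/* the object would be freed here */
}

int main(void)
{
	struct id id = { PTHREAD_MUTEX_INITIALIZER, ESTABLISHED };
	destroy_id(&id);
	return 0;
}

The assert() here corresponds to the WARN_ON(cm_id->state != IB_CM_IDLE) added by the patch below.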
Link: https://lore.kernel.org/r/20200310092545.251365-16-leon@kernel.org Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cm.c | 41 +++++++++++++++++++++-------------------- 1 file changed, 21 insertions(+), 20 deletions(-) diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 9505b9ef4330..4794113ecd59 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -1026,34 +1026,34 @@ static void cm_destroy_id(struct ib_cm_id *cm_id, int err) struct cm_work *work; cm_id_priv = container_of(cm_id, struct cm_id_private, id); -retest: spin_lock_irq(&cm_id_priv->lock); +retest: switch (cm_id->state) { case IB_CM_LISTEN: - spin_unlock_irq(&cm_id_priv->lock); - - spin_lock_irq(&cm.lock); + spin_lock(&cm.lock); if (--cm_id_priv->listen_sharecount > 0) { /* The id is still shared. */ WARN_ON(refcount_read(&cm_id_priv->refcount) == 1); + spin_unlock(&cm.lock); + spin_unlock_irq(&cm_id_priv->lock); cm_deref_id(cm_id_priv); - spin_unlock_irq(&cm.lock); return; } + cm_id->state = IB_CM_IDLE; rb_erase(&cm_id_priv->service_node, &cm.listen_service_table); RB_CLEAR_NODE(&cm_id_priv->service_node); - spin_unlock_irq(&cm.lock); + spin_unlock(&cm.lock); break; case IB_CM_SIDR_REQ_SENT: cm_id->state = IB_CM_IDLE; ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); - spin_unlock_irq(&cm_id_priv->lock); break; case IB_CM_SIDR_REQ_RCVD: cm_send_sidr_rep_locked(cm_id_priv, &(struct ib_cm_sidr_rep_param){ .status = IB_SIDR_REJECT }); - spin_unlock_irq(&cm_id_priv->lock); + /* cm_send_sidr_rep_locked will not move to IDLE if it fails */ + cm_id->state = IB_CM_IDLE; break; case IB_CM_REQ_SENT: case IB_CM_MRA_REQ_RCVD: @@ -1062,18 +1062,15 @@ retest: &cm_id_priv->id.device->node_guid, sizeof(cm_id_priv->id.device->node_guid), NULL, 0); - spin_unlock_irq(&cm_id_priv->lock); break; case IB_CM_REQ_RCVD: if (err == -ENOMEM) { /* Do not reject to allow future retries. */ cm_reset_to_idle(cm_id_priv); - spin_unlock_irq(&cm_id_priv->lock); } else { cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0, NULL, 0); - spin_unlock_irq(&cm_id_priv->lock); } break; case IB_CM_REP_SENT: @@ -1085,31 +1082,35 @@ retest: case IB_CM_MRA_REP_SENT: cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0, NULL, 0); - spin_unlock_irq(&cm_id_priv->lock); break; case IB_CM_ESTABLISHED: if (cm_id_priv->qp_type == IB_QPT_XRC_TGT) { - spin_unlock_irq(&cm_id_priv->lock); + cm_id->state = IB_CM_IDLE; break; } cm_send_dreq_locked(cm_id_priv, NULL, 0); - spin_unlock_irq(&cm_id_priv->lock); goto retest; case IB_CM_DREQ_SENT: ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); cm_enter_timewait(cm_id_priv); - spin_unlock_irq(&cm_id_priv->lock); - break; + goto retest; case IB_CM_DREQ_RCVD: cm_send_drep_locked(cm_id_priv, NULL, 0); - spin_unlock_irq(&cm_id_priv->lock); + WARN_ON(cm_id->state != IB_CM_TIMEWAIT); + goto retest; + case IB_CM_TIMEWAIT: + /* + * The cm_acquire_id in cm_timewait_handler will stop working + * once we do cm_free_id() below, so just move to idle here for + * consistency. 
+ */ + cm_id->state = IB_CM_IDLE; break; - default: - spin_unlock_irq(&cm_id_priv->lock); + case IB_CM_IDLE: break; } + WARN_ON(cm_id->state != IB_CM_IDLE); - spin_lock_irq(&cm_id_priv->lock); spin_lock(&cm.lock); /* Required for cleanup paths related cm_req_handler() */ if (cm_id_priv->timewait_info) { -- cgit v1.2.3-58-ga151 From 3cae58047c1343949fb20c0e142845133ce0a074 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Fri, 13 Mar 2020 09:33:25 -0700 Subject: RDMA/bnxt_re: Use ib_device_try_get() There are a couple places in this driver running from a work queue that need the ib_device to be registered. Instead of using a broken internal bit rely on the new core code to guarantee device registration. Link: https://lore.kernel.org/r/1584117207-2664-2-git-send-email-selvin.xavier@broadcom.com Signed-off-by: Selvin Xavier Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/bnxt_re/bnxt_re.h | 1 - drivers/infiniband/hw/bnxt_re/main.c | 27 ++++++++++++++------------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h index c736e8254e75..407141ea0242 100644 --- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h +++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h @@ -132,7 +132,6 @@ struct bnxt_re_dev { struct list_head list; unsigned long flags; #define BNXT_RE_FLAG_NETDEV_REGISTERED 0 -#define BNXT_RE_FLAG_IBDEV_REGISTERED 1 #define BNXT_RE_FLAG_GOT_MSIX 2 #define BNXT_RE_FLAG_HAVE_L2_REF 3 #define BNXT_RE_FLAG_RCFW_CHANNEL_EN 4 diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 415693f8015a..885127c1e81b 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -1171,12 +1171,13 @@ static int bnxt_re_update_gid(struct bnxt_re_dev *rdev) u16 gid_idx, index; int rc = 0; - if (!test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) + if (!ib_device_try_get(&rdev->ibdev)) return 0; if (!sgid_tbl) { ibdev_err(&rdev->ibdev, "QPLIB: SGID table not allocated"); - return -EINVAL; + rc = -EINVAL; + goto out; } for (index = 0; index < sgid_tbl->active; index++) { @@ -1196,7 +1197,8 @@ static int bnxt_re_update_gid(struct bnxt_re_dev *rdev) rc = bnxt_qplib_update_sgid(sgid_tbl, &gid, gid_idx, rdev->qplib_res.netdev->dev_addr); } - +out: + ib_device_put(&rdev->ibdev); return rc; } @@ -1319,7 +1321,6 @@ int bnxt_re_ib_init(struct bnxt_re_dev *rdev) pr_err("Failed to register with IB: %#x\n", rc); return rc; } - set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags); dev_info(rdev_to_dev(rdev), "Device registered successfully"); ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed, &rdev->active_width); @@ -1612,7 +1613,6 @@ static void bnxt_re_dealloc_driver(struct ib_device *ib_dev) struct bnxt_re_dev *rdev = container_of(ib_dev, struct bnxt_re_dev, ibdev); - clear_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags); dev_info(rdev_to_dev(rdev), "Unregistering Device"); rtnl_lock(); @@ -1630,12 +1630,7 @@ static void bnxt_re_task(struct work_struct *work) re_work = container_of(work, struct bnxt_re_work, work); rdev = re_work->rdev; - if (re_work->event != NETDEV_REGISTER && - !test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) - goto done; - - switch (re_work->event) { - case NETDEV_REGISTER: + if (re_work->event == NETDEV_REGISTER) { rc = bnxt_re_ib_init(rdev); if (rc) { ibdev_err(&rdev->ibdev, @@ -1645,7 +1640,13 @@ static void bnxt_re_task(struct work_struct *work) rtnl_unlock(); goto exit; } - break; + goto exit; + } + + if 
(!ib_device_try_get(&rdev->ibdev)) + goto exit; + + switch (re_work->event) { case NETDEV_UP: bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_PORT_ACTIVE); @@ -1665,7 +1666,7 @@ static void bnxt_re_task(struct work_struct *work) default: break; } -done: + ib_device_put(&rdev->ibdev); smp_mb__before_atomic(); atomic_dec(&rdev->sched_count); exit: -- cgit v1.2.3-58-ga151 From 8a6c61704746d3a1e004e054504ae8d98ed95697 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Fri, 13 Mar 2020 09:33:26 -0700 Subject: RDMA/bnxt_re: Fix lifetimes in bnxt_re_task A work queue cannot just rely on the ib_device not being freed, it must hold a kref on the memory so that the BNXT_RE_FLAG_IBDEV_REGISTERED check works. Fixes: 1ac5a4047975 ("RDMA/bnxt_re: Add bnxt_re RoCE driver") Link: https://lore.kernel.org/r/1584117207-2664-3-git-send-email-selvin.xavier@broadcom.com Signed-off-by: Selvin Xavier Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/bnxt_re/main.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 885127c1e81b..c494e11c3fe3 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -1670,6 +1670,7 @@ static void bnxt_re_task(struct work_struct *work) smp_mb__before_atomic(); atomic_dec(&rdev->sched_count); exit: + put_device(&rdev->ibdev.dev); kfree(re_work); } @@ -1735,6 +1736,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier, /* Allocate for the deferred task */ re_work = kzalloc(sizeof(*re_work), GFP_ATOMIC); if (re_work) { + get_device(&rdev->ibdev.dev); re_work->rdev = rdev; re_work->event = event; re_work->vlan_dev = (real_dev == netdev ? -- cgit v1.2.3-58-ga151 From 4e88cef11d1913d1875c7870df4facbd096a062b Mon Sep 17 00:00:00 2001 From: Selvin Xavier Date: Fri, 13 Mar 2020 09:33:27 -0700 Subject: RDMA/bnxt_re: Remove unnecessary sched count Since the lifetime of bnxt_re_task is controlled by the kref of device, sched_count is no longer required. Remove it. 
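The general idea, that a queued work item pins the object it runs against so no separate in-flight counter is needed, can be sketched in plain C11. This is an illustration with invented names, not the bnxt_re code:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Invented example object; stands in for the device a work item pins. */
struct dev_obj {
	atomic_int refcount;
	const char *name;
};

static struct dev_obj *dev_get(struct dev_obj *d)
{
	atomic_fetch_add(&d->refcount, 1);
	return d;
}

static void dev_put(struct dev_obj *d)
{
	/* fetch_sub returns the previous value: 1 means we were the last user */
	if (atomic_fetch_sub(&d->refcount, 1) == 1) {
		printf("freeing %s\n", d->name);
		free(d);
	}
}

/* A deferred task: holds its own reference, drops it when done. */
static void work_fn(struct dev_obj *d)
{
	printf("work runs against %s\n", d->name);
	dev_put(d);
}

int main(void)
{
	struct dev_obj *d = malloc(sizeof(*d));

	if (!d)
		return 1;
	atomic_init(&d->refcount, 1);   /* creator's reference */
	d->name = "dev0";

	work_fn(dev_get(d));            /* "queue" the work with its own ref */
	dev_put(d);                     /* creator drops its ref; last put frees */
	return 0;
}

Once every queued task holds its own reference, "is anything still scheduled?" is answered by the reference count itself, which is why sched_count and its NETDEV_UNREGISTER special case can go.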
Link: https://lore.kernel.org/r/1584117207-2664-4-git-send-email-selvin.xavier@broadcom.com Signed-off-by: Selvin Xavier Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/bnxt_re/bnxt_re.h | 1 - drivers/infiniband/hw/bnxt_re/main.c | 8 -------- 2 files changed, 9 deletions(-) diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h index 407141ea0242..a300588634c5 100644 --- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h +++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h @@ -176,7 +176,6 @@ struct bnxt_re_dev { atomic_t srq_count; atomic_t mr_count; atomic_t mw_count; - atomic_t sched_count; /* Max of 2 lossless traffic class supported per port */ u16 cosq[2]; diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index c494e11c3fe3..4a8fb1ad74a8 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -1667,8 +1667,6 @@ static void bnxt_re_task(struct work_struct *work) break; } ib_device_put(&rdev->ibdev); - smp_mb__before_atomic(); - atomic_dec(&rdev->sched_count); exit: put_device(&rdev->ibdev.dev); kfree(re_work); @@ -1720,11 +1718,6 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier, break; case NETDEV_UNREGISTER: - /* netdev notifier will call NETDEV_UNREGISTER again later since - * we are still holding the reference to the netdev - */ - if (atomic_read(&rdev->sched_count) > 0) - goto exit; ib_unregister_device_queued(&rdev->ibdev); break; @@ -1742,7 +1735,6 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier, re_work->vlan_dev = (real_dev == netdev ? NULL : netdev); INIT_WORK(&re_work->work, bnxt_re_task); - atomic_inc(&rdev->sched_count); queue_work(bnxt_re_wq, &re_work->work); } } -- cgit v1.2.3-58-ga151 From 6c6e39212b18bffd806423a45359c70221ce0d77 Mon Sep 17 00:00:00 2001 From: Xi Wang Date: Tue, 10 Mar 2020 19:18:00 +0800 Subject: RDMA/hns: Rename wqe buffer related functions There are several global functions related to the wqe buffer in the hns driver that are called from different files. These symbols cannot directly represent the namespace they belong to. So add the prefix 'hns_roce_' to 3 wqe buffer related global functions: get_recv_wqe(), get_send_wqe(), and get_send_extend_sge().
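A tiny illustration of the convention (an invented 'mydrv_' prefix, not the hns code): symbols visible across files carry the driver prefix, while file-local helpers, like get_wqe() in the diff below, can stay short and static:

#include <stdio.h>

/* In a real driver this prototype would live in a shared header. */
void *mydrv_get_send_wqe(int n);

/* File-local helper: no prefix needed, not visible outside this file. */
static void *get_wqe(int offset)
{
	static char buf[4096];
	return buf + offset;
}

/* Cross-file symbol: the prefix tells readers where it belongs. */
void *mydrv_get_send_wqe(int n)
{
	return get_wqe(n * 64);
}

int main(void)
{
	printf("%p\n", mydrv_get_send_wqe(2));
	return 0;
}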
Link: https://lore.kernel.org/r/1583839084-31579-2-git-send-email-liweihang@huawei.com Signed-off-by: Xi Wang Signed-off-by: Weihang Li Reviewed-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_device.h | 6 +++--- drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 9 +++++---- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 10 +++++----- drivers/infiniband/hw/hns/hns_roce_qp.c | 6 +++--- 4 files changed, 16 insertions(+), 15 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index d7dcf6ebc526..b6ae12dd4024 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -1238,9 +1238,9 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd, int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata); void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp); -void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n); -void *get_send_wqe(struct hns_roce_qp *hr_qp, int n); -void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n); +void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, int n); +void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, int n); +void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, int n); bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq, struct ib_cq *ib_cq); enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state); diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index c05a905c518e..2e5304502a08 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -106,7 +106,7 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp, goto out; } - wqe = get_send_wqe(qp, wqe_idx); + wqe = hns_roce_get_send_wqe(qp, wqe_idx); qp->sq.wrid[wqe_idx] = wr->wr_id; /* Corresponding to the RC and RD type wqe process separately */ @@ -378,7 +378,7 @@ static int hns_roce_v1_post_recv(struct ib_qp *ibqp, goto out; } - ctrl = get_recv_wqe(hr_qp, wqe_idx); + ctrl = hns_roce_get_recv_wqe(hr_qp, wqe_idx); roce_set_field(ctrl->rwqe_byte_12, RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_M, @@ -2284,9 +2284,10 @@ static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq, if (is_send) { /* SQ conrespond to CQE */ - sq_wqe = get_send_wqe(*cur_qp, roce_get_field(cqe->cqe_byte_4, + sq_wqe = hns_roce_get_send_wqe(*cur_qp, + roce_get_field(cqe->cqe_byte_4, CQE_BYTE_4_WQE_INDEX_M, - CQE_BYTE_4_WQE_INDEX_S)& + CQE_BYTE_4_WQE_INDEX_S) & ((*cur_qp)->sq.wqe_cnt-1)); switch (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_OPCODE_MASK) { case HNS_ROCE_WQE_OPCODE_SEND: diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 82021fa46d9d..88d671a976d3 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -127,7 +127,7 @@ static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr, * should calculate how many sges in the first page and the second * page. 
*/ - dseg = get_send_extend_sge(qp, (*sge_ind) & (qp->sge.sge_cnt - 1)); + dseg = hns_roce_get_extend_sge(qp, (*sge_ind) & (qp->sge.sge_cnt - 1)); fi_sge_num = (round_up((uintptr_t)dseg, 1 << shift) - (uintptr_t)dseg) / sizeof(struct hns_roce_v2_wqe_data_seg); @@ -137,7 +137,7 @@ static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr, set_data_seg_v2(dseg++, sg + i); (*sge_ind)++; } - dseg = get_send_extend_sge(qp, + dseg = hns_roce_get_extend_sge(qp, (*sge_ind) & (qp->sge.sge_cnt - 1)); for (i = 0; i < se_sge_num; i++) { set_data_seg_v2(dseg++, sg + fi_sge_num + i); @@ -329,7 +329,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, goto out; } - wqe = get_send_wqe(qp, wqe_idx); + wqe = hns_roce_get_send_wqe(qp, wqe_idx); qp->sq.wrid[wqe_idx] = wr->wr_id; owner_bit = ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1); @@ -676,7 +676,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, goto out; } - wqe = get_recv_wqe(hr_qp, wqe_idx); + wqe = hns_roce_get_recv_wqe(hr_qp, wqe_idx); dseg = (struct hns_roce_v2_wqe_data_seg *)wqe; for (i = 0; i < wr->num_sge; i++) { if (!wr->sg_list[i].length) @@ -2935,7 +2935,7 @@ static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe, sge_list = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sg_list; sge_num = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sge_cnt; - wqe_buf = get_recv_wqe(*cur_qp, wr_cnt); + wqe_buf = hns_roce_get_recv_wqe(*cur_qp, wr_cnt); data_len = wc->byte_len; for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len); sge_cnt++) { diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 5a28d62008e4..c2ea489b0df5 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -1470,17 +1470,17 @@ static void *get_wqe(struct hns_roce_qp *hr_qp, int offset) return hns_roce_buf_offset(&hr_qp->hr_buf, offset); } -void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n) +void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, int n) { return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift)); } -void *get_send_wqe(struct hns_roce_qp *hr_qp, int n) +void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, int n) { return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift)); } -void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n) +void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, int n) { return hns_roce_buf_offset(&hr_qp->hr_buf, hr_qp->sge.offset + (n << hr_qp->sge.sge_shift)); -- cgit v1.2.3-58-ga151 From 00a59d30f3f953b4dbfffcae96e927e62533e6cf Mon Sep 17 00:00:00 2001 From: Xi Wang Date: Tue, 10 Mar 2020 19:18:01 +0800 Subject: RDMA/hns: Optimize wqe buffer filling process for post send Encapsulates the wqe buffer process details for datagram seg, fast mr seg and atomic seg. 
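The shape of that encapsulation can be sketched as follows. The layout and names are invented stand-ins; only the idea that the helper receives the raw wqe cursor and hides the segment layout, and the swap/compare handling, loosely mirror set_atomic_seg() in the diff below:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Invented on-the-wire layout standing in for the real wqe segments. */
struct data_seg   { uint64_t addr; uint32_t len; };
struct atomic_seg { uint64_t swap_or_add; uint64_t compare; };
struct atomic_req { int is_cmp_swp; uint64_t swap; uint64_t compare_add; };

/* The helper owns the "data seg first, atomic seg right behind it"
 * detail, so the caller never builds intermediate typed pointers. */
static void set_atomic_seg(void *wqe, const struct atomic_req *req)
{
	struct atomic_seg *aseg =
		(struct atomic_seg *)((char *)wqe + sizeof(struct data_seg));

	if (req->is_cmp_swp) {
		aseg->swap_or_add = req->swap;
		aseg->compare = req->compare_add;
	} else {
		aseg->swap_or_add = req->compare_add;
		aseg->compare = 0;
	}
}

int main(void)
{
	uint64_t wqe[8] = { 0 };   /* stand-in for one send queue entry */
	struct atomic_req req = { .is_cmp_swp = 1, .swap = 2, .compare_add = 1 };
	struct atomic_seg *aseg =
		(struct atomic_seg *)((char *)wqe + sizeof(struct data_seg));

	set_atomic_seg(wqe, &req);
	printf("swap=%" PRIu64 " compare=%" PRIu64 "\n",
	       aseg->swap_or_add, aseg->compare);
	return 0;
}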
Link: https://lore.kernel.org/r/1583839084-31579-3-git-send-email-liweihang@huawei.com Signed-off-by: Xi Wang Signed-off-by: Weihang Li Reviewed-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 63 +++++++++++++++--------------- 1 file changed, 32 insertions(+), 31 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 88d671a976d3..c8c345f84c9e 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -57,10 +57,10 @@ static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg, } static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe, - struct hns_roce_wqe_frmr_seg *fseg, - const struct ib_reg_wr *wr) + void *wqe, const struct ib_reg_wr *wr) { struct hns_roce_mr *mr = to_hr_mr(wr->mr); + struct hns_roce_wqe_frmr_seg *fseg = wqe; /* use ib_access_flags */ roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_BIND_EN_S, @@ -92,16 +92,26 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe, V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S, 0); } -static void set_atomic_seg(struct hns_roce_wqe_atomic_seg *aseg, - const struct ib_atomic_wr *wr) +static void set_atomic_seg(const struct ib_send_wr *wr, void *wqe, + struct hns_roce_v2_rc_send_wqe *rc_sq_wqe, + int valid_num_sge) { - if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) { - aseg->fetchadd_swap_data = cpu_to_le64(wr->swap); - aseg->cmp_data = cpu_to_le64(wr->compare_add); + struct hns_roce_wqe_atomic_seg *aseg; + + set_data_seg_v2(wqe, wr->sg_list); + aseg = wqe + sizeof(struct hns_roce_v2_wqe_data_seg); + + if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) { + aseg->fetchadd_swap_data = cpu_to_le64(atomic_wr(wr)->swap); + aseg->cmp_data = cpu_to_le64(atomic_wr(wr)->compare_add); } else { - aseg->fetchadd_swap_data = cpu_to_le64(wr->compare_add); + aseg->fetchadd_swap_data = + cpu_to_le64(atomic_wr(wr)->compare_add); aseg->cmp_data = 0; } + + roce_set_field(rc_sq_wqe->byte_16, V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M, + V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge); } static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr, @@ -154,11 +164,11 @@ static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr, static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr, struct hns_roce_v2_rc_send_wqe *rc_sq_wqe, void *wqe, unsigned int *sge_ind, - int valid_num_sge, - const struct ib_send_wr **bad_wr) + int valid_num_sge) { struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_v2_wqe_data_seg *dseg = wqe; + struct ib_device *ibdev = &hr_dev->ib_dev; struct hns_roce_qp *qp = to_hr_qp(ibqp); int j = 0; int i; @@ -166,15 +176,14 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr, if (wr->send_flags & IB_SEND_INLINE && valid_num_sge) { if (le32_to_cpu(rc_sq_wqe->msg_len) > hr_dev->caps.max_sq_inline) { - *bad_wr = wr; - dev_err(hr_dev->dev, "inline len(1-%d)=%d, illegal", - rc_sq_wqe->msg_len, hr_dev->caps.max_sq_inline); + ibdev_err(ibdev, "inline len(1-%d)=%d, illegal", + rc_sq_wqe->msg_len, + hr_dev->caps.max_sq_inline); return -EINVAL; } if (wr->opcode == IB_WR_RDMA_READ) { - *bad_wr = wr; - dev_err(hr_dev->dev, "Not support inline data!\n"); + ibdev_err(ibdev, "Not support inline data!\n"); return -EINVAL; } @@ -285,7 +294,6 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct hns_roce_v2_ud_send_wqe *ud_sq_wqe; struct hns_roce_v2_rc_send_wqe *rc_sq_wqe; 
struct hns_roce_qp *qp = to_hr_qp(ibqp); - struct hns_roce_wqe_frmr_seg *fseg; struct device *dev = hr_dev->dev; unsigned int owner_bit; unsigned int sge_idx; @@ -547,8 +555,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, break; case IB_WR_REG_MR: hr_op = HNS_ROCE_V2_WQE_OP_FAST_REG_PMR; - fseg = wqe; - set_frmr_seg(rc_sq_wqe, fseg, reg_wr(wr)); + set_frmr_seg(rc_sq_wqe, wqe, reg_wr(wr)); break; case IB_WR_ATOMIC_CMP_AND_SWP: hr_op = HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP; @@ -582,23 +589,17 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, V2_RC_SEND_WQE_BYTE_4_OPCODE_S, hr_op); if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP || - wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) { - struct hns_roce_v2_wqe_data_seg *dseg; - - dseg = wqe; - set_data_seg_v2(dseg, wr->sg_list); - wqe += sizeof(struct hns_roce_v2_wqe_data_seg); - set_atomic_seg(wqe, atomic_wr(wr)); - roce_set_field(rc_sq_wqe->byte_16, - V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M, - V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, + wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) + set_atomic_seg(wr, wqe, rc_sq_wqe, valid_num_sge); - } else if (wr->opcode != IB_WR_REG_MR) { + else if (wr->opcode != IB_WR_REG_MR) { ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe, wqe, &sge_idx, - valid_num_sge, bad_wr); - if (ret) + valid_num_sge); + if (ret) { + *bad_wr = wr; goto out; + } } } else { dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type); -- cgit v1.2.3-58-ga151 From e363f7de4e60eedbf7af126ed14d10ffb110c697 Mon Sep 17 00:00:00 2001 From: Xi Wang Date: Tue, 10 Mar 2020 19:18:02 +0800 Subject: RDMA/hns: Optimize the wr opcode conversion from ib to hns Simplify the wr opcode conversion from ib to hns by using a map table instead of the switch-case statement. Link: https://lore.kernel.org/r/1583839084-31579-4-git-send-email-liweihang@huawei.com Signed-off-by: Xi Wang Signed-off-by: Weihang Li Reviewed-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 63 +++++++++++++++++------------- 1 file changed, 36 insertions(+), 27 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index c8c345f84c9e..c813c7419932 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -56,6 +56,40 @@ static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg, dseg->len = cpu_to_le32(sg->length); } +/* + * mapped-value = 1 + real-value + * The hns wr opcode real value is start from 0, In order to distinguish between + * initialized and uninitialized map values, we plus 1 to the actual value when + * defining the mapping, so that the validity can be identified by checking the + * mapped value is greater than 0. 
+ */ +#define HR_OPC_MAP(ib_key, hr_key) \ + [IB_WR_ ## ib_key] = 1 + HNS_ROCE_V2_WQE_OP_ ## hr_key + +static const u32 hns_roce_op_code[] = { + HR_OPC_MAP(RDMA_WRITE, RDMA_WRITE), + HR_OPC_MAP(RDMA_WRITE_WITH_IMM, RDMA_WRITE_WITH_IMM), + HR_OPC_MAP(SEND, SEND), + HR_OPC_MAP(SEND_WITH_IMM, SEND_WITH_IMM), + HR_OPC_MAP(RDMA_READ, RDMA_READ), + HR_OPC_MAP(ATOMIC_CMP_AND_SWP, ATOM_CMP_AND_SWAP), + HR_OPC_MAP(ATOMIC_FETCH_AND_ADD, ATOM_FETCH_AND_ADD), + HR_OPC_MAP(SEND_WITH_INV, SEND_WITH_INV), + HR_OPC_MAP(LOCAL_INV, LOCAL_INV), + HR_OPC_MAP(MASKED_ATOMIC_CMP_AND_SWP, ATOM_MSK_CMP_AND_SWAP), + HR_OPC_MAP(MASKED_ATOMIC_FETCH_AND_ADD, ATOM_MSK_FETCH_AND_ADD), + HR_OPC_MAP(REG_MR, FAST_REG_PMR), +}; + +static u32 to_hr_opcode(u32 ib_opcode) +{ + if (ib_opcode >= ARRAY_SIZE(hns_roce_op_code)) + return HNS_ROCE_V2_WQE_OP_MASK; + + return hns_roce_op_code[ib_opcode] ? hns_roce_op_code[ib_opcode] - 1 : + HNS_ROCE_V2_WQE_OP_MASK; +} + static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe, void *wqe, const struct ib_reg_wr *wr) { @@ -303,7 +337,6 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, void *wqe = NULL; bool loopback; u32 tmp_len; - u32 hr_op; u8 *smac; int nreq; int ret; @@ -517,76 +550,52 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, wqe += sizeof(struct hns_roce_v2_rc_send_wqe); switch (wr->opcode) { case IB_WR_RDMA_READ: - hr_op = HNS_ROCE_V2_WQE_OP_RDMA_READ; rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey); rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr); break; case IB_WR_RDMA_WRITE: - hr_op = HNS_ROCE_V2_WQE_OP_RDMA_WRITE; rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey); rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr); break; case IB_WR_RDMA_WRITE_WITH_IMM: - hr_op = HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM; rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey); rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr); break; - case IB_WR_SEND: - hr_op = HNS_ROCE_V2_WQE_OP_SEND; - break; - case IB_WR_SEND_WITH_INV: - hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_INV; - break; - case IB_WR_SEND_WITH_IMM: - hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM; - break; case IB_WR_LOCAL_INV: - hr_op = HNS_ROCE_V2_WQE_OP_LOCAL_INV; roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SO_S, 1); rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey); break; case IB_WR_REG_MR: - hr_op = HNS_ROCE_V2_WQE_OP_FAST_REG_PMR; set_frmr_seg(rc_sq_wqe, wqe, reg_wr(wr)); break; case IB_WR_ATOMIC_CMP_AND_SWP: - hr_op = HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP; rc_sq_wqe->rkey = cpu_to_le32(atomic_wr(wr)->rkey); rc_sq_wqe->va = cpu_to_le64(atomic_wr(wr)->remote_addr); break; case IB_WR_ATOMIC_FETCH_AND_ADD: - hr_op = HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD; rc_sq_wqe->rkey = cpu_to_le32(atomic_wr(wr)->rkey); rc_sq_wqe->va = cpu_to_le64(atomic_wr(wr)->remote_addr); break; - case IB_WR_MASKED_ATOMIC_CMP_AND_SWP: - hr_op = - HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP; - break; - case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD: - hr_op = - HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD; - break; default: - hr_op = HNS_ROCE_V2_WQE_OP_MASK; break; } roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OPCODE_M, - V2_RC_SEND_WQE_BYTE_4_OPCODE_S, hr_op); + V2_RC_SEND_WQE_BYTE_4_OPCODE_S, + to_hr_opcode(wr->opcode)); if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP || wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) -- cgit v1.2.3-58-ga151 From 1133401412a9c05cbc2b697e692c578419c14a7c Mon Sep 17 00:00:00 2001 From: Xi Wang Date: Tue, 10 Mar 2020 19:18:03 +0800 Subject: RDMA/hns: Optimize base address table config flow for qp 
buffer Currently, before the qp is created, a page size needs to be calculated for the base address table to store all base addresses in the mtr. As a result, the parameter configuration of the mtr is complex. So integrate the process of calculating the base table page size into the hem related interface to simplify the process of using mtr. Link: https://lore.kernel.org/r/1583839084-31579-5-git-send-email-liweihang@huawei.com Signed-off-by: Xi Wang Signed-off-by: Weihang Li Reviewed-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_device.h | 4 --- drivers/infiniband/hw/hns/hns_roce_hem.c | 16 +++++++---- drivers/infiniband/hw/hns/hns_roce_qp.c | 42 +++++++---------------------- 3 files changed, 21 insertions(+), 41 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index b6ae12dd4024..f6b3cf6b95d6 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -669,10 +669,6 @@ struct hns_roce_qp { struct ib_umem *umem; struct hns_roce_mtt mtt; struct hns_roce_mtr mtr; - - /* this define must less than HNS_ROCE_MAX_BT_REGION */ -#define HNS_ROCE_WQE_REGION_MAX 3 - struct hns_roce_buf_region regions[HNS_ROCE_WQE_REGION_MAX]; int wqe_bt_pg_shift; u32 buff_size; diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c index e82215774032..8380d7187494 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hem.c +++ b/drivers/infiniband/hw/hns/hns_roce_hem.c @@ -1383,6 +1383,7 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev, void *cpu_base; u64 phy_base; int ret = 0; + int ba_num; int offset; int total; int step; @@ -1393,12 +1394,16 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev, if (root_hem) return 0; + ba_num = hns_roce_hem_list_calc_root_ba(regions, region_cnt, unit); + if (ba_num < 1) + return -ENOMEM; + INIT_LIST_HEAD(&temp_root); - total = r->offset; + offset = r->offset; /* indicate to last region */ r = ®ions[region_cnt - 1]; - root_hem = hem_list_alloc_item(hr_dev, total, r->offset + r->count - 1, - unit, true, 0); + root_hem = hem_list_alloc_item(hr_dev, offset, r->offset + r->count - 1, + ba_num, true, 0); if (!root_hem) return -ENOMEM; list_add(&root_hem->list, &temp_root); @@ -1410,7 +1415,7 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev, INIT_LIST_HEAD(&temp_list[i]); total = 0; - for (i = 0; i < region_cnt && total < unit; i++) { + for (i = 0; i < region_cnt && total < ba_num; i++) { r = ®ions[i]; if (!r->count) continue; @@ -1443,7 +1448,8 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev, /* if exist mid bt, link L1 to L0 */ list_for_each_entry_safe(hem, temp_hem, &hem_list->mid_bt[i][1], list) { - offset = hem->start / step * BA_BYTE_LEN; + offset = (hem->start - r->offset) / step * + BA_BYTE_LEN; hem_list_link_bt(hr_dev, cpu_base + offset, hem->dma_addr); total++; diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index c2ea489b0df5..7ba3db5a6192 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -579,30 +579,6 @@ static int split_wqe_buf_region(struct hns_roce_dev *hr_dev, return region_cnt; } -static int calc_wqe_bt_page_shift(struct hns_roce_dev *hr_dev, - struct hns_roce_buf_region *regions, - int region_cnt) -{ - int bt_pg_shift; - int ba_num; - int ret; - - bt_pg_shift = PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz; - - /* all root 
ba entries must in one bt page */ - do { - ba_num = (1 << bt_pg_shift) / BA_BYTE_LEN; - ret = hns_roce_hem_list_calc_root_ba(regions, region_cnt, - ba_num); - if (ret <= ba_num) - break; - - bt_pg_shift++; - } while (ret > ba_num); - - return bt_pg_shift - PAGE_SHIFT; -} - static int set_extend_sge_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) { @@ -768,7 +744,10 @@ static void free_rq_inline_buf(struct hns_roce_qp *hr_qp) static int map_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, u32 page_shift, bool is_user) { - dma_addr_t *buf_list[ARRAY_SIZE(hr_qp->regions)] = { NULL }; +/* WQE buffer include 3 parts: SQ, extend SGE and RQ. */ +#define HNS_ROCE_WQE_REGION_MAX 3 + struct hns_roce_buf_region regions[HNS_ROCE_WQE_REGION_MAX] = {}; + dma_addr_t *buf_list[HNS_ROCE_WQE_REGION_MAX] = {}; struct ib_device *ibdev = &hr_dev->ib_dev; struct hns_roce_buf_region *r; int region_count; @@ -776,18 +755,18 @@ static int map_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, int ret; int i; - region_count = split_wqe_buf_region(hr_dev, hr_qp, hr_qp->regions, - ARRAY_SIZE(hr_qp->regions), page_shift); + region_count = split_wqe_buf_region(hr_dev, hr_qp, regions, + ARRAY_SIZE(regions), page_shift); /* alloc a tmp list to store WQE buffers address */ - ret = hns_roce_alloc_buf_list(hr_qp->regions, buf_list, region_count); + ret = hns_roce_alloc_buf_list(regions, buf_list, region_count); if (ret) { ibdev_err(ibdev, "Failed to alloc WQE buffer list\n"); return ret; } for (i = 0; i < region_count; i++) { - r = &hr_qp->regions[i]; + r = ®ions[i]; if (is_user) buf_count = hns_roce_get_umem_bufs(hr_dev, buf_list[i], r->count, r->offset, hr_qp->umem, @@ -805,11 +784,10 @@ static int map_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, } } - hr_qp->wqe_bt_pg_shift = calc_wqe_bt_page_shift(hr_dev, hr_qp->regions, - region_count); + hr_qp->wqe_bt_pg_shift = hr_dev->caps.mtt_ba_pg_sz; hns_roce_mtr_init(&hr_qp->mtr, PAGE_SHIFT + hr_qp->wqe_bt_pg_shift, page_shift); - ret = hns_roce_mtr_attach(hr_dev, &hr_qp->mtr, buf_list, hr_qp->regions, + ret = hns_roce_mtr_attach(hr_dev, &hr_qp->mtr, buf_list, regions, region_count); if (ret) ibdev_err(ibdev, "Failed to attach WQE's mtr\n"); -- cgit v1.2.3-58-ga151 From d6a3627e311c5488539150614fedaea5e3103235 Mon Sep 17 00:00:00 2001 From: Xi Wang Date: Tue, 10 Mar 2020 19:18:04 +0800 Subject: RDMA/hns: Optimize wqe buffer set flow for post send Splits hns_roce_v2_post_send() into three sub-functions: set_rc_wqe(), set_ud_wqe() and update_sq_db() to simplify the code. 
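A skeleton of that split, with invented types and none of the real WQE handling, just to show how the main loop reduces to validation, dispatch and doorbell bookkeeping:

#include <stdio.h>

enum qp_type { QPT_RC, QPT_GSI };

struct send_req { int opcode; };
struct qp { enum qp_type type; unsigned int sq_head; };

/* Per-type helpers keep the WQE layout details out of the main loop. */
static int set_rc_wqe(struct qp *qp, const struct send_req *req, void *wqe)
{
	(void)qp; (void)wqe;
	printf("RC wqe for opcode %d\n", req->opcode);
	return 0;
}

static int set_ud_wqe(struct qp *qp, const struct send_req *req, void *wqe)
{
	(void)qp; (void)wqe;
	printf("UD wqe for opcode %d\n", req->opcode);
	return 0;
}

static void update_sq_db(struct qp *qp)
{
	printf("ring doorbell, sq_head=%u\n", qp->sq_head);
}

/* The post-send loop shrinks to dispatch plus bookkeeping. */
static int post_send(struct qp *qp, const struct send_req *reqs, int n)
{
	char wqe_buf[64];
	int ret = 0, i;

	for (i = 0; i < n && !ret; i++) {
		ret = (qp->type == QPT_GSI) ?
			set_ud_wqe(qp, &reqs[i], wqe_buf) :
			set_rc_wqe(qp, &reqs[i], wqe_buf);
		if (!ret)
			qp->sq_head++;
	}
	if (qp->sq_head)
		update_sq_db(qp);
	return ret;
}

int main(void)
{
	struct send_req reqs[2] = { { 1 }, { 2 } };
	struct qp qp = { QPT_RC, 0 };

	return post_send(&qp, reqs, 2);
}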
Link: https://lore.kernel.org/r/1583839084-31579-6-git-send-email-liweihang@huawei.com Signed-off-by: Xi Wang Signed-off-by: Weihang Li Reviewed-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 472 ++++++++++++++--------------- 1 file changed, 224 insertions(+), 248 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index c813c7419932..9bd8fbf2e96b 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -287,6 +287,214 @@ static int check_send_valid(struct hns_roce_dev *hr_dev, return 0; } +static inline int calc_wr_sge_num(const struct ib_send_wr *wr, u32 *sge_len) +{ + int valid_num = 0; + u32 len = 0; + int i; + + for (i = 0; i < wr->num_sge; i++) { + if (likely(wr->sg_list[i].length)) { + len += wr->sg_list[i].length; + valid_num++; + } + } + + *sge_len = len; + return valid_num; +} + +static inline int set_ud_wqe(struct hns_roce_qp *qp, + const struct ib_send_wr *wr, + void *wqe, unsigned int *sge_idx, + unsigned int owner_bit) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device); + struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah); + struct hns_roce_v2_ud_send_wqe *ud_sq_wqe = wqe; + unsigned int curr_idx = *sge_idx; + int valid_num_sge; + u32 msg_len = 0; + bool loopback; + u8 *smac; + + valid_num_sge = calc_wr_sge_num(wr, &msg_len); + memset(ud_sq_wqe, 0, sizeof(*ud_sq_wqe)); + + roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_0_M, + V2_UD_SEND_WQE_DMAC_0_S, ah->av.mac[0]); + roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_1_M, + V2_UD_SEND_WQE_DMAC_1_S, ah->av.mac[1]); + roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_2_M, + V2_UD_SEND_WQE_DMAC_2_S, ah->av.mac[2]); + roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_3_M, + V2_UD_SEND_WQE_DMAC_3_S, ah->av.mac[3]); + roce_set_field(ud_sq_wqe->byte_48, V2_UD_SEND_WQE_BYTE_48_DMAC_4_M, + V2_UD_SEND_WQE_BYTE_48_DMAC_4_S, ah->av.mac[4]); + roce_set_field(ud_sq_wqe->byte_48, V2_UD_SEND_WQE_BYTE_48_DMAC_5_M, + V2_UD_SEND_WQE_BYTE_48_DMAC_5_S, ah->av.mac[5]); + + /* MAC loopback */ + smac = (u8 *)hr_dev->dev_addr[qp->port]; + loopback = ether_addr_equal_unaligned(ah->av.mac, smac) ? 1 : 0; + + roce_set_bit(ud_sq_wqe->byte_40, + V2_UD_SEND_WQE_BYTE_40_LBI_S, loopback); + + roce_set_field(ud_sq_wqe->byte_4, + V2_UD_SEND_WQE_BYTE_4_OPCODE_M, + V2_UD_SEND_WQE_BYTE_4_OPCODE_S, + HNS_ROCE_V2_WQE_OP_SEND); + + ud_sq_wqe->msg_len = cpu_to_le32(msg_len); + + switch (wr->opcode) { + case IB_WR_SEND_WITH_IMM: + case IB_WR_RDMA_WRITE_WITH_IMM: + ud_sq_wqe->immtdata = cpu_to_le32(be32_to_cpu(wr->ex.imm_data)); + break; + default: + ud_sq_wqe->immtdata = 0; + break; + } + + /* Set sig attr */ + roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_CQE_S, + (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0); + + /* Set se attr */ + roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_SE_S, + (wr->send_flags & IB_SEND_SOLICITED) ? 
1 : 0); + + roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_OWNER_S, + owner_bit); + + roce_set_field(ud_sq_wqe->byte_16, V2_UD_SEND_WQE_BYTE_16_PD_M, + V2_UD_SEND_WQE_BYTE_16_PD_S, to_hr_pd(qp->ibqp.pd)->pdn); + + roce_set_field(ud_sq_wqe->byte_16, V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M, + V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge); + + roce_set_field(ud_sq_wqe->byte_20, + V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M, + V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S, + curr_idx & (qp->sge.sge_cnt - 1)); + + roce_set_field(ud_sq_wqe->byte_24, V2_UD_SEND_WQE_BYTE_24_UDPSPN_M, + V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, 0); + ud_sq_wqe->qkey = cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ? + qp->qkey : ud_wr(wr)->remote_qkey); + roce_set_field(ud_sq_wqe->byte_32, V2_UD_SEND_WQE_BYTE_32_DQPN_M, + V2_UD_SEND_WQE_BYTE_32_DQPN_S, ud_wr(wr)->remote_qpn); + + roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_VLAN_M, + V2_UD_SEND_WQE_BYTE_36_VLAN_S, ah->av.vlan_id); + roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M, + V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S, ah->av.hop_limit); + roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_TCLASS_M, + V2_UD_SEND_WQE_BYTE_36_TCLASS_S, ah->av.tclass); + roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M, + V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S, ah->av.flowlabel); + roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_SL_M, + V2_UD_SEND_WQE_BYTE_40_SL_S, ah->av.sl); + roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_PORTN_M, + V2_UD_SEND_WQE_BYTE_40_PORTN_S, qp->port); + + roce_set_bit(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S, + ah->av.vlan_en ? 1 : 0); + roce_set_field(ud_sq_wqe->byte_48, V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M, + V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S, ah->av.gid_index); + + memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN_V2); + + set_extend_sge(qp, wr, &curr_idx, valid_num_sge); + + *sge_idx = curr_idx; + + return 0; +} + +static inline int set_rc_wqe(struct hns_roce_qp *qp, + const struct ib_send_wr *wr, + void *wqe, unsigned int *sge_idx, + unsigned int owner_bit) +{ + struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe; + unsigned int curr_idx = *sge_idx; + int valid_num_sge; + u32 msg_len = 0; + int ret = 0; + + valid_num_sge = calc_wr_sge_num(wr, &msg_len); + memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe)); + + rc_sq_wqe->msg_len = cpu_to_le32(msg_len); + + switch (wr->opcode) { + case IB_WR_SEND_WITH_IMM: + case IB_WR_RDMA_WRITE_WITH_IMM: + rc_sq_wqe->immtdata = cpu_to_le32(be32_to_cpu(wr->ex.imm_data)); + break; + case IB_WR_SEND_WITH_INV: + rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey); + break; + default: + rc_sq_wqe->immtdata = 0; + break; + } + + roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_FENCE_S, + (wr->send_flags & IB_SEND_FENCE) ? 1 : 0); + + roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SE_S, + (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0); + + roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_CQE_S, + (wr->send_flags & IB_SEND_SIGNALED) ? 
1 : 0); + + roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OWNER_S, + owner_bit); + + wqe += sizeof(struct hns_roce_v2_rc_send_wqe); + switch (wr->opcode) { + case IB_WR_RDMA_READ: + case IB_WR_RDMA_WRITE: + case IB_WR_RDMA_WRITE_WITH_IMM: + rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey); + rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr); + break; + case IB_WR_LOCAL_INV: + roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SO_S, 1); + rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey); + break; + case IB_WR_REG_MR: + set_frmr_seg(rc_sq_wqe, wqe, reg_wr(wr)); + break; + case IB_WR_ATOMIC_CMP_AND_SWP: + case IB_WR_ATOMIC_FETCH_AND_ADD: + rc_sq_wqe->rkey = cpu_to_le32(atomic_wr(wr)->rkey); + rc_sq_wqe->va = cpu_to_le64(atomic_wr(wr)->remote_addr); + break; + default: + break; + } + + roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OPCODE_M, + V2_RC_SEND_WQE_BYTE_4_OPCODE_S, + to_hr_opcode(wr->opcode)); + + if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP || + wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) + set_atomic_seg(wr, wqe, rc_sq_wqe, valid_num_sge); + else if (wr->opcode != IB_WR_REG_MR) + ret = set_rwqe_data_seg(&qp->ibqp, wr, rc_sq_wqe, + wqe, &curr_idx, valid_num_sge); + + *sge_idx = curr_idx; + + return ret; +} + static inline void update_sq_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp) { @@ -324,23 +532,15 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, const struct ib_send_wr **bad_wr) { struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); - struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah); - struct hns_roce_v2_ud_send_wqe *ud_sq_wqe; - struct hns_roce_v2_rc_send_wqe *rc_sq_wqe; + struct ib_device *ibdev = &hr_dev->ib_dev; struct hns_roce_qp *qp = to_hr_qp(ibqp); - struct device *dev = hr_dev->dev; + unsigned long flags = 0; unsigned int owner_bit; unsigned int sge_idx; unsigned int wqe_idx; - unsigned long flags; - int valid_num_sge; void *wqe = NULL; - bool loopback; - u32 tmp_len; - u8 *smac; int nreq; int ret; - int i; spin_lock_irqsave(&qp->sq.lock, flags); @@ -363,8 +563,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1); if (unlikely(wr->num_sge > qp->sq.max_gs)) { - dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n", - wr->num_sge, qp->sq.max_gs); + ibdev_err(ibdev, "num_sge=%d > qp->sq.max_gs=%d\n", + wr->num_sge, qp->sq.max_gs); ret = -EINVAL; *bad_wr = wr; goto out; @@ -374,248 +574,24 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, qp->sq.wrid[wqe_idx] = wr->wr_id; owner_bit = ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1); - valid_num_sge = 0; - tmp_len = 0; - - for (i = 0; i < wr->num_sge; i++) { - if (likely(wr->sg_list[i].length)) { - tmp_len += wr->sg_list[i].length; - valid_num_sge++; - } - } /* Corresponding to the QP type, wqe process separately */ - if (ibqp->qp_type == IB_QPT_GSI) { - ud_sq_wqe = wqe; - memset(ud_sq_wqe, 0, sizeof(*ud_sq_wqe)); - - roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_0_M, - V2_UD_SEND_WQE_DMAC_0_S, ah->av.mac[0]); - roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_1_M, - V2_UD_SEND_WQE_DMAC_1_S, ah->av.mac[1]); - roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_2_M, - V2_UD_SEND_WQE_DMAC_2_S, ah->av.mac[2]); - roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_3_M, - V2_UD_SEND_WQE_DMAC_3_S, ah->av.mac[3]); - roce_set_field(ud_sq_wqe->byte_48, - V2_UD_SEND_WQE_BYTE_48_DMAC_4_M, - V2_UD_SEND_WQE_BYTE_48_DMAC_4_S, - ah->av.mac[4]); - roce_set_field(ud_sq_wqe->byte_48, - V2_UD_SEND_WQE_BYTE_48_DMAC_5_M, - 
V2_UD_SEND_WQE_BYTE_48_DMAC_5_S, - ah->av.mac[5]); - - /* MAC loopback */ - smac = (u8 *)hr_dev->dev_addr[qp->port]; - loopback = ether_addr_equal_unaligned(ah->av.mac, - smac) ? 1 : 0; - - roce_set_bit(ud_sq_wqe->byte_40, - V2_UD_SEND_WQE_BYTE_40_LBI_S, loopback); - - roce_set_field(ud_sq_wqe->byte_4, - V2_UD_SEND_WQE_BYTE_4_OPCODE_M, - V2_UD_SEND_WQE_BYTE_4_OPCODE_S, - HNS_ROCE_V2_WQE_OP_SEND); - - ud_sq_wqe->msg_len = - cpu_to_le32(le32_to_cpu(ud_sq_wqe->msg_len) + tmp_len); - - switch (wr->opcode) { - case IB_WR_SEND_WITH_IMM: - case IB_WR_RDMA_WRITE_WITH_IMM: - ud_sq_wqe->immtdata = - cpu_to_le32(be32_to_cpu(wr->ex.imm_data)); - break; - default: - ud_sq_wqe->immtdata = 0; - break; - } - - /* Set sig attr */ - roce_set_bit(ud_sq_wqe->byte_4, - V2_UD_SEND_WQE_BYTE_4_CQE_S, - (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0); - - /* Set se attr */ - roce_set_bit(ud_sq_wqe->byte_4, - V2_UD_SEND_WQE_BYTE_4_SE_S, - (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0); - - roce_set_bit(ud_sq_wqe->byte_4, - V2_UD_SEND_WQE_BYTE_4_OWNER_S, owner_bit); - - roce_set_field(ud_sq_wqe->byte_16, - V2_UD_SEND_WQE_BYTE_16_PD_M, - V2_UD_SEND_WQE_BYTE_16_PD_S, - to_hr_pd(ibqp->pd)->pdn); - - roce_set_field(ud_sq_wqe->byte_16, - V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M, - V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S, - valid_num_sge); - - roce_set_field(ud_sq_wqe->byte_20, - V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M, - V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S, - sge_idx & (qp->sge.sge_cnt - 1)); - - roce_set_field(ud_sq_wqe->byte_24, - V2_UD_SEND_WQE_BYTE_24_UDPSPN_M, - V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, 0); - ud_sq_wqe->qkey = - cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ? - qp->qkey : ud_wr(wr)->remote_qkey); - roce_set_field(ud_sq_wqe->byte_32, - V2_UD_SEND_WQE_BYTE_32_DQPN_M, - V2_UD_SEND_WQE_BYTE_32_DQPN_S, - ud_wr(wr)->remote_qpn); - - roce_set_field(ud_sq_wqe->byte_36, - V2_UD_SEND_WQE_BYTE_36_VLAN_M, - V2_UD_SEND_WQE_BYTE_36_VLAN_S, - ah->av.vlan_id); - roce_set_field(ud_sq_wqe->byte_36, - V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M, - V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S, - ah->av.hop_limit); - roce_set_field(ud_sq_wqe->byte_36, - V2_UD_SEND_WQE_BYTE_36_TCLASS_M, - V2_UD_SEND_WQE_BYTE_36_TCLASS_S, - ah->av.tclass); - roce_set_field(ud_sq_wqe->byte_40, - V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M, - V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S, - ah->av.flowlabel); - roce_set_field(ud_sq_wqe->byte_40, - V2_UD_SEND_WQE_BYTE_40_SL_M, - V2_UD_SEND_WQE_BYTE_40_SL_S, - ah->av.sl); - roce_set_field(ud_sq_wqe->byte_40, - V2_UD_SEND_WQE_BYTE_40_PORTN_M, - V2_UD_SEND_WQE_BYTE_40_PORTN_S, - qp->port); - - roce_set_bit(ud_sq_wqe->byte_40, - V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S, - ah->av.vlan_en ? 
1 : 0); - roce_set_field(ud_sq_wqe->byte_48, - V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M, - V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S, - hns_get_gid_index(hr_dev, qp->phy_port, - ah->av.gid_index)); - - memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], - GID_LEN_V2); - - set_extend_sge(qp, wr, &sge_idx, valid_num_sge); - } else if (ibqp->qp_type == IB_QPT_RC) { - rc_sq_wqe = wqe; - memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe)); - - rc_sq_wqe->msg_len = - cpu_to_le32(le32_to_cpu(rc_sq_wqe->msg_len) + tmp_len); - - switch (wr->opcode) { - case IB_WR_SEND_WITH_IMM: - case IB_WR_RDMA_WRITE_WITH_IMM: - rc_sq_wqe->immtdata = - cpu_to_le32(be32_to_cpu(wr->ex.imm_data)); - break; - case IB_WR_SEND_WITH_INV: - rc_sq_wqe->inv_key = - cpu_to_le32(wr->ex.invalidate_rkey); - break; - default: - rc_sq_wqe->immtdata = 0; - break; - } - - roce_set_bit(rc_sq_wqe->byte_4, - V2_RC_SEND_WQE_BYTE_4_FENCE_S, - (wr->send_flags & IB_SEND_FENCE) ? 1 : 0); - - roce_set_bit(rc_sq_wqe->byte_4, - V2_RC_SEND_WQE_BYTE_4_SE_S, - (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0); - - roce_set_bit(rc_sq_wqe->byte_4, - V2_RC_SEND_WQE_BYTE_4_CQE_S, - (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0); - - roce_set_bit(rc_sq_wqe->byte_4, - V2_RC_SEND_WQE_BYTE_4_OWNER_S, owner_bit); - - wqe += sizeof(struct hns_roce_v2_rc_send_wqe); - switch (wr->opcode) { - case IB_WR_RDMA_READ: - rc_sq_wqe->rkey = - cpu_to_le32(rdma_wr(wr)->rkey); - rc_sq_wqe->va = - cpu_to_le64(rdma_wr(wr)->remote_addr); - break; - case IB_WR_RDMA_WRITE: - rc_sq_wqe->rkey = - cpu_to_le32(rdma_wr(wr)->rkey); - rc_sq_wqe->va = - cpu_to_le64(rdma_wr(wr)->remote_addr); - break; - case IB_WR_RDMA_WRITE_WITH_IMM: - rc_sq_wqe->rkey = - cpu_to_le32(rdma_wr(wr)->rkey); - rc_sq_wqe->va = - cpu_to_le64(rdma_wr(wr)->remote_addr); - break; - case IB_WR_LOCAL_INV: - roce_set_bit(rc_sq_wqe->byte_4, - V2_RC_SEND_WQE_BYTE_4_SO_S, 1); - rc_sq_wqe->inv_key = - cpu_to_le32(wr->ex.invalidate_rkey); - break; - case IB_WR_REG_MR: - set_frmr_seg(rc_sq_wqe, wqe, reg_wr(wr)); - break; - case IB_WR_ATOMIC_CMP_AND_SWP: - rc_sq_wqe->rkey = - cpu_to_le32(atomic_wr(wr)->rkey); - rc_sq_wqe->va = - cpu_to_le64(atomic_wr(wr)->remote_addr); - break; - case IB_WR_ATOMIC_FETCH_AND_ADD: - rc_sq_wqe->rkey = - cpu_to_le32(atomic_wr(wr)->rkey); - rc_sq_wqe->va = - cpu_to_le64(atomic_wr(wr)->remote_addr); - break; - default: - break; - } - - roce_set_field(rc_sq_wqe->byte_4, - V2_RC_SEND_WQE_BYTE_4_OPCODE_M, - V2_RC_SEND_WQE_BYTE_4_OPCODE_S, - to_hr_opcode(wr->opcode)); - - if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP || - wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) - set_atomic_seg(wr, wqe, rc_sq_wqe, - valid_num_sge); - else if (wr->opcode != IB_WR_REG_MR) { - ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe, - wqe, &sge_idx, - valid_num_sge); - if (ret) { - *bad_wr = wr; - goto out; - } - } - } else { - dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type); + if (ibqp->qp_type == IB_QPT_GSI) + ret = set_ud_wqe(qp, wr, wqe, &sge_idx, owner_bit); + else if (ibqp->qp_type == IB_QPT_RC) + ret = set_rc_wqe(qp, wr, wqe, &sge_idx, owner_bit); + else { + ibdev_err(ibdev, "Illegal qp_type(0x%x)\n", + ibqp->qp_type); spin_unlock_irqrestore(&qp->sq.lock, flags); *bad_wr = wr; return -EOPNOTSUPP; } + + if (ret) { + *bad_wr = wr; + goto out; + } } out: -- cgit v1.2.3-58-ga151 From 4b34e23f4eaa236b918886fb90f468a6aa04997f Mon Sep 17 00:00:00 2001 From: "Sindhu, Devale" Date: Fri, 13 Mar 2020 16:44:06 -0500 Subject: i40iw: Report correct firmware version The driver uses a hard-coded value for FW version and reports an inconsistent FW version between 
ibv_devinfo and /sys/class/infiniband/i40iw/fw_ver. Retrieve the FW version via a Control QP (CQP) operation and report it consistently across sysfs and query device. Fixes: d37498417947 ("i40iw: add files for iwarp interface") Link: https://lore.kernel.org/r/20200313214406.2159-1-shiraz.saleem@intel.com Reported-by: Jarod Wilson Signed-off-by: Sindhu, Devale Signed-off-by: Shiraz Saleem Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/i40iw/i40iw.h | 22 ++++++- drivers/infiniband/hw/i40iw/i40iw_ctrl.c | 96 ++++++++++++++++++++++++++++++ drivers/infiniband/hw/i40iw/i40iw_d.h | 26 +++++++- drivers/infiniband/hw/i40iw/i40iw_main.c | 6 ++ drivers/infiniband/hw/i40iw/i40iw_p.h | 1 + drivers/infiniband/hw/i40iw/i40iw_status.h | 3 +- drivers/infiniband/hw/i40iw/i40iw_type.h | 12 ++++ drivers/infiniband/hw/i40iw/i40iw_verbs.c | 10 ++-- 8 files changed, 167 insertions(+), 9 deletions(-) diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h index 8feec35f95a7..3c62c9327a9c 100644 --- a/drivers/infiniband/hw/i40iw/i40iw.h +++ b/drivers/infiniband/hw/i40iw/i40iw.h @@ -67,7 +67,7 @@ #include "i40iw_user.h" #include "i40iw_puda.h" -#define I40IW_FW_VERSION 2 +#define I40IW_FW_VER_DEFAULT 2 #define I40IW_HW_VERSION 2 #define I40IW_ARP_ADD 1 @@ -325,6 +325,26 @@ struct i40iw_handler { struct i40e_info ldev; }; +/** + * i40iw_fw_major_ver - get firmware major version + * @dev: iwarp device + **/ +static inline u64 i40iw_fw_major_ver(struct i40iw_sc_dev *dev) +{ + return RS_64(dev->feature_info[I40IW_FEATURE_FW_INFO], + I40IW_FW_VER_MAJOR); +} + +/** + * i40iw_fw_minor_ver - get firmware minor version + * @dev: iwarp device + **/ +static inline u64 i40iw_fw_minor_ver(struct i40iw_sc_dev *dev) +{ + return RS_64(dev->feature_info[I40IW_FEATURE_FW_INFO], + I40IW_FW_VER_MINOR); +} + /** * to_iwdev - get device * @ibdev: ib device diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c index 4d841a3c68f3..e8b4b3743661 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c +++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c @@ -1021,6 +1021,95 @@ static enum i40iw_status_code i40iw_sc_commit_fpm_values( return ret_code; } +/** + * i40iw_sc_query_rdma_features_done - poll cqp for query features done + * @cqp: struct for cqp hw + */ +static enum i40iw_status_code +i40iw_sc_query_rdma_features_done(struct i40iw_sc_cqp *cqp) +{ + return i40iw_sc_poll_for_cqp_op_done( + cqp, I40IW_CQP_OP_QUERY_RDMA_FEATURES, NULL); +} + +/** + * i40iw_sc_query_rdma_features - query rdma features + * @cqp: struct for cqp hw + * @feat_mem: holds PA for HW to use + * @scratch: u64 saved to be used during cqp completion + */ +static enum i40iw_status_code +i40iw_sc_query_rdma_features(struct i40iw_sc_cqp *cqp, + struct i40iw_dma_mem *feat_mem, u64 scratch) +{ + u64 *wqe; + u64 header; + + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (wqe) + return I40IW_ERR_RING_FULL; + + set_64bit_val(wqe, 32, feat_mem->pa); + + header = LS_64(I40IW_CQP_OP_QUERY_RDMA_FEATURES, I40IW_CQPSQ_OPCODE) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) | feat_mem->size; + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QUERY RDMA FEATURES WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + i40iw_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * i40iw_get_rdma_features - get RDMA features + * @dev - sc device struct + */ +enum i40iw_status_code i40iw_get_rdma_features(struct i40iw_sc_dev *dev) +{ + enum i40iw_status_code ret_code; + struct 
i40iw_dma_mem feat_buf; + u64 temp; + u16 byte_idx, feat_type, feat_cnt; + + ret_code = i40iw_allocate_dma_mem(dev->hw, &feat_buf, + I40IW_FEATURE_BUF_SIZE, + I40IW_FEATURE_BUF_ALIGNMENT); + + if (ret_code) + return I40IW_ERR_NO_MEMORY; + + ret_code = i40iw_sc_query_rdma_features(dev->cqp, &feat_buf, 0); + if (!ret_code) + ret_code = i40iw_sc_query_rdma_features_done(dev->cqp); + + if (ret_code) + goto exit; + + get_64bit_val(feat_buf.va, 0, &temp); + feat_cnt = RS_64(temp, I40IW_FEATURE_CNT); + if (feat_cnt < I40IW_MAX_FEATURES) { + ret_code = I40IW_ERR_INVALID_FEAT_CNT; + goto exit; + } else if (feat_cnt > I40IW_MAX_FEATURES) { + i40iw_debug(dev, I40IW_DEBUG_CQP, + "features buf size insufficient\n"); + } + + for (byte_idx = 0, feat_type = 0; feat_type < I40IW_MAX_FEATURES; + feat_type++, byte_idx += 8) { + get_64bit_val((u64 *)feat_buf.va, byte_idx, &temp); + dev->feature_info[feat_type] = RS_64(temp, I40IW_FEATURE_INFO); + } +exit: + i40iw_free_dma_mem(dev->hw, &feat_buf); + + return ret_code; +} + /** * i40iw_sc_query_fpm_values_done - poll for cqp wqe completion for query fpm * @cqp: struct for cqp hw @@ -4265,6 +4354,13 @@ static enum i40iw_status_code i40iw_exec_cqp_cmd(struct i40iw_sc_dev *dev, true, I40IW_CQP_WAIT_EVENT); break; + case OP_QUERY_RDMA_FEATURES: + values_mem.pa = pcmdinfo->in.u.query_rdma_features.cap_pa; + values_mem.va = pcmdinfo->in.u.query_rdma_features.cap_va; + status = i40iw_sc_query_rdma_features( + pcmdinfo->in.u.query_rdma_features.cqp, &values_mem, + pcmdinfo->in.u.query_rdma_features.scratch); + break; default: status = I40IW_NOT_SUPPORTED; break; diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h index 6ddaeec87d2f..e8367d67575d 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_d.h +++ b/drivers/infiniband/hw/i40iw/i40iw_d.h @@ -403,7 +403,7 @@ #define I40IW_CQP_OP_MANAGE_ARP 0x0f #define I40IW_CQP_OP_MANAGE_VF_PBLE_BP 0x10 #define I40IW_CQP_OP_MANAGE_PUSH_PAGES 0x11 -#define I40IW_CQP_OP_MANAGE_PE_TEAM 0x12 +#define I40IW_CQP_OP_QUERY_RDMA_FEATURES 0x12 #define I40IW_CQP_OP_UPLOAD_CONTEXT 0x13 #define I40IW_CQP_OP_ALLOCATE_LOC_MAC_IP_TABLE_ENTRY 0x14 #define I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE 0x15 @@ -431,6 +431,24 @@ #define I40IW_CQP_OP_SHMC_PAGES_ALLOCATED 0x2b #define I40IW_CQP_OP_SET_HMC_RESOURCE_PROFILE 0x2d +#define I40IW_FEATURE_BUF_SIZE (8 * I40IW_MAX_FEATURES) + +#define I40IW_FW_VER_MINOR_SHIFT 0 +#define I40IW_FW_VER_MINOR_MASK \ + (0xffffULL << I40IW_FW_VER_MINOR_SHIFT) + +#define I40IW_FW_VER_MAJOR_SHIFT 16 +#define I40IW_FW_VER_MAJOR_MASK \ + (0xffffULL << I40IW_FW_VER_MAJOR_SHIFT) + +#define I40IW_FEATURE_INFO_SHIFT 0 +#define I40IW_FEATURE_INFO_MASK \ + (0xffffULL << I40IW_FEATURE_INFO_SHIFT) + +#define I40IW_FEATURE_CNT_SHIFT 32 +#define I40IW_FEATURE_CNT_MASK \ + (0xffffULL << I40IW_FEATURE_CNT_SHIFT) + #define I40IW_UDA_QPSQ_NEXT_HEADER_SHIFT 16 #define I40IW_UDA_QPSQ_NEXT_HEADER_MASK ((u64)0xff << I40IW_UDA_QPSQ_NEXT_HEADER_SHIFT) @@ -1529,7 +1547,8 @@ enum i40iw_alignment { I40IW_AEQ_ALIGNMENT = 0x100, I40IW_CEQ_ALIGNMENT = 0x100, I40IW_CQ0_ALIGNMENT = 0x100, - I40IW_SD_BUF_ALIGNMENT = 0x80 + I40IW_SD_BUF_ALIGNMENT = 0x80, + I40IW_FEATURE_BUF_ALIGNMENT = 0x8 }; #define I40IW_WQE_SIZE_64 64 @@ -1732,6 +1751,7 @@ enum i40iw_alignment { #define OP_REQUESTED_COMMANDS 31 #define OP_COMPLETED_COMMANDS 32 #define OP_GEN_AE 33 -#define OP_SIZE_CQP_STAT_ARRAY 34 +#define OP_QUERY_RDMA_FEATURES 34 +#define OP_SIZE_CQP_STAT_ARRAY 35 #endif diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c 
b/drivers/infiniband/hw/i40iw/i40iw_main.c index 84e1b52af15e..9c96ece5e7f3 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_main.c +++ b/drivers/infiniband/hw/i40iw/i40iw_main.c @@ -1683,6 +1683,12 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client) status = i40iw_setup_ceqs(iwdev, ldev); if (status) break; + + status = i40iw_get_rdma_features(dev); + if (status) + dev->feature_info[I40IW_FEATURE_FW_INFO] = + I40IW_FW_VER_DEFAULT; + iwdev->init_state = CEQ_CREATED; status = i40iw_initialize_hw_resources(iwdev); if (status) diff --git a/drivers/infiniband/hw/i40iw/i40iw_p.h b/drivers/infiniband/hw/i40iw/i40iw_p.h index 11d3a2a72100..4c429567bbb4 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_p.h +++ b/drivers/infiniband/hw/i40iw/i40iw_p.h @@ -105,6 +105,7 @@ enum i40iw_status_code i40iw_sc_static_hmc_pages_allocated(struct i40iw_sc_cqp * bool poll_registers); enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_count); +enum i40iw_status_code i40iw_get_rdma_features(struct i40iw_sc_dev *dev); void free_sd_mem(struct i40iw_sc_dev *dev); diff --git a/drivers/infiniband/hw/i40iw/i40iw_status.h b/drivers/infiniband/hw/i40iw/i40iw_status.h index f7013f11d808..d1c5855bd8c3 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_status.h +++ b/drivers/infiniband/hw/i40iw/i40iw_status.h @@ -95,7 +95,8 @@ enum i40iw_status_code { I40IW_ERR_INVALID_MAC_ADDR = -65, I40IW_ERR_BAD_STAG = -66, I40IW_ERR_CQ_COMPL_ERROR = -67, - I40IW_ERR_QUEUE_DESTROYED = -68 + I40IW_ERR_QUEUE_DESTROYED = -68, + I40IW_ERR_INVALID_FEAT_CNT = -69 }; #endif diff --git a/drivers/infiniband/hw/i40iw/i40iw_type.h b/drivers/infiniband/hw/i40iw/i40iw_type.h index adc8d2ec523d..54c323c40d96 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_type.h +++ b/drivers/infiniband/hw/i40iw/i40iw_type.h @@ -234,6 +234,11 @@ enum i40iw_hw_stats_index_64b { I40IW_HW_STAT_INDEX_MAX_64 }; +enum i40iw_feature_type { + I40IW_FEATURE_FW_INFO = 0, + I40IW_MAX_FEATURES +}; + struct i40iw_dev_hw_stats_offsets { u32 stats_offset_32[I40IW_HW_STAT_INDEX_MAX_32]; u32 stats_offset_64[I40IW_HW_STAT_INDEX_MAX_64]; @@ -501,6 +506,7 @@ struct i40iw_sc_dev { const struct i40iw_vf_cqp_ops *iw_vf_cqp_ops; struct i40iw_hmc_fpm_misc hmc_fpm_misc; + u64 feature_info[I40IW_MAX_FEATURES]; u32 debug_mask; u8 hmc_fn_id; bool is_pf; @@ -1340,6 +1346,12 @@ struct cqp_info { struct i40iw_sc_qp *qp; u64 scratch; } suspend_resume; + struct { + struct i40iw_sc_cqp *cqp; + void *cap_va; + u64 cap_pa; + u64 scratch; + } query_rdma_features; } u; }; diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index fa1292932b88..1b6fb1380961 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c @@ -64,7 +64,8 @@ static int i40iw_query_device(struct ib_device *ibdev, return -EINVAL; memset(props, 0, sizeof(*props)); ether_addr_copy((u8 *)&props->sys_image_guid, iwdev->netdev->dev_addr); - props->fw_ver = I40IW_FW_VERSION; + props->fw_ver = i40iw_fw_major_ver(&iwdev->sc_dev) << 32 | + i40iw_fw_minor_ver(&iwdev->sc_dev); props->device_cap_flags = iwdev->device_cap_flags; props->vendor_id = iwdev->ldev->pcidev->vendor; props->vendor_part_id = iwdev->ldev->pcidev->device; @@ -2534,10 +2535,11 @@ static const char * const i40iw_hw_stat_names[] = { static void i40iw_get_dev_fw_str(struct ib_device *dev, char *str) { - u32 firmware_version = I40IW_FW_VERSION; + struct i40iw_device *iwdev = to_iwdev(dev); - snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u", 
firmware_version, - (firmware_version & 0x000000ff)); + snprintf(str, IB_FW_VERSION_NAME_MAX, "%llu.%llu", + i40iw_fw_major_ver(&iwdev->sc_dev), + i40iw_fw_minor_ver(&iwdev->sc_dev)); } /** -- cgit v1.2.3-58-ga151 From 026ded373483c07983a6a30b70034ad0f3667a44 Mon Sep 17 00:00:00 2001 From: Lang Cheng Date: Thu, 12 Mar 2020 17:50:24 +0800 Subject: RDMA/hns: Check if depth of qp is 0 before configure Depth of qp shouldn't be allowed to be set to zero, after ensuring that, subsequent process can be simplified. And when qp is changed from reset to reset, the capability of minimum qp depth was used to identify hardware of hip06, it should be changed into a more readable form. Link: https://lore.kernel.org/r/1584006624-11846-1-git-send-email-liweihang@huawei.com Signed-off-by: Lang Cheng Signed-off-by: Weihang Li Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_qp.c | 77 ++++++++++++++------------------- 1 file changed, 33 insertions(+), 44 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 7ba3db5a6192..6317901c4b4f 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -359,52 +359,44 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap, bool is_user, int has_rq, struct hns_roce_qp *hr_qp) { - struct ib_device *ibdev = &hr_dev->ib_dev; u32 max_cnt; - /* Check the validity of QP support capacity */ - if (cap->max_recv_wr > hr_dev->caps.max_wqes || - cap->max_recv_sge > hr_dev->caps.max_rq_sg) { - ibdev_err(ibdev, "Failed to check max recv WR %d and SGE %d\n", - cap->max_recv_wr, cap->max_recv_sge); - return -EINVAL; - } - /* If srq exist, set zero for relative number of rq */ if (!has_rq) { hr_qp->rq.wqe_cnt = 0; hr_qp->rq.max_gs = 0; cap->max_recv_wr = 0; cap->max_recv_sge = 0; - } else { - if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge)) { - ibdev_err(ibdev, "Failed to check user max recv WR and SGE\n"); - return -EINVAL; - } - if (hr_dev->caps.min_wqes) - max_cnt = max(cap->max_recv_wr, hr_dev->caps.min_wqes); - else - max_cnt = cap->max_recv_wr; + return 0; + } - hr_qp->rq.wqe_cnt = roundup_pow_of_two(max_cnt); + /* Check the validity of QP support capacity */ + if (!cap->max_recv_wr || cap->max_recv_wr > hr_dev->caps.max_wqes || + cap->max_recv_sge > hr_dev->caps.max_rq_sg) { + ibdev_err(&hr_dev->ib_dev, "RQ config error, depth=%u, sge=%d\n", + cap->max_recv_wr, cap->max_recv_sge); + return -EINVAL; + } - if ((u32)hr_qp->rq.wqe_cnt > hr_dev->caps.max_wqes) { - ibdev_err(ibdev, "Failed to check RQ WQE count limit\n"); - return -EINVAL; - } + max_cnt = max(cap->max_recv_wr, hr_dev->caps.min_wqes); - max_cnt = max(1U, cap->max_recv_sge); - hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt); - if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE) - hr_qp->rq.wqe_shift = - ilog2(hr_dev->caps.max_rq_desc_sz); - else - hr_qp->rq.wqe_shift = - ilog2(hr_dev->caps.max_rq_desc_sz - * hr_qp->rq.max_gs); + hr_qp->rq.wqe_cnt = roundup_pow_of_two(max_cnt); + if ((u32)hr_qp->rq.wqe_cnt > hr_dev->caps.max_wqes) { + ibdev_err(&hr_dev->ib_dev, "rq depth %u too large\n", + cap->max_recv_wr); + return -EINVAL; } + max_cnt = max(1U, cap->max_recv_sge); + hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt); + + if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE) + hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz); + else + hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz * + hr_qp->rq.max_gs); + cap->max_recv_wr = hr_qp->rq.wqe_cnt; cap->max_recv_sge 
= hr_qp->rq.max_gs; @@ -613,29 +605,27 @@ static int set_extend_sge_param(struct hns_roce_dev *hr_dev, static int set_kernel_sq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp) { - struct device *dev = hr_dev->dev; u32 page_size; u32 max_cnt; int size; int ret; - if (cap->max_send_wr > hr_dev->caps.max_wqes || + if (!cap->max_send_wr || cap->max_send_wr > hr_dev->caps.max_wqes || cap->max_send_sge > hr_dev->caps.max_sq_sg || cap->max_inline_data > hr_dev->caps.max_sq_inline) { - dev_err(dev, "SQ WR or sge or inline data error!\n"); + ibdev_err(&hr_dev->ib_dev, + "SQ WR or sge or inline data error!\n"); return -EINVAL; } hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz); - if (hr_dev->caps.min_wqes) - max_cnt = max(cap->max_send_wr, hr_dev->caps.min_wqes); - else - max_cnt = cap->max_send_wr; + max_cnt = max(cap->max_send_wr, hr_dev->caps.min_wqes); hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt); if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) { - dev_err(dev, "while setting kernel sq size, sq.wqe_cnt too large\n"); + ibdev_err(&hr_dev->ib_dev, + "while setting kernel sq size, sq.wqe_cnt too large\n"); return -EINVAL; } @@ -648,7 +638,7 @@ static int set_kernel_sq_size(struct hns_roce_dev *hr_dev, ret = set_extend_sge_param(hr_dev, hr_qp); if (ret) { - dev_err(dev, "set extend sge parameters fail\n"); + ibdev_err(&hr_dev->ib_dev, "set extend sge parameters fail\n"); return ret; } @@ -1372,11 +1362,10 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, goto out; if (cur_state == new_state && cur_state == IB_QPS_RESET) { - if (hr_dev->caps.min_wqes) { + if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) { ret = -EPERM; ibdev_err(&hr_dev->ib_dev, - "cur_state=%d new_state=%d\n", cur_state, - new_state); + "RST2RST state is not supported\n"); } else { ret = 0; } -- cgit v1.2.3-58-ga151 From d61ba1b9aefe88f0c296e7c627d4946ac2a6c324 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Mon, 16 Mar 2020 17:04:54 -0400 Subject: IB/rdmavt: Delete unused routine This routine was obsoleted by the patch below. Delete it. 
Fixes: a2a074ef396f ("RDMA: Handle ucontext allocations by IB/core") Link: https://lore.kernel.org/r/20200316210454.7753.94689.stgit@awfm-01.aw.intel.com Reviewed-by: Kaike Wan Signed-off-by: Mike Marciniszyn Signed-off-by: Dennis Dalessandro Signed-off-by: Jason Gunthorpe --- drivers/infiniband/sw/rdmavt/vt.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c index 986265ad6e79..72b031ab7092 100644 --- a/drivers/infiniband/sw/rdmavt/vt.c +++ b/drivers/infiniband/sw/rdmavt/vt.c @@ -284,12 +284,6 @@ static int rvt_query_gid(struct ib_device *ibdev, u8 port_num, &gid->global.interface_id); } -static inline struct rvt_ucontext *to_iucontext(struct ib_ucontext - *ibucontext) -{ - return container_of(ibucontext, struct rvt_ucontext, ibucontext); -} - /** * rvt_alloc_ucontext - Allocate a user context * @uctx: Verbs context -- cgit v1.2.3-58-ga151 From 5ab17a24cb82a636e80c62386dcf1676cbcabaf1 Mon Sep 17 00:00:00 2001 From: Kaike Wan Date: Mon, 16 Mar 2020 17:05:00 -0400 Subject: IB/hfi1: Remove kobj from hfi1_devdata The field kobj was added to hfi1_devdata structure to manage the life time of the hfi1_devdata structure for PSM accesses: commit e11ffbd57520 ("IB/hfi1: Do not free hfi1 cdev parent structure early") Later another mechanism user_refcount/user_comp was introduced to provide the same functionality: commit acd7c8fe1493 ("IB/hfi1: Fix an Oops on pci device force remove") This patch will remove this kobj field, as it is no longer needed. Link: https://lore.kernel.org/r/20200316210500.7753.4145.stgit@awfm-01.aw.intel.com Reviewed-by: Mike Marciniszyn Signed-off-by: Kaike Wan Signed-off-by: Dennis Dalessandro Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hfi1/file_ops.c | 4 +--- drivers/infiniband/hw/hfi1/hfi.h | 2 -- drivers/infiniband/hw/hfi1/init.c | 26 ++++---------------------- 3 files changed, 5 insertions(+), 27 deletions(-) diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c index 259115886d35..e7fdd70c6e78 100644 --- a/drivers/infiniband/hw/hfi1/file_ops.c +++ b/drivers/infiniband/hw/hfi1/file_ops.c @@ -209,7 +209,6 @@ static int hfi1_file_open(struct inode *inode, struct file *fp) fd->mm = current->mm; mmgrab(fd->mm); fd->dd = dd; - kobject_get(&fd->dd->kobj); fp->private_data = fd; return 0; nomem: @@ -713,7 +712,6 @@ static int hfi1_file_close(struct inode *inode, struct file *fp) deallocate_ctxt(uctxt); done: mmdrop(fdata->mm); - kobject_put(&dd->kobj); if (atomic_dec_and_test(&dd->user_refcount)) complete(&dd->user_comp); @@ -1696,7 +1694,7 @@ static int user_add(struct hfi1_devdata *dd) snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit); ret = hfi1_cdev_init(dd->unit, name, &hfi1_file_ops, &dd->user_cdev, &dd->user_device, - true, &dd->kobj); + true, &dd->verbs_dev.rdi.ibdev.dev.kobj); if (ret) user_remove(dd); diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h index cae12f416ca0..b06c2594105a 100644 --- a/drivers/infiniband/hw/hfi1/hfi.h +++ b/drivers/infiniband/hw/hfi1/hfi.h @@ -1413,8 +1413,6 @@ struct hfi1_devdata { bool aspm_enabled; /* ASPM state: enabled/disabled */ struct rhashtable *sdma_rht; - struct kobject kobj; - /* vnic data */ struct hfi1_vnic_data vnic; /* Lock to protect IRQ SRC register access */ diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c index e3acda7a0800..3759d9233a1c 100644 --- a/drivers/infiniband/hw/hfi1/init.c +++ 
b/drivers/infiniband/hw/hfi1/init.c @@ -1198,13 +1198,13 @@ static void finalize_asic_data(struct hfi1_devdata *dd, } /** - * hfi1_clean_devdata - cleans up per-unit data structure + * hfi1_free_devdata - cleans up and frees per-unit data structure * @dd: pointer to a valid devdata structure * - * It cleans up all data structures set up by + * It cleans up and frees all data structures set up by * by hfi1_alloc_devdata(). */ -static void hfi1_clean_devdata(struct hfi1_devdata *dd) +void hfi1_free_devdata(struct hfi1_devdata *dd) { struct hfi1_asic_data *ad; unsigned long flags; @@ -1231,23 +1231,6 @@ static void hfi1_clean_devdata(struct hfi1_devdata *dd) rvt_dealloc_device(&dd->verbs_dev.rdi); } -static void __hfi1_free_devdata(struct kobject *kobj) -{ - struct hfi1_devdata *dd = - container_of(kobj, struct hfi1_devdata, kobj); - - hfi1_clean_devdata(dd); -} - -static struct kobj_type hfi1_devdata_type = { - .release = __hfi1_free_devdata, -}; - -void hfi1_free_devdata(struct hfi1_devdata *dd) -{ - kobject_put(&dd->kobj); -} - /** * hfi1_alloc_devdata - Allocate our primary per-unit data structure. * @pdev: Valid PCI device @@ -1333,11 +1316,10 @@ static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, goto bail; } - kobject_init(&dd->kobj, &hfi1_devdata_type); return dd; bail: - hfi1_clean_devdata(dd); + hfi1_free_devdata(dd); return ERR_PTR(ret); } -- cgit v1.2.3-58-ga151 From fa8a44f6b245ffa9d39d667ebbf81bebc61ca657 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Tue, 10 Mar 2020 11:14:30 +0200 Subject: RDMA/efa: Use in-kernel offsetofend() to check field availability Remove custom and duplicated variant of offsetofend(). Link: https://lore.kernel.org/r/20200310091438.248429-4-leon@kernel.org Signed-off-by: Leon Romanovsky Acked-by: Gal Pressman Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/efa/efa_verbs.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c index bf3120f140f7..5c57098a4aee 100644 --- a/drivers/infiniband/hw/efa/efa_verbs.c +++ b/drivers/infiniband/hw/efa/efa_verbs.c @@ -144,9 +144,6 @@ static inline bool is_rdma_read_cap(struct efa_dev *dev) return dev->dev_attr.device_caps & EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_READ_MASK; } -#define field_avail(x, fld, sz) (offsetof(typeof(x), fld) + \ - sizeof_field(typeof(x), fld) <= (sz)) - #define is_reserved_cleared(reserved) \ !memchr_inv(reserved, 0, sizeof(reserved)) @@ -609,7 +606,7 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd, if (err) goto err_out; - if (!field_avail(cmd, driver_qp_type, udata->inlen)) { + if (offsetofend(typeof(cmd), driver_qp_type) > udata->inlen) { ibdev_dbg(&dev->ibdev, "Incompatible ABI params, no input udata\n"); err = -EINVAL; @@ -896,7 +893,7 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, goto err_out; } - if (!field_avail(cmd, num_sub_cqs, udata->inlen)) { + if (offsetofend(typeof(cmd), num_sub_cqs) > udata->inlen) { ibdev_dbg(ibdev, "Incompatible ABI params, no input udata\n"); err = -EINVAL; -- cgit v1.2.3-58-ga151 From 5fb5186383bba84937887d73eb2f7ab8819e7a3e Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Mon, 23 Mar 2020 11:46:27 -0700 Subject: RDMA/siw: Suppress uninitialized var warning drivers/infiniband/sw/siw/siw_qp_rx.c: In function siw_proc_send: ./include/linux/spinlock.h:288:3: warning: flags may be used uninitialized in this function [-Wmaybe-uninitialized] _raw_spin_unlock_irqrestore(lock, flags); \ 
^~~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/infiniband/sw/siw/siw_qp_rx.c:335:16: note: flags was declared here unsigned long flags; Link: https://lore.kernel.org/r/20200323184627.ZWPg91uin%akpm@linux-foundation.org Signed-off-by: Andrew Morton Reviewed-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/sw/siw/siw_qp_rx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c index 9ccce2909ac4..650520244ed0 100644 --- a/drivers/infiniband/sw/siw/siw_qp_rx.c +++ b/drivers/infiniband/sw/siw/siw_qp_rx.c @@ -332,7 +332,7 @@ static struct siw_wqe *siw_rqe_get(struct siw_qp *qp) struct siw_srq *srq; struct siw_wqe *wqe = NULL; bool srq_event = false; - unsigned long flags; + unsigned long uninitialized_var(flags); srq = qp->srq; if (srq) { -- cgit v1.2.3-58-ga151 From a766fa84738f52f8227eb96aed4362725a82ccf2 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 20 Mar 2020 16:26:41 +0300 Subject: IB/mlx5: Fix a NULL vs IS_ERR() check The kzalloc() function returns NULL, not error pointers. Fixes: 30f2fe40c72b ("IB/mlx5: Introduce UAPIs to manage packet pacing") Link: https://lore.kernel.org/r/20200320132641.GF95012@mwanda Signed-off-by: Dan Carpenter Acked-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx5/qos.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/qos.c b/drivers/infiniband/hw/mlx5/qos.c index f822b06e7c9e..cac878a70edb 100644 --- a/drivers/infiniband/hw/mlx5/qos.c +++ b/drivers/infiniband/hw/mlx5/qos.c @@ -46,8 +46,8 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_PP_OBJ_ALLOC)( dev = to_mdev(c->ibucontext.device); pp_entry = kzalloc(sizeof(*pp_entry), GFP_KERNEL); - if (IS_ERR(pp_entry)) - return PTR_ERR(pp_entry); + if (!pp_entry) + return -ENOMEM; in_ctx = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_PP_OBJ_ALLOC_CTX); -- cgit v1.2.3-58-ga151 From b1d56fdcb66ebe6604166d71a26744d3cd03fecb Mon Sep 17 00:00:00 2001 From: Selvin Xavier Date: Fri, 13 Mar 2020 10:34:02 -0700 Subject: RDMA/bnxt_re: Wait for all the CQ events before freeing CQ data structures Destroy CQ command to firmware returns the num_cnq_events as a response. This indicates the driver about the number of CQ events generated for this CQ. Driver should wait for all these events before freeing the CQ host structures. Also, add routine to clean all the pending notification for the CQs getting destroyed. This avoids the possibility of accessing the CQ data structures after its freed. 
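The shape of that wait is a simple poll-and-drain loop. A minimal sketch of the idea follows; the function name, retry budget and sleep range here are illustrative, the real helpers are __wait_for_all_nqes() and clean_nq() in the diff below:

	/* Poll until the driver has seen as many notifications for this CQ
	 * as firmware reported in the destroy response, draining the NQ on
	 * each retry so stale entries stop referencing the dying CQ.
	 */
	static void wait_for_cq_events(struct bnxt_qplib_cq *cq, u16 fw_events)
	{
		int retries = 100;			/* illustrative budget */

		while (retries--) {
			if (cq->cnq_events == fw_events)
				return;			/* all events accounted for */
			usleep_range(50, 100);
			clean_nq(cq->nq, cq);		/* scrub pending notifications */
		}
	}
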
Fixes: 1ac5a4047975 ("RDMA/bnxt_re: Add bnxt_re RoCE driver") Link: https://lore.kernel.org/r/1584120842-3200-1-git-send-email-selvin.xavier@broadcom.com Signed-off-by: Selvin Xavier Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/bnxt_re/qplib_fp.c | 73 ++++++++++++++++++++++++++++++++ drivers/infiniband/hw/bnxt_re/qplib_fp.h | 1 + 2 files changed, 74 insertions(+) diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index 2ccf1c3708d1..899a5d2c100e 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -43,6 +43,7 @@ #include #include #include +#include #include #include @@ -231,6 +232,70 @@ fail: return rc; } +static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq) +{ + struct bnxt_qplib_hwq *hwq = &nq->hwq; + struct nq_base *nqe, **nq_ptr; + int budget = nq->budget; + u32 sw_cons, raw_cons; + uintptr_t q_handle; + u16 type; + + spin_lock_bh(&hwq->lock); + /* Service the NQ until empty */ + raw_cons = hwq->cons; + while (budget--) { + sw_cons = HWQ_CMP(raw_cons, hwq); + nq_ptr = (struct nq_base **)hwq->pbl_ptr; + nqe = &nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)]; + if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements)) + break; + + /* + * The valid test of the entry must be done first before + * reading any further. + */ + dma_rmb(); + + type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK; + switch (type) { + case NQ_BASE_TYPE_CQ_NOTIFICATION: + { + struct nq_cn *nqcne = (struct nq_cn *)nqe; + + q_handle = le32_to_cpu(nqcne->cq_handle_low); + q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high) + << 32; + if ((unsigned long)cq == q_handle) { + nqcne->cq_handle_low = 0; + nqcne->cq_handle_high = 0; + cq->cnq_events++; + } + break; + } + default: + break; + } + raw_cons++; + } + spin_unlock_bh(&hwq->lock); +} + +/* Wait for receiving all NQEs for this CQ and clean the NQEs associated with + * this CQ. 
+ */ +static void __wait_for_all_nqes(struct bnxt_qplib_cq *cq, u16 cnq_events) +{ + u32 retry_cnt = 100; + + while (retry_cnt--) { + if (cnq_events == cq->cnq_events) + return; + usleep_range(50, 100); + clean_nq(cq->nq, cq); + } +} + static void bnxt_qplib_service_nq(unsigned long data) { struct bnxt_qplib_nq *nq = (struct bnxt_qplib_nq *)data; @@ -244,6 +309,7 @@ static void bnxt_qplib_service_nq(unsigned long data) uintptr_t q_handle; u16 type; + spin_lock_bh(&hwq->lock); /* Service the NQ until empty */ raw_cons = hwq->cons; while (budget--) { @@ -269,6 +335,8 @@ static void bnxt_qplib_service_nq(unsigned long data) q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high) << 32; cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle; + if (!cq) + break; bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA); spin_lock_bh(&cq->compl_lock); @@ -278,6 +346,7 @@ static void bnxt_qplib_service_nq(unsigned long data) else dev_warn(&nq->pdev->dev, "cqn - type 0x%x not handled\n", type); + cq->cnq_events++; spin_unlock_bh(&cq->compl_lock); break; } @@ -316,6 +385,7 @@ static void bnxt_qplib_service_nq(unsigned long data) hwq->cons = raw_cons; bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true); } + spin_unlock_bh(&hwq->lock); } static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance) @@ -2003,6 +2073,7 @@ int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq) struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_destroy_cq req; struct creq_destroy_cq_resp resp; + u16 total_cnq_events; u16 cmd_flags = 0; int rc; @@ -2013,6 +2084,8 @@ int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq) (void *)&resp, NULL, 0); if (rc) return rc; + total_cnq_events = le16_to_cpu(resp.total_cnq_events); + __wait_for_all_nqes(cq, total_cnq_events); bnxt_qplib_free_hwq(res, &cq->hwq); return 0; } diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h index 9e8d1c5c3f4a..7edb70b6bb16 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h @@ -402,6 +402,7 @@ struct bnxt_qplib_cq { * of the same QP while manipulating the flush list. */ spinlock_t flush_lock; /* QP flush management */ + u16 cnq_events; }; #define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE sizeof(struct xrrq_irrq) -- cgit v1.2.3-58-ga151 From 2f49de21f3e96d869d00659581394f106fa66371 Mon Sep 17 00:00:00 2001 From: Xi Wang Date: Tue, 17 Mar 2020 11:55:23 +0800 Subject: RDMA/hns: Optimize mhop get flow for multi-hop addressing Splits hns_roce_table_mhop_get() into 4 sub-functions to make the code flow clearer. 
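The result is easiest to see from the top-level flow. Condensed from the diff below, with the error unwinding trimmed, the get path becomes:

	/* Translate obj into table indexes, allocate whatever BA/buffer
	 * chunks are missing, then program the base addresses into HW for
	 * context tables (QPC/MTPT/CQC/SRQC/SCCC).
	 */
	ret = calc_hem_config(hr_dev, table, obj, &mhop, &index);
	if (ret)
		return ret;

	mutex_lock(&table->mutex);
	if (!table->hem[index.buf]) {
		ret = alloc_mhop_hem(hr_dev, table, &mhop, &index);
		if (!ret && table->type < HEM_TYPE_MTT)
			ret = set_mhop_hem(hr_dev, table, obj, &mhop, &index);
	}
	if (!ret)
		++table->hem[index.buf]->refcount;
	mutex_unlock(&table->mutex);
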
Link: https://lore.kernel.org/r/1584417324-2255-2-git-send-email-liweihang@huawei.com Signed-off-by: Xi Wang Signed-off-by: Weihang Li Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_hem.c | 297 +++++++++++++++++++------------ 1 file changed, 182 insertions(+), 115 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c index 8380d7187494..cc557f126c82 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hem.c +++ b/drivers/infiniband/hw/hns/hns_roce_hem.c @@ -39,6 +39,16 @@ #define DMA_ADDR_T_SHIFT 12 #define BT_BA_SHIFT 32 +#define HEM_INDEX_BUF BIT(0) +#define HEM_INDEX_L0 BIT(1) +#define HEM_INDEX_L1 BIT(2) +struct hns_roce_hem_index { + u64 buf; + u64 l0; + u64 l1; + u32 inited; /* indicate which index is available */ +}; + bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type) { int hop_num = 0; @@ -434,178 +444,235 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev, return ret; } -static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev, - struct hns_roce_hem_table *table, - unsigned long obj) +static int calc_hem_config(struct hns_roce_dev *hr_dev, + struct hns_roce_hem_table *table, unsigned long obj, + struct hns_roce_hem_mhop *mhop, + struct hns_roce_hem_index *index) { - struct device *dev = hr_dev->dev; - struct hns_roce_hem_mhop mhop; - struct hns_roce_hem_iter iter; - u32 buf_chunk_size; - u32 bt_chunk_size; + struct ib_device *ibdev = &hr_dev->ib_dev; + unsigned long mhop_obj = obj; + u32 l0_idx, l1_idx, l2_idx; u32 chunk_ba_num; - u32 hop_num; - u32 size; u32 bt_num; - u64 hem_idx; - u64 bt_l1_idx = 0; - u64 bt_l0_idx = 0; - u64 bt_ba; - unsigned long mhop_obj = obj; - int bt_l1_allocated = 0; - int bt_l0_allocated = 0; - int step_idx; int ret; - ret = hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop); + ret = hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, mhop); if (ret) return ret; - buf_chunk_size = mhop.buf_chunk_size; - bt_chunk_size = mhop.bt_chunk_size; - hop_num = mhop.hop_num; - chunk_ba_num = bt_chunk_size / BA_BYTE_LEN; - - bt_num = hns_roce_get_bt_num(table->type, hop_num); + l0_idx = mhop->l0_idx; + l1_idx = mhop->l1_idx; + l2_idx = mhop->l2_idx; + chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN; + bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num); switch (bt_num) { case 3: - hem_idx = mhop.l0_idx * chunk_ba_num * chunk_ba_num + - mhop.l1_idx * chunk_ba_num + mhop.l2_idx; - bt_l1_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx; - bt_l0_idx = mhop.l0_idx; + index->l1 = l0_idx * chunk_ba_num + l1_idx; + index->l0 = l0_idx; + index->buf = l0_idx * chunk_ba_num * chunk_ba_num + + l1_idx * chunk_ba_num + l2_idx; break; case 2: - hem_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx; - bt_l0_idx = mhop.l0_idx; + index->l0 = l0_idx; + index->buf = l0_idx * chunk_ba_num + l1_idx; break; case 1: - hem_idx = mhop.l0_idx; + index->buf = l0_idx; break; default: - dev_err(dev, "Table %d not support hop_num = %d!\n", - table->type, hop_num); + ibdev_err(ibdev, "Table %d not support mhop.hop_num = %d!\n", + table->type, mhop->hop_num); return -EINVAL; } - if (unlikely(hem_idx >= table->num_hem)) { - dev_err(dev, "Table %d exceed hem limt idx = %llu,max = %lu!\n", - table->type, hem_idx, table->num_hem); + if (unlikely(index->buf >= table->num_hem)) { + ibdev_err(ibdev, "Table %d exceed hem limt idx %llu,max %lu!\n", + table->type, index->buf, table->num_hem); return -EINVAL; } - mutex_lock(&table->mutex); + return 0; +} - if (table->hem[hem_idx]) { - 
++table->hem[hem_idx]->refcount; - goto out; +static void free_mhop_hem(struct hns_roce_dev *hr_dev, + struct hns_roce_hem_table *table, + struct hns_roce_hem_mhop *mhop, + struct hns_roce_hem_index *index) +{ + u32 bt_size = mhop->bt_chunk_size; + struct device *dev = hr_dev->dev; + + if (index->inited & HEM_INDEX_BUF) { + hns_roce_free_hem(hr_dev, table->hem[index->buf]); + table->hem[index->buf] = NULL; + } + + if (index->inited & HEM_INDEX_L1) { + dma_free_coherent(dev, bt_size, table->bt_l1[index->l1], + table->bt_l1_dma_addr[index->l1]); + table->bt_l1[index->l1] = NULL; } + if (index->inited & HEM_INDEX_L0) { + dma_free_coherent(dev, bt_size, table->bt_l0[index->l0], + table->bt_l0_dma_addr[index->l0]); + table->bt_l0[index->l0] = NULL; + } +} + +static int alloc_mhop_hem(struct hns_roce_dev *hr_dev, + struct hns_roce_hem_table *table, + struct hns_roce_hem_mhop *mhop, + struct hns_roce_hem_index *index) +{ + u32 bt_size = mhop->bt_chunk_size; + struct device *dev = hr_dev->dev; + struct hns_roce_hem_iter iter; + gfp_t flag; + u64 bt_ba; + u32 size; + int ret; + /* alloc L1 BA's chunk */ - if ((check_whether_bt_num_3(table->type, hop_num) || - check_whether_bt_num_2(table->type, hop_num)) && - !table->bt_l0[bt_l0_idx]) { - table->bt_l0[bt_l0_idx] = dma_alloc_coherent(dev, bt_chunk_size, - &(table->bt_l0_dma_addr[bt_l0_idx]), + if ((check_whether_bt_num_3(table->type, mhop->hop_num) || + check_whether_bt_num_2(table->type, mhop->hop_num)) && + !table->bt_l0[index->l0]) { + table->bt_l0[index->l0] = dma_alloc_coherent(dev, bt_size, + &table->bt_l0_dma_addr[index->l0], GFP_KERNEL); - if (!table->bt_l0[bt_l0_idx]) { + if (!table->bt_l0[index->l0]) { ret = -ENOMEM; goto out; } - bt_l0_allocated = 1; - - /* set base address to hardware */ - if (table->type < HEM_TYPE_MTT) { - step_idx = 0; - if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) { - ret = -ENODEV; - dev_err(dev, "set HEM base address to HW failed!\n"); - goto err_dma_alloc_l1; - } - } + index->inited |= HEM_INDEX_L0; } /* alloc L2 BA's chunk */ - if (check_whether_bt_num_3(table->type, hop_num) && - !table->bt_l1[bt_l1_idx]) { - table->bt_l1[bt_l1_idx] = dma_alloc_coherent(dev, bt_chunk_size, - &(table->bt_l1_dma_addr[bt_l1_idx]), + if (check_whether_bt_num_3(table->type, mhop->hop_num) && + !table->bt_l1[index->l1]) { + table->bt_l1[index->l1] = dma_alloc_coherent(dev, bt_size, + &table->bt_l1_dma_addr[index->l1], GFP_KERNEL); - if (!table->bt_l1[bt_l1_idx]) { + if (!table->bt_l1[index->l1]) { ret = -ENOMEM; - goto err_dma_alloc_l1; - } - bt_l1_allocated = 1; - *(table->bt_l0[bt_l0_idx] + mhop.l1_idx) = - table->bt_l1_dma_addr[bt_l1_idx]; - - /* set base address to hardware */ - step_idx = 1; - if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) { - ret = -ENODEV; - dev_err(dev, "set HEM base address to HW failed!\n"); - goto err_alloc_hem_buf; + goto err_alloc_hem; } + index->inited |= HEM_INDEX_L1; + *(table->bt_l0[index->l0] + mhop->l1_idx) = + table->bt_l1_dma_addr[index->l1]; } /* * alloc buffer space chunk for QPC/MTPT/CQC/SRQC/SCCC. * alloc bt space chunk for MTT/CQE. */ - size = table->type < HEM_TYPE_MTT ? buf_chunk_size : bt_chunk_size; - table->hem[hem_idx] = hns_roce_alloc_hem(hr_dev, - size >> PAGE_SHIFT, - size, - (table->lowmem ? GFP_KERNEL : - GFP_HIGHUSER) | __GFP_NOWARN); - if (!table->hem[hem_idx]) { + size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size : bt_size; + flag = (table->lowmem ? 
GFP_KERNEL : GFP_HIGHUSER) | __GFP_NOWARN; + table->hem[index->buf] = hns_roce_alloc_hem(hr_dev, size >> PAGE_SHIFT, + size, flag); + if (!table->hem[index->buf]) { ret = -ENOMEM; - goto err_alloc_hem_buf; + goto err_alloc_hem; } - hns_roce_hem_first(table->hem[hem_idx], &iter); + index->inited |= HEM_INDEX_BUF; + hns_roce_hem_first(table->hem[index->buf], &iter); bt_ba = hns_roce_hem_addr(&iter); - if (table->type < HEM_TYPE_MTT) { - if (hop_num == 2) { - *(table->bt_l1[bt_l1_idx] + mhop.l2_idx) = bt_ba; - step_idx = 2; - } else if (hop_num == 1) { - *(table->bt_l0[bt_l0_idx] + mhop.l1_idx) = bt_ba; - step_idx = 1; - } else if (hop_num == HNS_ROCE_HOP_NUM_0) { - step_idx = 0; - } else { - ret = -EINVAL; - goto err_dma_alloc_l1; + if (mhop->hop_num == 2) + *(table->bt_l1[index->l1] + mhop->l2_idx) = bt_ba; + else if (mhop->hop_num == 1) + *(table->bt_l0[index->l0] + mhop->l1_idx) = bt_ba; + } else if (mhop->hop_num == 2) { + *(table->bt_l0[index->l0] + mhop->l1_idx) = bt_ba; + } + + return 0; +err_alloc_hem: + free_mhop_hem(hr_dev, table, mhop, index); +out: + return ret; +} + +static int set_mhop_hem(struct hns_roce_dev *hr_dev, + struct hns_roce_hem_table *table, unsigned long obj, + struct hns_roce_hem_mhop *mhop, + struct hns_roce_hem_index *index) +{ + struct ib_device *ibdev = &hr_dev->ib_dev; + int step_idx; + int ret; + + if (index->inited & HEM_INDEX_L0) { + ret = hr_dev->hw->set_hem(hr_dev, table, obj, 0); + if (ret) { + ibdev_err(ibdev, "set HEM step 0 failed!\n"); + goto out; } + } - /* set HEM base address to hardware */ - if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) { - ret = -ENODEV; - dev_err(dev, "set HEM base address to HW failed!\n"); - goto err_alloc_hem_buf; + if (index->inited & HEM_INDEX_L1) { + ret = hr_dev->hw->set_hem(hr_dev, table, obj, 1); + if (ret) { + ibdev_err(ibdev, "set HEM step 1 failed!\n"); + goto out; } - } else if (hop_num == 2) { - *(table->bt_l0[bt_l0_idx] + mhop.l1_idx) = bt_ba; } - ++table->hem[hem_idx]->refcount; - goto out; + if (index->inited & HEM_INDEX_BUF) { + if (mhop->hop_num == HNS_ROCE_HOP_NUM_0) + step_idx = 0; + else + step_idx = mhop->hop_num; + ret = hr_dev->hw->set_hem(hr_dev, table, obj, step_idx); + if (ret) + ibdev_err(ibdev, "set HEM step last failed!\n"); + } +out: + return ret; +} -err_alloc_hem_buf: - if (bt_l1_allocated) { - dma_free_coherent(dev, bt_chunk_size, table->bt_l1[bt_l1_idx], - table->bt_l1_dma_addr[bt_l1_idx]); - table->bt_l1[bt_l1_idx] = NULL; +static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev, + struct hns_roce_hem_table *table, + unsigned long obj) +{ + struct ib_device *ibdev = &hr_dev->ib_dev; + struct hns_roce_hem_index index = {}; + struct hns_roce_hem_mhop mhop = {}; + int ret; + + ret = calc_hem_config(hr_dev, table, obj, &mhop, &index); + if (ret) { + ibdev_err(ibdev, "calc hem config failed!\n"); + return ret; } -err_dma_alloc_l1: - if (bt_l0_allocated) { - dma_free_coherent(dev, bt_chunk_size, table->bt_l0[bt_l0_idx], - table->bt_l0_dma_addr[bt_l0_idx]); - table->bt_l0[bt_l0_idx] = NULL; + mutex_lock(&table->mutex); + if (table->hem[index.buf]) { + ++table->hem[index.buf]->refcount; + goto out; + } + + ret = alloc_mhop_hem(hr_dev, table, &mhop, &index); + if (ret) { + ibdev_err(ibdev, "alloc mhop hem failed!\n"); + goto out; + } + + /* set HEM base address to hardware */ + if (table->type < HEM_TYPE_MTT) { + ret = set_mhop_hem(hr_dev, table, obj, &mhop, &index); + if (ret) { + ibdev_err(ibdev, "set HEM address to HW failed!\n"); + goto err_alloc; + } } + 
++table->hem[index.buf]->refcount; + goto out; + +err_alloc: + free_mhop_hem(hr_dev, table, &mhop, &index); out: mutex_unlock(&table->mutex); return ret; -- cgit v1.2.3-58-ga151 From 38dcb35048fdc9b11530b23c092b250b2e8135a1 Mon Sep 17 00:00:00 2001 From: Xi Wang Date: Tue, 17 Mar 2020 11:55:24 +0800 Subject: RDMA/hns: Optimize mhop put flow for multi-hop addressing Optimizes hns_roce_table_mhop_get() by encapsulating code about clearing hem into clear_mhop_hem(), which will make the code flow clearer. Link: https://lore.kernel.org/r/1584417324-2255-3-git-send-email-liweihang@huawei.com Signed-off-by: Xi Wang Signed-off-by: Weihang Li Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_hem.c | 161 ++++++++++++------------------- 1 file changed, 61 insertions(+), 100 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c index cc557f126c82..c96378718f88 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hem.c +++ b/drivers/infiniband/hw/hns/hns_roce_hem.c @@ -94,25 +94,27 @@ bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type) return hop_num ? true : false; } -static bool hns_roce_check_hem_null(struct hns_roce_hem **hem, u64 start_idx, - u32 bt_chunk_num, u64 hem_max_num) +static bool hns_roce_check_hem_null(struct hns_roce_hem **hem, u64 hem_idx, + u32 bt_chunk_num, u64 hem_max_num) { + u64 start_idx = round_down(hem_idx, bt_chunk_num); u64 check_max_num = start_idx + bt_chunk_num; u64 i; for (i = start_idx; (i < check_max_num) && (i < hem_max_num); i++) - if (hem[i]) + if (i != hem_idx && hem[i]) return false; return true; } -static bool hns_roce_check_bt_null(u64 **bt, u64 start_idx, u32 bt_chunk_num) +static bool hns_roce_check_bt_null(u64 **bt, u64 ba_idx, u32 bt_chunk_num) { + u64 start_idx = round_down(ba_idx, bt_chunk_num); int i; for (i = 0; i < bt_chunk_num; i++) - if (bt[start_idx + i]) + if (i != ba_idx && bt[start_idx + i]) return false; return true; @@ -723,116 +725,75 @@ out: return ret; } +static void clear_mhop_hem(struct hns_roce_dev *hr_dev, + struct hns_roce_hem_table *table, unsigned long obj, + struct hns_roce_hem_mhop *mhop, + struct hns_roce_hem_index *index) +{ + struct ib_device *ibdev = &hr_dev->ib_dev; + u32 hop_num = mhop->hop_num; + u32 chunk_ba_num; + int step_idx; + + index->inited = HEM_INDEX_BUF; + chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN; + if (check_whether_bt_num_2(table->type, hop_num)) { + if (hns_roce_check_hem_null(table->hem, index->buf, + chunk_ba_num, table->num_hem)) + index->inited |= HEM_INDEX_L0; + } else if (check_whether_bt_num_3(table->type, hop_num)) { + if (hns_roce_check_hem_null(table->hem, index->buf, + chunk_ba_num, table->num_hem)) { + index->inited |= HEM_INDEX_L1; + if (hns_roce_check_bt_null(table->bt_l1, index->l1, + chunk_ba_num)) + index->inited |= HEM_INDEX_L0; + } + } + + if (table->type < HEM_TYPE_MTT) { + if (hop_num == HNS_ROCE_HOP_NUM_0) + step_idx = 0; + else + step_idx = hop_num; + + if (hr_dev->hw->clear_hem(hr_dev, table, obj, step_idx)) + ibdev_warn(ibdev, "Clear hop%d HEM failed.\n", hop_num); + + if (index->inited & HEM_INDEX_L1) + if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1)) + ibdev_warn(ibdev, "Clear HEM step 1 failed.\n"); + + if (index->inited & HEM_INDEX_L0) + if (hr_dev->hw->clear_hem(hr_dev, table, obj, 0)) + ibdev_warn(ibdev, "Clear HEM step 0 failed.\n"); + } +} + static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev, struct hns_roce_hem_table *table, unsigned long obj, int check_refcount) { - 
struct device *dev = hr_dev->dev; - struct hns_roce_hem_mhop mhop; - unsigned long mhop_obj = obj; - u32 bt_chunk_size; - u32 chunk_ba_num; - u32 hop_num; - u32 start_idx; - u32 bt_num; - u64 hem_idx; - u64 bt_l1_idx = 0; + struct ib_device *ibdev = &hr_dev->ib_dev; + struct hns_roce_hem_index index = {}; + struct hns_roce_hem_mhop mhop = {}; int ret; - ret = hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop); - if (ret) - return; - - bt_chunk_size = mhop.bt_chunk_size; - hop_num = mhop.hop_num; - chunk_ba_num = bt_chunk_size / BA_BYTE_LEN; - - bt_num = hns_roce_get_bt_num(table->type, hop_num); - switch (bt_num) { - case 3: - hem_idx = mhop.l0_idx * chunk_ba_num * chunk_ba_num + - mhop.l1_idx * chunk_ba_num + mhop.l2_idx; - bt_l1_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx; - break; - case 2: - hem_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx; - break; - case 1: - hem_idx = mhop.l0_idx; - break; - default: - dev_err(dev, "Table %d not support hop_num = %d!\n", - table->type, hop_num); + ret = calc_hem_config(hr_dev, table, obj, &mhop, &index); + if (ret) { + ibdev_err(ibdev, "calc hem config failed!\n"); return; } mutex_lock(&table->mutex); - - if (check_refcount && (--table->hem[hem_idx]->refcount > 0)) { + if (check_refcount && (--table->hem[index.buf]->refcount > 0)) { mutex_unlock(&table->mutex); return; } - if (table->type < HEM_TYPE_MTT && hop_num == 1) { - if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1)) - dev_warn(dev, "Clear HEM base address failed.\n"); - } else if (table->type < HEM_TYPE_MTT && hop_num == 2) { - if (hr_dev->hw->clear_hem(hr_dev, table, obj, 2)) - dev_warn(dev, "Clear HEM base address failed.\n"); - } else if (table->type < HEM_TYPE_MTT && - hop_num == HNS_ROCE_HOP_NUM_0) { - if (hr_dev->hw->clear_hem(hr_dev, table, obj, 0)) - dev_warn(dev, "Clear HEM base address failed.\n"); - } - - /* - * free buffer space chunk for QPC/MTPT/CQC/SRQC/SCCC. - * free bt space chunk for MTT/CQE. 
- */ - hns_roce_free_hem(hr_dev, table->hem[hem_idx]); - table->hem[hem_idx] = NULL; - - if (check_whether_bt_num_2(table->type, hop_num)) { - start_idx = mhop.l0_idx * chunk_ba_num; - if (hns_roce_check_hem_null(table->hem, start_idx, - chunk_ba_num, table->num_hem)) { - if (table->type < HEM_TYPE_MTT && - hr_dev->hw->clear_hem(hr_dev, table, obj, 0)) - dev_warn(dev, "Clear HEM base address failed.\n"); - - dma_free_coherent(dev, bt_chunk_size, - table->bt_l0[mhop.l0_idx], - table->bt_l0_dma_addr[mhop.l0_idx]); - table->bt_l0[mhop.l0_idx] = NULL; - } - } else if (check_whether_bt_num_3(table->type, hop_num)) { - start_idx = mhop.l0_idx * chunk_ba_num * chunk_ba_num + - mhop.l1_idx * chunk_ba_num; - if (hns_roce_check_hem_null(table->hem, start_idx, - chunk_ba_num, table->num_hem)) { - if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1)) - dev_warn(dev, "Clear HEM base address failed.\n"); - - dma_free_coherent(dev, bt_chunk_size, - table->bt_l1[bt_l1_idx], - table->bt_l1_dma_addr[bt_l1_idx]); - table->bt_l1[bt_l1_idx] = NULL; - - start_idx = mhop.l0_idx * chunk_ba_num; - if (hns_roce_check_bt_null(table->bt_l1, start_idx, - chunk_ba_num)) { - if (hr_dev->hw->clear_hem(hr_dev, table, obj, - 0)) - dev_warn(dev, "Clear HEM base address failed.\n"); - - dma_free_coherent(dev, bt_chunk_size, - table->bt_l0[mhop.l0_idx], - table->bt_l0_dma_addr[mhop.l0_idx]); - table->bt_l0[mhop.l0_idx] = NULL; - } - } - } + clear_mhop_hem(hr_dev, table, obj, &mhop, &index); + free_mhop_hem(hr_dev, table, &mhop, &index); mutex_unlock(&table->mutex); } -- cgit v1.2.3-58-ga151 From 1f3db161881b7e21efb149e0ae8152b79a571a8f Mon Sep 17 00:00:00 2001 From: Yishai Hadas Date: Wed, 18 Mar 2020 12:03:23 +0200 Subject: IB/mlx5: Generally use the WC auto detection test result Now that we have direct and reliable detection of WC support by the system, use is broadly. The only case we have to worry about is when the WC autodetector cannot run. For this fringe case generally assume that that WC is available, except in the well defined case of no PAT support on x86 which is tested by calling arch_can_pci_mmap_wc(). If WC is wrongly assumed to be available then it causes a small performance hit on paths in userspace that are tuned to the assumption that WC is available. There is no functional loss. It is very unlikely that any platforms exist that lack WC and also care about the micro optimization of WC in the fringe case where autodetection does not work. By removing the fairly bogus CONFIG tests this makes WC work broadly on all arches and all platforms. 
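The resulting policy can be summarized in a few lines. This is only a sketch: "wc_test_ran" is an invented flag standing in for "the self-test was able to run", while dev->wc_support and arch_can_pci_mmap_wc() are the real pieces used in the hunks below, where the mmap paths simply consult dev->wc_support:

	/* A sketch of the policy, not the driver's literal code. */
	static bool wc_usable(struct mlx5_ib_dev *dev)
	{
		if (dev->wc_test_ran)			/* illustrative flag */
			return dev->wc_support;		/* measured by the self-test */
		return arch_can_pci_mmap_wc();		/* e.g. x86 requires PAT */
	}

Wrongly assuming WC in the fallback case only costs a little performance in userspace paths tuned for WC; it cannot break correctness.
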
Link: https://lore.kernel.org/r/20200318100323.46659-1-leon@kernel.org Signed-off-by: Yishai Hadas Reviewed-by: Michael Guralnik Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx5/main.c | 16 ++++------------ drivers/infiniband/hw/mlx5/mem.c | 2 +- 2 files changed, 5 insertions(+), 13 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index b7151f8f8fa5..1f91d9e543e2 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -39,9 +39,6 @@ #include #include #include -#if defined(CONFIG_X86) -#include -#endif #include #include #include @@ -2145,14 +2142,6 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd, switch (cmd) { case MLX5_IB_MMAP_WC_PAGE: case MLX5_IB_MMAP_ALLOC_WC: -/* Some architectures don't support WC memory */ -#if defined(CONFIG_X86) - if (!pat_enabled()) - return -EPERM; -#elif !(defined(CONFIG_PPC) || (defined(CONFIG_ARM) && defined(CONFIG_MMU))) - return -EPERM; -#endif - /* fall through */ case MLX5_IB_MMAP_REGULAR_PAGE: /* For MLX5_IB_MMAP_REGULAR_PAGE do the best effort to get WC */ prot = pgprot_writecombine(vma->vm_page_prot); @@ -2298,9 +2287,12 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm command = get_command(vma->vm_pgoff); switch (command) { case MLX5_IB_MMAP_WC_PAGE: + case MLX5_IB_MMAP_ALLOC_WC: + if (!dev->wc_support) + return -EPERM; + fallthrough; case MLX5_IB_MMAP_NC_PAGE: case MLX5_IB_MMAP_REGULAR_PAGE: - case MLX5_IB_MMAP_ALLOC_WC: return uar_mmap(dev, command, vma, context); case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES: diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c index b90a3649e7d1..c19ec9fd8a63 100644 --- a/drivers/infiniband/hw/mlx5/mem.c +++ b/drivers/infiniband/hw/mlx5/mem.c @@ -316,7 +316,7 @@ int mlx5_ib_test_wc(struct mlx5_ib_dev *dev) if (!dev->mdev->roce.roce_en && port_type_cap == MLX5_CAP_PORT_TYPE_ETH) { if (mlx5_core_is_pf(dev->mdev)) - dev->wc_support = true; + dev->wc_support = arch_can_pci_mmap_wc(); return 0; } -- cgit v1.2.3-58-ga151 From 987914ab841e2ec281a35b54348ab109b4c0bb4e Mon Sep 17 00:00:00 2001 From: Avihai Horon Date: Wed, 18 Mar 2020 12:17:41 +0200 Subject: RDMA/cm: Update num_paths in cma_resolve_iboe_route error flow After a successful allocation of path_rec, num_paths is set to 1, but any error after such allocation will leave num_paths uncleared. This causes to de-referencing a NULL pointer later on. Hence, num_paths needs to be set back to 0 if such an error occurs. The following crash from syzkaller revealed it. 
kasan: CONFIG_KASAN_INLINE enabled kasan: GPF could be caused by NULL-ptr deref or user memory access general protection fault: 0000 [#1] SMP DEBUG_PAGEALLOC KASAN PTI CPU: 0 PID: 357 Comm: syz-executor060 Not tainted 4.18.0+ #311 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.11.0-0-g63451fca13-prebuilt.qemu-project.org 04/01/2014 RIP: 0010:ib_copy_path_rec_to_user+0x94/0x3e0 Code: f1 f1 f1 f1 c7 40 0c 00 00 f4 f4 65 48 8b 04 25 28 00 00 00 48 89 45 c8 31 c0 e8 d7 60 24 ff 48 8d 7b 4c 48 89 f8 48 c1 e8 03 <42> 0f b6 14 30 48 89 f8 83 e0 07 83 c0 03 38 d0 7c 08 84 d2 0f 85 RSP: 0018:ffff88006586f980 EFLAGS: 00010207 RAX: 0000000000000009 RBX: 0000000000000000 RCX: 1ffff1000d5fe475 RDX: ffff8800621e17c0 RSI: ffffffff820d45f9 RDI: 000000000000004c RBP: ffff88006586fa50 R08: ffffed000cb0df73 R09: ffffed000cb0df72 R10: ffff88006586fa70 R11: ffffed000cb0df73 R12: 1ffff1000cb0df30 R13: ffff88006586fae8 R14: dffffc0000000000 R15: ffff88006aff2200 FS: 00000000016fc880(0000) GS:ffff88006d000000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000000020000040 CR3: 0000000063fec000 CR4: 00000000000006b0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Call Trace: ? ib_copy_path_rec_from_user+0xcc0/0xcc0 ? __mutex_unlock_slowpath+0xfc/0x670 ? wait_for_completion+0x3b0/0x3b0 ? ucma_query_route+0x818/0xc60 ucma_query_route+0x818/0xc60 ? ucma_listen+0x1b0/0x1b0 ? sched_clock_cpu+0x18/0x1d0 ? sched_clock_cpu+0x18/0x1d0 ? ucma_listen+0x1b0/0x1b0 ? ucma_write+0x292/0x460 ucma_write+0x292/0x460 ? ucma_close_id+0x60/0x60 ? sched_clock_cpu+0x18/0x1d0 ? sched_clock_cpu+0x18/0x1d0 __vfs_write+0xf7/0x620 ? ucma_close_id+0x60/0x60 ? kernel_read+0x110/0x110 ? time_hardirqs_on+0x19/0x580 ? lock_acquire+0x18b/0x3a0 ? finish_task_switch+0xf3/0x5d0 ? _raw_spin_unlock_irq+0x29/0x40 ? _raw_spin_unlock_irq+0x29/0x40 ? finish_task_switch+0x1be/0x5d0 ? __switch_to_asm+0x34/0x70 ? __switch_to_asm+0x40/0x70 ? security_file_permission+0x172/0x1e0 vfs_write+0x192/0x460 ksys_write+0xc6/0x1a0 ? __ia32_sys_read+0xb0/0xb0 ? entry_SYSCALL_64_after_hwframe+0x3e/0xbe ? do_syscall_64+0x1d/0x470 do_syscall_64+0x9e/0x470 entry_SYSCALL_64_after_hwframe+0x49/0xbe Fixes: 3c86aa70bf67 ("RDMA/cm: Add RDMA CM support for IBoE devices") Link: https://lore.kernel.org/r/20200318101741.47211-1-leon@kernel.org Signed-off-by: Avihai Horon Reviewed-by: Maor Gottlieb Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cma.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 4df75ab4ee9d..26e6f7df247b 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -2978,6 +2978,7 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) err2: kfree(route->path_rec); route->path_rec = NULL; + route->num_paths = 0; err1: kfree(work); return ret; -- cgit v1.2.3-58-ga151 From 23ab5261e29b6b95803ee8dc919ae76e260b358d Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Thu, 19 Mar 2020 16:46:41 +0100 Subject: IB/hfi1: Use scnprintf() for avoiding potential buffer overflow Since snprintf() returns the would-be-output size instead of the actual output size, the succeeding calls may go beyond the given buffer limit. Fix it by replacing with scnprintf(). 
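A two-line sketch of the pitfall and of the fix, using the same accumulation pattern as the hfi1 code (buffer 'data' of 'datalen' bytes, running offset 'size'); the two calls are shown side by side for contrast, not meant to run together:

	/* Unsafe: snprintf() returns the length the output WOULD have needed,
	 * so after truncation 'size' can exceed 'datalen' and the next call
	 * starts writing past the end of the buffer. */
	size += snprintf(data + size, datalen - size, "0x%lx,", bit);

	/* Safe: scnprintf() returns only what was actually written, so the
	 * running offset can never overshoot the buffer. */
	size += scnprintf(data + size, datalen - size, "0x%lx,", bit);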
Link: https://lore.kernel.org/r/20200319154641.23711-1-tiwai@suse.de Signed-off-by: Takashi Iwai Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hfi1/fault.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/hw/hfi1/fault.c b/drivers/infiniband/hw/hfi1/fault.c index 986c12153e62..0dfbcfb048ca 100644 --- a/drivers/infiniband/hw/hfi1/fault.c +++ b/drivers/infiniband/hw/hfi1/fault.c @@ -222,11 +222,11 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf, while (bit < bitsize) { zero = find_next_zero_bit(fault->opcodes, bitsize, bit); if (zero - 1 != bit) - size += snprintf(data + size, + size += scnprintf(data + size, datalen - size - 1, "0x%lx-0x%lx,", bit, zero - 1); else - size += snprintf(data + size, + size += scnprintf(data + size, datalen - size - 1, "0x%lx,", bit); bit = find_next_bit(fault->opcodes, bitsize, zero); -- cgit v1.2.3-58-ga151 From d0ca2c35dd15a3d989955caec02beea02f735ee6 Mon Sep 17 00:00:00 2001 From: Zhu Yanjun Date: Mon, 23 Mar 2020 13:28:00 +0200 Subject: RDMA/rxe: Set sys_image_guid to be aligned with HW IB devices The RXE driver doesn't set sys_image_guid and user space applications see zeros. This causes to pyverbs tests to fail with the following traceback, because the IBTA spec requires to have valid sys_image_guid. Traceback (most recent call last): File "./tests/test_device.py", line 51, in test_query_device self.verify_device_attr(attr) File "./tests/test_device.py", line 74, in verify_device_attr assert attr.sys_image_guid != 0 In order to fix it, set sys_image_guid to be equal to node_guid. Before: 5: rxe0: ... node_guid 5054:00ff:feaa:5363 sys_image_guid 0000:0000:0000:0000 After: 5: rxe0: ... node_guid 5054:00ff:feaa:5363 sys_image_guid 5054:00ff:feaa:5363 Fixes: 8700e3e7c485 ("Soft RoCE driver") Link: https://lore.kernel.org/r/20200323112800.1444784-1-leon@kernel.org Signed-off-by: Zhu Yanjun Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/sw/rxe/rxe.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c index 0946a301a5c5..4afdd2e20883 100644 --- a/drivers/infiniband/sw/rxe/rxe.c +++ b/drivers/infiniband/sw/rxe/rxe.c @@ -103,6 +103,8 @@ static void rxe_init_device_param(struct rxe_dev *rxe) rxe->attr.max_fast_reg_page_list_len = RXE_MAX_FMR_PAGE_LIST_LEN; rxe->attr.max_pkeys = RXE_MAX_PKEYS; rxe->attr.local_ca_ack_delay = RXE_LOCAL_CA_ACK_DELAY; + addrconf_addr_eui48((unsigned char *)&rxe->attr.sys_image_guid, + rxe->ndev->dev_addr); rxe->max_ucontext = RXE_MAX_UCONTEXT; } -- cgit v1.2.3-58-ga151 From 26e28deb813eed908cf31a6052870b6493ec0e86 Mon Sep 17 00:00:00 2001 From: Sergey Gorenko Date: Wed, 25 Mar 2020 15:12:10 +0000 Subject: IB/iser: Always check sig MR before putting it to the free pool libiscsi calls the check_protection transport handler only if SCSI-Respose is received. So, the handler is never called if iSCSI task is completed for some other reason like a timeout or error handling. And this behavior looks correct. But the iSER does not handle this case properly because it puts a non-checked signature MR to the free pool. Then the error occurs at reusing the MR because it is not allowed to invalidate a signature MR without checking. This commit adds an extra check to iser_unreg_mem_fastreg(), which is a part of the task cleanup flow. Now the signature MR is checked there if it is needed. 
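As a companion sketch (not iser code), this is roughly what the skipped check reports on the normal SCSI-Response path; the field names come from struct ib_mr_status / struct ib_sig_err in <rdma/ib_verbs.h>, everything else is illustrative:

	struct ib_mr_status mr_status;

	if (!ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status) &&
	    (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS))
		pr_err("PI error type %d at offset %llu, key 0x%x\n",
		       mr_status.sig_err.err_type,
		       (unsigned long long)mr_status.sig_err.sig_err_offset,
		       mr_status.sig_err.key);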
Link: https://lore.kernel.org/r/20200325151210.1548-1-sergeygo@mellanox.com Signed-off-by: Sergey Gorenko Reviewed-by: Max Gurtovoy Signed-off-by: Jason Gunthorpe --- drivers/infiniband/ulp/iser/iser_memory.c | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index 7a8f24de3631..999ef7cdd05e 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c @@ -292,12 +292,27 @@ void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task, { struct iser_device *device = iser_task->iser_conn->ib_conn.device; struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir]; + struct iser_fr_desc *desc; + struct ib_mr_status mr_status; - if (!reg->mem_h) + desc = reg->mem_h; + if (!desc) return; - device->reg_ops->reg_desc_put(&iser_task->iser_conn->ib_conn, - reg->mem_h); + /* + * The signature MR cannot be invalidated and reused without checking. + * libiscsi calls the check_protection transport handler only if + * SCSI-Response is received. And the signature MR is not checked if + * the task is completed for some other reason like a timeout or error + * handling. That's why we must check the signature MR here before + * putting it to the free pool. + */ + if (unlikely(desc->sig_protected)) { + desc->sig_protected = false; + ib_check_mr_status(desc->rsc.sig_mr, IB_MR_CHECK_SIG_STATUS, + &mr_status); + } + device->reg_ops->reg_desc_put(&iser_task->iser_conn->ib_conn, desc); reg->mem_h = NULL; } -- cgit v1.2.3-58-ga151 From ae1c61489c7fa02f32cc45464dfb5612fa812b32 Mon Sep 17 00:00:00 2001 From: Lijun Ou Date: Fri, 20 Mar 2020 11:23:33 +0800 Subject: RDMA/hns: Unify format of prints Use ibdev_err/dbg/warn() instead of dev_err/dbg/warn(), and modify some prints into format of "failed to do something, ret = n". 
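In sketch form, the convention applied throughout the hunks below: report errors against the ib_device (so the message carries the RDMA device name rather than only the underlying PCI device) and spell out what failed together with the return code:

	/* before */
	dev_err(hr_dev->dev, "Something fail(%d)!\n", ret);

	/* after */
	ibdev_err(&hr_dev->ib_dev, "failed to do something, ret = %d\n", ret);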
Link: https://lore.kernel.org/r/1584674622-52773-2-git-send-email-liweihang@huawei.com Signed-off-by: Lijun Ou Signed-off-by: Weihang Li Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 156 ++++++++++++++++------------- drivers/infiniband/hw/hns/hns_roce_pd.c | 6 +- 2 files changed, 89 insertions(+), 73 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 9bd8fbf2e96b..94cb2984ad6d 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -266,21 +266,24 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr, static int check_send_valid(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) { + struct ib_device *ibdev = &hr_dev->ib_dev; struct ib_qp *ibqp = &hr_qp->ibqp; - struct device *dev = hr_dev->dev; if (unlikely(ibqp->qp_type != IB_QPT_RC && ibqp->qp_type != IB_QPT_GSI && ibqp->qp_type != IB_QPT_UD)) { - dev_err(dev, "Not supported QP(0x%x)type!\n", ibqp->qp_type); + ibdev_err(ibdev, "Not supported QP(0x%x)type!\n", + ibqp->qp_type); return -EOPNOTSUPP; } else if (unlikely(hr_qp->state == IB_QPS_RESET || hr_qp->state == IB_QPS_INIT || hr_qp->state == IB_QPS_RTR)) { - dev_err(dev, "Post WQE fail, QP state %d!\n", hr_qp->state); + ibdev_err(ibdev, "failed to post WQE, QP state %d!\n", + hr_qp->state); return -EINVAL; } else if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN)) { - dev_err(dev, "Post WQE fail, dev state %d!\n", hr_dev->state); + ibdev_err(ibdev, "failed to post WQE, dev state %d!\n", + hr_dev->state); return -EIO; } @@ -625,9 +628,9 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, { struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); + struct ib_device *ibdev = &hr_dev->ib_dev; struct hns_roce_v2_wqe_data_seg *dseg; struct hns_roce_rinl_sge *sge_list; - struct device *dev = hr_dev->dev; unsigned long flags; void *wqe = NULL; u32 wqe_idx; @@ -655,8 +658,8 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1); if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) { - dev_err(dev, "rq:num_sge=%d > qp->sq.max_gs=%d\n", - wr->num_sge, hr_qp->rq.max_gs); + ibdev_err(ibdev, "rq:num_sge=%d >= qp->sq.max_gs=%d\n", + wr->num_sge, hr_qp->rq.max_gs); ret = -EINVAL; *bad_wr = wr; goto out; @@ -2440,7 +2443,9 @@ static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port, ret = hns_roce_config_sgid_table(hr_dev, gid_index, gid, sgid_type); if (ret) - dev_err(hr_dev->dev, "Configure sgid table failed(%d)!\n", ret); + ibdev_err(&hr_dev->ib_dev, + "failed to configure sgid table, ret = %d!\n", + ret); return ret; } @@ -3022,8 +3027,9 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq, if (!*cur_qp || (qpn & HNS_ROCE_V2_CQE_QPN_MASK) != (*cur_qp)->qpn) { hr_qp = __hns_roce_qp_lookup(hr_dev, qpn); if (unlikely(!hr_qp)) { - dev_err(hr_dev->dev, "CQ %06lx with entry for unknown QPN %06x\n", - hr_cq->cqn, (qpn & HNS_ROCE_V2_CQE_QPN_MASK)); + ibdev_err(&hr_dev->ib_dev, + "CQ %06lx with entry for unknown QPN %06x\n", + hr_cq->cqn, qpn & HNS_ROCE_V2_CQE_QPN_MASK); return -EINVAL; } *cur_qp = hr_qp; @@ -3125,8 +3131,8 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq, */ if (wc->status != IB_WC_SUCCESS && wc->status != IB_WC_WR_FLUSH_ERR) { - dev_err(hr_dev->dev, "error cqe status is: 0x%x\n", - status & HNS_ROCE_V2_CQE_STATUS_MASK); + ibdev_err(&hr_dev->ib_dev, "error cqe status 
is: 0x%x\n", + status & HNS_ROCE_V2_CQE_STATUS_MASK); if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag)) init_flush_work(hr_dev, hr_qp); @@ -3974,21 +3980,22 @@ static bool check_wqe_rq_mtt_count(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, int mtt_cnt, u32 page_size) { - struct device *dev = hr_dev->dev; + struct ib_device *ibdev = &hr_dev->ib_dev; if (hr_qp->rq.wqe_cnt < 1) return true; if (mtt_cnt < 1) { - dev_err(dev, "qp(0x%lx) rqwqe buf ba find failed\n", - hr_qp->qpn); + ibdev_err(ibdev, "failed to find RQWQE buf ba of QP(0x%lx)\n", + hr_qp->qpn); return false; } if (mtt_cnt < MTT_MIN_COUNT && (hr_qp->rq.offset + page_size) < hr_qp->buff_size) { - dev_err(dev, "qp(0x%lx) next rqwqe buf ba find failed\n", - hr_qp->qpn); + ibdev_err(ibdev, + "failed to find next RQWQE buf ba of QP(0x%lx)\n", + hr_qp->qpn); return false; } @@ -4003,7 +4010,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr); struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); - struct device *dev = hr_dev->dev; + struct ib_device *ibdev = &hr_dev->ib_dev; u64 mtts[MTT_MIN_COUNT] = { 0 }; dma_addr_t dma_handle_3; dma_addr_t dma_handle_2; @@ -4030,7 +4037,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table, hr_qp->qpn, &dma_handle_2); if (!mtts_2) { - dev_err(dev, "qp irrl_table find failed\n"); + ibdev_err(ibdev, "failed to find QP irrl_table\n"); return -EINVAL; } @@ -4038,12 +4045,13 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, mtts_3 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table, hr_qp->qpn, &dma_handle_3); if (!mtts_3) { - dev_err(dev, "qp trrl_table find failed\n"); + ibdev_err(ibdev, "failed to find QP trrl_table\n"); return -EINVAL; } if (attr_mask & IB_QP_ALT_PATH) { - dev_err(dev, "INIT2RTR attr_mask (0x%x) error\n", attr_mask); + ibdev_err(ibdev, "INIT2RTR attr_mask (0x%x) error\n", + attr_mask); return -EINVAL; } @@ -4246,7 +4254,7 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, { struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); - struct device *dev = hr_dev->dev; + struct ib_device *ibdev = &hr_dev->ib_dev; u64 sge_cur_blk = 0; u64 sq_cur_blk = 0; u32 page_size; @@ -4255,7 +4263,8 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, /* Search qp buf's mtts */ count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, &sq_cur_blk, 1, NULL); if (count < 1) { - dev_err(dev, "qp(0x%lx) buf pa find failed\n", hr_qp->qpn); + ibdev_err(ibdev, "failed to find buf pa of QP(0x%lx)\n", + hr_qp->qpn); return -EINVAL; } @@ -4265,8 +4274,8 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, hr_qp->sge.offset / page_size, &sge_cur_blk, 1, NULL); if (count < 1) { - dev_err(dev, "qp(0x%lx) sge pa find failed\n", - hr_qp->qpn); + ibdev_err(ibdev, "failed to find sge pa of QP(0x%lx)\n", + hr_qp->qpn); return -EINVAL; } } @@ -4274,7 +4283,7 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, /* Not support alternate path and path migration */ if ((attr_mask & IB_QP_ALT_PATH) || (attr_mask & IB_QP_PATH_MIG_STATE)) { - dev_err(dev, "RTR2RTS attr_mask (0x%x)error\n", attr_mask); + ibdev_err(ibdev, "RTR2RTS attr_mask (0x%x)error\n", attr_mask); return -EINVAL; } @@ -4392,6 +4401,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp, const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr); struct hns_roce_dev *hr_dev = 
to_hr_dev(ibqp->device); struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); + struct ib_device *ibdev = &hr_dev->ib_dev; const struct ib_gid_attr *gid_attr = NULL; int is_roce_protocol; u16 vlan_id = 0xffff; @@ -4433,13 +4443,13 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp, V2_QPC_BYTE_24_VLAN_ID_S, 0); if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) { - dev_err(hr_dev->dev, "sgid_index(%u) too large. max is %d\n", - grh->sgid_index, hr_dev->caps.gid_table_len[hr_port]); + ibdev_err(ibdev, "sgid_index(%u) too large. max is %d\n", + grh->sgid_index, hr_dev->caps.gid_table_len[hr_port]); return -EINVAL; } if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) { - dev_err(hr_dev->dev, "ah attr is not RDMA roce type\n"); + ibdev_err(ibdev, "ah attr is not RDMA roce type\n"); return -EINVAL; } @@ -4517,7 +4527,7 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp, /* Nothing */ ; } else { - dev_err(hr_dev->dev, "Illegal state for QP!\n"); + ibdev_err(&hr_dev->ib_dev, "Illegal state for QP!\n"); ret = -EINVAL; goto out; } @@ -4552,8 +4562,8 @@ static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp, V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S, 0); } else { - dev_warn(hr_dev->dev, - "Local ACK timeout shall be 0 to 30.\n"); + ibdev_warn(&hr_dev->ib_dev, + "Local ACK timeout shall be 0 to 30.\n"); } } @@ -4721,7 +4731,7 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, struct hns_roce_v2_qp_context ctx[2]; struct hns_roce_v2_qp_context *context = ctx; struct hns_roce_v2_qp_context *qpc_mask = ctx + 1; - struct device *dev = hr_dev->dev; + struct ib_device *ibdev = &hr_dev->ib_dev; unsigned long sq_flag = 0; unsigned long rq_flag = 0; int ret; @@ -4785,7 +4795,7 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, /* SW pass context to HW */ ret = hns_roce_v2_qp_modify(hr_dev, ctx, hr_qp); if (ret) { - dev_err(dev, "hns_roce_qp_modify failed(%d)\n", ret); + ibdev_err(ibdev, "failed to modify QP, ret = %d\n", ret); goto out; } @@ -4842,10 +4852,8 @@ static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev, ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0, HNS_ROCE_CMD_QUERY_QPC, HNS_ROCE_CMD_TIMEOUT_MSECS); - if (ret) { - dev_err(hr_dev->dev, "QUERY QP cmd process error\n"); + if (ret) goto out; - } memcpy(hr_context, mailbox->buf, sizeof(*hr_context)); @@ -4861,7 +4869,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); struct hns_roce_v2_qp_context context = {}; - struct device *dev = hr_dev->dev; + struct ib_device *ibdev = &hr_dev->ib_dev; int tmp_qp_state; int state; int ret; @@ -4879,7 +4887,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, &context); if (ret) { - dev_err(dev, "query qpc error\n"); + ibdev_err(ibdev, "failed to query QPC, ret = %d\n", ret); ret = -EINVAL; goto out; } @@ -4888,7 +4896,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S); tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state); if (tmp_qp_state == -1) { - dev_err(dev, "Illegal ib_qp_state\n"); + ibdev_err(ibdev, "Illegal ib_qp_state\n"); ret = -EINVAL; goto out; } @@ -4986,8 +4994,8 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, struct ib_udata *udata) { - struct hns_roce_cq *send_cq, *recv_cq; struct ib_device *ibdev = 
&hr_dev->ib_dev; + struct hns_roce_cq *send_cq, *recv_cq; unsigned long flags; int ret = 0; @@ -4996,7 +5004,9 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev, ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state, IB_QPS_RESET); if (ret) - ibdev_err(ibdev, "modify QP to Reset failed.\n"); + ibdev_err(ibdev, + "failed to modify QP to RST, ret = %d\n", + ret); } send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL; @@ -5033,7 +5043,8 @@ static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata); if (ret) - ibdev_err(&hr_dev->ib_dev, "Destroy qp 0x%06lx failed(%d)\n", + ibdev_err(&hr_dev->ib_dev, + "failed to destroy QP 0x%06lx, ret = %d\n", hr_qp->qpn, ret); hns_roce_qp_destroy(hr_dev, hr_qp, udata); @@ -5042,8 +5053,9 @@ static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) } static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev, - struct hns_roce_qp *hr_qp) + struct hns_roce_qp *hr_qp) { + struct ib_device *ibdev = &hr_dev->ib_dev; struct hns_roce_sccc_clr_done *resp; struct hns_roce_sccc_clr *clr; struct hns_roce_cmq_desc desc; @@ -5055,7 +5067,7 @@ static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev, hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_RESET_SCCC, false); ret = hns_roce_cmq_send(hr_dev, &desc, 1); if (ret) { - dev_err(hr_dev->dev, "Reset SCC ctx failed(%d)\n", ret); + ibdev_err(ibdev, "failed to reset SCC ctx, ret = %d\n", ret); goto out; } @@ -5065,7 +5077,7 @@ static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev, clr->qpn = cpu_to_le32(hr_qp->qpn); ret = hns_roce_cmq_send(hr_dev, &desc, 1); if (ret) { - dev_err(hr_dev->dev, "Clear SCC ctx failed(%d)\n", ret); + ibdev_err(ibdev, "failed to clear SCC ctx, ret = %d\n", ret); goto out; } @@ -5076,7 +5088,8 @@ static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev, HNS_ROCE_OPC_QUERY_SCCC, true); ret = hns_roce_cmq_send(hr_dev, &desc, 1); if (ret) { - dev_err(hr_dev->dev, "Query clr cmq failed(%d)\n", ret); + ibdev_err(ibdev, "failed to query clr cmq, ret = %d\n", + ret); goto out; } @@ -5086,7 +5099,7 @@ static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev, msleep(20); } - dev_err(hr_dev->dev, "Query SCC clr done flag overtime.\n"); + ibdev_err(ibdev, "Query SCC clr done flag overtime.\n"); ret = -ETIMEDOUT; out: @@ -5130,7 +5143,9 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) HNS_ROCE_CMD_TIMEOUT_MSECS); hns_roce_free_cmd_mailbox(hr_dev, mailbox); if (ret) - dev_err(hr_dev->dev, "MODIFY CQ Failed to cmd mailbox.\n"); + ibdev_err(&hr_dev->ib_dev, + "failed to process cmd when modifying CQ, ret = %d\n", + ret); return ret; } @@ -5139,54 +5154,54 @@ static void hns_roce_irq_work_handle(struct work_struct *work) { struct hns_roce_work *irq_work = container_of(work, struct hns_roce_work, work); - struct device *dev = irq_work->hr_dev->dev; + struct ib_device *ibdev = &irq_work->hr_dev->ib_dev; u32 qpn = irq_work->qpn; u32 cqn = irq_work->cqn; switch (irq_work->event_type) { case HNS_ROCE_EVENT_TYPE_PATH_MIG: - dev_info(dev, "Path migrated succeeded.\n"); + ibdev_info(ibdev, "Path migrated succeeded.\n"); break; case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED: - dev_warn(dev, "Path migration failed.\n"); + ibdev_warn(ibdev, "Path migration failed.\n"); break; case HNS_ROCE_EVENT_TYPE_COMM_EST: break; case HNS_ROCE_EVENT_TYPE_SQ_DRAINED: - dev_warn(dev, "Send 
queue drained.\n"); + ibdev_warn(ibdev, "Send queue drained.\n"); break; case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR: - dev_err(dev, "Local work queue 0x%x catas error, sub_type:%d\n", - qpn, irq_work->sub_type); + ibdev_err(ibdev, "Local work queue 0x%x catast error, sub_event type is: %d\n", + qpn, irq_work->sub_type); break; case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR: - dev_err(dev, "Invalid request local work queue 0x%x error.\n", - qpn); + ibdev_err(ibdev, "Invalid request local work queue 0x%x error.\n", + qpn); break; case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR: - dev_err(dev, "Local access violation work queue 0x%x error, sub_type:%d\n", - qpn, irq_work->sub_type); + ibdev_err(ibdev, "Local access violation work queue 0x%x error, sub_event type is: %d\n", + qpn, irq_work->sub_type); break; case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH: - dev_warn(dev, "SRQ limit reach.\n"); + ibdev_warn(ibdev, "SRQ limit reach.\n"); break; case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH: - dev_warn(dev, "SRQ last wqe reach.\n"); + ibdev_warn(ibdev, "SRQ last wqe reach.\n"); break; case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR: - dev_err(dev, "SRQ catas error.\n"); + ibdev_err(ibdev, "SRQ catas error.\n"); break; case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR: - dev_err(dev, "CQ 0x%x access err.\n", cqn); + ibdev_err(ibdev, "CQ 0x%x access err.\n", cqn); break; case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW: - dev_warn(dev, "CQ 0x%x overflow\n", cqn); + ibdev_warn(ibdev, "CQ 0x%x overflow\n", cqn); break; case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW: - dev_warn(dev, "DB overflow.\n"); + ibdev_warn(ibdev, "DB overflow.\n"); break; case HNS_ROCE_EVENT_TYPE_FLR: - dev_warn(dev, "Function level reset.\n"); + ibdev_warn(ibdev, "Function level reset.\n"); break; default: break; @@ -6119,8 +6134,9 @@ static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq, HNS_ROCE_CMD_TIMEOUT_MSECS); hns_roce_free_cmd_mailbox(hr_dev, mailbox); if (ret) { - dev_err(hr_dev->dev, - "MODIFY SRQ Failed to cmd mailbox.\n"); + ibdev_err(&hr_dev->ib_dev, + "failed to process cmd when modifying SRQ, ret = %d\n", + ret); return ret; } } @@ -6146,7 +6162,9 @@ static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr) HNS_ROCE_CMD_QUERY_SRQC, HNS_ROCE_CMD_TIMEOUT_MSECS); if (ret) { - dev_err(hr_dev->dev, "QUERY SRQ cmd process error\n"); + ibdev_err(&hr_dev->ib_dev, + "failed to process cmd when querying SRQ, ret = %d\n", + ret); goto out; } diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c index 780c780fdb22..b10c50b8736e 100644 --- a/drivers/infiniband/hw/hns/hns_roce_pd.c +++ b/drivers/infiniband/hw/hns/hns_roce_pd.c @@ -60,14 +60,12 @@ void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev) int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct ib_device *ib_dev = ibpd->device; - struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev); - struct device *dev = hr_dev->dev; struct hns_roce_pd *pd = to_hr_pd(ibpd); int ret; ret = hns_roce_pd_alloc(to_hr_dev(ib_dev), &pd->pdn); if (ret) { - dev_err(dev, "[alloc_pd]hns_roce_pd_alloc failed!\n"); + ibdev_err(ib_dev, "failed to alloc pd, ret = %d\n", ret); return ret; } @@ -76,7 +74,7 @@ int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) { hns_roce_pd_free(to_hr_dev(ib_dev), pd->pdn); - dev_err(dev, "[alloc_pd]ib_copy_to_udata failed!\n"); + ibdev_err(ib_dev, "failed to copy to udata\n"); return -EFAULT; } } -- cgit v1.2.3-58-ga151 From 
30d41e18c3a6eac39b7348ae7e3388f0ec7e680e Mon Sep 17 00:00:00 2001 From: Weihang Li Date: Fri, 20 Mar 2020 11:23:34 +0800 Subject: RDMA/hns: Fix a wrong judgment of return value hns_roce_alloc_mtt_range() never return -1, ret should be checked whether it is zero instead of -1. Fixes: 1ceb0b11a8a2 ("RDMA/hns: Fix non-standard error codes") Link: https://lore.kernel.org/r/1584674622-52773-3-git-send-email-liweihang@huawei.com Signed-off-by: Weihang Li Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_mr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index b9898e71655a..176f34692f88 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c @@ -243,7 +243,7 @@ int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift, /* Allocate MTT entry */ ret = hns_roce_alloc_mtt_range(hr_dev, mtt->order, &mtt->first_seg, mtt->mtt_type); - if (ret == -1) + if (ret) return -ENOMEM; return 0; -- cgit v1.2.3-58-ga151 From d398d4ca5f17e7ba762bc897c82afca868e47a7c Mon Sep 17 00:00:00 2001 From: Lang Cheng Date: Fri, 20 Mar 2020 11:23:35 +0800 Subject: RDMA/hns: Simplify attribute judgment code Combine attribute flags before masking them. Link: https://lore.kernel.org/r/1584674622-52773-4-git-send-email-liweihang@huawei.com Signed-off-by: Lang Cheng Signed-off-by: Weihang Li Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 94cb2984ad6d..518a6491c03f 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -4281,8 +4281,7 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, } /* Not support alternate path and path migration */ - if ((attr_mask & IB_QP_ALT_PATH) || - (attr_mask & IB_QP_PATH_MIG_STATE)) { + if (attr_mask & (IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE)) { ibdev_err(ibdev, "RTR2RTS attr_mask (0x%x)error\n", attr_mask); return -EINVAL; } -- cgit v1.2.3-58-ga151 From 99e713f8daf8e0ddb728ba543a05a2b67d8c47cc Mon Sep 17 00:00:00 2001 From: Lijun Ou Date: Fri, 20 Mar 2020 11:23:36 +0800 Subject: RDMA/hns: Optimize hns_roce_alloc_vf_resource() The capbilities of hardware should be got at first and then used in hns_roce_alloc_vf_resource(). Also removes an unnecessary if ... else condition in it. 
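A sketch of the resulting probe-time ordering (error handling omitted; 'query_hw_caps' is only a placeholder for the driver's capability query, the other two calls appear in the hunks below): the capabilities are known before the VF resource command that depends on them is built.

	/* 'query_hw_caps' stands in for the caps query, which falls back
	 * to set_default_caps() when it fails. */
	query_hw_caps(hr_dev);
	hns_roce_alloc_vf_resource(hr_dev);	/* moved after the caps are known */
	hns_roce_v2_set_bt(hr_dev);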
Link: https://lore.kernel.org/r/1584674622-52773-5-git-send-email-liweihang@huawei.com Signed-off-by: Lijun Ou Signed-off-by: Weihang Li Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 143 +++++++++++++---------------- 1 file changed, 62 insertions(+), 81 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 518a6491c03f..aff7c5da7080 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -1432,82 +1432,63 @@ static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev) desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT); else desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT); - - if (i == 0) { - roce_set_field(req_a->vf_qpc_bt_idx_num, - VF_RES_A_DATA_1_VF_QPC_BT_IDX_M, - VF_RES_A_DATA_1_VF_QPC_BT_IDX_S, 0); - roce_set_field(req_a->vf_qpc_bt_idx_num, - VF_RES_A_DATA_1_VF_QPC_BT_NUM_M, - VF_RES_A_DATA_1_VF_QPC_BT_NUM_S, - HNS_ROCE_VF_QPC_BT_NUM); - - roce_set_field(req_a->vf_srqc_bt_idx_num, - VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M, - VF_RES_A_DATA_2_VF_SRQC_BT_IDX_S, 0); - roce_set_field(req_a->vf_srqc_bt_idx_num, - VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M, - VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S, - HNS_ROCE_VF_SRQC_BT_NUM); - - roce_set_field(req_a->vf_cqc_bt_idx_num, - VF_RES_A_DATA_3_VF_CQC_BT_IDX_M, - VF_RES_A_DATA_3_VF_CQC_BT_IDX_S, 0); - roce_set_field(req_a->vf_cqc_bt_idx_num, - VF_RES_A_DATA_3_VF_CQC_BT_NUM_M, - VF_RES_A_DATA_3_VF_CQC_BT_NUM_S, - HNS_ROCE_VF_CQC_BT_NUM); - - roce_set_field(req_a->vf_mpt_bt_idx_num, - VF_RES_A_DATA_4_VF_MPT_BT_IDX_M, - VF_RES_A_DATA_4_VF_MPT_BT_IDX_S, 0); - roce_set_field(req_a->vf_mpt_bt_idx_num, - VF_RES_A_DATA_4_VF_MPT_BT_NUM_M, - VF_RES_A_DATA_4_VF_MPT_BT_NUM_S, - HNS_ROCE_VF_MPT_BT_NUM); - - roce_set_field(req_a->vf_eqc_bt_idx_num, - VF_RES_A_DATA_5_VF_EQC_IDX_M, - VF_RES_A_DATA_5_VF_EQC_IDX_S, 0); - roce_set_field(req_a->vf_eqc_bt_idx_num, - VF_RES_A_DATA_5_VF_EQC_NUM_M, - VF_RES_A_DATA_5_VF_EQC_NUM_S, - HNS_ROCE_VF_EQC_NUM); - } else { - roce_set_field(req_b->vf_smac_idx_num, - VF_RES_B_DATA_1_VF_SMAC_IDX_M, - VF_RES_B_DATA_1_VF_SMAC_IDX_S, 0); - roce_set_field(req_b->vf_smac_idx_num, - VF_RES_B_DATA_1_VF_SMAC_NUM_M, - VF_RES_B_DATA_1_VF_SMAC_NUM_S, - HNS_ROCE_VF_SMAC_NUM); - - roce_set_field(req_b->vf_sgid_idx_num, - VF_RES_B_DATA_2_VF_SGID_IDX_M, - VF_RES_B_DATA_2_VF_SGID_IDX_S, 0); - roce_set_field(req_b->vf_sgid_idx_num, - VF_RES_B_DATA_2_VF_SGID_NUM_M, - VF_RES_B_DATA_2_VF_SGID_NUM_S, - HNS_ROCE_VF_SGID_NUM); - - roce_set_field(req_b->vf_qid_idx_sl_num, - VF_RES_B_DATA_3_VF_QID_IDX_M, - VF_RES_B_DATA_3_VF_QID_IDX_S, 0); - roce_set_field(req_b->vf_qid_idx_sl_num, - VF_RES_B_DATA_3_VF_SL_NUM_M, - VF_RES_B_DATA_3_VF_SL_NUM_S, - HNS_ROCE_VF_SL_NUM); - - roce_set_field(req_b->vf_sccc_idx_num, - VF_RES_B_DATA_4_VF_SCCC_BT_IDX_M, - VF_RES_B_DATA_4_VF_SCCC_BT_IDX_S, 0); - roce_set_field(req_b->vf_sccc_idx_num, - VF_RES_B_DATA_4_VF_SCCC_BT_NUM_M, - VF_RES_B_DATA_4_VF_SCCC_BT_NUM_S, - HNS_ROCE_VF_SCCC_BT_NUM); - } } + roce_set_field(req_a->vf_qpc_bt_idx_num, + VF_RES_A_DATA_1_VF_QPC_BT_IDX_M, + VF_RES_A_DATA_1_VF_QPC_BT_IDX_S, 0); + roce_set_field(req_a->vf_qpc_bt_idx_num, + VF_RES_A_DATA_1_VF_QPC_BT_NUM_M, + VF_RES_A_DATA_1_VF_QPC_BT_NUM_S, HNS_ROCE_VF_QPC_BT_NUM); + + roce_set_field(req_a->vf_srqc_bt_idx_num, + VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M, + VF_RES_A_DATA_2_VF_SRQC_BT_IDX_S, 0); + roce_set_field(req_a->vf_srqc_bt_idx_num, + VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M, + VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S, + 
HNS_ROCE_VF_SRQC_BT_NUM); + + roce_set_field(req_a->vf_cqc_bt_idx_num, + VF_RES_A_DATA_3_VF_CQC_BT_IDX_M, + VF_RES_A_DATA_3_VF_CQC_BT_IDX_S, 0); + roce_set_field(req_a->vf_cqc_bt_idx_num, + VF_RES_A_DATA_3_VF_CQC_BT_NUM_M, + VF_RES_A_DATA_3_VF_CQC_BT_NUM_S, HNS_ROCE_VF_CQC_BT_NUM); + + roce_set_field(req_a->vf_mpt_bt_idx_num, + VF_RES_A_DATA_4_VF_MPT_BT_IDX_M, + VF_RES_A_DATA_4_VF_MPT_BT_IDX_S, 0); + roce_set_field(req_a->vf_mpt_bt_idx_num, + VF_RES_A_DATA_4_VF_MPT_BT_NUM_M, + VF_RES_A_DATA_4_VF_MPT_BT_NUM_S, HNS_ROCE_VF_MPT_BT_NUM); + + roce_set_field(req_a->vf_eqc_bt_idx_num, VF_RES_A_DATA_5_VF_EQC_IDX_M, + VF_RES_A_DATA_5_VF_EQC_IDX_S, 0); + roce_set_field(req_a->vf_eqc_bt_idx_num, VF_RES_A_DATA_5_VF_EQC_NUM_M, + VF_RES_A_DATA_5_VF_EQC_NUM_S, HNS_ROCE_VF_EQC_NUM); + + roce_set_field(req_b->vf_smac_idx_num, VF_RES_B_DATA_1_VF_SMAC_IDX_M, + VF_RES_B_DATA_1_VF_SMAC_IDX_S, 0); + roce_set_field(req_b->vf_smac_idx_num, VF_RES_B_DATA_1_VF_SMAC_NUM_M, + VF_RES_B_DATA_1_VF_SMAC_NUM_S, HNS_ROCE_VF_SMAC_NUM); + + roce_set_field(req_b->vf_sgid_idx_num, VF_RES_B_DATA_2_VF_SGID_IDX_M, + VF_RES_B_DATA_2_VF_SGID_IDX_S, 0); + roce_set_field(req_b->vf_sgid_idx_num, VF_RES_B_DATA_2_VF_SGID_NUM_M, + VF_RES_B_DATA_2_VF_SGID_NUM_S, HNS_ROCE_VF_SGID_NUM); + + roce_set_field(req_b->vf_qid_idx_sl_num, VF_RES_B_DATA_3_VF_QID_IDX_M, + VF_RES_B_DATA_3_VF_QID_IDX_S, 0); + roce_set_field(req_b->vf_qid_idx_sl_num, VF_RES_B_DATA_3_VF_SL_NUM_M, + VF_RES_B_DATA_3_VF_SL_NUM_S, HNS_ROCE_VF_SL_NUM); + + roce_set_field(req_b->vf_sccc_idx_num, VF_RES_B_DATA_4_VF_SCCC_BT_IDX_M, + VF_RES_B_DATA_4_VF_SCCC_BT_IDX_S, 0); + roce_set_field(req_b->vf_sccc_idx_num, VF_RES_B_DATA_4_VF_SCCC_BT_NUM_M, + VF_RES_B_DATA_4_VF_SCCC_BT_NUM_S, + HNS_ROCE_VF_SCCC_BT_NUM); + return hns_roce_cmq_send(hr_dev, desc, 2); } @@ -2001,13 +1982,6 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev) } } - ret = hns_roce_alloc_vf_resource(hr_dev); - if (ret) { - dev_err(hr_dev->dev, "Allocate vf resource fail, ret = %d.\n", - ret); - return ret; - } - hr_dev->vendor_part_id = hr_dev->pci_dev->device; hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid); @@ -2028,6 +2002,13 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev) if (ret) set_default_caps(hr_dev); + ret = hns_roce_alloc_vf_resource(hr_dev); + if (ret) { + dev_err(hr_dev->dev, "Allocate vf resource fail, ret = %d.\n", + ret); + return ret; + } + ret = hns_roce_v2_set_bt(hr_dev); if (ret) dev_err(hr_dev->dev, "Configure bt attribute fail, ret = %d.\n", -- cgit v1.2.3-58-ga151 From fd72926c332eaa28845b1f655b24006158ec5207 Mon Sep 17 00:00:00 2001 From: Lang Cheng Date: Fri, 20 Mar 2020 11:23:37 +0800 Subject: RDMA/hns: Adjust the qp status value sequence of the hardware Interchange SQD and SQE to match the protocol. 
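For context only (the driver-side numbering has to track the device's own QPC encoding): the verbs-level state enum in <rdma/ib_verbs.h> likewise places SQD before SQE, roughly:

	enum ib_qp_state {
		IB_QPS_RESET,
		IB_QPS_INIT,
		IB_QPS_RTR,
		IB_QPS_RTS,
		IB_QPS_SQD,
		IB_QPS_SQE,
		IB_QPS_ERR
	};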
Link: https://lore.kernel.org/r/1584674622-52773-6-git-send-email-liweihang@huawei.com Signed-off-by: Lang Cheng Signed-off-by: Weihang Li Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h index 2a117ff6a6be..83e94df7f190 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h @@ -460,8 +460,8 @@ enum hns_roce_v2_qp_state { HNS_ROCE_QP_ST_INIT, HNS_ROCE_QP_ST_RTR, HNS_ROCE_QP_ST_RTS, - HNS_ROCE_QP_ST_SQER, HNS_ROCE_QP_ST_SQD, + HNS_ROCE_QP_ST_SQER, HNS_ROCE_QP_ST_ERR, HNS_ROCE_QP_ST_SQ_DRAINING, HNS_ROCE_QP_NUM_ST -- cgit v1.2.3-58-ga151 From f91b9196875262f256cc018bb55585bd1e473fd8 Mon Sep 17 00:00:00 2001 From: Lang Cheng Date: Fri, 20 Mar 2020 11:23:38 +0800 Subject: RDMA/hns: Remove definition of cq doorbell structure The struct hns_roce_v2_cq_db is unused, it should be removed. Link: https://lore.kernel.org/r/1584674622-52773-7-git-send-email-liweihang@huawei.com Signed-off-by: Lang Cheng Signed-off-by: Weihang Li Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 5 ----- 1 file changed, 5 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h index 83e94df7f190..7c999536a4c5 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h @@ -1056,11 +1056,6 @@ struct hns_roce_v2_mpt_entry { #define V2_DB_PARAMETER_SL_S 16 #define V2_DB_PARAMETER_SL_M GENMASK(18, 16) -struct hns_roce_v2_cq_db { - __le32 byte_4; - __le32 parameter; -}; - #define V2_CQ_DB_BYTE_4_TAG_S 0 #define V2_CQ_DB_BYTE_4_TAG_M GENMASK(23, 0) -- cgit v1.2.3-58-ga151 From bceda6e67b8f211419876c64ce2125f683927e64 Mon Sep 17 00:00:00 2001 From: Wenpeng Liang Date: Fri, 20 Mar 2020 11:23:39 +0800 Subject: RDMA/hns: Remove meaningless prints ceq and aeq is a ring buffer, consumer index of them will be set to zero after reaching the maximum value. The warning should be removed or it may mislead the users. 
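A generic sketch of the behaviour the warning was second-guessing (not the hns code; 'ring_entries' is a stand-in for the driver's depth bound): wrapping the consumer index back to zero is the expected steady state of a ring buffer, not an error condition.

	cons_index++;
	if (cons_index > ring_entries - 1)
		cons_index = 0;		/* normal wrap-around, nothing to warn about */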
Link: https://lore.kernel.org/r/1584674622-52773-8-git-send-email-liweihang@huawei.com Signed-off-by: Wenpeng Liang Signed-off-by: Weihang Li Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 9 ++------- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 5 +---- 2 files changed, 3 insertions(+), 11 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 2e5304502a08..5ff028d77be3 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -3935,10 +3935,8 @@ static int hns_roce_v1_aeq_int(struct hns_roce_dev *hr_dev, eq->cons_index++; aeqes_found = 1; - if (eq->cons_index > 2 * hr_dev->caps.aeqe_depth - 1) { - dev_warn(dev, "cons_index overflow, set back to 0.\n"); + if (eq->cons_index > 2 * hr_dev->caps.aeqe_depth - 1) eq->cons_index = 0; - } } set_eq_cons_index_v1(eq, 0); @@ -3988,11 +3986,8 @@ static int hns_roce_v1_ceq_int(struct hns_roce_dev *hr_dev, ceqes_found = 1; if (eq->cons_index > - EQ_DEPTH_COEFF * hr_dev->caps.ceqe_depth - 1) { - dev_warn(&eq->hr_dev->pdev->dev, - "cons_index overflow, set back to 0.\n"); + EQ_DEPTH_COEFF * hr_dev->caps.ceqe_depth - 1) eq->cons_index = 0; - } } set_eq_cons_index_v1(eq, 0); diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index aff7c5da7080..bd14e71f4566 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -5361,7 +5361,6 @@ static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq) static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq) { - struct device *dev = hr_dev->dev; struct hns_roce_ceqe *ceqe = next_ceqe_sw_v2(eq); int ceqe_found = 0; u32 cqn; @@ -5380,10 +5379,8 @@ static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev, ++eq->cons_index; ceqe_found = 1; - if (eq->cons_index > (EQ_DEPTH_COEFF * eq->entries - 1)) { - dev_warn(dev, "cons_index overflow, set back to 0.\n"); + if (eq->cons_index > (EQ_DEPTH_COEFF * eq->entries - 1)) eq->cons_index = 0; - } ceqe = next_ceqe_sw_v2(eq); } -- cgit v1.2.3-58-ga151 From f4c5d869c8270703c3f3267242e4cc968c723370 Mon Sep 17 00:00:00 2001 From: Lang Cheng Date: Fri, 20 Mar 2020 11:23:40 +0800 Subject: RDMA/hns: Remove redundant qpc setup operations Before calling modify_qp_reset_to_init(), the entire qpc mask has been cleared, so it is no longer necessary to clear the specific fields in the mask. Link: https://lore.kernel.org/r/1584674622-52773-9-git-send-email-liweihang@huawei.com Signed-off-by: Lang Cheng Signed-off-by: Weihang Li Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 237 +---------------------------- 1 file changed, 1 insertion(+), 236 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index bd14e71f4566..2b03c722c2d9 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -3541,14 +3541,9 @@ static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp, HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE ? 
ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0); - roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M, - V2_QPC_BYTE_4_SGE_SHIFT_S, 0); - roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, ilog2((unsigned int)hr_qp->sq.wqe_cnt)); - roce_set_field(qpc_mask->byte_20_smac_sgid_idx, - V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0); roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, @@ -3556,9 +3551,6 @@ static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp, hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT || hr_qp->ibqp.srq) ? 0 : ilog2((unsigned int)hr_qp->rq.wqe_cnt)); - - roce_set_field(qpc_mask->byte_20_smac_sgid_idx, - V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0); } static void modify_qp_reset_to_init(struct ib_qp *ibqp, @@ -3578,280 +3570,53 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp, */ roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M, V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type)); - roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M, - V2_QPC_BYTE_4_TST_S, 0); roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M, V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn); - roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M, - V2_QPC_BYTE_4_SQPN_S, 0); roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M, V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn); - roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M, - V2_QPC_BYTE_16_PD_S, 0); roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M, V2_QPC_BYTE_20_RQWS_S, ilog2(hr_qp->rq.max_gs)); - roce_set_field(qpc_mask->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M, - V2_QPC_BYTE_20_RQWS_S, 0); set_qpc_wqe_cnt(hr_qp, context, qpc_mask); /* No VLAN need to set 0xFFF */ roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M, V2_QPC_BYTE_24_VLAN_ID_S, 0xfff); - roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M, - V2_QPC_BYTE_24_VLAN_ID_S, 0); - /* - * Set some fields in context to zero, Because the default values - * of all fields in context are zero, we need not set them to 0 again. - * but we should set the relevant fields of context mask to 0. 
- */ - roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_TX_ERR_S, 0); - roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_RX_ERR_S, 0); - roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_TX_ERR_S, 0); - roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_RX_ERR_S, 0); - - roce_set_field(qpc_mask->byte_60_qpst_tempid, V2_QPC_BYTE_60_TEMPID_M, - V2_QPC_BYTE_60_TEMPID_S, 0); - - roce_set_field(qpc_mask->byte_60_qpst_tempid, - V2_QPC_BYTE_60_SCC_TOKEN_M, V2_QPC_BYTE_60_SCC_TOKEN_S, - 0); - roce_set_bit(qpc_mask->byte_60_qpst_tempid, - V2_QPC_BYTE_60_SQ_DB_DOING_S, 0); - roce_set_bit(qpc_mask->byte_60_qpst_tempid, - V2_QPC_BYTE_60_RQ_DB_DOING_S, 0); - roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CNP_TX_FLAG_S, 0); - roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CE_FLAG_S, 0); - - if (hr_qp->rdb_en) { + if (hr_qp->rdb_en) roce_set_bit(context->byte_68_rq_db, V2_QPC_BYTE_68_RQ_RECORD_EN_S, 1); - roce_set_bit(qpc_mask->byte_68_rq_db, - V2_QPC_BYTE_68_RQ_RECORD_EN_S, 0); - } roce_set_field(context->byte_68_rq_db, V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M, V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S, ((u32)hr_qp->rdb.dma) >> 1); - roce_set_field(qpc_mask->byte_68_rq_db, - V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M, - V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S, 0); context->rq_db_record_addr = cpu_to_le32(hr_qp->rdb.dma >> 32); - qpc_mask->rq_db_record_addr = 0; roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) ? 1 : 0); - roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0); roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M, V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn); - roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M, - V2_QPC_BYTE_80_RX_CQN_S, 0); if (ibqp->srq) { roce_set_field(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, to_hr_srq(ibqp->srq)->srqn); - roce_set_field(qpc_mask->byte_76_srqn_op_en, - V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0); roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_SRQ_EN_S, 1); - roce_set_bit(qpc_mask->byte_76_srqn_op_en, - V2_QPC_BYTE_76_SRQ_EN_S, 0); } - roce_set_field(qpc_mask->byte_84_rq_ci_pi, - V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M, - V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0); - roce_set_field(qpc_mask->byte_84_rq_ci_pi, - V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M, - V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0); - - roce_set_field(qpc_mask->byte_92_srq_info, V2_QPC_BYTE_92_SRQ_INFO_M, - V2_QPC_BYTE_92_SRQ_INFO_S, 0); - - roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M, - V2_QPC_BYTE_96_RX_REQ_MSN_S, 0); - - roce_set_field(qpc_mask->byte_104_rq_sge, - V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_M, - V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_S, 0); - - roce_set_bit(qpc_mask->byte_108_rx_reqepsn, - V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0); - roce_set_field(qpc_mask->byte_108_rx_reqepsn, - V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M, - V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0); - roce_set_bit(qpc_mask->byte_108_rx_reqepsn, - V2_QPC_BYTE_108_RX_REQ_RNR_S, 0); - - qpc_mask->rq_rnr_timer = 0; - qpc_mask->rx_msg_len = 0; - qpc_mask->rx_rkey_pkt_info = 0; - qpc_mask->rx_va = 0; - - roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M, - V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0); - roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M, - V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0); - - roce_set_bit(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RQ_RTY_WAIT_DO_S, - 0); - 
roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_HEAD_M, - V2_QPC_BYTE_140_RAQ_TRRL_HEAD_S, 0); - roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_TAIL_M, - V2_QPC_BYTE_140_RAQ_TRRL_TAIL_S, 0); - - roce_set_field(qpc_mask->byte_144_raq, - V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_M, - V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_S, 0); - roce_set_field(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_CREDIT_M, - V2_QPC_BYTE_144_RAQ_CREDIT_S, 0); - roce_set_bit(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RESP_RTY_FLG_S, 0); - - roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RQ_MSN_M, - V2_QPC_BYTE_148_RQ_MSN_S, 0); - roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RAQ_SYNDROME_M, - V2_QPC_BYTE_148_RAQ_SYNDROME_S, 0); - - roce_set_field(qpc_mask->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M, - V2_QPC_BYTE_152_RAQ_PSN_S, 0); - roce_set_field(qpc_mask->byte_152_raq, - V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_M, - V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_S, 0); - - roce_set_field(qpc_mask->byte_156_raq, V2_QPC_BYTE_156_RAQ_USE_PKTN_M, - V2_QPC_BYTE_156_RAQ_USE_PKTN_S, 0); - - roce_set_field(qpc_mask->byte_160_sq_ci_pi, - V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M, - V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0); - roce_set_field(qpc_mask->byte_160_sq_ci_pi, - V2_QPC_BYTE_160_SQ_CONSUMER_IDX_M, - V2_QPC_BYTE_160_SQ_CONSUMER_IDX_S, 0); - - roce_set_bit(qpc_mask->byte_168_irrl_idx, - V2_QPC_BYTE_168_POLL_DB_WAIT_DO_S, 0); - roce_set_bit(qpc_mask->byte_168_irrl_idx, - V2_QPC_BYTE_168_SCC_TOKEN_FORBID_SQ_DEQ_S, 0); - roce_set_bit(qpc_mask->byte_168_irrl_idx, - V2_QPC_BYTE_168_WAIT_ACK_TIMEOUT_S, 0); - roce_set_bit(qpc_mask->byte_168_irrl_idx, - V2_QPC_BYTE_168_MSG_RTY_LP_FLG_S, 0); - roce_set_bit(qpc_mask->byte_168_irrl_idx, - V2_QPC_BYTE_168_SQ_INVLD_FLG_S, 0); - roce_set_field(qpc_mask->byte_168_irrl_idx, - V2_QPC_BYTE_168_IRRL_IDX_LSB_M, - V2_QPC_BYTE_168_IRRL_IDX_LSB_S, 0); - roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M, V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 4); - roce_set_field(qpc_mask->byte_172_sq_psn, - V2_QPC_BYTE_172_ACK_REQ_FREQ_M, - V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 0); - - roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_MSG_RNR_FLG_S, - 0); roce_set_bit(context->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 1); - roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 0); - - roce_set_field(qpc_mask->byte_176_msg_pktn, - V2_QPC_BYTE_176_MSG_USE_PKTN_M, - V2_QPC_BYTE_176_MSG_USE_PKTN_S, 0); - roce_set_field(qpc_mask->byte_176_msg_pktn, - V2_QPC_BYTE_176_IRRL_HEAD_PRE_M, - V2_QPC_BYTE_176_IRRL_HEAD_PRE_S, 0); - - roce_set_field(qpc_mask->byte_184_irrl_idx, - V2_QPC_BYTE_184_IRRL_IDX_MSB_M, - V2_QPC_BYTE_184_IRRL_IDX_MSB_S, 0); - - qpc_mask->cur_sge_offset = 0; - - roce_set_field(qpc_mask->byte_192_ext_sge, - V2_QPC_BYTE_192_CUR_SGE_IDX_M, - V2_QPC_BYTE_192_CUR_SGE_IDX_S, 0); - roce_set_field(qpc_mask->byte_192_ext_sge, - V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_M, - V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_S, 0); - - roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M, - V2_QPC_BYTE_196_IRRL_HEAD_S, 0); - - roce_set_field(qpc_mask->byte_200_sq_max, V2_QPC_BYTE_200_SQ_MAX_IDX_M, - V2_QPC_BYTE_200_SQ_MAX_IDX_S, 0); - roce_set_field(qpc_mask->byte_200_sq_max, - V2_QPC_BYTE_200_LCL_OPERATED_CNT_M, - V2_QPC_BYTE_200_LCL_OPERATED_CNT_S, 0); - - roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RNR_FLG_S, 0); - roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RTY_FLG_S, 0); - - roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M, - 
V2_QPC_BYTE_212_CHECK_FLG_S, 0); - - qpc_mask->sq_timer = 0; - - roce_set_field(qpc_mask->byte_220_retry_psn_msn, - V2_QPC_BYTE_220_RETRY_MSG_MSN_M, - V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0); - roce_set_field(qpc_mask->byte_232_irrl_sge, - V2_QPC_BYTE_232_IRRL_SGE_IDX_M, - V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0); - - roce_set_bit(qpc_mask->byte_232_irrl_sge, V2_QPC_BYTE_232_SO_LP_VLD_S, - 0); - roce_set_bit(qpc_mask->byte_232_irrl_sge, - V2_QPC_BYTE_232_FENCE_LP_VLD_S, 0); - roce_set_bit(qpc_mask->byte_232_irrl_sge, V2_QPC_BYTE_232_IRRL_LP_VLD_S, - 0); - - qpc_mask->irrl_cur_sge_offset = 0; - - roce_set_field(qpc_mask->byte_240_irrl_tail, - V2_QPC_BYTE_240_IRRL_TAIL_REAL_M, - V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0); - roce_set_field(qpc_mask->byte_240_irrl_tail, - V2_QPC_BYTE_240_IRRL_TAIL_RD_M, - V2_QPC_BYTE_240_IRRL_TAIL_RD_S, 0); - roce_set_field(qpc_mask->byte_240_irrl_tail, - V2_QPC_BYTE_240_RX_ACK_MSN_M, - V2_QPC_BYTE_240_RX_ACK_MSN_S, 0); - - roce_set_field(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_M, - V2_QPC_BYTE_248_IRRL_PSN_S, 0); - roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_ACK_PSN_ERR_S, - 0); - roce_set_field(qpc_mask->byte_248_ack_psn, - V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M, - V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0); - roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_VLD_S, - 0); - roce_set_bit(qpc_mask->byte_248_ack_psn, - V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0); - roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_CQ_ERR_IND_S, - 0); hr_qp->access_flags = attr->qp_access_flags; roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M, V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn); - roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M, - V2_QPC_BYTE_252_TX_CQN_S, 0); - - roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_ERR_TYPE_M, - V2_QPC_BYTE_252_ERR_TYPE_S, 0); - - roce_set_field(qpc_mask->byte_256_sqflush_rqcqe, - V2_QPC_BYTE_256_RQ_CQE_IDX_M, - V2_QPC_BYTE_256_RQ_CQE_IDX_S, 0); - roce_set_field(qpc_mask->byte_256_sqflush_rqcqe, - V2_QPC_BYTE_256_SQ_FLUSH_IDX_M, - V2_QPC_BYTE_256_SQ_FLUSH_IDX_S, 0); } static void modify_qp_init_to_init(struct ib_qp *ibqp, -- cgit v1.2.3-58-ga151 From cd4a70bb7d19063f63c8189bef08e2149f116ef0 Mon Sep 17 00:00:00 2001 From: Weihang Li Date: Fri, 20 Mar 2020 11:23:41 +0800 Subject: RDMA/hns: Remove redundant assignment of wc->smac when polling cq The field smac in ib_wc was used for create AH and then it will be treated as destination mac address in UD sqwqe, but related code about filling smac into AH has been removed in core. Actually, the dmac in UD sqwqe is parsed from the dgid in grh which is passed in by ULP now, so this assignment should be removed. 
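On the consumer side, ULPs are expected to test the completion flag before trusting the field, so dropping the assignment is safe for well-behaved users; a hedged sketch of that check (not taken from any particular ULP):

	if (wc->wc_flags & IB_WC_WITH_SMAC)
		ether_addr_copy(dmac, wc->smac);	/* only valid when the flag is set */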
Link: https://lore.kernel.org/r/1584674622-52773-10-git-send-email-liweihang@huawei.com Signed-off-by: Weihang Li Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 2b03c722c2d9..31b6146bf975 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -3236,14 +3236,7 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq, wc->port_num = roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_PORTN_M, V2_CQE_BYTE_32_PORTN_S); wc->pkey_index = 0; - memcpy(wc->smac, cqe->smac, 4); - wc->smac[4] = roce_get_field(cqe->byte_28, - V2_CQE_BYTE_28_SMAC_4_M, - V2_CQE_BYTE_28_SMAC_4_S); - wc->smac[5] = roce_get_field(cqe->byte_28, - V2_CQE_BYTE_28_SMAC_5_M, - V2_CQE_BYTE_28_SMAC_5_S); - wc->wc_flags |= IB_WC_WITH_SMAC; + if (roce_get_bit(cqe->byte_28, V2_CQE_BYTE_28_VID_VLD_S)) { wc->vlan_id = (u16)roce_get_field(cqe->byte_28, V2_CQE_BYTE_28_VID_M, -- cgit v1.2.3-58-ga151 From e0b0722643fc3bb7e2401a868cc28e65c31ce07d Mon Sep 17 00:00:00 2001 From: Weihang Li Date: Fri, 20 Mar 2020 11:23:42 +0800 Subject: RDMA/hns: Remove redundant judgment of qp_type Type of qp has been checked in check_send_valid(), so this judgment should be removed. Link: https://lore.kernel.org/r/1584674622-52773-11-git-send-email-liweihang@huawei.com Signed-off-by: Weihang Li Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 31b6146bf975..7eceeea9ccea 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -583,13 +583,6 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, ret = set_ud_wqe(qp, wr, wqe, &sge_idx, owner_bit); else if (ibqp->qp_type == IB_QPT_RC) ret = set_rc_wqe(qp, wr, wqe, &sge_idx, owner_bit); - else { - ibdev_err(ibdev, "Illegal qp_type(0x%x)\n", - ibqp->qp_type); - spin_unlock_irqrestore(&qp->sq.lock, flags); - *bad_wr = wr; - return -EOPNOTSUPP; - } if (ret) { *bad_wr = wr; -- cgit v1.2.3-58-ga151 From a4da83c215ac4e69ef7ae9ec3ee9ef63a3edafdf Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Tue, 17 Mar 2020 15:54:23 +0100 Subject: IB/hfi1: Get rid of a warning The right markup for a variable is @foo, and not @foo[]. Using a wrong markup caused this warning: ./drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h:243: WARNING: Inline strong start-string without end-string. 
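For reference, a kernel-doc sketch of the accepted markup for an array member (hypothetical struct, not the opa_vnic one): the member is referenced as plain '@name', without brackets.

	/**
	 * struct example_table - kernel-doc markup sketch
	 * @num_entries: number of entries that follow
	 * @entries: array of table entries (note: '@entries', not '@entries[]')
	 */
	struct example_table {
		__be16 num_entries;
		__be32 entries[];
	};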
Link: https://lore.kernel.org/r/9dce702510505556d75a13d9641e09218a4b4a65.1584456635.git.mchehab+huawei@kernel.org Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Jason Gunthorpe --- drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h index 0b3570dc606d..d324312a373c 100644 --- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h +++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h @@ -239,7 +239,7 @@ struct opa_veswport_mactable_entry { * @offset: mac table starting offset * @num_entries: Number of entries to get or set * @mac_tbl_digest: mac table digest - * @tbl_entries[]: Array of table entries + * @tbl_entries: Array of table entries * * The EM sends down this structure in a MAD indicating * the starting offset in the forwarding table that this -- cgit v1.2.3-58-ga151 From 342ee59de98a2ecdf15a46849a2534e7c808eb1f Mon Sep 17 00:00:00 2001 From: Yishai Hadas Date: Tue, 24 Mar 2020 08:01:39 +0200 Subject: IB/mlx5: Expose UAR object and its alloc/destroy commands Expose UAR object and its alloc/destroy commands to be used over the ioctl interface by user space applications. This API supports both BF & NC modes and enables a dynamic allocation of UARs once really needed. As the number of driver objects were limited by the core ones when the merged tree is prepared, had to decrease the number of core objects to enable the new UAR object usage. Link: https://lore.kernel.org/r/20200324060143.1569116-2-leon@kernel.org Signed-off-by: Yishai Hadas Reviewed-by: Michael Guralnik Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx5/main.c | 172 ++++++++++++++++++++++++++++-- drivers/infiniband/hw/mlx5/mlx5_ib.h | 2 + include/rdma/uverbs_ioctl.h | 2 +- include/uapi/rdma/mlx5_user_ioctl_cmds.h | 18 ++++ include/uapi/rdma/mlx5_user_ioctl_verbs.h | 5 + 5 files changed, 189 insertions(+), 10 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 1f91d9e543e2..289445f6af21 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -2021,6 +2021,17 @@ static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, return (dev->mdev->bar_addr >> PAGE_SHIFT) + uar_idx / fw_uars_per_page; } +static u64 uar_index2paddress(struct mlx5_ib_dev *dev, + int uar_idx) +{ + unsigned int fw_uars_per_page; + + fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? 
+ MLX5_UARS_IN_PAGE : 1; + + return (dev->mdev->bar_addr + (uar_idx / fw_uars_per_page) * PAGE_SIZE); +} + static int get_command(unsigned long offset) { return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK; @@ -2105,6 +2116,11 @@ static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry *entry) mutex_unlock(&var_table->bitmap_lock); kfree(mentry); break; + case MLX5_IB_MMAP_TYPE_UAR_WC: + case MLX5_IB_MMAP_TYPE_UAR_NC: + mlx5_cmd_free_uar(dev->mdev, mentry->page_idx); + kfree(mentry); + break; default: WARN_ON(true); } @@ -2256,7 +2272,8 @@ static int mlx5_ib_mmap_offset(struct mlx5_ib_dev *dev, mentry = to_mmmap(entry); pfn = (mentry->address >> PAGE_SHIFT); - if (mentry->mmap_flag == MLX5_IB_MMAP_TYPE_VAR) + if (mentry->mmap_flag == MLX5_IB_MMAP_TYPE_VAR || + mentry->mmap_flag == MLX5_IB_MMAP_TYPE_UAR_NC) prot = pgprot_noncached(vma->vm_page_prot); else prot = pgprot_writecombine(vma->vm_page_prot); @@ -6078,9 +6095,9 @@ static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev) mlx5_nic_vport_disable_roce(dev->mdev); } -static int var_obj_cleanup(struct ib_uobject *uobject, - enum rdma_remove_reason why, - struct uverbs_attr_bundle *attrs) +static int mmap_obj_cleanup(struct ib_uobject *uobject, + enum rdma_remove_reason why, + struct uverbs_attr_bundle *attrs) { struct mlx5_user_mmap_entry *obj = uobject->object; @@ -6088,6 +6105,16 @@ static int var_obj_cleanup(struct ib_uobject *uobject, return 0; } +static int mlx5_rdma_user_mmap_entry_insert(struct mlx5_ib_ucontext *c, + struct mlx5_user_mmap_entry *entry, + size_t length) +{ + return rdma_user_mmap_entry_insert_range( + &c->ibucontext, &entry->rdma_entry, length, + (MLX5_IB_MMAP_OFFSET_START << 16), + ((MLX5_IB_MMAP_OFFSET_END << 16) + (1UL << 16) - 1)); +} + static struct mlx5_user_mmap_entry * alloc_var_entry(struct mlx5_ib_ucontext *c) { @@ -6118,10 +6145,8 @@ alloc_var_entry(struct mlx5_ib_ucontext *c) entry->page_idx = page_idx; entry->mmap_flag = MLX5_IB_MMAP_TYPE_VAR; - err = rdma_user_mmap_entry_insert_range( - &c->ibucontext, &entry->rdma_entry, var_table->stride_size, - MLX5_IB_MMAP_OFFSET_START << 16, - (MLX5_IB_MMAP_OFFSET_END << 16) + (1UL << 16) - 1); + err = mlx5_rdma_user_mmap_entry_insert(c, entry, + var_table->stride_size); if (err) goto err_insert; @@ -6205,7 +6230,7 @@ DECLARE_UVERBS_NAMED_METHOD_DESTROY( UA_MANDATORY)); DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_VAR, - UVERBS_TYPE_ALLOC_IDR(var_obj_cleanup), + UVERBS_TYPE_ALLOC_IDR(mmap_obj_cleanup), &UVERBS_METHOD(MLX5_IB_METHOD_VAR_OBJ_ALLOC), &UVERBS_METHOD(MLX5_IB_METHOD_VAR_OBJ_DESTROY)); @@ -6217,6 +6242,134 @@ static bool var_is_supported(struct ib_device *device) MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q); } +static struct mlx5_user_mmap_entry * +alloc_uar_entry(struct mlx5_ib_ucontext *c, + enum mlx5_ib_uapi_uar_alloc_type alloc_type) +{ + struct mlx5_user_mmap_entry *entry; + struct mlx5_ib_dev *dev; + u32 uar_index; + int err; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return ERR_PTR(-ENOMEM); + + dev = to_mdev(c->ibucontext.device); + err = mlx5_cmd_alloc_uar(dev->mdev, &uar_index); + if (err) + goto end; + + entry->page_idx = uar_index; + entry->address = uar_index2paddress(dev, uar_index); + if (alloc_type == MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF) + entry->mmap_flag = MLX5_IB_MMAP_TYPE_UAR_WC; + else + entry->mmap_flag = MLX5_IB_MMAP_TYPE_UAR_NC; + + err = mlx5_rdma_user_mmap_entry_insert(c, entry, PAGE_SIZE); + if (err) + goto err_insert; + + return entry; + +err_insert: + mlx5_cmd_free_uar(dev->mdev, 
uar_index); +end: + kfree(entry); + return ERR_PTR(err); +} + +static int UVERBS_HANDLER(MLX5_IB_METHOD_UAR_OBJ_ALLOC)( + struct uverbs_attr_bundle *attrs) +{ + struct ib_uobject *uobj = uverbs_attr_get_uobject( + attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_HANDLE); + enum mlx5_ib_uapi_uar_alloc_type alloc_type; + struct mlx5_ib_ucontext *c; + struct mlx5_user_mmap_entry *entry; + u64 mmap_offset; + u32 length; + int err; + + c = to_mucontext(ib_uverbs_get_ucontext(attrs)); + if (IS_ERR(c)) + return PTR_ERR(c); + + err = uverbs_get_const(&alloc_type, attrs, + MLX5_IB_ATTR_UAR_OBJ_ALLOC_TYPE); + if (err) + return err; + + if (alloc_type != MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF && + alloc_type != MLX5_IB_UAPI_UAR_ALLOC_TYPE_NC) + return -EOPNOTSUPP; + + if (!to_mdev(c->ibucontext.device)->wc_support && + alloc_type == MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF) + return -EOPNOTSUPP; + + entry = alloc_uar_entry(c, alloc_type); + if (IS_ERR(entry)) + return PTR_ERR(entry); + + mmap_offset = mlx5_entry_to_mmap_offset(entry); + length = entry->rdma_entry.npages * PAGE_SIZE; + uobj->object = entry; + + err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_OFFSET, + &mmap_offset, sizeof(mmap_offset)); + if (err) + goto err; + + err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_PAGE_ID, + &entry->page_idx, sizeof(entry->page_idx)); + if (err) + goto err; + + err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_LENGTH, + &length, sizeof(length)); + if (err) + goto err; + + return 0; + +err: + rdma_user_mmap_entry_remove(&entry->rdma_entry); + return err; +} + +DECLARE_UVERBS_NAMED_METHOD( + MLX5_IB_METHOD_UAR_OBJ_ALLOC, + UVERBS_ATTR_IDR(MLX5_IB_ATTR_UAR_OBJ_ALLOC_HANDLE, + MLX5_IB_OBJECT_UAR, + UVERBS_ACCESS_NEW, + UA_MANDATORY), + UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_UAR_OBJ_ALLOC_TYPE, + enum mlx5_ib_uapi_uar_alloc_type, + UA_MANDATORY), + UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_UAR_OBJ_ALLOC_PAGE_ID, + UVERBS_ATTR_TYPE(u32), + UA_MANDATORY), + UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_LENGTH, + UVERBS_ATTR_TYPE(u32), + UA_MANDATORY), + UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_OFFSET, + UVERBS_ATTR_TYPE(u64), + UA_MANDATORY)); + +DECLARE_UVERBS_NAMED_METHOD_DESTROY( + MLX5_IB_METHOD_UAR_OBJ_DESTROY, + UVERBS_ATTR_IDR(MLX5_IB_ATTR_UAR_OBJ_DESTROY_HANDLE, + MLX5_IB_OBJECT_UAR, + UVERBS_ACCESS_DESTROY, + UA_MANDATORY)); + +DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_UAR, + UVERBS_TYPE_ALLOC_IDR(mmap_obj_cleanup), + &UVERBS_METHOD(MLX5_IB_METHOD_UAR_OBJ_ALLOC), + &UVERBS_METHOD(MLX5_IB_METHOD_UAR_OBJ_DESTROY)); + ADD_UVERBS_ATTRIBUTES_SIMPLE( mlx5_ib_dm, UVERBS_OBJECT_DM, @@ -6248,6 +6401,7 @@ static const struct uapi_definition mlx5_ib_defs[] = { UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DM, &mlx5_ib_dm), UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_VAR, UAPI_DEF_IS_OBJ_SUPPORTED(var_is_supported)), + UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_UAR), {} }; diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 2be773a24dda..3ba6175f949e 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -124,6 +124,8 @@ enum { enum mlx5_ib_mmap_type { MLX5_IB_MMAP_TYPE_MEMIC = 1, MLX5_IB_MMAP_TYPE_VAR = 2, + MLX5_IB_MMAP_TYPE_UAR_WC = 3, + MLX5_IB_MMAP_TYPE_UAR_NC = 4, }; struct mlx5_ib_ucontext { diff --git a/include/rdma/uverbs_ioctl.h b/include/rdma/uverbs_ioctl.h index 28570ac2b6a0..9f3b1e004046 100644 --- a/include/rdma/uverbs_ioctl.h +++ b/include/rdma/uverbs_ioctl.h @@ -173,7 +173,7 @@ enum uapi_radix_data { 
UVERBS_API_OBJ_KEY_BITS = 5, UVERBS_API_OBJ_KEY_SHIFT = UVERBS_API_METHOD_KEY_BITS + UVERBS_API_METHOD_KEY_SHIFT, - UVERBS_API_OBJ_KEY_NUM_CORE = 24, + UVERBS_API_OBJ_KEY_NUM_CORE = 20, UVERBS_API_OBJ_KEY_NUM_DRIVER = (1 << UVERBS_API_OBJ_KEY_BITS) - UVERBS_API_OBJ_KEY_NUM_CORE, UVERBS_API_OBJ_KEY_MASK = GENMASK(31, UVERBS_API_OBJ_KEY_SHIFT), diff --git a/include/uapi/rdma/mlx5_user_ioctl_cmds.h b/include/uapi/rdma/mlx5_user_ioctl_cmds.h index 8f4a417fc70a..24f3388c3182 100644 --- a/include/uapi/rdma/mlx5_user_ioctl_cmds.h +++ b/include/uapi/rdma/mlx5_user_ioctl_cmds.h @@ -131,6 +131,23 @@ enum mlx5_ib_var_obj_methods { MLX5_IB_METHOD_VAR_OBJ_DESTROY, }; +enum mlx5_ib_uar_alloc_attrs { + MLX5_IB_ATTR_UAR_OBJ_ALLOC_HANDLE = (1U << UVERBS_ID_NS_SHIFT), + MLX5_IB_ATTR_UAR_OBJ_ALLOC_TYPE, + MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_OFFSET, + MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_LENGTH, + MLX5_IB_ATTR_UAR_OBJ_ALLOC_PAGE_ID, +}; + +enum mlx5_ib_uar_obj_destroy_attrs { + MLX5_IB_ATTR_UAR_OBJ_DESTROY_HANDLE = (1U << UVERBS_ID_NS_SHIFT), +}; + +enum mlx5_ib_uar_obj_methods { + MLX5_IB_METHOD_UAR_OBJ_ALLOC = (1U << UVERBS_ID_NS_SHIFT), + MLX5_IB_METHOD_UAR_OBJ_DESTROY, +}; + enum mlx5_ib_devx_umem_reg_attrs { MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE = (1U << UVERBS_ID_NS_SHIFT), MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR, @@ -190,6 +207,7 @@ enum mlx5_ib_objects { MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD, MLX5_IB_OBJECT_VAR, MLX5_IB_OBJECT_PP, + MLX5_IB_OBJECT_UAR, }; enum mlx5_ib_flow_matcher_create_attrs { diff --git a/include/uapi/rdma/mlx5_user_ioctl_verbs.h b/include/uapi/rdma/mlx5_user_ioctl_verbs.h index b4641a7865f7..3f7a97c28045 100644 --- a/include/uapi/rdma/mlx5_user_ioctl_verbs.h +++ b/include/uapi/rdma/mlx5_user_ioctl_verbs.h @@ -77,5 +77,10 @@ enum mlx5_ib_uapi_pp_alloc_flags { MLX5_IB_UAPI_PP_ALLOC_FLAGS_DEDICATED_INDEX = 1 << 0, }; +enum mlx5_ib_uapi_uar_alloc_type { + MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF = 0x0, + MLX5_IB_UAPI_UAR_ALLOC_TYPE_NC = 0x1, +}; + #endif -- cgit v1.2.3-58-ga151 From 64d99f6a62b98532886ede9913a026b2e2bc0419 Mon Sep 17 00:00:00 2001 From: Yishai Hadas Date: Tue, 24 Mar 2020 08:01:40 +0200 Subject: IB/mlx5: Extend CQ creation to get uar page index from user space Extend CQ creation to get uar page index from user space, this mode can be used with the UAR dynamic mode APIs to allocate/destroy a UAR object. Link: https://lore.kernel.org/r/20200324060143.1569116-3-leon@kernel.org Signed-off-by: Yishai Hadas Reviewed-by: Michael Guralnik Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx5/cq.c | 17 +++++++++++------ include/uapi/rdma/mlx5-abi.h | 4 ++++ 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 367a71bc5f4b..1d184bd5c759 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -692,17 +692,19 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata, struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context( udata, struct mlx5_ib_ucontext, ibucontext); - ucmdlen = udata->inlen < sizeof(ucmd) ? 
- (sizeof(ucmd) - sizeof(ucmd.flags)) : sizeof(ucmd); + ucmdlen = min(udata->inlen, sizeof(ucmd)); + if (ucmdlen < offsetof(struct mlx5_ib_create_cq, flags)) + return -EINVAL; if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) return -EFAULT; - if (ucmdlen == sizeof(ucmd) && - (ucmd.flags & ~(MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD))) + if ((ucmd.flags & ~(MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD | + MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX))) return -EINVAL; - if (ucmd.cqe_size != 64 && ucmd.cqe_size != 128) + if ((ucmd.cqe_size != 64 && ucmd.cqe_size != 128) || + ucmd.reserved0 || ucmd.reserved1) return -EINVAL; *cqe_size = ucmd.cqe_size; @@ -739,7 +741,10 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata, MLX5_SET(cqc, cqc, log_page_size, page_shift - MLX5_ADAPTER_PAGE_SHIFT); - *index = context->bfregi.sys_pages[0]; + if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX) + *index = ucmd.uar_page_index; + else + *index = context->bfregi.sys_pages[0]; if (ucmd.cqe_comp_en == 1) { int mini_cqe_format; diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h index 624f5b53eb1f..e900f9a64feb 100644 --- a/include/uapi/rdma/mlx5-abi.h +++ b/include/uapi/rdma/mlx5-abi.h @@ -266,6 +266,7 @@ struct mlx5_ib_query_device_resp { enum mlx5_ib_create_cq_flags { MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD = 1 << 0, + MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX = 1 << 1, }; struct mlx5_ib_create_cq { @@ -275,6 +276,9 @@ struct mlx5_ib_create_cq { __u8 cqe_comp_en; __u8 cqe_comp_res_format; __u16 flags; + __u16 uar_page_index; + __u16 reserved0; + __u32 reserved1; }; struct mlx5_ib_create_cq_resp { -- cgit v1.2.3-58-ga151 From ac42a5ee922503f99e8a71d41b0067fa19f23ca6 Mon Sep 17 00:00:00 2001 From: Yishai Hadas Date: Tue, 24 Mar 2020 08:01:41 +0200 Subject: IB/mlx5: Extend QP creation to get uar page index from user space Extend QP creation to get uar page index from user space, this mode can be used with the UAR dynamic mode APIs to allocate/destroy a UAR object. As part of enabling this option blocked the weird/un-supported cross channel option which uses index 0 hard-coded. This QP flag wasn't exposed to user space as part of any formal upstream release, the dynamic option can allow having valid UAR page index instead. 
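The uar_flags handling in create_user_qp() boils down to picking exactly one UAR source per QP: a user-supplied dynamic UAR page, a bfreg index, or a driver-allocated bfreg, with any other combination rejected. A standalone userspace sketch of that selection logic (the flag bit values below are illustrative, not the mlx5 uapi values):

#include <stdio.h>

/* illustrative flag bits, not the real mlx5-abi values */
#define QP_FLAG_BFREG_INDEX    (1u << 3)
#define QP_FLAG_UAR_PAGE_INDEX (1u << 10)

/* returns 0 on success, -1 on an invalid flag combination */
static int select_uar_source(unsigned int flags, unsigned int user_index,
                             unsigned int *uar_index)
{
        switch (flags & (QP_FLAG_UAR_PAGE_INDEX | QP_FLAG_BFREG_INDEX)) {
        case QP_FLAG_UAR_PAGE_INDEX:
                *uar_index = user_index;         /* dynamically allocated UAR */
                return 0;
        case QP_FLAG_BFREG_INDEX:
                *uar_index = 100 + user_index;   /* stand-in for bfregn_to_uar_index() */
                return 0;
        case 0:
                *uar_index = 0;                  /* stand-in for alloc_bfreg() */
                return 0;
        default:
                return -1;                       /* both flags set: reject */
        }
}

int main(void)
{
        unsigned int idx;

        if (!select_uar_source(QP_FLAG_UAR_PAGE_INDEX, 7, &idx))
                printf("uar_index %u\n", idx);
        if (select_uar_source(QP_FLAG_UAR_PAGE_INDEX | QP_FLAG_BFREG_INDEX, 7, &idx))
                printf("rejected conflicting flags\n");
        return 0;
}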
Link: https://lore.kernel.org/r/20200324060143.1569116-4-leon@kernel.org Signed-off-by: Yishai Hadas Reviewed-by: Michael Guralnik Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx5/qp.c | 27 +++++++++++++++++---------- include/uapi/rdma/mlx5-abi.h | 1 + 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 7b4e936ad210..b708a0484f77 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -919,6 +919,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, void *qpc; int err; u16 uid; + u32 uar_flags; err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)); if (err) { @@ -928,24 +929,29 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, context = rdma_udata_to_drv_context(udata, struct mlx5_ib_ucontext, ibucontext); - if (ucmd.flags & MLX5_QP_FLAG_BFREG_INDEX) { + uar_flags = ucmd.flags & (MLX5_QP_FLAG_UAR_PAGE_INDEX | + MLX5_QP_FLAG_BFREG_INDEX); + switch (uar_flags) { + case MLX5_QP_FLAG_UAR_PAGE_INDEX: + uar_index = ucmd.bfreg_index; + bfregn = MLX5_IB_INVALID_BFREG; + break; + case MLX5_QP_FLAG_BFREG_INDEX: uar_index = bfregn_to_uar_index(dev, &context->bfregi, ucmd.bfreg_index, true); if (uar_index < 0) return uar_index; - bfregn = MLX5_IB_INVALID_BFREG; - } else if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL) { - /* - * TBD: should come from the verbs when we have the API - */ - /* In CROSS_CHANNEL CQ and QP must use the same UAR */ - bfregn = MLX5_CROSS_CHANNEL_BFREG; - } - else { + break; + case 0: + if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL) + return -EINVAL; bfregn = alloc_bfreg(dev, &context->bfregi); if (bfregn < 0) return bfregn; + break; + default: + return -EINVAL; } mlx5_ib_dbg(dev, "bfregn 0x%x, uar_index 0x%x\n", bfregn, uar_index); @@ -2100,6 +2106,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC | MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC | MLX5_QP_FLAG_TUNNEL_OFFLOADS | + MLX5_QP_FLAG_UAR_PAGE_INDEX | MLX5_QP_FLAG_TYPE_DCI | MLX5_QP_FLAG_TYPE_DCT)) return -EINVAL; diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h index e900f9a64feb..a65d60b44829 100644 --- a/include/uapi/rdma/mlx5-abi.h +++ b/include/uapi/rdma/mlx5-abi.h @@ -49,6 +49,7 @@ enum { MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC = 1 << 7, MLX5_QP_FLAG_ALLOW_SCATTER_CQE = 1 << 8, MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE = 1 << 9, + MLX5_QP_FLAG_UAR_PAGE_INDEX = 1 << 10, }; enum { -- cgit v1.2.3-58-ga151 From 2152862298fbfd237d37c231dfca8ae8f3ed0e48 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Tue, 24 Mar 2020 08:01:42 +0200 Subject: IB/mlx5: Limit the scope of struct mlx5_bfreg_info to mlx5_ib struct mlx5_bfreg_info is used by mlx5_ib only but is exposed to both RDMA and netdev parts of mlx5 driver. Move that struct to mlx5_ib namespace, clean vertical space alignment and convert lib_uar_4k from bool to bitfield. 
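Converting lib_uar_4k from bool to a 1-bit field lets later flags (such as lib_uar_dyn in a following patch) share the same byte instead of each taking a full bool. A tiny sketch of the packing effect, using plain C types in place of the kernel's u8:

#include <stdio.h>

struct bfreg_flags_bool {
        _Bool lib_uar_4k;
        _Bool lib_uar_dyn;
};

struct bfreg_flags_bits {
        unsigned char lib_uar_4k : 1;
        unsigned char lib_uar_dyn : 1;
};

int main(void)
{
        printf("two bools: %zu bytes, two bitfields: %zu byte(s)\n",
               sizeof(struct bfreg_flags_bool),
               sizeof(struct bfreg_flags_bits));
        return 0;
}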
Link: https://lore.kernel.org/r/20200324060143.1569116-5-leon@kernel.org Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx5/mlx5_ib.h | 17 +++++++++++++++++ include/linux/mlx5/driver.h | 17 ----------------- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 3ba6175f949e..ae6289133cd8 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -128,6 +128,23 @@ enum mlx5_ib_mmap_type { MLX5_IB_MMAP_TYPE_UAR_NC = 4, }; +struct mlx5_bfreg_info { + u32 *sys_pages; + int num_low_latency_bfregs; + unsigned int *count; + + /* + * protect bfreg allocation data structs + */ + struct mutex lock; + u32 ver; + u8 lib_uar_4k : 1; + u32 num_sys_pages; + u32 num_static_sys_pages; + u32 total_num_bfregs; + u32 num_dyn_bfregs; +}; + struct mlx5_ib_ucontext { struct ib_ucontext ibucontext; struct list_head db_page_list; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 1de78f001d26..a30d834fdf7e 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -213,23 +213,6 @@ enum mlx5_port_status { MLX5_PORT_DOWN = 2, }; -struct mlx5_bfreg_info { - u32 *sys_pages; - int num_low_latency_bfregs; - unsigned int *count; - - /* - * protect bfreg allocation data structs - */ - struct mutex lock; - u32 ver; - bool lib_uar_4k; - u32 num_sys_pages; - u32 num_static_sys_pages; - u32 total_num_bfregs; - u32 num_dyn_bfregs; -}; - struct mlx5_cmd_first { __be32 data[4]; }; -- cgit v1.2.3-58-ga151 From 0a2fd01c28ae490a639a32a52b81fb2df48b92a0 Mon Sep 17 00:00:00 2001 From: Yishai Hadas Date: Tue, 24 Mar 2020 08:01:43 +0200 Subject: IB/mlx5: Move to fully dynamic UAR mode once user space supports it Move to fully dynamic UAR mode once user space supports it. In this case we prevent any legacy mode of UARs on the allocated context and prevent redundant allocation of the static ones. 
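The ucontext path can be read as a capability negotiation: when the library advertises dynamic UARs, the static bfreg setup is skipped entirely and tot_bfregs is reported as 0. A minimal sketch of that branch, with stand-in types rather than the driver structures:

#include <stdio.h>
#include <stdbool.h>

#define LIB_CAP_4K_UAR  (1ull << 0)     /* illustrative, mirrors mlx5_lib_caps */
#define LIB_CAP_DYN_UAR (1ull << 1)

struct ucontext_resp {
        unsigned int tot_bfregs;
};

static int alloc_ucontext(unsigned long long lib_caps,
                          unsigned int requested_bfregs,
                          struct ucontext_resp *resp)
{
        bool lib_uar_dyn = lib_caps & LIB_CAP_DYN_UAR;

        if (lib_uar_dyn) {
                /* fully dynamic mode: no static UAR pages at context creation */
                resp->tot_bfregs = 0;
                return 0;
        }

        /* legacy path: static UAR pages would be allocated here */
        resp->tot_bfregs = requested_bfregs;
        return 0;
}

int main(void)
{
        struct ucontext_resp resp;

        alloc_ucontext(LIB_CAP_DYN_UAR, 16, &resp);
        printf("tot_bfregs with DYN_UAR: %u\n", resp.tot_bfregs);
        alloc_ucontext(LIB_CAP_4K_UAR, 16, &resp);
        printf("tot_bfregs legacy: %u\n", resp.tot_bfregs);
        return 0;
}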
Link: https://lore.kernel.org/r/20200324060143.1569116-6-leon@kernel.org Signed-off-by: Yishai Hadas Reviewed-by: Michael Guralnik Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx5/cq.c | 8 ++++++-- drivers/infiniband/hw/mlx5/main.c | 13 ++++++++++++- drivers/infiniband/hw/mlx5/mlx5_ib.h | 1 + drivers/infiniband/hw/mlx5/qp.c | 6 ++++++ include/uapi/rdma/mlx5-abi.h | 1 + 5 files changed, 26 insertions(+), 3 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 1d184bd5c759..f1c7fa561b16 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -741,10 +741,14 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata, MLX5_SET(cqc, cqc, log_page_size, page_shift - MLX5_ADAPTER_PAGE_SHIFT); - if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX) + if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX) { *index = ucmd.uar_page_index; - else + } else if (context->bfregi.lib_uar_dyn) { + err = -EINVAL; + goto err_cqb; + } else { *index = context->bfregi.sys_pages[0]; + } if (ucmd.cqe_comp_en == 1) { int mini_cqe_format; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 289445f6af21..12273851f2db 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -1786,6 +1786,7 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx, max_cqe_version); u32 dump_fill_mkey; bool lib_uar_4k; + bool lib_uar_dyn; if (!dev->ib_active) return -EAGAIN; @@ -1844,8 +1845,14 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx, } lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR; + lib_uar_dyn = req.lib_caps & MLX5_LIB_CAP_DYN_UAR; bfregi = &context->bfregi; + if (lib_uar_dyn) { + bfregi->lib_uar_dyn = lib_uar_dyn; + goto uar_done; + } + /* updates req->total_num_bfregs */ err = calc_total_bfregs(dev, lib_uar_4k, &req, bfregi); if (err) @@ -1872,6 +1879,7 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx, if (err) goto out_sys_pages; +uar_done: if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) { err = mlx5_ib_devx_create(dev, true); if (err < 0) @@ -1893,7 +1901,7 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx, INIT_LIST_HEAD(&context->db_page_list); mutex_init(&context->db_page_mutex); - resp.tot_bfregs = req.total_num_bfregs; + resp.tot_bfregs = lib_uar_dyn ? 0 : req.total_num_bfregs; resp.num_ports = dev->num_ports; if (offsetofend(typeof(resp), cqe_version) <= udata->outlen) @@ -2141,6 +2149,9 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd, int max_valid_idx = dyn_uar ? 
bfregi->num_sys_pages : bfregi->num_static_sys_pages; + if (bfregi->lib_uar_dyn) + return -EINVAL; + if (vma->vm_end - vma->vm_start != PAGE_SIZE) return -EINVAL; diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index ae6289133cd8..b88414acb993 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -139,6 +139,7 @@ struct mlx5_bfreg_info { struct mutex lock; u32 ver; u8 lib_uar_4k : 1; + u8 lib_uar_dyn : 1; u32 num_sys_pages; u32 num_static_sys_pages; u32 total_num_bfregs; diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index b708a0484f77..aa7834d80493 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -697,6 +697,9 @@ static int alloc_bfreg(struct mlx5_ib_dev *dev, { int bfregn = -ENOMEM; + if (bfregi->lib_uar_dyn) + return -EINVAL; + mutex_lock(&bfregi->lock); if (bfregi->ver >= 2) { bfregn = alloc_high_class_bfreg(dev, bfregi); @@ -768,6 +771,9 @@ int bfregn_to_uar_index(struct mlx5_ib_dev *dev, u32 index_of_sys_page; u32 offset; + if (bfregi->lib_uar_dyn) + return -EINVAL; + bfregs_per_sys_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * MLX5_NON_FP_BFREGS_PER_UAR; index_of_sys_page = bfregn / bfregs_per_sys_page; diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h index a65d60b44829..df1cc3641bda 100644 --- a/include/uapi/rdma/mlx5-abi.h +++ b/include/uapi/rdma/mlx5-abi.h @@ -79,6 +79,7 @@ struct mlx5_ib_alloc_ucontext_req { enum mlx5_lib_caps { MLX5_LIB_CAP_4K_UAR = (__u64)1 << 0, + MLX5_LIB_CAP_DYN_UAR = (__u64)1 << 1, }; enum mlx5_ib_alloc_uctx_v2_flags { -- cgit v1.2.3-58-ga151 From 5c15abc4328ad696fa61e2f3604918ed0c207755 Mon Sep 17 00:00:00 2001 From: Kaike Wan Date: Thu, 26 Mar 2020 12:38:07 -0400 Subject: IB/hfi1: Fix memory leaks in sysfs registration and unregistration When the hfi1 driver is unloaded, kmemleak will report the following issue: unreferenced object 0xffff8888461a4c08 (size 8): comm "kworker/0:0", pid 5, jiffies 4298601264 (age 2047.134s) hex dump (first 8 bytes): 73 64 6d 61 30 00 ff ff sdma0... backtrace: [<00000000311a6ef5>] kvasprintf+0x62/0xd0 [<00000000ade94d9f>] kobject_set_name_vargs+0x1c/0x90 [<0000000060657dbb>] kobject_init_and_add+0x5d/0xb0 [<00000000346fe72b>] 0xffffffffa0c5ecba [<000000006cfc5819>] 0xffffffffa0c866b9 [<0000000031c65580>] 0xffffffffa0c38e87 [<00000000e9739b3f>] local_pci_probe+0x41/0x80 [<000000006c69911d>] work_for_cpu_fn+0x16/0x20 [<00000000601267b5>] process_one_work+0x171/0x380 [<0000000049a0eefa>] worker_thread+0x1d1/0x3f0 [<00000000909cf2b9>] kthread+0xf8/0x130 [<0000000058f5f874>] ret_from_fork+0x35/0x40 This patch fixes the issue by: - Releasing dd->per_sdma[i].kobject in hfi1_unregister_sysfs(). - This will fix the memory leak. - Calling kobject_put() to unwind operations only for those entries in dd->per_sdma[] whose operations have succeeded (including the current one that has just failed) in hfi1_verbs_register_sysfs(). 
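The unwind loop is the key detail: kobject_init_and_add() may fail on entry i after entries 0..i-1 succeeded, and kobject_put() is also the correct release for the partially initialized entry i itself, so the error path walks from i down to 0 inclusive. A standalone model of that pattern, with plain functions standing in for the kobject calls:

#include <stdio.h>

#define N 4

static int fake_init_and_add(int i)
{
        return i == 2 ? -1 : 0;         /* pretend entry 2 fails */
}

static void fake_put(int i)
{
        printf("releasing entry %d\n", i);
}

static int register_all(void)
{
        int i, ret = 0;

        for (i = 0; i < N; i++) {
                ret = fake_init_and_add(i);
                if (ret)
                        goto bail;
        }
        return 0;

bail:
        /* release the failed entry too, mirroring kobject_put() semantics */
        for (; i >= 0; i--)
                fake_put(i);
        return ret;
}

int main(void)
{
        return register_all() ? 1 : 0;
}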
Cc: Fixes: 0cb2aa690c7e ("IB/hfi1: Add sysfs interface for affinity setup") Link: https://lore.kernel.org/r/20200326163807.21129.27371.stgit@awfm-01.aw.intel.com Reviewed-by: Mike Marciniszyn Signed-off-by: Kaike Wan Signed-off-by: Dennis Dalessandro Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hfi1/sysfs.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c index 90f62c4bddba..f1bcecf92535 100644 --- a/drivers/infiniband/hw/hfi1/sysfs.c +++ b/drivers/infiniband/hw/hfi1/sysfs.c @@ -853,8 +853,13 @@ int hfi1_verbs_register_sysfs(struct hfi1_devdata *dd) return 0; bail: - for (i = 0; i < dd->num_sdma; i++) - kobject_del(&dd->per_sdma[i].kobj); + /* + * The function kobject_put() will call kobject_del() if the kobject + * has been added successfully. The sysfs files created under the + * kobject directory will also be removed during the process. + */ + for (; i >= 0; i--) + kobject_put(&dd->per_sdma[i].kobj); return ret; } @@ -867,6 +872,10 @@ void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *dd) struct hfi1_pportdata *ppd; int i; + /* Unwind operations in hfi1_verbs_register_sysfs() */ + for (i = 0; i < dd->num_sdma; i++) + kobject_put(&dd->per_sdma[i].kobj); + for (i = 0; i < dd->num_pports; i++) { ppd = &dd->pport[i]; -- cgit v1.2.3-58-ga151 From dfb5394f804ed4fcea1fc925be275a38d66712ab Mon Sep 17 00:00:00 2001 From: Kaike Wan Date: Thu, 26 Mar 2020 12:38:14 -0400 Subject: IB/hfi1: Call kobject_put() when kobject_init_and_add() fails When kobject_init_and_add() returns an error in the function hfi1_create_port_files(), the function kobject_put() is not called for the corresponding kobject, which potentially leads to memory leak. This patch fixes the issue by calling kobject_put() even if kobject_init_and_add() fails. Cc: Link: https://lore.kernel.org/r/20200326163813.21129.44280.stgit@awfm-01.aw.intel.com Reviewed-by: Mike Marciniszyn Signed-off-by: Kaike Wan Signed-off-by: Dennis Dalessandro Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hfi1/sysfs.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c index f1bcecf92535..074ec71772d2 100644 --- a/drivers/infiniband/hw/hfi1/sysfs.c +++ b/drivers/infiniband/hw/hfi1/sysfs.c @@ -674,7 +674,11 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num, dd_dev_err(dd, "Skipping sc2vl sysfs info, (err %d) port %u\n", ret, port_num); - goto bail; + /* + * Based on the documentation for kobject_init_and_add(), the + * caller should call kobject_put even if this call fails. 
+ */ + goto bail_sc2vl; } kobject_uevent(&ppd->sc2vl_kobj, KOBJ_ADD); @@ -684,7 +688,7 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num, dd_dev_err(dd, "Skipping sl2sc sysfs info, (err %d) port %u\n", ret, port_num); - goto bail_sc2vl; + goto bail_sl2sc; } kobject_uevent(&ppd->sl2sc_kobj, KOBJ_ADD); @@ -694,7 +698,7 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num, dd_dev_err(dd, "Skipping vl2mtu sysfs info, (err %d) port %u\n", ret, port_num); - goto bail_sl2sc; + goto bail_vl2mtu; } kobject_uevent(&ppd->vl2mtu_kobj, KOBJ_ADD); @@ -704,7 +708,7 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num, dd_dev_err(dd, "Skipping Congestion Control sysfs info, (err %d) port %u\n", ret, port_num); - goto bail_vl2mtu; + goto bail_cc; } kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD); @@ -742,7 +746,6 @@ bail_sl2sc: kobject_put(&ppd->sl2sc_kobj); bail_sc2vl: kobject_put(&ppd->sc2vl_kobj); -bail: return ret; } -- cgit v1.2.3-58-ga151 From 24670b1a31661815777c2e88b94c162e47ea43fc Mon Sep 17 00:00:00 2001 From: Michael Guralnik Date: Tue, 24 Mar 2020 08:14:24 +0200 Subject: net/mlx5: Add support for RDMA TX steering Add new RDMA TX flow steering namespace. Flow steering rules in this namespace are used to filter transmitted RDMA traffic. Link: https://lore.kernel.org/r/20200324061425.1570190-2-leon@kernel.org Signed-off-by: Michael Guralnik Reviewed-by: Maor Gottlieb Reviewed-by: Mark Bloch Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 1 + drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 53 +++++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 7 ++- include/linux/mlx5/device.h | 6 +++ include/linux/mlx5/fs.h | 1 + include/linux/mlx5/mlx5_ifc.h | 2 +- 6 files changed, 67 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index b25465d9e030..90048697b2ff 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -904,6 +904,7 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type typ case FS_FT_SNIFFER_TX: case FS_FT_NIC_TX: case FS_FT_RDMA_RX: + case FS_FT_RDMA_TX: return mlx5_fs_cmd_get_fw_cmds(); default: return mlx5_fs_cmd_get_stub_cmds(); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 9dc24241dc91..98c74a867ef4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -87,6 +87,15 @@ .identified_miss_table_mode), \ FS_CAP(flow_table_properties_nic_transmit.flow_table_modify)) +#define FS_CHAINING_CAPS_RDMA_TX \ + FS_REQUIRED_CAPS( \ + FS_CAP(flow_table_properties_nic_transmit_rdma.flow_modify_en), \ + FS_CAP(flow_table_properties_nic_transmit_rdma.modify_root), \ + FS_CAP(flow_table_properties_nic_transmit_rdma \ + .identified_miss_table_mode), \ + FS_CAP(flow_table_properties_nic_transmit_rdma \ + .flow_table_modify)) + #define LEFTOVERS_NUM_LEVELS 1 #define LEFTOVERS_NUM_PRIOS 1 @@ -202,6 +211,18 @@ static struct init_tree_node rdma_rx_root_fs = { } }; +static struct init_tree_node rdma_tx_root_fs = { + .type = FS_TYPE_NAMESPACE, + .ar_size = 1, + .children = (struct init_tree_node[]) { + ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0, + FS_CHAINING_CAPS_RDMA_TX, + ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, + ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, + 
BY_PASS_PRIO_NUM_LEVELS))), + } +}; + enum fs_i_lock_class { FS_LOCK_GRANDPARENT, FS_LOCK_PARENT, @@ -2132,6 +2153,8 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL) { root_ns = steering->rdma_rx_root_ns; prio = RDMA_RX_KERNEL_PRIO; + } else if (type == MLX5_FLOW_NAMESPACE_RDMA_TX) { + root_ns = steering->rdma_tx_root_ns; } else { /* Must be NIC RX */ root_ns = steering->root_ns; prio = type; @@ -2535,6 +2558,7 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev) cleanup_root_ns(steering->sniffer_rx_root_ns); cleanup_root_ns(steering->sniffer_tx_root_ns); cleanup_root_ns(steering->rdma_rx_root_ns); + cleanup_root_ns(steering->rdma_tx_root_ns); cleanup_root_ns(steering->egress_root_ns); mlx5_cleanup_fc_stats(dev); kmem_cache_destroy(steering->ftes_cache); @@ -2591,6 +2615,29 @@ out_err: return err; } +static int init_rdma_tx_root_ns(struct mlx5_flow_steering *steering) +{ + int err; + + steering->rdma_tx_root_ns = create_root_ns(steering, FS_FT_RDMA_TX); + if (!steering->rdma_tx_root_ns) + return -ENOMEM; + + err = init_root_tree(steering, &rdma_tx_root_fs, + &steering->rdma_tx_root_ns->ns.node); + if (err) + goto out_err; + + set_prio_attrs(steering->rdma_tx_root_ns); + + return 0; + +out_err: + cleanup_root_ns(steering->rdma_tx_root_ns); + steering->rdma_tx_root_ns = NULL; + return err; +} + /* FT and tc chains are stored in the same array so we can re-use the * mlx5_get_fdb_sub_ns() and tc api for FT chains. * When creating a new ns for each chain store it in the first available slot. @@ -2890,6 +2937,12 @@ int mlx5_init_fs(struct mlx5_core_dev *dev) goto err; } + if (MLX5_CAP_FLOWTABLE_RDMA_TX(dev, ft_support)) { + err = init_rdma_tx_root_ns(steering); + if (err) + goto err; + } + if (MLX5_IPSEC_DEV(dev) || MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) { err = init_egress_root_ns(steering); if (err) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index be5f5e32c1e8..508108c58dae 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -86,7 +86,8 @@ enum fs_flow_table_type { FS_FT_SNIFFER_RX = 0X5, FS_FT_SNIFFER_TX = 0X6, FS_FT_RDMA_RX = 0X7, - FS_FT_MAX_TYPE = FS_FT_RDMA_RX, + FS_FT_RDMA_TX = 0X8, + FS_FT_MAX_TYPE = FS_FT_RDMA_TX, }; enum fs_flow_table_op_mod { @@ -116,6 +117,7 @@ struct mlx5_flow_steering { struct mlx5_flow_root_namespace *sniffer_tx_root_ns; struct mlx5_flow_root_namespace *sniffer_rx_root_ns; struct mlx5_flow_root_namespace *rdma_rx_root_ns; + struct mlx5_flow_root_namespace *rdma_tx_root_ns; struct mlx5_flow_root_namespace *egress_root_ns; }; @@ -316,7 +318,8 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev); (type == FS_FT_SNIFFER_RX) ? MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) : \ (type == FS_FT_SNIFFER_TX) ? MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) : \ (type == FS_FT_RDMA_RX) ? MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) : \ - (BUILD_BUG_ON_ZERO(FS_FT_RDMA_RX != FS_FT_MAX_TYPE))\ + (type == FS_FT_RDMA_TX) ? 
MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) : \ + (BUILD_BUG_ON_ZERO(FS_FT_RDMA_TX != FS_FT_MAX_TYPE))\ ) #endif diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 0e62c3db45e5..2b90097a6cf9 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -1211,6 +1211,12 @@ enum mlx5_qcam_feature_groups { #define MLX5_CAP_FLOWTABLE_RDMA_RX_MAX(mdev, cap) \ MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_rdma.cap) +#define MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) \ + MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_rdma.cap) + +#define MLX5_CAP_FLOWTABLE_RDMA_TX_MAX(mdev, cap) \ + MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_rdma.cap) + #define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \ MLX5_GET(flow_table_eswitch_cap, \ mdev->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index 4cae16016b2b..44c9fe792fc4 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -77,6 +77,7 @@ enum mlx5_flow_namespace_type { MLX5_FLOW_NAMESPACE_EGRESS, MLX5_FLOW_NAMESPACE_RDMA_RX, MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL, + MLX5_FLOW_NAMESPACE_RDMA_TX, }; enum { diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 9b8ff4e57002..600366ef2d84 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -705,7 +705,7 @@ struct mlx5_ifc_flow_table_nic_cap_bits { struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit; - u8 reserved_at_a00[0x200]; + struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_rdma; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer; -- cgit v1.2.3-58-ga151 From af9c38411d188021900031d00bd8e8dafd4ad557 Mon Sep 17 00:00:00 2001 From: Michael Guralnik Date: Tue, 24 Mar 2020 08:14:25 +0200 Subject: RDMA/mlx5: Add support for RDMA TX flow table Enable user application to add rules for RDMA TX steering table. Rules in this steering table will allow to steer transmitted RDMA traffic. 
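Most of the uapi plumbing is a one-case extension of the table-type to flow-namespace mapping; a sketch of that switch with illustrative enum values (not the real uapi constants):

#include <stdio.h>

enum ft_type { FT_NIC_RX, FT_NIC_TX, FT_FDB, FT_RDMA_RX, FT_RDMA_TX };
enum ns_type { NS_BYPASS, NS_EGRESS, NS_FDB, NS_RDMA_RX, NS_RDMA_TX };

static int ft_type_to_namespace(enum ft_type t, enum ns_type *ns)
{
        switch (t) {
        case FT_RDMA_RX:
                *ns = NS_RDMA_RX;
                return 0;
        case FT_RDMA_TX:        /* the case this series adds */
                *ns = NS_RDMA_TX;
                return 0;
        default:
                return -1;      /* -EINVAL in the driver */
        }
}

int main(void)
{
        enum ns_type ns;

        if (!ft_type_to_namespace(FT_RDMA_TX, &ns))
                printf("mapped to namespace %d\n", ns);
        return 0;
}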
Link: https://lore.kernel.org/r/20200324061425.1570190-3-leon@kernel.org Signed-off-by: Michael Guralnik Reviewed-by: Maor Gottlieb Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx5/flow.c | 3 +++ drivers/infiniband/hw/mlx5/main.c | 7 +++++++ drivers/infiniband/hw/mlx5/mlx5_ib.h | 1 + include/uapi/rdma/mlx5_user_ioctl_verbs.h | 1 + 4 files changed, 12 insertions(+) diff --git a/drivers/infiniband/hw/mlx5/flow.c b/drivers/infiniband/hw/mlx5/flow.c index dbee17d22d50..862b7bf3e646 100644 --- a/drivers/infiniband/hw/mlx5/flow.c +++ b/drivers/infiniband/hw/mlx5/flow.c @@ -35,6 +35,9 @@ mlx5_ib_ft_type_to_namespace(enum mlx5_ib_uapi_flow_table_type table_type, case MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_RX: *namespace = MLX5_FLOW_NAMESPACE_RDMA_RX; break; + case MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_TX: + *namespace = MLX5_FLOW_NAMESPACE_RDMA_TX; + break; default: return -EINVAL; } diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index fce863621414..96dcaecf69f2 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -4045,6 +4045,11 @@ _get_flow_table(struct mlx5_ib_dev *dev, BIT(MLX5_CAP_FLOWTABLE_RDMA_RX(dev->mdev, log_max_ft_size)); priority = fs_matcher->priority; + } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX) { + max_table_size = + BIT(MLX5_CAP_FLOWTABLE_RDMA_TX(dev->mdev, + log_max_ft_size)); + priority = fs_matcher->priority; } max_table_size = min_t(int, max_table_size, MLX5_FS_MAX_ENTRIES); @@ -4061,6 +4066,8 @@ _get_flow_table(struct mlx5_ib_dev *dev, prio = &dev->flow_db->fdb; else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) prio = &dev->flow_db->rdma_rx[priority]; + else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX) + prio = &dev->flow_db->rdma_tx[priority]; if (!prio) return ERR_PTR(-EINVAL); diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 89a050e516a8..6fe01d6142aa 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -210,6 +210,7 @@ struct mlx5_ib_flow_db { struct mlx5_ib_flow_prio egress[MLX5_IB_NUM_EGRESS_FTS]; struct mlx5_ib_flow_prio fdb; struct mlx5_ib_flow_prio rdma_rx[MLX5_IB_NUM_FLOW_FT]; + struct mlx5_ib_flow_prio rdma_tx[MLX5_IB_NUM_FLOW_FT]; struct mlx5_flow_table *lag_demux_ft; /* Protect flow steering bypass flow tables * when add/del flow rules. diff --git a/include/uapi/rdma/mlx5_user_ioctl_verbs.h b/include/uapi/rdma/mlx5_user_ioctl_verbs.h index 88b6ca70c2fe..7ab4b92966e2 100644 --- a/include/uapi/rdma/mlx5_user_ioctl_verbs.h +++ b/include/uapi/rdma/mlx5_user_ioctl_verbs.h @@ -44,6 +44,7 @@ enum mlx5_ib_uapi_flow_table_type { MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX = 0x1, MLX5_IB_UAPI_FLOW_TABLE_TYPE_FDB = 0x2, MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_RX = 0x3, + MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_TX = 0x4, }; enum mlx5_ib_uapi_flow_action_packet_reformat_type { -- cgit v1.2.3-58-ga151 From 9d04d56c47b11962391b96202e181042941e30b3 Mon Sep 17 00:00:00 2001 From: Jihua Tao Date: Thu, 26 Mar 2020 11:40:16 +0800 Subject: RDMA/hns: Reduce PFC frames in congestion scenarios The original value means sending 16 packets at a time, and it should be configured to 0 which means sending 1 packet instead. It is modified to reduce the number of PFC frames to make sure the performance meets expectations when flow control is enabled on hip08. 
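LP_PKTN_INI is a log2 exponent, so the change is from bursts of 2^4 = 16 packets down to 2^0 = 1 packet, while the constraint noted in the code comment (mtu * 2^LP_PKTN_INI must not exceed the 64 KB message size) still holds. A quick check of that arithmetic with an example MTU:

#include <stdio.h>

int main(void)
{
        unsigned int mtu = 4096;        /* example IB MTU */
        unsigned int limit = 64 * 1024;
        unsigned int ini;

        for (ini = 0; ini <= 4; ini++) {
                unsigned int burst = mtu * (1u << ini);

                printf("LP_PKTN_INI=%u -> %2u packets, %u bytes%s\n",
                       ini, 1u << ini, burst,
                       burst <= limit ? "" : " (exceeds 64KB)");
        }
        return 0;
}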
Link: https://lore.kernel.org/r/1585194018-4381-2-git-send-email-liweihang@huawei.com Signed-off-by: Jihua Tao Signed-off-by: Weihang Li Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 7eceeea9ccea..c3316672b70e 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -3928,7 +3928,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, /* mtu*(2^LP_PKTN_INI) should not bigger than 1 message length 64kb */ roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M, - V2_QPC_BYTE_56_LP_PKTN_INI_S, 4); + V2_QPC_BYTE_56_LP_PKTN_INI_S, 0); roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M, V2_QPC_BYTE_56_LP_PKTN_INI_S, 0); -- cgit v1.2.3-58-ga151 From 019cd05ce59d1f7c9cde4b77237d2c5fd114ce83 Mon Sep 17 00:00:00 2001 From: Lang Cheng Date: Thu, 26 Mar 2020 11:40:17 +0800 Subject: RDMA/hns: Reduce the maximum number of extend SGE per WQE Just reduce the default number to 64 for backward compatibility, the driver can still get this configuration from the firmware. Link: https://lore.kernel.org/r/1585194018-4381-3-git-send-email-liweihang@huawei.com Signed-off-by: Lang Cheng Signed-off-by: Weihang Li Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h index 7c999536a4c5..c74bf3cdc9b1 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h @@ -50,15 +50,14 @@ #define HNS_ROCE_V2_MAX_WQE_NUM 0x8000 #define HNS_ROCE_V2_MAX_SRQ 0x100000 #define HNS_ROCE_V2_MAX_SRQ_WR 0x8000 -#define HNS_ROCE_V2_MAX_SRQ_SGE 0x100 +#define HNS_ROCE_V2_MAX_SRQ_SGE 64 #define HNS_ROCE_V2_MAX_CQ_NUM 0x100000 #define HNS_ROCE_V2_MAX_CQC_TIMER_NUM 0x100 #define HNS_ROCE_V2_MAX_SRQ_NUM 0x100000 #define HNS_ROCE_V2_MAX_CQE_NUM 0x400000 #define HNS_ROCE_V2_MAX_SRQWQE_NUM 0x8000 -#define HNS_ROCE_V2_MAX_RQ_SGE_NUM 0x100 -#define HNS_ROCE_V2_MAX_SQ_SGE_NUM 0xff -#define HNS_ROCE_V2_MAX_SRQ_SGE_NUM 0x100 +#define HNS_ROCE_V2_MAX_RQ_SGE_NUM 64 +#define HNS_ROCE_V2_MAX_SQ_SGE_NUM 64 #define HNS_ROCE_V2_MAX_EXTEND_SGE_NUM 0x200000 #define HNS_ROCE_V2_MAX_SQ_INLINE 0x20 #define HNS_ROCE_V2_UAR_NUM 256 -- cgit v1.2.3-58-ga151 From 90e735aecc4ce94eb4a2838f9fe04ee8da70b529 Mon Sep 17 00:00:00 2001 From: Lang Cheng Date: Thu, 26 Mar 2020 11:40:18 +0800 Subject: RDMA/hns: Modify the mask of QP number for CQE of hip08 The hip08 supports up to 1M QPs, so the qpn mask of cqe should be modified. 
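With 1M QPs the QPN needs 20 bits, so the old 18-bit mask (0x3ffff) silently truncates any QPN above 256K; 0xfffff covers the full range. A small demonstration of the truncation:

#include <stdio.h>

int main(void)
{
        unsigned int cqe_qpn  = 0x54321;        /* a QPN above 256K, as read from the CQE */
        unsigned int old_mask = 0x3ffff;        /* 18 bits */
        unsigned int new_mask = 0xfffff;        /* 20 bits, enough for 1M QPs */

        printf("old mask gives 0x%x, new mask gives 0x%x\n",
               cqe_qpn & old_mask, cqe_qpn & new_mask);
        return 0;
}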
Link: https://lore.kernel.org/r/1585194018-4381-4-git-send-email-liweihang@huawei.com Signed-off-by: Lang Cheng Signed-off-by: Weihang Li Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h index c74bf3cdc9b1..82dd9f6f4845 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h @@ -162,7 +162,7 @@ enum { #define GID_LEN_V2 16 -#define HNS_ROCE_V2_CQE_QPN_MASK 0x3ffff +#define HNS_ROCE_V2_CQE_QPN_MASK 0xfffff enum { HNS_ROCE_V2_WQE_OP_SEND = 0x0, -- cgit v1.2.3-58-ga151 From d35dc58dd26e2aef5dd7ed1929179c9974f8bb97 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Fri, 27 Mar 2020 21:35:39 -0500 Subject: RDMA/hns: Fix uninitialized variable bug There is a potential execution path in which variable *ret* is returned without being properly initialized, previously. Fix this by initializing variable *ret* to 0. Link: https://lore.kernel.org/r/20200328023539.GA32016@embeddedor Addresses-Coverity-ID: 1491917 ("Uninitialized scalar variable") Fixes: 2f49de21f3e9 ("RDMA/hns: Optimize mhop get flow for multi-hop addressing") Signed-off-by: Gustavo A. R. Silva Acked-by: Weihang Li Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_hem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c index c96378718f88..263338b90d7a 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hem.c +++ b/drivers/infiniband/hw/hns/hns_roce_hem.c @@ -603,7 +603,7 @@ static int set_mhop_hem(struct hns_roce_dev *hr_dev, { struct ib_device *ibdev = &hr_dev->ib_dev; int step_idx; - int ret; + int ret = 0; if (index->inited & HEM_INDEX_L0) { ret = hr_dev->hw->set_hem(hr_dev, table, obj, 0); -- cgit v1.2.3-58-ga151 From 3e87f4313035c86999281336582ff554e9a17bef Mon Sep 17 00:00:00 2001 From: George Spelvin Date: Wed, 27 Mar 2019 12:55:00 -0400 Subject: IB/qib: Delete struct qib_ivdev.qp_rnd I was checking the field to see if it needed the full get_random_bytes() and discovered it's unused. Only compile-tested, as I don't have the hardware, but I'm still pretty confident. 
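Returning briefly to the hns_roce_hem fix above: the bug class there is a status variable assigned only inside conditional branches yet returned unconditionally at the end. A stripped-down illustration of why the initializer matters (hypothetical index flags, not the HEM bits):

#include <stdio.h>

#define INDEX_L0 0x1
#define INDEX_L1 0x2

static int do_step(void)
{
        return 0;       /* pretend the hardware call succeeded */
}

static int set_hem(unsigned int inited)
{
        int ret = 0;    /* without this initializer, ret is garbage when no flag is set */

        if (inited & INDEX_L0)
                ret = do_step();
        if (ret)
                return ret;

        if (inited & INDEX_L1)
                ret = do_step();

        return ret;
}

int main(void)
{
        printf("ret = %d\n", set_hem(0));
        return 0;
}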
Link: https://lore.kernel.org/r/202003281643.02SGh6eG002694@sdf.org Signed-off-by: George Spelvin Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/qib/qib_verbs.c | 2 -- drivers/infiniband/hw/qib/qib_verbs.h | 1 - 2 files changed, 3 deletions(-) diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c index 5ef93f8f17a1..7508abb6a0fa 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.c +++ b/drivers/infiniband/hw/qib/qib_verbs.c @@ -39,7 +39,6 @@ #include #include #include -#include #include #include @@ -1503,7 +1502,6 @@ int qib_register_ib_device(struct qib_devdata *dd) unsigned i, ctxt; int ret; - get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd)); for (i = 0; i < dd->num_pports; i++) init_ibport(ppd + i); diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h index 8bf414b47b96..dc0e81f3b6f4 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.h +++ b/drivers/infiniband/hw/qib/qib_verbs.h @@ -177,7 +177,6 @@ struct qib_ibdev { struct timer_list mem_timer; struct qib_pio_header *pio_hdrs; dma_addr_t pio_hdrs_phys; - u32 qp_rnd; /* random bytes for hash */ u32 n_piowait; u32 n_txwait; -- cgit v1.2.3-58-ga151 From b4d8ddf8356d8ac73fb931d16bcc661a83b2c0fe Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Mon, 30 Mar 2020 19:02:19 +0800 Subject: RDMA/bnxt_re: make bnxt_re_ib_init static Fix sparse warning: drivers/infiniband/hw/bnxt_re/main.c:1313:5: warning: symbol 'bnxt_re_ib_init' was not declared. Should it be static? Link: https://lore.kernel.org/r/20200330110219.24448-1-yuehaibing@huawei.com Signed-off-by: YueHaibing Acked-by: Selvin Xavier Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/bnxt_re/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 4a8fb1ad74a8..b12fbc857f94 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -1310,7 +1310,7 @@ static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev) le16_to_cpu(resp.hwrm_intf_patch); } -int bnxt_re_ib_init(struct bnxt_re_dev *rdev) +static int bnxt_re_ib_init(struct bnxt_re_dev *rdev) { int rc = 0; u32 event; -- cgit v1.2.3-58-ga151
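The sparse warning means a symbol has external linkage but no prototype in any header; when the function is only called from its own file, marking it static both silences the warning and documents the intent. A minimal illustration:

#include <stdio.h>

/* used only inside this translation unit, so give it internal linkage */
static int example_ib_init(void)
{
        return 0;
}

int main(void)
{
        printf("init: %d\n", example_ib_init());
        return 0;
}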