From 785f742223c03ec70f9dcf36b87f59e9df2067e0 Mon Sep 17 00:00:00 2001
From: Ira Weiny
Date: Mon, 30 Nov 2015 09:34:26 -0500
Subject: IB/qib: Fix qib_mr structure

struct qib_mr requires that the mr member be last, because struct
qib_mregion contains a dynamic array at the end. New members should
have been placed before this member, as the comment noted. Failure to
do so was causing random memory corruption.

Reproducing this bug was easy to do by running the client and server
of ib_write_bw -s 8 -n 5 on the same node.

This BUG() was tripped in a slab debug kernel:

  kernel BUG at mm/slab.c:2572!

Fixes: 38071a461f0a ("IB/qib: Support the new memory registration API")
Reviewed-by: Mike Marciniszyn
Signed-off-by: Ira Weiny
Signed-off-by: Doug Ledford
---
 drivers/infiniband/hw/qib/qib_verbs.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 2baf5ad251ed..bc803f33d5f6 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -329,9 +329,9 @@ struct qib_sge {
 struct qib_mr {
 	struct ib_mr ibmr;
 	struct ib_umem *umem;
-	struct qib_mregion mr;  /* must be last */
 	u64 *pages;
 	u32 npages;
+	struct qib_mregion mr;  /* must be last */
 };
 
 /*
--
cgit v1.2.3-58-ga151

From 1d784b890c0101db2556b981e864c2282a3c1b02 Mon Sep 17 00:00:00 2001
From: Mike Marciniszyn
Date: Tue, 1 Dec 2015 10:13:51 -0500
Subject: IB/core: Fix user mode post wr corruption

Commit e622f2f4ad21 ("IB: split struct ib_send_wr") introduced a
regression for HCAs whose user mode post sends go through
ib_uverbs_post_send().

The code didn't account for the fact that the first sge is offset by
an operation dependent length. The allocation did, but the pointer to
the destination sge list is computed without that knowledge. The sge
list copy_from_user() then corrupts fields in the work request.

Store the operation dependent length in a local variable and compute
the sge list copy_from_user() destination using that length.
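A minimal sketch of the fixed pointer math (illustrative only; wr_size
stands in for the operation dependent length, and alloc_wr() is the
helper shown in the diff below):

	/* wr_size depends on the opcode: sizeof(*ud), sizeof(*rdma),
	 * sizeof(*atomic), or sizeof(*next) for plain sends. */
	size_t wr_size = sizeof(struct ib_rdma_wr);	/* example: RDMA op */
	struct ib_send_wr *next = alloc_wr(wr_size, user_wr->num_sge);

	/* The sge list starts after the whole operation dependent header,
	 * rounded up to an SGE boundary -- not after sizeof(*next) alone. */
	next->sg_list = (void *)next + ALIGN(wr_size, sizeof(struct ib_sge));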
Reviewed-by: Ira Weiny
Signed-off-by: Mike Marciniszyn
Reviewed-by: Christoph Hellwig
Signed-off-by: Doug Ledford
---
 drivers/infiniband/core/uverbs_cmd.c | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 94816aeb95a0..4cb8e9d9966c 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -2446,6 +2446,7 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
 	int i, sg_ind;
 	int is_ud;
 	ssize_t ret = -EINVAL;
+	size_t next_size;
 
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;
@@ -2490,7 +2491,8 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
 				goto out_put;
 			}
 
-			ud = alloc_wr(sizeof(*ud), user_wr->num_sge);
+			next_size = sizeof(*ud);
+			ud = alloc_wr(next_size, user_wr->num_sge);
 			if (!ud) {
 				ret = -ENOMEM;
 				goto out_put;
@@ -2511,7 +2513,8 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
 			   user_wr->opcode == IB_WR_RDMA_READ) {
 			struct ib_rdma_wr *rdma;
 
-			rdma = alloc_wr(sizeof(*rdma), user_wr->num_sge);
+			next_size = sizeof(*rdma);
+			rdma = alloc_wr(next_size, user_wr->num_sge);
 			if (!rdma) {
 				ret = -ENOMEM;
 				goto out_put;
@@ -2525,7 +2528,8 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
 			   user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
 			struct ib_atomic_wr *atomic;
 
-			atomic = alloc_wr(sizeof(*atomic), user_wr->num_sge);
+			next_size = sizeof(*atomic);
+			atomic = alloc_wr(next_size, user_wr->num_sge);
 			if (!atomic) {
 				ret = -ENOMEM;
 				goto out_put;
@@ -2540,7 +2544,8 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
 		} else if (user_wr->opcode == IB_WR_SEND ||
 			   user_wr->opcode == IB_WR_SEND_WITH_IMM ||
 			   user_wr->opcode == IB_WR_SEND_WITH_INV) {
-			next = alloc_wr(sizeof(*next), user_wr->num_sge);
+			next_size = sizeof(*next);
+			next = alloc_wr(next_size, user_wr->num_sge);
 			if (!next) {
 				ret = -ENOMEM;
 				goto out_put;
@@ -2572,7 +2577,7 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
 
 		if (next->num_sge) {
 			next->sg_list = (void *) next +
-				ALIGN(sizeof *next, sizeof (struct ib_sge));
+				ALIGN(next_size, sizeof(struct ib_sge));
 			if (copy_from_user(next->sg_list,
 					   buf + sizeof cmd +
 					   cmd.wr_count * cmd.wqe_size +
--
cgit v1.2.3-58-ga151

From 57ab2512138205fe7836332fb4742441e53907ff Mon Sep 17 00:00:00 2001
From: Easwar Hariharan
Date: Mon, 2 Nov 2015 12:13:14 -0500
Subject: IB/qib: Minor fixes to qib per SFF 8636

Minor errors found via code inspection during development of future
features.

SFF 8636 defines bit position 2 to hold the status indication of QSFP
memory paging. The mask used to test for the value was incorrect and
is fixed in this patch.

Additionally, the dump function had a mismatch between the field being
printed out and the field used to source the data, which was fixed.
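The paging test, sketched for clarity (SFF 8636 byte 2, bit 2 indicates
"flat memory"; the constant name here is made up):

	#define QSFP_FLAT_MEM (1 << 2)	/* bit position 2 => mask 0x4, not 0x2 */

	if ((peek[2] & QSFP_FLAT_MEM) == 0) {
		/* cable memory is paged: select page 0 before reading */
	}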
Reviewed-by: Mitko Haralanov
Reviewed-by: Mike Marciniszyn
Reported-by: Easwar Hariharan
Signed-off-by: Easwar Hariharan
Signed-off-by: Mike Marciniszyn
Signed-off-by: Doug Ledford
---
 drivers/infiniband/hw/qib/qib_qsfp.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c
index 5e27f76805e2..4c7c3c84a741 100644
--- a/drivers/infiniband/hw/qib/qib_qsfp.c
+++ b/drivers/infiniband/hw/qib/qib_qsfp.c
@@ -292,7 +292,7 @@ int qib_refresh_qsfp_cache(struct qib_pportdata *ppd, struct qib_qsfp_cache *cp)
 		qib_dev_porterr(ppd->dd, ppd->port,
 				"QSFP byte0 is 0x%02X, S/B 0x0C/D\n", peek[0]);
 
-	if ((peek[2] & 2) == 0) {
+	if ((peek[2] & 4) == 0) {
 		/*
 		 * If cable is paged, rather than "flat memory", we need to
 		 * set the page to zero, Even if it already appears to be zero.
@@ -538,7 +538,7 @@ int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len)
 	sofar += scnprintf(buf + sofar, len - sofar, "Date:%.*s\n",
 			   QSFP_DATE_LEN, cd.date);
 	sofar += scnprintf(buf + sofar, len - sofar, "Lot:%.*s\n",
-			   QSFP_LOT_LEN, cd.date);
+			   QSFP_LOT_LEN, cd.lot);
 
 	while (bidx < QSFP_DEFAULT_HDR_CNT) {
 		int iidx;
--
cgit v1.2.3-58-ga151

From d144da8c6f51f48ec39d891ea9dff80169c45f3b Mon Sep 17 00:00:00 2001
From: Mike Marciniszyn
Date: Mon, 2 Nov 2015 12:13:25 -0500
Subject: IB/core: use RCU for uverbs id lookup

The current implementation gets a spin_lock, and at any scale with qib
and hfi1 post send, the lock contention grows exponentially with the
number of QPs.

idr_find() is RCU compatible, so reads don't need the lock.

Change to use rcu_read_lock() and rcu_read_unlock() in
__idr_get_uobj().

kfree_rcu() is used to ensure a grace period between the idr removal
and the actual free.

Reviewed-by: Ira Weiny
Signed-off-by: Mike Marciniszyn
Reviewed-By: Jason Gunthorpe
Signed-off-by: Doug Ledford
---
 drivers/infiniband/core/uverbs_cmd.c | 12 +++++++-----
 include/rdma/ib_verbs.h              |  1 +
 2 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 4cb8e9d9966c..1c02deab068f 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -62,9 +62,11 @@ static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
  * The ib_uobject locking scheme is as follows:
  *
  * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
- *   needs to be held during all idr operations.  When an object is
+ *   needs to be held during all idr write operations.  When an object is
  *   looked up, a reference must be taken on the object's kref before
- *   dropping this lock.
+ *   dropping this lock.  For read operations, rcu_read_lock()
+ *   and rcu_read_unlock() suffice, but similarly the kref reference
+ *   is grabbed before the rcu_read_unlock().
  *
  * - Each object also has an rwsem.  This rwsem must be held for
  *   reading while an operation that uses the object is performed.
@@ -96,7 +98,7 @@ static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
 
 static void release_uobj(struct kref *kref)
 {
-	kfree(container_of(kref, struct ib_uobject, ref));
+	kfree_rcu(container_of(kref, struct ib_uobject, ref), rcu);
 }
 
 static void put_uobj(struct ib_uobject *uobj)
@@ -145,7 +147,7 @@ static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
 {
 	struct ib_uobject *uobj;
 
-	spin_lock(&ib_uverbs_idr_lock);
+	rcu_read_lock();
 	uobj = idr_find(idr, id);
 	if (uobj) {
 		if (uobj->context == context)
@@ -153,7 +155,7 @@ static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
 		else
 			uobj = NULL;
 	}
-	spin_unlock(&ib_uverbs_idr_lock);
+	rcu_read_unlock();
 
 	return uobj;
 }
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 9a68a19532ba..120da1d7f57e 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1271,6 +1271,7 @@ struct ib_uobject {
 	int			id;		/* index into kernel idr */
 	struct kref		ref;
 	struct rw_semaphore	mutex;		/* protects .live */
+	struct rcu_head		rcu;		/* kfree_rcu() overhead */
 	int			live;
 };
--
cgit v1.2.3-58-ga151

From 2c63d1072ad7cf1059333ef5cfa06075bead4a39 Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Fri, 20 Nov 2015 17:41:36 +0100
Subject: IB/iser: use sector_div instead of do_div

do_div is the wrong way to divide a sector_t, as it is less efficient
when sector_t is 32-bit wide. With the upcoming do_div optimizations,
the kernel starts warning about this:

  drivers/infiniband/ulp/iser/iser_verbs.c:1296:4: note: in expansion of macro 'do_div'
  include/asm-generic/div64.h:224:22: warning: passing argument 1 of '__div64_32' from incompatible pointer type

This changes the code to use sector_div instead, which always produces
optimal code.

Signed-off-by: Arnd Bergmann
Reviewed-by: Sagi Grimberg
Signed-off-by: Doug Ledford
---
 drivers/infiniband/ulp/iser/iser_verbs.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index a93070210109..42f4da620f2e 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -1293,7 +1293,7 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
 	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
 		sector_t sector_off = mr_status.sig_err.sig_err_offset;
 
-		do_div(sector_off, sector_size + 8);
+		sector_div(sector_off, sector_size + 8);
 		*sector = scsi_get_lba(iser_task->sc) + sector_off;
 
 		pr_err("PI error found type %d at sector %llx "
--
cgit v1.2.3-58-ga151

From 3ebd2fd0d0119a5ac7906bf17be637b527f63d31 Mon Sep 17 00:00:00 2001
From: Kaike Wan
Date: Fri, 30 Oct 2015 08:23:45 -0400
Subject: IB/sa: Put netlink request into the request list before sending

It was found by Saurabh Sengar that the netlink code tried to allocate
memory with GFP_KERNEL while holding a spinlock. While it is possible
to fix the issue by replacing GFP_KERNEL with GFP_ATOMIC, it is better
to get rid of the spinlock while sending the packet. However, in order
to protect against a race condition in which a quick response may be
received before the request is put on the request list, we need to put
the request on the list first.
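The resulting pattern, as a generic sketch (the lock, list, and send
names here are illustrative, not the driver code):

	/* Enqueue under the lock first, then send outside the lock. A quick
	 * response racing with the send can already find the request listed.
	 * On send failure, re-take the lock and unlink the request. */
	spin_lock_irqsave(&req_lock, flags);
	list_add_tail(&req->list, &req_list);
	spin_unlock_irqrestore(&req_lock, flags);

	ret = send_req(req, gfp_mask);	/* may sleep if gfp_mask allows */
	if (ret <= 0) {
		spin_lock_irqsave(&req_lock, flags);
		list_del(&req->list);
		spin_unlock_irqrestore(&req_lock, flags);
		ret = -EIO;
	}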
Signed-off-by: Kaike Wan
Reviewed-by: Jason Gunthorpe
Reviewed-by: Ira Weiny
Reported-by: Saurabh Sengar
Signed-off-by: Doug Ledford
---
 drivers/infiniband/core/sa_query.c | 32 +++++++++++++++++---------------
 1 file changed, 17 insertions(+), 15 deletions(-)

diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 2aba774f835b..a95a32ba596e 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -512,7 +512,7 @@ static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
 	return len;
 }
 
-static int ib_nl_send_msg(struct ib_sa_query *query)
+static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
 {
 	struct sk_buff *skb = NULL;
 	struct nlmsghdr *nlh;
@@ -526,7 +526,7 @@ static int ib_nl_send_msg(struct ib_sa_query *query)
 	if (len <= 0)
 		return -EMSGSIZE;
 
-	skb = nlmsg_new(len, GFP_KERNEL);
+	skb = nlmsg_new(len, gfp_mask);
 	if (!skb)
 		return -ENOMEM;
 
@@ -544,7 +544,7 @@ static int ib_nl_send_msg(struct ib_sa_query *query)
 	/* Repair the nlmsg header length */
 	nlmsg_end(skb, nlh);
 
-	ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_LS, GFP_KERNEL);
+	ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_LS, gfp_mask);
 	if (!ret)
 		ret = len;
 	else
@@ -553,7 +553,7 @@ static int ib_nl_send_msg(struct ib_sa_query *query)
 	return ret;
 }
 
-static int ib_nl_make_request(struct ib_sa_query *query)
+static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
 {
 	unsigned long flags;
 	unsigned long delay;
@@ -562,25 +562,27 @@ static int ib_nl_make_request(struct ib_sa_query *query)
 	INIT_LIST_HEAD(&query->list);
 	query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);
 
+	/* Put the request on the list first.*/
 	spin_lock_irqsave(&ib_nl_request_lock, flags);
-	ret = ib_nl_send_msg(query);
-	if (ret <= 0) {
-		ret = -EIO;
-		goto request_out;
-	} else {
-		ret = 0;
-	}
-
 	delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
 	query->timeout = delay + jiffies;
 	list_add_tail(&query->list, &ib_nl_request_list);
 	/* Start the timeout if this is the only request */
 	if (ib_nl_request_list.next == &query->list)
 		queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
-
-request_out:
 	spin_unlock_irqrestore(&ib_nl_request_lock, flags);
 
+	ret = ib_nl_send_msg(query, gfp_mask);
+	if (ret <= 0) {
+		ret = -EIO;
+		/* Remove the request */
+		spin_lock_irqsave(&ib_nl_request_lock, flags);
+		list_del(&query->list);
+		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
+	} else {
+		ret = 0;
+	}
+
 	return ret;
 }
 
@@ -1108,7 +1110,7 @@ static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
 
 	if (query->flags & IB_SA_ENABLE_LOCAL_SERVICE) {
 		if (!ibnl_chk_listeners(RDMA_NL_GROUP_LS)) {
-			if (!ib_nl_make_request(query))
+			if (!ib_nl_make_request(query, gfp_mask))
 				return id;
 		}
 		ib_sa_disable_local_svc(query);
--
cgit v1.2.3-58-ga151

From 4d59ad2995e4128c06d889f9e223099b1f19548e Mon Sep 17 00:00:00 2001
From: Bart Van Assche
Date: Tue, 1 Dec 2015 10:17:32 -0800
Subject: IB/srp: Fix a memory leak

If srp_connect_ch() returns a positive value, then that is considered
by its caller as a connection failure, but this does not result in a
scsi_host_put() call and additionally causes the srp_create_target()
function to return a positive value while it should return a negative
value. Avoid all this confusion and additionally fix a memory leak by
ensuring that srp_connect_ch() always returns a value that is <= 0.
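The normalization can be sketched as one helper (illustrative only; the
patch below inlines it at the function's single exit):

	static int srp_norm_status(int ret)
	{
		/* Positive CM status codes are failures too; map them to a
		 * negative errno so every caller's cleanup path (such as
		 * scsi_host_put()) runs exactly once. */
		return ret <= 0 ? ret : -ENODEV;
	}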
This patch avoids that a rejected login triggers the following memory
leak:

unreferenced object 0xffff88021b24a220 (size 8):
  comm "srp_daemon", pid 56421, jiffies 4295006762 (age 4240.750s)
  hex dump (first 8 bytes):
    68 6f 73 74 35 38 00 a5          host58..
  backtrace:
    [] kmemleak_alloc+0x7a/0xc0
    [] __kmalloc_track_caller+0xfe/0x160
    [] kvasprintf+0x5b/0x90
    [] kvasprintf_const+0x8d/0xb0
    [] kobject_set_name_vargs+0x3c/0xa0
    [] dev_set_name+0x3c/0x40
    [] scsi_host_alloc+0x327/0x4b0
    [] srp_create_target+0x4e/0x8a0 [ib_srp]
    [] dev_attr_store+0x1b/0x20
    [] sysfs_kf_write+0x4a/0x60
    [] kernfs_fop_write+0x14e/0x180
    [] __vfs_write+0x2f/0xf0
    [] vfs_write+0xa4/0x100
    [] SyS_write+0x54/0xc0
    [] entry_SYSCALL_64_fastpath+0x12/0x6f

Signed-off-by: Bart Van Assche
Reviewed-by: Christoph Hellwig
Reviewed-by: Sagi Grimberg
Cc: Sebastian Parschauer
Cc: stable
Signed-off-by: Doug Ledford
---
 drivers/infiniband/ulp/srp/ib_srp.c | 22 +++++++++++++---------
 1 file changed, 13 insertions(+), 9 deletions(-)

diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 9909022dc6c3..a074dad28bab 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -994,16 +994,16 @@ static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
 
 	ret = srp_lookup_path(ch);
 	if (ret)
-		return ret;
+		goto out;
 
 	while (1) {
 		init_completion(&ch->done);
 		ret = srp_send_req(ch, multich);
 		if (ret)
-			return ret;
+			goto out;
 		ret = wait_for_completion_interruptible(&ch->done);
 		if (ret < 0)
-			return ret;
+			goto out;
 
 		/*
 		 * The CM event handling code will set status to
@@ -1011,15 +1011,16 @@ static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
 		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
 		 * redirect REJ back.
 		 */
-		switch (ch->status) {
+		ret = ch->status;
+		switch (ret) {
 		case 0:
 			ch->connected = true;
-			return 0;
+			goto out;
 
 		case SRP_PORT_REDIRECT:
 			ret = srp_lookup_path(ch);
 			if (ret)
-				return ret;
+				goto out;
 			break;
 
 		case SRP_DLID_REDIRECT:
@@ -1028,13 +1029,16 @@ static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
 		case SRP_STALE_CONN:
 			shost_printk(KERN_ERR, target->scsi_host, PFX
 				     "giving up on stale connection\n");
-			ch->status = -ECONNRESET;
-			return ch->status;
+			ret = -ECONNRESET;
+			goto out;
 
 		default:
-			return ch->status;
+			goto out;
 		}
 	}
+
+out:
+	return ret <= 0 ? ret : -ENODEV;
 }
 
 static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
--
cgit v1.2.3-58-ga151

From 09c0c0bea500a9ad362589990ee316c9b2482f44 Mon Sep 17 00:00:00 2001
From: Sagi Grimberg
Date: Tue, 1 Dec 2015 10:18:03 -0800
Subject: IB/srp: Fix possible send queue overflow

When using work request based memory registration (fast_reg) we must
reserve SQ entries for registration and invalidation in addition to
send operations. Each IO consumes 3 SQ entries (registration, send,
invalidation), so we need to allocate a 3x larger send queue instead
of a 2x one.
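As a sketch of the sizing rule (variable names are illustrative, not
the driver's):

	/* Each fast-reg I/O posts three WQEs: REG_MR, SEND, LOCAL_INV.
	 * Without fast registration only the SEND is posted per I/O. */
	const int wqes_per_io = use_fast_reg ? 3 : 1;

	attr.cap.max_send_wr = target_queue_size * wqes_per_io;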
Signed-off-by: Sagi Grimberg
CC: Stable
Reviewed-by: Christoph Hellwig
Signed-off-by: Doug Ledford
---
 drivers/infiniband/ulp/srp/ib_srp.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index a074dad28bab..c7a95d2dc164 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -488,7 +488,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
 	struct ib_qp *qp;
 	struct ib_fmr_pool *fmr_pool = NULL;
 	struct srp_fr_pool *fr_pool = NULL;
-	const int m = 1 + dev->use_fast_reg;
+	const int m = dev->use_fast_reg ? 3 : 1;
 	struct ib_cq_init_attr cq_attr = {};
 	int ret;
 
--
cgit v1.2.3-58-ga151

From fc925518aa04ee9f03c4e0cadd9c59f1ef48bcfa Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Tue, 1 Dec 2015 10:18:30 -0800
Subject: IB/srp: Initialize dma_length in srp_map_idb

Without this, sg_dma_len() will return 0 on architectures that have
the dma_length field.

Fixes: commit f7f7aab1a5c0 ("IB/srp: Convert to new registration API")
Signed-off-by: Christoph Hellwig
Reviewed-by: Sagi Grimberg
Signed-off-by: Doug Ledford
---
 drivers/infiniband/ulp/srp/ib_srp.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index c7a95d2dc164..72fac204d756 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -1524,6 +1524,9 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
 		state.sg_nents = 1;
 		sg_set_buf(idb_sg, req->indirect_desc, idb_len);
 		idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+		idb_sg->dma_length = idb_sg->length;	      /* hack^2 */
+#endif
 		ret = srp_map_finish_fr(&state, ch);
 		if (ret < 0)
 			return ret;
--
cgit v1.2.3-58-ga151

From a745f4f410082fedc17be75e10e4098f300943db Mon Sep 17 00:00:00 2001
From: Bart Van Assche
Date: Tue, 1 Dec 2015 10:18:47 -0800
Subject: IB/srp: Fix indirect data buffer rkey endianness

Detected by sparse.

Fixes: commit 330179f2fa93 ("IB/srp: Register the indirect data buffer descriptor")
Signed-off-by: Bart Van Assche
Cc: stable # v4.3+
Cc: Sagi Grimberg
Cc: Christoph Hellwig
Cc: Sebastian Parschauer
Reviewed-by: Christoph Hellwig
Signed-off-by: Doug Ledford
---
 drivers/infiniband/ulp/srp/ib_srp.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 72fac204d756..fac142389731 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -1662,7 +1662,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
 				return ret;
 			req->nmdesc++;
 		} else {
-			idb_rkey = target->global_mr->rkey;
+			idb_rkey = cpu_to_be32(target->global_mr->rkey);
 		}
 
 		indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
--
cgit v1.2.3-58-ga151

From 57b0be9c0fb0ba3a35683c6ce21db7162d6758c5 Mon Sep 17 00:00:00 2001
From: Bart Van Assche
Date: Tue, 1 Dec 2015 10:19:38 -0800
Subject: IB/srp: Fix srp_map_sg_fr()

After dma_map_sg() has been called, the return value of that function
must be used as the number of elements in the scatterlist instead of
scsi_sg_count().
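The rule in isolation, as a hedged fragment (dev, scat, cmd, and
handle_entry() are placeholders):

	struct scatterlist *sg;
	int i, count;

	count = dma_map_sg(dev, scat, scsi_sg_count(cmd), DMA_TO_DEVICE);
	if (count == 0)
		return -EIO;

	/* dma_map_sg() may coalesce entries; iterate over its return
	 * value, never over the original scsi_sg_count(cmd). */
	for_each_sg(scat, sg, count, i)
		handle_entry(sg);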
Fixes: commit f7f7aab1a5c0 ("IB/srp: Convert to new registration API")
Reported-by: Christoph Hellwig
Signed-off-by: Bart Van Assche
Cc: stable # v4.4+
Cc: Sagi Grimberg
Cc: Sebastian Parschauer
Reviewed-by: Sagi Grimberg
Signed-off-by: Doug Ledford
---
 drivers/infiniband/ulp/srp/ib_srp.c | 19 ++++++++-----------
 drivers/infiniband/ulp/srp/ib_srp.h |  5 +----
 2 files changed, 9 insertions(+), 15 deletions(-)

diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index fac142389731..3db9a659719b 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -1313,7 +1313,7 @@ reset_state:
 }
 
 static int srp_map_finish_fr(struct srp_map_state *state,
-			     struct srp_rdma_ch *ch)
+			     struct srp_rdma_ch *ch, int sg_nents)
 {
 	struct srp_target_port *target = ch->target;
 	struct srp_device *dev = target->srp_host->srp_dev;
@@ -1328,10 +1328,10 @@ static int srp_map_finish_fr(struct srp_map_state *state,
 
 	WARN_ON_ONCE(!dev->use_fast_reg);
 
-	if (state->sg_nents == 0)
+	if (sg_nents == 0)
 		return 0;
 
-	if (state->sg_nents == 1 && target->global_mr) {
+	if (sg_nents == 1 && target->global_mr) {
 		srp_map_desc(state, sg_dma_address(state->sg),
 			     sg_dma_len(state->sg),
 			     target->global_mr->rkey);
@@ -1345,8 +1345,7 @@ static int srp_map_finish_fr(struct srp_map_state *state,
 	rkey = ib_inc_rkey(desc->mr->rkey);
 	ib_update_fast_reg_key(desc->mr, rkey);
 
-	n = ib_map_mr_sg(desc->mr, state->sg, state->sg_nents,
-			 dev->mr_page_size);
+	n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, dev->mr_page_size);
 	if (unlikely(n < 0))
 		return n;
 
@@ -1452,16 +1451,15 @@ static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
 	state->fr.next = req->fr_list;
 	state->fr.end = req->fr_list + ch->target->cmd_sg_cnt;
 	state->sg = scat;
-	state->sg_nents = scsi_sg_count(req->scmnd);
 
-	while (state->sg_nents) {
+	while (count) {
 		int i, n;
 
-		n = srp_map_finish_fr(state, ch);
+		n = srp_map_finish_fr(state, ch, count);
 		if (unlikely(n < 0))
 			return n;
 
-		state->sg_nents -= n;
+		count -= n;
 		for (i = 0; i < n; i++)
 			state->sg = sg_next(state->sg);
 	}
@@ -1521,13 +1519,12 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
 
 	if (dev->use_fast_reg) {
 		state.sg = idb_sg;
-		state.sg_nents = 1;
 		sg_set_buf(idb_sg, req->indirect_desc, idb_len);
 		idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
 #ifdef CONFIG_NEED_SG_DMA_LENGTH
 		idb_sg->dma_length = idb_sg->length;	      /* hack^2 */
 #endif
-		ret = srp_map_finish_fr(&state, ch);
+		ret = srp_map_finish_fr(&state, ch, 1);
 		if (ret < 0)
 			return ret;
 	} else if (dev->use_fmr) {
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 87a2a919dc43..f6af531f9f32 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -300,10 +300,7 @@ struct srp_map_state {
 	dma_addr_t		base_dma_addr;
 	u32			dma_len;
 	u32			total_len;
-	union {
-		unsigned int	npages;
-		int		sg_nents;
-	};
+	unsigned int		npages;
 	unsigned int		nmdesc;
 	unsigned int		ndesc;
 };
--
cgit v1.2.3-58-ga151

From 8f5ba10ed40a9d3ffe84854984227d011a7428bd Mon Sep 17 00:00:00 2001
From: Bart Van Assche
Date: Thu, 3 Dec 2015 16:04:17 -0800
Subject: IB core: Fix ib_sg_to_pages()

On 12/03/2015 01:18 AM, Christoph Hellwig wrote:
> The patch looks good to me, but while we touch this area, how about
> throwing in a few cosmetic fixes as well?

How about the patch below? In that version of the ib_sg_to_pages() fix
these concerns have been addressed, and additionally two more bugs have
been fixed.
------------
[PATCH] IB core: Fix ib_sg_to_pages()

Fix the code for detecting gaps. A gap occurs not only if the second or
later scatterlist element is not aligned, but also if any scatterlist
element other than the last does not end at a page boundary.

In the code for coalescing contiguous elements, ensure that mr->length
is correct and that last_page_addr is up-to-date.

Ensure that this function returns a negative error code instead of zero
if the first set_page() call fails.

Fixes: commit 4c67e2bfc8b7 ("IB/core: Introduce new fast registration API")
Reported-by: Christoph Hellwig
Reviewed-by: Sagi Grimberg
Reviewed-by: Christoph Hellwig
Signed-off-by: Doug Ledford
---
 drivers/infiniband/core/verbs.c | 43 +++++++++++++++++++++--------------------
 1 file changed, 22 insertions(+), 21 deletions(-)

diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 043a60ee6836..545906dec26d 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1516,7 +1516,7 @@ EXPORT_SYMBOL(ib_map_mr_sg);
  * @sg_nents:      number of entries in sg
  * @set_page:      driver page assignment function pointer
  *
- * Core service helper for drivers to covert the largest
+ * Core service helper for drivers to convert the largest
  * prefix of given sg list to a page vector. The sg list
  * prefix converted is the prefix that meet the requirements
  * of ib_map_mr_sg.
@@ -1533,7 +1533,7 @@ int ib_sg_to_pages(struct ib_mr *mr,
 	u64 last_end_dma_addr = 0, last_page_addr = 0;
 	unsigned int last_page_off = 0;
 	u64 page_mask = ~((u64)mr->page_size - 1);
-	int i;
+	int i, ret;
 
 	mr->iova = sg_dma_address(&sgl[0]);
 	mr->length = 0;
@@ -1544,27 +1544,29 @@ int ib_sg_to_pages(struct ib_mr *mr,
 		u64 end_dma_addr = dma_addr + dma_len;
 		u64 page_addr = dma_addr & page_mask;
 
-		if (i && page_addr != dma_addr) {
-			if (last_end_dma_addr != dma_addr) {
-				/* gap */
-				goto done;
-
-			} else if (last_page_off + dma_len <= mr->page_size) {
-				/* chunk this fragment with the last */
-				mr->length += dma_len;
-				last_end_dma_addr += dma_len;
-				last_page_off += dma_len;
-				continue;
-			} else {
-				/* map starting from the next page */
-				page_addr = last_page_addr + mr->page_size;
-				dma_len -= mr->page_size - last_page_off;
-			}
+		/*
+		 * For the second and later elements, check whether either the
+		 * end of element i-1 or the start of element i is not aligned
+		 * on a page boundary.
+		 */
+		if (i && (last_page_off != 0 || page_addr != dma_addr)) {
+			/* Stop mapping if there is a gap. */
+			if (last_end_dma_addr != dma_addr)
+				break;
+
+			/*
+			 * Coalesce this element with the last. If it is small
+			 * enough just update mr->length. Otherwise start
+			 * mapping from the next page.
+			 */
+			goto next_page;
 		}
 
 		do {
-			if (unlikely(set_page(mr, page_addr)))
-				goto done;
+			ret = set_page(mr, page_addr);
+			if (unlikely(ret < 0))
+				return i ? : ret;
+next_page:
 			page_addr += mr->page_size;
 		} while (page_addr < end_dma_addr);
 
@@ -1574,7 +1576,6 @@ int ib_sg_to_pages(struct ib_mr *mr,
 		last_page_off = end_dma_addr & ~page_mask;
 	}
 
-done:
 	return i;
 }
 EXPORT_SYMBOL(ib_sg_to_pages);
--
cgit v1.2.3-58-ga151

From d3632493c70b6a866a77264cd8cfdeb89958b906 Mon Sep 17 00:00:00 2001
From: Bart Van Assche
Date: Fri, 20 Nov 2015 11:04:12 -0800
Subject: IB/cma: Add a missing rcu_read_unlock()

Ensure that validate_ipv4_net_dev() calls rcu_read_unlock() if
fib_lookup() fails. Detected by sparse. Compile-tested only.

Fixes: "IB/cma: Validate routing of incoming requests" (commit f887f2ac87c2).
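The rule the fix enforces, as a minimal sketch (do_lookup() is a
placeholder):

	static bool lookup_matches(struct net_device *net_dev)
	{
		bool ret;

		rcu_read_lock();
		/* Compute the result inside the read-side critical section;
		 * returning early here would leak the RCU read lock. */
		ret = do_lookup(net_dev) == 0;
		rcu_read_unlock();

		return ret;
	}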
Cc: Haggai Eran
Cc: stable
Reviewed-by: Sagi Grimberg
Reviewed-by: Haggai Eran
Reviewed-by: Jason Gunthorpe
Signed-off-by: Doug Ledford
---
 drivers/infiniband/core/cma.c | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 944cd90417bc..d2d5d004f16d 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1126,10 +1126,7 @@ static bool validate_ipv4_net_dev(struct net_device *net_dev,
 
 	rcu_read_lock();
 	err = fib_lookup(dev_net(net_dev), &fl4, &res, 0);
-	if (err)
-		return false;
-
-	ret = FIB_RES_DEV(res) == net_dev;
+	ret = err == 0 && FIB_RES_DEV(res) == net_dev;
 	rcu_read_unlock();
 
 	return ret;
--
cgit v1.2.3-58-ga151

From 533708867dd6388f643f12c87465b59e732d729d Mon Sep 17 00:00:00 2001
From: Hal Rosenstock
Date: Fri, 13 Nov 2015 15:22:22 -0500
Subject: IB/mad: Require CM send method for everything except ClassPortInfo

Receipt of a CM MAD with any method other than Send, for any attribute
other than ClassPortInfo, is invalid. CM attributes other than
ClassPortInfo only use the send method.

The SRP initiator does not maintain a timeout policy for CM connect
requests and relies on the CM layer to do that. The result was that
the SRP initiator hung, as the connect request never completed.

A new SRP target has been observed to respond to a Send of CM REQ with
a GetResp of CM REQ with bad status. This is non-conformant with the
IBA spec, but it exposes a vulnerability in the current MAD/CM code,
which will respond to the incoming GetResp of CM REQ as if it were a
valid incoming Send of CM REQ rather than tossing it on the floor. It
also causes the MAD layer not to retransmit the original REQ even
though it has not received a REP.

Reviewed-by: Sagi Grimberg
Signed-off-by: Hal Rosenstock
Reviewed-by: Ira Weiny
Signed-off-by: Doug Ledford
---
 drivers/infiniband/core/mad.c | 5 +++++
 include/rdma/ib_mad.h         | 2 ++
 2 files changed, 7 insertions(+)

diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 8d8af7a41a30..2281de122038 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -1811,6 +1811,11 @@ static int validate_mad(const struct ib_mad_hdr *mad_hdr,
 		if (qp_num == 0)
 			valid = 1;
 	} else {
+		/* CM attributes other than ClassPortInfo only use Send method */
+		if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) &&
+		    (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) &&
+		    (mad_hdr->method != IB_MGMT_METHOD_SEND))
+			goto out;
 		/* Filter GSI packets sent to QP0 */
 		if (qp_num != 0)
 			valid = 1;
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index 188df91d5851..ec9b44dd3d80 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -237,6 +237,8 @@ struct ib_vendor_mad {
 	u8			data[IB_MGMT_VENDOR_DATA];
 };
 
+#define IB_MGMT_CLASSPORTINFO_ATTR_ID	cpu_to_be16(0x0001)
+
 struct ib_class_port_info {
 	u8			base_version;
 	u8			class_version;
--
cgit v1.2.3-58-ga151

From a5e14ba334e202c58e45ef47414ec94c585c1a8c Mon Sep 17 00:00:00 2001
From: Sagi Grimberg
Date: Wed, 28 Oct 2015 13:28:15 +0200
Subject: mlx4: Expose correct max_sge_rd limit

mlx4 devices (ConnectX-2, ConnectX-3) have a limitation where rdma
read work queue entries cannot exceed 512 bytes. An rdma read wqe
needs to fit in 512 bytes:
- wqe control segment (16 bytes)
- rdma segment (16 bytes)
- scatter elements (16 bytes each)

So max_sge_rd should be: (512 - 16 - 16) / 16 = 30.
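The derivation can be checked at compile time; a sketch with made-up
constant names:

	#define RD_WQE_BYTES	512	/* max RDMA READ WQE size */
	#define CTRL_SEG_BYTES	16	/* wqe control segment */
	#define RADDR_SEG_BYTES	16	/* rdma segment */
	#define SGE_BYTES	16	/* per scatter element */

	_Static_assert((RD_WQE_BYTES - CTRL_SEG_BYTES - RADDR_SEG_BYTES) /
		       SGE_BYTES == 30, "max_sge_rd must be 30");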
Signed-off-by: Sagi Grimberg
Reviewed-by: Steve Wise
Signed-off-by: Doug Ledford
---
 drivers/infiniband/hw/mlx4/main.c |  2 +-
 include/linux/mlx4/device.h       | 11 +++++++++++
 2 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index f567160a4a56..97d6878f9938 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -456,7 +456,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 	props->max_qp_wr	   = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
 	props->max_sge		   = min(dev->dev->caps.max_sq_sg,
 					 dev->dev->caps.max_rq_sg);
-	props->max_sge_rd = props->max_sge;
+	props->max_sge_rd = MLX4_MAX_SGE_RD;
 	props->max_cq		   = dev->dev->quotas.cq;
 	props->max_cqe		   = dev->dev->caps.max_cqes;
 	props->max_mr		   = dev->dev->quotas.mpt;
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 7501626ab529..d3133be12d92 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -426,6 +426,17 @@ enum {
 	MLX4_MAX_FAST_REG_PAGES = 511,
 };
 
+enum {
+	/*
+	 * Max wqe size for rdma read is 512 bytes, so this
+	 * limits our max_sge_rd as the wqe needs to fit:
+	 * - ctrl segment (16 bytes)
+	 * - rdma segment (16 bytes)
+	 * - scatter elements (16 bytes each)
+	 */
+	MLX4_MAX_SGE_RD	= (512 - 16 - 16) / 16
+};
+
 enum {
 	MLX4_DEV_PMC_SUBTYPE_GUID_INFO	= 0x14,
 	MLX4_DEV_PMC_SUBTYPE_PORT_INFO	= 0x15,
--
cgit v1.2.3-58-ga151

From f1a47d37fbc65bc49d32f23ab61c3e09fefeab73 Mon Sep 17 00:00:00 2001
From: Sagi Grimberg
Date: Wed, 28 Oct 2015 13:28:16 +0200
Subject: iser-target: Remove explicit mlx4 work-around

The driver now exposes sufficient limits, so we can avoid having an
mlx4-specific work-around.

Signed-off-by: Sagi Grimberg
Reviewed-by: Steve Wise
Signed-off-by: Doug Ledford
---
 drivers/infiniband/ulp/isert/ib_isert.c | 13 +++----------
 1 file changed, 3 insertions(+), 10 deletions(-)

diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index dfbbbb28090b..8a51c3b5d657 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -157,16 +157,9 @@ isert_create_qp(struct isert_conn *isert_conn,
 	attr.recv_cq = comp->cq;
 	attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
 	attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
-	/*
-	 * FIXME: Use devattr.max_sge - 2 for max_send_sge as
-	 * work-around for RDMA_READs with ConnectX-2.
-	 *
-	 * Also, still make sure to have at least two SGEs for
-	 * outgoing control PDU responses.
-	 */
-	attr.cap.max_send_sge = max(2, device->dev_attr.max_sge - 2);
-	isert_conn->max_sge = attr.cap.max_send_sge;
-
+	attr.cap.max_send_sge = device->dev_attr.max_sge;
+	isert_conn->max_sge = min(device->dev_attr.max_sge,
+				  device->dev_attr.max_sge_rd);
 	attr.cap.max_recv_sge = 1;
 	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
 	attr.qp_type = IB_QPT_RC;
--
cgit v1.2.3-58-ga151

From 73d4da7b9fec13cbddcf548888c4d0e46f1d5087 Mon Sep 17 00:00:00 2001
From: Wengang Wang
Date: Thu, 8 Oct 2015 13:21:33 +0800
Subject: IB/mlx4: Use correct order of variables in log message

An mlx4 error log passes the management class and method arguments in
the wrong order relative to the format string. Fix the ordering.
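A stripped-down illustration of the bug class (not the driver code):
printf-style varargs get no name checking, so same-typed arguments can
silently swap.

	pr_err("class 0x%x, method 0x%x\n", smp->method, smp->mgmt_class); /* wrong */
	pr_err("class 0x%x, method 0x%x\n", smp->mgmt_class, smp->method); /* fixed */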
Signed-off-by: Wengang Wang
Acked-by: Or Gerlitz
Signed-off-by: Doug Ledford
---
 drivers/net/ethernet/mellanox/mlx4/cmd.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 2177e56ed0be..d48d5793407d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1010,7 +1010,7 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
 		if (!(smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
 		      smp->method == IB_MGMT_METHOD_GET) || network_view) {
 			mlx4_err(dev, "Unprivileged slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x%x, view=%s for attr 0x%x. Rejecting\n",
-				 slave, smp->method, smp->mgmt_class,
+				 slave, smp->mgmt_class, smp->method,
 				 network_view ? "Network" : "Host",
 				 be16_to_cpu(smp->attr_id));
 			return -EPERM;
--
cgit v1.2.3-58-ga151

From 0ef2f05c7e02ff99c0b5b583d7dee2cd12b053f2 Mon Sep 17 00:00:00 2001
From: Wengang Wang
Date: Thu, 8 Oct 2015 13:27:04 +0800
Subject: IB/mlx4: Use vmalloc for WR buffers when needed

There have been several reports of WR buffer allocation (kmalloc)
failing at order 3 and/or order 4 contiguous page allocations, at
times when 100MB+ of memory was actually free but badly fragmented.
So try vmalloc when kmalloc fails.

Signed-off-by: Wengang Wang
Acked-by: Or Gerlitz
Signed-off-by: Doug Ledford
---
 drivers/infiniband/hw/mlx4/qp.c  | 19 +++++++++++++------
 drivers/infiniband/hw/mlx4/srq.c | 11 ++++++++---
 2 files changed, 21 insertions(+), 9 deletions(-)

diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index a2e4ca56da44..13eaaf45288f 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -34,6 +34,7 @@
 #include
 #include
 #include
+#include <linux/vmalloc.h>
 #include
 #include
@@ -795,8 +796,14 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 		if (err)
 			goto err_mtt;
 
-		qp->sq.wrid  = kmalloc(qp->sq.wqe_cnt * sizeof (u64), gfp);
-		qp->rq.wrid  = kmalloc(qp->rq.wqe_cnt * sizeof (u64), gfp);
+		qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(u64), gfp);
+		if (!qp->sq.wrid)
+			qp->sq.wrid = __vmalloc(qp->sq.wqe_cnt * sizeof(u64),
+						gfp, PAGE_KERNEL);
+		qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(u64), gfp);
+		if (!qp->rq.wrid)
+			qp->rq.wrid = __vmalloc(qp->rq.wqe_cnt * sizeof(u64),
+						gfp, PAGE_KERNEL);
 		if (!qp->sq.wrid || !qp->rq.wrid) {
 			err = -ENOMEM;
 			goto err_wrid;
@@ -886,8 +893,8 @@ err_wrid:
 			if (qp_has_rq(init_attr))
 				mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db);
 		} else {
-			kfree(qp->sq.wrid);
-			kfree(qp->rq.wrid);
+			kvfree(qp->sq.wrid);
+			kvfree(qp->rq.wrid);
 		}
 
 err_mtt:
@@ -1062,8 +1069,8 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
 			      &qp->db);
 		ib_umem_release(qp->umem);
 	} else {
-		kfree(qp->sq.wrid);
-		kfree(qp->rq.wrid);
+		kvfree(qp->sq.wrid);
+		kvfree(qp->rq.wrid);
 		if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
 		    MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
 			free_proxy_bufs(&dev->ib_dev, qp);
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index dce5dfe3a70e..8d133c40fa0e 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -34,6 +34,7 @@
 #include
 #include
 #include
+#include <linux/vmalloc.h>
 #include "mlx4_ib.h"
 #include "user.h"
@@ -172,8 +173,12 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
 
 		srq->wrid = kmalloc(srq->msrq.max * sizeof (u64), GFP_KERNEL);
 		if (!srq->wrid) {
-			err = -ENOMEM;
-			goto err_mtt;
+			srq->wrid = __vmalloc(srq->msrq.max * sizeof(u64),
+					      GFP_KERNEL, PAGE_KERNEL);
+			if (!srq->wrid) {
+				err = -ENOMEM;
+				goto err_mtt;
+			}
 		}
 	}
 
@@ -204,7 +209,7 @@ err_wrid:
 		if (pd->uobject)
 			mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db);
 		else
-			kfree(srq->wrid);
+			kvfree(srq->wrid);
 
 err_mtt:
 	mlx4_mtt_cleanup(dev->dev, &srq->mtt);
--
cgit v1.2.3-58-ga151

From ab5cdc31630c7596d81ca8fbe7d695f10666f39b Mon Sep 17 00:00:00 2001
From: Leon Romanovsky
Date: Wed, 21 Oct 2015 09:21:17 +0300
Subject: IB/mlx5: Postpone remove_keys under knowledge of coming preemption

The remove_keys() logic is performed as a garbage collection task. Such
a task is intended to be run when no other active processes are
running.

The need_resched() will return TRUE if there are user tasks to be
activated in the near future. In such a case, we don't execute
remove_keys() and postpone the garbage collection work to try to run
in the next cycle, in order to free CPU resources to other tasks.

The possible steps to trigger such a scenario:
1. Allocate a lot of MRs to fill the cache above the limit.
2. Wait a small amount of time "to calm" the system.
3. Start CPU-extensive operations on a multi-node cluster.
4. Expect performance degradation during the MR cache shrink operation.

Signed-off-by: Leon Romanovsky
Signed-off-by: Eli Cohen
Signed-off-by: Doug Ledford
---
 drivers/infiniband/hw/mlx5/mr.c | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index ec8993a7b3be..6000f7aeede9 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -381,7 +381,19 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
 			}
 		}
 	} else if (ent->cur > 2 * ent->limit) {
-		if (!someone_adding(cache) &&
+		/*
+		 * The remove_keys() logic is performed as garbage collection
+		 * task. Such task is intended to be run when no other active
+		 * processes are running.
+		 *
+		 * The need_resched() will return TRUE if there are user tasks
+		 * to be activated in near future.
+		 *
+		 * In such case, we don't execute remove_keys() and postpone
+		 * the garbage collection work to try to run in next cycle,
+		 * in order to free CPU resources to other tasks.
+		 */
+		if (!need_resched() && !someone_adding(cache) &&
 		    time_after(jiffies, cache->last_add + 300 * HZ)) {
 			remove_keys(dev, i, 1);
 			if (ent->cur > ent->limit)
--
cgit v1.2.3-58-ga151
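A generic sketch of the deferral pattern used above (the workqueue and
helper names here are illustrative, not the mlx5 driver's):

	static void cache_shrink_work(struct work_struct *work)
	{
		/* Skip the expensive shrink while the CPU is wanted
		 * elsewhere; re-arm the work to retry in a later cycle. */
		if (need_resched() || someone_adding(cache)) {
			queue_delayed_work(cache_wq, &shrink_dwork, HZ);
			return;
		}

		remove_excess_keys();	/* placeholder for remove_keys() */
	}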