author     Bob Pearson <rpearsonhpe@gmail.com>    2022-04-20 20:40:34 -0500
committer  Jason Gunthorpe <jgg@nvidia.com>       2022-05-06 15:33:31 -0300
commit     0b1fbfb9e9058e6eae6564cdb4ed0003d766445c (patch)
tree       50ab91fbdb704ec3bf9ddbe5c9d343e23a5e9bd5 /drivers/infiniband
parent     1a7085b34291a266a0413795c27061eb707104f7 (diff)
RDMA/rxe: Remove IB_SRQ_INIT_MASK
Currently the #define IB_SRQ_INIT_MASK is used to distinguish the
rxe_create_srq verb from the rxe_modify_srq verb so that some code can be
shared between these two subroutines.

This commit splits rxe_srq_chk_attr into two subroutines: rxe_srq_chk_init
and rxe_srq_chk_attr, which handle the create_srq and modify_srq verbs
separately.

Link: https://lore.kernel.org/r/20220421014042.26985-2-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
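The create-side pairing, as a minimal sketch built only from the prototypes
added to rxe_loc.h below; the example_create_srq() wrapper is illustrative
and is not part of this patch.

/* Illustrative wrapper, not driver code: shows how the create verb now
 * pairs the new rxe_srq_chk_init() with rxe_srq_from_init(), with no
 * attribute mask involved.
 */
static int example_create_srq(struct rxe_dev *rxe, struct rxe_srq *srq,
			      struct ib_srq_init_attr *init,
			      struct ib_udata *udata,
			      struct rxe_create_srq_resp __user *uresp)
{
	int err;

	/* validate init->attr against the device limits */
	err = rxe_srq_chk_init(rxe, init);
	if (err)
		return err;

	/* allocate the queue and populate the srq from the init attrs */
	return rxe_srq_from_init(rxe, srq, init, udata, uresp);
}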
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_loc.h      9
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_srq.c    118
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_verbs.c    4
3 files changed, 74 insertions, 57 deletions
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 2ffbe3390668..ff6cae2c2949 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -159,15 +159,12 @@ void retransmit_timer(struct timer_list *t);
void rnr_nak_timer(struct timer_list *t);
/* rxe_srq.c */
-#define IB_SRQ_INIT_MASK (~IB_SRQ_LIMIT)
-
-int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
- struct ib_srq_attr *attr, enum ib_srq_attr_mask mask);
-
+int rxe_srq_chk_init(struct rxe_dev *rxe, struct ib_srq_init_attr *init);
int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
struct ib_srq_init_attr *init, struct ib_udata *udata,
struct rxe_create_srq_resp __user *uresp);
-
+int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
+ struct ib_srq_attr *attr, enum ib_srq_attr_mask mask);
int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
struct rxe_modify_srq_cmd *ucmd, struct ib_udata *udata);
diff --git a/drivers/infiniband/sw/rxe/rxe_srq.c b/drivers/infiniband/sw/rxe/rxe_srq.c
index 0c0721f04357..e2dcfc5d97e3 100644
--- a/drivers/infiniband/sw/rxe/rxe_srq.c
+++ b/drivers/infiniband/sw/rxe/rxe_srq.c
@@ -6,64 +6,34 @@
#include <linux/vmalloc.h>
#include "rxe.h"
-#include "rxe_loc.h"
#include "rxe_queue.h"
-int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
- struct ib_srq_attr *attr, enum ib_srq_attr_mask mask)
+int rxe_srq_chk_init(struct rxe_dev *rxe, struct ib_srq_init_attr *init)
{
- if (srq && srq->error) {
- pr_warn("srq in error state\n");
+ struct ib_srq_attr *attr = &init->attr;
+
+ if (attr->max_wr > rxe->attr.max_srq_wr) {
+ pr_warn("max_wr(%d) > max_srq_wr(%d)\n",
+ attr->max_wr, rxe->attr.max_srq_wr);
goto err1;
}
- if (mask & IB_SRQ_MAX_WR) {
- if (attr->max_wr > rxe->attr.max_srq_wr) {
- pr_warn("max_wr(%d) > max_srq_wr(%d)\n",
- attr->max_wr, rxe->attr.max_srq_wr);
- goto err1;
- }
-
- if (attr->max_wr <= 0) {
- pr_warn("max_wr(%d) <= 0\n", attr->max_wr);
- goto err1;
- }
-
- if (srq && srq->limit && (attr->max_wr < srq->limit)) {
- pr_warn("max_wr (%d) < srq->limit (%d)\n",
- attr->max_wr, srq->limit);
- goto err1;
- }
-
- if (attr->max_wr < RXE_MIN_SRQ_WR)
- attr->max_wr = RXE_MIN_SRQ_WR;
+ if (attr->max_wr <= 0) {
+ pr_warn("max_wr(%d) <= 0\n", attr->max_wr);
+ goto err1;
}
- if (mask & IB_SRQ_LIMIT) {
- if (attr->srq_limit > rxe->attr.max_srq_wr) {
- pr_warn("srq_limit(%d) > max_srq_wr(%d)\n",
- attr->srq_limit, rxe->attr.max_srq_wr);
- goto err1;
- }
+ if (attr->max_wr < RXE_MIN_SRQ_WR)
+ attr->max_wr = RXE_MIN_SRQ_WR;
- if (srq && (attr->srq_limit > srq->rq.queue->buf->index_mask)) {
- pr_warn("srq_limit (%d) > cur limit(%d)\n",
- attr->srq_limit,
- srq->rq.queue->buf->index_mask);
- goto err1;
- }
+ if (attr->max_sge > rxe->attr.max_srq_sge) {
+ pr_warn("max_sge(%d) > max_srq_sge(%d)\n",
+ attr->max_sge, rxe->attr.max_srq_sge);
+ goto err1;
}
- if (mask == IB_SRQ_INIT_MASK) {
- if (attr->max_sge > rxe->attr.max_srq_sge) {
- pr_warn("max_sge(%d) > max_srq_sge(%d)\n",
- attr->max_sge, rxe->attr.max_srq_sge);
- goto err1;
- }
-
- if (attr->max_sge < RXE_MIN_SRQ_SGE)
- attr->max_sge = RXE_MIN_SRQ_SGE;
- }
+ if (attr->max_sge < RXE_MIN_SRQ_SGE)
+ attr->max_sge = RXE_MIN_SRQ_SGE;
return 0;
@@ -93,8 +63,7 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
spin_lock_init(&srq->rq.consumer_lock);
type = QUEUE_TYPE_FROM_CLIENT;
- q = rxe_queue_init(rxe, &srq->rq.max_wr,
- srq_wqe_size, type);
+ q = rxe_queue_init(rxe, &srq->rq.max_wr, srq_wqe_size, type);
if (!q) {
pr_warn("unable to allocate queue for srq\n");
return -ENOMEM;
@@ -121,6 +90,57 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
return 0;
}
+int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
+ struct ib_srq_attr *attr, enum ib_srq_attr_mask mask)
+{
+ if (srq->error) {
+ pr_warn("srq in error state\n");
+ goto err1;
+ }
+
+ if (mask & IB_SRQ_MAX_WR) {
+ if (attr->max_wr > rxe->attr.max_srq_wr) {
+ pr_warn("max_wr(%d) > max_srq_wr(%d)\n",
+ attr->max_wr, rxe->attr.max_srq_wr);
+ goto err1;
+ }
+
+ if (attr->max_wr <= 0) {
+ pr_warn("max_wr(%d) <= 0\n", attr->max_wr);
+ goto err1;
+ }
+
+ if (srq->limit && (attr->max_wr < srq->limit)) {
+ pr_warn("max_wr (%d) < srq->limit (%d)\n",
+ attr->max_wr, srq->limit);
+ goto err1;
+ }
+
+ if (attr->max_wr < RXE_MIN_SRQ_WR)
+ attr->max_wr = RXE_MIN_SRQ_WR;
+ }
+
+ if (mask & IB_SRQ_LIMIT) {
+ if (attr->srq_limit > rxe->attr.max_srq_wr) {
+ pr_warn("srq_limit(%d) > max_srq_wr(%d)\n",
+ attr->srq_limit, rxe->attr.max_srq_wr);
+ goto err1;
+ }
+
+ if (attr->srq_limit > srq->rq.queue->buf->index_mask) {
+ pr_warn("srq_limit (%d) > cur limit(%d)\n",
+ attr->srq_limit,
+ srq->rq.queue->buf->index_mask);
+ goto err1;
+ }
+ }
+
+ return 0;
+
+err1:
+ return -EINVAL;
+}
+
int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
struct rxe_modify_srq_cmd *ucmd, struct ib_udata *udata)
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 58e4412b1d16..2ddfd99dd020 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -7,8 +7,8 @@
#include <linux/dma-mapping.h>
#include <net/addrconf.h>
#include <rdma/uverbs_ioctl.h>
+
#include "rxe.h"
-#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_hw_counters.h"
@@ -295,7 +295,7 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
uresp = udata->outbuf;
}
- err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
+ err = rxe_srq_chk_init(rxe, init);
if (err)
goto err1;
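For comparison, the modify path keeps the mask-driven check. A minimal
sketch of that pairing, again built only from the rxe_loc.h prototypes; the
example_modify_srq() wrapper is illustrative, and the modify verb call sites
in rxe_verbs.c are not touched by this patch.

/* Illustrative wrapper, not driver code: shows how the modify verb pairs
 * rxe_srq_chk_attr() with rxe_srq_from_attr().
 */
static int example_modify_srq(struct rxe_dev *rxe, struct rxe_srq *srq,
			      struct ib_srq_attr *attr,
			      enum ib_srq_attr_mask mask,
			      struct rxe_modify_srq_cmd *ucmd,
			      struct ib_udata *udata)
{
	int err;

	/* mask selects which attributes (e.g. IB_SRQ_MAX_WR, IB_SRQ_LIMIT)
	 * are validated against the device and the existing srq
	 */
	err = rxe_srq_chk_attr(rxe, srq, attr, mask);
	if (err)
		return err;

	/* apply the validated attributes to the existing srq */
	return rxe_srq_from_attr(rxe, srq, attr, mask, ucmd, udata);
}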