Diffstat (limited to 'drivers/infiniband/sw/rxe')
 drivers/infiniband/sw/rxe/rxe.h       |  22
 drivers/infiniband/sw/rxe/rxe_comp.c  |   4
 drivers/infiniband/sw/rxe/rxe_icrc.c  | 124
 drivers/infiniband/sw/rxe/rxe_loc.h   |  61
 drivers/infiniband/sw/rxe/rxe_mr.c    |  25
 drivers/infiniband/sw/rxe/rxe_net.c   |  59
 drivers/infiniband/sw/rxe/rxe_pool.c  |   2
 drivers/infiniband/sw/rxe/rxe_recv.c  |  23
 drivers/infiniband/sw/rxe/rxe_req.c   |  13
 drivers/infiniband/sw/rxe/rxe_resp.c  |  33
 drivers/infiniband/sw/rxe/rxe_verbs.c |  59
 drivers/infiniband/sw/rxe/rxe_verbs.h |   2

 12 files changed, 226 insertions(+), 201 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
index 623fd17df02d..1bb3fb618bf5 100644
--- a/drivers/infiniband/sw/rxe/rxe.h
+++ b/drivers/infiniband/sw/rxe/rxe.h
@@ -14,7 +14,6 @@
 #include <linux/module.h>
 #include <linux/skbuff.h>
-#include <linux/crc32.h>
 
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_user_verbs.h>
 
@@ -42,27 +41,6 @@
 
 extern bool rxe_initialized;
 
-static inline u32 rxe_crc32(struct rxe_dev *rxe,
-                            u32 crc, void *next, size_t len)
-{
-        u32 retval;
-        int err;
-
-        SHASH_DESC_ON_STACK(shash, rxe->tfm);
-
-        shash->tfm = rxe->tfm;
-        *(u32 *)shash_desc_ctx(shash) = crc;
-        err = crypto_shash_update(shash, next, len);
-        if (unlikely(err)) {
-                pr_warn_ratelimited("failed crc calculation, err: %d\n", err);
-                return crc32_le(crc, next, len);
-        }
-
-        retval = *(u32 *)shash_desc_ctx(shash);
-        barrier_data(shash_desc_ctx(shash));
-        return retval;
-}
-
 void rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu);
 
 int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name);
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 58ad9c2644f3..d2d802c776fd 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -349,7 +349,7 @@ static inline enum comp_state do_read(struct rxe_qp *qp,
 
         ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
                         &wqe->dma, payload_addr(pkt),
-                        payload_size(pkt), RXE_TO_MR_OBJ, NULL);
+                        payload_size(pkt), RXE_TO_MR_OBJ);
         if (ret) {
                 wqe->status = IB_WC_LOC_PROT_ERR;
                 return COMPST_ERROR;
@@ -371,7 +371,7 @@ static inline enum comp_state do_atomic(struct rxe_qp *qp,
 
         ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
                         &wqe->dma, &atomic_orig,
-                        sizeof(u64), RXE_TO_MR_OBJ, NULL);
+                        sizeof(u64), RXE_TO_MR_OBJ);
         if (ret) {
                 wqe->status = IB_WC_LOC_PROT_ERR;
                 return COMPST_ERROR;
diff --git a/drivers/infiniband/sw/rxe/rxe_icrc.c b/drivers/infiniband/sw/rxe/rxe_icrc.c
index 66b2aad54bb7..e03af3012590 100644
--- a/drivers/infiniband/sw/rxe/rxe_icrc.c
+++ b/drivers/infiniband/sw/rxe/rxe_icrc.c
@@ -4,18 +4,79 @@
  * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
  */
 
+#include <linux/crc32.h>
+
 #include "rxe.h"
 #include "rxe_loc.h"
 
-/* Compute a partial ICRC for all the IB transport headers. */
-u32 rxe_icrc_hdr(struct rxe_pkt_info *pkt, struct sk_buff *skb)
+/**
+ * rxe_icrc_init() - Initialize crypto function for computing crc32
+ * @rxe: rdma_rxe device object
+ *
+ * Return: 0 on success else an error
+ */
+int rxe_icrc_init(struct rxe_dev *rxe)
+{
+        struct crypto_shash *tfm;
+
+        tfm = crypto_alloc_shash("crc32", 0, 0);
+        if (IS_ERR(tfm)) {
+                pr_warn("failed to init crc32 algorithm err:%ld\n",
+                        PTR_ERR(tfm));
+                return PTR_ERR(tfm);
+        }
+
+        rxe->tfm = tfm;
+
+        return 0;
+}
+
+/**
+ * rxe_crc32() - Compute cumulative crc32 for a contiguous segment
+ * @rxe: rdma_rxe device object
+ * @crc: starting crc32 value from previous segments
+ * @next: starting address of current segment
+ * @len: length of current segment
+ *
+ * Return: the cumulative crc32 checksum
+ */
+static __be32 rxe_crc32(struct rxe_dev *rxe, __be32 crc, void *next, size_t len)
+{
+        __be32 icrc;
+        int err;
+
+        SHASH_DESC_ON_STACK(shash, rxe->tfm);
+
+        shash->tfm = rxe->tfm;
+        *(__be32 *)shash_desc_ctx(shash) = crc;
+        err = crypto_shash_update(shash, next, len);
+        if (unlikely(err)) {
+                pr_warn_ratelimited("failed crc calculation, err: %d\n", err);
+                return (__force __be32)crc32_le((__force u32)crc, next, len);
+        }
+
+        icrc = *(__be32 *)shash_desc_ctx(shash);
+        barrier_data(shash_desc_ctx(shash));
+
+        return icrc;
+}
+
+/**
+ * rxe_icrc_hdr() - Compute the partial ICRC for the network and transport
+ *                  headers of a packet.
+ * @skb: packet buffer
+ * @pkt: packet information
+ *
+ * Return: the partial ICRC
+ */
+static __be32 rxe_icrc_hdr(struct sk_buff *skb, struct rxe_pkt_info *pkt)
 {
         unsigned int bth_offset = 0;
         struct iphdr *ip4h = NULL;
         struct ipv6hdr *ip6h = NULL;
         struct udphdr *udph;
         struct rxe_bth *bth;
-        int crc;
+        __be32 crc;
         int length;
         int hdr_size = sizeof(struct udphdr) +
                 (skb->protocol == htons(ETH_P_IP) ?
@@ -30,7 +91,7 @@ u32 rxe_icrc_hdr(struct rxe_pkt_info *pkt, struct sk_buff *skb)
         /* This seed is the result of computing a CRC with a seed of
          * 0xfffffff and 8 bytes of 0xff representing a masked LRH.
          */
-        crc = 0xdebb20e3;
+        crc = (__force __be32)0xdebb20e3;
 
         if (skb->protocol == htons(ETH_P_IP)) { /* IPv4 */
                 memcpy(pshdr, ip_hdr(skb), hdr_size);
@@ -67,3 +128,58 @@ u32 rxe_icrc_hdr(struct rxe_pkt_info *pkt, struct sk_buff *skb)
                 rxe_opcode[pkt->opcode].length - RXE_BTH_BYTES);
         return crc;
 }
+
+/**
+ * rxe_icrc_check() - Compute ICRC for a packet and compare to the ICRC
+ *                    delivered in the packet.
+ * @skb: packet buffer
+ * @pkt: packet information
+ *
+ * Return: 0 if the values match else an error
+ */
+int rxe_icrc_check(struct sk_buff *skb, struct rxe_pkt_info *pkt)
+{
+        __be32 *icrcp;
+        __be32 pkt_icrc;
+        __be32 icrc;
+
+        icrcp = (__be32 *)(pkt->hdr + pkt->paylen - RXE_ICRC_SIZE);
+        pkt_icrc = *icrcp;
+
+        icrc = rxe_icrc_hdr(skb, pkt);
+        icrc = rxe_crc32(pkt->rxe, icrc, (u8 *)payload_addr(pkt),
+                         payload_size(pkt) + bth_pad(pkt));
+        icrc = ~icrc;
+
+        if (unlikely(icrc != pkt_icrc)) {
+                if (skb->protocol == htons(ETH_P_IPV6))
+                        pr_warn_ratelimited("bad ICRC from %pI6c\n",
+                                            &ipv6_hdr(skb)->saddr);
+                else if (skb->protocol == htons(ETH_P_IP))
+                        pr_warn_ratelimited("bad ICRC from %pI4\n",
+                                            &ip_hdr(skb)->saddr);
+                else
+                        pr_warn_ratelimited("bad ICRC from unknown\n");
+
+                return -EINVAL;
+        }
+
+        return 0;
+}
+
+/**
+ * rxe_icrc_generate() - compute ICRC for a packet.
+ * @skb: packet buffer
+ * @pkt: packet information
+ */
+void rxe_icrc_generate(struct sk_buff *skb, struct rxe_pkt_info *pkt)
+{
+        __be32 *icrcp;
+        __be32 icrc;
+
+        icrcp = (__be32 *)(pkt->hdr + pkt->paylen - RXE_ICRC_SIZE);
+        icrc = rxe_icrc_hdr(skb, pkt);
+        icrc = rxe_crc32(pkt->rxe, icrc, (u8 *)payload_addr(pkt),
+                         payload_size(pkt) + bth_pad(pkt));
+        *icrcp = ~icrc;
+}
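(Aside: the partial-ICRC seed that rxe_icrc_hdr() carries forward can be reproduced outside the kernel. The sketch below is illustration only — a bitwise reflected CRC-32 without final inversion, i.e. the same math as the crc32_le() that backs the "crc32" shash. Per the comment kept in the hunk above, CRCing eight 0xff bytes — the masked LRH — from a start value of 0xffffffff should print the 0xdebb20e3 constant.)

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Reflected CRC-32 (polynomial 0xedb88320), no final inversion, matching
 * the semantics of the kernel's crc32_le(). */
static uint32_t crc32_le_bitwise(uint32_t crc, const uint8_t *p, size_t len)
{
        while (len--) {
                crc ^= *p++;
                for (int i = 0; i < 8; i++)
                        crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320u : 0);
        }
        return crc;
}

int main(void)
{
        uint8_t masked_lrh[8];

        /* Eight 0xff bytes stand in for the masked LRH. */
        memset(masked_lrh, 0xff, sizeof(masked_lrh));
        printf("seed = 0x%08x\n",
               crc32_le_bitwise(0xffffffffu, masked_lrh, sizeof(masked_lrh)));
        return 0;
}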
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 1ddb20855dee..f0c954575bde 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -77,10 +77,9 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
                      int access, struct rxe_mr *mr);
 int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr);
 int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
-                enum rxe_mr_copy_dir dir, u32 *crcp);
-int copy_data(struct rxe_pd *pd, int access,
-              struct rxe_dma_info *dma, void *addr, int length,
-              enum rxe_mr_copy_dir dir, u32 *crcp);
+                enum rxe_mr_copy_dir dir);
+int copy_data(struct rxe_pd *pd, int access, struct rxe_dma_info *dma,
+              void *addr, int length, enum rxe_mr_copy_dir dir);
 void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length);
 struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
                          enum rxe_mr_lookup_type type);
@@ -99,11 +98,11 @@ struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey);
 void rxe_mw_cleanup(struct rxe_pool_entry *arg);
 
 /* rxe_net.c */
-void rxe_loopback(struct sk_buff *skb);
-int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb);
 struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
                                 int paylen, struct rxe_pkt_info *pkt);
-int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb, u32 *crc);
+int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb);
+int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
+                    struct sk_buff *skb);
 const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num);
 int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid);
 int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid);
@@ -193,7 +192,10 @@ int rxe_completer(void *arg);
 int rxe_requester(void *arg);
 int rxe_responder(void *arg);
 
-u32 rxe_icrc_hdr(struct rxe_pkt_info *pkt, struct sk_buff *skb);
+/* rxe_icrc.c */
+int rxe_icrc_init(struct rxe_dev *rxe);
+int rxe_icrc_check(struct sk_buff *skb, struct rxe_pkt_info *pkt);
+void rxe_icrc_generate(struct sk_buff *skb, struct rxe_pkt_info *pkt);
 
 void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb);
 
@@ -204,47 +206,4 @@ static inline unsigned int wr_opcode_mask(int opcode, struct rxe_qp *qp)
         return rxe_wr_opcode_info[opcode].mask[qp->ibqp.qp_type];
 }
 
-static inline int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
-                                  struct sk_buff *skb)
-{
-        int err;
-        int is_request = pkt->mask & RXE_REQ_MASK;
-        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
-
-        if ((is_request && (qp->req.state != QP_STATE_READY)) ||
-            (!is_request && (qp->resp.state != QP_STATE_READY))) {
-                pr_info("Packet dropped. QP is not in ready state\n");
-                goto drop;
-        }
-
-        if (pkt->mask & RXE_LOOPBACK_MASK) {
-                memcpy(SKB_TO_PKT(skb), pkt, sizeof(*pkt));
-                rxe_loopback(skb);
-                err = 0;
-        } else {
-                err = rxe_send(pkt, skb);
-        }
-
-        if (err) {
-                rxe->xmit_errors++;
-                rxe_counter_inc(rxe, RXE_CNT_SEND_ERR);
-                return err;
-        }
-
-        if ((qp_type(qp) != IB_QPT_RC) &&
-            (pkt->mask & RXE_END_MASK)) {
-                pkt->wqe->state = wqe_state_done;
-                rxe_run_task(&qp->comp.task, 1);
-        }
-
-        rxe_counter_inc(rxe, RXE_CNT_SENT_PKTS);
-        goto done;
-
-drop:
-        kfree_skb(skb);
-        err = 0;
-done:
-        return err;
-}
-
 #endif /* RXE_LOC_H */
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index be4bcb420fab..5890a8246216 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -123,7 +123,6 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
                 goto err_out;
         }
 
-        mr->umem = umem;
         num_buf = ib_umem_num_pages(umem);
 
         rxe_mr_init(access, mr);
@@ -143,7 +142,7 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
         if (length > 0) {
                 buf = map[0]->buf;
 
-                for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
+                for_each_sgtable_page (&umem->sgt_append.sgt, &sg_iter, 0) {
                         if (num_buf >= RXE_BUF_PER_MAP) {
                                 map++;
                                 buf = map[0]->buf;
@@ -286,11 +285,10 @@ out:
 }
 
 /* copy data from a range (vaddr, vaddr+length-1) to or from
- * a mr object starting at iova. Compute incremental value of
- * crc32 if crcp is not zero. caller must hold a reference to mr
+ * a mr object starting at iova.
  */
 int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
-                enum rxe_mr_copy_dir dir, u32 *crcp)
+                enum rxe_mr_copy_dir dir)
 {
         int err;
         int bytes;
@@ -300,7 +298,6 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
         int m;
         int i;
         size_t offset;
-        u32 crc = crcp ? (*crcp) : 0;
 
         if (length == 0)
                 return 0;
@@ -314,10 +311,6 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
 
                 memcpy(dest, src, length);
 
-                if (crcp)
-                        *crcp = rxe_crc32(to_rdev(mr->ibmr.device), *crcp, dest,
-                                          length);
-
                 return 0;
         }
 
@@ -348,10 +341,6 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
 
                 memcpy(dest, src, bytes);
 
-                if (crcp)
-                        crc = rxe_crc32(to_rdev(mr->ibmr.device), crc, dest,
-                                        bytes);
-
                 length  -= bytes;
                 addr    += bytes;
 
@@ -366,9 +355,6 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
                 }
         }
 
-        if (crcp)
-                *crcp = crc;
-
         return 0;
 
 err1:
@@ -384,8 +370,7 @@ int copy_data(
         struct rxe_dma_info     *dma,
         void                    *addr,
         int                     length,
-        enum rxe_mr_copy_dir    dir,
-        u32                     *crcp)
+        enum rxe_mr_copy_dir    dir)
 {
         int                     bytes;
         struct rxe_sge          *sge    = &dma->sge[dma->cur_sge];
@@ -446,7 +431,7 @@ int copy_data(
                 if (bytes > 0) {
                         iova = sge->addr + offset;
 
-                        err = rxe_mr_copy(mr, iova, addr, bytes, dir, crcp);
+                        err = rxe_mr_copy(mr, iova, addr, bytes, dir);
                         if (err)
                                 goto err2;
 
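(Aside on the rxe_mr.c hunk above: for_each_sgtable_page() takes the sg_table itself and drops the nents/nmap bookkeeping that the old for_each_sg_page() call exposed. A minimal kernel-style sketch of the same walk; walk_sgt_pages() is a hypothetical helper, sgt is assumed to be a populated table, and page_address() is only valid for lowmem pages.)

#include <linux/scatterlist.h>
#include <linux/mm.h>

static void walk_sgt_pages(struct sg_table *sgt)
{
        struct sg_page_iter sg_iter;

        for_each_sgtable_page(sgt, &sg_iter, 0) {
                struct page *pg = sg_page_iter_page(&sg_iter);

                /* rxe_mr_init_user() records page_address(pg) and PAGE_SIZE
                 * into its map entries at this point in the loop. */
                (void)page_address(pg);
        }
}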
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 5ac27f28ace1..2cb810cb890a 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -344,7 +344,7 @@ static int prepare6(struct rxe_pkt_info *pkt, struct sk_buff *skb)
         return 0;
 }
 
-int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb, u32 *crc)
+int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb)
 {
         int err = 0;
 
@@ -353,8 +353,6 @@ int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb, u32 *crc)
         else if (skb->protocol == htons(ETH_P_IPV6))
                 err = prepare6(pkt, skb);
 
-        *crc = rxe_icrc_hdr(pkt, skb);
-
         if (ether_addr_equal(skb->dev->dev_addr, rxe_get_av(pkt)->dmac))
                 pkt->mask |= RXE_LOOPBACK_MASK;
 
@@ -374,7 +372,7 @@ static void rxe_skb_tx_dtor(struct sk_buff *skb)
         rxe_drop_ref(qp);
 }
 
-int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb)
+static int rxe_send(struct sk_buff *skb, struct rxe_pkt_info *pkt)
 {
         int err;
 
@@ -407,19 +405,64 @@ int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb)
 /* fix up a send packet to match the packets
  * received from UDP before looping them back
  */
-void rxe_loopback(struct sk_buff *skb)
+static int rxe_loopback(struct sk_buff *skb, struct rxe_pkt_info *pkt)
 {
-        struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
+        memcpy(SKB_TO_PKT(skb), pkt, sizeof(*pkt));
 
         if (skb->protocol == htons(ETH_P_IP))
                 skb_pull(skb, sizeof(struct iphdr));
         else
                 skb_pull(skb, sizeof(struct ipv6hdr));
 
-        if (WARN_ON(!ib_device_try_get(&pkt->rxe->ib_dev)))
+        if (WARN_ON(!ib_device_try_get(&pkt->rxe->ib_dev))) {
                 kfree_skb(skb);
+                return -EIO;
+        }
+
+        rxe_rcv(skb);
+
+        return 0;
+}
+
+int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
+                    struct sk_buff *skb)
+{
+        int err;
+        int is_request = pkt->mask & RXE_REQ_MASK;
+        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+
+        if ((is_request && (qp->req.state != QP_STATE_READY)) ||
+            (!is_request && (qp->resp.state != QP_STATE_READY))) {
+                pr_info("Packet dropped. QP is not in ready state\n");
+                goto drop;
+        }
+
+        rxe_icrc_generate(skb, pkt);
+
+        if (pkt->mask & RXE_LOOPBACK_MASK)
+                err = rxe_loopback(skb, pkt);
         else
-                rxe_rcv(skb);
+                err = rxe_send(skb, pkt);
+        if (err) {
+                rxe->xmit_errors++;
+                rxe_counter_inc(rxe, RXE_CNT_SEND_ERR);
+                return err;
+        }
+
+        if ((qp_type(qp) != IB_QPT_RC) &&
+            (pkt->mask & RXE_END_MASK)) {
+                pkt->wqe->state = wqe_state_done;
+                rxe_run_task(&qp->comp.task, 1);
+        }
+
+        rxe_counter_inc(rxe, RXE_CNT_SENT_PKTS);
+        goto done;
+
+drop:
+        kfree_skb(skb);
+        err = 0;
+done:
+        return err;
 }
 
 struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index 0b8e7c6255a2..ffa8420b4765 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -41,7 +41,7 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
                 .size           = sizeof(struct rxe_qp),
                 .elem_offset    = offsetof(struct rxe_qp, pelem),
                 .cleanup        = rxe_qp_cleanup,
-                .flags          = RXE_POOL_INDEX,
+                .flags          = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
                 .min_index      = RXE_MIN_QP_INDEX,
                 .max_index      = RXE_MAX_QP_INDEX,
         },
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index 7a49e27da23a..6a6cc1fa90e4 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -361,8 +361,6 @@ void rxe_rcv(struct sk_buff *skb)
         int err;
         struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
         struct rxe_dev *rxe = pkt->rxe;
-        __be32 *icrcp;
-        u32 calc_icrc, pack_icrc;
 
         if (unlikely(skb->len < RXE_BTH_BYTES))
                 goto drop;
@@ -384,26 +382,9 @@ void rxe_rcv(struct sk_buff *skb)
         if (unlikely(err))
                 goto drop;
 
-        /* Verify ICRC */
-        icrcp = (__be32 *)(pkt->hdr + pkt->paylen - RXE_ICRC_SIZE);
-        pack_icrc = be32_to_cpu(*icrcp);
-
-        calc_icrc = rxe_icrc_hdr(pkt, skb);
-        calc_icrc = rxe_crc32(rxe, calc_icrc, (u8 *)payload_addr(pkt),
-                              payload_size(pkt) + bth_pad(pkt));
-        calc_icrc = (__force u32)cpu_to_be32(~calc_icrc);
-        if (unlikely(calc_icrc != pack_icrc)) {
-                if (skb->protocol == htons(ETH_P_IPV6))
-                        pr_warn_ratelimited("bad ICRC from %pI6c\n",
-                                            &ipv6_hdr(skb)->saddr);
-                else if (skb->protocol == htons(ETH_P_IP))
-                        pr_warn_ratelimited("bad ICRC from %pI4\n",
-                                            &ip_hdr(skb)->saddr);
-                else
-                        pr_warn_ratelimited("bad ICRC from unknown\n");
-
+        err = rxe_icrc_check(skb, pkt);
+        if (unlikely(err))
                 goto drop;
-        }
 
         rxe_counter_inc(rxe, RXE_CNT_RCVD_PKTS);
 
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index c57699cc6578..3894197a82f6 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -466,12 +466,9 @@ static int finish_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
                          struct rxe_pkt_info *pkt, struct sk_buff *skb,
                          int paylen)
 {
-        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
-        u32 crc = 0;
-        u32 *p;
         int err;
 
-        err = rxe_prepare(pkt, skb, &crc);
+        err = rxe_prepare(pkt, skb);
         if (err)
                 return err;
 
@@ -479,7 +476,6 @@ static int finish_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
         if (wqe->wr.send_flags & IB_SEND_INLINE) {
                 u8 *tmp = &wqe->dma.inline_data[wqe->dma.sge_offset];
 
-                crc = rxe_crc32(rxe, crc, tmp, paylen);
                 memcpy(payload_addr(pkt), tmp, paylen);
 
                 wqe->dma.resid -= paylen;
@@ -487,8 +483,7 @@ static int finish_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
         } else {
                 err = copy_data(qp->pd, 0, &wqe->dma,
                                 payload_addr(pkt), paylen,
-                                RXE_FROM_MR_OBJ,
-                                &crc);
+                                RXE_FROM_MR_OBJ);
                 if (err)
                         return err;
         }
@@ -496,12 +491,8 @@ static int finish_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
                 u8 *pad = payload_addr(pkt) + paylen;
 
                 memset(pad, 0, bth_pad(pkt));
-                crc = rxe_crc32(rxe, crc, pad, bth_pad(pkt));
         }
-        p = payload_addr(pkt) + paylen + bth_pad(pkt);
-
-        *p = ~crc;
 
         return 0;
 }
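(Aside: with the *p = ~crc stores gone from finish_packet() above — and from prepare_ack_packet() in rxe_resp.c below — the trailer invariant lives only in the rxe_icrc_generate()/rxe_icrc_check() pair: the generator writes the inverted CRC into the last RXE_ICRC_SIZE bytes of the packet, the checker recomputes and compares. A toy userspace model of that pairing, with an ad-hoc checksum standing in for CRC32 and the driver's __be32 byte-order handling ignored.)

#include <stdint.h>
#include <stddef.h>
#include <string.h>

#define TRAILER 4       /* models RXE_ICRC_SIZE */

static uint32_t toy_sum(const uint8_t *p, size_t len)
{
        uint32_t s = 0;

        while (len--)
                s = s * 31 + *p++;
        return s;
}

/* generator: checksum everything but the trailer, store it inverted */
static void toy_generate(uint8_t *pkt, size_t paylen)
{
        uint32_t v = ~toy_sum(pkt, paylen - TRAILER);

        memcpy(pkt + paylen - TRAILER, &v, TRAILER);
}

/* checker: recompute and compare; the driver returns -EINVAL on mismatch */
static int toy_check(const uint8_t *pkt, size_t paylen)
{
        uint32_t v = ~toy_sum(pkt, paylen - TRAILER), got;

        memcpy(&got, pkt + paylen - TRAILER, TRAILER);
        return v == got ? 0 : -1;
}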
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 360ec67cb9e1..5501227ddc65 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -536,7 +536,7 @@ static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
         int err;
 
         err = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
-                        data_addr, data_len, RXE_TO_MR_OBJ, NULL);
+                        data_addr, data_len, RXE_TO_MR_OBJ);
         if (unlikely(err))
                 return (err == -ENOSPC) ? RESPST_ERR_LENGTH
                                         : RESPST_ERR_MALFORMED_WQE;
@@ -552,7 +552,7 @@ static enum resp_states write_data_in(struct rxe_qp *qp,
         int     data_len = payload_size(pkt);
 
         err = rxe_mr_copy(qp->resp.mr, qp->resp.va + qp->resp.offset,
-                          payload_addr(pkt), data_len, RXE_TO_MR_OBJ, NULL);
+                          payload_addr(pkt), data_len, RXE_TO_MR_OBJ);
         if (err) {
                 rc = RESPST_ERR_RKEY_VIOLATION;
                 goto out;
@@ -613,13 +613,10 @@ static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
                                           int opcode,
                                           int payload,
                                           u32 psn,
-                                          u8 syndrome,
-                                          u32 *crcp)
+                                          u8 syndrome)
 {
         struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
         struct sk_buff *skb;
-        u32 crc = 0;
-        u32 *p;
         int paylen;
         int pad;
         int err;
@@ -651,20 +648,12 @@ static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
         if (ack->mask & RXE_ATMACK_MASK)
                 atmack_set_orig(ack, qp->resp.atomic_orig);
 
-        err = rxe_prepare(ack, skb, &crc);
+        err = rxe_prepare(ack, skb);
         if (err) {
                 kfree_skb(skb);
                 return NULL;
         }
 
-        if (crcp) {
-                /* CRC computation will be continued by the caller */
-                *crcp = crc;
-        } else {
-                p = payload_addr(ack) + payload + bth_pad(ack);
-                *p = ~crc;
-        }
-
         return skb;
 }
 
@@ -682,8 +671,6 @@ static enum resp_states read_reply(struct rxe_qp *qp,
         int opcode;
         int err;
         struct resp_res *res = qp->resp.res;
-        u32 icrc;
-        u32 *p;
 
         if (!res) {
                 /* This is the first time we process that request. Get a
@@ -742,24 +729,20 @@ static enum resp_states read_reply(struct rxe_qp *qp,
         payload = min_t(int, res->read.resid, mtu);
 
         skb = prepare_ack_packet(qp, req_pkt, &ack_pkt, opcode, payload,
-                                 res->cur_psn, AETH_ACK_UNLIMITED, &icrc);
+                                 res->cur_psn, AETH_ACK_UNLIMITED);
         if (!skb)
                 return RESPST_ERR_RNR;
 
         err = rxe_mr_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt),
-                          payload, RXE_FROM_MR_OBJ, &icrc);
+                          payload, RXE_FROM_MR_OBJ);
         if (err)
                 pr_err("Failed copying memory\n");
 
         if (bth_pad(&ack_pkt)) {
-                struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
                 u8 *pad = payload_addr(&ack_pkt) + payload;
 
                 memset(pad, 0, bth_pad(&ack_pkt));
-                icrc = rxe_crc32(rxe, icrc, pad, bth_pad(&ack_pkt));
         }
-        p = payload_addr(&ack_pkt) + payload + bth_pad(&ack_pkt);
-        *p = ~icrc;
 
         err = rxe_xmit_packet(qp, &ack_pkt, skb);
         if (err) {
@@ -984,7 +967,7 @@ static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
         struct sk_buff *skb;
 
         skb = prepare_ack_packet(qp, pkt, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE,
-                                 0, psn, syndrome, NULL);
+                                 0, psn, syndrome);
         if (!skb) {
                 err = -ENOMEM;
                 goto err1;
@@ -1008,7 +991,7 @@ static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
 
         skb = prepare_ack_packet(qp, pkt, &ack_pkt,
                                  IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE, 0, pkt->psn,
-                                 syndrome, NULL);
+                                 syndrome);
         if (!skb) {
                 rc = -ENOMEM;
                 goto out;
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index c223959ac174..267b5a9c345d 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -391,59 +391,52 @@ static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
         return err;
 }
 
-static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
-                                   struct ib_qp_init_attr *init,
-                                   struct ib_udata *udata)
+static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init,
+                         struct ib_udata *udata)
 {
         int err;
-        struct rxe_dev *rxe = to_rdev(ibpd->device);
-        struct rxe_pd *pd = to_rpd(ibpd);
-        struct rxe_qp *qp;
+        struct rxe_dev *rxe = to_rdev(ibqp->device);
+        struct rxe_pd *pd = to_rpd(ibqp->pd);
+        struct rxe_qp *qp = to_rqp(ibqp);
         struct rxe_create_qp_resp __user *uresp = NULL;
 
         if (udata) {
                 if (udata->outlen < sizeof(*uresp))
-                        return ERR_PTR(-EINVAL);
+                        return -EINVAL;
                 uresp = udata->outbuf;
         }
 
         if (init->create_flags)
-                return ERR_PTR(-EOPNOTSUPP);
+                return -EOPNOTSUPP;
 
         err = rxe_qp_chk_init(rxe, init);
         if (err)
-                goto err1;
-
-        qp = rxe_alloc(&rxe->qp_pool);
-        if (!qp) {
-                err = -ENOMEM;
-                goto err1;
-        }
+                return err;
 
         if (udata) {
-                if (udata->inlen) {
-                        err = -EINVAL;
-                        goto err2;
-                }
+                if (udata->inlen)
+                        return -EINVAL;
+
                 qp->is_user = true;
         } else {
                 qp->is_user = false;
         }
 
-        rxe_add_index(qp);
+        err = rxe_add_to_pool(&rxe->qp_pool, qp);
+        if (err)
+                return err;
 
-        err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibpd, udata);
+        rxe_add_index(qp);
+        err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibqp->pd, udata);
         if (err)
-                goto err3;
+                goto qp_init;
 
-        return &qp->ibqp;
+        return 0;
 
-err3:
+qp_init:
         rxe_drop_index(qp);
-err2:
         rxe_drop_ref(qp);
-err1:
-        return ERR_PTR(err);
+        return err;
 }
 
 static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
@@ -1145,6 +1138,7 @@ static const struct ib_device_ops rxe_dev_ops = {
         INIT_RDMA_OBJ_SIZE(ib_ah, rxe_ah, ibah),
         INIT_RDMA_OBJ_SIZE(ib_cq, rxe_cq, ibcq),
         INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd),
+        INIT_RDMA_OBJ_SIZE(ib_qp, rxe_qp, ibqp),
         INIT_RDMA_OBJ_SIZE(ib_srq, rxe_srq, ibsrq),
         INIT_RDMA_OBJ_SIZE(ib_ucontext, rxe_ucontext, ibuc),
         INIT_RDMA_OBJ_SIZE(ib_mw, rxe_mw, ibmw),
@@ -1154,7 +1148,6 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
 {
         int err;
         struct ib_device *dev = &rxe->ib_dev;
-        struct crypto_shash *tfm;
 
         strscpy(dev->node_desc, "rxe", sizeof(dev->node_desc));
 
@@ -1173,13 +1166,9 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
         if (err)
                 return err;
 
-        tfm = crypto_alloc_shash("crc32", 0, 0);
-        if (IS_ERR(tfm)) {
-                pr_err("failed to allocate crc algorithm err:%ld\n",
-                       PTR_ERR(tfm));
-                return PTR_ERR(tfm);
-        }
-        rxe->tfm = tfm;
+        err = rxe_icrc_init(rxe);
+        if (err)
+                return err;
 
         err = ib_register_device(dev, ibdev_name, NULL);
         if (err)
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 959a3260fcab..ac2a2148027f 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -210,8 +210,8 @@ struct rxe_resp_info {
 };
 
 struct rxe_qp {
-        struct rxe_pool_entry   pelem;
         struct ib_qp            ibqp;
+        struct rxe_pool_entry   pelem;
         struct ib_qp_attr       attr;
         unsigned int            valid;
         unsigned int            mtu;
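(Aside on the rxe_verbs.h hunk: swapping pelem and ibqp is required by the switch to core-allocated QPs. INIT_RDMA_OBJ_SIZE(ib_qp, rxe_qp, ibqp) build-asserts that the ib_qp handle sits at offset 0 of the driver structure, since the core and the driver cast the same pointer back and forth. A standalone model of that constraint, with hypothetical type names:)

#include <stddef.h>

struct ib_qp_model { int dummy; };      /* stand-in for struct ib_qp */

struct rxe_qp_model {
        struct ib_qp_model ibqp;        /* must stay the first member */
        struct { int index; } pelem;    /* driver bookkeeping follows */
};

/* Mirrors the BUILD_BUG_ON_ZERO(offsetof(...) != 0) hidden inside
 * INIT_RDMA_OBJ_SIZE. */
_Static_assert(offsetof(struct rxe_qp_model, ibqp) == 0,
               "ib_qp handle must be at offset 0");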