Diffstat (limited to 'drivers/infiniband/hw')
37 files changed, 132 insertions, 114 deletions
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 3f18efc0c297..1d7a9ca5240c 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -752,12 +752,6 @@ static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp) gsi_sqp = rdev->gsi_ctx.gsi_sqp; gsi_sah = rdev->gsi_ctx.gsi_sah; - /* remove from active qp list */ - mutex_lock(&rdev->qp_lock); - list_del(&gsi_sqp->list); - mutex_unlock(&rdev->qp_lock); - atomic_dec(&rdev->qp_count); - ibdev_dbg(&rdev->ibdev, "Destroy the shadow AH\n"); bnxt_qplib_destroy_ah(&rdev->qplib_res, &gsi_sah->qplib_ah, @@ -772,6 +766,12 @@ static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp) } bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp); + /* remove from active qp list */ + mutex_lock(&rdev->qp_lock); + list_del(&gsi_sqp->list); + mutex_unlock(&rdev->qp_lock); + atomic_dec(&rdev->qp_count); + kfree(rdev->gsi_ctx.sqp_tbl); kfree(gsi_sah); kfree(gsi_sqp); @@ -792,11 +792,6 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata) unsigned int flags; int rc; - mutex_lock(&rdev->qp_lock); - list_del(&qp->list); - mutex_unlock(&rdev->qp_lock); - atomic_dec(&rdev->qp_count); - bnxt_qplib_flush_cqn_wq(&qp->qplib_qp); rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp); @@ -819,6 +814,11 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata) goto sh_fail; } + mutex_lock(&rdev->qp_lock); + list_del(&qp->list); + mutex_unlock(&rdev->qp_lock); + atomic_dec(&rdev->qp_count); + ib_umem_release(qp->rumem); ib_umem_release(qp->sumem); @@ -2657,7 +2657,7 @@ int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr, default: break; } - /* fall through */ + fallthrough; case IB_WR_SEND_WITH_INV: rc = bnxt_re_build_send_wqe(qp, wr, &wqe); break; @@ -3264,6 +3264,19 @@ static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc, wc->wc_flags |= IB_WC_GRH; } +static bool bnxt_re_check_if_vlan_valid(struct bnxt_re_dev *rdev, + u16 vlan_id) +{ + /* + * Check if the vlan is configured in the host. If not configured, it + * can be a transparent VLAN. So dont report the vlan id. 
+ */ + if (!__vlan_find_dev_deep_rcu(rdev->netdev, + htons(ETH_P_8021Q), vlan_id)) + return false; + return true; +} + static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe, u16 *vid, u8 *sl) { @@ -3332,9 +3345,11 @@ static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp, wc->src_qp = orig_cqe->src_qp; memcpy(wc->smac, orig_cqe->smac, ETH_ALEN); if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) { - wc->vlan_id = vlan_id; - wc->sl = sl; - wc->wc_flags |= IB_WC_WITH_VLAN; + if (bnxt_re_check_if_vlan_valid(rdev, vlan_id)) { + wc->vlan_id = vlan_id; + wc->sl = sl; + wc->wc_flags |= IB_WC_WITH_VLAN; + } } wc->port_num = 1; wc->vendor_err = orig_cqe->status; diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 17ac8b7c5710..53aee5a42ab8 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -1009,7 +1009,6 @@ static void bnxt_re_free_res(struct bnxt_re_dev *rdev) static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev) { struct bnxt_re_ring_attr rattr = {}; - struct bnxt_qplib_ctx *qplib_ctx; int num_vec_created = 0; int rc = 0, i; u8 type; @@ -1032,13 +1031,11 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev) if (rc) goto dealloc_res; - qplib_ctx = &rdev->qplib_ctx; for (i = 0; i < rdev->num_msix - 1; i++) { struct bnxt_qplib_nq *nq; nq = &rdev->nq[i]; - nq->hwq.max_elements = (qplib_ctx->cq_count + - qplib_ctx->srqc_count + 2); + nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT; rc = bnxt_qplib_alloc_nq(&rdev->qplib_res, &rdev->nq[i]); if (rc) { ibdev_err(&rdev->ibdev, "Alloc Failed NQ%d rc:%#x", diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index 117b42349a28..f78da54a0bc5 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -818,6 +818,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) u16 cmd_flags = 0; u32 qp_flags = 0; u8 pg_sz_lvl; + u32 tbl_indx; int rc; RCFW_CMD_PREP(req, CREATE_QP1, cmd_flags); @@ -907,8 +908,9 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) rq->dbinfo.db = qp->dpi->dbr; rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size); } - rcfw->qp_tbl[qp->id].qp_id = qp->id; - rcfw->qp_tbl[qp->id].qp_handle = (void *)qp; + tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw); + rcfw->qp_tbl[tbl_indx].qp_id = qp->id; + rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp; return 0; @@ -935,10 +937,10 @@ static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size) sq = &qp->sq; hwq = &sq->hwq; + /* First psn entry */ fpsne = (u64)bnxt_qplib_get_qe(hwq, hwq->depth, &psn_pg); if (!IS_ALIGNED(fpsne, PAGE_SIZE)) - indx_pad = ALIGN(fpsne, PAGE_SIZE) / size; - + indx_pad = (fpsne & ~PAGE_MASK) / size; hwq->pad_pgofft = indx_pad; hwq->pad_pg = (u64 *)psn_pg; hwq->pad_stride = size; @@ -959,6 +961,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) u16 cmd_flags = 0; u32 qp_flags = 0; u8 pg_sz_lvl; + u32 tbl_indx; u16 nsge; RCFW_CMD_PREP(req, CREATE_QP, cmd_flags); @@ -1111,8 +1114,9 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) rq->dbinfo.db = qp->dpi->dbr; rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size); } - rcfw->qp_tbl[qp->id].qp_id = qp->id; - rcfw->qp_tbl[qp->id].qp_handle = (void *)qp; + tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw); + rcfw->qp_tbl[tbl_indx].qp_id = qp->id; + rcfw->qp_tbl[tbl_indx].qp_handle = 
(void *)qp; return 0; fail: @@ -1457,10 +1461,12 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, struct cmdq_destroy_qp req; struct creq_destroy_qp_resp resp; u16 cmd_flags = 0; + u32 tbl_indx; int rc; - rcfw->qp_tbl[qp->id].qp_id = BNXT_QPLIB_QP_ID_INVALID; - rcfw->qp_tbl[qp->id].qp_handle = NULL; + tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw); + rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID; + rcfw->qp_tbl[tbl_indx].qp_handle = NULL; RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags); @@ -1468,8 +1474,8 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, NULL, 0); if (rc) { - rcfw->qp_tbl[qp->id].qp_id = qp->id; - rcfw->qp_tbl[qp->id].qp_handle = qp; + rcfw->qp_tbl[tbl_indx].qp_id = qp->id; + rcfw->qp_tbl[tbl_indx].qp_handle = qp; return rc; } @@ -1779,7 +1785,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp, break; } - /* fall thru */ + fallthrough; case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM: case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV: { diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index 4e211162acee..f7736e34ac64 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c @@ -307,14 +307,15 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, __le16 mcookie; u16 cookie; int rc = 0; - u32 qp_id; + u32 qp_id, tbl_indx; pdev = rcfw->pdev; switch (qp_event->event) { case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION: err_event = (struct creq_qp_error_notification *)qp_event; qp_id = le32_to_cpu(err_event->xid); - qp = rcfw->qp_tbl[qp_id].qp_handle; + tbl_indx = map_qp_id_to_tbl_indx(qp_id, rcfw); + qp = rcfw->qp_tbl[tbl_indx].qp_handle; dev_dbg(&pdev->dev, "Received QP error notification\n"); dev_dbg(&pdev->dev, "qpid 0x%x, req_err=0x%x, resp_err=0x%x\n", @@ -615,8 +616,9 @@ int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res, cmdq->bmap_size = bmap_size; - rcfw->qp_tbl_size = qp_tbl_sz; - rcfw->qp_tbl = kcalloc(qp_tbl_sz, sizeof(struct bnxt_qplib_qp_node), + /* Allocate one extra to hold the QP1 entries */ + rcfw->qp_tbl_size = qp_tbl_sz + 1; + rcfw->qp_tbl = kcalloc(rcfw->qp_tbl_size, sizeof(struct bnxt_qplib_qp_node), GFP_KERNEL); if (!rcfw->qp_tbl) goto fail; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h index 157387636d00..5f2f0a5a3560 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h @@ -216,4 +216,9 @@ int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw); int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, struct bnxt_qplib_ctx *ctx, int is_virtfn); void bnxt_qplib_mark_qp_error(void *qp_handle); +static inline u32 map_qp_id_to_tbl_indx(u32 qid, struct bnxt_qplib_rcfw *rcfw) +{ + /* Last index of the qp_tbl is for QP1 ie. qp_tbl_size - 1*/ + return (qid == 1) ? 
rcfw->qp_tbl_size - 1 : qid % rcfw->qp_tbl_size - 2; +} #endif /* __BNXT_QPLIB_RCFW_H__ */ diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c index 4cd475ea97a2..64d44f51db4b 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c @@ -149,7 +149,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, attr->max_inline_data = le32_to_cpu(sb->max_inline_data); attr->l2_db_size = (sb->l2_db_space_size + 1) * (0x01 << RCFW_DBR_BASE_PAGE_SHIFT); - attr->max_sgid = le32_to_cpu(sb->max_gid); + attr->max_sgid = BNXT_QPLIB_NUM_GIDS_SUPPORTED; bnxt_qplib_query_version(rcfw, attr->fw_ver); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h index 6404f0da1051..967890cd81f2 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h @@ -47,6 +47,7 @@ struct bnxt_qplib_dev_attr { #define FW_VER_ARR_LEN 4 u8 fw_ver[FW_VER_ARR_LEN]; +#define BNXT_QPLIB_NUM_GIDS_SUPPORTED 256 u16 max_sgid; u16 max_mrw; u32 max_qp; diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 77bc02a9228e..1f288c73ccfc 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -2885,7 +2885,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) case MORIBUND: case CLOSING: stop_ep_timer(ep); - /*FALLTHROUGH*/ + fallthrough; case FPDU_MODE: if (ep->com.qp && ep->com.qp->srq) { srqidx = ABORT_RSS_SRQIDX_G( @@ -3759,7 +3759,7 @@ static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb, send_fw_act_open_req(ep, atid); return; } - /* fall through */ + fallthrough; case FW_EADDRINUSE: set_bit(ACT_RETRY_INUSE, &ep->com.history); if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) { diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index ac48012c992f..cbddb20c6121 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -1165,7 +1165,7 @@ int c4iw_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, break; } fw_flags |= FW_RI_RDMA_WRITE_WITH_IMMEDIATE; - /*FALLTHROUGH*/ + fallthrough; case IB_WR_RDMA_WRITE: fw_opcode = FW_RI_RDMA_WRITE_WR; swsqe->opcode = FW_RI_RDMA_WRITE; diff --git a/drivers/infiniband/hw/hfi1/pio_copy.c b/drivers/infiniband/hw/hfi1/pio_copy.c index b12e4665c9ab..4a4ec2397857 100644 --- a/drivers/infiniband/hw/hfi1/pio_copy.c +++ b/drivers/infiniband/hw/hfi1/pio_copy.c @@ -209,7 +209,6 @@ static inline void jcopy(u8 *dest, const u8 *src, u32 n) fallthrough; case 1: *dest++ = *src++; - /* fall through */ } } diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c index fa7a5ff498c7..a3b95805c154 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_cm.c +++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c @@ -2443,7 +2443,7 @@ static void i40iw_handle_rst_pkt(struct i40iw_cm_node *cm_node, case I40IW_CM_STATE_FIN_WAIT1: case I40IW_CM_STATE_LAST_ACK: cm_node->cm_id->rem_ref(cm_node->cm_id); - /* fall through */ + fallthrough; case I40IW_CM_STATE_TIME_WAIT: cm_node->state = I40IW_CM_STATE_CLOSED; i40iw_rem_ref_cm_node(cm_node); diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c index 688f19667221..86d3f8aff329 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c +++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c @@ -1964,7 +1964,6 @@ static enum i40iw_status_code i40iw_sc_get_next_aeqe(struct i40iw_sc_aeq *aeq, 
info->out_rdrsp = true; break; case I40IW_AE_SOURCE_RSVD: - /* fallthrough */ default: break; } @@ -3762,14 +3761,14 @@ static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp, LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID))); set_64bit_val(wqe, 56, info->entry[2].data); - /* fallthrough */ + fallthrough; case 2: set_64bit_val(wqe, 32, (LS_64(info->entry[1].cmd, I40IW_CQPSQ_UPESD_SDCMD) | LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID))); set_64bit_val(wqe, 40, info->entry[1].data); - /* fallthrough */ + fallthrough; case 1: set_64bit_val(wqe, 0, LS_64(info->entry[0].cmd, I40IW_CQPSQ_UPESD_SDCMD)); diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c index ae8b97c30665..e1085634b8d9 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_hw.c +++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c @@ -353,7 +353,6 @@ void i40iw_process_aeq(struct i40iw_device *iwdev) i40iw_cm_disconn(iwqp); break; case I40IW_AE_BAD_CLOSE: - /* fall through */ case I40IW_AE_RESET_SENT: i40iw_next_iw_state(iwqp, I40IW_QP_STATE_ERROR, 1, 0, 0); i40iw_cm_disconn(iwqp); @@ -413,7 +412,7 @@ void i40iw_process_aeq(struct i40iw_device *iwdev) case I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG: case I40IW_AE_UDA_XMIT_DGRAM_TOO_SHORT: ctx_info->err_rq_idx_valid = false; - /* fall through */ + fallthrough; default: if (!info->sq && ctx_info->err_rq_idx_valid) { ctx_info->err_rq_idx = info->wqe_idx; diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c index 9c96ece5e7f3..58a433135a03 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_main.c +++ b/drivers/infiniband/hw/i40iw/i40iw_main.c @@ -1489,36 +1489,35 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev) iwdev->iw_status = 0; i40iw_port_ibevent(iwdev); i40iw_destroy_rdma_device(iwdev->iwibdev); - /* fallthrough */ + fallthrough; case IP_ADDR_REGISTERED: if (!iwdev->reset) i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx); - /* fallthrough */ - /* fallthrough */ + fallthrough; case PBLE_CHUNK_MEM: i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc); - /* fallthrough */ + fallthrough; case CEQ_CREATED: i40iw_dele_ceqs(iwdev); - /* fallthrough */ + fallthrough; case AEQ_CREATED: i40iw_destroy_aeq(iwdev); - /* fallthrough */ + fallthrough; case IEQ_CREATED: i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, iwdev->reset); - /* fallthrough */ + fallthrough; case ILQ_CREATED: i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, iwdev->reset); - /* fallthrough */ + fallthrough; case CCQ_CREATED: i40iw_destroy_ccq(iwdev); - /* fallthrough */ + fallthrough; case HMC_OBJS_CREATED: i40iw_del_hmc_objects(dev, dev->hmc_info, true, iwdev->reset); - /* fallthrough */ + fallthrough; case CQP_CREATED: i40iw_destroy_cqp(iwdev, true); - /* fallthrough */ + fallthrough; case INITIAL_STATE: i40iw_cleanup_cm_core(&iwdev->cm_core); if (iwdev->vsi.pestat) { @@ -1528,7 +1527,6 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev) i40iw_del_init_mem(iwdev); break; case INVALID_STATE: - /* fallthrough */ default: i40iw_pr_err("bad init_state = %d\n", iwdev->init_state); break; diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c index d9c7ae6a7030..924be4b03c9a 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_puda.c +++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c @@ -814,13 +814,13 @@ void i40iw_puda_dele_resources(struct i40iw_sc_vsi *vsi, switch (rsrc->completion) { case PUDA_HASH_CRC_COMPLETE: i40iw_free_hash_desc(rsrc->hash_desc); - /* fall through */ 
+ fallthrough; case PUDA_QP_CREATED: if (!reset) i40iw_puda_free_qp(rsrc); i40iw_free_dma_mem(dev->hw, &rsrc->qpmem); - /* fallthrough */ + fallthrough; case PUDA_CQ_CREATED: if (!reset) i40iw_puda_free_cq(rsrc); diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c index 016524683e17..e07fb37af086 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_utils.c +++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c @@ -190,9 +190,8 @@ int i40iw_inetaddr_event(struct notifier_block *notifier, switch (event) { case NETDEV_DOWN: action = I40IW_ARP_DELETE; - /* Fall through */ + fallthrough; case NETDEV_UP: - /* Fall through */ case NETDEV_CHANGEADDR: /* Just skip if no need to handle ARP cache */ @@ -247,9 +246,8 @@ int i40iw_inet6addr_event(struct notifier_block *notifier, switch (event) { case NETDEV_DOWN: action = I40IW_ARP_DELETE; - /* Fall through */ + fallthrough; case NETDEV_UP: - /* Fall through */ case NETDEV_CHANGEADDR: i40iw_manage_arp_cache(iwdev, netdev->dev_addr, @@ -344,7 +342,7 @@ int i40iw_netdevice_event(struct notifier_block *notifier, switch (event) { case NETDEV_DOWN: iwdev->iw_status = 0; - /* Fall through */ + fallthrough; case NETDEV_UP: i40iw_port_ibevent(iwdev); break; diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index 6957e4f3404b..b51339328a51 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c @@ -810,7 +810,7 @@ void i40iw_hw_modify_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp, case I40IW_QP_STATE_RTS: if (iwqp->iwarp_state == I40IW_QP_STATE_IDLE) i40iw_send_reset(iwqp->cm_node); - /* fall through */ + fallthrough; case I40IW_QP_STATE_IDLE: case I40IW_QP_STATE_TERMINATE: case I40IW_QP_STATE_CLOSING: @@ -2144,7 +2144,6 @@ static int i40iw_post_send(struct ib_qp *ibqp, switch (ib_wr->opcode) { case IB_WR_SEND: - /* fall-through */ case IB_WR_SEND_WITH_INV: if (ib_wr->opcode == IB_WR_SEND) { if (ib_wr->send_flags & IB_SEND_SOLICITED) @@ -2201,7 +2200,7 @@ static int i40iw_post_send(struct ib_qp *ibqp, break; case IB_WR_RDMA_READ_WITH_INV: inv_stag = true; - /* fall-through*/ + fallthrough; case IB_WR_RDMA_READ: if (ib_wr->num_sge > I40IW_MAX_SGE_RD) { err = -EINVAL; diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index f8b936b76dcd..8a3436994f80 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c @@ -765,13 +765,13 @@ repoll: switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) { case MLX4_OPCODE_RDMA_WRITE_IMM: wc->wc_flags |= IB_WC_WITH_IMM; - /* fall through */ + fallthrough; case MLX4_OPCODE_RDMA_WRITE: wc->opcode = IB_WC_RDMA_WRITE; break; case MLX4_OPCODE_SEND_IMM: wc->wc_flags |= IB_WC_WITH_IMM; - /* fall through */ + fallthrough; case MLX4_OPCODE_SEND: case MLX4_OPCODE_SEND_INVAL: wc->opcode = IB_WC_SEND; diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 5e7910a517da..bd4f975e7f9a 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -784,7 +784,8 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port, props->ip_gids = true; props->gid_tbl_len = mdev->dev->caps.gid_table_len[port]; props->max_msg_sz = mdev->dev->caps.max_msg_sz; - props->pkey_tbl_len = 1; + if (mdev->dev->caps.pkey_table_len[port]) + props->pkey_tbl_len = 1; props->max_mtu = IB_MTU_4096; props->max_vl_num = 2; props->state = IB_PORT_DOWN; diff --git a/drivers/infiniband/hw/mlx4/mcg.c 
b/drivers/infiniband/hw/mlx4/mcg.c index d844831179cf..5e4ec9786081 100644 --- a/drivers/infiniband/hw/mlx4/mcg.c +++ b/drivers/infiniband/hw/mlx4/mcg.c @@ -944,7 +944,7 @@ int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port, switch (sa_mad->mad_hdr.method) { case IB_MGMT_METHOD_SET: may_create = 1; - /* fall through */ + fallthrough; case IB_SA_METHOD_DELETE: req = kzalloc(sizeof *req, GFP_KERNEL); if (!req) diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index f9ca6e000a81..2975f350b9fd 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -1578,12 +1578,12 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd, pd = to_mxrcd(init_attr->xrcd)->pd; xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn; init_attr->send_cq = to_mxrcd(init_attr->xrcd)->cq; - /* fall through */ + fallthrough; case IB_QPT_XRC_INI: if (!(to_mdev(pd->device)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)) return ERR_PTR(-ENOSYS); init_attr->recv_cq = init_attr->send_cq; - /* fall through */ + fallthrough; case IB_QPT_RC: case IB_QPT_UC: case IB_QPT_RAW_PACKET: @@ -1592,7 +1592,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd, return ERR_PTR(-ENOMEM); qp->pri.vid = 0xFFFF; qp->alt.vid = 0xFFFF; - /* fall through */ + fallthrough; case IB_QPT_UD: { err = create_qp_common(pd, init_attr, udata, 0, &qp); diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 0133ebb8d740..dceb0eb2bed1 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -121,13 +121,13 @@ static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe, switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) { case MLX5_OPCODE_RDMA_WRITE_IMM: wc->wc_flags |= IB_WC_WITH_IMM; - /* fall through */ + fallthrough; case MLX5_OPCODE_RDMA_WRITE: wc->opcode = IB_WC_RDMA_WRITE; break; case MLX5_OPCODE_SEND_IMM: wc->wc_flags |= IB_WC_WITH_IMM; - /* fall through */ + fallthrough; case MLX5_OPCODE_SEND: case MLX5_OPCODE_SEND_INVAL: wc->opcode = IB_WC_SEND; diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c index 454ce5de2de7..9bb9bb058932 100644 --- a/drivers/infiniband/hw/mlx5/mad.c +++ b/drivers/infiniband/hw/mlx5/mad.c @@ -250,9 +250,8 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, if (MLX5_CAP_GEN(dev->mdev, vport_counters) && method == IB_MGMT_METHOD_GET) return process_pma_cmd(dev, port_num, in, out); - /* fallthrough */ + fallthrough; case MLX5_IB_VENDOR_CLASS1: - /* fallthrough */ case MLX5_IB_VENDOR_CLASS2: case IB_MGMT_CLASS_CONG_MGMT: { if (method != IB_MGMT_METHOD_GET && diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index fbc45a5e76c5..d60d63221b14 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -2872,7 +2872,7 @@ static void mlx5_ib_handle_event(struct work_struct *_work) break; case MLX5_EVENT_TYPE_GENERAL_EVENT: handle_general_event(ibdev, work->param, &ibev); - /* fall through */ + fallthrough; default: goto out; } diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 59fce5fac7a3..5758dbe64045 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -416,7 +416,7 @@ static int sq_overhead(struct ib_qp_init_attr *attr) switch (attr->qp_type) { case IB_QPT_XRC_INI: size += sizeof(struct mlx5_wqe_xrc_seg); - /* fall through */ + fallthrough; case IB_QPT_RC: size += sizeof(struct mlx5_wqe_ctrl_seg) + max(sizeof(struct 
mlx5_wqe_atomic_seg) + @@ -441,7 +441,7 @@ static int sq_overhead(struct ib_qp_init_attr *attr) if (attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) size += sizeof(struct mlx5_wqe_eth_pad) + sizeof(struct mlx5_wqe_eth_seg); - /* fall through */ + fallthrough; case IB_QPT_SMI: case MLX5_IB_QPT_HW_GSI: size += sizeof(struct mlx5_wqe_ctrl_seg) + diff --git a/drivers/infiniband/hw/mthca/mthca_av.c b/drivers/infiniband/hw/mthca/mthca_av.c index 0823c0bc7e73..f051f4e06b53 100644 --- a/drivers/infiniband/hw/mthca/mthca_av.c +++ b/drivers/infiniband/hw/mthca/mthca_av.c @@ -115,7 +115,7 @@ static u8 ib_rate_to_memfree(u8 req_rate, u8 cur_rate) switch ((cur_rate - 1) / req_rate) { case 0: return MTHCA_RATE_MEMFREE_FULL; case 1: return MTHCA_RATE_MEMFREE_HALF; - case 2: /* fall through */ + case 2: case 3: return MTHCA_RATE_MEMFREE_QUARTER; default: return MTHCA_RATE_MEMFREE_EIGHTH; } diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index 6cdbec13756a..c1751c9a0f62 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -2134,7 +2134,7 @@ int ocrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, case IB_WR_SEND_WITH_IMM: hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT); hdr->immdt = ntohl(wr->ex.imm_data); - /* fall through */ + fallthrough; case IB_WR_SEND: hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT); ocrdma_build_send(qp, hdr, wr); @@ -2148,7 +2148,7 @@ int ocrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, case IB_WR_RDMA_WRITE_WITH_IMM: hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT); hdr->immdt = ntohl(wr->ex.imm_data); - /* fall through */ + fallthrough; case IB_WR_RDMA_WRITE: hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT); status = ocrdma_build_write(qp, hdr, wr); diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 4ce4e2eef6cc..b49bef94637e 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -3528,7 +3528,7 @@ static int __qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, break; case IB_WR_RDMA_READ_WITH_INV: SET_FIELD2(wqe->flags, RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG, 1); - /* fallthrough -- same is identical to RDMA READ */ + fallthrough; /* same is identical to RDMA READ */ case IB_WR_RDMA_READ: wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD; diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c index ca5ea734e3d0..44150be215bf 100644 --- a/drivers/infiniband/hw/qib/qib_iba6120.c +++ b/drivers/infiniband/hw/qib/qib_iba6120.c @@ -2973,11 +2973,11 @@ static u32 qib_6120_iblink_state(u64 ibcs) state = IB_PORT_ARMED; break; case IB_6120_L_STATE_ACTIVE: - /* fall through */ case IB_6120_L_STATE_ACT_DEFER: state = IB_PORT_ACTIVE; break; - default: /* fall through */ + default: + fallthrough; case IB_6120_L_STATE_DOWN: state = IB_PORT_DOWN; break; diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c index ea3ddb05cbad..0a6f26d4cb31 100644 --- a/drivers/infiniband/hw/qib/qib_iba7220.c +++ b/drivers/infiniband/hw/qib/qib_iba7220.c @@ -3586,11 +3586,11 @@ static u32 qib_7220_iblink_state(u64 ibcs) state = IB_PORT_ARMED; break; case IB_7220_L_STATE_ACTIVE: - /* fall through */ case IB_7220_L_STATE_ACT_DEFER: state = IB_PORT_ACTIVE; break; - default: /* fall through */ + default: + fallthrough; case IB_7220_L_STATE_DOWN: state = IB_PORT_DOWN; break; diff --git 
a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index 8bcbc884e5b6..a10eab89aee4 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -5508,11 +5508,11 @@ static u32 qib_7322_iblink_state(u64 ibcs) state = IB_PORT_ARMED; break; case IB_7322_L_STATE_ACTIVE: - /* fall through */ case IB_7322_L_STATE_ACT_DEFER: state = IB_PORT_ACTIVE; break; - default: /* fall through */ + default: + fallthrough; case IB_7322_L_STATE_DOWN: state = IB_PORT_DOWN; break; @@ -6533,7 +6533,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd) "Invalid num_vls %u, using 4 VLs\n", qib_num_cfg_vls); qib_num_cfg_vls = 4; - /* fall through */ + fallthrough; case 4: ppd->vls_supported = IB_VL_VL0_3; break; diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c index 79bb83222e8d..e7789e724f56 100644 --- a/drivers/infiniband/hw/qib/qib_mad.c +++ b/drivers/infiniband/hw/qib/qib_mad.c @@ -433,7 +433,7 @@ static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags) /* Bad mkey not a violation below level 2 */ if (ibp->rvp.mkeyprot < 2) break; - /* fall through */ + fallthrough; case IB_MGMT_METHOD_SET: case IB_MGMT_METHOD_TRAP_REPRESS: if (ibp->rvp.mkey_violations != 0xFFFF) @@ -828,7 +828,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, case IB_PORT_NOP: if (lstate == 0) break; - /* FALLTHROUGH */ + fallthrough; case IB_PORT_DOWN: if (lstate == 0) lstate = QIB_IB_LINKDOWN_ONLY; @@ -1928,7 +1928,7 @@ static int process_subn(struct ib_device *ibdev, int mad_flags, ret = IB_MAD_RESULT_SUCCESS; goto bail; } - /* FALLTHROUGH */ + fallthrough; default: smp->status |= IB_SMP_UNSUP_METH_ATTR; ret = reply(smp); @@ -1962,7 +1962,7 @@ static int process_subn(struct ib_device *ibdev, int mad_flags, ret = IB_MAD_RESULT_SUCCESS; goto bail; } - /* FALLTHROUGH */ + fallthrough; default: smp->status |= IB_SMP_UNSUP_METH_ATTR; ret = reply(smp); @@ -2322,7 +2322,7 @@ static int process_cc(struct ib_device *ibdev, int mad_flags, ret = cc_get_congestion_control_table(ccp, ibdev, port); goto bail; - /* FALLTHROUGH */ + fallthrough; default: ccp->status |= IB_SMP_UNSUP_METH_ATTR; ret = reply((struct ib_smp *) ccp); @@ -2339,7 +2339,7 @@ static int process_cc(struct ib_device *ibdev, int mad_flags, ret = cc_set_congestion_control_table(ccp, ibdev, port); goto bail; - /* FALLTHROUGH */ + fallthrough; default: ccp->status |= IB_SMP_UNSUP_METH_ATTR; ret = reply((struct ib_smp *) ccp); diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c index aaf7438258fa..3915e5b4a9bc 100644 --- a/drivers/infiniband/hw/qib/qib_rc.c +++ b/drivers/infiniband/hw/qib/qib_rc.c @@ -83,7 +83,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp, rvt_put_mr(e->rdma_sge.mr); e->rdma_sge.mr = NULL; } - /* FALLTHROUGH */ + fallthrough; case OP(ATOMIC_ACKNOWLEDGE): /* * We can increment the tail pointer now that the last @@ -92,7 +92,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp, */ if (++qp->s_tail_ack_queue > QIB_MAX_RDMA_ATOMIC) qp->s_tail_ack_queue = 0; - /* FALLTHROUGH */ + fallthrough; case OP(SEND_ONLY): case OP(ACKNOWLEDGE): /* Check for no next entry in the queue. 
*/ @@ -149,7 +149,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp, case OP(RDMA_READ_RESPONSE_FIRST): qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE); - /* FALLTHROUGH */ + fallthrough; case OP(RDMA_READ_RESPONSE_MIDDLE): qp->s_cur_sge = &qp->s_ack_rdma_sge; qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr; @@ -471,10 +471,10 @@ no_flow_control: * See qib_restart_rc(). */ qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu); - /* FALLTHROUGH */ + fallthrough; case OP(SEND_FIRST): qp->s_state = OP(SEND_MIDDLE); - /* FALLTHROUGH */ + fallthrough; case OP(SEND_MIDDLE): bth2 = qp->s_psn++ & QIB_PSN_MASK; ss = &qp->s_sge; @@ -510,10 +510,10 @@ no_flow_control: * See qib_restart_rc(). */ qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu); - /* FALLTHROUGH */ + fallthrough; case OP(RDMA_WRITE_FIRST): qp->s_state = OP(RDMA_WRITE_MIDDLE); - /* FALLTHROUGH */ + fallthrough; case OP(RDMA_WRITE_MIDDLE): bth2 = qp->s_psn++ & QIB_PSN_MASK; ss = &qp->s_sge; @@ -1807,7 +1807,7 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr, if (!ret) goto rnr_nak; qp->r_rcv_len = 0; - /* FALLTHROUGH */ + fallthrough; case OP(SEND_MIDDLE): case OP(RDMA_WRITE_MIDDLE): send_middle: @@ -1839,7 +1839,7 @@ send_middle: qp->r_rcv_len = 0; if (opcode == OP(SEND_ONLY)) goto no_immediate_data; - /* fall through -- for SEND_ONLY_WITH_IMMEDIATE */ + fallthrough; /* for SEND_ONLY_WITH_IMMEDIATE */ case OP(SEND_LAST_WITH_IMMEDIATE): send_last_imm: wc.ex.imm_data = ohdr->u.imm_data; diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c index 99e11c347130..8f8d61736656 100644 --- a/drivers/infiniband/hw/qib/qib_sdma.c +++ b/drivers/infiniband/hw/qib/qib_sdma.c @@ -763,7 +763,7 @@ void __qib_sdma_process_event(struct qib_pportdata *ppd, * bringing the link up with traffic active on * 7220, e.g. */ ss->go_s99_running = 1; - /* fall through -- and start dma engine */ + fallthrough; /* and start dma engine */ case qib_sdma_event_e10_go_hw_start: /* This reference means the state machine is started */ sdma_get(&ppd->sdma_state); diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c index e17b91e2c22a..554af4273a13 100644 --- a/drivers/infiniband/hw/qib/qib_uc.c +++ b/drivers/infiniband/hw/qib/qib_uc.c @@ -161,7 +161,7 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags) case OP(SEND_FIRST): qp->s_state = OP(SEND_MIDDLE); - /* FALLTHROUGH */ + fallthrough; case OP(SEND_MIDDLE): len = qp->s_len; if (len > pmtu) { @@ -185,7 +185,7 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags) case OP(RDMA_WRITE_FIRST): qp->s_state = OP(RDMA_WRITE_MIDDLE); - /* FALLTHROUGH */ + fallthrough; case OP(RDMA_WRITE_MIDDLE): len = qp->s_len; if (len > pmtu) { @@ -351,7 +351,7 @@ send_first: goto no_immediate_data; else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE)) goto send_last_imm; - /* FALLTHROUGH */ + fallthrough; case OP(SEND_MIDDLE): /* Check for invalid length PMTU or posted rwqe len. */ if (unlikely(tlen != (hdrsize + pmtu + 4))) @@ -440,7 +440,7 @@ rdma_first: wc.ex.imm_data = ohdr->u.rc.imm_data; goto rdma_last_imm; } - /* FALLTHROUGH */ + fallthrough; case OP(RDMA_WRITE_MIDDLE): /* Check for invalid length PMTU or posted rwqe len. 
 */
 	if (unlikely(tlen != (hdrsize + pmtu + 4)))
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 7acf9ba5358a..f6c01bad5a74 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -237,7 +237,7 @@ static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr,
 	case IB_QPT_GSI:
 		if (ib_qib_disable_sma)
 			break;
-		/* FALLTHROUGH */
+		fallthrough;
 	case IB_QPT_UD:
 		qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp);
 		break;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index afcc2abcf55c..9a8f2a9507be 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -238,7 +238,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
 			ret = -EINVAL;
 			goto err_qp;
 		}
-		/* fall through */
+		fallthrough;
 	case IB_QPT_RC:
 	case IB_QPT_UD:
 		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
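
The bulk of the hunks above replace the old /* fall through */ style comments with the fallthrough pseudo-keyword, which, unlike a comment, is visible to the compiler's -Wimplicit-fallthrough check (the comment form is not recognized by all compilers, notably clang). A minimal sketch of the pattern follows; the function, opcodes, and flag value are hypothetical and not taken from any driver above, and the macro definition in the comment is paraphrased from include/linux/compiler_attributes.h.

#include <linux/compiler.h>	/* pulls in compiler_attributes.h, which defines fallthrough */
#include <linux/errno.h>

/*
 * Illustrative only: the kernel defines the pseudo-keyword roughly as
 *
 *   #if __has_attribute(__fallthrough__)
 *   # define fallthrough	__attribute__((__fallthrough__))
 *   #else
 *   # define fallthrough	do {} while (0)
 *   #endif
 *
 * so a deliberate fall-through is expressed as a statement the compiler's
 * control-flow analysis understands, instead of a comment it may ignore.
 */
static int example_handle_opcode(int opcode, unsigned int *flags)
{
	switch (opcode) {
	case 1:			/* e.g. a *_WITH_IMM variant of an opcode */
		*flags |= 0x1;	/* record the immediate-data flag ... */
		fallthrough;	/* ... then share the common path below */
	case 2:			/* the plain opcode */
		return 0;
	default:
		return -EINVAL;
	}
}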