author    Tyrel Datwyler <tyreld@linux.ibm.com>  2021-01-14 14:31:42 -0600
committer Martin K. Petersen <martin.petersen@oracle.com>  2021-01-14 22:31:03 -0500
commit    31750fbd7b6decc81d7736f236ea8be0f397df08 (patch)
tree      8cf83877dcb37f0cec01a2ee17df4e1a0cf3fe29
parent    cb72477be7290ce81ad81838039e106c194ab16f (diff)
scsi: ibmvfc: Send commands down HW Sub-CRQ when channelized
When the client has negotiated the use of channels, all vfcFrames are required to go down a Sub-CRQ channel or it is a protocol violation. If the adapter state is channelized, submit vfcFrames to the appropriate Sub-CRQ via the h_send_sub_crq() helper.

Link: https://lore.kernel.org/r/20210114203148.246656-16-tyreld@linux.ibm.com
Reviewed-by: Brian King <brking@linux.vnet.ibm.com>
Signed-off-by: Tyrel Datwyler <tyreld@linux.ibm.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c  39
1 file changed, 33 insertions(+), 6 deletions(-)
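For orientation, the core of the change is a per-queue dispatch: an event allocated from a Sub-CRQ backed queue is handed to the new H_SEND_SUB_CRQ helper together with that queue's VIOS cookie, while everything else still goes through the single CRQ. The stand-alone C sketch below illustrates that decision only; the structures and the send_crq()/send_sub_crq() stubs are simplified stand-ins for the driver's ibmvfc_queue/ibmvfc_event types and for ibmvfc_send_crq()/ibmvfc_send_sub_crq(), not the kernel code itself (the real hunks follow in the patch).

/* Stand-alone sketch (not kernel code): simplified stand-ins for the driver's
 * queue/event structures and for ibmvfc_send_crq()/ibmvfc_send_sub_crq(). */
#include <stdint.h>
#include <stdio.h>

enum ibmvfc_queue_fmt { IBMVFC_CRQ_FMT, IBMVFC_SUB_CRQ_FMT };

struct ibmvfc_queue {
	enum ibmvfc_queue_fmt fmt;	/* CRQ or Sub-CRQ backed queue */
	uint64_t vios_cookie;		/* per-channel cookie from the VIOS */
};

struct ibmvfc_event {
	struct ibmvfc_queue *queue;	/* queue the event was allocated from */
};

/* Stand-in for the H_SEND_CRQ path (single CRQ). */
static int send_crq(uint64_t w1, uint64_t w2)
{
	printf("H_SEND_CRQ     w1=%#llx w2=%#llx\n",
	       (unsigned long long)w1, (unsigned long long)w2);
	return 0;
}

/* Stand-in for the H_SEND_SUB_CRQ path; the cookie selects the channel. */
static int send_sub_crq(uint64_t cookie, uint64_t w1, uint64_t w2)
{
	printf("H_SEND_SUB_CRQ cookie=%#llx w1=%#llx w2=%#llx\n",
	       (unsigned long long)cookie, (unsigned long long)w1,
	       (unsigned long long)w2);
	return 0;
}

/* Mirrors the dispatch the patch adds to ibmvfc_send_event(): once channels
 * are negotiated a vfcFrame must go down its queue's Sub-CRQ; only
 * non-channelized queues may still use the single CRQ. */
static int submit_frame(struct ibmvfc_event *evt, uint64_t w1, uint64_t w2)
{
	if (evt->queue->fmt == IBMVFC_SUB_CRQ_FMT)
		return send_sub_crq(evt->queue->vios_cookie, w1, w2);

	return send_crq(w1, w2);
}

int main(void)
{
	struct ibmvfc_queue scrq = { IBMVFC_SUB_CRQ_FMT, 0x1234 };
	struct ibmvfc_event evt = { &scrq };

	return submit_frame(&evt, 0xdead, 0xbeef);
}

In the driver itself the Sub-CRQ call additionally passes zero for the unused word3/word4 arguments of the hcall, as the patch below shows.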
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index dd117ca8fef6..ca36c882a7e6 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -704,6 +704,15 @@ static int ibmvfc_send_crq(struct ibmvfc_host *vhost, u64 word1, u64 word2)
 	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
 }
 
+static int ibmvfc_send_sub_crq(struct ibmvfc_host *vhost, u64 cookie, u64 word1,
+			       u64 word2, u64 word3, u64 word4)
+{
+	struct vio_dev *vdev = to_vio_dev(vhost->dev);
+
+	return plpar_hcall_norets(H_SEND_SUB_CRQ, vdev->unit_address, cookie,
+				  word1, word2, word3, word4);
+}
+
 /**
  * ibmvfc_send_crq_init - Send a CRQ init message
  * @vhost:	ibmvfc host struct
@@ -1623,8 +1632,17 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
 
 	mb();
 
-	if ((rc = ibmvfc_send_crq(vhost, be64_to_cpu(crq_as_u64[0]),
-				  be64_to_cpu(crq_as_u64[1])))) {
+	if (evt->queue->fmt == IBMVFC_SUB_CRQ_FMT)
+		rc = ibmvfc_send_sub_crq(vhost,
+					 evt->queue->vios_cookie,
+					 be64_to_cpu(crq_as_u64[0]),
+					 be64_to_cpu(crq_as_u64[1]),
+					 0, 0);
+	else
+		rc = ibmvfc_send_crq(vhost, be64_to_cpu(crq_as_u64[0]),
+				     be64_to_cpu(crq_as_u64[1]));
+
+	if (rc) {
 		list_del(&evt->queue_list);
 		spin_unlock_irqrestore(&evt->queue->l_lock, flags);
 		del_timer(&evt->timer);
@@ -1842,6 +1860,7 @@ static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
 	struct ibmvfc_event *evt;
 	u32 tag_and_hwq = blk_mq_unique_tag(cmnd->request);
 	u16 hwq = blk_mq_unique_tag_to_hwq(tag_and_hwq);
+	u16 scsi_channel;
 	int rc;
 
 	if (unlikely((rc = fc_remote_port_chkready(rport))) ||
@@ -1852,7 +1871,13 @@ static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
 	}
 
 	cmnd->result = (DID_OK << 16);
-	evt = ibmvfc_get_event(&vhost->crq);
+	if (vhost->using_channels) {
+		scsi_channel = hwq % vhost->scsi_scrqs.active_queues;
+		evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[scsi_channel]);
+		evt->hwq = hwq % vhost->scsi_scrqs.active_queues;
+	} else
+		evt = ibmvfc_get_event(&vhost->crq);
+
 	ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
 	evt->cmnd = cmnd;
@@ -1868,8 +1893,6 @@ static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
 	}
 
 	vfc_cmd->correlation = cpu_to_be64(evt);
-	if (vhost->using_channels)
-		evt->hwq = hwq % vhost->scsi_scrqs.active_queues;
 
 	if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
 		return ibmvfc_send_event(evt, vhost, 0);
@@ -2200,7 +2223,11 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
 	spin_lock_irqsave(vhost->host->host_lock, flags);
 	if (vhost->state == IBMVFC_ACTIVE) {
-		evt = ibmvfc_get_event(&vhost->crq);
+		if (vhost->using_channels)
+			evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[0]);
+		else
+			evt = ibmvfc_get_event(&vhost->crq);
+
 		ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
 		tmf = ibmvfc_init_vfc_cmd(evt, sdev);
 		iu = ibmvfc_get_fcp_iu(vhost, tmf);