author    | Tyrel Datwyler <tyreld@linux.ibm.com>           | 2021-01-14 14:31:46 -0600
committer | Martin K. Petersen <martin.petersen@oracle.com> | 2021-01-14 22:31:04 -0500
commit    | 7eb3ccd884aec8591f78b5a5b39b6783db681c99 (patch)
tree      | e93b69769d267e2de410ba5a627356ca0bba57aa
parent    | a835f386f9709504a99346be011da92b5ea905e5 (diff)
scsi: ibmvfc: Purge SCSI channels after transport loss/reset
Grab the queue and list locks for each Sub-CRQ and move any uncompleted
events to the host purge list.
Link: https://lore.kernel.org/r/20210114203148.246656-20-tyreld@linux.ibm.com
Reviewed-by: Brian King <brking@linux.vnet.ibm.com>
Signed-off-by: Tyrel Datwyler <tyreld@linux.ibm.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
-rw-r--r-- | drivers/scsi/ibmvscsi/ibmvfc.c | 16
1 file changed, 16 insertions, 0 deletions
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index fc94665ead68..b2a7601d2c5b 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -1056,7 +1056,13 @@ static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
 static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
 {
         struct ibmvfc_event *evt, *pos;
+        struct ibmvfc_queue *queues = vhost->scsi_scrqs.scrqs;
         unsigned long flags;
+        int hwqs = 0;
+        int i;
+
+        if (vhost->using_channels)
+                hwqs = vhost->scsi_scrqs.active_queues;
 
         ibmvfc_dbg(vhost, "Purging all requests\n");
         spin_lock_irqsave(&vhost->crq.l_lock, flags);
@@ -1064,6 +1070,16 @@ static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
                 ibmvfc_fail_request(evt, error_code);
         list_splice_init(&vhost->crq.sent, &vhost->purge);
         spin_unlock_irqrestore(&vhost->crq.l_lock, flags);
+
+        for (i = 0; i < hwqs; i++) {
+                spin_lock_irqsave(queues[i].q_lock, flags);
+                spin_lock(&queues[i].l_lock);
+                list_for_each_entry_safe(evt, pos, &queues[i].sent, queue_list)
+                        ibmvfc_fail_request(evt, error_code);
+                list_splice_init(&queues[i].sent, &vhost->purge);
+                spin_unlock(&queues[i].l_lock);
+                spin_unlock_irqrestore(queues[i].q_lock, flags);
+        }
 }
 
 /**
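
As a reading aid, here is a sketch of how ibmvfc_purge_requests() reads with the patch applied, reconstructed from the hunks above. Only the lines visible in the diff context are certain; the single line that falls between the two hunks (the walk over the primary CRQ's sent list) is filled in by assumption, and the rest of the driver is unchanged.

/*
 * Sketch of ibmvfc_purge_requests() after this patch, reconstructed from
 * the hunks above. The list_for_each_entry_safe() walk over vhost->crq.sent
 * is the one line not shown in the diff context and is assumed.
 */
static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
{
        struct ibmvfc_event *evt, *pos;
        struct ibmvfc_queue *queues = vhost->scsi_scrqs.scrqs;
        unsigned long flags;
        int hwqs = 0;
        int i;

        /* Walk the Sub-CRQs only when SCSI channels are in use. */
        if (vhost->using_channels)
                hwqs = vhost->scsi_scrqs.active_queues;

        ibmvfc_dbg(vhost, "Purging all requests\n");

        /* Fail everything outstanding on the primary CRQ and move it to the purge list. */
        spin_lock_irqsave(&vhost->crq.l_lock, flags);
        list_for_each_entry_safe(evt, pos, &vhost->crq.sent, queue_list) /* assumed */
                ibmvfc_fail_request(evt, error_code);
        list_splice_init(&vhost->crq.sent, &vhost->purge);
        spin_unlock_irqrestore(&vhost->crq.l_lock, flags);

        /*
         * New in this patch: repeat the same drain for each active Sub-CRQ,
         * taking the queue lock (q_lock, a spinlock pointer) and then the
         * event list lock (l_lock) before failing and splicing its events.
         */
        for (i = 0; i < hwqs; i++) {
                spin_lock_irqsave(queues[i].q_lock, flags);
                spin_lock(&queues[i].l_lock);
                list_for_each_entry_safe(evt, pos, &queues[i].sent, queue_list)
                        ibmvfc_fail_request(evt, error_code);
                list_splice_init(&queues[i].sent, &vhost->purge);
                spin_unlock(&queues[i].l_lock);
                spin_unlock_irqrestore(queues[i].q_lock, flags);
        }
}

Both paths end by splicing the drained sent list onto vhost->purge, so events failed here are completed later by the driver's existing purge handling; the Sub-CRQ loop simply nests l_lock inside q_lock and reuses the same ibmvfc_fail_request() helper already used for the primary CRQ.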