| author    | Tyrel Datwyler <tyreld@linux.ibm.com>            | 2021-01-14 14:31:36 -0600 |
|-----------|--------------------------------------------------|---------------------------|
| committer | Martin K. Petersen <martin.petersen@oracle.com>  | 2021-01-14 22:27:44 -0500 |
| commit    | 1d956ad853fc70f611ea47cef1792385dc1de1b3 (patch)  |                           |
| tree      | 08738f5d28105addf30a4ca0d7f63da79334d44c          |                           |
| parent    | d20046e64c099377f7e82583eb4ddf3a800fd19f (diff)   |                           |
scsi: ibmvfc: Add handlers to drain and complete Sub-CRQ responses
The logic for iterating over the Sub-CRQ responses is similar to that of
the primary CRQ. Add the necessary handlers for processing those responses.
Link: https://lore.kernel.org/r/20210114203148.246656-10-tyreld@linux.ibm.com
Reviewed-by: Brian King <brking@linux.vnet.ibm.com>
Signed-off-by: Tyrel Datwyler <tyreld@linux.ibm.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
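The commit message above notes that the Sub-CRQ iteration mirrors the primary CRQ path. As a point of reference, here is a minimal, self-contained sketch of that shared consumption pattern: poll the entry at the ring cursor, treat the high bit of `valid` as the "entry ready" flag set by the platform, advance the cursor with wraparound, and order payload reads behind the flag check. The `demo_*` names are illustrative only (kernel context is assumed for `rmb()`); the driver's real Sub-CRQ helpers are in the diff below.

```c
/* Hypothetical illustration of the valid-bit ring consumption pattern;
 * not taken from the patch. The real Sub-CRQ version is ibmvfc_next_scrq()
 * in the diff below.
 */
struct demo_crq {
	unsigned char valid;		/* top bit set when the entry is ready */
	/* ... response payload ... */
};

struct demo_queue {
	struct demo_crq *msgs;		/* ring of response entries */
	int size;			/* number of entries in the ring */
	int cur;			/* next entry to consume */
};

static struct demo_crq *demo_next_crq(struct demo_queue *q)
{
	struct demo_crq *crq = &q->msgs[q->cur];

	if (!(crq->valid & 0x80))
		return NULL;		/* nothing new at the cursor */

	if (++q->cur == q->size)	/* advance with wraparound */
		q->cur = 0;
	rmb();				/* read the payload only after seeing the valid bit */

	return crq;
}
```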
-rw-r--r-- | drivers/scsi/ibmvscsi/ibmvfc.c | 86 |
1 file changed, 86 insertions, 0 deletions
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 04efaa4e97b6..aba59a63318c 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -3485,6 +3485,92 @@ static int ibmvfc_toggle_scrq_irq(struct ibmvfc_queue *scrq, int enable)
 	return rc;
 }
 
+static void ibmvfc_handle_scrq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
+			       struct list_head *evt_doneq)
+{
+	struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba);
+
+	switch (crq->valid) {
+	case IBMVFC_CRQ_CMD_RSP:
+		break;
+	case IBMVFC_CRQ_XPORT_EVENT:
+		return;
+	default:
+		dev_err(vhost->dev, "Got and invalid message type 0x%02x\n", crq->valid);
+		return;
+	}
+
+	/* The only kind of payload CRQs we should get are responses to
+	 * things we send. Make sure this response is to something we
+	 * actually sent
+	 */
+	if (unlikely(!ibmvfc_valid_event(&evt->queue->evt_pool, evt))) {
+		dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n",
+			crq->ioba);
+		return;
+	}
+
+	if (unlikely(atomic_read(&evt->free))) {
+		dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
+			crq->ioba);
+		return;
+	}
+
+	spin_lock(&evt->queue->l_lock);
+	list_move_tail(&evt->queue_list, evt_doneq);
+	spin_unlock(&evt->queue->l_lock);
+}
+
+static struct ibmvfc_crq *ibmvfc_next_scrq(struct ibmvfc_queue *scrq)
+{
+	struct ibmvfc_crq *crq;
+
+	crq = &scrq->msgs.scrq[scrq->cur].crq;
+	if (crq->valid & 0x80) {
+		if (++scrq->cur == scrq->size)
+			scrq->cur = 0;
+		rmb();
+	} else
+		crq = NULL;
+
+	return crq;
+}
+
+static void ibmvfc_drain_sub_crq(struct ibmvfc_queue *scrq)
+{
+	struct ibmvfc_crq *crq;
+	struct ibmvfc_event *evt, *temp;
+	unsigned long flags;
+	int done = 0;
+	LIST_HEAD(evt_doneq);
+
+	spin_lock_irqsave(scrq->q_lock, flags);
+	while (!done) {
+		while ((crq = ibmvfc_next_scrq(scrq)) != NULL) {
+			ibmvfc_handle_scrq(crq, scrq->vhost, &evt_doneq);
+			crq->valid = 0;
+			wmb();
+		}
+
+		ibmvfc_toggle_scrq_irq(scrq, 1);
+		if ((crq = ibmvfc_next_scrq(scrq)) != NULL) {
+			ibmvfc_toggle_scrq_irq(scrq, 0);
+			ibmvfc_handle_scrq(crq, scrq->vhost, &evt_doneq);
+			crq->valid = 0;
+			wmb();
+		} else
+			done = 1;
+	}
+	spin_unlock_irqrestore(scrq->q_lock, flags);
+
+	list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) {
+		del_timer(&evt->timer);
+		list_del(&evt->queue_list);
+		ibmvfc_trc_end(evt);
+		evt->done(evt);
+	}
+}
+
 /**
  * ibmvfc_init_tgt - Set the next init job step for the target
  * @tgt:	ibmvfc target struct
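For context beyond this patch: `ibmvfc_drain_sub_crq()` is written so it can be driven from a per-queue Sub-CRQ interrupt. Below is a minimal sketch of how that hookup could look; the handler name `ibmvfc_interrupt_scsi` and its registration are assumptions here (the actual wiring lands elsewhere in the series), and only `ibmvfc_toggle_scrq_irq()` and `ibmvfc_drain_sub_crq()` come from the driver code above.

```c
/* Hypothetical sketch, not part of this patch: a hard-IRQ handler that masks
 * the Sub-CRQ interrupt and lets ibmvfc_drain_sub_crq() do the work.
 * Assumes <linux/interrupt.h> and the ibmvfc_queue definitions above.
 */
static irqreturn_t ibmvfc_interrupt_scsi(int irq, void *scsi_scrq)
{
	struct ibmvfc_queue *scrq = (struct ibmvfc_queue *)scsi_scrq;

	ibmvfc_toggle_scrq_irq(scrq, 0);	/* mask while draining */
	ibmvfc_drain_sub_crq(scrq);		/* re-enables the IRQ before returning */

	return IRQ_HANDLED;
}
```

The outer `while (!done)` loop in `ibmvfc_drain_sub_crq()` closes the race this structure creates: once the ring looks empty, the interrupt is re-enabled and the ring is polled one more time, so a response that arrived just before re-enabling is either handled immediately or raises a fresh interrupt.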