Diffstat (limited to 'drivers/scsi/lpfc/lpfc_sli.c')
-rw-r--r-- | drivers/scsi/lpfc/lpfc_sli.c | 62
1 file changed, 29 insertions(+), 33 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 182aaae60386..c21187c93a5f 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -11270,6 +11270,30 @@ lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
 	}
 }
 
+inline void lpfc_sli4_poll_eq(struct lpfc_queue *eq)
+{
+	struct lpfc_hba *phba = eq->phba;
+
+	/*
+	 * Unlocking an irq is one of the entry point to check
+	 * for re-schedule, but we are good for io submission
+	 * path as midlayer does a get_cpu to glue us in. Flush
+	 * out the invalidate queue so we can see the updated
+	 * value for flag.
+	 */
+	smp_rmb();
+
+	if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
+		/* We will not likely get the completion for the caller
+		 * during this iteration but i guess that's fine.
+		 * Future io's coming on this eq should be able to
+		 * pick it up.  As for the case of single io's, they
+		 * will be handled through a sched from polling timer
+		 * function which is currently triggered every 1msec.
+		 */
+		lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
+}
+
 /**
  * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
  * @phba: Pointer to HBA context object.
@@ -11309,7 +11333,7 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
 		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
 
-		lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH);
+		lpfc_sli4_poll_eq(eq);
 	} else {
 		/* For now, SLI2/3 will still use hbalock */
 		spin_lock_irqsave(&phba->hbalock, iflags);
@@ -15625,12 +15649,11 @@ void lpfc_sli4_poll_hbtimer(struct timer_list *t)
 {
 	struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
 	struct lpfc_queue *eq;
-	int i = 0;
 
 	rcu_read_lock();
 
 	list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
-		i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH);
+		lpfc_sli4_poll_eq(eq);
 	if (!list_empty(&phba->poll_list))
 		mod_timer(&phba->cpuhp_poll_timer,
 			  jiffies + msecs_to_jiffies(LPFC_POLL_HB));
@@ -15638,33 +15661,6 @@ void lpfc_sli4_poll_hbtimer(struct timer_list *t)
 	rcu_read_unlock();
 }
 
-inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path)
-{
-	struct lpfc_hba *phba = eq->phba;
-	int i = 0;
-
-	/*
-	 * Unlocking an irq is one of the entry point to check
-	 * for re-schedule, but we are good for io submission
-	 * path as midlayer does a get_cpu to glue us in. Flush
-	 * out the invalidate queue so we can see the updated
-	 * value for flag.
-	 */
-	smp_rmb();
-
-	if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
-		/* We will not likely get the completion for the caller
-		 * during this iteration but i guess that's fine.
-		 * Future io's coming on this eq should be able to
-		 * pick it up.  As for the case of single io's, they
-		 * will be handled through a sched from polling timer
-		 * function which is currently triggered every 1msec.
-		 */
-		i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
-
-	return i;
-}
-
 static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
 {
 	struct lpfc_hba *phba = eq->phba;
@@ -21276,7 +21272,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
 		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
 
-		lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
+		lpfc_sli4_poll_eq(qp->hba_eq);
 		return 0;
 	}
 
@@ -21298,7 +21294,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
 		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
 
-		lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
+		lpfc_sli4_poll_eq(qp->hba_eq);
 		return 0;
 	}
 
@@ -21328,7 +21324,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
 		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
 
-		lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
+		lpfc_sli4_poll_eq(qp->hba_eq);
 		return 0;
 	}
 	return WQE_ERROR;
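The helper this patch introduces follows a common pattern: read a mode flag that another CPU may have just published, then opportunistically drain the event queue without re-arming its interrupt. Below is a minimal userspace sketch of that pattern for illustration only; it is not lpfc code. The event_queue, eq_poll, and eq_process_noarm names are invented, and C11 acquire/release atomics stand in for the kernel's smp_rmb() and READ_ONCE() pairing.

/* Userspace sketch of the "poll the EQ on the submission path" pattern.
 * Hypothetical names; build with -std=c11. Not lpfc code.
 */
#include <stdatomic.h>
#include <stdio.h>

enum eq_mode { EQ_INTERRUPT, EQ_POLL };

struct event_queue {
	_Atomic int mode;	/* published by the mode-switch path */
	int pending;		/* completions waiting to be reaped */
};

/* Drain completions without re-arming the (modeled) interrupt. The count
 * is still returned, but every caller here ignores it, mirroring how the
 * patch's call sites treat lpfc_sli4_process_eq().
 */
static int eq_process_noarm(struct event_queue *eq)
{
	int done = eq->pending;

	eq->pending = 0;
	return done;
}

/* Counterpart of the new lpfc_sli4_poll_eq(): takes only the queue and
 * returns nothing.
 */
static void eq_poll(struct event_queue *eq)
{
	/* The acquire load pairs with the release store that flips the
	 * mode, the role smp_rmb() + READ_ONCE() play in the kernel code.
	 */
	if (atomic_load_explicit(&eq->mode, memory_order_acquire) == EQ_POLL)
		eq_process_noarm(eq);
}

int main(void)
{
	struct event_queue eq = { .pending = 3 };

	atomic_store_explicit(&eq.mode, EQ_POLL, memory_order_release);
	eq_poll(&eq);				/* submission-path call site */
	printf("pending after poll: %d\n", eq.pending);	/* prints 0 */
	return 0;
}

As the diff itself shows, the old signature carried dead weight: the removed body never reads its path argument, and both the fast-path callers and the heartbeat timer discard the returned count (the timer summed it into i and then dropped it), so the helper can take only the queue and return void.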