Diffstat (limited to 'drivers/scsi/lpfc/lpfc_mem.c')
-rw-r--r-- | drivers/scsi/lpfc/lpfc_mem.c | 278
1 file changed, 251 insertions, 27 deletions
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 3fa65338d3f5..c61d8d692ede 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -1,9 +1,11 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
+ * "Broadcom" refers to Broadcom Limited and/or its subsidiaries.  *
  * Copyright (C) 2004-2014 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
- * www.emulex.com                                                  *
+ * www.broadcom.com                                                *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
  *                                                                 *
  * This program is free software; you can redistribute it and/or   *
@@ -24,10 +26,12 @@
 #include <linux/pci.h>
 #include <linux/interrupt.h>
 
+#include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_transport_fc.h>
+#include <scsi/fc/fc_fs.h>
 
-#include <scsi/scsi.h>
+#include <linux/nvme-fc-driver.h>
 
 #include "lpfc_hw4.h"
 #include "lpfc_hw.h"
@@ -35,8 +39,10 @@
 #include "lpfc_sli4.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
-#include "lpfc_scsi.h"
 #include "lpfc.h"
+#include "lpfc_scsi.h"
+#include "lpfc_nvme.h"
+#include "lpfc_nvmet.h"
 #include "lpfc_crtn.h"
 #include "lpfc_logmsg.h"
 
@@ -66,7 +72,7 @@ lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba) {
  * lpfc_mem_alloc - create and allocate all PCI and memory pools
  * @phba: HBA to allocate pools for
  *
- * Description: Creates and allocates PCI pools lpfc_scsi_dma_buf_pool,
+ * Description: Creates and allocates PCI pools lpfc_sg_dma_buf_pool,
  * lpfc_mbuf_pool, lpfc_hrb_pool.  Creates and allocates kmalloc-backed mempools
  * for LPFC_MBOXQ_t and lpfc_nodelist.  Also allocates the VPI bitmask.
  *
@@ -90,21 +96,23 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
 	else
 		i = SLI4_PAGE_SIZE;
 
-	phba->lpfc_scsi_dma_buf_pool =
-		pci_pool_create("lpfc_scsi_dma_buf_pool",
-				phba->pcidev,
-				phba->cfg_sg_dma_buf_size,
-				i,
-				0);
+	phba->lpfc_sg_dma_buf_pool =
+		pci_pool_create("lpfc_sg_dma_buf_pool",
+				phba->pcidev,
+				phba->cfg_sg_dma_buf_size,
+				i, 0);
+	if (!phba->lpfc_sg_dma_buf_pool)
+		goto fail;
+
 	} else {
-		phba->lpfc_scsi_dma_buf_pool =
-			pci_pool_create("lpfc_scsi_dma_buf_pool",
-				phba->pcidev, phba->cfg_sg_dma_buf_size,
-				align, 0);
-	}
+		phba->lpfc_sg_dma_buf_pool =
+			pci_pool_create("lpfc_sg_dma_buf_pool",
+				phba->pcidev, phba->cfg_sg_dma_buf_size,
+				align, 0);
 
-	if (!phba->lpfc_scsi_dma_buf_pool)
-		goto fail;
+		if (!phba->lpfc_sg_dma_buf_pool)
+			goto fail;
+	}
 	phba->lpfc_mbuf_pool = pci_pool_create("lpfc_mbuf_pool", phba->pcidev,
 						LPFC_BPL_SIZE,
@@ -170,12 +178,15 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
 					  LPFC_DEVICE_DATA_POOL_SIZE,
 					  sizeof(struct lpfc_device_data));
 		if (!phba->device_data_mem_pool)
-			goto fail_free_hrb_pool;
+			goto fail_free_drb_pool;
 	} else {
 		phba->device_data_mem_pool = NULL;
 	}
 
 	return 0;
+fail_free_drb_pool:
+	pci_pool_destroy(phba->lpfc_drb_pool);
+	phba->lpfc_drb_pool = NULL;
 fail_free_hrb_pool:
 	pci_pool_destroy(phba->lpfc_hrb_pool);
 	phba->lpfc_hrb_pool = NULL;
@@ -197,8 +208,8 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
 	pci_pool_destroy(phba->lpfc_mbuf_pool);
 	phba->lpfc_mbuf_pool = NULL;
 fail_free_dma_buf_pool:
-	pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
-	phba->lpfc_scsi_dma_buf_pool = NULL;
+	pci_pool_destroy(phba->lpfc_sg_dma_buf_pool);
+	phba->lpfc_sg_dma_buf_pool = NULL;
 fail:
 	return -ENOMEM;
 }
@@ -227,6 +238,9 @@ lpfc_mem_free(struct lpfc_hba *phba)
 	if (phba->lpfc_hrb_pool)
 		pci_pool_destroy(phba->lpfc_hrb_pool);
 	phba->lpfc_hrb_pool = NULL;
+	if (phba->txrdy_payload_pool)
+		pci_pool_destroy(phba->txrdy_payload_pool);
+	phba->txrdy_payload_pool = NULL;
 
 	if (phba->lpfc_hbq_pool)
 		pci_pool_destroy(phba->lpfc_hbq_pool);
@@ -258,8 +272,8 @@ lpfc_mem_free(struct lpfc_hba *phba)
 	phba->lpfc_mbuf_pool = NULL;
 
 	/* Free DMA buffer memory pool */
-	pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
-	phba->lpfc_scsi_dma_buf_pool = NULL;
+	pci_pool_destroy(phba->lpfc_sg_dma_buf_pool);
+	phba->lpfc_sg_dma_buf_pool = NULL;
 
 	/* Free Device Data memory pool */
 	if (phba->device_data_mem_pool) {
@@ -282,7 +296,7 @@
  * @phba: HBA to free memory for
  *
  * Description: Free memory from PCI and driver memory pools and also those
- * used : lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool, lpfc_hrb_pool.  Frees
+ * used : lpfc_sg_dma_buf_pool, lpfc_mbuf_pool, lpfc_hrb_pool.  Frees
  * kmalloc-backed mempools for LPFC_MBOXQ_t and lpfc_nodelist.  Also frees
  * the VPI bitmask.
  *
@@ -431,6 +445,44 @@ lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
 }
 
 /**
+ * lpfc_nvmet_buf_alloc - Allocate an nvmet_buf from the
+ * lpfc_sg_dma_buf_pool PCI pool
+ * @phba: HBA which owns the pool to allocate from
+ * @mem_flags: indicates if this is a priority (MEM_PRI) allocation
+ * @handle: used to return the DMA-mapped address of the nvmet_buf
+ *
+ * Description: Allocates a DMA-mapped buffer from the lpfc_sg_dma_buf_pool
+ * PCI pool.  Allocates via the generic pci_pool_alloc function.
+ *
+ * Returns:
+ *   pointer to the allocated nvmet_buf on success
+ *   NULL on failure
+ **/
+void *
+lpfc_nvmet_buf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
+{
+	void *ret;
+
+	ret = pci_pool_alloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, handle);
+	return ret;
+}
+
+/**
+ * lpfc_nvmet_buf_free - Free an nvmet_buf from the lpfc_sg_dma_buf_pool
+ * PCI pool
+ * @phba: HBA which owns the pool to return to
+ * @virt: nvmet_buf to free
+ * @dma: the DMA-mapped address of the lpfc_sg_dma_buf_pool to be freed
+ *
+ * Returns: None
+ **/
+void
+lpfc_nvmet_buf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
+{
+	pci_pool_free(phba->lpfc_sg_dma_buf_pool, virt, dma);
+}
+
+/**
  * lpfc_els_hbq_alloc - Allocate an HBQ buffer
  * @phba: HBA to allocate HBQ buffer for
  *
@@ -458,7 +510,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
 		kfree(hbqbp);
 		return NULL;
 	}
-	hbqbp->size = LPFC_BPL_SIZE;
+	hbqbp->total_size = LPFC_BPL_SIZE;
 	return hbqbp;
 }
 
@@ -518,7 +570,7 @@ lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
 		kfree(dma_buf);
 		return NULL;
 	}
-	dma_buf->size = LPFC_BPL_SIZE;
+	dma_buf->total_size = LPFC_DATA_BUF_SIZE;
 	return dma_buf;
 }
 
@@ -540,7 +592,134 @@ lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab)
 	pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
 	pci_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
 	kfree(dmab);
-	return;
+}
+
+/**
+ * lpfc_sli4_nvmet_alloc - Allocate an SLI4 Receive buffer
+ * @phba: HBA to allocate a receive buffer for
+ *
+ * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool
+ * PCI pool along with a non-DMA-mapped container for it.
+ *
+ * Notes: Not interrupt-safe.  Must be called with no locks held.
+ *
+ * Returns:
+ *   pointer to the receive buffer container on success
+ *   NULL on failure
+ **/
+struct rqb_dmabuf *
+lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
+{
+	struct rqb_dmabuf *dma_buf;
+	struct lpfc_iocbq *nvmewqe;
+	union lpfc_wqe128 *wqe;
+
+	dma_buf = kzalloc(sizeof(struct rqb_dmabuf), GFP_KERNEL);
+	if (!dma_buf)
+		return NULL;
+
+	dma_buf->hbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
+					    &dma_buf->hbuf.phys);
+	if (!dma_buf->hbuf.virt) {
+		kfree(dma_buf);
+		return NULL;
+	}
+	dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
+					    &dma_buf->dbuf.phys);
+	if (!dma_buf->dbuf.virt) {
+		pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
+			      dma_buf->hbuf.phys);
+		kfree(dma_buf);
+		return NULL;
+	}
+	dma_buf->total_size = LPFC_DATA_BUF_SIZE;
+
+	dma_buf->context = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx),
+				   GFP_KERNEL);
+	if (!dma_buf->context) {
+		pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt,
+			      dma_buf->dbuf.phys);
+		pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
+			      dma_buf->hbuf.phys);
+		kfree(dma_buf);
+		return NULL;
+	}
+
+	dma_buf->iocbq = lpfc_sli_get_iocbq(phba);
+	if (!dma_buf->iocbq) {
+		kfree(dma_buf->context);
+		pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt,
+			      dma_buf->dbuf.phys);
+		pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
+			      dma_buf->hbuf.phys);
+		kfree(dma_buf);
+		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+				"2621 Ran out of nvmet iocb/WQEs\n");
+		return NULL;
+	}
+	dma_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
+	nvmewqe = dma_buf->iocbq;
+	wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
+	/* Initialize WQE */
+	memset(wqe, 0, sizeof(union lpfc_wqe));
+	/* Word 7 */
+	bf_set(wqe_ct, &wqe->generic.wqe_com, SLI4_CT_RPI);
+	bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
+	bf_set(wqe_pu, &wqe->generic.wqe_com, 1);
+	/* Word 10 */
+	bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
+	bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0);
+	bf_set(wqe_qosd, &wqe->generic.wqe_com, 0);
+
+	dma_buf->iocbq->context1 = NULL;
+	spin_lock(&phba->sli4_hba.sgl_list_lock);
+	dma_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, dma_buf->iocbq);
+	spin_unlock(&phba->sli4_hba.sgl_list_lock);
+	if (!dma_buf->sglq) {
+		lpfc_sli_release_iocbq(phba, dma_buf->iocbq);
+		kfree(dma_buf->context);
+		pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt,
+			      dma_buf->dbuf.phys);
+		pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
+			      dma_buf->hbuf.phys);
+		kfree(dma_buf);
+		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+				"6132 Ran out of nvmet XRIs\n");
+		return NULL;
+	}
+	return dma_buf;
+}
+
+/**
+ * lpfc_sli4_nvmet_free - Frees a receive buffer
+ * @phba: HBA the buffer was allocated for
+ * @dmab: DMA Buffer container returned by lpfc_sli4_nvmet_alloc
+ *
+ * Description: Frees both the container and the DMA-mapped buffers returned
+ * by lpfc_sli4_nvmet_alloc.
+ *
+ * Notes: Can be called with or without locks held.
+ *
+ * Returns: None
+ **/
+void
+lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab)
+{
+	unsigned long flags;
+
+	__lpfc_clear_active_sglq(phba, dmab->sglq->sli4_lxritag);
+	dmab->sglq->state = SGL_FREED;
+	dmab->sglq->ndlp = NULL;
+
+	spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, flags);
+	list_add_tail(&dmab->sglq->list, &phba->sli4_hba.lpfc_nvmet_sgl_list);
+	spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, flags);
+
+	lpfc_sli_release_iocbq(phba, dmab->iocbq);
+	kfree(dmab->context);
+	pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
+	pci_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
+	kfree(dmab);
 }
 
 /**
@@ -565,13 +744,13 @@ lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
 		return;
 
 	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+		hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf);
 		/* Check whether HBQ is still in use */
 		spin_lock_irqsave(&phba->hbalock, flags);
 		if (!phba->hbq_in_use) {
 			spin_unlock_irqrestore(&phba->hbalock, flags);
 			return;
 		}
-		hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf);
 		list_del(&hbq_entry->dbuf.list);
 		if (hbq_entry->tag == -1) {
 			(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
@@ -586,3 +765,48 @@ lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
 	}
 	return;
 }
+
+/**
+ * lpfc_rq_buf_free - Free an RQ DMA buffer
+ * @phba: HBA the buffer is associated with
+ * @mp: Buffer to free
+ *
+ * Description: Frees the given DMA buffer by reposting it to its associated
+ * RQ so it can be reused.
+ *
+ * Notes: Takes phba->hbalock.  Can be called with or without other locks held.
+ *
+ * Returns: None
+ **/
+void
+lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
+{
+	struct lpfc_rqb *rqbp;
+	struct lpfc_rqe hrqe;
+	struct lpfc_rqe drqe;
+	struct rqb_dmabuf *rqb_entry;
+	unsigned long flags;
+	int rc;
+
+	if (!mp)
+		return;
+
+	rqb_entry = container_of(mp, struct rqb_dmabuf, hbuf);
+	rqbp = rqb_entry->hrq->rqbp;
+
+	spin_lock_irqsave(&phba->hbalock, flags);
+	list_del(&rqb_entry->hbuf.list);
+	hrqe.address_lo = putPaddrLow(rqb_entry->hbuf.phys);
+	hrqe.address_hi = putPaddrHigh(rqb_entry->hbuf.phys);
+	drqe.address_lo = putPaddrLow(rqb_entry->dbuf.phys);
+	drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys);
+	rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
+	if (rc < 0) {
+		(rqbp->rqb_free_buffer)(phba, rqb_entry);
+	} else {
+		list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
+		rqbp->buffer_count++;
+	}
+
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+}
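
A note on the error-unwind change in lpfc_mem_alloc(): before this patch, a device_data_mem_pool failure jumped to fail_free_hrb_pool, which would leave the just-created lpfc_drb_pool allocated; the new fail_free_drb_pool label restores the strict reverse-order unwind. A minimal userspace sketch of the same goto idiom, with malloc() standing in for pci_pool_create() and all names (pools_setup, struct pools) invented for illustration:

#include <stdlib.h>

/* Hypothetical stand-ins for the driver's PCI pools. */
struct pools {
	void *mbuf;
	void *hrb;
	void *drb;
};

/* Acquire in order; each failure label releases exactly what was
 * acquired before the jump, in reverse order, so nothing leaks. */
int pools_setup(struct pools *p)
{
	p->mbuf = malloc(16);
	if (!p->mbuf)
		goto fail;
	p->hrb = malloc(16);
	if (!p->hrb)
		goto fail_free_mbuf;
	p->drb = malloc(16);
	if (!p->drb)
		goto fail_free_hrb;
	return 0;

fail_free_hrb:
	free(p->hrb);
fail_free_mbuf:
	free(p->mbuf);
fail:
	return -1;
}

int main(void)
{
	struct pools p;

	if (pools_setup(&p))
		return 1;
	free(p.drb);
	free(p.hrb);
	free(p.mbuf);
	return 0;
}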
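The new lpfc_nvmet_buf_alloc()/lpfc_nvmet_buf_free() pair is a thin wrapper over pci_pool_alloc()/pci_pool_free() on the shared lpfc_sg_dma_buf_pool. Note that mem_flags is accepted but not used in this patch; the allocation is always GFP_KERNEL, so callers must not be in atomic context. A hypothetical caller, for illustration only (example_post_buffer and the surrounding flow are invented; only the two lpfc_nvmet_buf_* calls come from this patch):

/* Allocate one DMA-able buffer, hand it to hardware, return it to
 * the pool. Kernel context assumed; not a standalone program. */
static int example_post_buffer(struct lpfc_hba *phba)
{
	dma_addr_t phys;
	void *virt;

	virt = lpfc_nvmet_buf_alloc(phba, 0, &phys);	/* mem_flags unused */
	if (!virt)
		return -ENOMEM;

	/* ... post 'phys' to the adapter and wait for completion ... */

	lpfc_nvmet_buf_free(phba, virt, phys);
	return 0;
}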
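Both lpfc_in_buf_free() and the new lpfc_rq_buf_free() recover the enclosing buffer descriptor from an embedded struct lpfc_dmabuf with container_of(). A self-contained userspace demonstration of that pivot (struct dmabuf and struct rqb_demo are trimmed stand-ins, not the driver's real types):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dmabuf { void *virt; unsigned long phys; };

/* Trimmed stand-in for struct rqb_dmabuf. */
struct rqb_demo {
	struct dmabuf hbuf;
	struct dmabuf dbuf;
	int total_size;
};

int main(void)
{
	struct rqb_demo buf = { .total_size = 2048 };
	struct dmabuf *mp = &buf.hbuf;	/* what the free routine receives */
	struct rqb_demo *entry = container_of(mp, struct rqb_demo, hbuf);

	printf("recovered total_size = %d\n", entry->total_size);
	return 0;
}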
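When reposting, lpfc_rq_buf_free() splits each 64-bit DMA address into the two 32-bit words of a receive queue entry via the driver's putPaddrLow()/putPaddrHigh() macros. Assuming those are plain low/high word extractions, the equivalent arithmetic:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t phys = 0x0000123456789abcULL;	/* example DMA address */
	uint32_t lo = (uint32_t)phys;		/* putPaddrLow() assumed */
	uint32_t hi = (uint32_t)(phys >> 32);	/* putPaddrHigh() assumed */

	printf("address_hi=0x%08" PRIx32 " address_lo=0x%08" PRIx32 "\n",
	       hi, lo);
	return 0;
}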