author | Tyrel Datwyler <tyreld@linux.ibm.com> | 2021-01-14 14:31:29 -0600
committer | Martin K. Petersen <martin.petersen@oracle.com> | 2021-01-14 22:27:43 -0500
commit | 225acf5f1aba3b469c1f762cbd14cdb4bd7aefc5 (patch)
tree | 1195c106eb6c469733d3d4a969d4e294004c799c
parent | 6ae208e5d2db6a99a8503a5571a775d27e8dd608 (diff)
scsi: ibmvfc: Move event pool init/free routines
The next patch in this series reworks the event pool allocation calls to
happen within the individual queue allocation routines instead of as
independent calls.
Move the init/free routines earlier in ibmvfc.c to prevent undefined
reference errors when calling these functions from the queue allocation
code. No functional change.
Link: https://lore.kernel.org/r/20210114203148.246656-3-tyreld@linux.ibm.com
Reviewed-by: Brian King <brking@linux.vnet.ibm.com>
Signed-off-by: Tyrel Datwyler <tyreld@linux.ibm.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
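
Moving the definitions rather than adding forward declarations keeps the change to plain reordering: in C, a static function must be defined, or at least declared, before its first caller in the same translation unit. A minimal, self-contained sketch of that constraint follows (hypothetical names, not taken from ibmvfc.c):

#include <stdio.h>

/* Definition placed ahead of its caller, mirroring what this patch does
 * for ibmvfc_init_event_pool()/ibmvfc_free_event_pool(). */
static int init_pool(int size)
{
	printf("initializing pool of %d events\n", size);
	return 0;
}

static int alloc_queue(int size)
{
	/* If init_pool() were still defined further down the file with no
	 * forward declaration, this call would fail to compile as an
	 * implicit declaration. */
	return init_pool(size);
}

int main(void)
{
	return alloc_queue(8);
}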
-rw-r--r-- | drivers/scsi/ibmvscsi/ibmvfc.c | 151
1 file changed, 76 insertions(+), 75 deletions(-)
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index e8a63cf69b5e..445d5d5bccb6 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -717,6 +717,82 @@ static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
 }
 
 /**
+ * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
+ * @vhost:	ibmvfc host who owns the event pool
+ *
+ * Returns zero on success.
+ **/
+static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost,
+				  struct ibmvfc_queue *queue)
+{
+	int i;
+	struct ibmvfc_event_pool *pool = &queue->evt_pool;
+
+	ENTER;
+	pool->size = max_requests + IBMVFC_NUM_INTERNAL_REQ;
+	pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
+	if (!pool->events)
+		return -ENOMEM;
+
+	pool->iu_storage = dma_alloc_coherent(vhost->dev,
+					      pool->size * sizeof(*pool->iu_storage),
+					      &pool->iu_token, 0);
+
+	if (!pool->iu_storage) {
+		kfree(pool->events);
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&queue->sent);
+	INIT_LIST_HEAD(&queue->free);
+	spin_lock_init(&queue->l_lock);
+
+	for (i = 0; i < pool->size; ++i) {
+		struct ibmvfc_event *evt = &pool->events[i];
+
+		atomic_set(&evt->free, 1);
+		evt->crq.valid = 0x80;
+		evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i));
+		evt->xfer_iu = pool->iu_storage + i;
+		evt->vhost = vhost;
+		evt->queue = queue;
+		evt->ext_list = NULL;
+		list_add_tail(&evt->queue_list, &queue->free);
+	}
+
+	LEAVE;
+	return 0;
+}
+
+/**
+ * ibmvfc_free_event_pool - Frees memory of the event pool of a host
+ * @vhost:	ibmvfc host who owns the event pool
+ *
+ **/
+static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost,
+				   struct ibmvfc_queue *queue)
+{
+	int i;
+	struct ibmvfc_event_pool *pool = &queue->evt_pool;
+
+	ENTER;
+	for (i = 0; i < pool->size; ++i) {
+		list_del(&pool->events[i].queue_list);
+		BUG_ON(atomic_read(&pool->events[i].free) != 1);
+		if (pool->events[i].ext_list)
+			dma_pool_free(vhost->sg_pool,
+				      pool->events[i].ext_list,
+				      pool->events[i].ext_list_token);
+	}
+
+	kfree(pool->events);
+	dma_free_coherent(vhost->dev,
+			  pool->size * sizeof(*pool->iu_storage),
+			  pool->iu_storage, pool->iu_token);
+	LEAVE;
+}
+
+/**
  * ibmvfc_free_queue - Deallocate queue
  * @vhost:	ibmvfc host struct
  * @queue:	ibmvfc queue struct
@@ -1313,81 +1389,6 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
 }
 
 /**
- * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
- * @vhost:	ibmvfc host who owns the event pool
- *
- * Returns zero on success.
- **/
-static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost,
-				  struct ibmvfc_queue *queue)
-{
-	int i;
-	struct ibmvfc_event_pool *pool = &queue->evt_pool;
-
-	ENTER;
-	pool->size = max_requests + IBMVFC_NUM_INTERNAL_REQ;
-	pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
-	if (!pool->events)
-		return -ENOMEM;
-
-	pool->iu_storage = dma_alloc_coherent(vhost->dev,
-					      pool->size * sizeof(*pool->iu_storage),
-					      &pool->iu_token, 0);
-
-	if (!pool->iu_storage) {
-		kfree(pool->events);
-		return -ENOMEM;
-	}
-
-	INIT_LIST_HEAD(&queue->sent);
-	INIT_LIST_HEAD(&queue->free);
-	spin_lock_init(&queue->l_lock);
-
-	for (i = 0; i < pool->size; ++i) {
-		struct ibmvfc_event *evt = &pool->events[i];
-		atomic_set(&evt->free, 1);
-		evt->crq.valid = 0x80;
-		evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i));
-		evt->xfer_iu = pool->iu_storage + i;
-		evt->vhost = vhost;
-		evt->queue = queue;
-		evt->ext_list = NULL;
-		list_add_tail(&evt->queue_list, &queue->free);
-	}
-
-	LEAVE;
-	return 0;
-}
-
-/**
- * ibmvfc_free_event_pool - Frees memory of the event pool of a host
- * @vhost:	ibmvfc host who owns the event pool
- *
- **/
-static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost,
-				   struct ibmvfc_queue *queue)
-{
-	int i;
-	struct ibmvfc_event_pool *pool = &queue->evt_pool;
-
-	ENTER;
-	for (i = 0; i < pool->size; ++i) {
-		list_del(&pool->events[i].queue_list);
-		BUG_ON(atomic_read(&pool->events[i].free) != 1);
-		if (pool->events[i].ext_list)
-			dma_pool_free(vhost->sg_pool,
-				      pool->events[i].ext_list,
-				      pool->events[i].ext_list_token);
-	}
-
-	kfree(pool->events);
-	dma_free_coherent(vhost->dev,
-			  pool->size * sizeof(*pool->iu_storage),
-			  pool->iu_storage, pool->iu_token);
-	LEAVE;
-}
-
-/**
  * ibmvfc_get_event - Gets the next free event in pool
  * @vhost:	ibmvfc host struct
  *
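
For orientation, below is a simplified, self-contained sketch of the pool layout that ibmvfc_init_event_pool() sets up: one array of event structures, one contiguous IU buffer, and each event's bus address (ioba) pointing at its own IU slot. The types, names, and the plain calloc() standing in for kcalloc()/dma_alloc_coherent() are hypothetical stand-ins, not the driver's real structures.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct xfer_iu { uint8_t raw[64]; };     /* stand-in for the real IU */

struct event {
	struct xfer_iu *xfer_iu;          /* CPU pointer to this event's IU */
	uint64_t ioba;                    /* bus address of the same IU     */
	int free;
};

struct pool {
	size_t size;
	struct event *events;             /* array of events (kcalloc)      */
	struct xfer_iu *iu_storage;       /* contiguous IU buffer (DMA)     */
	uint64_t iu_token;                /* bus address of iu_storage      */
};

static int pool_init(struct pool *pool, size_t size, uint64_t iu_token)
{
	pool->size = size;
	pool->events = calloc(size, sizeof(*pool->events));
	pool->iu_storage = calloc(size, sizeof(*pool->iu_storage));
	if (!pool->events || !pool->iu_storage)
		return -1;

	pool->iu_token = iu_token;
	for (size_t i = 0; i < size; i++) {
		struct event *evt = &pool->events[i];

		evt->free = 1;
		evt->xfer_iu = pool->iu_storage + i;
		evt->ioba = iu_token + i * sizeof(*evt->xfer_iu);
	}
	return 0;
}

int main(void)
{
	struct pool p;

	if (pool_init(&p, 4, 0x1000) == 0)
		printf("event 2 IU at bus address 0x%llx\n",
		       (unsigned long long)p.events[2].ioba);
	free(p.events);
	free(p.iu_storage);
	return 0;
}

The real routine additionally threads each event onto the queue's free list and records the owning vhost and queue, as shown in the added hunk above.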