author		Niklas Schnelle <schnelle@linux.ibm.com>	2023-09-28 16:31:40 +0200
committer	Joerg Roedel <jroedel@suse.de>			2023-10-02 08:43:03 +0200
commit		9f5b681e2a3f7d5bab02e68f3e68a50b23968c7c (patch)
tree		cadaf6afa6ae8f173e26b68fd8aee494e1503252
parent		32d5bc8b09c7cc48c511809e7c3b1755c7ecc5fa (diff)
iommu/dma: Use a large flush queue and timeout for shadow_on_flush
Flush queues currently use a fixed compile-time size of 256 entries. This being a power of 2 allows the compiler to use shift and mask instead of more expensive modulo operations. With per-CPU flush queues, larger queue sizes would hit per-CPU allocation limits; with a single flush queue, however, these limits do not apply. Single queues are also particularly suitable for virtualized environments with expensive IOTLB flushes, so they benefit especially from larger queues and thus fewer flushes.

To this end, re-order struct iova_fq so we can use a dynamic array, and introduce the flush queue size and timeout as new options in the iommu_dma_options struct. So as not to lose the shift-and-mask optimization, require a power of 2 for the length and use an explicit shift and mask instead of letting the compiler optimize this.

A large queue size and a 1 second timeout are then set for the shadow-on-flush case set by s390 paged memory guests. This brings performance on par with the previous s390-specific DMA API implementation.

Acked-by: Robin Murphy <robin.murphy@arm.com>
Reviewed-by: Matthew Rosato <mjrosato@linux.ibm.com> #s390
Signed-off-by: Niklas Schnelle <schnelle@linux.ibm.com>
Link: https://lore.kernel.org/r/20230928-dma_iommu-v13-6-9e5fc4dacc36@linux.ibm.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
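[Editor's note] The shift-and-mask trick the message relies on is easy to see in isolation: for a power-of-2 length len, (i + 1) & (len - 1) equals (i + 1) % len, but once len becomes a runtime value the compiler can no longer prove this, so the patch stores the mask explicitly in mod_mask. Below is a minimal userspace sketch of the same ring layout, with a flexible array member sized at allocation time; the fq_demo_* names are illustrative, not from the patch:

	#include <assert.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Simplified model of the flush-queue ring: head/tail indices wrap
	 * via an explicit mask, and the entry array is sized at allocation. */
	struct fq_demo {
		unsigned int head, tail;
		unsigned int mod_mask;	/* length - 1, length a power of 2 */
		int entries[];		/* flexible array member */
	};

	static struct fq_demo *fq_demo_alloc(unsigned int len)
	{
		struct fq_demo *fq;

		assert(len && (len & (len - 1)) == 0);	/* power of 2 */
		fq = calloc(1, sizeof(*fq) + len * sizeof(fq->entries[0]));
		if (fq)
			fq->mod_mask = len - 1;
		return fq;
	}

	static int fq_demo_full(const struct fq_demo *fq)
	{
		/* One slot stays free to distinguish "full" from "empty". */
		return ((fq->tail + 1) & fq->mod_mask) == fq->head;
	}

	static void fq_demo_push(struct fq_demo *fq, int val)
	{
		fq->entries[fq->tail] = val;
		fq->tail = (fq->tail + 1) & fq->mod_mask;	/* wrap without '%' */
	}

	int main(void)
	{
		struct fq_demo *fq = fq_demo_alloc(8);
		int i;

		for (i = 0; fq && !fq_demo_full(fq); i++)
			fq_demo_push(fq, i);
		printf("pushed %d of 8 slots (one stays free)\n", i);
		free(fq);
		return 0;
	}

As in the patch, the mask is computed once at init time, so the per-operation cost stays a single AND regardless of the queue size chosen at runtime.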
 drivers/iommu/dma-iommu.c | 50 ++++++++++++++++++++++++++++++++------------------
 1 file changed, 32 insertions(+), 18 deletions(-)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index a85ff75ad531..85163a83df2f 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -50,6 +50,8 @@ enum iommu_dma_queue_type {
 
 struct iommu_dma_options {
 	enum iommu_dma_queue_type qt;
+	size_t		fq_size;
+	unsigned int	fq_timeout;
 };
 
 struct iommu_dma_cookie {
@@ -98,10 +100,12 @@ static int __init iommu_dma_forcedac_setup(char *str)
 early_param("iommu.forcedac", iommu_dma_forcedac_setup);
 
 /* Number of entries per flush queue */
-#define IOVA_FQ_SIZE	256
+#define IOVA_DEFAULT_FQ_SIZE	256
+#define IOVA_SINGLE_FQ_SIZE	32768
 
 /* Timeout (in ms) after which entries are flushed from the queue */
-#define IOVA_FQ_TIMEOUT	10
+#define IOVA_DEFAULT_FQ_TIMEOUT	10
+#define IOVA_SINGLE_FQ_TIMEOUT	1000
 
 /* Flush queue entry for deferred flushing */
 struct iova_fq_entry {
@@ -113,18 +117,19 @@ struct iova_fq_entry {
 
 /* Per-CPU flush queue structure */
 struct iova_fq {
-	struct iova_fq_entry entries[IOVA_FQ_SIZE];
-	unsigned int head, tail;
 	spinlock_t lock;
+	unsigned int head, tail;
+	unsigned int mod_mask;
+	struct iova_fq_entry entries[];
 };
 
 #define fq_ring_for_each(i, fq) \
-	for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)
+	for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) & (fq)->mod_mask)
 
 static inline bool fq_full(struct iova_fq *fq)
 {
 	assert_spin_locked(&fq->lock);
-	return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head);
+	return (((fq->tail + 1) & fq->mod_mask) == fq->head);
 }
 
 static inline unsigned int fq_ring_add(struct iova_fq *fq)
@@ -133,7 +138,7 @@ static inline unsigned int fq_ring_add(struct iova_fq *fq)
 
 	assert_spin_locked(&fq->lock);
 
-	fq->tail = (idx + 1) % IOVA_FQ_SIZE;
+	fq->tail = (idx + 1) & fq->mod_mask;
 
 	return idx;
 }
@@ -155,7 +160,7 @@ static void fq_ring_free_locked(struct iommu_dma_cookie *cookie, struct iova_fq
 			       fq->entries[idx].iova_pfn,
 			       fq->entries[idx].pages);
 
-		fq->head = (fq->head + 1) % IOVA_FQ_SIZE;
+		fq->head = (fq->head + 1) & fq->mod_mask;
 	}
 }
 
@@ -240,7 +245,7 @@ static void queue_iova(struct iommu_dma_cookie *cookie,
 	if (!atomic_read(&cookie->fq_timer_on) &&
 	    !atomic_xchg(&cookie->fq_timer_on, 1))
 		mod_timer(&cookie->fq_timer,
-			  jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
+			  jiffies + msecs_to_jiffies(cookie->options.fq_timeout));
 }
 
 static void iommu_dma_free_fq_single(struct iova_fq *fq)
@@ -279,27 +284,29 @@ static void iommu_dma_free_fq(struct iommu_dma_cookie *cookie)
 		iommu_dma_free_fq_percpu(cookie->percpu_fq);
 }
 
-static void iommu_dma_init_one_fq(struct iova_fq *fq)
+static void iommu_dma_init_one_fq(struct iova_fq *fq, size_t fq_size)
 {
 	int i;
 
 	fq->head = 0;
 	fq->tail = 0;
+	fq->mod_mask = fq_size - 1;
 
 	spin_lock_init(&fq->lock);
 
-	for (i = 0; i < IOVA_FQ_SIZE; i++)
+	for (i = 0; i < fq_size; i++)
 		INIT_LIST_HEAD(&fq->entries[i].freelist);
 }
 
 static int iommu_dma_init_fq_single(struct iommu_dma_cookie *cookie)
 {
+	size_t fq_size = cookie->options.fq_size;
 	struct iova_fq *queue;
 
-	queue = vmalloc(sizeof(*queue));
+	queue = vmalloc(struct_size(queue, entries, fq_size));
 	if (!queue)
 		return -ENOMEM;
-	iommu_dma_init_one_fq(queue);
+	iommu_dma_init_one_fq(queue, fq_size);
 	cookie->single_fq = queue;
 
 	return 0;
@@ -307,15 +314,17 @@ static int iommu_dma_init_fq_single(struct iommu_dma_cookie *cookie)
 
 static int iommu_dma_init_fq_percpu(struct iommu_dma_cookie *cookie)
 {
+	size_t fq_size = cookie->options.fq_size;
 	struct iova_fq __percpu *queue;
 	int cpu;
 
-	queue = alloc_percpu(struct iova_fq);
+	queue = __alloc_percpu(struct_size(queue, entries, fq_size),
+			       __alignof__(*queue));
 	if (!queue)
 		return -ENOMEM;
 
 	for_each_possible_cpu(cpu)
-		iommu_dma_init_one_fq(per_cpu_ptr(queue, cpu));
+		iommu_dma_init_one_fq(per_cpu_ptr(queue, cpu), fq_size);
 	cookie->percpu_fq = queue;
 	return 0;
 }
@@ -635,11 +644,16 @@ static bool dev_use_sg_swiotlb(struct device *dev, struct scatterlist *sg,
 static void iommu_dma_init_options(struct iommu_dma_options *options,
 				   struct device *dev)
 {
-	/* Shadowing IOTLB flushes do better with a single queue */
-	if (dev->iommu->shadow_on_flush)
+	/* Shadowing IOTLB flushes do better with a single large queue */
+	if (dev->iommu->shadow_on_flush) {
 		options->qt = IOMMU_DMA_OPTS_SINGLE_QUEUE;
-	else
+		options->fq_timeout = IOVA_SINGLE_FQ_TIMEOUT;
+		options->fq_size = IOVA_SINGLE_FQ_SIZE;
+	} else {
 		options->qt = IOMMU_DMA_OPTS_PER_CPU_QUEUE;
+		options->fq_size = IOVA_DEFAULT_FQ_SIZE;
+		options->fq_timeout = IOVA_DEFAULT_FQ_TIMEOUT;
+	}
 }
 
 /**
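[Editor's note] On the allocation side, the patch switches from a fixed sizeof(*queue) to struct_size(queue, entries, fq_size) from <linux/overflow.h>, which computes sizeof(*queue) + fq_size * sizeof(queue->entries[0]) and saturates to SIZE_MAX on overflow so the allocation simply fails. It also replaces alloc_percpu() (which takes a type and thus cannot express a runtime size) with __alloc_percpu(), which takes an explicit size and alignment. A hedged userspace approximation of that sizing step, with demo_struct_size standing in for the kernel macro:

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct entry { void *ptr; };

	struct queue {
		unsigned int head, tail, mod_mask;
		struct entry entries[];	/* flexible array member */
	};

	/* Userspace stand-in for the kernel's struct_size(): saturates to
	 * SIZE_MAX on overflow so the subsequent allocation fails cleanly. */
	static size_t demo_struct_size(size_t base, size_t elem, size_t n)
	{
		if (n && elem > (SIZE_MAX - base) / n)
			return SIZE_MAX;
		return base + n * elem;
	}

	int main(void)
	{
		size_t fq_size = 32768;	/* IOVA_SINGLE_FQ_SIZE in the patch */
		size_t bytes = demo_struct_size(sizeof(struct queue),
						sizeof(struct entry), fq_size);
		struct queue *q = malloc(bytes);

		if (!q)
			return 1;
		q->mod_mask = fq_size - 1;	/* power-of-2 length, as required */
		printf("allocated %zu bytes for %zu entries\n", bytes, fq_size);
		free(q);
		return 0;
	}

The per-CPU variant needs the same computed size, which is why the patch passes __alignof__(*queue) explicitly: __alloc_percpu() does not infer alignment from a type the way the alloc_percpu() macro does.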