Diffstat (limited to 'drivers/iommu/dma-iommu.c')
-rw-r--r--  drivers/iommu/dma-iommu.c  |  50
1 file changed, 32 insertions(+), 18 deletions(-)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index a85ff75ad531..85163a83df2f 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -50,6 +50,8 @@ enum iommu_dma_queue_type {
struct iommu_dma_options {
enum iommu_dma_queue_type qt;
+ size_t fq_size;
+ unsigned int fq_timeout;
};
struct iommu_dma_cookie {
@@ -98,10 +100,12 @@ static int __init iommu_dma_forcedac_setup(char *str)
early_param("iommu.forcedac", iommu_dma_forcedac_setup);
/* Number of entries per flush queue */
-#define IOVA_FQ_SIZE 256
+#define IOVA_DEFAULT_FQ_SIZE 256
+#define IOVA_SINGLE_FQ_SIZE 32768
/* Timeout (in ms) after which entries are flushed from the queue */
-#define IOVA_FQ_TIMEOUT 10
+#define IOVA_DEFAULT_FQ_TIMEOUT 10
+#define IOVA_SINGLE_FQ_TIMEOUT 1000
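Worth noting: both queue sizes are powers of two (256 and 32768). That is what allows the ring arithmetic below to replace the old "% IOVA_FQ_SIZE" modulo with a bitwise AND against mod_mask = fq_size - 1. A minimal sketch of the equivalence (the fq_wrap() helper is hypothetical, not part of this patch):

static inline unsigned int fq_wrap(unsigned int idx, unsigned int mod_mask)
{
	/* (idx + 1) & mod_mask == (idx + 1) % (mod_mask + 1)
	 * whenever mod_mask + 1 is a power of two, e.g. mask 0xff for 256 */
	return (idx + 1) & mod_mask;
}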
/* Flush queue entry for deferred flushing */
struct iova_fq_entry {
@@ -113,18 +117,19 @@ struct iova_fq_entry {
/* Per-CPU flush queue structure */
struct iova_fq {
- struct iova_fq_entry entries[IOVA_FQ_SIZE];
- unsigned int head, tail;
spinlock_t lock;
+ unsigned int head, tail;
+ unsigned int mod_mask;
+ struct iova_fq_entry entries[];
};
#define fq_ring_for_each(i, fq) \
- for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)
+ for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) & (fq)->mod_mask)
static inline bool fq_full(struct iova_fq *fq)
{
assert_spin_locked(&fq->lock);
- return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head);
+ return (((fq->tail + 1) & fq->mod_mask) == fq->head);
}
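The head/tail invariant is untouched by the masking rewrite: the ring is empty when head == tail, and fq_full() deliberately leaves one slot unused so the full and empty states remain distinguishable. A self-contained sketch of the two states (demo struct only, works for any power-of-two size):

struct demo_ring { unsigned int head, tail, mod_mask; };

static bool demo_empty(const struct demo_ring *r)
{
	return r->head == r->tail;	/* nothing queued */
}

static bool demo_full(const struct demo_ring *r)
{
	/* advancing tail would collide with head: one slot is sacrificed */
	return ((r->tail + 1) & r->mod_mask) == r->head;
}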
static inline unsigned int fq_ring_add(struct iova_fq *fq)
@@ -133,7 +138,7 @@ static inline unsigned int fq_ring_add(struct iova_fq *fq)
assert_spin_locked(&fq->lock);
- fq->tail = (idx + 1) % IOVA_FQ_SIZE;
+ fq->tail = (idx + 1) & fq->mod_mask;
return idx;
}
@@ -155,7 +160,7 @@ static void fq_ring_free_locked(struct iommu_dma_cookie *cookie, struct iova_fq
fq->entries[idx].iova_pfn,
fq->entries[idx].pages);
- fq->head = (fq->head + 1) % IOVA_FQ_SIZE;
+ fq->head = (fq->head + 1) & fq->mod_mask;
}
}
@@ -240,7 +245,7 @@ static void queue_iova(struct iommu_dma_cookie *cookie,
if (!atomic_read(&cookie->fq_timer_on) &&
!atomic_xchg(&cookie->fq_timer_on, 1))
mod_timer(&cookie->fq_timer,
- jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
+ jiffies + msecs_to_jiffies(cookie->options.fq_timeout));
}
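Only the timeout constant changes here; the arm-once idiom around it is preserved. Annotated sketch of the pattern (same code as above, comments added; the timer handler is expected to clear fq_timer_on back to 0 when it fires):

/* Cheap read first: skip the atomic_xchg() in the common case where
 * the timer is already pending. The xchg() then guarantees exactly
 * one CPU sees the 0 -> 1 transition and arms the timer. */
if (!atomic_read(&cookie->fq_timer_on) &&
    !atomic_xchg(&cookie->fq_timer_on, 1))
	mod_timer(&cookie->fq_timer,
		  jiffies + msecs_to_jiffies(cookie->options.fq_timeout));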
static void iommu_dma_free_fq_single(struct iova_fq *fq)
@@ -279,27 +284,29 @@ static void iommu_dma_free_fq(struct iommu_dma_cookie *cookie)
iommu_dma_free_fq_percpu(cookie->percpu_fq);
}
-static void iommu_dma_init_one_fq(struct iova_fq *fq)
+static void iommu_dma_init_one_fq(struct iova_fq *fq, size_t fq_size)
{
int i;
fq->head = 0;
fq->tail = 0;
+ fq->mod_mask = fq_size - 1;
spin_lock_init(&fq->lock);
- for (i = 0; i < IOVA_FQ_SIZE; i++)
+ for (i = 0; i < fq_size; i++)
INIT_LIST_HEAD(&fq->entries[i].freelist);
}
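iommu_dma_init_one_fq() now derives the wrap mask from the runtime queue size; fq_size - 1 is a valid mask only for power-of-two sizes, which both IOVA_DEFAULT_FQ_SIZE and IOVA_SINGLE_FQ_SIZE satisfy. A hypothetical guard one could imagine here (not in the patch; is_power_of_2() comes from <linux/log2.h>):

	/* Hypothetical sanity check: mask arithmetic requires 2^n sizes */
	WARN_ON_ONCE(!is_power_of_2(fq_size));
	fq->mod_mask = fq_size - 1;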
static int iommu_dma_init_fq_single(struct iommu_dma_cookie *cookie)
{
+ size_t fq_size = cookie->options.fq_size;
struct iova_fq *queue;
- queue = vmalloc(sizeof(*queue));
+ queue = vmalloc(struct_size(queue, entries, fq_size));
if (!queue)
return -ENOMEM;
- iommu_dma_init_one_fq(queue);
+ iommu_dma_init_one_fq(queue, fq_size);
cookie->single_fq = queue;
return 0;
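struct_size() from <linux/overflow.h> sizes the flexible-array allocation with overflow checking; conceptually:

/* struct_size(queue, entries, fq_size) evaluates, overflow-safe, to
 *
 *	sizeof(*queue) + fq_size * sizeof(queue->entries[0])
 *
 * so the single-queue case reserves room for 32768 iova_fq_entry
 * slots in one contiguous vmalloc() allocation. */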
@@ -307,15 +314,17 @@ static int iommu_dma_init_fq_single(struct iommu_dma_cookie *cookie)
static int iommu_dma_init_fq_percpu(struct iommu_dma_cookie *cookie)
{
+ size_t fq_size = cookie->options.fq_size;
struct iova_fq __percpu *queue;
int cpu;
- queue = alloc_percpu(struct iova_fq);
+ queue = __alloc_percpu(struct_size(queue, entries, fq_size),
+ __alignof__(*queue));
if (!queue)
return -ENOMEM;
for_each_possible_cpu(cpu)
- iommu_dma_init_one_fq(per_cpu_ptr(queue, cpu));
+ iommu_dma_init_one_fq(per_cpu_ptr(queue, cpu), fq_size);
cookie->percpu_fq = queue;
return 0;
}
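The switch from alloc_percpu() to __alloc_percpu() is forced by the size becoming a runtime value: alloc_percpu() is a macro that hard-codes a compile-time sizeof. Roughly, from <linux/percpu.h>:

/* Simplified form of the kernel's alloc_percpu() macro:
 *
 *	#define alloc_percpu(type)					\
 *		(typeof(type) __percpu *)__alloc_percpu(sizeof(type),	\
 *							__alignof__(type))
 *
 * Calling __alloc_percpu() directly lets the struct_size() result,
 * known only at runtime, be passed in place of sizeof(struct iova_fq). */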
@@ -635,11 +644,16 @@ static bool dev_use_sg_swiotlb(struct device *dev, struct scatterlist *sg,
static void iommu_dma_init_options(struct iommu_dma_options *options,
struct device *dev)
{
- /* Shadowing IOTLB flushes do better with a single queue */
- if (dev->iommu->shadow_on_flush)
+ /* Shadowing IOTLB flushes do better with a single large queue */
+ if (dev->iommu->shadow_on_flush) {
options->qt = IOMMU_DMA_OPTS_SINGLE_QUEUE;
- else
+ options->fq_timeout = IOVA_SINGLE_FQ_TIMEOUT;
+ options->fq_size = IOVA_SINGLE_FQ_SIZE;
+ } else {
options->qt = IOMMU_DMA_OPTS_PER_CPU_QUEUE;
+ options->fq_size = IOVA_DEFAULT_FQ_SIZE;
+ options->fq_timeout = IOVA_DEFAULT_FQ_TIMEOUT;
+ }
}
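For reference, the two resulting configurations (values from the defines above):

/*  shadow_on_flush   queue type       fq_size   fq_timeout
 *  ---------------   --------------   -------   ----------
 *  true              single queue     32768     1000 ms
 *  false             per-CPU queues   256       10 ms
 */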
/**