author     Linus Torvalds <torvalds@linux-foundation.org>    2018-12-25 15:17:51 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>    2018-12-25 15:17:51 -0800
commit     e4b99d415c3908581d4703203e1e805f043a3e71
tree       8f5f88af8e3f583840ec40312466a36ae64a9a30 /kernel
parent     d8924c0d76aaa52e4811b5c64115d9a7f36cc73a
parent     c410abbbacb9b378365ba17a30df08b4b9eec64f
Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull irq updates from Thomas Gleixner:
"The interrupt department provides:
Core updates:
- Better spreading to NUMA nodes in the affinity management
- Support for more than one set of interrupts to spread out to allow
separate queues for separate functionality of a single device.
- Decouple the non queue interrupts from being managed. Those are
usually general interrupts for error handling etc. and those should
never be shut down. This is also a preparation to utilize the
spreading mechanism for initial spreading of non-managed interrupts
later.
- Make the single CPU target selection in the matrix allocator more
balanced so interrupts won't accumulate on single CPUs in certain
situations.
- A large spell checking patch so we don't end up fixing single typos
over and over.
Driver updates:
- A bunch of new irqchip drivers (RDA8810PL, Madera, imx-irqsteer)
- Updates for the 8MQ, F1C100s platform drivers
- A number of SPDX cleanups
- A workaround for a very broken GICv3 implementation on msm8996
which sports a botched register set.
- A platform-msi fix to prevent memory leakage
- Various cleanups"
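The interrupt-set and pre/post-vector items above surface to drivers through the extended struct irq_affinity. Below is a minimal sketch of how a driver might request both features; the device, the function name and the vector counts are hypothetical and only illustrate the fields:

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/pci.h>

/*
 * Hypothetical setup: 4 read queues and 2 write queues as two managed
 * interrupt sets, plus one pre-vector for the admin/error interrupt,
 * which stays unmanaged and is therefore never shut down.
 */
static int example_setup_irqs(struct pci_dev *pdev)
{
	int sets[2] = { 4, 2 };			/* vectors per interrupt set */
	struct irq_affinity affd = {
		.pre_vectors	= 1,		/* admin interrupt, not managed */
		.post_vectors	= 0,
		.nr_sets	= ARRAY_SIZE(sets),
		.sets		= sets,
	};

	/* 1 admin vector + 6 queue vectors, spread set by set */
	return pci_alloc_irq_vectors_affinity(pdev, 7, 7,
					      PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
					      &affd);
}

Note that min_vecs and max_vecs are both 7 here: when interrupt sets are used, the set sizes must account for the vectors exactly, so a reduced allocation is not possible.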
* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (37 commits)
genirq/affinity: Add is_managed to struct irq_affinity_desc
genirq/core: Introduce struct irq_affinity_desc
genirq/affinity: Remove excess indentation
irqchip/stm32: protect configuration registers with hwspinlock
dt-bindings: interrupt-controller: stm32: Document hwlock properties
irqchip: Add driver for imx-irqsteer controller
dt-bindings/irq: Add binding for Freescale IRQSTEER multiplexer
irqchip: Add driver for Cirrus Logic Madera codecs
genirq: Fix various typos in comments
irqchip/irq-imx-gpcv2: Add IRQCHIP_DECLARE for i.MX8MQ compatible
irqchip/irq-rda-intc: Fix return value check in rda8810_intc_init()
irqchip/irq-imx-gpcv2: Silence "fall through" warning
irqchip/gic-v3: Add quirk for msm8996 broken registers
irqchip/gic: Add support to device tree based quirks
dt-bindings/gic-v3: Add msm8996 compatible string
irqchip/sun4i: Add support for Allwinner ARMv5 F1C100s
irqchip/sun4i: Move IC specific register offsets to struct
irqchip/sun4i: Add a struct to hold global variables
dt-bindings: interrupt-controller: Add suniv interrupt-controller
irqchip: Add RDA8810PL interrupt driver
...
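The first two commits in the list are the pivot of the kernel/ diff below: every interface that used to take a bare cpumask per vector now takes a descriptor carrying the mask plus a managed flag, roughly:

struct irq_affinity_desc {
	struct cpumask	mask;		/* CPUs this vector is spread across */
	unsigned int	is_managed : 1;	/* kernel managed, not user settable */
};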
Diffstat (limited to 'kernel')
 kernel/irq/affinity.c  | 176
 kernel/irq/chip.c      |   2
 kernel/irq/devres.c    |   4
 kernel/irq/ipi.c       |   4
 kernel/irq/irq_sim.c   |  23
 kernel/irq/irqdesc.c   |  28
 kernel/irq/irqdomain.c |   4
 kernel/irq/manage.c    |   2
 kernel/irq/matrix.c    |  34
 kernel/irq/msi.c       |   8
 kernel/irq/spurious.c  |   6
11 files changed, 199 insertions, 92 deletions
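Most of those insertions are the affinity.c rework shown first below, where irq_create_affinity_masks() now returns an array of irq_affinity_desc instead of plain cpumasks, with the pre/post vectors left unmanaged on the default affinity. A consumer sketch under those assumptions; the helper name and the printout are made up, and the NULL return (which also covers the nothing-to-spread case) is folded into one error path:

#include <linux/interrupt.h>
#include <linux/slab.h>

static int example_dump_vector_affinity(int nvecs,
					const struct irq_affinity *affd)
{
	struct irq_affinity_desc *descs;
	int i;

	descs = irq_create_affinity_masks(nvecs, affd);
	if (!descs)
		return -ENOMEM;

	for (i = 0; i < nvecs; i++)
		pr_info("vec %d: managed=%u cpus=%*pbl\n", i,
			descs[i].is_managed,
			cpumask_pr_args(&descs[i].mask));

	kfree(descs);
	return 0;
}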
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c index f4f29b9d90ee..45b68b4ea48b 100644 --- a/kernel/irq/affinity.c +++ b/kernel/irq/affinity.c @@ -94,15 +94,15 @@ static int get_nodes_in_cpumask(cpumask_var_t *node_to_cpumask, return nodes; } -static int irq_build_affinity_masks(const struct irq_affinity *affd, - int startvec, int numvecs, - cpumask_var_t *node_to_cpumask, - const struct cpumask *cpu_mask, - struct cpumask *nmsk, - struct cpumask *masks) +static int __irq_build_affinity_masks(const struct irq_affinity *affd, + int startvec, int numvecs, int firstvec, + cpumask_var_t *node_to_cpumask, + const struct cpumask *cpu_mask, + struct cpumask *nmsk, + struct irq_affinity_desc *masks) { int n, nodes, cpus_per_vec, extra_vecs, done = 0; - int last_affv = affd->pre_vectors + numvecs; + int last_affv = firstvec + numvecs; int curvec = startvec; nodemask_t nodemsk = NODE_MASK_NONE; @@ -117,12 +117,13 @@ static int irq_build_affinity_masks(const struct irq_affinity *affd, */ if (numvecs <= nodes) { for_each_node_mask(n, nodemsk) { - cpumask_copy(masks + curvec, node_to_cpumask[n]); - if (++done == numvecs) - break; + cpumask_or(&masks[curvec].mask, + &masks[curvec].mask, + node_to_cpumask[n]); if (++curvec == last_affv) - curvec = affd->pre_vectors; + curvec = firstvec; } + done = numvecs; goto out; } @@ -130,7 +131,7 @@ static int irq_build_affinity_masks(const struct irq_affinity *affd, int ncpus, v, vecs_to_assign, vecs_per_node; /* Spread the vectors per node */ - vecs_per_node = (numvecs - (curvec - affd->pre_vectors)) / nodes; + vecs_per_node = (numvecs - (curvec - firstvec)) / nodes; /* Get the cpus on this node which are in the mask */ cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]); @@ -151,14 +152,15 @@ static int irq_build_affinity_masks(const struct irq_affinity *affd, cpus_per_vec++; --extra_vecs; } - irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec); + irq_spread_init_one(&masks[curvec].mask, nmsk, + cpus_per_vec); } done += v; if (done >= numvecs) break; if (curvec >= last_affv) - curvec = affd->pre_vectors; + curvec = firstvec; --nodes; } @@ -166,20 +168,77 @@ out: return done; } +/* + * build affinity in two stages: + * 1) spread present CPU on these vectors + * 2) spread other possible CPUs on these vectors + */ +static int irq_build_affinity_masks(const struct irq_affinity *affd, + int startvec, int numvecs, int firstvec, + cpumask_var_t *node_to_cpumask, + struct irq_affinity_desc *masks) +{ + int curvec = startvec, nr_present, nr_others; + int ret = -ENOMEM; + cpumask_var_t nmsk, npresmsk; + + if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL)) + return ret; + + if (!zalloc_cpumask_var(&npresmsk, GFP_KERNEL)) + goto fail; + + ret = 0; + /* Stabilize the cpumasks */ + get_online_cpus(); + build_node_to_cpumask(node_to_cpumask); + + /* Spread on present CPUs starting from affd->pre_vectors */ + nr_present = __irq_build_affinity_masks(affd, curvec, numvecs, + firstvec, node_to_cpumask, + cpu_present_mask, nmsk, masks); + + /* + * Spread on non present CPUs starting from the next vector to be + * handled. If the spreading of present CPUs already exhausted the + * vector space, assign the non present CPUs to the already spread + * out vectors. 
+ */ + if (nr_present >= numvecs) + curvec = firstvec; + else + curvec = firstvec + nr_present; + cpumask_andnot(npresmsk, cpu_possible_mask, cpu_present_mask); + nr_others = __irq_build_affinity_masks(affd, curvec, numvecs, + firstvec, node_to_cpumask, + npresmsk, nmsk, masks); + put_online_cpus(); + + if (nr_present < numvecs) + WARN_ON(nr_present + nr_others < numvecs); + + free_cpumask_var(npresmsk); + + fail: + free_cpumask_var(nmsk); + return ret; +} + /** * irq_create_affinity_masks - Create affinity masks for multiqueue spreading * @nvecs: The total number of vectors * @affd: Description of the affinity requirements * - * Returns the masks pointer or NULL if allocation failed. + * Returns the irq_affinity_desc pointer or NULL if allocation failed. */ -struct cpumask * +struct irq_affinity_desc * irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd) { int affvecs = nvecs - affd->pre_vectors - affd->post_vectors; int curvec, usedvecs; - cpumask_var_t nmsk, npresmsk, *node_to_cpumask; - struct cpumask *masks = NULL; + cpumask_var_t *node_to_cpumask; + struct irq_affinity_desc *masks = NULL; + int i, nr_sets; /* * If there aren't any vectors left after applying the pre/post @@ -188,15 +247,9 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd) if (nvecs == affd->pre_vectors + affd->post_vectors) return NULL; - if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL)) - return NULL; - - if (!zalloc_cpumask_var(&npresmsk, GFP_KERNEL)) - goto outcpumsk; - node_to_cpumask = alloc_node_to_cpumask(); if (!node_to_cpumask) - goto outnpresmsk; + return NULL; masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL); if (!masks) @@ -204,32 +257,29 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd) /* Fill out vectors at the beginning that don't need affinity */ for (curvec = 0; curvec < affd->pre_vectors; curvec++) - cpumask_copy(masks + curvec, irq_default_affinity); - - /* Stabilize the cpumasks */ - get_online_cpus(); - build_node_to_cpumask(node_to_cpumask); - - /* Spread on present CPUs starting from affd->pre_vectors */ - usedvecs = irq_build_affinity_masks(affd, curvec, affvecs, - node_to_cpumask, cpu_present_mask, - nmsk, masks); - + cpumask_copy(&masks[curvec].mask, irq_default_affinity); /* - * Spread on non present CPUs starting from the next vector to be - * handled. If the spreading of present CPUs already exhausted the - * vector space, assign the non present CPUs to the already spread - * out vectors. + * Spread on present CPUs starting from affd->pre_vectors. If we + * have multiple sets, build each sets affinity mask separately. */ - if (usedvecs >= affvecs) - curvec = affd->pre_vectors; - else - curvec = affd->pre_vectors + usedvecs; - cpumask_andnot(npresmsk, cpu_possible_mask, cpu_present_mask); - usedvecs += irq_build_affinity_masks(affd, curvec, affvecs, - node_to_cpumask, npresmsk, - nmsk, masks); - put_online_cpus(); + nr_sets = affd->nr_sets; + if (!nr_sets) + nr_sets = 1; + + for (i = 0, usedvecs = 0; i < nr_sets; i++) { + int this_vecs = affd->sets ? 
affd->sets[i] : affvecs; + int ret; + + ret = irq_build_affinity_masks(affd, curvec, this_vecs, + curvec, node_to_cpumask, masks); + if (ret) { + kfree(masks); + masks = NULL; + goto outnodemsk; + } + curvec += this_vecs; + usedvecs += this_vecs; + } /* Fill out vectors at the end that don't need affinity */ if (usedvecs >= affvecs) @@ -237,14 +287,14 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd) else curvec = affd->pre_vectors + usedvecs; for (; curvec < nvecs; curvec++) - cpumask_copy(masks + curvec, irq_default_affinity); + cpumask_copy(&masks[curvec].mask, irq_default_affinity); + + /* Mark the managed interrupts */ + for (i = affd->pre_vectors; i < nvecs - affd->post_vectors; i++) + masks[i].is_managed = 1; outnodemsk: free_node_to_cpumask(node_to_cpumask); -outnpresmsk: - free_cpumask_var(npresmsk); -outcpumsk: - free_cpumask_var(nmsk); return masks; } @@ -258,13 +308,21 @@ int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity { int resv = affd->pre_vectors + affd->post_vectors; int vecs = maxvec - resv; - int ret; + int set_vecs; if (resv > minvec) return 0; - get_online_cpus(); - ret = min_t(int, cpumask_weight(cpu_possible_mask), vecs) + resv; - put_online_cpus(); - return ret; + if (affd->nr_sets) { + int i; + + for (i = 0, set_vecs = 0; i < affd->nr_sets; i++) + set_vecs += affd->sets[i]; + } else { + get_online_cpus(); + set_vecs = cpumask_weight(cpu_possible_mask); + put_online_cpus(); + } + + return resv + min(set_vecs, vecs); } diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index a2b3d9de999c..34e969069488 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -929,7 +929,7 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle, break; /* * Bail out if the outer chip is not set up - * and the interrrupt supposed to be started + * and the interrupt supposed to be started * right away. */ if (WARN_ON(is_chained)) diff --git a/kernel/irq/devres.c b/kernel/irq/devres.c index 6a682c229e10..5d5378ea0afe 100644 --- a/kernel/irq/devres.c +++ b/kernel/irq/devres.c @@ -169,7 +169,7 @@ static void devm_irq_desc_release(struct device *dev, void *res) * @cnt: Number of consecutive irqs to allocate * @node: Preferred node on which the irq descriptor should be allocated * @owner: Owning module (can be NULL) - * @affinity: Optional pointer to an affinity mask array of size @cnt + * @affinity: Optional pointer to an irq_affinity_desc array of size @cnt * which hints where the irq descriptors should be allocated * and which default affinities to use * @@ -179,7 +179,7 @@ static void devm_irq_desc_release(struct device *dev, void *res) */ int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from, unsigned int cnt, int node, struct module *owner, - const struct cpumask *affinity) + const struct irq_affinity_desc *affinity) { struct irq_desc_devres *dr; int base; diff --git a/kernel/irq/ipi.c b/kernel/irq/ipi.c index 8b778e37dc6d..43e3d1be622c 100644 --- a/kernel/irq/ipi.c +++ b/kernel/irq/ipi.c @@ -56,7 +56,7 @@ int irq_reserve_ipi(struct irq_domain *domain, unsigned int next; /* - * The IPI requires a seperate HW irq on each CPU. We require + * The IPI requires a separate HW irq on each CPU. We require * that the destination mask is consecutive. If an * implementation needs to support holes, it can reserve * several IPI ranges. 
@@ -172,7 +172,7 @@ irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu)
 
 	/*
 	 * Get the real hardware irq number if the underlying implementation
-	 * uses a seperate irq per cpu. If the underlying implementation uses
+	 * uses a separate irq per cpu. If the underlying implementation uses
 	 * a single hardware irq for all cpus then the IPI send mechanism
 	 * needs to take care of the cpu destinations.
 	 */
diff --git a/kernel/irq/irq_sim.c b/kernel/irq/irq_sim.c
index dd20d0d528d4..98a20e1594ce 100644
--- a/kernel/irq/irq_sim.c
+++ b/kernel/irq/irq_sim.c
@@ -34,9 +34,20 @@ static struct irq_chip irq_sim_irqchip = {
 static void irq_sim_handle_irq(struct irq_work *work)
 {
 	struct irq_sim_work_ctx *work_ctx;
+	unsigned int offset = 0;
+	struct irq_sim *sim;
+	int irqnum;
 
 	work_ctx = container_of(work, struct irq_sim_work_ctx, work);
-	handle_simple_irq(irq_to_desc(work_ctx->irq));
+	sim = container_of(work_ctx, struct irq_sim, work_ctx);
+
+	while (!bitmap_empty(work_ctx->pending, sim->irq_count)) {
+		offset = find_next_bit(work_ctx->pending,
+				       sim->irq_count, offset);
+		clear_bit(offset, work_ctx->pending);
+		irqnum = irq_sim_irqnum(sim, offset);
+		handle_simple_irq(irq_to_desc(irqnum));
+	}
 }
 
 /**
@@ -63,6 +74,13 @@ int irq_sim_init(struct irq_sim *sim, unsigned int num_irqs)
 		return sim->irq_base;
 	}
 
+	sim->work_ctx.pending = bitmap_zalloc(num_irqs, GFP_KERNEL);
+	if (!sim->work_ctx.pending) {
+		kfree(sim->irqs);
+		irq_free_descs(sim->irq_base, num_irqs);
+		return -ENOMEM;
+	}
+
 	for (i = 0; i < num_irqs; i++) {
 		sim->irqs[i].irqnum = sim->irq_base + i;
 		sim->irqs[i].enabled = false;
@@ -89,6 +107,7 @@ EXPORT_SYMBOL_GPL(irq_sim_init);
 void irq_sim_fini(struct irq_sim *sim)
 {
 	irq_work_sync(&sim->work_ctx.work);
+	bitmap_free(sim->work_ctx.pending);
 	irq_free_descs(sim->irq_base, sim->irq_count);
 	kfree(sim->irqs);
 }
@@ -143,7 +162,7 @@ EXPORT_SYMBOL_GPL(devm_irq_sim_init);
 void irq_sim_fire(struct irq_sim *sim, unsigned int offset)
 {
 	if (sim->irqs[offset].enabled) {
-		sim->work_ctx.irq = irq_sim_irqnum(sim, offset);
+		set_bit(offset, sim->work_ctx.pending);
 		irq_work_queue(&sim->work_ctx.work);
 	}
 }
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 578d0e5f1b5b..ee062b7939d3 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -449,30 +449,34 @@ static void free_desc(unsigned int irq)
 }
 
 static int alloc_descs(unsigned int start, unsigned int cnt, int node,
-		       const struct cpumask *affinity, struct module *owner)
+		       const struct irq_affinity_desc *affinity,
+		       struct module *owner)
 {
-	const struct cpumask *mask = NULL;
 	struct irq_desc *desc;
-	unsigned int flags;
 	int i;
 
 	/* Validate affinity mask(s) */
 	if (affinity) {
-		for (i = 0, mask = affinity; i < cnt; i++, mask++) {
-			if (cpumask_empty(mask))
+		for (i = 0; i < cnt; i++) {
+			if (cpumask_empty(&affinity[i].mask))
 				return -EINVAL;
 		}
 	}
 
-	flags = affinity ?
IRQD_AFFINITY_MANAGED | IRQD_MANAGED_SHUTDOWN : 0; - mask = NULL; - for (i = 0; i < cnt; i++) { + const struct cpumask *mask = NULL; + unsigned int flags = 0; + if (affinity) { - node = cpu_to_node(cpumask_first(affinity)); - mask = affinity; + if (affinity->is_managed) { + flags = IRQD_AFFINITY_MANAGED | + IRQD_MANAGED_SHUTDOWN; + } + mask = &affinity->mask; + node = cpu_to_node(cpumask_first(mask)); affinity++; } + desc = alloc_desc(start + i, node, flags, mask, owner); if (!desc) goto err; @@ -575,7 +579,7 @@ static void free_desc(unsigned int irq) } static inline int alloc_descs(unsigned int start, unsigned int cnt, int node, - const struct cpumask *affinity, + const struct irq_affinity_desc *affinity, struct module *owner) { u32 i; @@ -705,7 +709,7 @@ EXPORT_SYMBOL_GPL(irq_free_descs); */ int __ref __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, - struct module *owner, const struct cpumask *affinity) + struct module *owner, const struct irq_affinity_desc *affinity) { int start, ret; diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index 3366d11c3e02..8b0be4bd6565 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c @@ -969,7 +969,7 @@ const struct irq_domain_ops irq_domain_simple_ops = { EXPORT_SYMBOL_GPL(irq_domain_simple_ops); int irq_domain_alloc_descs(int virq, unsigned int cnt, irq_hw_number_t hwirq, - int node, const struct cpumask *affinity) + int node, const struct irq_affinity_desc *affinity) { unsigned int hint; @@ -1281,7 +1281,7 @@ int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain, */ int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, unsigned int nr_irqs, int node, void *arg, - bool realloc, const struct cpumask *affinity) + bool realloc, const struct irq_affinity_desc *affinity) { int i, ret, virq; diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 9dbdccab3b6a..a4888ce4667a 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -915,7 +915,7 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { } #endif /* - * Interrupts which are not explicitely requested as threaded + * Interrupts which are not explicitly requested as threaded * interrupts rely on the implicit bh/preempt disable of the hard irq * context. So we need to disable bh here to avoid deadlocks and other * side effects. 
diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c index 1f0985adf193..30cc217b8631 100644 --- a/kernel/irq/matrix.c +++ b/kernel/irq/matrix.c @@ -14,6 +14,7 @@ struct cpumap { unsigned int available; unsigned int allocated; unsigned int managed; + unsigned int managed_allocated; bool initialized; bool online; unsigned long alloc_map[IRQ_MATRIX_SIZE]; @@ -145,6 +146,27 @@ static unsigned int matrix_find_best_cpu(struct irq_matrix *m, return best_cpu; } +/* Find the best CPU which has the lowest number of managed IRQs allocated */ +static unsigned int matrix_find_best_cpu_managed(struct irq_matrix *m, + const struct cpumask *msk) +{ + unsigned int cpu, best_cpu, allocated = UINT_MAX; + struct cpumap *cm; + + best_cpu = UINT_MAX; + + for_each_cpu(cpu, msk) { + cm = per_cpu_ptr(m->maps, cpu); + + if (!cm->online || cm->managed_allocated > allocated) + continue; + + best_cpu = cpu; + allocated = cm->managed_allocated; + } + return best_cpu; +} + /** * irq_matrix_assign_system - Assign system wide entry in the matrix * @m: Matrix pointer @@ -269,7 +291,7 @@ int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk, if (cpumask_empty(msk)) return -EINVAL; - cpu = matrix_find_best_cpu(m, msk); + cpu = matrix_find_best_cpu_managed(m, msk); if (cpu == UINT_MAX) return -ENOSPC; @@ -282,6 +304,7 @@ int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk, return -ENOSPC; set_bit(bit, cm->alloc_map); cm->allocated++; + cm->managed_allocated++; m->total_allocated++; *mapped_cpu = cpu; trace_irq_matrix_alloc_managed(bit, cpu, m, cm); @@ -395,6 +418,8 @@ void irq_matrix_free(struct irq_matrix *m, unsigned int cpu, clear_bit(bit, cm->alloc_map); cm->allocated--; + if(managed) + cm->managed_allocated--; if (cm->online) m->total_allocated--; @@ -464,13 +489,14 @@ void irq_matrix_debug_show(struct seq_file *sf, struct irq_matrix *m, int ind) seq_printf(sf, "Total allocated: %6u\n", m->total_allocated); seq_printf(sf, "System: %u: %*pbl\n", nsys, m->matrix_bits, m->system_map); - seq_printf(sf, "%*s| CPU | avl | man | act | vectors\n", ind, " "); + seq_printf(sf, "%*s| CPU | avl | man | mac | act | vectors\n", ind, " "); cpus_read_lock(); for_each_online_cpu(cpu) { struct cpumap *cm = per_cpu_ptr(m->maps, cpu); - seq_printf(sf, "%*s %4d %4u %4u %4u %*pbl\n", ind, " ", - cpu, cm->available, cm->managed, cm->allocated, + seq_printf(sf, "%*s %4d %4u %4u %4u %4u %*pbl\n", ind, " ", + cpu, cm->available, cm->managed, + cm->managed_allocated, cm->allocated, m->matrix_bits, cm->alloc_map); } cpus_read_unlock(); diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c index 4ca2fd46645d..ad26fbcfbfc8 100644 --- a/kernel/irq/msi.c +++ b/kernel/irq/msi.c @@ -23,11 +23,11 @@ * @nvec: The number of vectors used in this entry * @affinity: Optional pointer to an affinity mask array size of @nvec * - * If @affinity is not NULL then a an affinity array[@nvec] is allocated - * and the affinity masks from @affinity are copied. + * If @affinity is not NULL then an affinity array[@nvec] is allocated + * and the affinity masks and flags from @affinity are copied. 
*/ -struct msi_desc * -alloc_msi_entry(struct device *dev, int nvec, const struct cpumask *affinity) +struct msi_desc *alloc_msi_entry(struct device *dev, int nvec, + const struct irq_affinity_desc *affinity) { struct msi_desc *desc; diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index d867d6ddafdd..6d2fa6914b30 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c @@ -66,7 +66,7 @@ static int try_one_irq(struct irq_desc *desc, bool force) raw_spin_lock(&desc->lock); /* - * PER_CPU, nested thread interrupts and interrupts explicitely + * PER_CPU, nested thread interrupts and interrupts explicitly * marked polled are excluded from polling. */ if (irq_settings_is_per_cpu(desc) || @@ -76,7 +76,7 @@ static int try_one_irq(struct irq_desc *desc, bool force) /* * Do not poll disabled interrupts unless the spurious - * disabled poller asks explicitely. + * disabled poller asks explicitly. */ if (irqd_irq_disabled(&desc->irq_data) && !force) goto out; @@ -292,7 +292,7 @@ void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret) * So in case a thread is woken, we just note the fact and * defer the analysis to the next hardware interrupt. * - * The threaded handlers store whether they sucessfully + * The threaded handlers store whether they successfully * handled an interrupt and we check whether that number * changed versus the last invocation. * |
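As a closing worked example tying the affinity.c pieces together (the numbers are hypothetical and reuse the two-set sketch from earlier): the reworked irq_calc_affinity_vectors() caps the vector count at what the sets can actually consume instead of at the number of possible CPUs:

/*
 * pre_vectors = 1, post_vectors = 0, sets = {4, 2}, minvec = 7, maxvec = 16:
 *
 *   resv     = 1 + 0                      = 1
 *   vecs     = maxvec - resv              = 15
 *   set_vecs = 4 + 2                      = 6
 *   result   = resv + min(set_vecs, vecs) = 7
 *
 * Even though the device could take 16 vectors, only 7 are worth
 * allocating because the driver's queue sets cannot use more.
 */
int nvecs = irq_calc_affinity_vectors(7, 16, &affd);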