| author | Linus Torvalds <torvalds@linux-foundation.org> | 2019-09-17 11:42:15 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-09-17 11:42:15 -0700 |
| commit | a572ba63298d04e2c5178e2abd82d6bd6e5677e7 (patch) | |
| tree | fd0eacfc05ccedb988505d28539d03919ac03349 /kernel | |
| parent | 258b16ec9a542d57e78f82e0af0e600bb4aec7fa (diff) | |
| parent | 9cc5b7fba57919a7d96f89ce79ba99d1dd1af965 (diff) | |
Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull core irq updates from Thomas Gleixner:
"Updates from the irq departement:
- Update the interrupt spreading code so it handles numa node with
different CPU counts properly.
- A large overhaul of the ARM GICv3 driver to support new PPI and SPI
ranges.
- Conversion of all alloc_fwnode() users to use physical addresses
instead of virtual addresses so the virtual addresses are not
leaked. The physical address is sufficient to identify the
associated interrupt chip.
- Add support for the Marvell MMP3 and Amlogic Meson SM1 interrupt chips.
- Enforce interrupt threading at compile time if RT is enabled.
- Small updates and improvements all over the place"
* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (37 commits)
irqchip/gic-v3-its: Fix LPI release for Multi-MSI devices
irqchip/uniphier-aidet: Use devm_platform_ioremap_resource()
irqdomain: Add the missing assignment of domain->fwnode for named fwnode
irqchip/mmp: Coexist with GIC root IRQ controller
irqchip/mmp: Mask off interrupts from other cores
irqchip/mmp: Add missing chained_irq_{enter,exit}()
irqchip/mmp: Do not use of_address_to_resource() to get mux regs
irqchip/meson-gpio: Add support for meson sm1 SoCs
dt-bindings: interrupt-controller: New binding for the meson sm1 SoCs
genirq/affinity: Remove const qualifier from node_to_cpumask argument
genirq/affinity: Spread vectors on node according to nr_cpu ratio
genirq/affinity: Improve __irq_build_affinity_masks()
irqchip: Remove dev_err() usage after platform_get_irq()
irqchip: Add include guard to irq-partition-percpu.h
irqchip/mmp: Do not call irq_set_default_host() on DT platforms
irqchip/gic-v3-its: Remove the redundant set_bit for lpi_map
irqchip/gic-v3: Add quirks for HIP06/07 invalid GICD_TYPER erratum 161010803
irqchip/gic: Skip DT quirks when evaluating IIDR-based quirks
irqchip/gic-v3: Warn about inconsistent implementations of extended ranges
irqchip/gic-v3: Add EPPI range support
...
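For orientation before the diff: the headline change in kernel/irq/affinity.c spreads vectors across NUMA nodes in proportion to each node's CPU count, walking the nodes from fewest to most CPUs so that every active node still gets at least one vector. The following standalone C program is a simplified userspace sketch of that allocation step, mirroring the alloc_nodes_vectors() logic in the diff; the three node sizes are invented example data and the kernel-specific cpumask plumbing is elided.

/*
 * Userspace model of the ratio-based per-node vector allocation added
 * in kernel/irq/affinity.c: sort nodes by ascending CPU count, then
 * give each node max(1, numvecs * ncpus / remaining_ncpus) vectors,
 * recomputing the remainders after every node.
 */
#include <stdio.h>
#include <stdlib.h>

struct node_vectors { unsigned id; unsigned ncpus; unsigned nvectors; };

static int ncpus_cmp(const void *l, const void *r)
{
	const struct node_vectors *ln = l, *rn = r;
	return (int)ln->ncpus - (int)rn->ncpus;
}

int main(void)
{
	/* Example data: three NUMA nodes with 24, 2 and 6 active CPUs */
	struct node_vectors nodes[] = { {0, 24, 0}, {1, 2, 0}, {2, 6, 0} };
	unsigned n, numvecs = 8, remaining_ncpus = 0;

	for (n = 0; n < 3; n++)
		remaining_ncpus += nodes[n].ncpus;

	/* Start from the smallest node so every node can get >= 1 vector */
	qsort(nodes, 3, sizeof(nodes[0]), ncpus_cmp);

	for (n = 0; n < 3; n++) {
		unsigned ncpus = nodes[n].ncpus;
		unsigned v = numvecs * ncpus / remaining_ncpus;

		nodes[n].nvectors = v ? v : 1;	/* max(1, ratio share) */
		remaining_ncpus -= ncpus;
		numvecs -= nodes[n].nvectors;
		printf("node %u: %2u CPUs -> %u vectors\n",
		       nodes[n].id, ncpus, nodes[n].nvectors);
	}
	return 0;
}

With 2, 6 and 24 CPUs competing for 8 vectors this prints 1, 1 and 6: the small nodes are served first, and the invariant vecs(X) <= ncpu(X) argued in the long comment inside the diff holds at every step.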
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/irq/affinity.c | 231 |
| -rw-r--r-- | kernel/irq/irqdomain.c | 10 |
| -rw-r--r-- | kernel/irq/manage.c | 2 |
3 files changed, 208 insertions, 35 deletions
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index 6fef48033f96..4d89ad4fae3b 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -7,6 +7,7 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/cpu.h>
+#include <linux/sort.h>
 
 static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
 				unsigned int cpus_per_vec)
@@ -94,6 +95,155 @@ static int get_nodes_in_cpumask(cpumask_var_t *node_to_cpumask,
 	return nodes;
 }
 
+struct node_vectors {
+	unsigned id;
+
+	union {
+		unsigned nvectors;
+		unsigned ncpus;
+	};
+};
+
+static int ncpus_cmp_func(const void *l, const void *r)
+{
+	const struct node_vectors *ln = l;
+	const struct node_vectors *rn = r;
+
+	return ln->ncpus - rn->ncpus;
+}
+
+/*
+ * Allocate vector number for each node, so that for each node:
+ *
+ * 1) the allocated number is >= 1
+ *
+ * 2) the allocated number is <= active CPU number of this node
+ *
+ * The actual allocated total vectors may be less than @numvecs when
+ * active total CPU number is less than @numvecs.
+ *
+ * Active CPUs means the CPUs in '@cpu_mask AND @node_to_cpumask[]'
+ * for each node.
+ */
+static void alloc_nodes_vectors(unsigned int numvecs,
+				cpumask_var_t *node_to_cpumask,
+				const struct cpumask *cpu_mask,
+				const nodemask_t nodemsk,
+				struct cpumask *nmsk,
+				struct node_vectors *node_vectors)
+{
+	unsigned n, remaining_ncpus = 0;
+
+	for (n = 0; n < nr_node_ids; n++) {
+		node_vectors[n].id = n;
+		node_vectors[n].ncpus = UINT_MAX;
+	}
+
+	for_each_node_mask(n, nodemsk) {
+		unsigned ncpus;
+
+		cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
+		ncpus = cpumask_weight(nmsk);
+
+		if (!ncpus)
+			continue;
+		remaining_ncpus += ncpus;
+		node_vectors[n].ncpus = ncpus;
+	}
+
+	numvecs = min_t(unsigned, remaining_ncpus, numvecs);
+
+	sort(node_vectors, nr_node_ids, sizeof(node_vectors[0]),
+	     ncpus_cmp_func, NULL);
+
+	/*
+	 * Allocate vectors for each node according to the ratio of this
+	 * node's nr_cpus to remaining un-assigned ncpus. 'numvecs' is
+	 * bigger than the number of active numa nodes. Always start the
+	 * allocation from the node with the minimum nr_cpus.
+	 *
+	 * This guarantees that each active node gets allocated at
+	 * least one vector, and the theory is simple: over-allocation
+	 * is only done when this node is assigned one vector, so
+	 * other nodes will be allocated >= 1 vector, since 'numvecs' is
+	 * bigger than the number of numa nodes.
+	 *
+	 * One perfect invariant is that the number of allocated vectors
+	 * for each node is <= the CPU count of this node:
+	 *
+	 * 1) suppose there are two nodes: A and B
+	 *	ncpu(X) is the CPU count of node X
+	 *	vecs(X) is the vector count allocated to node X via this
+	 *	algorithm
+	 *
+	 *	ncpu(A) <= ncpu(B)
+	 *	ncpu(A) + ncpu(B) = N
+	 *	vecs(A) + vecs(B) = V
+	 *
+	 *	vecs(A) = max(1, round_down(V * ncpu(A) / N))
+	 *	vecs(B) = V - vecs(A)
+	 *
+	 *	both N and V are integers, and 2 <= V <= N; suppose
+	 *	V = N - delta, and 0 <= delta <= N - 2
+	 *
+	 * 2) obviously vecs(A) <= ncpu(A) because:
+	 *
+	 *	if vecs(A) is 1, then vecs(A) <= ncpu(A) given
+	 *	ncpu(A) >= 1
+	 *
+	 *	otherwise,
+	 *		vecs(A) <= V * ncpu(A) / N <= ncpu(A), given V <= N
+	 *
+	 * 3) prove how vecs(B) <= ncpu(B):
+	 *
+	 *	if round_down(V * ncpu(A) / N) == 0, vecs(B) won't be
+	 *	over-allocated, so vecs(B) <= ncpu(B),
+	 *
+	 *	otherwise:
+	 *
+	 *	vecs(A) =
+	 *		round_down(V * ncpu(A) / N) =
+	 *		round_down((N - delta) * ncpu(A) / N) =
+	 *		round_down((N * ncpu(A) - delta * ncpu(A)) / N) >=
+	 *		round_down((N * ncpu(A) - delta * N) / N) =
+	 *		ncpu(A) - delta
+	 *
+	 *	then:
+	 *
+	 *	vecs(A) - V >= ncpu(A) - delta - V
+	 *	=>
+	 *	V - vecs(A) <= V + delta - ncpu(A)
+	 *	=>
+	 *	vecs(B) <= N - ncpu(A)
+	 *	=>
+	 *	vecs(B) <= ncpu(B)
+	 *
+	 * For nodes >= 3, it can be thought of as one node versus the
+	 * rest combined into another big node, which is exactly how this
+	 * algorithm is implemented: 'remaining_ncpus' & 'numvecs' are
+	 * re-calculated on every step, so finally for each node X:
+	 * vecs(X) <= ncpu(X).
+	 */
+	for (n = 0; n < nr_node_ids; n++) {
+		unsigned nvectors, ncpus;
+
+		if (node_vectors[n].ncpus == UINT_MAX)
+			continue;
+
+		WARN_ON_ONCE(numvecs == 0);
+
+		ncpus = node_vectors[n].ncpus;
+		nvectors = max_t(unsigned, 1,
+				 numvecs * ncpus / remaining_ncpus);
+		WARN_ON_ONCE(nvectors > ncpus);
+
+		node_vectors[n].nvectors = nvectors;
+
+		remaining_ncpus -= ncpus;
+		numvecs -= nvectors;
+	}
+}
+
 static int __irq_build_affinity_masks(unsigned int startvec,
 				      unsigned int numvecs,
 				      unsigned int firstvec,
@@ -102,10 +252,11 @@ static int __irq_build_affinity_masks(unsigned int startvec,
 				      struct cpumask *nmsk,
 				      struct irq_affinity_desc *masks)
 {
-	unsigned int n, nodes, cpus_per_vec, extra_vecs, done = 0;
+	unsigned int i, n, nodes, cpus_per_vec, extra_vecs, done = 0;
 	unsigned int last_affv = firstvec + numvecs;
 	unsigned int curvec = startvec;
 	nodemask_t nodemsk = NODE_MASK_NONE;
+	struct node_vectors *node_vectors;
 
 	if (!cpumask_weight(cpu_mask))
 		return 0;
@@ -126,42 +277,56 @@ static int __irq_build_affinity_masks(unsigned int startvec,
 		return numvecs;
 	}
 
-	for_each_node_mask(n, nodemsk) {
-		unsigned int ncpus, v, vecs_to_assign, vecs_per_node;
+	node_vectors = kcalloc(nr_node_ids,
+			       sizeof(struct node_vectors),
+			       GFP_KERNEL);
+	if (!node_vectors)
+		return -ENOMEM;
 
-		/* Spread the vectors per node */
-		vecs_per_node = (numvecs - (curvec - firstvec)) / nodes;
+	/* allocate vector number for each node */
+	alloc_nodes_vectors(numvecs, node_to_cpumask, cpu_mask,
+			    nodemsk, nmsk, node_vectors);
 
-		/* Get the cpus on this node which are in the mask */
-		cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
+	for (i = 0; i < nr_node_ids; i++) {
+		unsigned int ncpus, v;
+		struct node_vectors *nv = &node_vectors[i];
+
+		if (nv->nvectors == UINT_MAX)
+			continue;
 
-		/* Calculate the number of cpus per vector */
+		/* Get the cpus on this node which are in the mask */
+		cpumask_and(nmsk, cpu_mask, node_to_cpumask[nv->id]);
 		ncpus = cpumask_weight(nmsk);
-		vecs_to_assign = min(vecs_per_node, ncpus);
+		if (!ncpus)
+			continue;
+
+		WARN_ON_ONCE(nv->nvectors > ncpus);
 
 		/* Account for rounding errors */
-		extra_vecs = ncpus - vecs_to_assign * (ncpus / vecs_to_assign);
+		extra_vecs = ncpus - nv->nvectors * (ncpus / nv->nvectors);
 
-		for (v = 0; curvec < last_affv && v < vecs_to_assign;
-		     curvec++, v++) {
-			cpus_per_vec = ncpus / vecs_to_assign;
+		/* Spread allocated vectors on CPUs of the current node */
+		for (v = 0; v < nv->nvectors; v++, curvec++) {
+			cpus_per_vec = ncpus / nv->nvectors;
 
 			/* Account for extra vectors to compensate rounding errors */
 			if (extra_vecs) {
 				cpus_per_vec++;
 				--extra_vecs;
 			}
+
+			/*
+			 * wrapping has to be considered given 'startvec'
+			 * may start anywhere
+			 */
+			if (curvec >= last_affv)
+				curvec = firstvec;
 			irq_spread_init_one(&masks[curvec].mask, nmsk,
 						cpus_per_vec);
 		}
-
-		done += v;
-		if (done >= numvecs)
-			break;
-		if (curvec >= last_affv)
-			curvec = firstvec;
-		--nodes;
+		done += nv->nvectors;
 	}
+	kfree(node_vectors);
 	return done;
 }
 
@@ -174,7 +339,7 @@ static int irq_build_affinity_masks(unsigned int startvec, unsigned int numvecs,
 				    unsigned int firstvec,
 				    struct irq_affinity_desc *masks)
 {
-	unsigned int curvec = startvec, nr_present, nr_others;
+	unsigned int curvec = startvec, nr_present = 0, nr_others = 0;
 	cpumask_var_t *node_to_cpumask;
 	cpumask_var_t nmsk, npresmsk;
 	int ret = -ENOMEM;
@@ -189,15 +354,17 @@ static int irq_build_affinity_masks(unsigned int startvec, unsigned int numvecs,
 	if (!node_to_cpumask)
 		goto fail_npresmsk;
 
-	ret = 0;
 	/* Stabilize the cpumasks */
 	get_online_cpus();
 	build_node_to_cpumask(node_to_cpumask);
 
 	/* Spread on present CPUs starting from affd->pre_vectors */
-	nr_present = __irq_build_affinity_masks(curvec, numvecs,
-						firstvec, node_to_cpumask,
-						cpu_present_mask, nmsk, masks);
+	ret = __irq_build_affinity_masks(curvec, numvecs, firstvec,
+					 node_to_cpumask, cpu_present_mask,
+					 nmsk, masks);
+	if (ret < 0)
+		goto fail_build_affinity;
+	nr_present = ret;
 
 	/*
 	 * Spread on non present CPUs starting from the next vector to be
@@ -210,12 +377,16 @@ static int irq_build_affinity_masks(unsigned int startvec, unsigned int numvecs,
 	else
 		curvec = firstvec + nr_present;
 	cpumask_andnot(npresmsk, cpu_possible_mask, cpu_present_mask);
-	nr_others = __irq_build_affinity_masks(curvec, numvecs,
-					       firstvec, node_to_cpumask,
-					       npresmsk, nmsk, masks);
+	ret = __irq_build_affinity_masks(curvec, numvecs, firstvec,
+					 node_to_cpumask, npresmsk, nmsk,
+					 masks);
+	if (ret >= 0)
+		nr_others = ret;
+
+ fail_build_affinity:
 	put_online_cpus();
 
-	if (nr_present < numvecs)
+	if (ret >= 0)
 		WARN_ON(nr_present + nr_others < numvecs);
 
 	free_node_to_cpumask(node_to_cpumask);
@@ -225,7 +396,7 @@ static int irq_build_affinity_masks(unsigned int startvec, unsigned int numvecs,
  fail_nmsk:
 	free_cpumask_var(nmsk);
 
-	return ret;
+	return ret < 0 ? ret : 0;
 }
 
 static void default_calc_sets(struct irq_affinity *affd, unsigned int affvecs)
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 3078d0e48bba..132672b74e4b 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -31,7 +31,7 @@ struct irqchip_fwid {
 	struct fwnode_handle	fwnode;
 	unsigned int		type;
 	char			*name;
-	void *data;
+	phys_addr_t		*pa;
 };
 
 #ifdef CONFIG_GENERIC_IRQ_DEBUGFS
@@ -62,7 +62,8 @@ EXPORT_SYMBOL_GPL(irqchip_fwnode_ops);
  * domain struct.
  */
 struct fwnode_handle *__irq_domain_alloc_fwnode(unsigned int type, int id,
-						const char *name, void *data)
+						const char *name,
+						phys_addr_t *pa)
 {
 	struct irqchip_fwid *fwid;
 	char *n;
@@ -77,7 +78,7 @@ struct fwnode_handle *__irq_domain_alloc_fwnode(unsigned int type, int id,
 		n = kasprintf(GFP_KERNEL, "%s-%d", name, id);
 		break;
 	default:
-		n = kasprintf(GFP_KERNEL, "irqchip@%p", data);
+		n = kasprintf(GFP_KERNEL, "irqchip@%pa", pa);
 		break;
 	}
 
@@ -89,7 +90,7 @@ struct fwnode_handle *__irq_domain_alloc_fwnode(unsigned int type, int id,
 
 	fwid->type = type;
 	fwid->name = n;
-	fwid->data = data;
+	fwid->pa = pa;
 	fwid->fwnode.ops = &irqchip_fwnode_ops;
 	return &fwid->fwnode;
 }
@@ -148,6 +149,7 @@ struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
 		switch (fwid->type) {
 		case IRQCHIP_FWNODE_NAMED:
 		case IRQCHIP_FWNODE_NAMED_ID:
+			domain->fwnode = fwnode;
 			domain->name = kstrdup(fwid->name, GFP_KERNEL);
 			if (!domain->name) {
 				kfree(domain);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 9d50fbe5531a..1753486b440c 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -23,7 +23,7 @@
 
 #include "internals.h"
 
-#ifdef CONFIG_IRQ_FORCED_THREADING
+#if defined(CONFIG_IRQ_FORCED_THREADING) && !defined(CONFIG_PREEMPT_RT)
 __read_mostly bool force_irqthreads;
 EXPORT_SYMBOL_GPL(force_irqthreads);
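A note on the irqdomain.c hunk above: modern kernels hash plain '%p' output to avoid leaking kernel addresses, so naming a fwnode after a virtual pointer produced unstable, meaningless names, while '%pa' prints the physical address behind a phys_addr_t pointer, which is both stable and safe to expose. The following userspace sketch only illustrates the resulting naming scheme; the address is made up, and the kernel itself does this via kasprintf("irqchip@%pa", pa).

/* Userspace analogue of the "irqchip@<phys addr>" fwnode naming. */
#include <inttypes.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;	/* stand-in for the kernel type */

int main(void)
{
	phys_addr_t pa = 0xfee00000;	/* invented MMIO base address */
	char name[32];

	snprintf(name, sizeof(name), "irqchip@%" PRIx64, pa);
	puts(name);	/* prints: irqchip@fee00000 */
	return 0;
}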
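The manage.c hunk compiles only because a matching header change, outside this diffstat (which is limited to kernel/), supplies force_irqthreads when the variable itself is compiled out; presumably a constant true under PREEMPT_RT, since RT enforces threaded interrupt handlers unconditionally. A minimal userspace model of that pattern, with the CONFIG_* macros stubbed out by hand:

/*
 * Sketch of how force_irqthreads can become a compile-time constant
 * when PREEMPT_RT is enabled. The CONFIG_* macros are defined locally
 * here; this is a model of the mechanism, not the kernel header.
 */
#include <stdbool.h>
#include <stdio.h>

#define CONFIG_IRQ_FORCED_THREADING 1
#define CONFIG_PREEMPT_RT 1	/* comment out to get the runtime variant */

#if defined(CONFIG_IRQ_FORCED_THREADING) && !defined(CONFIG_PREEMPT_RT)
static bool force_irqthreads;	/* runtime-switchable, as in the kernel */
#else
#define force_irqthreads (true)	/* RT: threading is always on */
#endif

int main(void)
{
	/* With CONFIG_PREEMPT_RT defined, the compiler can fold every
	 * 'if (force_irqthreads)' branch at build time. */
	printf("force_irqthreads = %d\n", force_irqthreads);
	return 0;
}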