author     Waiman Long <longman@redhat.com>           2023-08-12 19:55:49 -0400
committer  Will Deacon <will@kernel.org>              2023-08-16 14:16:03 +0100
commit     4c1d2f56d685406fc6b452ca5f797bda62a06609 (patch)
tree       3205645f5ef1c130a76a414e1c609f12920d98e3 /drivers/perf
parent     1b0e3ea9301a422003d385cda8f8dee6c878ad05 (diff)
perf/arm-dmc620: Fix dmc620_pmu_irqs_lock/cpu_hotplug_lock circular lock dependency
The following circular locking dependency was reported when running
a CPU online/offline test on an arm64 system.
[ 84.195923] Chain exists of:
dmc620_pmu_irqs_lock --> cpu_hotplug_lock --> cpuhp_state-down
[ 84.207305] Possible unsafe locking scenario:
[ 84.213212]        CPU0                    CPU1
[ 84.217729]        ----                    ----
[ 84.222247]   lock(cpuhp_state-down);
[ 84.225899]                                lock(cpu_hotplug_lock);
[ 84.232068]                                lock(cpuhp_state-down);
[ 84.238237]   lock(dmc620_pmu_irqs_lock);
[ 84.242236]
*** DEADLOCK ***
The following locking order happens when dmc620_pmu_get_irq() calls
cpuhp_state_add_instance_nocalls().
lock(dmc620_pmu_irqs_lock) --> lock(cpu_hotplug_lock)
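For context, the pre-fix dmc620_pmu_get_irq() has roughly the following
shape (a trimmed sketch for illustration, not the verbatim driver code):

static int dmc620_pmu_get_irq(struct dmc620_pmu *dmc620_pmu, int irq_num)
{
	struct dmc620_pmu_irq *irq;

	mutex_lock(&dmc620_pmu_irqs_lock);
	/*
	 * __dmc620_pmu_get_irq() looks up (or allocates) the shared irq
	 * descriptor and, on first use, registers it through
	 * cpuhp_state_add_instance_nocalls(), which takes cpus_read_lock(),
	 * i.e. cpu_hotplug_lock -- all while dmc620_pmu_irqs_lock is held.
	 */
	irq = __dmc620_pmu_get_irq(irq_num);
	mutex_unlock(&dmc620_pmu_irqs_lock);

	if (IS_ERR(irq))
		return PTR_ERR(irq);

	dmc620_pmu->irq = irq;
	mutex_lock(&dmc620_pmu_irqs_lock);	/* pre-fix: the same mutex also   */
	list_add_rcu(&dmc620_pmu->pmus_node, &irq->pmus_node);
	mutex_unlock(&dmc620_pmu_irqs_lock);	/* guards the pmus_node list here */

	return 0;
}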
On the other hand, the calling sequence
cpuhp_thread_fun()
=> cpuhp_invoke_callback()
=> dmc620_pmu_cpu_teardown()
leads to the locking sequence
lock(cpuhp_state-down) => lock(dmc620_pmu_irqs_lock)
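That callback (pre-fix, again a trimmed sketch for illustration) runs from
the hotplug thread with the per-CPU cpuhp_state-down lock already held,
and then takes dmc620_pmu_irqs_lock:

static int dmc620_pmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
{
	struct dmc620_pmu_irq *irq = hlist_entry_safe(node, struct dmc620_pmu_irq, node);
	struct dmc620_pmu *dmc620_pmu;
	unsigned int target;

	if (cpu != irq->cpu)
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	/* We're only reading, but this isn't the place to be involving RCU */
	mutex_lock(&dmc620_pmu_irqs_lock);	/* pre-fix: nested inside cpuhp_state-down */
	list_for_each_entry(dmc620_pmu, &irq->pmus_node, pmus_node)
		perf_pmu_migrate_context(&dmc620_pmu->pmu, irq->cpu, target);
	mutex_unlock(&dmc620_pmu_irqs_lock);

	WARN_ON(irq_set_affinity(irq->irq_num, cpumask_of(target)));
	irq->cpu = target;
	return 0;
}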
Here, dmc620_pmu_irqs_lock protects both the dmc620_pmu_irqs list and the
pmus_node lists in the various dmc620_pmu instances. dmc620_pmu_get_irq()
requires protected access to dmc620_pmu_irqs, whereas
dmc620_pmu_cpu_teardown() needs protected access to the pmus_node lists.
Break this circular locking dependency by using two separate locks to
protect the dmc620_pmu_irqs list and the pmus_node lists, respectively.
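With the split, the two paths no longer take a common lock in conflicting
order, so the cycle reported above cannot form:

  dmc620_pmu_get_irq():       lock(dmc620_pmu_irqs_lock) --> lock(cpu_hotplug_lock)
  dmc620_pmu_cpu_teardown():  lock(cpuhp_state-down)     --> lock(dmc620_pmu_node_lock)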
Suggested-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Waiman Long <longman@redhat.com>
Link: https://lore.kernel.org/r/20230812235549.494174-1-longman@redhat.com
Signed-off-by: Will Deacon <will@kernel.org>
Diffstat (limited to 'drivers/perf')
-rw-r--r--  drivers/perf/arm_dmc620_pmu.c  19
1 file changed, 13 insertions(+), 6 deletions(-)
diff --git a/drivers/perf/arm_dmc620_pmu.c b/drivers/perf/arm_dmc620_pmu.c
index 9d0f01c4455a..30cea6859574 100644
--- a/drivers/perf/arm_dmc620_pmu.c
+++ b/drivers/perf/arm_dmc620_pmu.c
@@ -66,8 +66,13 @@
 #define DMC620_PMU_COUNTERn_OFFSET(n) \
 	(DMC620_PMU_COUNTERS_BASE + 0x28 * (n))
 
-static LIST_HEAD(dmc620_pmu_irqs);
+/*
+ * dmc620_pmu_irqs_lock: protects dmc620_pmu_irqs list
+ * dmc620_pmu_node_lock: protects pmus_node lists in all dmc620_pmu instances
+ */
 static DEFINE_MUTEX(dmc620_pmu_irqs_lock);
+static DEFINE_MUTEX(dmc620_pmu_node_lock);
+static LIST_HEAD(dmc620_pmu_irqs);
 
 struct dmc620_pmu_irq {
 	struct hlist_node node;
@@ -475,9 +480,9 @@ static int dmc620_pmu_get_irq(struct dmc620_pmu *dmc620_pmu, int irq_num)
 		return PTR_ERR(irq);
 
 	dmc620_pmu->irq = irq;
-	mutex_lock(&dmc620_pmu_irqs_lock);
+	mutex_lock(&dmc620_pmu_node_lock);
 	list_add_rcu(&dmc620_pmu->pmus_node, &irq->pmus_node);
-	mutex_unlock(&dmc620_pmu_irqs_lock);
+	mutex_unlock(&dmc620_pmu_node_lock);
 
 	return 0;
 }
@@ -486,9 +491,11 @@ static void dmc620_pmu_put_irq(struct dmc620_pmu *dmc620_pmu)
 {
 	struct dmc620_pmu_irq *irq = dmc620_pmu->irq;
 
-	mutex_lock(&dmc620_pmu_irqs_lock);
+	mutex_lock(&dmc620_pmu_node_lock);
 	list_del_rcu(&dmc620_pmu->pmus_node);
+	mutex_unlock(&dmc620_pmu_node_lock);
 
+	mutex_lock(&dmc620_pmu_irqs_lock);
 	if (!refcount_dec_and_test(&irq->refcount)) {
 		mutex_unlock(&dmc620_pmu_irqs_lock);
 		return;
@@ -638,10 +645,10 @@ static int dmc620_pmu_cpu_teardown(unsigned int cpu,
 		return 0;
 
 	/* We're only reading, but this isn't the place to be involving RCU */
-	mutex_lock(&dmc620_pmu_irqs_lock);
+	mutex_lock(&dmc620_pmu_node_lock);
 	list_for_each_entry(dmc620_pmu, &irq->pmus_node, pmus_node)
 		perf_pmu_migrate_context(&dmc620_pmu->pmu, irq->cpu, target);
-	mutex_unlock(&dmc620_pmu_irqs_lock);
+	mutex_unlock(&dmc620_pmu_node_lock);
 
 	WARN_ON(irq_set_affinity(irq->irq_num, cpumask_of(target)));
 	irq->cpu = target;