From f8e48a3dca060e80f672d398d181db1298fbc86c Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 22 Oct 2020 12:23:02 +0200
Subject: lockdep: Fix preemption WARN for spurious IRQ-enable

It is valid (albeit uncommon) to call local_irq_enable() without first
having called local_irq_disable(). In this case we enter
lockdep_hardirqs_on*() with IRQs enabled and trip a preemption warning
for using __this_cpu_read().

Use this_cpu_read() instead to avoid the warning.

Fixes: 4d004099a6 ("lockdep: Fix lockdep recursion")
Reported-by: syzbot+53f8ce8bbc07924b6417@syzkaller.appspotmail.com
Reported-by: kernel test robot
Signed-off-by: Peter Zijlstra (Intel)
---
 kernel/locking/lockdep.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'kernel/locking')

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 3e99dfef8408..fc206aefa970 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -4057,7 +4057,7 @@ void lockdep_hardirqs_on_prepare(unsigned long ip)
 	if (unlikely(in_nmi()))
 		return;
 
-	if (unlikely(__this_cpu_read(lockdep_recursion)))
+	if (unlikely(this_cpu_read(lockdep_recursion)))
 		return;
 
 	if (unlikely(lockdep_hardirqs_enabled())) {
@@ -4126,7 +4126,7 @@ void noinstr lockdep_hardirqs_on(unsigned long ip)
 		goto skip_checks;
 	}
 
-	if (unlikely(__this_cpu_read(lockdep_recursion)))
+	if (unlikely(this_cpu_read(lockdep_recursion)))
 		return;
 
 	if (lockdep_hardirqs_enabled()) {
--
cgit v1.2.3-58-ga151

From d48e3850030623e1c20785bceaaf78f916d0b1a3 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Mon, 26 Oct 2020 16:22:56 +0100
Subject: locking/lockdep: Remove more raw_cpu_read() usage

I initially thought raw_cpu_read() was OK, since if it is !0 we have
IRQs disabled and can't get migrated, so if we get migrated both CPUs
must have 0 and it doesn't matter which 0 we read.

And while that is true, it isn't the whole story: on pretty much all
architectures (except x86) this can result in computing the address for
one CPU, getting migrated, the old CPU continuing execution with
another task (possibly setting recursion) and then the new CPU reading
the value of the old CPU, which is no longer 0.

Similar to: baffd723e44d ("lockdep: Revert "lockdep: Use raw_cpu_*() for per-cpu variables"")

Signed-off-by: Peter Zijlstra (Intel)
Link: https://lkml.kernel.org/r/20201026152256.GB2651@hirez.programming.kicks-ass.net
---
 kernel/locking/lockdep.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'kernel/locking')

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index fc206aefa970..11028497d4df 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -84,7 +84,7 @@ static inline bool lockdep_enabled(void)
 	if (!debug_locks)
 		return false;
 
-	if (raw_cpu_read(lockdep_recursion))
+	if (this_cpu_read(lockdep_recursion))
 		return false;
 
 	if (current->lockdep_recursion)
--
cgit v1.2.3-58-ga151
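
A minimal sketch of the per-CPU accessor semantics the two patches above
depend on; the variable and helper below are hypothetical stand-ins rather
than lockdep code, and the expansion described in the comment only
approximates the generic (non-x86) implementation:

#include <linux/percpu.h>
#include <linux/types.h>

static DEFINE_PER_CPU(unsigned int, example_recursion);	/* hypothetical */

static bool example_in_recursion(void)
{
	/*
	 * On a generic (non-x86) architecture, raw_cpu_read() is roughly two
	 * separate steps:
	 *
	 *    ptr = per_cpu_ptr(&example_recursion, raw_smp_processor_id());
	 *    ... preemption/migration window: the task can move to another CPU ...
	 *    val = *ptr;   ... which would read the old CPU's counter ...
	 *
	 * __this_cpu_read() assumes that window cannot open (preemption must
	 * already be disabled) and, under CONFIG_DEBUG_PREEMPT, warns when
	 * that assumption is violated -- e.g. on a spurious local_irq_enable()
	 * reached with IRQs and preemption still enabled.
	 *
	 * this_cpu_read() performs the address generation and the load as one
	 * preemption-safe operation, so it is correct in both situations.
	 */
	return this_cpu_read(example_recursion) != 0;
}
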
From 1a39340865ce505a029b37aeb47a3e4c8db5f6c6 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Tue, 27 Oct 2020 13:48:34 +0100
Subject: lockdep: Fix nr_unused_locks accounting

Chris reported that commit 24d5a3bffef1 ("lockdep: Fix
usage_traceoverflow") breaks the nr_unused_locks validation code
triggered by /proc/lockdep_stats.

By fully splitting LOCK_USED and LOCK_USED_READ it becomes a bad
indicator for accounting nr_unused_locks; simplify by using any first
bit.

Fixes: 24d5a3bffef1 ("lockdep: Fix usage_traceoverflow")
Reported-by: Chris Wilson
Signed-off-by: Peter Zijlstra (Intel)
Tested-by: Chris Wilson
Link: https://lkml.kernel.org/r/20201027124834.GL2628@hirez.programming.kicks-ass.net
---
 kernel/locking/lockdep.c | 14 ++++----------
 1 file changed, 4 insertions(+), 10 deletions(-)

(limited to 'kernel/locking')

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 11028497d4df..b71ad8d9f1c9 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -4396,6 +4396,9 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 	if (unlikely(hlock_class(this)->usage_mask & new_mask))
 		goto unlock;
 
+	if (!hlock_class(this)->usage_mask)
+		debug_atomic_dec(nr_unused_locks);
+
 	hlock_class(this)->usage_mask |= new_mask;
 
 	if (new_bit < LOCK_TRACE_STATES) {
@@ -4403,19 +4406,10 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 			return 0;
 	}
 
-	switch (new_bit) {
-	case 0 ... LOCK_USED-1:
+	if (new_bit < LOCK_USED) {
 		ret = mark_lock_irq(curr, this, new_bit);
 		if (!ret)
 			return 0;
-		break;
-
-	case LOCK_USED:
-		debug_atomic_dec(nr_unused_locks);
-		break;
-
-	default:
-		break;
 	}
 
 unlock:
--
cgit v1.2.3-58-ga151
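
As a reading aid for the patch above, a paraphrase of the accounting rule it
restores; the helper name is hypothetical, the code is not the kernel's
verbatim mark_lock(), and it assumes lockdep's internal
debug_atomic_dec()/nr_unused_locks counter as used in the diff (i.e. it would
conceptually live inside kernel/locking/lockdep.c):

/*
 * A lock class counts as "unused" while no usage bit has ever been recorded
 * for it, so nr_unused_locks is decremented the first time *any* bit lands
 * in usage_mask -- no special case for LOCK_USED is needed now that
 * LOCK_USED and LOCK_USED_READ are fully split.
 */
static void account_first_usage(struct lock_class *class, unsigned long new_mask)
{
	if (!class->usage_mask)		/* first usage bit for this class */
		debug_atomic_dec(nr_unused_locks);

	class->usage_mask |= new_mask;
}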