Diffstat (limited to 'kernel/locking')
-rw-r--r--  kernel/locking/lockdep.c         | 26
-rw-r--r--  kernel/locking/rtmutex.c         |  3
-rw-r--r--  kernel/locking/rtmutex_common.h  | 11
-rw-r--r--  kernel/locking/rwsem.c           |  4
-rw-r--r--  kernel/locking/rwsem.h           |  8
5 files changed, 31 insertions(+), 21 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 89b5f83f1969..023386338269 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -556,9 +556,9 @@ static void print_lock(struct held_lock *hlock)
return;
}
+ printk(KERN_CONT "%p", hlock->instance);
print_lock_name(lock_classes + class_idx - 1);
- printk(KERN_CONT ", at: [<%p>] %pS\n",
- (void *)hlock->acquire_ip, (void *)hlock->acquire_ip);
+ printk(KERN_CONT ", at: %pS\n", (void *)hlock->acquire_ip);
}
static void lockdep_print_held_locks(struct task_struct *curr)
@@ -808,7 +808,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
if (verbose(class)) {
graph_unlock();
- printk("\nnew class %p: %s", class->key, class->name);
+ printk("\nnew class %px: %s", class->key, class->name);
if (class->name_version > 1)
printk(KERN_CONT "#%d", class->name_version);
printk(KERN_CONT "\n");
@@ -1407,7 +1407,7 @@ static void print_lock_class_header(struct lock_class *class, int depth)
}
printk("%*s }\n", depth, "");
- printk("%*s ... key at: [<%p>] %pS\n",
+ printk("%*s ... key at: [<%px>] %pS\n",
depth, "", class->key, class->key);
}
@@ -2340,7 +2340,7 @@ cache_hit:
if (very_verbose(class)) {
printk("\nhash chain already cached, key: "
- "%016Lx tail class: [%p] %s\n",
+ "%016Lx tail class: [%px] %s\n",
(unsigned long long)chain_key,
class->key, class->name);
}
@@ -2349,7 +2349,7 @@ cache_hit:
}
if (very_verbose(class)) {
- printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n",
+ printk("\nnew hash chain, key: %016Lx tail class: [%px] %s\n",
(unsigned long long)chain_key, class->key, class->name);
}
@@ -2676,16 +2676,16 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
void print_irqtrace_events(struct task_struct *curr)
{
printk("irq event stamp: %u\n", curr->irq_events);
- printk("hardirqs last enabled at (%u): [<%p>] %pS\n",
+ printk("hardirqs last enabled at (%u): [<%px>] %pS\n",
curr->hardirq_enable_event, (void *)curr->hardirq_enable_ip,
(void *)curr->hardirq_enable_ip);
- printk("hardirqs last disabled at (%u): [<%p>] %pS\n",
+ printk("hardirqs last disabled at (%u): [<%px>] %pS\n",
curr->hardirq_disable_event, (void *)curr->hardirq_disable_ip,
(void *)curr->hardirq_disable_ip);
- printk("softirqs last enabled at (%u): [<%p>] %pS\n",
+ printk("softirqs last enabled at (%u): [<%px>] %pS\n",
curr->softirq_enable_event, (void *)curr->softirq_enable_ip,
(void *)curr->softirq_enable_ip);
- printk("softirqs last disabled at (%u): [<%p>] %pS\n",
+ printk("softirqs last disabled at (%u): [<%px>] %pS\n",
curr->softirq_disable_event, (void *)curr->softirq_disable_ip,
(void *)curr->softirq_disable_ip);
}
@@ -3207,7 +3207,7 @@ static void __lockdep_init_map(struct lockdep_map *lock, const char *name,
* Sanity check, the lock-class key must be persistent:
*/
if (!static_obj(key)) {
- printk("BUG: key %p not in .data!\n", key);
+ printk("BUG: key %px not in .data!\n", key);
/*
* What it says above ^^^^^, I suggest you read it.
*/
@@ -3322,7 +3322,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
}
atomic_inc((atomic_t *)&class->ops);
if (very_verbose(class)) {
- printk("\nacquire class [%p] %s", class->key, class->name);
+ printk("\nacquire class [%px] %s", class->key, class->name);
if (class->name_version > 1)
printk(KERN_CONT "#%d", class->name_version);
printk(KERN_CONT "\n");
@@ -4376,7 +4376,7 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
pr_warn("WARNING: held lock freed!\n");
print_kernel_ident();
pr_warn("-------------------------\n");
- pr_warn("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
+ pr_warn("%s/%d is freeing memory %px-%px, with a lock still held there!\n",
curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
print_lock(hlock);
lockdep_print_held_locks(curr);
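Note on the lockdep.c hunks above: since Linux 4.15 a plain %p hashes pointer values before printing, which defeats diagnostics whose whole point is to report real addresses. The patch therefore switches lock keys, freed memory ranges and saved instruction pointers to the unhashed %px, while print_lock() deliberately keeps the hashed %p for hlock->instance. A minimal sketch of how the three formats behave; the function and its parameter are illustrative, not part of the patch:

#include <linux/printk.h>

/* Illustrative only: contrast the pointer formats used above. */
static void demo_pointer_formats(void *addr)
{
	printk("hashed:   %p\n",  addr);  /* obfuscated; safe for general logs */
	printk("raw:      %px\n", addr);  /* real address, as lockdep now prints */
	printk("symbolic: %pS\n", addr);  /* nearest kernel symbol plus offset */
}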
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 940633c63254..4f014be7a4b8 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1268,8 +1268,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
if (unlikely(ret)) {
__set_current_state(TASK_RUNNING);
- if (rt_mutex_has_waiters(lock))
- remove_waiter(lock, &waiter);
+ remove_waiter(lock, &waiter);
rt_mutex_handle_deadlock(ret, chwalk, &waiter);
}
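The dropped rt_mutex_has_waiters() guard above is made safe by the rtmutex_common.h hunk below: rt_mutex_top_waiter() now returns NULL for an empty waiter tree instead of running rb_entry() on a NULL leftmost node, so the error path can call remove_waiter() unconditionally. A heavily simplified, hedged sketch of that dependency (the real remove_waiter() also handles priority-inheritance chain adjustment):

/* Sketch only: remove_waiter() begins by asking whether this waiter is
 * the top waiter. With the NULL-safe helper below, that comparison is
 * well defined even when the tree is already empty.
 */
static void remove_waiter_sketch(struct rt_mutex *lock,
				 struct rt_mutex_waiter *waiter)
{
	bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock)); /* NULL-safe */

	rt_mutex_dequeue(lock, waiter);
	if (is_top_waiter) {
		/* deboost the owner and walk the PI chain -- elided */
	}
}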
diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
index 68686b3ec3c1..d1d62f942be2 100644
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -52,12 +52,13 @@ static inline int rt_mutex_has_waiters(struct rt_mutex *lock)
static inline struct rt_mutex_waiter *
rt_mutex_top_waiter(struct rt_mutex *lock)
{
- struct rt_mutex_waiter *w;
-
- w = rb_entry(lock->waiters.rb_leftmost,
- struct rt_mutex_waiter, tree_entry);
- BUG_ON(w->lock != lock);
+ struct rb_node *leftmost = rb_first_cached(&lock->waiters);
+ struct rt_mutex_waiter *w = NULL;
+ if (leftmost) {
+ w = rb_entry(leftmost, struct rt_mutex_waiter, tree_entry);
+ BUG_ON(w->lock != lock);
+ }
return w;
}
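The rewrite also replaces the open-coded lock->waiters.rb_leftmost access with the rb_first_cached() accessor. A self-contained sketch of the cached-rbtree lookup pattern; demo_waiter and demo_top_waiter are illustrative names, not kernel API:

#include <linux/rbtree.h>

/* rb_root_cached keeps a pointer to the leftmost (smallest) node, so
 * the minimum element is found in O(1); rb_first_cached() reads that
 * cached pointer and yields NULL when the tree is empty.
 */
struct demo_waiter {
	struct rb_node node;
	int prio;
};

static struct demo_waiter *demo_top_waiter(struct rb_root_cached *tree)
{
	struct rb_node *leftmost = rb_first_cached(tree);

	if (!leftmost)
		return NULL;	/* empty tree: callers must handle NULL */
	return rb_entry(leftmost, struct demo_waiter, node);
}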
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index f549c552dbf1..30465a2f2b6c 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -117,6 +117,7 @@ EXPORT_SYMBOL(down_write_trylock);
void up_read(struct rw_semaphore *sem)
{
rwsem_release(&sem->dep_map, 1, _RET_IP_);
+ DEBUG_RWSEMS_WARN_ON(sem->owner != RWSEM_READER_OWNED);
__up_read(sem);
}
@@ -129,6 +130,7 @@ EXPORT_SYMBOL(up_read);
void up_write(struct rw_semaphore *sem)
{
rwsem_release(&sem->dep_map, 1, _RET_IP_);
+ DEBUG_RWSEMS_WARN_ON(sem->owner != current);
rwsem_clear_owner(sem);
__up_write(sem);
@@ -142,6 +144,7 @@ EXPORT_SYMBOL(up_write);
void downgrade_write(struct rw_semaphore *sem)
{
lock_downgrade(&sem->dep_map, _RET_IP_);
+ DEBUG_RWSEMS_WARN_ON(sem->owner != current);
rwsem_set_reader_owned(sem);
__downgrade_write(sem);
@@ -211,6 +214,7 @@ EXPORT_SYMBOL(down_write_killable_nested);
void up_read_non_owner(struct rw_semaphore *sem)
{
+ DEBUG_RWSEMS_WARN_ON(sem->owner != RWSEM_READER_OWNED);
__up_read(sem);
}
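The new DEBUG_RWSEMS_WARN_ON() assertions give CONFIG_DEBUG_RWSEMS a way to catch lock/unlock mismatches: up_read() and up_read_non_owner() insist the rwsem is reader-owned, while up_write() and downgrade_write() insist the caller is the write owner. A hedged sketch of a bug these checks would now flag; demo_sem and the function are illustrative:

#include <linux/rwsem.h>

static DECLARE_RWSEM(demo_sem);

static void demo_mismatched_release(void)
{
	down_write(&demo_sem);	/* sets sem->owner = current */

	/* Wrong unlock primitive: with CONFIG_DEBUG_RWSEMS=y the check
	 * added to up_read() fires, because sem->owner holds a task
	 * pointer rather than RWSEM_READER_OWNED.
	 */
	up_read(&demo_sem);
}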
diff --git a/kernel/locking/rwsem.h b/kernel/locking/rwsem.h
index a883b8f1fdc6..a17cba8d94bb 100644
--- a/kernel/locking/rwsem.h
+++ b/kernel/locking/rwsem.h
@@ -16,6 +16,12 @@
*/
#define RWSEM_READER_OWNED ((struct task_struct *)1UL)
+#ifdef CONFIG_DEBUG_RWSEMS
+# define DEBUG_RWSEMS_WARN_ON(c) DEBUG_LOCKS_WARN_ON(c)
+#else
+# define DEBUG_RWSEMS_WARN_ON(c)
+#endif
+
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
* All writes to owner are protected by WRITE_ONCE() to make sure that
@@ -41,7 +47,7 @@ static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
* do a write to the rwsem cacheline when it is really necessary
* to minimize cacheline contention.
*/
- if (sem->owner != RWSEM_READER_OWNED)
+ if (READ_ONCE(sem->owner) != RWSEM_READER_OWNED)
WRITE_ONCE(sem->owner, RWSEM_READER_OWNED);
}
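The last hunk wraps the owner test in READ_ONCE(). The test exists so that a reader only dirties the rwsem cacheline when owner actually changes, and since the comment above notes that all writes to owner go through WRITE_ONCE(), the matching annotated read keeps the compiler from tearing or caching the racy load. A generic sketch of this check-before-store pattern under those assumptions:

#include <linux/compiler.h>

/* Illustrative only: skip the store, and the cacheline invalidation it
 * causes, when the shared value is already what we want. READ_ONCE()
 * and WRITE_ONCE() pair up on the racy accesses.
 */
static inline void mark_shared_state(unsigned long *state, unsigned long val)
{
	if (READ_ONCE(*state) != val)
		WRITE_ONCE(*state, val);
}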