author		Linus Torvalds <torvalds@linux-foundation.org>	2018-06-04 16:40:11 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-06-04 16:40:11 -0700
commit		92400b8c8b42e53abb0fcb4ac75cb85d4177a891 (patch)
tree		b6c7ef758d1c2b5e32e2483a0dbde7cd23a6d8a0 /arch
parent		31a85cb35c82d686a95f903fdf9a346aba818290 (diff)
parent		1b22fc609cecd1b16c4a015e1a6b3c9717484e3a (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
- Lots of tidying up changes all across the map for Linux's formal
memory/locking-model tooling, by Alan Stern, Akira Yokosawa, Andrea
Parri, Paul E. McKenney and SeongJae Park.
A notable change beyond the overall update to the tooling itself is
the tidying up of spin_is_locked() semantics, which spills over into
the kernel proper as well; a short sketch of what this means for
callers follows this list.
- qspinlock improvements: the locking algorithm now guarantees forward
progress, whereas the previous mainline implementation could starve
threads indefinitely in cmpxchg() loops. There are also other related
cleanups to the qspinlock code (Will Deacon); a simplified sketch of
the bounded-spin idea follows the diff at the end of this page.
- misc smaller improvements, cleanups and fixes all across the locking
subsystem
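To make the spin_is_locked() change concrete: the call is now a plain test of
the lock word and implies no memory barrier (the arm64 hunk below drops the
smp_mb() it used to carry). The following is a minimal, hypothetical sketch,
not code from this merge: lock_a, lock_b and peer_is_active() are made-up
names, and smp_mb__after_spinlock() is the explicit barrier a caller would add
if it previously relied on the implicit ordering.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(lock_a);
static DEFINE_SPINLOCK(lock_b);

/* Hypothetical "take my lock, then peek at the other lock" handshake. */
static bool peer_is_active(void)
{
	bool busy;

	spin_lock(&lock_a);
	/*
	 * spin_is_locked() no longer implies any ordering, so the store
	 * that acquired lock_a must be ordered before the load of lock_b
	 * explicitly.
	 */
	smp_mb__after_spinlock();
	busy = spin_is_locked(&lock_b);
	spin_unlock(&lock_a);

	return busy;
}

The design point is that spin_is_locked() stays a cheap test and callers that
need ordering have to ask for it explicitly.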
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (51 commits)
locking/rwsem: Simplify the is-owner-spinnable checks
tools/memory-model: Add reference for 'Simplifying ARM concurrency'
tools/memory-model: Update ASPLOS information
MAINTAINERS, tools/memory-model: Update e-mail address for Andrea Parri
tools/memory-model: Fix coding style in 'lock.cat'
tools/memory-model: Remove out-of-date comments and code from lock.cat
tools/memory-model: Improve mixed-access checking in lock.cat
tools/memory-model: Improve comments in lock.cat
tools/memory-model: Remove duplicated code from lock.cat
tools/memory-model: Flag "cumulativity" and "propagation" tests
tools/memory-model: Add model support for spin_is_locked()
tools/memory-model: Add scripts to test memory model
tools/memory-model: Fix coding style in 'linux-kernel.def'
tools/memory-model: Model 'smp_store_mb()'
tools/memory-order: Update the cheat-sheet to show that smp_mb__after_atomic() orders later RMW operations
tools/memory-order: Improve key for SELF and SV
tools/memory-model: Fix cheat sheet typo
tools/memory-model: Update required version of herdtools7
tools/memory-model: Redefine rb in terms of rcu-fence
tools/memory-model: Rename link and rcu-path to rcu-link and rb
...
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm64/include/asm/spinlock.h          |  5 -----
-rw-r--r--	arch/x86/include/asm/qspinlock.h           | 21 +++++++++------------
-rw-r--r--	arch/x86/include/asm/qspinlock_paravirt.h  |  3 +--
3 files changed, 10 insertions, 19 deletions
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index ebdae15d665d..26c5bd7d88d8 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -122,11 +122,6 @@ static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
-	/*
-	 * Ensure prior spin_lock operations to other locks have completed
-	 * on this CPU before we test whether "lock" is locked.
-	 */
-	smp_mb(); /* ^^^ */
 	return !arch_spin_value_unlocked(READ_ONCE(*lock));
 }
 
diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index 5e16b5d40d32..3e70bed8a978 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -7,6 +7,14 @@
 #include <asm-generic/qspinlock_types.h>
 #include <asm/paravirt.h>
 
+#define _Q_PENDING_LOOPS	(1 << 9)
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_init_lock_hash(void);
+extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);
+
 #define queued_spin_unlock queued_spin_unlock
 /**
  * queued_spin_unlock - release a queued spinlock
@@ -16,15 +24,9 @@
  */
 static inline void native_queued_spin_unlock(struct qspinlock *lock)
 {
-	smp_store_release((u8 *)lock, 0);
+	smp_store_release(&lock->locked, 0);
 }
 
-#ifdef CONFIG_PARAVIRT_SPINLOCKS
-extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
-extern void __pv_init_lock_hash(void);
-extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
-extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);
-
 static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 {
 	pv_queued_spin_lock_slowpath(lock, val);
@@ -40,11 +42,6 @@ static inline bool vcpu_is_preempted(long cpu)
 {
 	return pv_vcpu_is_preempted(cpu);
 }
-#else
-static inline void queued_spin_unlock(struct qspinlock *lock)
-{
-	native_queued_spin_unlock(lock);
-}
 #endif
 
 #ifdef CONFIG_PARAVIRT
diff --git a/arch/x86/include/asm/qspinlock_paravirt.h b/arch/x86/include/asm/qspinlock_paravirt.h
index 923307ea11c7..9ef5ee03d2d7 100644
--- a/arch/x86/include/asm/qspinlock_paravirt.h
+++ b/arch/x86/include/asm/qspinlock_paravirt.h
@@ -22,8 +22,7 @@ PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath);
  *
  * void __pv_queued_spin_unlock(struct qspinlock *lock)
  * {
- *	struct __qspinlock *l = (void *)lock;
- *	u8 lockval = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
+ *	u8 lockval = cmpxchg(&lock->locked, _Q_LOCKED_VAL, 0);
  *
  *	if (likely(lockval == _Q_LOCKED_VAL))
  *		return;
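The _Q_PENDING_LOOPS define added above is what bounds the wait for a
transient locked+pending state before a waiter falls back to the MCS queue.
The standalone userspace sketch below only illustrates that
bounded-spin-then-queue idea under simplified assumptions: the bit values,
struct layout and toy_* helpers are invented for the example, and the real
slowpath lives in kernel/locking/qspinlock.c.

/*
 * Toy illustration of forward progress: spin on a transient state only a
 * bounded number of times, then fall back to queueing, so no waiter is
 * stuck in an unbounded cmpxchg() retry loop.
 */
#include <stdatomic.h>
#include <stdint.h>

#define TOY_LOCKED_VAL		(1u << 0)	/* "locked" byte, bit 0 */
#define TOY_PENDING_VAL		(1u << 8)	/* "pending" bit */
#define TOY_PENDING_LOOPS	(1u << 9)	/* mirrors x86's _Q_PENDING_LOOPS */

struct toy_qspinlock {
	_Atomic uint32_t val;
};

/* Toy fallback: the real kernel queues the waiter on a per-CPU MCS node. */
static void toy_queue_and_wait(struct toy_qspinlock *lock)
{
	uint32_t expected = 0;

	while (!atomic_compare_exchange_weak_explicit(&lock->val, &expected,
						      TOY_LOCKED_VAL,
						      memory_order_acquire,
						      memory_order_relaxed))
		expected = 0;	/* CAS updated 'expected'; reset and retry */
}

static void toy_spin_lock_slowpath(struct toy_qspinlock *lock)
{
	uint32_t v = atomic_load_explicit(&lock->val, memory_order_relaxed);
	uint32_t cnt = TOY_PENDING_LOOPS;

	/* Bounded wait: give a locked+pending owner a chance to move on. */
	while (v == (TOY_LOCKED_VAL | TOY_PENDING_VAL) && --cnt)
		v = atomic_load_explicit(&lock->val, memory_order_relaxed);

	/* Still contended (or out of spins): join the queue. */
	if (v & (TOY_LOCKED_VAL | TOY_PENDING_VAL)) {
		toy_queue_and_wait(lock);
		return;
	}

	/* Looked free: try to take it directly, otherwise queue. */
	uint32_t expected = 0;
	if (!atomic_compare_exchange_strong_explicit(&lock->val, &expected,
						     TOY_LOCKED_VAL,
						     memory_order_acquire,
						     memory_order_relaxed))
		toy_queue_and_wait(lock);
}

Because the spin count is finite, a waiter that keeps losing cmpxchg() races
still makes progress by queueing instead of retrying forever.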