author    | Linus Torvalds <torvalds@linux-foundation.org> | 2024-07-16 17:00:50 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2024-07-16 17:00:50 -0700
commit    | 4a996d90b9e046c6d59845acf00a54d464c34ff3 (patch)
tree      | 52fa410c742aaec6b469a3907e71eefc75e999b3 /include
parent    | 0c182ac2ebc5470a725632b08cee9a52065bbe71 (diff)
parent    | db43a609d01e8bf9b812d45dc2945c65b57dd793 (diff)
Merge tag 'sched-core-2024-07-16' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:
- Update Daniel Bristot de Oliveira's entry in MAINTAINERS,
and credit him in CREDITS
- Harmonize the lock-yielding behavior on dynamically selected
  preemption models with static ones (a usage sketch follows the
  commit list below)
- Reorganize the code a bit: split out sched/syscalls.c to reduce
the size of sched/core.c
- Micro-optimize psi_group_change()
- Fix set_load_weight() for SCHED_IDLE tasks
- Misc cleanups & fixes
* tag 'sched-core-2024-07-16' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
sched: Update MAINTAINERS and CREDITS
sched/fair: set_load_weight() must also call reweight_task() for SCHED_IDLE tasks
sched/psi: Optimise psi_group_change a bit
sched/core: Drop spinlocks on contention iff kernel is preemptible
sched/core: Move preempt_model_*() helpers from sched.h to preempt.h
sched/balance: Skip unnecessary updates to idle load balancer's flags
idle: Remove stale RCU comment
sched/headers: Move struct pre-declarations to the beginning of the header
sched/core: Clean up kernel/sched/sched.h a bit
sched/core: Simplify prefetch_curr_exec_start()
sched: Fix spelling in comments
sched/syscalls: Split out kernel/sched/syscalls.c from kernel/sched/core.c
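For context on the lock-yielding harmonization: spin_needbreak() now consults the runtime-selected preemption model rather than the compile-time CONFIG_PREEMPTION switch. Below is a hedged sketch of the classic lock-break loop this affects; process_buckets(), scan_bucket() and struct bucket are hypothetical names invented for illustration, and the pattern mirrors what the in-tree cond_resched_lock() helper wraps:

    #include <linux/spinlock.h>

    struct bucket;                          /* hypothetical item type */
    void scan_bucket(struct bucket *b);     /* hypothetical per-item work */

    /*
     * Illustrative lock-break loop (not from this merge). After this
     * change, spin_needbreak() reports contention only when the
     * *runtime* preemption model is preemptible (full or RT), so a
     * CONFIG_PREEMPT_DYNAMIC kernel booted with preempt=none now skips
     * the yield, just like a statically built PREEMPT_NONE kernel.
     */
    static void process_buckets(spinlock_t *lock, struct bucket **tbl, int nr)
    {
            int i;

            spin_lock(lock);
            for (i = 0; i < nr; i++) {
                    scan_bucket(tbl[i]);

                    if (spin_needbreak(lock)) {
                            /* Contended and preemptible: briefly drop the lock. */
                            spin_unlock(lock);
                            cpu_relax();
                            spin_lock(lock);
                    }
            }
            spin_unlock(lock);
    }

The in-tree equivalent of the unlock/relock dance is cond_resched_lock(lock), which additionally reschedules if a resched is pending rather than just spinning in cpu_relax().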
Diffstat (limited to 'include')
-rw-r--r-- | include/linux/preempt.h  | 41
-rw-r--r-- | include/linux/sched.h    | 41
-rw-r--r-- | include/linux/spinlock.h | 14
3 files changed, 47 insertions(+), 49 deletions(-)
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 7233e9cf1bab..ce76f1a45722 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -481,4 +481,45 @@
 DEFINE_LOCK_GUARD_0(preempt, preempt_disable(), preempt_enable())
 DEFINE_LOCK_GUARD_0(preempt_notrace, preempt_disable_notrace(), preempt_enable_notrace())
 DEFINE_LOCK_GUARD_0(migrate, migrate_disable(), migrate_enable())
+#ifdef CONFIG_PREEMPT_DYNAMIC
+
+extern bool preempt_model_none(void);
+extern bool preempt_model_voluntary(void);
+extern bool preempt_model_full(void);
+
+#else
+
+static inline bool preempt_model_none(void)
+{
+	return IS_ENABLED(CONFIG_PREEMPT_NONE);
+}
+static inline bool preempt_model_voluntary(void)
+{
+	return IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY);
+}
+static inline bool preempt_model_full(void)
+{
+	return IS_ENABLED(CONFIG_PREEMPT);
+}
+
+#endif
+
+static inline bool preempt_model_rt(void)
+{
+	return IS_ENABLED(CONFIG_PREEMPT_RT);
+}
+
+/*
+ * Does the preemption model allow non-cooperative preemption?
+ *
+ * For !CONFIG_PREEMPT_DYNAMIC kernels this is an exact match with
+ * CONFIG_PREEMPTION; for CONFIG_PREEMPT_DYNAMIC this doesn't work as the
+ * kernel is *built* with CONFIG_PREEMPTION=y but may run with e.g. the
+ * PREEMPT_NONE model.
+ */
+static inline bool preempt_model_preemptible(void)
+{
+	return preempt_model_full() || preempt_model_rt();
+}
+
 #endif /* __LINUX_PREEMPT_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a5f4b48fca18..76214d7c819d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2064,47 +2064,6 @@ extern int __cond_resched_rwlock_write(rwlock_t *lock);
 	__cond_resched_rwlock_write(lock);				\
 })
 
-#ifdef CONFIG_PREEMPT_DYNAMIC
-
-extern bool preempt_model_none(void);
-extern bool preempt_model_voluntary(void);
-extern bool preempt_model_full(void);
-
-#else
-
-static inline bool preempt_model_none(void)
-{
-	return IS_ENABLED(CONFIG_PREEMPT_NONE);
-}
-static inline bool preempt_model_voluntary(void)
-{
-	return IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY);
-}
-static inline bool preempt_model_full(void)
-{
-	return IS_ENABLED(CONFIG_PREEMPT);
-}
-
-#endif
-
-static inline bool preempt_model_rt(void)
-{
-	return IS_ENABLED(CONFIG_PREEMPT_RT);
-}
-
-/*
- * Does the preemption model allow non-cooperative preemption?
- *
- * For !CONFIG_PREEMPT_DYNAMIC kernels this is an exact match with
- * CONFIG_PREEMPTION; for CONFIG_PREEMPT_DYNAMIC this doesn't work as the
- * kernel is *built* with CONFIG_PREEMPTION=y but may run with e.g. the
- * PREEMPT_NONE model.
- */
-static inline bool preempt_model_preemptible(void)
-{
-	return preempt_model_full() || preempt_model_rt();
-}
-
 static __always_inline bool need_resched(void)
 {
 	return unlikely(tif_need_resched());
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 3fcd20de6ca8..63dd8cf3c3c2 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -462,11 +462,10 @@ static __always_inline int spin_is_contended(spinlock_t *lock)
  */
 static inline int spin_needbreak(spinlock_t *lock)
 {
-#ifdef CONFIG_PREEMPTION
+	if (!preempt_model_preemptible())
+		return 0;
+
 	return spin_is_contended(lock);
-#else
-	return 0;
-#endif
 }
 
 /*
@@ -479,11 +478,10 @@ static inline int spin_needbreak(spinlock_t *lock)
  */
 static inline int rwlock_needbreak(rwlock_t *lock)
 {
-#ifdef CONFIG_PREEMPTION
+	if (!preempt_model_preemptible())
+		return 0;
+
 	return rwlock_is_contended(lock);
-#else
-	return 0;
-#endif
 }
 
 /*
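For readers wondering what backs the extern declarations that the preempt.h hunk adds under CONFIG_PREEMPT_DYNAMIC: the out-of-line definitions live in kernel/sched/core.c and report the model selected at boot (e.g. via the preempt= command-line option). A simplified sketch of their shape follows; it is a reconstruction under those assumptions, not the verbatim kernel source:

    /*
     * Simplified sketch of the CONFIG_PREEMPT_DYNAMIC side. The enum
     * values and the preempt_dynamic_mode variable mirror the kernel's
     * naming in kernel/sched/core.c, but this is a reconstruction, not
     * the actual code (which generates the accessors via a macro).
     */
    enum {
            preempt_dynamic_undefined = -1,
            preempt_dynamic_none,
            preempt_dynamic_voluntary,
            preempt_dynamic_full,
    };

    static int preempt_dynamic_mode = preempt_dynamic_undefined;

    bool preempt_model_none(void)
    {
            return preempt_dynamic_mode == preempt_dynamic_none;
    }

    bool preempt_model_voluntary(void)
    {
            return preempt_dynamic_mode == preempt_dynamic_voluntary;
    }

    bool preempt_model_full(void)
    {
            return preempt_dynamic_mode == preempt_dynamic_full;
    }

Combined with the spinlock.h hunks above, the effect is that on a dynamic kernel running the none or voluntary model, preempt_model_preemptible() is false, spin_needbreak() and rwlock_needbreak() return 0, and lock holders no longer yield on contention — matching the old #ifdef CONFIG_PREEMPTION behavior of statically configured kernels.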