From 402de7fc880fef055bc984957454b532987e9ad0 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Mon, 27 May 2024 16:54:52 +0200
Subject: sched: Fix spelling in comments

Do a spell-checking pass.

Signed-off-by: Ingo Molnar
Cc: Peter Zijlstra
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar
---
 kernel/sched/fair.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'kernel/sched/fair.c')

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 8a5b1ae0aa55..63113dcb8d1a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -61,7 +61,7 @@
  * Options are:
  *
  * SCHED_TUNABLESCALING_NONE - unscaled, always *1
- * SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus)
+ * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
  * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
  *
  * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
@@ -8719,7 +8719,7 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
  * topology where each level pairs two lower groups (or better). This results
  * in O(log n) layers. Furthermore we reduce the number of CPUs going up the
  * tree to only the first of the previous level and we decrease the frequency
- * of load-balance at each level inv. proportional to the number of CPUs in
+ * of load-balance at each level inversely proportional to the number of CPUs in
  * the groups.
  *
  * This yields:
--
cgit v1.2.3-58-ga151
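The logarithmic scaling named in the comment above can be illustrated with a small standalone C sketch. The ilog2_u() helper and the 750000 ns base value below are illustrative assumptions, not the kernel's implementation (the kernel uses its own ilog2() and its own tunables):

  #include <stdio.h>
  #include <unistd.h>

  /* Integer log2, mirroring what the kernel's ilog2() would return. */
  static unsigned int ilog2_u(unsigned long n)
  {
          unsigned int l = 0;

          while (n >>= 1)
                  l++;
          return l;
  }

  int main(void)
  {
          /* Illustrative base tunable: 0.75 ms expressed in nanoseconds. */
          const unsigned long base_slice_ns = 750000UL;
          long ncpus = sysconf(_SC_NPROCESSORS_ONLN);

          /*
           * SCHED_TUNABLESCALING_NONE:   factor = 1
           * SCHED_TUNABLESCALING_LOG:    factor = 1 + ilog(ncpus)  (the default)
           * SCHED_TUNABLESCALING_LINEAR: factor = ncpus
           */
          unsigned long factor = 1 + ilog2_u((unsigned long)ncpus);

          printf("%ld CPUs -> log scaling factor %lu -> scaled slice %lu ns\n",
                 ncpus, factor, base_slice_ns * factor);
          return 0;
  }

On a 192-core machine this yields a factor of 8 rather than 192, which is the point of the logarithmic mode: tunables grow slowly as core counts grow.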
From f90cc919f9e5cbfcd0b952290c57ef1317f4e91e Mon Sep 17 00:00:00 2001
From: Tim Chen
Date: Fri, 31 May 2024 13:54:52 -0700
Subject: sched/balance: Skip unnecessary updates to idle load balancer's flags
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We observed that the overhead of trigger_load_balance(), now renamed
sched_balance_trigger(), rises with a system's core count.

For an OLTP workload running a 6.8 kernel on a 2-socket x86 system with
96 cores/socket, we saw that 0.7% of CPU cycles were spent in
trigger_load_balance(). On older systems with fewer cores/socket, this
function's overhead was less than 0.1%.

The cause of this overhead is that multiple CPUs call kick_ilb(flags),
posting the balancing work needed to a common idle load balancer CPU.
The ilb_cpu's flags field gets updated unconditionally with
atomic_fetch_or(). The atomic reads and writes to ilb_cpu's flags cause
heavy cache bouncing and CPU-cycle overhead, as seen in the annotated
profile below.

  kick_ilb():
          if (ilb_cpu < 0)
            test   %r14d,%r14d
          ↑ js     6c
          flags = atomic_fetch_or(flags, nohz_flags(ilb_cpu));
            mov    $0x2d600,%rdi
            movslq %r14d,%r8
            mov    %rdi,%rdx
            add    -0x7dd0c3e0(,%r8,8),%rdx
  arch_atomic_read():
   0.01     mov    0x64(%rdx),%esi
  35.58     add    $0x64,%rdx
  arch_atomic_fetch_or():
  static __always_inline int arch_atomic_fetch_or(int i, atomic_t *v)
  {
          int val = arch_atomic_read(v);
          do { } while (!arch_atomic_try_cmpxchg(v, &val, val | i));
   0.03 157:  mov    %r12d,%ecx
  arch_atomic_try_cmpxchg():
          return arch_try_cmpxchg(&v->counter, old, new);
   0.00     mov    %esi,%eax
  arch_atomic_fetch_or():
          do { } while (!arch_atomic_try_cmpxchg(v, &val, val | i));
            or     %esi,%ecx
  arch_atomic_try_cmpxchg():
          return arch_try_cmpxchg(&v->counter, old, new);
   0.01     lock   cmpxchg %ecx,(%rdx)
  42.96   ↓ jne    2d2
  kick_ilb():

With instrumentation, we found that 81% of the updates do not result in
any change to the ilb_cpu's flags. That is, multiple CPUs are asking the
ilb_cpu to do the same things over and over again before it has a chance
to run the NOHZ load balance.

Skip updating ilb_cpu's flags if no new work needs to be done, since such
updates do not change ilb_cpu's NOHZ flags. This requires an extra atomic
read, but it is less expensive than the frequent, unnecessary atomic
updates that generate cache bounces.

On the OLTP workload, the CPU cycles spent in trigger_load_balance()
(now sched_balance_trigger()) dropped from 0.7% to 0.2%.

Signed-off-by: Tim Chen
Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Chen Yu
Reviewed-by: Vincent Guittot
Link: https://lore.kernel.org/r/20240531205452.65781-1-tim.c.chen@linux.intel.com
---
 kernel/sched/fair.c | 7 +++++++
 1 file changed, 7 insertions(+)

(limited to 'kernel/sched/fair.c')

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 63113dcb8d1a..41b58387023d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -11891,6 +11891,13 @@ static void kick_ilb(unsigned int flags)
 	if (ilb_cpu < 0)
 		return;
 
+	/*
+	 * Don't bother if no new NOHZ balance work items for ilb_cpu,
+	 * i.e. all bits in flags are already set in ilb_cpu.
+	 */
+	if ((atomic_read(nohz_flags(ilb_cpu)) & flags) == flags)
+		return;
+
 	/*
 	 * Access to rq::nohz_csd is serialized by NOHZ_KICK_MASK; he who sets
 	 * the first flag owns it; cleared by nohz_csd_func().
--
cgit v1.2.3-58-ga151
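The essence of the fix above is a cheap atomic read that short-circuits the expensive read-modify-write when every requested bit is already set. A minimal, self-contained C11 sketch of that pattern follows; kick_flags(), ilb_flags and the flag values are made-up names for illustration, not the kernel's API:

  #include <stdatomic.h>
  #include <stdio.h>

  #define NOHZ_BALANCE_KICK  0x1u   /* illustrative flag values */
  #define NOHZ_STATS_KICK    0x2u

  static atomic_uint ilb_flags;     /* stands in for nohz_flags(ilb_cpu) */

  /* Ask the idle load balancer to do 'flags' worth of work. */
  static void kick_flags(unsigned int flags)
  {
          /*
           * Cheap read first: if every bit in 'flags' is already set,
           * another CPU has already requested the same work and the
           * atomic RMW (and its cache-line bouncing) can be skipped.
           */
          if ((atomic_load(&ilb_flags) & flags) == flags)
                  return;

          atomic_fetch_or(&ilb_flags, flags);
          /* ... the kernel would follow up by kicking ilb_cpu ... */
  }

  int main(void)
  {
          kick_flags(NOHZ_BALANCE_KICK);                    /* does the RMW      */
          kick_flags(NOHZ_BALANCE_KICK);                    /* skipped: no new bits */
          kick_flags(NOHZ_BALANCE_KICK | NOHZ_STATS_KICK);  /* RMW: new bit      */
          printf("flags = 0x%x\n", atomic_load(&ilb_flags));
          return 0;
  }

The trade-off is exactly the one the changelog names: one extra shared read on every call, in exchange for avoiding the lock-prefixed cmpxchg when the request is redundant, which the instrumentation showed was 81% of the time.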
From d329605287020c3d1c3b0dadc63d8208e7251382 Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Tue, 25 Jun 2024 15:29:58 -1000
Subject: sched/fair: set_load_weight() must also call reweight_task() for SCHED_IDLE tasks

When a task's weight is being changed, set_load_weight() is called with
@update_load set. As weight changes aren't trivial for the fair class,
set_load_weight() calls fair.c::reweight_task() for fair class tasks.

However, set_load_weight() first tests task_has_idle_policy() on entry
and skips calling reweight_task() for SCHED_IDLE tasks. This is buggy as
SCHED_IDLE tasks are just fair tasks with a very low weight and they
would incorrectly skip load, vlag and position updates.

Fix it by updating reweight_task() to take struct load_weight as idle
weight can't be expressed with prio and making set_load_weight() call
reweight_task() for SCHED_IDLE tasks too when @update_load is set.

Fixes: 9059393e4ec1 ("sched/fair: Use reweight_entity() for set_user_nice()")
Suggested-by: Peter Zijlstra (Intel)
Signed-off-by: Tejun Heo
Signed-off-by: Peter Zijlstra (Intel)
Cc: stable@vger.kernel.org # v4.15+
Link: http://lkml.kernel.org/r/20240624102331.GI31592@noisy.programming.kicks-ass.net
---
 kernel/sched/core.c  | 23 ++++++++++-------------
 kernel/sched/fair.c  |  7 +++----
 kernel/sched/sched.h |  2 +-
 3 files changed, 14 insertions(+), 18 deletions(-)

(limited to 'kernel/sched/fair.c')

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 0935f9d4bb7b..747683487be7 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1328,27 +1328,24 @@ int tg_nop(struct task_group *tg, void *data)
 void set_load_weight(struct task_struct *p, bool update_load)
 {
 	int prio = p->static_prio - MAX_RT_PRIO;
-	struct load_weight *load = &p->se.load;
+	struct load_weight lw;
 
-	/*
-	 * SCHED_IDLE tasks get minimal weight:
-	 */
 	if (task_has_idle_policy(p)) {
-		load->weight = scale_load(WEIGHT_IDLEPRIO);
-		load->inv_weight = WMULT_IDLEPRIO;
-		return;
+		lw.weight = scale_load(WEIGHT_IDLEPRIO);
+		lw.inv_weight = WMULT_IDLEPRIO;
+	} else {
+		lw.weight = scale_load(sched_prio_to_weight[prio]);
+		lw.inv_weight = sched_prio_to_wmult[prio];
 	}
 
 	/*
 	 * SCHED_OTHER tasks have to update their load when changing their
 	 * weight
 	 */
-	if (update_load && p->sched_class == &fair_sched_class) {
-		reweight_task(p, prio);
-	} else {
-		load->weight = scale_load(sched_prio_to_weight[prio]);
-		load->inv_weight = sched_prio_to_wmult[prio];
-	}
+	if (update_load && p->sched_class == &fair_sched_class)
+		reweight_task(p, &lw);
+	else
+		p->se.load = lw;
 }
 
 #ifdef CONFIG_UCLAMP_TASK
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 41b58387023d..f205e2482690 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3835,15 +3835,14 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 	}
 }
 
-void reweight_task(struct task_struct *p, int prio)
+void reweight_task(struct task_struct *p, const struct load_weight *lw)
 {
 	struct sched_entity *se = &p->se;
 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
 	struct load_weight *load = &se->load;
-	unsigned long weight = scale_load(sched_prio_to_weight[prio]);
 
-	reweight_entity(cfs_rq, se, weight);
-	load->inv_weight = sched_prio_to_wmult[prio];
+	reweight_entity(cfs_rq, se, lw->weight);
+	load->inv_weight = lw->inv_weight;
 }
 
 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 62fd8bc6fd08..9ab53435debd 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2509,7 +2509,7 @@ extern void init_sched_dl_class(void);
 extern void init_sched_rt_class(void);
 extern void init_sched_fair_class(void);
 
-extern void reweight_task(struct task_struct *p, int prio);
+extern void reweight_task(struct task_struct *p, const struct load_weight *lw);
 
 extern void resched_curr(struct rq *rq);
 extern void resched_cpu(int cpu);
--
cgit v1.2.3-58-ga151
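For context, the path fixed above is reached whenever a fair task's weight changes at run time, for example when its policy is switched to SCHED_IDLE. A minimal userspace sketch of one such trigger is below; it is illustrative only, based on the changelog's statement that weight changes flow through set_load_weight() with @update_load set:

  #define _GNU_SOURCE
  #include <sched.h>
  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
          struct sched_param param;

          memset(&param, 0, sizeof(param));
          param.sched_priority = 0;  /* must be 0 for SCHED_IDLE */

          /*
           * Switching a running SCHED_OTHER task to SCHED_IDLE changes its
           * load weight; with this fix, the fair class reweights the whole
           * entity (load, vlag, position) instead of only writing p->se.load.
           */
          if (sched_setscheduler(0 /* self */, SCHED_IDLE, &param) != 0) {
                  perror("sched_setscheduler(SCHED_IDLE)");
                  return 1;
          }

          printf("now running with SCHED_IDLE policy\n");
          return 0;
  }

Passing struct load_weight into reweight_task() lets this call site express the WEIGHT_IDLEPRIO/WMULT_IDLEPRIO pair directly, which a prio index alone could not represent.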