author		Abel Wu <wuyun.abel@bytedance.com>	2022-09-07 19:19:56 +0800
committer	Peter Zijlstra <peterz@infradead.org>	2022-09-07 21:53:46 +0200
commit		3e6efe87cd5ccabf0f1d4e3ef25881ca0fd337e7 (patch)
tree		94f321aeaa8b48861875c8d44dad301e5e481d6c /kernel/sched
parent		33f93525799fa3c841b2ba93a56b2bb32ab11dc9 (diff)
sched/fair: Remove redundant check in select_idle_smt()
If two CPUs share LLC cache, then the two cores they belong to are also in
the same LLC domain. select_idle_smt() is only called after the caller has
verified cpus_share_cache(prev, target), so the SMT siblings being scanned
are already guaranteed to lie within the LLC domain, and the explicit
sched_domain_span(sd) check is redundant.

Signed-off-by: Abel Wu <wuyun.abel@bytedance.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Josh Don <joshdon@google.com>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Link: https://lore.kernel.org/r/20220907112000.1854-2-wuyun.abel@bytedance.com
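For reference, a reconstruction of select_idle_smt() as it reads with this
patch applied, pieced together from the hunks below (the trailing return -1;
falls outside the hunk context but matches the !CONFIG_SCHED_SMT stub); the
parenthetical comment about the call-site guarantee is an editorial
annotation, not part of the kernel source:

/*
 * Scan the local SMT mask for idle CPUs.
 *
 * (Editorial note: the only caller, select_idle_sibling(), invokes this
 * after checking cpus_share_cache(prev, target), which is why no explicit
 * sched_domain_span() test is needed here.)
 */
static int select_idle_smt(struct task_struct *p, int target)
{
	int cpu;

	for_each_cpu_and(cpu, cpu_smt_mask(target), p->cpus_ptr) {
		if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
			return cpu;
	}

	return -1;
}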
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/fair.c	11
1 file changed, 4 insertions(+), 7 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index efceb670e755..9657c7de5f57 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6350,14 +6350,11 @@ static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpu
 /*
  * Scan the local SMT mask for idle CPUs.
  */
-static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
+static int select_idle_smt(struct task_struct *p, int target)
 {
 	int cpu;
 
-	for_each_cpu(cpu, cpu_smt_mask(target)) {
-		if (!cpumask_test_cpu(cpu, p->cpus_ptr) ||
-		    !cpumask_test_cpu(cpu, sched_domain_span(sd)))
-			continue;
+	for_each_cpu_and(cpu, cpu_smt_mask(target), p->cpus_ptr) {
 		if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
 			return cpu;
 	}
@@ -6381,7 +6378,7 @@ static inline int select_idle_core(struct task_struct *p, int core, struct cpuma
 	return __select_idle_cpu(core, p);
 }
 
-static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
+static inline int select_idle_smt(struct task_struct *p, int target)
 {
 	return -1;
 }
@@ -6615,7 +6612,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 		has_idle_core = test_idle_cores(target, false);
 
 		if (!has_idle_core && cpus_share_cache(prev, target)) {
-			i = select_idle_smt(p, sd, prev);
+			i = select_idle_smt(p, prev);
 			if ((unsigned int)i < nr_cpumask_bits)
 				return i;
 		}
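
Editorial aside: for_each_cpu_and() iterates only the CPUs set in both masks,
so the conversion in the first hunk folds the p->cpus_ptr test into the
iteration itself; the only check actually dropped is the sched_domain_span(sd)
one. A rough sketch of the equivalence (illustrative expansion, not the
kernel's implementation of the macro):

	/* Editorial sketch: walking (cpu_smt_mask(target) & p->cpus_ptr) is
	 * equivalent to walking cpu_smt_mask(target) and skipping CPUs that
	 * are not set in p->cpus_ptr. */
	for_each_cpu(cpu, cpu_smt_mask(target)) {
		if (!cpumask_test_cpu(cpu, p->cpus_ptr))
			continue;
		if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
			return cpu;
	}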