author     Waiman Long <longman@redhat.com>          2022-09-22 14:00:38 -0400
committer  Peter Zijlstra <peterz@infradead.org>     2022-10-27 11:01:22 +0200
commit     8f9ea86fdf99b81458cc21fc1c591fcd4a0fa1f4 (patch)
tree       43639cec1cbe2b23961c078f9062ab051019f65a /kernel/sched/sched.h
parent     713a2e21a5137e96d2594f53d19784ffde3ddbd0 (diff)
sched: Always preserve the user requested cpumask

Unconditionally preserve the user requested cpumask on sched_setaffinity()
calls. This allows using it outside of the fairly narrow
restrict_cpus_allowed_ptr() use-case and fixes some cpuset issues that
currently suffer destruction of cpumasks.

Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20220922180041.1768141-3-longman@redhat.com
Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r--  kernel/sched/sched.h  8
1 file changed, 8 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 6c91fb78e04e..04f571df385f 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1878,6 +1878,13 @@ static inline void dirty_sched_domain_sysctl(int cpu)
 #endif
 
 extern int sched_update_scaling(void);
+
+static inline const struct cpumask *task_user_cpus(struct task_struct *p)
+{
+	if (!p->user_cpus_ptr)
+		return cpu_possible_mask; /* &init_task.cpus_mask */
+	return p->user_cpus_ptr;
+}
 #endif /* CONFIG_SMP */
 
 #include "stats.h"
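As an illustration (not part of this diff): because task_user_cpus() falls back to cpu_possible_mask when no sched_setaffinity() mask has been recorded, scheduler-internal callers never have to NULL-check p->user_cpus_ptr themselves. The helper below is a hypothetical sketch of such a caller; only task_user_cpus() and cpumask_intersects() come from the kernel.

/* Hypothetical helper: does a proposed restriction still overlap the
 * CPUs the user originally asked for?  task_user_cpus() guarantees a
 * valid mask even when p->user_cpus_ptr is NULL. */
static bool task_user_allows(struct task_struct *p, const struct cpumask *mask)
{
	return cpumask_intersects(task_user_cpus(p), mask);
}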
@@ -2147,6 +2154,7 @@ extern const u32 sched_prio_to_wmult[40];
 
 struct affinity_context {
 	const struct cpumask *new_mask;
+	struct cpumask *user_mask;
 	unsigned int flags;
 };
 
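As an illustration of the new field (not code from this diff): a sched_setaffinity()-style path is expected to pass the user-supplied mask in via user_mask so the scheduler core can preserve it in p->user_cpus_ptr. The wrapper below is a sketch; set_affinity_preserving_user_mask() is a hypothetical name, and while __set_cpus_allowed_ptr(), SCA_USER and SCA_CHECK exist in kernel/sched/, their exact combination here is an assumption based on this patch series.

/* Hypothetical wrapper: pass both the effective mask and the
 * user-requested mask; with SCA_USER set, the core code can take
 * ownership of @user_mask and preserve it in p->user_cpus_ptr. */
static int set_affinity_preserving_user_mask(struct task_struct *p,
					     struct cpumask *user_mask)
{
	struct affinity_context ac = {
		.new_mask  = user_mask,
		.user_mask = user_mask,
		.flags     = SCA_USER | SCA_CHECK,
	};

	return __set_cpus_allowed_ptr(p, &ac);
}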