diff options
author:    Peter Zijlstra <peterz@infradead.org>   2023-06-09 20:41:09 +0200
committer: Ingo Molnar <mingo@kernel.org>          2023-09-13 15:01:38 +0200
commit:    fa614b4feb5a246474ac71b45e520a8ddefc809c (patch)
tree:      d2dfa78b910780e71113746d43e02e5941f8c475 /kernel
parent:    af7c5763f5e8bc1b3f827354a283ccaf6a8c8098 (diff)
sched: Simplify sched_move_task()
Use guards to reduce gotos and simplify control flow.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel'):
 kernel/sched/core.c | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d298176367f7..a3f4fb8a6841 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -10437,17 +10437,18 @@ void sched_move_task(struct task_struct *tsk)
 	int queued, running, queue_flags =
 		DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
 	struct task_group *group;
-	struct rq_flags rf;
 	struct rq *rq;
 
-	rq = task_rq_lock(tsk, &rf);
+	CLASS(task_rq_lock, rq_guard)(tsk);
+	rq = rq_guard.rq;
+
 	/*
 	 * Esp. with SCHED_AUTOGROUP enabled it is possible to get superfluous
 	 * group changes.
 	 */
 	group = sched_get_task_group(tsk);
 	if (group == tsk->sched_task_group)
-		goto unlock;
+		return;
 
 	update_rq_clock(rq);
 
@@ -10472,9 +10473,6 @@ void sched_move_task(struct task_struct *tsk)
 		 */
 		resched_curr(rq);
 	}
-
-unlock:
-	task_rq_unlock(rq, tsk, &rf);
 }
 
 static inline struct task_group *css_tg(struct cgroup_subsys_state *css)