author | Hillf Danton <dhillf@gmail.com> | 2011-06-16 21:55:20 -0400
committer | Ingo Molnar <mingo@elte.hu> | 2011-08-14 12:00:48 +0200
commit | 311e800e16f63d909136a64ed17ca353a160be59 (patch)
tree | c5d65afd1b0abd748ad50dd15bb81434fce9a17a /kernel/sched_rt.c
parent | 0835471697255b415edcefd6b1e25b6c034439f2 (diff)
sched, rt: Fix rq->rt.pushable_tasks bug in push_rt_task()
Do not call dequeue_pushable_task() when failing to push an eligible
task, as it remains pushable, merely not at this particular moment.
Signed-off-by: Hillf Danton <dhillf@gmail.com>
Signed-off-by: Mike Galbraith <mgalbraith@gmx.de>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Yong Zhang <yong.zhang0@gmail.com>
Link: http://lkml.kernel.org/r/1306895385.4791.26.camel@marge.simson.net
Signed-off-by: Ingo Molnar <mingo@elte.hu>
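For readers less familiar with the RT push machinery: a runnable RT task that is allowed to run on more than one CPU sits on its runqueue's rq->rt.pushable_tasks list, and dequeue_pushable_task() is only meant to remove it when it actually migrates or stops being a push candidate. The toy model below (every name in it is made up for illustration; it is not kernel code) shows why dropping the task after a merely unsuccessful push attempt is wrong: the task silently disappears from later push attempts from this runqueue even though it is still eligible.

```c
#include <stdio.h>
#include <stdbool.h>

/* Toy model of a per-runqueue pushable list; all names here are
 * hypothetical, not the kernel's implementation. */
struct toy_rq {
	int pushable[8];	/* task ids that could be pushed away */
	int nr_pushable;
};

/* Stand-in for the "find a lower-priority CPU" step: pretend no other
 * CPU can take the task right now, i.e. the push attempt fails. */
static bool toy_find_lowest_rq(int task)
{
	(void)task;
	return false;
}

static void toy_push_once(struct toy_rq *rq)
{
	if (rq->nr_pushable == 0)
		return;

	int task = rq->pushable[0];

	if (!toy_find_lowest_rq(task)) {
		/*
		 * The pre-fix behaviour dropped the task from the list
		 * here (rq->nr_pushable--), so later push attempts never
		 * saw it again.  The fix keeps it on the list: it is
		 * still pushable, merely not pushable at this moment.
		 */
		return;
	}
	/* On success the task would migrate and then leave the list. */
}

int main(void)
{
	struct toy_rq rq = { .pushable = { 42 }, .nr_pushable = 1 };

	toy_push_once(&rq);	/* the push attempt finds no target CPU */
	printf("tasks still considered pushable: %d\n", rq.nr_pushable);
	return 0;
}
```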
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r-- | kernel/sched_rt.c | 15
1 file changed, 8 insertions, 7 deletions
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index e2698c0fc697..8e189455ed12 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1394,6 +1394,7 @@ static int push_rt_task(struct rq *rq)
 {
 	struct task_struct *next_task;
 	struct rq *lowest_rq;
+	int ret = 0;
 
 	if (!rq->rt.overloaded)
 		return 0;
@@ -1426,7 +1427,7 @@ retry:
 	if (!lowest_rq) {
 		struct task_struct *task;
 		/*
-		 * find lock_lowest_rq releases rq->lock
+		 * find_lock_lowest_rq releases rq->lock
 		 * so it is possible that next_task has migrated.
 		 *
 		 * We need to make sure that the task is still on the same
@@ -1436,12 +1437,11 @@ retry:
 		task = pick_next_pushable_task(rq);
 		if (task_cpu(next_task) == rq->cpu && task == next_task) {
 			/*
-			 * If we get here, the task hasn't moved at all, but
-			 * it has failed to push. We will not try again,
-			 * since the other cpus will pull from us when they
-			 * are ready.
+			 * The task hasn't migrated, and is still the next
+			 * eligible task, but we failed to find a run-queue
+			 * to push it to. Do not retry in this case, since
+			 * other cpus will pull from us when ready.
 			 */
-			dequeue_pushable_task(rq, next_task);
 			goto out;
 		}
 
@@ -1460,6 +1460,7 @@ retry:
 	deactivate_task(rq, next_task, 0);
 	set_task_cpu(next_task, lowest_rq->cpu);
 	activate_task(lowest_rq, next_task, 0);
+	ret = 1;
 
 	resched_task(lowest_rq->curr);
 
@@ -1468,7 +1469,7 @@ retry:
 out:
 	put_task_struct(next_task);
 
-	return 1;
+	return ret;
 }
 
 static void push_rt_tasks(struct rq *rq)
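Why the return value has to change along with the dequeue: push_rt_tasks() drives push_rt_task() in a loop and only keeps going while a task was actually moved. Before the patch, returning 1 on the failure path was harmless only because the task had just been dequeued; with the task now left on the pushable list, that path must return 0, otherwise the caller would retry the same unpushable task indefinitely. A rough sketch of that caller loop follows (assumed shape, paraphrased rather than quoted from kernel/sched_rt.c):

```c
/*
 * Sketch of the caller, paraphrased from kernel/sched_rt.c of this era
 * (assumption: the loop shape matches this kernel version).
 */
static void push_rt_tasks(struct rq *rq)
{
	/*
	 * push_rt_task() now returns 1 only when a task was actually
	 * migrated.  If the "still eligible but nowhere to push it"
	 * path kept returning 1 without dequeueing the task, this loop
	 * would spin on the same task forever.
	 */
	while (push_rt_task(rq))
		;
}
```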