| author    | Peter Zijlstra <a.p.zijlstra@chello.nl>          | 2008-04-19 19:45:00 +0200 |
| committer | Ingo Molnar <mingo@elte.hu>                      | 2008-04-19 19:45:00 +0200 |
| commit    | 58d6c2d72f8628f39e8689fbde8aa177fcf00a37 (patch) |                           |
| tree      | 0be40bd788856b3cabb99ff258561b15a574f2f3 /kernel |                           |
| parent    | d19ca30874f2ad343d054e0b5c0576744afeecd4 (diff)  |                           |
sched: rt-group: optimize dequeue_rt_stack
Now that the group hierarchy can have an arbitrary depth, the O(n^2) nature
of RT task dequeues will really hurt. Optimize this by providing space to
store the tree path, so that we can walk it the other way, top-down.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
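The two-pass trick described in the commit message can be shown in isolation. Below is a minimal, self-contained sketch of the idea, not kernel code: `struct node`, `dequeue_one()` and `dequeue_path()` are hypothetical names standing in for `sched_rt_entity`, `dequeue_rt_entity()` and `dequeue_rt_stack()`. The hierarchy can only be walked bottom-up via a parent pointer, so the first pass records the path in back pointers and the second pass replays it top-down.

```c
/*
 * Minimal sketch of the idea (hypothetical types and names, not kernel
 * code): the hierarchy can only be walked bottom-up via ->parent, so
 * record the path with ->back pointers on the way up, then replay it
 * top-down.
 */
#include <stddef.h>
#include <stdio.h>

struct node {
        struct node *parent;    /* link towards the root (only upward link) */
        struct node *back;      /* scratch pointer used to replay the walk */
        int id;
};

static void dequeue_one(struct node *n)
{
        printf("dequeue node %d\n", n->id);
}

/* Dequeue the whole path root..leaf in top-down order, in O(h). */
static void dequeue_path(struct node *leaf)
{
        struct node *n, *back = NULL;

        /* First pass: walk up, threading back pointers as we go. */
        for (n = leaf; n; n = n->parent) {
                n->back = back;
                back = n;
        }

        /* Second pass: 'back' is now the topmost node; walk back down. */
        for (n = back; n; n = n->back)
                dequeue_one(n);
}

int main(void)
{
        struct node root = { .parent = NULL,  .id = 0 };
        struct node mid  = { .parent = &root, .id = 1 };
        struct node leaf = { .parent = &mid,  .id = 2 };

        dequeue_path(&leaf);    /* prints 0, 1, 2: top-down */
        return 0;
}
```

The scratch back pointer lives in the entity itself, so no allocation is needed and the whole dequeue stays linear in the depth of the hierarchy.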
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/sched_rt.c | 27 |
1 file changed, 11 insertions, 16 deletions
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 736fb8fd8977..c2730a5a4f05 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -479,26 +479,21 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
 /*
  * Because the prio of an upper entry depends on the lower
  * entries, we must remove entries top - down.
- *
- * XXX: O(1/2 h^2) because we can only walk up, not down the chain.
  */
 static void dequeue_rt_stack(struct task_struct *p)
 {
-        struct sched_rt_entity *rt_se, *top_se;
+        struct sched_rt_entity *rt_se, *back = NULL;
 
-        /*
-         * dequeue all, top - down.
-         */
-        do {
-                rt_se = &p->rt;
-                top_se = NULL;
-                for_each_sched_rt_entity(rt_se) {
-                        if (on_rt_rq(rt_se))
-                                top_se = rt_se;
-                }
-                if (top_se)
-                        dequeue_rt_entity(top_se);
-        } while (top_se);
+        rt_se = &p->rt;
+        for_each_sched_rt_entity(rt_se) {
+                rt_se->back = back;
+                back = rt_se;
+        }
+
+        for (rt_se = back; rt_se; rt_se = rt_se->back) {
+                if (on_rt_rq(rt_se))
+                        dequeue_rt_entity(rt_se);
+        }
 }
 
 /*
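The removed XXX comment put the old cost at O(1/2 h^2): each dequeue rescans the chain from the leaf, so dequeuing a path of depth h touches h + (h-1) + ... + 1 = h(h+1)/2 entities, whereas the new code walks the chain exactly twice, for 2h visits. A tiny standalone program, purely illustrative and not kernel code, tabulating both counts:

```c
/*
 * Back-of-the-envelope check (illustrative only): count how many entities
 * each strategy visits for a hierarchy of depth h.  The removed loop
 * rescans the whole chain once per dequeue; the new one walks it twice.
 */
#include <stdio.h>

int main(void)
{
        for (unsigned int h = 1; h <= 8; h++) {
                unsigned int old_visits = h * (h + 1) / 2;  /* h + (h-1) + ... + 1 */
                unsigned int new_visits = 2 * h;            /* one pass up, one pass down */

                printf("depth %u: old ~%u visits, new %u visits\n",
                       h, old_visits, new_visits);
        }
        return 0;
}
```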