commit    3e3e13f399ac8060a20d14d210a28dc02dda372e
tree      b560a614e926f5f90e4096b6d4743b1b5fdfccb2
parent    52d3da1ad4f442cec877fbeb83902707b56da0cf
author    Ingo Molnar <mingo@elte.hu>  2007-11-09 22:39:39 +0100
committer Ingo Molnar <mingo@elte.hu>  2007-11-09 22:39:39 +0100
sched: remove PREEMPT_RESTRICT
remove PREEMPT_RESTRICT. (this is a separate commit so that any
regression related to the removal itself is bisectable)
Signed-off-by: Ingo Molnar <mingo@elte.hu>
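For context on the change below: the scheduler's features live in one bitmask, sched_feat() tests a single bit of it by token-pasting the feature name, and this patch simply drops the PREEMPT_RESTRICT bit and every use of the per-entity peer_preempt flag that backed it. The following is a minimal standalone sketch of that bitmask pattern, using only the identifiers visible in the diff; the abridged enum and the main() driver are illustrative assumptions, not kernel code.

#include <stdio.h>

/* Feature bits taken from the kernel/sched.c hunk; earlier entries elided. */
enum {
	SCHED_FEAT_START_DEBIT		= 2,
	SCHED_FEAT_TREE_AVG		= 4,
	SCHED_FEAT_APPROX_AVG		= 8,
	SCHED_FEAT_WAKEUP_PREEMPT	= 16,
	/* SCHED_FEAT_PREEMPT_RESTRICT = 32 no longer exists after this patch */
};

/* Default mask, mirroring the post-patch initializer (earlier features elided). */
static unsigned int sysctl_sched_features =
		SCHED_FEAT_START_DEBIT		* 1 |
		SCHED_FEAT_TREE_AVG		* 0 |
		SCHED_FEAT_APPROX_AVG		* 0 |
		SCHED_FEAT_WAKEUP_PREEMPT	* 1;

/* Same shape as the kernel's sched_feat() macro: token-paste the feature name. */
#define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)

int main(void)
{
	printf("WAKEUP_PREEMPT: %s\n", sched_feat(WAKEUP_PREEMPT) ? "on" : "off");
	printf("TREE_AVG:       %s\n", sched_feat(TREE_AVG) ? "on" : "off");
	return 0;
}

In the real code the const_debug qualifier makes the mask writable only in CONFIG_SCHED_DEBUG builds; the sketch uses a plain static for simplicity.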
-rw-r--r--  include/linux/sched.h  |  1
-rw-r--r--  kernel/sched.c         |  4
-rw-r--r--  kernel/sched_fair.c    | 10
3 files changed, 3 insertions, 12 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 951759e30c09..93fd30d6dac4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -863,7 +863,6 @@ struct sched_entity {
 	struct load_weight	load;		/* for load-balancing */
 	struct rb_node		run_node;
 	unsigned int		on_rq;
-	int			peer_preempt;
 
 	u64			exec_start;
 	u64			sum_exec_runtime;
diff --git a/kernel/sched.c b/kernel/sched.c
index 4b23dfb4c80f..2a107e4ad5ed 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -460,7 +460,6 @@ enum {
 	SCHED_FEAT_TREE_AVG		= 4,
 	SCHED_FEAT_APPROX_AVG		= 8,
 	SCHED_FEAT_WAKEUP_PREEMPT	= 16,
-	SCHED_FEAT_PREEMPT_RESTRICT	= 32,
 };
 
 const_debug unsigned int sysctl_sched_features =
@@ -468,8 +467,7 @@ const_debug unsigned int sysctl_sched_features =
 		SCHED_FEAT_START_DEBIT		* 1 |
 		SCHED_FEAT_TREE_AVG		* 0 |
 		SCHED_FEAT_APPROX_AVG		* 0 |
-		SCHED_FEAT_WAKEUP_PREEMPT	* 1 |
-		SCHED_FEAT_PREEMPT_RESTRICT	* 0;
+		SCHED_FEAT_WAKEUP_PREEMPT	* 1;
 
 #define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
 
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 7264814ba62a..fbcb426029d0 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -546,7 +546,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 
 	update_stats_dequeue(cfs_rq, se);
 	if (sleep) {
-		se->peer_preempt = 0;
 #ifdef CONFIG_SCHEDSTATS
 		if (entity_is_task(se)) {
 			struct task_struct *tsk = task_of(se);
@@ -574,10 +573,8 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 
 	ideal_runtime = sched_slice(cfs_rq, curr);
 	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
-	if (delta_exec > ideal_runtime ||
-			(sched_feat(PREEMPT_RESTRICT) && curr->peer_preempt))
+	if (delta_exec > ideal_runtime)
 		resched_task(rq_of(cfs_rq)->curr);
-	curr->peer_preempt = 0;
 }
 
 static void
@@ -867,9 +864,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
 			gran = calc_delta_fair(gran, &se->load);
 
 		if (delta > gran) {
-			int now = !sched_feat(PREEMPT_RESTRICT);
-
-			if (now || p->prio < curr->prio || !se->peer_preempt++)
+			if (p->prio < curr->prio)
 				resched_task(curr);
 		}
 	}
@@ -1083,7 +1078,6 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 		swap(curr->vruntime, se->vruntime);
 	}
 
-	se->peer_preempt = 0;
 	enqueue_task_fair(rq, p, 0);
 	resched_task(rq->curr);
 }
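Net effect of the sched_fair.c hunks: the tick-driven check now keys purely on slice overrun, and the wakeup-time check, once the granularity threshold is crossed, preempts only when the woken task has a better (numerically lower) priority. Below is a standalone model of those two decisions; the struct fields, helper names, and sample numbers are illustrative assumptions, and only the two conditions mirror the post-patch code.

#include <stdio.h>

typedef unsigned long long u64;

/* Illustrative stand-in for the scheduling-entity/task fields used below. */
struct demo_task {
	u64 sum_exec_runtime;		/* ns run since last being scheduled in */
	u64 prev_sum_exec_runtime;	/* ns run recorded at that point */
	int prio;			/* lower value = higher priority */
};

/* Tick path (check_preempt_tick after the patch): preempt only on slice overrun. */
static int tick_wants_resched(const struct demo_task *curr, u64 ideal_runtime)
{
	u64 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;

	return delta_exec > ideal_runtime;
}

/*
 * Wakeup path (check_preempt_wakeup after the patch): past the granularity
 * threshold, only a better priority of the woken task forces a resched.
 */
static int wakeup_wants_resched(long long delta, long long gran,
				const struct demo_task *woken,
				const struct demo_task *curr)
{
	if (delta > gran)
		return woken->prio < curr->prio;
	return 0;
}

int main(void)
{
	struct demo_task curr  = { 12000000ULL, 4000000ULL, 120 };
	struct demo_task woken = { 0, 0, 110 };

	/* 8 ms used against a 6 ms ideal slice -> tick preempts. */
	printf("tick resched:   %d\n", tick_wants_resched(&curr, 6000000ULL));
	/* delta of 5 ms beyond a 4 ms granularity, better prio -> preempt. */
	printf("wakeup resched: %d\n",
	       wakeup_wants_resched(5000000LL, 4000000LL, &woken, &curr));
	return 0;
}

With PREEMPT_RESTRICT gone there is no per-entity peer_preempt state left to maintain, which is why the dequeue_entity() and task_new_fair() hunks only delete resets of that field.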