Diffstat (limited to 'kernel')
-rw-r--r--	kernel/fork.c       | 6 +++---
-rw-r--r--	kernel/sched_fair.c | 3 ++-
2 files changed, 5 insertions(+), 4 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index 28a740151988..8ca1a14cdc8c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1123,6 +1123,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->blocked_on = NULL; /* not blocked yet */
 #endif
 
+	/* Perform scheduler related setup. Assign this task to a CPU. */
+	sched_fork(p, clone_flags);
+
 	if ((retval = security_task_alloc(p)))
 		goto bad_fork_cleanup_policy;
 	if ((retval = audit_alloc(p)))
@@ -1212,9 +1215,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	INIT_LIST_HEAD(&p->ptrace_children);
 	INIT_LIST_HEAD(&p->ptrace_list);
 
-	/* Perform scheduler related setup. Assign this task to a CPU. */
-	sched_fork(p, clone_flags);
-
 	/* Now that the task is set up, run cgroup callbacks if
 	 * necessary. We need to run them before the task is visible
 	 * on the tasklist. */
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 6c361472cc74..d3c03070872d 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1067,8 +1067,9 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 	update_curr(cfs_rq);
 	place_entity(cfs_rq, se, 1);
 
+	/* 'curr' will be NULL if the child belongs to a different group */
 	if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) &&
-			curr->vruntime < se->vruntime) {
+			curr && curr->vruntime < se->vruntime) {
 		/*
 		 * Upon rescheduling, sched_class::put_prev_task() will place
 		 * 'current' within the tree based on its new key value.
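For context, a minimal standalone sketch (not kernel code; the struct and variable names below are hypothetical and exist only for this illustration) of the short-circuit NULL guard the sched_fair.c hunk adds: the 'curr' pointer is tested before it is dereferenced, since it can be NULL when the child lands on a different group's runqueue.

/*
 * Standalone illustration of the guard pattern used in the patched
 * task_new_fair() condition: rely on C's short-circuit '&&' to avoid
 * dereferencing a NULL pointer.  Names here are made up for the example.
 */
#include <stdio.h>
#include <stddef.h>

struct entity {
	unsigned long long vruntime;
};

int main(void)
{
	struct entity se = { .vruntime = 100 };
	struct entity *curr = NULL;	/* e.g. child belongs to another group */
	int child_runs_first = 1;

	/* Without the 'curr &&' test this would dereference a NULL pointer. */
	if (child_runs_first && curr && curr->vruntime < se.vruntime)
		printf("swap vruntimes so the child runs first\n");
	else
		printf("no swap: curr is NULL or already behind the child\n");

	return 0;
}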