author     Tejun Heo <tj@kernel.org>   2024-09-26 12:56:46 -1000
committer  Tejun Heo <tj@kernel.org>   2024-09-26 12:56:46 -1000
commit     6f34d8d382d64e7d8e77f5a9ddfd06f4c04937b0 (patch)
tree       4ba0d5e4b480765329f359d41383e93ffbc5e578 /kernel/sched
parent     b7b3b2dbae73b412c2d24b3d0ebf1110991e4510 (diff)
sched_ext: Use shorter slice while bypassing
While bypassing, tasks are scheduled in FIFO order, which favors tasks that
hog CPUs. This can slow down operations such as unloading the BPF scheduler.
While bypassing, the main goal is guaranteeing timely forward progress, so
there is no point in handing out long slices. Shorten the time slice used
while bypassing from 20ms to 5ms.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: David Vernet <void@manifault.com>
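
As a sanity check on the 20ms -> 5ms figures: the patch derives the bypass
slice as a quarter of the default slice rather than hard-coding it. A minimal
userspace sketch of the arithmetic follows; the constant values are
assumptions modeled on the kernel's definitions (SCX_SLICE_DFL is 20ms
expressed in nanoseconds), not kernel code:

#include <stdio.h>

#define NSEC_PER_MSEC    1000000ULL
/* Assumed to mirror the kernel's SCX_SLICE_DFL (20ms in ns). */
#define SCX_SLICE_DFL    (20ULL * NSEC_PER_MSEC)
/* The patch defines the bypass slice as a quarter of the default. */
#define SCX_SLICE_BYPASS (SCX_SLICE_DFL / 4)

int main(void)
{
	printf("SCX_SLICE_DFL:    %llu ns (%llu ms)\n",
	       SCX_SLICE_DFL, SCX_SLICE_DFL / NSEC_PER_MSEC);
	printf("SCX_SLICE_BYPASS: %llu ns (%llu ms)\n",
	       SCX_SLICE_BYPASS, SCX_SLICE_BYPASS / NSEC_PER_MSEC);
	return 0;
}

Running this prints 20000000 ns (20 ms) and 5000000 ns (5 ms), matching the
numbers in the commit message.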
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/ext.c  6
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 949a3c43000a..d6f6bf6caecc 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -9,6 +9,7 @@
 #define SCX_OP_IDX(op)	(offsetof(struct sched_ext_ops, op) / sizeof(void (*)(void)))
 
 enum scx_consts {
+	SCX_SLICE_BYPASS	= SCX_SLICE_DFL / 4,
 	SCX_DSP_DFL_MAX_BATCH	= 32,
 	SCX_DSP_MAX_LOOPS	= 32,
 	SCX_WATCHDOG_MAX_TIMEOUT = 30 * HZ,
@@ -1944,6 +1945,7 @@ static bool scx_rq_online(struct rq *rq)
 static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags,
 			    int sticky_cpu)
 {
+	bool bypassing = scx_rq_bypassing(rq);
 	struct task_struct **ddsp_taskp;
 	unsigned long qseq;
 
@@ -1961,7 +1963,7 @@ static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags,
 	if (!scx_rq_online(rq))
 		goto local;
 
-	if (scx_rq_bypassing(rq))
+	if (bypassing)
 		goto global;
 
 	if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
@@ -2016,7 +2018,7 @@ local_norefill:
 
 global:
 	touch_core_sched(rq, p);	/* see the comment in local: */
-	p->scx.slice = SCX_SLICE_DFL;
+	p->scx.slice = bypassing ? SCX_SLICE_BYPASS : SCX_SLICE_DFL;
 	dispatch_enqueue(find_global_dsq(p), p, enq_flags);
 }
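
A note on the small refactor in the diff: do_enqueue_task() now samples
scx_rq_bypassing(rq) once into a local variable, so the branch to the global
DSQ and the slice assignment act on the same snapshot of the bypass state.
Below is a hypothetical, self-contained model of that slice selection;
enqueue_slice() and its bool parameter are illustrative names for this sketch,
not kernel API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_MSEC    1000000ULL
#define SCX_SLICE_DFL    (20ULL * NSEC_PER_MSEC)	/* assumed 20ms default */
#define SCX_SLICE_BYPASS (SCX_SLICE_DFL / 4)		/* the patch's 5ms */

/*
 * Model of the global-dispatch slice choice: the bypass state is
 * sampled once by the caller and the same snapshot drives both the
 * enqueue path taken and the slice length granted.
 */
static uint64_t enqueue_slice(bool bypassing)
{
	return bypassing ? SCX_SLICE_BYPASS : SCX_SLICE_DFL;
}

int main(void)
{
	printf("normal enqueue: %llu ns\n",
	       (unsigned long long)enqueue_slice(false));
	printf("bypass enqueue: %llu ns\n",
	       (unsigned long long)enqueue_slice(true));
	return 0;
}

Granting the shorter slice only on the bypass path keeps normal scheduling
behavior untouched while capping how long any one task can hog a CPU during
FIFO bypass, which is what lets e.g. scheduler unload make timely progress.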