| author | Peter Zijlstra <peterz@infradead.org> | 2021-01-22 13:01:58 +0100 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2021-02-17 14:12:42 +0100 |
| commit | e59e10f8ef63d42fbb99776a5a112841e798b3b5 (patch) | |
| tree | b20fea3e3521b3fc47fc4ab4bc460cd10386b748 /kernel | |
| parent | 826bfeb37bb4302ee6042f330c4c0c757152bdb8 (diff) | |
sched: Add /debug/sched_preempt
Add a debugfs file to muck about with the preempt mode at runtime.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lkml.kernel.org/r/YAsGiUYf6NyaTplX@hirez.programming.kicks-ass.net
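
By way of illustration (an editorial addition, not part of the patch), here is a minimal userspace sketch that drives the new file. It assumes debugfs is mounted at /sys/kernel/debug, so the file created by sched_init_debug_dynamic() appears as /sys/kernel/debug/sched_preempt. Writing one of the strings recognized by sched_dynamic_mode() switches the mode; reading back lists all modes with the active one in parentheses, as produced by sched_dynamic_show().

```c
/*
 * Editorial sketch, not part of the patch: exercise the sched_preempt
 * debugfs file from userspace. Assumes debugfs is mounted at
 * /sys/kernel/debug (adjust the path otherwise) and that the caller
 * has the privileges needed to open the file for writing.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static const char path[] = "/sys/kernel/debug/sched_preempt";

int main(void)
{
        char buf[64];
        ssize_t n;
        int fd;

        /* Switch the dynamic preempt mode: "none", "voluntary" or "full". */
        fd = open(path, O_WRONLY);
        if (fd < 0) {
                perror("open (write)");
                return 1;
        }
        if (write(fd, "voluntary\n", strlen("voluntary\n")) < 0)
                perror("write");
        close(fd);

        /* Read the selection back; the active mode is shown in parentheses. */
        fd = open(path, O_RDONLY);
        if (fd < 0) {
                perror("open (read)");
                return 1;
        }
        n = read(fd, buf, sizeof(buf) - 1);
        if (n > 0) {
                buf[n] = '\0';
                fputs(buf, stdout);     /* e.g. "none (voluntary) full" */
        }
        close(fd);

        return 0;
}
```

The plain shell equivalent (echo voluntary into the file, then cat it) behaves the same way; since the file is created with mode 0644 at the debugfs root, the write side normally requires root.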
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/sched/core.c | 135 |
1 file changed, 126 insertions, 9 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 0c067176edc5..4a17bb5f28b0 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5363,37 +5363,154 @@ EXPORT_STATIC_CALL(preempt_schedule_notrace);
  * preempt_schedule_notrace <- preempt_schedule_notrace
  * irqentry_exit_cond_resched <- irqentry_exit_cond_resched
  */
-static int __init setup_preempt_mode(char *str)
+
+enum {
+        preempt_dynamic_none = 0,
+        preempt_dynamic_voluntary,
+        preempt_dynamic_full,
+};
+
+static int preempt_dynamic_mode = preempt_dynamic_full;
+
+static int sched_dynamic_mode(const char *str)
 {
-        if (!strcmp(str, "none")) {
+        if (!strcmp(str, "none"))
+                return 0;
+
+        if (!strcmp(str, "voluntary"))
+                return 1;
+
+        if (!strcmp(str, "full"))
+                return 2;
+
+        return -1;
+}
+
+static void sched_dynamic_update(int mode)
+{
+        /*
+         * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
+         * the ZERO state, which is invalid.
+         */
+        static_call_update(cond_resched, __cond_resched);
+        static_call_update(might_resched, __cond_resched);
+        static_call_update(preempt_schedule, __preempt_schedule_func);
+        static_call_update(preempt_schedule_notrace, __preempt_schedule_notrace_func);
+        static_call_update(irqentry_exit_cond_resched, irqentry_exit_cond_resched);
+
+        switch (mode) {
+        case preempt_dynamic_none:
                 static_call_update(cond_resched, __cond_resched);
                 static_call_update(might_resched, (typeof(&__cond_resched)) __static_call_return0);
                 static_call_update(preempt_schedule, (typeof(&preempt_schedule)) NULL);
                 static_call_update(preempt_schedule_notrace, (typeof(&preempt_schedule_notrace)) NULL);
                 static_call_update(irqentry_exit_cond_resched, (typeof(&irqentry_exit_cond_resched)) NULL);
-                pr_info("Dynamic Preempt: %s\n", str);
-        } else if (!strcmp(str, "voluntary")) {
+                pr_info("Dynamic Preempt: none\n");
+                break;
+
+        case preempt_dynamic_voluntary:
                 static_call_update(cond_resched, __cond_resched);
                 static_call_update(might_resched, __cond_resched);
                 static_call_update(preempt_schedule, (typeof(&preempt_schedule)) NULL);
                 static_call_update(preempt_schedule_notrace, (typeof(&preempt_schedule_notrace)) NULL);
                 static_call_update(irqentry_exit_cond_resched, (typeof(&irqentry_exit_cond_resched)) NULL);
-                pr_info("Dynamic Preempt: %s\n", str);
-        } else if (!strcmp(str, "full")) {
+                pr_info("Dynamic Preempt: voluntary\n");
+                break;
+
+        case preempt_dynamic_full:
                 static_call_update(cond_resched, (typeof(&__cond_resched)) __static_call_return0);
                 static_call_update(might_resched, (typeof(&__cond_resched)) __static_call_return0);
                 static_call_update(preempt_schedule, __preempt_schedule_func);
                 static_call_update(preempt_schedule_notrace, __preempt_schedule_notrace_func);
                 static_call_update(irqentry_exit_cond_resched, irqentry_exit_cond_resched);
-                pr_info("Dynamic Preempt: %s\n", str);
-        } else {
-                pr_warn("Dynamic Preempt: Unsupported preempt mode %s, default to full\n", str);
+                pr_info("Dynamic Preempt: full\n");
+                break;
+        }
+
+        preempt_dynamic_mode = mode;
+}
+
+static int __init setup_preempt_mode(char *str)
+{
+        int mode = sched_dynamic_mode(str);
+        if (mode < 0) {
+                pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
                 return 1;
         }
+
+        sched_dynamic_update(mode);
         return 0;
 }
 __setup("preempt=", setup_preempt_mode);
 
+#ifdef CONFIG_SCHED_DEBUG
+
+static ssize_t sched_dynamic_write(struct file *filp, const char __user *ubuf,
+                                   size_t cnt, loff_t *ppos)
+{
+        char buf[16];
+        int mode;
+
+        if (cnt > 15)
+                cnt = 15;
+
+        if (copy_from_user(&buf, ubuf, cnt))
+                return -EFAULT;
+
+        buf[cnt] = 0;
+        mode = sched_dynamic_mode(strstrip(buf));
+        if (mode < 0)
+                return mode;
+
+        sched_dynamic_update(mode);
+
+        *ppos += cnt;
+
+        return cnt;
+}
+
+static int sched_dynamic_show(struct seq_file *m, void *v)
+{
+        static const char * preempt_modes[] = {
+                "none", "voluntary", "full"
+        };
+        int i;
+
+        for (i = 0; i < ARRAY_SIZE(preempt_modes); i++) {
+                if (preempt_dynamic_mode == i)
+                        seq_puts(m, "(");
+                seq_puts(m, preempt_modes[i]);
+                if (preempt_dynamic_mode == i)
+                        seq_puts(m, ")");
+
+                seq_puts(m, " ");
+        }
+
+        seq_puts(m, "\n");
+        return 0;
+}
+
+static int sched_dynamic_open(struct inode *inode, struct file *filp)
+{
+        return single_open(filp, sched_dynamic_show, NULL);
+}
+
+static const struct file_operations sched_dynamic_fops = {
+        .open           = sched_dynamic_open,
+        .write          = sched_dynamic_write,
+        .read           = seq_read,
+        .llseek         = seq_lseek,
+        .release        = single_release,
+};
+
+static __init int sched_init_debug_dynamic(void)
+{
+        debugfs_create_file("sched_preempt", 0644, NULL, NULL, &sched_dynamic_fops);
+        return 0;
+}
+late_initcall(sched_init_debug_dynamic);
+
+#endif /* CONFIG_SCHED_DEBUG */
 #endif /* CONFIG_PREEMPT_DYNAMIC */
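
One design note worth calling out (editorial reading of the patch): sched_dynamic_update() unconditionally re-points every static call at its fully-preemptible target before the switch statement selectively downgrades the calls for the chosen mode. Per the comment in the patch, this ordering is there to keep {NONE,VOLUNTARY} -> FULL transitions from ever ending up in the zero state, which is invalid.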