| author | Kefeng Wang <wangkefeng.wang@huawei.com> | 2022-09-08 20:07:14 +0800 |
| --- | --- | --- |
| committer | Luis Chamberlain <mcgrof@kernel.org> | 2022-11-20 20:55:26 -0800 |
| commit | 0dff89c4488f90c01807d9c12023433703206523 (patch) | |
| tree | b44cef4a9e2d024c8570590309956c105dba4299 /kernel/sched | |
| parent | 81e7cfa3a9eb4ba6993a9c71772fdab21bc5d870 (diff) | |
sched: Move numa_balancing sysctls to its own file
The sysctl_numa_balancing_promote_rate_limit and sysctl_numa_balancing sysctls are part of sched, so move them into sched's own files (kernel/sched/fair.c and kernel/sched/core.c, respectively).
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Luis Chamberlain <mcgrof@kernel.org>
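The move follows the broader cleanup that splits the central sysctl table into per-subsystem tables registered by the code that owns them. As a point of reference, a minimal sketch of that registration pattern is shown below; the names `example_tunable`, `example_sysctls` and `example_sysctl_init` are hypothetical, used only for illustration, and are not part of this commit.

```c
/*
 * Minimal sketch of the per-subsystem sysctl registration pattern.
 * example_tunable/example_sysctls/example_sysctl_init are hypothetical
 * names for illustration only; they do not exist in the kernel tree.
 */
#include <linux/sysctl.h>
#include <linux/init.h>

static unsigned int example_tunable = 1;

static struct ctl_table example_sysctls[] = {
	{
		.procname	= "example_tunable",
		.data		= &example_tunable,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{}	/* sentinel entry terminating the table */
};

static int __init example_sysctl_init(void)
{
	/* Exposes the entry as /proc/sys/kernel/example_tunable */
	register_sysctl_init("kernel", example_sysctls);
	return 0;
}
late_initcall(example_sysctl_init);
```

This is the same shape as the sched_core_sysctls/sched_fair_sysctls tables touched by the diff below: a static table, a handler or bounds per entry, and a one-line registration at init time.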
Diffstat (limited to 'kernel/sched')
| mode | file | lines changed |
| --- | --- | --- |
| -rw-r--r-- | kernel/sched/core.c | 13 |
| -rw-r--r-- | kernel/sched/fair.c | 18 |

2 files changed, 27 insertions(+), 4 deletions(-)
```diff
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index cb2aa2b54c7a..64f9242328a6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4401,7 +4401,7 @@ static void reset_memory_tiering(void)
 	}
 }
 
-int sysctl_numa_balancing(struct ctl_table *table, int write,
+static int sysctl_numa_balancing(struct ctl_table *table, int write,
 		void *buffer, size_t *lenp, loff_t *ppos)
 {
 	struct ctl_table t;
@@ -4528,6 +4528,17 @@ static struct ctl_table sched_core_sysctls[] = {
 		.proc_handler	= sysctl_sched_uclamp_handler,
 	},
 #endif /* CONFIG_UCLAMP_TASK */
+#ifdef CONFIG_NUMA_BALANCING
+	{
+		.procname	= "numa_balancing",
+		.data		= NULL, /* filled in by handler */
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= sysctl_numa_balancing,
+		.extra1		= SYSCTL_ZERO,
+		.extra2		= SYSCTL_FOUR,
+	},
+#endif /* CONFIG_NUMA_BALANCING */
 	{}
 };
 static int __init sched_core_sysctl_init(void)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e4a0b8bd941c..8e029a6460bb 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -178,6 +178,11 @@ int __weak arch_asym_cpu_priority(int cpu)
 static unsigned int sysctl_sched_cfs_bandwidth_slice		= 5000UL;
 #endif
 
+#ifdef CONFIG_NUMA_BALANCING
+/* Restrict the NUMA promotion throughput (MB/s) for each target node. */
+static unsigned int sysctl_numa_balancing_promote_rate_limit = 65536;
+#endif
+
 #ifdef CONFIG_SYSCTL
 static struct ctl_table sched_fair_sysctls[] = {
 	{
@@ -197,6 +202,16 @@ static struct ctl_table sched_fair_sysctls[] = {
 		.extra1		= SYSCTL_ONE,
 	},
 #endif
+#ifdef CONFIG_NUMA_BALANCING
+	{
+		.procname	= "numa_balancing_promote_rate_limit_MBps",
+		.data		= &sysctl_numa_balancing_promote_rate_limit,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= SYSCTL_ZERO,
+	},
+#endif /* CONFIG_NUMA_BALANCING */
 	{}
 };
 
@@ -1094,9 +1109,6 @@ unsigned int sysctl_numa_balancing_scan_delay = 1000;
 /* The page with hint page fault latency < threshold in ms is considered hot */
 unsigned int sysctl_numa_balancing_hot_threshold = MSEC_PER_SEC;
 
-/* Restrict the NUMA promotion throughput (MB/s) for each target node. */
-unsigned int sysctl_numa_balancing_promote_rate_limit = 65536;
-
 struct numa_group {
 	refcount_t refcount;
```
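Both knobs stay visible to userspace at their usual locations under /proc/sys/kernel/, since the tables are registered with the "kernel" directory. A small userspace sketch (illustrative only, error handling kept minimal) that reads them back, assuming a kernel built with CONFIG_NUMA_BALANCING:

```c
/*
 * Userspace sketch: read back the two sysctls handled by the tables above.
 * Assumes a kernel built with CONFIG_NUMA_BALANCING and CONFIG_SYSCTL.
 */
#include <stdio.h>

static void show(const char *path)
{
	char buf[64];
	FILE *f = fopen(path, "r");

	if (f && fgets(buf, sizeof(buf), f))
		printf("%s = %s", path, buf);
	if (f)
		fclose(f);
}

int main(void)
{
	show("/proc/sys/kernel/numa_balancing");
	show("/proc/sys/kernel/numa_balancing_promote_rate_limit_MBps");
	return 0;
}
```

Writes go through the same proc files: per the table entries above, numa_balancing is bounded to the range SYSCTL_ZERO..SYSCTL_FOUR by its handler, while numa_balancing_promote_rate_limit_MBps only enforces a lower bound of zero.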