author    Waiman Long <longman@redhat.com>    2021-08-10 23:06:02 -0400
committer Tejun Heo <tj@kernel.org>           2021-08-11 08:04:43 -1000
commit    e7cc9888dc57927f0977d346dab64a5bdfc3da59 (patch)
tree      8dfdf14085907d9548698f00e879eeb777a4b937 /kernel/cgroup/cpuset.c
parent    b4cc61960879025665a46085cc9b3fa334b34cc8 (diff)
cgroup/cpuset: Enable event notification when partition state changes
A valid cpuset partition can become invalid if all its CPUs are offlined
or somehow removed. This can happen through external events without
"cpuset.cpus.partition" being touched at all.

Users that rely on a partition being present currently have no simple way
to be notified of such an event other than constant periodic polling,
which is both inefficient and cumbersome. To make life easier for those
users, event notification is now enabled for "cpuset.cpus.partition"
whenever its state changes.

Suggested-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
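With this change, a userspace monitor can block in poll(2) on the file and
re-read it when the kernel signals a state change, instead of re-reading on
a timer. A minimal sketch, assuming a cgroup v2 hierarchy mounted at
/sys/fs/cgroup and a hypothetical partition directory "mypart"; as with
other cgroup event files, the wakeup arrives as POLLPRI/POLLERR:

/* Sketch: wait for cpuset.cpus.partition state changes via poll(2).
 * The cgroup path below is hypothetical; adjust to the real partition.
 */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[64];
	ssize_t n;
	int fd = open("/sys/fs/cgroup/mypart/cpuset.cpus.partition",
		      O_RDONLY);

	if (fd < 0)
		return 1;

	for (;;) {
		struct pollfd pfd = { .fd = fd, .events = POLLPRI };

		if (poll(&pfd, 1, -1) < 0)
			break;
		/* Re-read from the start to fetch the new state. */
		if (lseek(fd, 0, SEEK_SET) < 0)
			break;
		n = read(fd, buf, sizeof(buf) - 1);
		if (n < 0)
			break;
		buf[n] = '\0';
		printf("partition state: %s", buf);
	}
	close(fd);
	return 0;
}

The same edge can also be observed via inotify or an epoll set; the point
is that the kernel now generates the wakeup, so no timer-driven re-read is
needed.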
Diffstat (limited to 'kernel/cgroup/cpuset.c')
-rw-r--r--  kernel/cgroup/cpuset.c | 46
1 file changed, 35 insertions(+), 11 deletions(-)
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 65258102e030..fcc11f2d3b5b 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -160,6 +160,9 @@ struct cpuset {
*/
int use_parent_ecpus;
int child_ecpus_count;
+
+ /* Handle for cpuset.cpus.partition */
+ struct cgroup_file partition_file;
};
/*
@@ -263,6 +266,16 @@ static inline int is_partition_root(const struct cpuset *cs)
return cs->partition_root_state > 0;
}
+/*
+ * Send a notification event whenever partition_root_state changes.
+ */
+static inline void notify_partition_change(struct cpuset *cs,
+ int old_prs, int new_prs)
+{
+ if (old_prs != new_prs)
+ cgroup_file_notify(&cs->partition_file);
+}
+
static struct cpuset top_cpuset = {
.flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) |
(1 << CS_MEM_EXCLUSIVE)),
@@ -1148,7 +1161,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
struct cpuset *parent = parent_cs(cpuset);
int adding; /* Moving cpus from effective_cpus to subparts_cpus */
int deleting; /* Moving cpus from subparts_cpus to effective_cpus */
- int new_prs;
+ int old_prs, new_prs;
bool part_error = false; /* Partition error? */
percpu_rwsem_assert_held(&cpuset_rwsem);
@@ -1184,7 +1197,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
* A cpumask update cannot make parent's effective_cpus become empty.
*/
adding = deleting = false;
- new_prs = cpuset->partition_root_state;
+ old_prs = new_prs = cpuset->partition_root_state;
if (cmd == partcmd_enable) {
cpumask_copy(tmp->addmask, cpuset->cpus_allowed);
adding = true;
@@ -1274,7 +1287,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
parent->subparts_cpus);
}
- if (!adding && !deleting && (new_prs == cpuset->partition_root_state))
+ if (!adding && !deleting && (new_prs == old_prs))
return 0;
/*
@@ -1302,9 +1315,11 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
parent->nr_subparts_cpus = cpumask_weight(parent->subparts_cpus);
- if (cpuset->partition_root_state != new_prs)
+ if (old_prs != new_prs)
cpuset->partition_root_state = new_prs;
+
spin_unlock_irq(&callback_lock);
+ notify_partition_change(cpuset, old_prs, new_prs);
return cmd == partcmd_update;
}
@@ -1326,7 +1341,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
struct cpuset *cp;
struct cgroup_subsys_state *pos_css;
bool need_rebuild_sched_domains = false;
- int new_prs;
+ int old_prs, new_prs;
rcu_read_lock();
cpuset_for_each_descendant_pre(cp, pos_css, cs) {
@@ -1366,8 +1381,8 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
* update_tasks_cpumask() again for tasks in the parent
* cpuset if the parent's subparts_cpus changes.
*/
- new_prs = cp->partition_root_state;
- if ((cp != cs) && new_prs) {
+ old_prs = new_prs = cp->partition_root_state;
+ if ((cp != cs) && old_prs) {
switch (parent->partition_root_state) {
case PRS_DISABLED:
/*
@@ -1438,10 +1453,11 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
}
}
- if (new_prs != cp->partition_root_state)
+ if (new_prs != old_prs)
cp->partition_root_state = new_prs;
spin_unlock_irq(&callback_lock);
+ notify_partition_change(cp, old_prs, new_prs);
WARN_ON(!is_in_v2_mode() &&
!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
@@ -2023,6 +2039,7 @@ out:
spin_lock_irq(&callback_lock);
cs->partition_root_state = new_prs;
spin_unlock_irq(&callback_lock);
+ notify_partition_change(cs, old_prs, new_prs);
}
free_cpumasks(NULL, &tmpmask);
@@ -2708,6 +2725,7 @@ static struct cftype dfl_files[] = {
.write = sched_partition_write,
.private = FILE_PARTITION_ROOT,
.flags = CFTYPE_NOT_ON_ROOT,
+ .file_offset = offsetof(struct cpuset, partition_file),
},
{
@@ -3103,11 +3121,17 @@ retry:
*/
if ((parent->partition_root_state == PRS_ERROR) ||
cpumask_empty(&new_cpus)) {
+ int old_prs;
+
update_parent_subparts_cpumask(cs, partcmd_disable,
NULL, tmp);
- spin_lock_irq(&callback_lock);
- cs->partition_root_state = PRS_ERROR;
- spin_unlock_irq(&callback_lock);
+ old_prs = cs->partition_root_state;
+ if (old_prs != PRS_ERROR) {
+ spin_lock_irq(&callback_lock);
+ cs->partition_root_state = PRS_ERROR;
+ spin_unlock_irq(&callback_lock);
+ notify_partition_change(cs, old_prs, PRS_ERROR);
+ }
}
cpuset_force_rebuild();
}