author     Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2024-01-29 17:27:33 +0100
committer  Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2024-02-05 14:28:54 +0100
commit     a4b64b893428734fc872a162ca7d92aa1ca1c429 (patch)
tree       71c95c5c8f17e100f5c2cc7f6ef45eff04a6d8a9 /drivers/base/power
parent     ac6f87aaa26fa88680db23051a5d2b13a08d13b8 (diff)
PM: sleep: Move some assignments from under a lock
The async_error and pm_transition variables are set under dpm_list_mtx in
multiple places in the system-wide device PM core code, which is
unnecessary and confusing, so rearrange the code so that the variables in
question are set before acquiring the lock.

While at it, add some empty code lines around locking to improve the
consistency of the code.

No intentional functional impact.

Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Reviewed-by: Stanislaw Gruszka <stanislaw.gruszka@linux.intel.com>
Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>
Diffstat (limited to 'drivers/base/power')
-rw-r--r--  drivers/base/power/main.c  28
1 file changed, 21 insertions(+), 7 deletions(-)
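
Every hunk in the diff below applies the same pattern: the plain assignments to
pm_transition and async_error are hoisted out of the dpm_list_mtx critical
section, so the lock only covers the device-list walk. The following is a
minimal userspace sketch of that pattern, not the patched kernel code; it uses
pthreads instead of the kernel mutex API, and all demo_* names are hypothetical
stand-ins.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t demo_list_lock = PTHREAD_MUTEX_INITIALIZER;
static int demo_transition;   /* stands in for pm_transition */
static int demo_async_error;  /* stands in for async_error   */

static void demo_suspend(int state)
{
	/* Plain assignments first: they do not need the list lock. */
	demo_transition = state;
	demo_async_error = 0;

	pthread_mutex_lock(&demo_list_lock);

	/* Only the list traversal itself runs under the lock. */
	printf("walking device list, transition %d, async_error %d\n",
	       demo_transition, demo_async_error);

	pthread_mutex_unlock(&demo_list_lock);
}

int main(void)
{
	demo_suspend(1);
	return 0;
}

As the commit message notes, taking the lock around those assignments was
unnecessary; keeping them outside makes the scope of the critical section
easier to read.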
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 892d706af353..89a3ddd642c9 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -707,9 +707,9 @@ static void dpm_noirq_resume_devices(pm_message_t state)
trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
async_error = 0;
+ pm_transition = state;
mutex_lock(&dpm_list_mtx);
- pm_transition = state;
/*
* Trigger the resume of "async" devices upfront so they don't have to
@@ -847,9 +847,9 @@ void dpm_resume_early(pm_message_t state)
trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
async_error = 0;
+ pm_transition = state;
mutex_lock(&dpm_list_mtx);
- pm_transition = state;
/*
* Trigger the resume of "async" devices upfront so they don't have to
@@ -1012,10 +1012,11 @@ void dpm_resume(pm_message_t state)
trace_suspend_resume(TPS("dpm_resume"), state.event, true);
might_sleep();
- mutex_lock(&dpm_list_mtx);
pm_transition = state;
async_error = 0;
+ mutex_lock(&dpm_list_mtx);
+
/*
* Trigger the resume of "async" devices upfront so they don't have to
* wait for the "non-async" ones they don't depend on.
@@ -1294,10 +1295,12 @@ static int dpm_noirq_suspend_devices(pm_message_t state)
int error = 0;
trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
- mutex_lock(&dpm_list_mtx);
+
pm_transition = state;
async_error = 0;
+ mutex_lock(&dpm_list_mtx);
+
while (!list_empty(&dpm_late_early_list)) {
struct device *dev = to_device(dpm_late_early_list.prev);
@@ -1320,7 +1323,9 @@ static int dpm_noirq_suspend_devices(pm_message_t state)
if (error || async_error)
break;
}
+
mutex_unlock(&dpm_list_mtx);
+
async_synchronize_full();
if (!error)
error = async_error;
@@ -1470,11 +1475,14 @@ int dpm_suspend_late(pm_message_t state)
int error = 0;
trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
- wake_up_all_idle_cpus();
- mutex_lock(&dpm_list_mtx);
+
pm_transition = state;
async_error = 0;
+ wake_up_all_idle_cpus();
+
+ mutex_lock(&dpm_list_mtx);
+
while (!list_empty(&dpm_suspended_list)) {
struct device *dev = to_device(dpm_suspended_list.prev);
@@ -1498,7 +1506,9 @@ int dpm_suspend_late(pm_message_t state)
if (error || async_error)
break;
}
+
mutex_unlock(&dpm_list_mtx);
+
async_synchronize_full();
if (!error)
error = async_error;
@@ -1745,9 +1755,11 @@ int dpm_suspend(pm_message_t state)
devfreq_suspend();
cpufreq_suspend();
- mutex_lock(&dpm_list_mtx);
pm_transition = state;
async_error = 0;
+
+ mutex_lock(&dpm_list_mtx);
+
while (!list_empty(&dpm_prepared_list)) {
struct device *dev = to_device(dpm_prepared_list.prev);
@@ -1771,7 +1783,9 @@ int dpm_suspend(pm_message_t state)
if (error || async_error)
break;
}
+
mutex_unlock(&dpm_list_mtx);
+
async_synchronize_full();
if (!error)
error = async_error;