author | Dmitry Osipenko <digetx@gmail.com> | 2019-11-05 00:56:14 +0300
committer | Chanwoo Choi <cw00.choi@samsung.com> | 2019-11-06 12:04:01 +0900
commit | 5c0f6c79595760c9e366c3517314051af530e3e6 (patch)
tree | 47f8943363f826ff6882136883d0aa13aafe2cef /drivers/devfreq/devfreq.c
parent | 28615e37be96877e5bb3559f566e50a291cf7a05 (diff)
PM / devfreq: Add new interrupt_driven flag for governors
Currently, interrupt-driven governors (like the NVIDIA Tegra30 ACTMON governor)
have to set polling_ms=0 in order to avoid periodic polling of the device
status by the devfreq core. This means that the polling interval can't be
changed from userspace for such governors.
The new governor flag allows interrupt-driven governors to convey that the
devfreq core shouldn't poll the device status, so the generic devfreq polling
interval can now be supported by these governors (an illustrative sketch of a
governor using the flag follows below).
Signed-off-by: Dmitry Osipenko <digetx@gmail.com>
Reviewed-by: Chanwoo Choi <cw00.choi@samsung.com>
Signed-off-by: Chanwoo Choi <cw00.choi@samsung.com>
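For illustration, here is a minimal sketch of how an interrupt-driven governor could opt in to the new flag. Only the .interrupt_driven field comes from this patch; the governor name, callbacks, and their bodies are hypothetical placeholders, not part of the series.

/*
 * Illustrative sketch only (not part of this patch): a made-up
 * interrupt-driven governor opting in to the new flag. Only the
 * .interrupt_driven field below comes from this series; the names and
 * callback bodies are placeholders.
 */
#include "governor.h"	/* devfreq governor internals, drivers/devfreq/ */

static int example_irq_gov_target(struct devfreq *devfreq, unsigned long *freq)
{
	/* Frequency would normally be derived from counters captured in the
	 * device interrupt handler rather than from polled status. */
	*freq = devfreq->previous_freq;
	return 0;
}

static int example_irq_gov_handler(struct devfreq *devfreq,
				   unsigned int event, void *data)
{
	/* DEVFREQ_GOV_START/STOP would enable or disable the device IRQ. */
	return 0;
}

static struct devfreq_governor example_irq_governor = {
	.name			= "example_irq_gov",
	/* Tell the devfreq core not to queue the polling work; polling_ms
	 * no longer has to be forced to 0 by the governor. */
	.interrupt_driven	= true,
	.get_target_freq	= example_irq_gov_target,
	.event_handler		= example_irq_gov_handler,
};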
Diffstat (limited to 'drivers/devfreq/devfreq.c')
-rw-r--r-- | drivers/devfreq/devfreq.c | 17 |
1 file changed, 17 insertions, 0 deletions
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index d6c3dce9e9d5..f840e61e5a27 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -410,6 +410,9 @@ static void devfreq_monitor(struct work_struct *work)
  */
 void devfreq_monitor_start(struct devfreq *devfreq)
 {
+	if (devfreq->governor->interrupt_driven)
+		return;
+
 	INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
 	if (devfreq->profile->polling_ms)
 		queue_delayed_work(devfreq_wq, &devfreq->work,
@@ -427,6 +430,9 @@ EXPORT_SYMBOL(devfreq_monitor_start);
  */
 void devfreq_monitor_stop(struct devfreq *devfreq)
 {
+	if (devfreq->governor->interrupt_driven)
+		return;
+
 	cancel_delayed_work_sync(&devfreq->work);
 }
 EXPORT_SYMBOL(devfreq_monitor_stop);
@@ -454,6 +460,10 @@ void devfreq_monitor_suspend(struct devfreq *devfreq)
 		devfreq_update_status(devfreq, devfreq->previous_freq);
 	devfreq->stop_polling = true;
 	mutex_unlock(&devfreq->lock);
+
+	if (devfreq->governor->interrupt_driven)
+		return;
+
 	cancel_delayed_work_sync(&devfreq->work);
 }
 EXPORT_SYMBOL(devfreq_monitor_suspend);
@@ -474,11 +484,15 @@ void devfreq_monitor_resume(struct devfreq *devfreq)
 	if (!devfreq->stop_polling)
 		goto out;
 
+	if (devfreq->governor->interrupt_driven)
+		goto out_update;
+
 	if (!delayed_work_pending(&devfreq->work) &&
 			devfreq->profile->polling_ms)
 		queue_delayed_work(devfreq_wq, &devfreq->work,
 			msecs_to_jiffies(devfreq->profile->polling_ms));
 
+out_update:
 	devfreq->last_stat_updated = jiffies;
 	devfreq->stop_polling = false;
 
@@ -510,6 +524,9 @@ void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
 	if (devfreq->stop_polling)
 		goto out;
 
+	if (devfreq->governor->interrupt_driven)
+		goto out;
+
 	/* if new delay is zero, stop polling */
 	if (!new_delay) {
 		mutex_unlock(&devfreq->lock);
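With core polling skipped, re-evaluation has to be triggered by the governor (or its companion device driver) from the interrupt path instead. Below is a hedged sketch of that side, assuming the existing update_devfreq() helper from drivers/devfreq/governor.h, which must be called with devfreq->lock held; the handler name and the way the devfreq pointer is passed in are hypothetical.

#include <linux/interrupt.h>
#include "governor.h"

/*
 * Sketch, not taken from this patch: a threaded IRQ handler asking the
 * devfreq core to re-evaluate the frequency when the device's activity
 * monitor fires. update_devfreq() requires devfreq->lock to be held.
 */
static irqreturn_t example_actmon_thread_isr(int irq, void *data)
{
	struct devfreq *devfreq = data;

	mutex_lock(&devfreq->lock);
	/* Read the hardware activity counters here, then let the core call
	 * the governor's get_target_freq() and apply the result. */
	update_devfreq(devfreq);
	mutex_unlock(&devfreq->lock);

	return IRQ_HANDLED;
}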