author | Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com> | 2024-06-11 14:04:20 +0200
committer | Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com> | 2024-06-14 09:12:24 +0200
commit | 7d4b4c74432d536a4d8139e0504cf41993d5ee6e (patch)
tree | ea1671dfa4230bee8ec5877a9822b1da191a9804 /drivers/accel
parent | b7ed87ffc7341d108bc0ce14b15f8546eca4aa50 (diff)
accel/ivpu: Remove suspend_reschedule_counter
Don't retry runtime suspend; it is now expected to succeed on the first
try. After autosuspend_delay has passed, the FW should already be idle
and ready for warm suspend.
Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Reviewed-by: Wachowski, Karol <karol.wachowski@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240611120433.1012423-4-jacek.lawrynowicz@linux.intel.com
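For context, the autosuspend mechanism the message refers to is the generic runtime PM autosuspend pattern. Below is a minimal, illustrative sketch of that wiring using the standard pm_runtime helpers; the function names and the delay parameter are hypothetical and are not taken from the ivpu driver.

```c
/*
 * Illustrative sketch only -- generic runtime PM autosuspend wiring, not the
 * ivpu driver's actual code. Names and the delay value are hypothetical.
 */
#include <linux/pm_runtime.h>

static void example_setup_autosuspend(struct device *dev, int delay_ms)
{
	/* Runtime PM itself is assumed to be enabled elsewhere (e.g. by the bus). */
	pm_runtime_set_autosuspend_delay(dev, delay_ms);
	pm_runtime_use_autosuspend(dev);
}

static void example_idle_put(struct device *dev)
{
	/*
	 * Restart the idle timer and drop the usage count; the PM core invokes
	 * the driver's runtime-suspend callback only after the device has been
	 * idle for at least delay_ms.
	 */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}
```

That idle window between the last pm_runtime_mark_last_busy() and the runtime-suspend callback is what the commit relies on for the FW to reach idle, so the callback no longer needs to reschedule and retry.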
Diffstat (limited to 'drivers/accel')
-rw-r--r-- | drivers/accel/ivpu/ivpu_pm.c | 31
-rw-r--r-- | drivers/accel/ivpu/ivpu_pm.h | 3
2 files changed, 12 insertions, 22 deletions
diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
index 02b4eac13f8b..9d5f500afd20 100644
--- a/drivers/accel/ivpu/ivpu_pm.c
+++ b/drivers/accel/ivpu/ivpu_pm.c
@@ -237,33 +237,28 @@ int ivpu_pm_runtime_suspend_cb(struct device *dev)
 {
 	struct drm_device *drm = dev_get_drvdata(dev);
 	struct ivpu_device *vdev = to_ivpu_device(drm);
-	bool hw_is_idle = true;
-	int ret;
+	int ret, ret_d0i3;
+	bool is_idle;
 
 	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
 	drm_WARN_ON(&vdev->drm, work_pending(&vdev->pm->recovery_work));
 
 	ivpu_dbg(vdev, PM, "Runtime suspend..\n");
 
-	if (!ivpu_hw_is_idle(vdev) && vdev->pm->suspend_reschedule_counter) {
-		ivpu_dbg(vdev, PM, "Failed to enter idle, rescheduling suspend, retries left %d\n",
-			 vdev->pm->suspend_reschedule_counter);
-		pm_schedule_suspend(dev, vdev->timeout.reschedule_suspend);
-		vdev->pm->suspend_reschedule_counter--;
-		return -EAGAIN;
-	}
+	is_idle = ivpu_hw_is_idle(vdev);
+	if (!is_idle)
+		ivpu_err(vdev, "NPU is not idle before autosuspend\n");
 
-	if (!vdev->pm->suspend_reschedule_counter)
-		hw_is_idle = false;
-	else if (ivpu_jsm_pwr_d0i3_enter(vdev))
-		hw_is_idle = false;
+	ret_d0i3 = ivpu_jsm_pwr_d0i3_enter(vdev);
+	if (ret_d0i3)
+		ivpu_err(vdev, "Failed to prepare for d0i3: %d\n", ret_d0i3);
 
 	ret = ivpu_suspend(vdev);
 	if (ret)
 		ivpu_err(vdev, "Failed to suspend NPU: %d\n", ret);
 
-	if (!hw_is_idle) {
-		ivpu_err(vdev, "NPU failed to enter idle, force suspended.\n");
+	if (!is_idle || ret_d0i3) {
+		ivpu_err(vdev, "Forcing cold boot due to previous errors\n");
 		atomic_inc(&vdev->pm->reset_counter);
 		ivpu_fw_log_dump(vdev);
 		ivpu_pm_prepare_cold_boot(vdev);
@@ -271,8 +266,6 @@ int ivpu_pm_runtime_suspend_cb(struct device *dev)
 		ivpu_pm_prepare_warm_boot(vdev);
 	}
 
-	vdev->pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT;
-
 	ivpu_dbg(vdev, PM, "Runtime suspend done.\n");
 
 	return 0;
@@ -300,8 +293,7 @@ int ivpu_rpm_get(struct ivpu_device *vdev)
 	int ret;
 
 	ret = pm_runtime_resume_and_get(vdev->drm.dev);
-	if (!drm_WARN_ON(&vdev->drm, ret < 0))
-		vdev->pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT;
+	drm_WARN_ON(&vdev->drm, ret < 0);
 
 	return ret;
 }
@@ -365,7 +357,6 @@ void ivpu_pm_init(struct ivpu_device *vdev)
 	int delay;
 
 	pm->vdev = vdev;
-	pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT;
 
 	init_rwsem(&pm->reset_lock);
 	atomic_set(&pm->reset_pending, 0);
diff --git a/drivers/accel/ivpu/ivpu_pm.h b/drivers/accel/ivpu/ivpu_pm.h
index ec60fbeefefc..e524412765be 100644
--- a/drivers/accel/ivpu/ivpu_pm.h
+++ b/drivers/accel/ivpu/ivpu_pm.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
  */
 
 #ifndef __IVPU_PM_H__
@@ -19,7 +19,6 @@ struct ivpu_pm_info {
 	atomic_t reset_counter;
 	atomic_t reset_pending;
 	bool is_warmboot;
-	u32 suspend_reschedule_counter;
 };
 
 void ivpu_pm_init(struct ivpu_device *vdev);