diff options
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/base/power/common.c | 21 | ||||
-rw-r--r-- | drivers/base/power/domain.c | 33 | ||||
-rw-r--r-- | drivers/cpufreq/Kconfig.arm | 6 | ||||
-rw-r--r-- | drivers/cpufreq/cpufreq-dt-platdev.c | 4 | ||||
-rw-r--r-- | drivers/cpufreq/cpufreq.c | 3 | ||||
-rw-r--r-- | drivers/cpufreq/cpufreq_conservative.c | 3 | ||||
-rw-r--r-- | drivers/cpufreq/cpufreq_stats.c | 14 | ||||
-rw-r--r-- | drivers/cpufreq/cpufreq_userspace.c | 76 | ||||
-rw-r--r-- | drivers/cpufreq/intel_pstate.c | 6 | ||||
-rw-r--r-- | drivers/cpufreq/pmac32-cpufreq.c | 7 | ||||
-rw-r--r-- | drivers/cpufreq/qcom-cpufreq-nvmem.c | 208 | ||||
-rw-r--r-- | drivers/cpufreq/tegra194-cpufreq.c | 153 | ||||
-rw-r--r-- | drivers/cpufreq/ti-cpufreq.c | 1 | ||||
-rw-r--r-- | drivers/devfreq/devfreq.c | 14 | ||||
-rw-r--r-- | drivers/devfreq/event/exynos-ppmu.c | 13 | ||||
-rw-r--r-- | drivers/devfreq/event/rockchip-dfi.c | 814 | ||||
-rw-r--r-- | drivers/devfreq/mtk-cci-devfreq.c | 9 | ||||
-rw-r--r-- | drivers/devfreq/rk3399_dmc.c | 10 | ||||
-rw-r--r-- | drivers/opp/core.c | 231 | ||||
-rw-r--r-- | drivers/opp/debugfs.c | 2 | ||||
-rw-r--r-- | drivers/opp/of.c | 74 | ||||
-rw-r--r-- | drivers/opp/opp.h | 4 | ||||
-rw-r--r-- | drivers/powercap/intel_rapl_common.c | 2 |
23 files changed, 1250 insertions, 458 deletions
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c index 72115917e0bd..44ec20918a4d 100644 --- a/drivers/base/power/common.c +++ b/drivers/base/power/common.c @@ -228,3 +228,24 @@ void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd) device_pm_check_callbacks(dev); } EXPORT_SYMBOL_GPL(dev_pm_domain_set); + +/** + * dev_pm_domain_set_performance_state - Request a new performance state. + * @dev: The device to make the request for. + * @state: Target performance state for the device. + * + * This function should be called when a new performance state needs to be + * requested for a device that is attached to a PM domain. Note that, the + * support for performance scaling for PM domains is optional. + * + * Returns 0 on success and when performance scaling isn't supported, negative + * error code on failure. + */ +int dev_pm_domain_set_performance_state(struct device *dev, unsigned int state) +{ + if (dev->pm_domain && dev->pm_domain->set_performance_state) + return dev->pm_domain->set_performance_state(dev, state); + + return 0; +} +EXPORT_SYMBOL_GPL(dev_pm_domain_set_performance_state); diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 5cb2023581d4..e38fc140d113 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -419,6 +419,25 @@ static void genpd_restore_performance_state(struct device *dev, genpd_set_performance_state(dev, state); } +static int genpd_dev_pm_set_performance_state(struct device *dev, + unsigned int state) +{ + struct generic_pm_domain *genpd = dev_to_genpd(dev); + int ret = 0; + + genpd_lock(genpd); + if (pm_runtime_suspended(dev)) { + dev_gpd_data(dev)->rpm_pstate = state; + } else { + ret = genpd_set_performance_state(dev, state); + if (!ret) + dev_gpd_data(dev)->rpm_pstate = 0; + } + genpd_unlock(genpd); + + return ret; +} + /** * dev_pm_genpd_set_performance_state- Set performance state of device's power * domain. 
@@ -437,7 +456,6 @@ static void genpd_restore_performance_state(struct device *dev, int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state) { struct generic_pm_domain *genpd; - int ret = 0; genpd = dev_to_genpd_safe(dev); if (!genpd) @@ -447,17 +465,7 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state) !dev->power.subsys_data->domain_data)) return -EINVAL; - genpd_lock(genpd); - if (pm_runtime_suspended(dev)) { - dev_gpd_data(dev)->rpm_pstate = state; - } else { - ret = genpd_set_performance_state(dev, state); - if (!ret) - dev_gpd_data(dev)->rpm_pstate = 0; - } - genpd_unlock(genpd); - - return ret; + return genpd_dev_pm_set_performance_state(dev, state); } EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state); @@ -2079,6 +2087,7 @@ int pm_genpd_init(struct generic_pm_domain *genpd, genpd->domain.ops.restore_noirq = genpd_restore_noirq; genpd->domain.ops.complete = genpd_complete; genpd->domain.start = genpd_dev_pm_start; + genpd->domain.set_performance_state = genpd_dev_pm_set_performance_state; if (genpd->flags & GENPD_FLAG_PM_CLK) { genpd->dev_ops.stop = pm_clk_suspend; diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 123b4bbfcfee..f911606897b8 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -90,7 +90,7 @@ config ARM_VEXPRESS_SPC_CPUFREQ config ARM_BRCMSTB_AVS_CPUFREQ tristate "Broadcom STB AVS CPUfreq driver" - depends on ARCH_BRCMSTB || COMPILE_TEST + depends on (ARCH_BRCMSTB && !ARM_SCMI_CPUFREQ) || COMPILE_TEST default y help Some Broadcom STB SoCs use a co-processor running proprietary firmware @@ -124,8 +124,8 @@ config ARM_IMX_CPUFREQ_DT tristate "Freescale i.MX8M cpufreq support" depends on ARCH_MXC && CPUFREQ_DT help - This adds cpufreq driver support for Freescale i.MX8M series SoCs, - based on cpufreq-dt. + This adds cpufreq driver support for Freescale i.MX7/i.MX8M + series SoCs, based on cpufreq-dt. If in doubt, say N. diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index fb2875ce1fdd..11b3e34b7696 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -142,9 +142,11 @@ static const struct of_device_id blocklist[] __initconst = { { .compatible = "nvidia,tegra234", }, { .compatible = "qcom,apq8096", }, + { .compatible = "qcom,msm8909", }, { .compatible = "qcom,msm8996", }, { .compatible = "qcom,msm8998", }, { .compatible = "qcom,qcm2290", }, + { .compatible = "qcom,qcm6490", }, { .compatible = "qcom,qcs404", }, { .compatible = "qcom,qdu1000", }, { .compatible = "qcom,sa8155p" }, @@ -176,7 +178,9 @@ static const struct of_device_id blocklist[] __initconst = { { .compatible = "ti,omap3", }, { .compatible = "ti,am625", }, { .compatible = "ti,am62a7", }, + { .compatible = "ti,am62p5", }, + { .compatible = "qcom,ipq6018", }, { .compatible = "qcom,ipq8064", }, { .compatible = "qcom,apq8064", }, { .compatible = "qcom,msm8974", }, diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 15c440e5c773..934d35f570b7 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1544,7 +1544,7 @@ static int cpufreq_online(unsigned int cpu) /* * Register with the energy model before - * sched_cpufreq_governor_change() is called, which will result + * sugov_eas_rebuild_sd() is called, which will result * in rebuilding of the sched domains, which should only be done * once the energy model is properly initialized for the policy * first. 
@@ -2652,7 +2652,6 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, ret = cpufreq_start_governor(policy); if (!ret) { pr_debug("governor change\n"); - sched_cpufreq_governor_change(policy, old_gov); return 0; } cpufreq_exit_governor(policy); diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index b6bd0ff35323..56500b25d77c 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -187,8 +187,7 @@ static ssize_t down_threshold_store(struct gov_attr_set *attr_set, ret = sscanf(buf, "%u", &input); /* cannot be lower than 1 otherwise freq will not fall */ - if (ret != 1 || input < 1 || input > 100 || - input >= dbs_data->up_threshold) + if (ret != 1 || input < 1 || input >= dbs_data->up_threshold) return -EINVAL; cs_tuners->down_threshold = input; diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c index a33df3c66c88..40a9ff18da06 100644 --- a/drivers/cpufreq/cpufreq_stats.c +++ b/drivers/cpufreq/cpufreq_stats.c @@ -131,23 +131,23 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf) len += sysfs_emit_at(buf, len, " From : To\n"); len += sysfs_emit_at(buf, len, " : "); for (i = 0; i < stats->state_num; i++) { - if (len >= PAGE_SIZE) + if (len >= PAGE_SIZE - 1) break; len += sysfs_emit_at(buf, len, "%9u ", stats->freq_table[i]); } - if (len >= PAGE_SIZE) - return PAGE_SIZE; + if (len >= PAGE_SIZE - 1) + return PAGE_SIZE - 1; len += sysfs_emit_at(buf, len, "\n"); for (i = 0; i < stats->state_num; i++) { - if (len >= PAGE_SIZE) + if (len >= PAGE_SIZE - 1) break; len += sysfs_emit_at(buf, len, "%9u: ", stats->freq_table[i]); for (j = 0; j < stats->state_num; j++) { - if (len >= PAGE_SIZE) + if (len >= PAGE_SIZE - 1) break; if (pending) @@ -157,12 +157,12 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf) len += sysfs_emit_at(buf, len, "%9u ", count); } - if (len >= PAGE_SIZE) + if (len >= PAGE_SIZE - 1) break; len += sysfs_emit_at(buf, len, "\n"); } - if (len >= PAGE_SIZE) { + if (len >= PAGE_SIZE - 1) { pr_warn_once("cpufreq transition table exceeds PAGE_SIZE. 
Disabling\n"); return -EFBIG; } diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c index 50a4d7846580..2c42fee76daa 100644 --- a/drivers/cpufreq/cpufreq_userspace.c +++ b/drivers/cpufreq/cpufreq_userspace.c @@ -15,8 +15,11 @@ #include <linux/mutex.h> #include <linux/slab.h> -static DEFINE_PER_CPU(unsigned int, cpu_is_managed); -static DEFINE_MUTEX(userspace_mutex); +struct userspace_policy { + unsigned int is_managed; + unsigned int setspeed; + struct mutex mutex; +}; /** * cpufreq_set - set the CPU frequency @@ -28,19 +31,19 @@ static DEFINE_MUTEX(userspace_mutex); static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq) { int ret = -EINVAL; - unsigned int *setspeed = policy->governor_data; + struct userspace_policy *userspace = policy->governor_data; pr_debug("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq); - mutex_lock(&userspace_mutex); - if (!per_cpu(cpu_is_managed, policy->cpu)) + mutex_lock(&userspace->mutex); + if (!userspace->is_managed) goto err; - *setspeed = freq; + userspace->setspeed = freq; ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L); err: - mutex_unlock(&userspace_mutex); + mutex_unlock(&userspace->mutex); return ret; } @@ -51,67 +54,74 @@ static ssize_t show_speed(struct cpufreq_policy *policy, char *buf) static int cpufreq_userspace_policy_init(struct cpufreq_policy *policy) { - unsigned int *setspeed; + struct userspace_policy *userspace; - setspeed = kzalloc(sizeof(*setspeed), GFP_KERNEL); - if (!setspeed) + userspace = kzalloc(sizeof(*userspace), GFP_KERNEL); + if (!userspace) return -ENOMEM; - policy->governor_data = setspeed; + mutex_init(&userspace->mutex); + + policy->governor_data = userspace; return 0; } +/* + * Any routine that writes to the policy struct will hold the "rwsem" of + * policy struct that means it is free to free "governor_data" here. 
+ */ static void cpufreq_userspace_policy_exit(struct cpufreq_policy *policy) { - mutex_lock(&userspace_mutex); kfree(policy->governor_data); policy->governor_data = NULL; - mutex_unlock(&userspace_mutex); } static int cpufreq_userspace_policy_start(struct cpufreq_policy *policy) { - unsigned int *setspeed = policy->governor_data; + struct userspace_policy *userspace = policy->governor_data; BUG_ON(!policy->cur); pr_debug("started managing cpu %u\n", policy->cpu); - mutex_lock(&userspace_mutex); - per_cpu(cpu_is_managed, policy->cpu) = 1; - *setspeed = policy->cur; - mutex_unlock(&userspace_mutex); + mutex_lock(&userspace->mutex); + userspace->is_managed = 1; + userspace->setspeed = policy->cur; + mutex_unlock(&userspace->mutex); return 0; } static void cpufreq_userspace_policy_stop(struct cpufreq_policy *policy) { - unsigned int *setspeed = policy->governor_data; + struct userspace_policy *userspace = policy->governor_data; pr_debug("managing cpu %u stopped\n", policy->cpu); - mutex_lock(&userspace_mutex); - per_cpu(cpu_is_managed, policy->cpu) = 0; - *setspeed = 0; - mutex_unlock(&userspace_mutex); + mutex_lock(&userspace->mutex); + userspace->is_managed = 0; + userspace->setspeed = 0; + mutex_unlock(&userspace->mutex); } static void cpufreq_userspace_policy_limits(struct cpufreq_policy *policy) { - unsigned int *setspeed = policy->governor_data; + struct userspace_policy *userspace = policy->governor_data; - mutex_lock(&userspace_mutex); + mutex_lock(&userspace->mutex); pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz, last set to %u kHz\n", - policy->cpu, policy->min, policy->max, policy->cur, *setspeed); - - if (policy->max < *setspeed) - __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H); - else if (policy->min > *setspeed) - __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L); + policy->cpu, policy->min, policy->max, policy->cur, userspace->setspeed); + + if (policy->max < userspace->setspeed) + __cpufreq_driver_target(policy, policy->max, + CPUFREQ_RELATION_H); + else if (policy->min > userspace->setspeed) + __cpufreq_driver_target(policy, policy->min, + CPUFREQ_RELATION_L); else - __cpufreq_driver_target(policy, *setspeed, CPUFREQ_RELATION_L); + __cpufreq_driver_target(policy, userspace->setspeed, + CPUFREQ_RELATION_L); - mutex_unlock(&userspace_mutex); + mutex_unlock(&userspace->mutex); } static struct cpufreq_governor cpufreq_gov_userspace = { diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index dc50c9fb488d..a534a1f7f1ee 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -571,13 +571,9 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu) static inline void update_turbo_state(void) { u64 misc_en; - struct cpudata *cpu; - cpu = all_cpu_data[0]; rdmsrl(MSR_IA32_MISC_ENABLE, misc_en); - global.turbo_disabled = - (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE || - cpu->pstate.max_pstate == cpu->pstate.turbo_pstate); + global.turbo_disabled = misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE; } static int min_perf_pct_min(void) diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c index ec75e79659ac..df3567c1e93b 100644 --- a/drivers/cpufreq/pmac32-cpufreq.c +++ b/drivers/cpufreq/pmac32-cpufreq.c @@ -24,6 +24,7 @@ #include <linux/device.h> #include <linux/hardirq.h> #include <linux/of.h> +#include <linux/of_address.h> #include <asm/machdep.h> #include <asm/irq.h> @@ -378,10 +379,9 @@ static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy) 
static u32 read_gpio(struct device_node *np) { - const u32 *reg = of_get_property(np, "reg", NULL); - u32 offset; + u64 offset; - if (reg == NULL) + if (of_property_read_reg(np, 0, &offset, NULL) < 0) return 0; /* That works for all keylargos but shall be fixed properly * some day... The problem is that it seems we can't rely @@ -389,7 +389,6 @@ static u32 read_gpio(struct device_node *np) * relative to the base of KeyLargo or to the base of the * GPIO space, and the device-tree doesn't help. */ - offset = *reg; if (offset < KEYLARGO_GPIO_LEVELS0) offset += KEYLARGO_GPIO_LEVELS0; return offset; diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c index 84d7033e5efe..15367ac08b2b 100644 --- a/drivers/cpufreq/qcom-cpufreq-nvmem.c +++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c @@ -30,6 +30,14 @@ #include <dt-bindings/arm/qcom,ids.h> +enum ipq806x_versions { + IPQ8062_VERSION = 0, + IPQ8064_VERSION, + IPQ8065_VERSION, +}; + +#define IPQ6000_VERSION BIT(2) + struct qcom_cpufreq_drv; struct qcom_cpufreq_match_data { @@ -40,16 +48,38 @@ struct qcom_cpufreq_match_data { const char **genpd_names; }; +struct qcom_cpufreq_drv_cpu { + int opp_token; +}; + struct qcom_cpufreq_drv { - int *opp_tokens; u32 versions; const struct qcom_cpufreq_match_data *data; + struct qcom_cpufreq_drv_cpu cpus[]; }; static struct platform_device *cpufreq_dt_pdev, *cpufreq_pdev; +static int qcom_cpufreq_simple_get_version(struct device *cpu_dev, + struct nvmem_cell *speedbin_nvmem, + char **pvs_name, + struct qcom_cpufreq_drv *drv) +{ + u8 *speedbin; + + *pvs_name = NULL; + speedbin = nvmem_cell_read(speedbin_nvmem, NULL); + if (IS_ERR(speedbin)) + return PTR_ERR(speedbin); + + dev_dbg(cpu_dev, "speedbin: %d\n", *speedbin); + drv->versions = 1 << *speedbin; + kfree(speedbin); + return 0; +} + static void get_krait_bin_format_a(struct device *cpu_dev, - int *speed, int *pvs, int *pvs_ver, + int *speed, int *pvs, u8 *buf) { u32 pte_efuse; @@ -180,8 +210,7 @@ static int qcom_cpufreq_krait_name_version(struct device *cpu_dev, switch (len) { case 4: - get_krait_bin_format_a(cpu_dev, &speed, &pvs, &pvs_ver, - speedbin); + get_krait_bin_format_a(cpu_dev, &speed, &pvs, speedbin); break; case 8: get_krait_bin_format_b(cpu_dev, &speed, &pvs, &pvs_ver, @@ -203,6 +232,114 @@ len_error: return ret; } +static int qcom_cpufreq_ipq8064_name_version(struct device *cpu_dev, + struct nvmem_cell *speedbin_nvmem, + char **pvs_name, + struct qcom_cpufreq_drv *drv) +{ + int speed = 0, pvs = 0; + int msm_id, ret = 0; + u8 *speedbin; + size_t len; + + speedbin = nvmem_cell_read(speedbin_nvmem, &len); + if (IS_ERR(speedbin)) + return PTR_ERR(speedbin); + + if (len != 4) { + dev_err(cpu_dev, "Unable to read nvmem data. Defaulting to 0!\n"); + ret = -ENODEV; + goto exit; + } + + get_krait_bin_format_a(cpu_dev, &speed, &pvs, speedbin); + + ret = qcom_smem_get_soc_id(&msm_id); + if (ret) + goto exit; + + switch (msm_id) { + case QCOM_ID_IPQ8062: + drv->versions = BIT(IPQ8062_VERSION); + break; + case QCOM_ID_IPQ8064: + case QCOM_ID_IPQ8066: + case QCOM_ID_IPQ8068: + drv->versions = BIT(IPQ8064_VERSION); + break; + case QCOM_ID_IPQ8065: + case QCOM_ID_IPQ8069: + drv->versions = BIT(IPQ8065_VERSION); + break; + default: + dev_err(cpu_dev, + "SoC ID %u is not part of IPQ8064 family, limiting to 1.0GHz!\n", + msm_id); + drv->versions = BIT(IPQ8062_VERSION); + break; + } + + /* IPQ8064 speed is never fused. Only pvs values are fused. 
*/ + snprintf(*pvs_name, sizeof("speed0-pvsXX"), "speed0-pvs%d", pvs); + +exit: + kfree(speedbin); + return ret; +} + +static int qcom_cpufreq_ipq6018_name_version(struct device *cpu_dev, + struct nvmem_cell *speedbin_nvmem, + char **pvs_name, + struct qcom_cpufreq_drv *drv) +{ + u32 msm_id; + int ret; + u8 *speedbin; + *pvs_name = NULL; + + ret = qcom_smem_get_soc_id(&msm_id); + if (ret) + return ret; + + speedbin = nvmem_cell_read(speedbin_nvmem, NULL); + if (IS_ERR(speedbin)) + return PTR_ERR(speedbin); + + switch (msm_id) { + case QCOM_ID_IPQ6005: + case QCOM_ID_IPQ6010: + case QCOM_ID_IPQ6018: + case QCOM_ID_IPQ6028: + /* Fuse Value Freq BIT to set + * --------------------------------- + * 2’b0 No Limit BIT(0) + * 2’b1 1.5 GHz BIT(1) + */ + drv->versions = 1 << (unsigned int)(*speedbin); + break; + case QCOM_ID_IPQ6000: + /* + * IPQ6018 family only has one bit to advertise the CPU + * speed-bin, but that is not enough for IPQ6000 which + * is only rated up to 1.2GHz. + * So for IPQ6000 manually set BIT(2) based on SMEM ID. + */ + drv->versions = IPQ6000_VERSION; + break; + default: + dev_err(cpu_dev, + "SoC ID %u is not part of IPQ6018 family, limiting to 1.2GHz!\n", + msm_id); + drv->versions = IPQ6000_VERSION; + break; + } + + kfree(speedbin); + return 0; +} + +static const char *generic_genpd_names[] = { "perf", NULL }; + static const struct qcom_cpufreq_match_data match_data_kryo = { .get_version = qcom_cpufreq_kryo_name_version, }; @@ -211,12 +348,25 @@ static const struct qcom_cpufreq_match_data match_data_krait = { .get_version = qcom_cpufreq_krait_name_version, }; +static const struct qcom_cpufreq_match_data match_data_msm8909 = { + .get_version = qcom_cpufreq_simple_get_version, + .genpd_names = generic_genpd_names, +}; + static const char *qcs404_genpd_names[] = { "cpr", NULL }; static const struct qcom_cpufreq_match_data match_data_qcs404 = { .genpd_names = qcs404_genpd_names, }; +static const struct qcom_cpufreq_match_data match_data_ipq6018 = { + .get_version = qcom_cpufreq_ipq6018_name_version, +}; + +static const struct qcom_cpufreq_match_data match_data_ipq8064 = { + .get_version = qcom_cpufreq_ipq8064_name_version, +}; + static int qcom_cpufreq_probe(struct platform_device *pdev) { struct qcom_cpufreq_drv *drv; @@ -237,48 +387,39 @@ static int qcom_cpufreq_probe(struct platform_device *pdev) if (!np) return -ENOENT; - ret = of_device_is_compatible(np, "operating-points-v2-kryo-cpu"); + ret = of_device_is_compatible(np, "operating-points-v2-kryo-cpu") || + of_device_is_compatible(np, "operating-points-v2-krait-cpu"); if (!ret) { of_node_put(np); return -ENOENT; } - drv = kzalloc(sizeof(*drv), GFP_KERNEL); + drv = devm_kzalloc(&pdev->dev, struct_size(drv, cpus, num_possible_cpus()), + GFP_KERNEL); if (!drv) return -ENOMEM; match = pdev->dev.platform_data; drv->data = match->data; - if (!drv->data) { - ret = -ENODEV; - goto free_drv; - } + if (!drv->data) + return -ENODEV; if (drv->data->get_version) { speedbin_nvmem = of_nvmem_cell_get(np, NULL); - if (IS_ERR(speedbin_nvmem)) { - ret = dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem), - "Could not get nvmem cell\n"); - goto free_drv; - } + if (IS_ERR(speedbin_nvmem)) + return dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem), + "Could not get nvmem cell\n"); ret = drv->data->get_version(cpu_dev, speedbin_nvmem, &pvs_name, drv); if (ret) { nvmem_cell_put(speedbin_nvmem); - goto free_drv; + return ret; } nvmem_cell_put(speedbin_nvmem); } of_node_put(np); - drv->opp_tokens = kcalloc(num_possible_cpus(), 
sizeof(*drv->opp_tokens), - GFP_KERNEL); - if (!drv->opp_tokens) { - ret = -ENOMEM; - goto free_drv; - } - for_each_possible_cpu(cpu) { struct dev_pm_opp_config config = { .supported_hw = NULL, @@ -304,9 +445,9 @@ static int qcom_cpufreq_probe(struct platform_device *pdev) } if (config.supported_hw || config.genpd_names) { - drv->opp_tokens[cpu] = dev_pm_opp_set_config(cpu_dev, &config); - if (drv->opp_tokens[cpu] < 0) { - ret = drv->opp_tokens[cpu]; + drv->cpus[cpu].opp_token = dev_pm_opp_set_config(cpu_dev, &config); + if (drv->cpus[cpu].opp_token < 0) { + ret = drv->cpus[cpu].opp_token; dev_err(cpu_dev, "Failed to set OPP config\n"); goto free_opp; } @@ -325,11 +466,7 @@ static int qcom_cpufreq_probe(struct platform_device *pdev) free_opp: for_each_possible_cpu(cpu) - dev_pm_opp_clear_config(drv->opp_tokens[cpu]); - kfree(drv->opp_tokens); -free_drv: - kfree(drv); - + dev_pm_opp_clear_config(drv->cpus[cpu].opp_token); return ret; } @@ -341,10 +478,7 @@ static void qcom_cpufreq_remove(struct platform_device *pdev) platform_device_unregister(cpufreq_dt_pdev); for_each_possible_cpu(cpu) - dev_pm_opp_clear_config(drv->opp_tokens[cpu]); - - kfree(drv->opp_tokens); - kfree(drv); + dev_pm_opp_clear_config(drv->cpus[cpu].opp_token); } static struct platform_driver qcom_cpufreq_driver = { @@ -357,9 +491,11 @@ static struct platform_driver qcom_cpufreq_driver = { static const struct of_device_id qcom_cpufreq_match_list[] __initconst = { { .compatible = "qcom,apq8096", .data = &match_data_kryo }, + { .compatible = "qcom,msm8909", .data = &match_data_msm8909 }, { .compatible = "qcom,msm8996", .data = &match_data_kryo }, { .compatible = "qcom,qcs404", .data = &match_data_qcs404 }, - { .compatible = "qcom,ipq8064", .data = &match_data_krait }, + { .compatible = "qcom,ipq6018", .data = &match_data_ipq6018 }, + { .compatible = "qcom,ipq8064", .data = &match_data_ipq8064 }, { .compatible = "qcom,apq8064", .data = &match_data_krait }, { .compatible = "qcom,msm8974", .data = &match_data_krait }, { .compatible = "qcom,msm8960", .data = &match_data_krait }, diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c index 88ef5e57ccd0..59865ea455a8 100644 --- a/drivers/cpufreq/tegra194-cpufreq.c +++ b/drivers/cpufreq/tegra194-cpufreq.c @@ -5,7 +5,6 @@ #include <linux/cpu.h> #include <linux/cpufreq.h> -#include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/module.h> #include <linux/of.h> @@ -21,10 +20,11 @@ #define KHZ 1000 #define REF_CLK_MHZ 408 /* 408 MHz */ -#define US_DELAY 500 #define CPUFREQ_TBL_STEP_HZ (50 * KHZ * KHZ) #define MAX_CNT ~0U +#define MAX_DELTA_KHZ 115200 + #define NDIV_MASK 0x1FF #define CORE_OFFSET(cpu) (cpu * 8) @@ -39,6 +39,12 @@ /* cpufreq transisition latency */ #define TEGRA_CPUFREQ_TRANSITION_LATENCY (300 * 1000) /* unit in nanoseconds */ +struct tegra_cpu_data { + u32 cpuid; + u32 clusterid; + void __iomem *freq_core_reg; +}; + struct tegra_cpu_ctr { u32 cpu; u32 coreclk_cnt, last_coreclk_cnt; @@ -62,6 +68,7 @@ struct tegra_cpufreq_soc { int maxcpus_per_cluster; unsigned int num_clusters; phys_addr_t actmon_cntr_base; + u32 refclk_delta_min; }; struct tegra194_cpufreq_data { @@ -69,6 +76,7 @@ struct tegra194_cpufreq_data { struct cpufreq_frequency_table **bpmp_luts; const struct tegra_cpufreq_soc *soc; bool icc_dram_bw_scaling; + struct tegra_cpu_data *cpu_data; }; static struct workqueue_struct *read_counters_wq; @@ -116,14 +124,8 @@ static void tegra234_get_cpu_cluster_id(u32 cpu, u32 *cpuid, u32 *clusterid) static int 
tegra234_get_cpu_ndiv(u32 cpu, u32 cpuid, u32 clusterid, u64 *ndiv) { struct tegra194_cpufreq_data *data = cpufreq_get_driver_data(); - void __iomem *freq_core_reg; - u64 mpidr_id; - /* use physical id to get address of per core frequency register */ - mpidr_id = (clusterid * data->soc->maxcpus_per_cluster) + cpuid; - freq_core_reg = SCRATCH_FREQ_CORE_REG(data, mpidr_id); - - *ndiv = readl(freq_core_reg) & NDIV_MASK; + *ndiv = readl(data->cpu_data[cpu].freq_core_reg) & NDIV_MASK; return 0; } @@ -131,19 +133,10 @@ static int tegra234_get_cpu_ndiv(u32 cpu, u32 cpuid, u32 clusterid, u64 *ndiv) static void tegra234_set_cpu_ndiv(struct cpufreq_policy *policy, u64 ndiv) { struct tegra194_cpufreq_data *data = cpufreq_get_driver_data(); - void __iomem *freq_core_reg; - u32 cpu, cpuid, clusterid; - u64 mpidr_id; - - for_each_cpu_and(cpu, policy->cpus, cpu_online_mask) { - data->soc->ops->get_cpu_cluster_id(cpu, &cpuid, &clusterid); - - /* use physical id to get address of per core frequency register */ - mpidr_id = (clusterid * data->soc->maxcpus_per_cluster) + cpuid; - freq_core_reg = SCRATCH_FREQ_CORE_REG(data, mpidr_id); + u32 cpu; - writel(ndiv, freq_core_reg); - } + for_each_cpu(cpu, policy->cpus) + writel(ndiv, data->cpu_data[cpu].freq_core_reg); } /* @@ -157,19 +150,35 @@ static void tegra234_read_counters(struct tegra_cpu_ctr *c) { struct tegra194_cpufreq_data *data = cpufreq_get_driver_data(); void __iomem *actmon_reg; - u32 cpuid, clusterid; + u32 delta_refcnt; + int cnt = 0; u64 val; - data->soc->ops->get_cpu_cluster_id(c->cpu, &cpuid, &clusterid); - actmon_reg = CORE_ACTMON_CNTR_REG(data, clusterid, cpuid); + actmon_reg = CORE_ACTMON_CNTR_REG(data, data->cpu_data[c->cpu].clusterid, + data->cpu_data[c->cpu].cpuid); val = readq(actmon_reg); c->last_refclk_cnt = upper_32_bits(val); c->last_coreclk_cnt = lower_32_bits(val); - udelay(US_DELAY); - val = readq(actmon_reg); - c->refclk_cnt = upper_32_bits(val); - c->coreclk_cnt = lower_32_bits(val); + + /* + * The sampling window is based on the minimum number of reference + * clock cycles which is known to give a stable value of CPU frequency. 
+ */ + do { + val = readq(actmon_reg); + c->refclk_cnt = upper_32_bits(val); + c->coreclk_cnt = lower_32_bits(val); + if (c->refclk_cnt < c->last_refclk_cnt) + delta_refcnt = c->refclk_cnt + (MAX_CNT - c->last_refclk_cnt); + else + delta_refcnt = c->refclk_cnt - c->last_refclk_cnt; + if (++cnt >= 0xFFFF) { + pr_warn("cpufreq: problem with refclk on cpu:%d, delta_refcnt:%u, cnt:%d\n", + c->cpu, delta_refcnt, cnt); + break; + } + } while (delta_refcnt < data->soc->refclk_delta_min); } static struct tegra_cpufreq_ops tegra234_cpufreq_ops = { @@ -184,6 +193,7 @@ static const struct tegra_cpufreq_soc tegra234_cpufreq_soc = { .actmon_cntr_base = 0x9000, .maxcpus_per_cluster = 4, .num_clusters = 3, + .refclk_delta_min = 16000, }; static const struct tegra_cpufreq_soc tegra239_cpufreq_soc = { @@ -191,6 +201,7 @@ static const struct tegra_cpufreq_soc tegra239_cpufreq_soc = { .actmon_cntr_base = 0x4000, .maxcpus_per_cluster = 8, .num_clusters = 1, + .refclk_delta_min = 16000, }; static void tegra194_get_cpu_cluster_id(u32 cpu, u32 *cpuid, u32 *clusterid) @@ -231,15 +242,33 @@ static inline u32 map_ndiv_to_freq(struct mrq_cpu_ndiv_limits_response static void tegra194_read_counters(struct tegra_cpu_ctr *c) { + struct tegra194_cpufreq_data *data = cpufreq_get_driver_data(); + u32 delta_refcnt; + int cnt = 0; u64 val; val = read_freq_feedback(); c->last_refclk_cnt = lower_32_bits(val); c->last_coreclk_cnt = upper_32_bits(val); - udelay(US_DELAY); - val = read_freq_feedback(); - c->refclk_cnt = lower_32_bits(val); - c->coreclk_cnt = upper_32_bits(val); + + /* + * The sampling window is based on the minimum number of reference + * clock cycles which is known to give a stable value of CPU frequency. + */ + do { + val = read_freq_feedback(); + c->refclk_cnt = lower_32_bits(val); + c->coreclk_cnt = upper_32_bits(val); + if (c->refclk_cnt < c->last_refclk_cnt) + delta_refcnt = c->refclk_cnt + (MAX_CNT - c->last_refclk_cnt); + else + delta_refcnt = c->refclk_cnt - c->last_refclk_cnt; + if (++cnt >= 0xFFFF) { + pr_warn("cpufreq: problem with refclk on cpu:%d, delta_refcnt:%u, cnt:%d\n", + c->cpu, delta_refcnt, cnt); + break; + } + } while (delta_refcnt < data->soc->refclk_delta_min); } static void tegra_read_counters(struct work_struct *work) @@ -297,9 +326,8 @@ static unsigned int tegra194_calculate_speed(u32 cpu) u32 rate_mhz; /* - * udelay() is required to reconstruct cpu frequency over an - * observation window. Using workqueue to call udelay() with - * interrupts enabled. + * Reconstruct cpu frequency over an observation/sampling window. + * Using workqueue to keep interrupts enabled during the interval. 
*/ read_counters_work.c.cpu = cpu; INIT_WORK_ONSTACK(&read_counters_work.work, tegra_read_counters); @@ -357,19 +385,17 @@ static void tegra194_set_cpu_ndiv(struct cpufreq_policy *policy, u64 ndiv) static unsigned int tegra194_get_speed(u32 cpu) { struct tegra194_cpufreq_data *data = cpufreq_get_driver_data(); + u32 clusterid = data->cpu_data[cpu].clusterid; struct cpufreq_frequency_table *pos; - u32 cpuid, clusterid; unsigned int rate; u64 ndiv; int ret; - data->soc->ops->get_cpu_cluster_id(cpu, &cpuid, &clusterid); - /* reconstruct actual cpu freq using counters */ rate = tegra194_calculate_speed(cpu); /* get last written ndiv value */ - ret = data->soc->ops->get_cpu_ndiv(cpu, cpuid, clusterid, &ndiv); + ret = data->soc->ops->get_cpu_ndiv(cpu, data->cpu_data[cpu].cpuid, clusterid, &ndiv); if (WARN_ON_ONCE(ret)) return rate; @@ -383,9 +409,9 @@ static unsigned int tegra194_get_speed(u32 cpu) if (pos->driver_data != ndiv) continue; - if (abs(pos->frequency - rate) > 115200) { - pr_warn("cpufreq: cpu%d,cur:%u,set:%u,set ndiv:%llu\n", - cpu, rate, pos->frequency, ndiv); + if (abs(pos->frequency - rate) > MAX_DELTA_KHZ) { + pr_warn("cpufreq: cpu%d,cur:%u,set:%u,delta:%d,set ndiv:%llu\n", + cpu, rate, pos->frequency, abs(rate - pos->frequency), ndiv); } else { rate = pos->frequency; } @@ -450,6 +476,8 @@ static int tegra_cpufreq_init_cpufreq_table(struct cpufreq_policy *policy, if (IS_ERR(opp)) continue; + dev_pm_opp_put(opp); + ret = dev_pm_opp_enable(cpu_dev, pos->frequency * KHZ); if (ret < 0) return ret; @@ -473,13 +501,12 @@ static int tegra194_cpufreq_init(struct cpufreq_policy *policy) { struct tegra194_cpufreq_data *data = cpufreq_get_driver_data(); int maxcpus_per_cluster = data->soc->maxcpus_per_cluster; + u32 clusterid = data->cpu_data[policy->cpu].clusterid; struct cpufreq_frequency_table *freq_table; struct cpufreq_frequency_table *bpmp_lut; u32 start_cpu, cpu; - u32 clusterid; int ret; - data->soc->ops->get_cpu_cluster_id(policy->cpu, NULL, &clusterid); if (clusterid >= data->soc->num_clusters || !data->bpmp_luts[clusterid]) return -EINVAL; @@ -578,6 +605,7 @@ static const struct tegra_cpufreq_soc tegra194_cpufreq_soc = { .ops = &tegra194_cpufreq_ops, .maxcpus_per_cluster = 2, .num_clusters = 4, + .refclk_delta_min = 16000, }; static void tegra194_cpufreq_free_resources(void) @@ -657,6 +685,28 @@ tegra_cpufreq_bpmp_read_lut(struct platform_device *pdev, struct tegra_bpmp *bpm return freq_table; } +static int tegra194_cpufreq_store_physids(unsigned int cpu, struct tegra194_cpufreq_data *data) +{ + int num_cpus = data->soc->maxcpus_per_cluster * data->soc->num_clusters; + u32 cpuid, clusterid; + u64 mpidr_id; + + if (cpu > (num_cpus - 1)) { + pr_err("cpufreq: wrong num of cpus or clusters in soc data\n"); + return -EINVAL; + } + + data->soc->ops->get_cpu_cluster_id(cpu, &cpuid, &clusterid); + + mpidr_id = (clusterid * data->soc->maxcpus_per_cluster) + cpuid; + + data->cpu_data[cpu].cpuid = cpuid; + data->cpu_data[cpu].clusterid = clusterid; + data->cpu_data[cpu].freq_core_reg = SCRATCH_FREQ_CORE_REG(data, mpidr_id); + + return 0; +} + static int tegra194_cpufreq_probe(struct platform_device *pdev) { const struct tegra_cpufreq_soc *soc; @@ -664,6 +714,7 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev) struct tegra_bpmp *bpmp; struct device *cpu_dev; int err, i; + u32 cpu; data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); if (!data) @@ -671,7 +722,7 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev) soc = 
of_device_get_match_data(&pdev->dev); - if (soc->ops && soc->maxcpus_per_cluster && soc->num_clusters) { + if (soc->ops && soc->maxcpus_per_cluster && soc->num_clusters && soc->refclk_delta_min) { data->soc = soc; } else { dev_err(&pdev->dev, "soc data missing\n"); @@ -690,6 +741,12 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev) return PTR_ERR(data->regs); } + data->cpu_data = devm_kcalloc(&pdev->dev, data->soc->num_clusters * + data->soc->maxcpus_per_cluster, + sizeof(*data->cpu_data), GFP_KERNEL); + if (!data->cpu_data) + return -ENOMEM; + platform_set_drvdata(pdev, data); bpmp = tegra_bpmp_get(&pdev->dev); @@ -711,6 +768,12 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev) } } + for_each_possible_cpu(cpu) { + err = tegra194_cpufreq_store_physids(cpu, data); + if (err) + goto err_free_res; + } + tegra194_cpufreq_driver.driver_data = data; /* Check for optional OPPv2 and interconnect paths on CPU0 to enable ICC scaling */ diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c index 3c37d7899660..46c41e2ca727 100644 --- a/drivers/cpufreq/ti-cpufreq.c +++ b/drivers/cpufreq/ti-cpufreq.c @@ -338,6 +338,7 @@ static const struct of_device_id ti_cpufreq_of_match[] = { { .compatible = "ti,omap36xx", .data = &omap36xx_soc_data, }, { .compatible = "ti,am625", .data = &am625_soc_data, }, { .compatible = "ti,am62a7", .data = &am625_soc_data, }, + { .compatible = "ti,am62p5", .data = &am625_soc_data, }, /* legacy */ { .compatible = "ti,omap3430", .data = &omap34xx_soc_data, }, { .compatible = "ti,omap3630", .data = &omap36xx_soc_data, }, diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index 474d81831ad3..b3a68d5833bd 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -88,7 +88,7 @@ static unsigned long find_available_min_freq(struct devfreq *devfreq) struct dev_pm_opp *opp; unsigned long min_freq = 0; - opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &min_freq); + opp = dev_pm_opp_find_freq_ceil_indexed(devfreq->dev.parent, &min_freq, 0); if (IS_ERR(opp)) min_freq = 0; else @@ -102,7 +102,7 @@ static unsigned long find_available_max_freq(struct devfreq *devfreq) struct dev_pm_opp *opp; unsigned long max_freq = ULONG_MAX; - opp = dev_pm_opp_find_freq_floor(devfreq->dev.parent, &max_freq); + opp = dev_pm_opp_find_freq_floor_indexed(devfreq->dev.parent, &max_freq, 0); if (IS_ERR(opp)) max_freq = 0; else @@ -196,7 +196,7 @@ static int set_freq_table(struct devfreq *devfreq) return -ENOMEM; for (i = 0, freq = 0; i < devfreq->max_state; i++, freq++) { - opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &freq); + opp = dev_pm_opp_find_freq_ceil_indexed(devfreq->dev.parent, &freq, 0); if (IS_ERR(opp)) { devm_kfree(devfreq->dev.parent, devfreq->freq_table); return PTR_ERR(opp); @@ -2036,18 +2036,18 @@ struct dev_pm_opp *devfreq_recommended_opp(struct device *dev, if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) { /* The freq is an upper bound. opp should be lower */ - opp = dev_pm_opp_find_freq_floor(dev, freq); + opp = dev_pm_opp_find_freq_floor_indexed(dev, freq, 0); /* If not available, use the closest opp */ if (opp == ERR_PTR(-ERANGE)) - opp = dev_pm_opp_find_freq_ceil(dev, freq); + opp = dev_pm_opp_find_freq_ceil_indexed(dev, freq, 0); } else { /* The freq is an lower bound. 
opp should be higher */ - opp = dev_pm_opp_find_freq_ceil(dev, freq); + opp = dev_pm_opp_find_freq_ceil_indexed(dev, freq, 0); /* If not available, use the closest opp */ if (opp == ERR_PTR(-ERANGE)) - opp = dev_pm_opp_find_freq_floor(dev, freq); + opp = dev_pm_opp_find_freq_floor_indexed(dev, freq, 0); } return opp; diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c index 896a6cc93b00..56bac4702006 100644 --- a/drivers/devfreq/event/exynos-ppmu.c +++ b/drivers/devfreq/event/exynos-ppmu.c @@ -12,9 +12,9 @@ #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> -#include <linux/of_address.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/platform_device.h> +#include <linux/property.h> #include <linux/regmap.h> #include <linux/suspend.h> #include <linux/devfreq-event.h> @@ -507,7 +507,6 @@ static int of_get_devfreq_events(struct device_node *np, struct device *dev = info->dev; struct device_node *events_np, *node; int i, j, count; - const struct of_device_id *of_id; int ret; events_np = of_get_child_by_name(np, "events"); @@ -525,13 +524,7 @@ static int of_get_devfreq_events(struct device_node *np, } info->num_events = count; - of_id = of_match_device(exynos_ppmu_id_match, dev); - if (of_id) - info->ppmu_type = (enum exynos_ppmu_type)of_id->data; - else { - of_node_put(events_np); - return -EINVAL; - } + info->ppmu_type = (enum exynos_ppmu_type)device_get_match_data(dev); j = 0; for_each_child_of_node(events_np, node) { diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c index 39ac069cabc7..e2a1e4463b6f 100644 --- a/drivers/devfreq/event/rockchip-dfi.c +++ b/drivers/devfreq/event/rockchip-dfi.c @@ -16,30 +16,71 @@ #include <linux/regmap.h> #include <linux/slab.h> #include <linux/list.h> +#include <linux/seqlock.h> #include <linux/of.h> +#include <linux/of_device.h> +#include <linux/bitfield.h> +#include <linux/bits.h> +#include <linux/perf_event.h> +#include <soc/rockchip/rockchip_grf.h> #include <soc/rockchip/rk3399_grf.h> +#include <soc/rockchip/rk3568_grf.h> +#include <soc/rockchip/rk3588_grf.h> -#define RK3399_DMC_NUM_CH 2 +#define DMC_MAX_CHANNELS 4 + +#define HIWORD_UPDATE(val, mask) ((val) | (mask) << 16) /* DDRMON_CTRL */ #define DDRMON_CTRL 0x04 -#define CLR_DDRMON_CTRL (0x1f0000 << 0) -#define LPDDR4_EN (0x10001 << 4) -#define HARDWARE_EN (0x10001 << 3) -#define LPDDR3_EN (0x10001 << 2) -#define SOFTWARE_EN (0x10001 << 1) -#define SOFTWARE_DIS (0x10000 << 1) -#define TIME_CNT_EN (0x10001 << 0) - +#define DDRMON_CTRL_DDR4 BIT(5) +#define DDRMON_CTRL_LPDDR4 BIT(4) +#define DDRMON_CTRL_HARDWARE_EN BIT(3) +#define DDRMON_CTRL_LPDDR23 BIT(2) +#define DDRMON_CTRL_SOFTWARE_EN BIT(1) +#define DDRMON_CTRL_TIMER_CNT_EN BIT(0) +#define DDRMON_CTRL_DDR_TYPE_MASK (DDRMON_CTRL_DDR4 | \ + DDRMON_CTRL_LPDDR4 | \ + DDRMON_CTRL_LPDDR23) + +#define DDRMON_CH0_WR_NUM 0x20 +#define DDRMON_CH0_RD_NUM 0x24 #define DDRMON_CH0_COUNT_NUM 0x28 #define DDRMON_CH0_DFI_ACCESS_NUM 0x2c #define DDRMON_CH1_COUNT_NUM 0x3c #define DDRMON_CH1_DFI_ACCESS_NUM 0x40 -struct dmc_usage { - u32 access; - u32 total; +#define PERF_EVENT_CYCLES 0x0 +#define PERF_EVENT_READ_BYTES 0x1 +#define PERF_EVENT_WRITE_BYTES 0x2 +#define PERF_EVENT_READ_BYTES0 0x3 +#define PERF_EVENT_WRITE_BYTES0 0x4 +#define PERF_EVENT_READ_BYTES1 0x5 +#define PERF_EVENT_WRITE_BYTES1 0x6 +#define PERF_EVENT_READ_BYTES2 0x7 +#define PERF_EVENT_WRITE_BYTES2 0x8 +#define PERF_EVENT_READ_BYTES3 0x9 +#define PERF_EVENT_WRITE_BYTES3 0xa +#define 
PERF_EVENT_BYTES 0xb +#define PERF_ACCESS_TYPE_MAX 0xc + +/** + * struct dmc_count_channel - structure to hold counter values from the DDR controller + * @access: Number of read and write accesses + * @clock_cycles: DDR clock cycles + * @read_access: number of read accesses + * @write_access: number of write accesses + */ +struct dmc_count_channel { + u64 access; + u64 clock_cycles; + u64 read_access; + u64 write_access; +}; + +struct dmc_count { + struct dmc_count_channel c[DMC_MAX_CHANNELS]; }; /* @@ -49,177 +90,735 @@ struct dmc_usage { */ struct rockchip_dfi { struct devfreq_event_dev *edev; - struct devfreq_event_desc *desc; - struct dmc_usage ch_usage[RK3399_DMC_NUM_CH]; + struct devfreq_event_desc desc; + struct dmc_count last_event_count; + + struct dmc_count last_perf_count; + struct dmc_count total_count; + seqlock_t count_seqlock; /* protects last_perf_count and total_count */ + struct device *dev; void __iomem *regs; struct regmap *regmap_pmu; struct clk *clk; + int usecount; + struct mutex mutex; + u32 ddr_type; + unsigned int channel_mask; + unsigned int max_channels; + enum cpuhp_state cpuhp_state; + struct hlist_node node; + struct pmu pmu; + struct hrtimer timer; + unsigned int cpu; + int active_events; + int burst_len; + int buswidth[DMC_MAX_CHANNELS]; + int ddrmon_stride; + bool ddrmon_ctrl_single; }; -static void rockchip_dfi_start_hardware_counter(struct devfreq_event_dev *edev) +static int rockchip_dfi_enable(struct rockchip_dfi *dfi) { - struct rockchip_dfi *info = devfreq_event_get_drvdata(edev); - void __iomem *dfi_regs = info->regs; - u32 val; - u32 ddr_type; + void __iomem *dfi_regs = dfi->regs; + int i, ret = 0; - /* get ddr type */ - regmap_read(info->regmap_pmu, RK3399_PMUGRF_OS_REG2, &val); - ddr_type = (val >> RK3399_PMUGRF_DDRTYPE_SHIFT) & - RK3399_PMUGRF_DDRTYPE_MASK; + mutex_lock(&dfi->mutex); - /* clear DDRMON_CTRL setting */ - writel_relaxed(CLR_DDRMON_CTRL, dfi_regs + DDRMON_CTRL); + dfi->usecount++; + if (dfi->usecount > 1) + goto out; - /* set ddr type to dfi */ - if (ddr_type == RK3399_PMUGRF_DDRTYPE_LPDDR3) - writel_relaxed(LPDDR3_EN, dfi_regs + DDRMON_CTRL); - else if (ddr_type == RK3399_PMUGRF_DDRTYPE_LPDDR4) - writel_relaxed(LPDDR4_EN, dfi_regs + DDRMON_CTRL); + ret = clk_prepare_enable(dfi->clk); + if (ret) { + dev_err(&dfi->edev->dev, "failed to enable dfi clk: %d\n", ret); + goto out; + } - /* enable count, use software mode */ - writel_relaxed(SOFTWARE_EN, dfi_regs + DDRMON_CTRL); + for (i = 0; i < dfi->max_channels; i++) { + u32 ctrl = 0; + + if (!(dfi->channel_mask & BIT(i))) + continue; + + /* clear DDRMON_CTRL setting */ + writel_relaxed(HIWORD_UPDATE(0, DDRMON_CTRL_TIMER_CNT_EN | + DDRMON_CTRL_SOFTWARE_EN | DDRMON_CTRL_HARDWARE_EN), + dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL); + + /* set ddr type to dfi */ + switch (dfi->ddr_type) { + case ROCKCHIP_DDRTYPE_LPDDR2: + case ROCKCHIP_DDRTYPE_LPDDR3: + ctrl = DDRMON_CTRL_LPDDR23; + break; + case ROCKCHIP_DDRTYPE_LPDDR4: + case ROCKCHIP_DDRTYPE_LPDDR4X: + ctrl = DDRMON_CTRL_LPDDR4; + break; + default: + break; + } + + writel_relaxed(HIWORD_UPDATE(ctrl, DDRMON_CTRL_DDR_TYPE_MASK), + dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL); + + /* enable count, use software mode */ + writel_relaxed(HIWORD_UPDATE(DDRMON_CTRL_SOFTWARE_EN, DDRMON_CTRL_SOFTWARE_EN), + dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL); + + if (dfi->ddrmon_ctrl_single) + break; + } +out: + mutex_unlock(&dfi->mutex); + + return ret; } -static void rockchip_dfi_stop_hardware_counter(struct devfreq_event_dev *edev) 
+static void rockchip_dfi_disable(struct rockchip_dfi *dfi) { - struct rockchip_dfi *info = devfreq_event_get_drvdata(edev); - void __iomem *dfi_regs = info->regs; + void __iomem *dfi_regs = dfi->regs; + int i; + + mutex_lock(&dfi->mutex); - writel_relaxed(SOFTWARE_DIS, dfi_regs + DDRMON_CTRL); + dfi->usecount--; + + WARN_ON_ONCE(dfi->usecount < 0); + + if (dfi->usecount > 0) + goto out; + + for (i = 0; i < dfi->max_channels; i++) { + if (!(dfi->channel_mask & BIT(i))) + continue; + + writel_relaxed(HIWORD_UPDATE(0, DDRMON_CTRL_SOFTWARE_EN), + dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL); + + if (dfi->ddrmon_ctrl_single) + break; + } + + clk_disable_unprepare(dfi->clk); +out: + mutex_unlock(&dfi->mutex); +} + +static void rockchip_dfi_read_counters(struct rockchip_dfi *dfi, struct dmc_count *res) +{ + u32 i; + void __iomem *dfi_regs = dfi->regs; + + for (i = 0; i < dfi->max_channels; i++) { + if (!(dfi->channel_mask & BIT(i))) + continue; + res->c[i].read_access = readl_relaxed(dfi_regs + + DDRMON_CH0_RD_NUM + i * dfi->ddrmon_stride); + res->c[i].write_access = readl_relaxed(dfi_regs + + DDRMON_CH0_WR_NUM + i * dfi->ddrmon_stride); + res->c[i].access = readl_relaxed(dfi_regs + + DDRMON_CH0_DFI_ACCESS_NUM + i * dfi->ddrmon_stride); + res->c[i].clock_cycles = readl_relaxed(dfi_regs + + DDRMON_CH0_COUNT_NUM + i * dfi->ddrmon_stride); + } +} + +static int rockchip_dfi_event_disable(struct devfreq_event_dev *edev) +{ + struct rockchip_dfi *dfi = devfreq_event_get_drvdata(edev); + + rockchip_dfi_disable(dfi); + + return 0; +} + +static int rockchip_dfi_event_enable(struct devfreq_event_dev *edev) +{ + struct rockchip_dfi *dfi = devfreq_event_get_drvdata(edev); + + return rockchip_dfi_enable(dfi); } -static int rockchip_dfi_get_busier_ch(struct devfreq_event_dev *edev) +static int rockchip_dfi_set_event(struct devfreq_event_dev *edev) { - struct rockchip_dfi *info = devfreq_event_get_drvdata(edev); - u32 tmp, max = 0; - u32 i, busier_ch = 0; - void __iomem *dfi_regs = info->regs; + return 0; +} - rockchip_dfi_stop_hardware_counter(edev); +static int rockchip_dfi_get_event(struct devfreq_event_dev *edev, + struct devfreq_event_data *edata) +{ + struct rockchip_dfi *dfi = devfreq_event_get_drvdata(edev); + struct dmc_count count; + struct dmc_count *last = &dfi->last_event_count; + u32 access = 0, clock_cycles = 0; + int i; + + rockchip_dfi_read_counters(dfi, &count); + + /* We can only report one channel, so find the busiest one */ + for (i = 0; i < dfi->max_channels; i++) { + u32 a, c; - /* Find out which channel is busier */ - for (i = 0; i < RK3399_DMC_NUM_CH; i++) { - info->ch_usage[i].access = readl_relaxed(dfi_regs + - DDRMON_CH0_DFI_ACCESS_NUM + i * 20) * 4; - info->ch_usage[i].total = readl_relaxed(dfi_regs + - DDRMON_CH0_COUNT_NUM + i * 20); - tmp = info->ch_usage[i].access; - if (tmp > max) { - busier_ch = i; - max = tmp; + if (!(dfi->channel_mask & BIT(i))) + continue; + + a = count.c[i].access - last->c[i].access; + c = count.c[i].clock_cycles - last->c[i].clock_cycles; + + if (a > access) { + access = a; + clock_cycles = c; } } - rockchip_dfi_start_hardware_counter(edev); - return busier_ch; + edata->load_count = access * 4; + edata->total_count = clock_cycles; + + dfi->last_event_count = count; + + return 0; +} + +static const struct devfreq_event_ops rockchip_dfi_ops = { + .disable = rockchip_dfi_event_disable, + .enable = rockchip_dfi_event_enable, + .get_event = rockchip_dfi_get_event, + .set_event = rockchip_dfi_set_event, +}; + +#ifdef CONFIG_PERF_EVENTS + +static void 
rockchip_ddr_perf_counters_add(struct rockchip_dfi *dfi, + const struct dmc_count *now, + struct dmc_count *res) +{ + const struct dmc_count *last = &dfi->last_perf_count; + int i; + + for (i = 0; i < dfi->max_channels; i++) { + res->c[i].read_access = dfi->total_count.c[i].read_access + + (u32)(now->c[i].read_access - last->c[i].read_access); + res->c[i].write_access = dfi->total_count.c[i].write_access + + (u32)(now->c[i].write_access - last->c[i].write_access); + res->c[i].access = dfi->total_count.c[i].access + + (u32)(now->c[i].access - last->c[i].access); + res->c[i].clock_cycles = dfi->total_count.c[i].clock_cycles + + (u32)(now->c[i].clock_cycles - last->c[i].clock_cycles); + } +} + +static ssize_t ddr_perf_cpumask_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct pmu *pmu = dev_get_drvdata(dev); + struct rockchip_dfi *dfi = container_of(pmu, struct rockchip_dfi, pmu); + + return cpumap_print_to_pagebuf(true, buf, cpumask_of(dfi->cpu)); +} + +static struct device_attribute ddr_perf_cpumask_attr = + __ATTR(cpumask, 0444, ddr_perf_cpumask_show, NULL); + +static struct attribute *ddr_perf_cpumask_attrs[] = { + &ddr_perf_cpumask_attr.attr, + NULL, +}; + +static const struct attribute_group ddr_perf_cpumask_attr_group = { + .attrs = ddr_perf_cpumask_attrs, +}; + +PMU_EVENT_ATTR_STRING(cycles, ddr_pmu_cycles, "event="__stringify(PERF_EVENT_CYCLES)) + +#define DFI_PMU_EVENT_ATTR(_name, _var, _str) \ + PMU_EVENT_ATTR_STRING(_name, _var, _str); \ + PMU_EVENT_ATTR_STRING(_name.unit, _var##_unit, "MB"); \ + PMU_EVENT_ATTR_STRING(_name.scale, _var##_scale, "9.536743164e-07") + +DFI_PMU_EVENT_ATTR(read-bytes0, ddr_pmu_read_bytes0, "event="__stringify(PERF_EVENT_READ_BYTES0)); +DFI_PMU_EVENT_ATTR(write-bytes0, ddr_pmu_write_bytes0, "event="__stringify(PERF_EVENT_WRITE_BYTES0)); + +DFI_PMU_EVENT_ATTR(read-bytes1, ddr_pmu_read_bytes1, "event="__stringify(PERF_EVENT_READ_BYTES1)); +DFI_PMU_EVENT_ATTR(write-bytes1, ddr_pmu_write_bytes1, "event="__stringify(PERF_EVENT_WRITE_BYTES1)); + +DFI_PMU_EVENT_ATTR(read-bytes2, ddr_pmu_read_bytes2, "event="__stringify(PERF_EVENT_READ_BYTES2)); +DFI_PMU_EVENT_ATTR(write-bytes2, ddr_pmu_write_bytes2, "event="__stringify(PERF_EVENT_WRITE_BYTES2)); + +DFI_PMU_EVENT_ATTR(read-bytes3, ddr_pmu_read_bytes3, "event="__stringify(PERF_EVENT_READ_BYTES3)); +DFI_PMU_EVENT_ATTR(write-bytes3, ddr_pmu_write_bytes3, "event="__stringify(PERF_EVENT_WRITE_BYTES3)); + +DFI_PMU_EVENT_ATTR(read-bytes, ddr_pmu_read_bytes, "event="__stringify(PERF_EVENT_READ_BYTES)); +DFI_PMU_EVENT_ATTR(write-bytes, ddr_pmu_write_bytes, "event="__stringify(PERF_EVENT_WRITE_BYTES)); + +DFI_PMU_EVENT_ATTR(bytes, ddr_pmu_bytes, "event="__stringify(PERF_EVENT_BYTES)); + +#define DFI_ATTR_MB(_name) \ + &_name.attr.attr, \ + &_name##_unit.attr.attr, \ + &_name##_scale.attr.attr + +static struct attribute *ddr_perf_events_attrs[] = { + &ddr_pmu_cycles.attr.attr, + DFI_ATTR_MB(ddr_pmu_read_bytes), + DFI_ATTR_MB(ddr_pmu_write_bytes), + DFI_ATTR_MB(ddr_pmu_read_bytes0), + DFI_ATTR_MB(ddr_pmu_write_bytes0), + DFI_ATTR_MB(ddr_pmu_read_bytes1), + DFI_ATTR_MB(ddr_pmu_write_bytes1), + DFI_ATTR_MB(ddr_pmu_read_bytes2), + DFI_ATTR_MB(ddr_pmu_write_bytes2), + DFI_ATTR_MB(ddr_pmu_read_bytes3), + DFI_ATTR_MB(ddr_pmu_write_bytes3), + DFI_ATTR_MB(ddr_pmu_bytes), + NULL, +}; + +static const struct attribute_group ddr_perf_events_attr_group = { + .name = "events", + .attrs = ddr_perf_events_attrs, +}; + +PMU_FORMAT_ATTR(event, "config:0-7"); + +static struct attribute *ddr_perf_format_attrs[] = 
{ + &format_attr_event.attr, + NULL, +}; + +static const struct attribute_group ddr_perf_format_attr_group = { + .name = "format", + .attrs = ddr_perf_format_attrs, +}; + +static const struct attribute_group *attr_groups[] = { + &ddr_perf_events_attr_group, + &ddr_perf_cpumask_attr_group, + &ddr_perf_format_attr_group, + NULL, +}; + +static int rockchip_ddr_perf_event_init(struct perf_event *event) +{ + struct rockchip_dfi *dfi = container_of(event->pmu, struct rockchip_dfi, pmu); + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + if (event->attach_state & PERF_ATTACH_TASK) + return -EINVAL; + + if (event->cpu < 0) { + dev_warn(dfi->dev, "Can't provide per-task data!\n"); + return -EINVAL; + } + + return 0; +} + +static u64 rockchip_ddr_perf_event_get_count(struct perf_event *event) +{ + struct rockchip_dfi *dfi = container_of(event->pmu, struct rockchip_dfi, pmu); + int blen = dfi->burst_len; + struct dmc_count total, now; + unsigned int seq; + u64 count = 0; + int i; + + rockchip_dfi_read_counters(dfi, &now); + + do { + seq = read_seqbegin(&dfi->count_seqlock); + rockchip_ddr_perf_counters_add(dfi, &now, &total); + } while (read_seqretry(&dfi->count_seqlock, seq)); + + switch (event->attr.config) { + case PERF_EVENT_CYCLES: + count = total.c[0].clock_cycles; + break; + case PERF_EVENT_READ_BYTES: + for (i = 0; i < dfi->max_channels; i++) + count += total.c[i].read_access * blen * dfi->buswidth[i]; + break; + case PERF_EVENT_WRITE_BYTES: + for (i = 0; i < dfi->max_channels; i++) + count += total.c[i].write_access * blen * dfi->buswidth[i]; + break; + case PERF_EVENT_READ_BYTES0: + count = total.c[0].read_access * blen * dfi->buswidth[0]; + break; + case PERF_EVENT_WRITE_BYTES0: + count = total.c[0].write_access * blen * dfi->buswidth[0]; + break; + case PERF_EVENT_READ_BYTES1: + count = total.c[1].read_access * blen * dfi->buswidth[1]; + break; + case PERF_EVENT_WRITE_BYTES1: + count = total.c[1].write_access * blen * dfi->buswidth[1]; + break; + case PERF_EVENT_READ_BYTES2: + count = total.c[2].read_access * blen * dfi->buswidth[2]; + break; + case PERF_EVENT_WRITE_BYTES2: + count = total.c[2].write_access * blen * dfi->buswidth[2]; + break; + case PERF_EVENT_READ_BYTES3: + count = total.c[3].read_access * blen * dfi->buswidth[3]; + break; + case PERF_EVENT_WRITE_BYTES3: + count = total.c[3].write_access * blen * dfi->buswidth[3]; + break; + case PERF_EVENT_BYTES: + for (i = 0; i < dfi->max_channels; i++) + count += total.c[i].access * blen * dfi->buswidth[i]; + break; + } + + return count; +} + +static void rockchip_ddr_perf_event_update(struct perf_event *event) +{ + u64 now; + s64 prev; + + if (event->attr.config >= PERF_ACCESS_TYPE_MAX) + return; + + now = rockchip_ddr_perf_event_get_count(event); + prev = local64_xchg(&event->hw.prev_count, now); + local64_add(now - prev, &event->count); +} + +static void rockchip_ddr_perf_event_start(struct perf_event *event, int flags) +{ + u64 now = rockchip_ddr_perf_event_get_count(event); + + local64_set(&event->hw.prev_count, now); +} + +static int rockchip_ddr_perf_event_add(struct perf_event *event, int flags) +{ + struct rockchip_dfi *dfi = container_of(event->pmu, struct rockchip_dfi, pmu); + + dfi->active_events++; + + if (dfi->active_events == 1) { + dfi->total_count = (struct dmc_count){}; + rockchip_dfi_read_counters(dfi, &dfi->last_perf_count); + hrtimer_start(&dfi->timer, ns_to_ktime(NSEC_PER_SEC), HRTIMER_MODE_REL); + } + + if (flags & PERF_EF_START) + rockchip_ddr_perf_event_start(event, flags); + + return 0; +} + 
+static void rockchip_ddr_perf_event_stop(struct perf_event *event, int flags) +{ + rockchip_ddr_perf_event_update(event); } -static int rockchip_dfi_disable(struct devfreq_event_dev *edev) +static void rockchip_ddr_perf_event_del(struct perf_event *event, int flags) { - struct rockchip_dfi *info = devfreq_event_get_drvdata(edev); + struct rockchip_dfi *dfi = container_of(event->pmu, struct rockchip_dfi, pmu); - rockchip_dfi_stop_hardware_counter(edev); - clk_disable_unprepare(info->clk); + rockchip_ddr_perf_event_stop(event, PERF_EF_UPDATE); + + dfi->active_events--; + + if (dfi->active_events == 0) + hrtimer_cancel(&dfi->timer); +} + +static enum hrtimer_restart rockchip_dfi_timer(struct hrtimer *timer) +{ + struct rockchip_dfi *dfi = container_of(timer, struct rockchip_dfi, timer); + struct dmc_count now, total; + + rockchip_dfi_read_counters(dfi, &now); + + write_seqlock(&dfi->count_seqlock); + + rockchip_ddr_perf_counters_add(dfi, &now, &total); + dfi->total_count = total; + dfi->last_perf_count = now; + + write_sequnlock(&dfi->count_seqlock); + + hrtimer_forward_now(&dfi->timer, ns_to_ktime(NSEC_PER_SEC)); + + return HRTIMER_RESTART; +}; + +static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct rockchip_dfi *dfi = hlist_entry_safe(node, struct rockchip_dfi, node); + int target; + + if (cpu != dfi->cpu) + return 0; + + target = cpumask_any_but(cpu_online_mask, cpu); + if (target >= nr_cpu_ids) + return 0; + + perf_pmu_migrate_context(&dfi->pmu, cpu, target); + dfi->cpu = target; return 0; } -static int rockchip_dfi_enable(struct devfreq_event_dev *edev) +static void rockchip_ddr_cpuhp_remove_state(void *data) +{ + struct rockchip_dfi *dfi = data; + + cpuhp_remove_multi_state(dfi->cpuhp_state); + + rockchip_dfi_disable(dfi); +} + +static void rockchip_ddr_cpuhp_remove_instance(void *data) +{ + struct rockchip_dfi *dfi = data; + + cpuhp_state_remove_instance_nocalls(dfi->cpuhp_state, &dfi->node); +} + +static void rockchip_ddr_perf_remove(void *data) { - struct rockchip_dfi *info = devfreq_event_get_drvdata(edev); + struct rockchip_dfi *dfi = data; + + perf_pmu_unregister(&dfi->pmu); +} + +static int rockchip_ddr_perf_init(struct rockchip_dfi *dfi) +{ + struct pmu *pmu = &dfi->pmu; int ret; - ret = clk_prepare_enable(info->clk); + seqlock_init(&dfi->count_seqlock); + + pmu->module = THIS_MODULE; + pmu->capabilities = PERF_PMU_CAP_NO_EXCLUDE; + pmu->task_ctx_nr = perf_invalid_context; + pmu->attr_groups = attr_groups; + pmu->event_init = rockchip_ddr_perf_event_init; + pmu->add = rockchip_ddr_perf_event_add; + pmu->del = rockchip_ddr_perf_event_del; + pmu->start = rockchip_ddr_perf_event_start; + pmu->stop = rockchip_ddr_perf_event_stop; + pmu->read = rockchip_ddr_perf_event_update; + + dfi->cpu = raw_smp_processor_id(); + + ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, + "rockchip_ddr_perf_pmu", + NULL, + ddr_perf_offline_cpu); + + if (ret < 0) { + dev_err(dfi->dev, "cpuhp_setup_state_multi failed: %d\n", ret); + return ret; + } + + dfi->cpuhp_state = ret; + + rockchip_dfi_enable(dfi); + + ret = devm_add_action_or_reset(dfi->dev, rockchip_ddr_cpuhp_remove_state, dfi); + if (ret) + return ret; + + ret = cpuhp_state_add_instance_nocalls(dfi->cpuhp_state, &dfi->node); if (ret) { - dev_err(&edev->dev, "failed to enable dfi clk: %d\n", ret); + dev_err(dfi->dev, "Error %d registering hotplug\n", ret); return ret; } - rockchip_dfi_start_hardware_counter(edev); + ret = devm_add_action_or_reset(dfi->dev, rockchip_ddr_cpuhp_remove_instance, dfi); + if (ret) + 
return ret;
+
+	hrtimer_init(&dfi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	dfi->timer.function = rockchip_dfi_timer;
+
+	switch (dfi->ddr_type) {
+	case ROCKCHIP_DDRTYPE_LPDDR2:
+	case ROCKCHIP_DDRTYPE_LPDDR3:
+		dfi->burst_len = 8;
+		break;
+	case ROCKCHIP_DDRTYPE_LPDDR4:
+	case ROCKCHIP_DDRTYPE_LPDDR4X:
+		dfi->burst_len = 16;
+		break;
+	}
+
+	ret = perf_pmu_register(pmu, "rockchip_ddr", -1);
+	if (ret)
+		return ret;
+
+	return devm_add_action_or_reset(dfi->dev, rockchip_ddr_perf_remove, dfi);
+}
+#else
+static int rockchip_ddr_perf_init(struct rockchip_dfi *dfi)
+{
 	return 0;
 }
+#endif
 
-static int rockchip_dfi_set_event(struct devfreq_event_dev *edev)
+static int rk3399_dfi_init(struct rockchip_dfi *dfi)
 {
+	struct regmap *regmap_pmu = dfi->regmap_pmu;
+	u32 val;
+
+	dfi->clk = devm_clk_get(dfi->dev, "pclk_ddr_mon");
+	if (IS_ERR(dfi->clk))
+		return dev_err_probe(dfi->dev, PTR_ERR(dfi->clk),
+				     "Cannot get the clk pclk_ddr_mon\n");
+
+	/* get ddr type */
+	regmap_read(regmap_pmu, RK3399_PMUGRF_OS_REG2, &val);
+	dfi->ddr_type = FIELD_GET(RK3399_PMUGRF_OS_REG2_DDRTYPE, val);
+
+	dfi->channel_mask = GENMASK(1, 0);
+	dfi->max_channels = 2;
+
+	dfi->buswidth[0] = FIELD_GET(RK3399_PMUGRF_OS_REG2_BW_CH0, val) == 0 ? 4 : 2;
+	dfi->buswidth[1] = FIELD_GET(RK3399_PMUGRF_OS_REG2_BW_CH1, val) == 0 ? 4 : 2;
+
+	dfi->ddrmon_stride = 0x14;
+	dfi->ddrmon_ctrl_single = true;
+
 	return 0;
-}
+};
 
-static int rockchip_dfi_get_event(struct devfreq_event_dev *edev,
-				  struct devfreq_event_data *edata)
+static int rk3568_dfi_init(struct rockchip_dfi *dfi)
 {
-	struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);
-	int busier_ch;
+	struct regmap *regmap_pmu = dfi->regmap_pmu;
+	u32 reg2, reg3;
+
+	regmap_read(regmap_pmu, RK3568_PMUGRF_OS_REG2, &reg2);
+	regmap_read(regmap_pmu, RK3568_PMUGRF_OS_REG3, &reg3);
+
+	/* lower 3 bits of the DDR type */
+	dfi->ddr_type = FIELD_GET(RK3568_PMUGRF_OS_REG2_DRAMTYPE_INFO, reg2);
+
+	/*
+	 * For version three and higher the upper two bits of the DDR type are
+	 * in RK3568_PMUGRF_OS_REG3
+	 */
+	if (FIELD_GET(RK3568_PMUGRF_OS_REG3_SYSREG_VERSION, reg3) >= 0x3)
+		dfi->ddr_type |= FIELD_GET(RK3568_PMUGRF_OS_REG3_DRAMTYPE_INFO_V3, reg3) << 3;
 
-	busier_ch = rockchip_dfi_get_busier_ch(edev);
+	dfi->channel_mask = BIT(0);
+	dfi->max_channels = 1;
 
-	edata->load_count = info->ch_usage[busier_ch].access;
-	edata->total_count = info->ch_usage[busier_ch].total;
+	dfi->buswidth[0] = FIELD_GET(RK3568_PMUGRF_OS_REG2_BW_CH0, reg2) == 0 ?
4 : 2;
+
+	dfi->ddrmon_stride = 0x0; /* not relevant, we only have a single channel on this SoC */
+	dfi->ddrmon_ctrl_single = true;
 
 	return 0;
-}
+};
 
-static const struct devfreq_event_ops rockchip_dfi_ops = {
-	.disable = rockchip_dfi_disable,
-	.enable = rockchip_dfi_enable,
-	.get_event = rockchip_dfi_get_event,
-	.set_event = rockchip_dfi_set_event,
+static int rk3588_dfi_init(struct rockchip_dfi *dfi)
+{
+	struct regmap *regmap_pmu = dfi->regmap_pmu;
+	u32 reg2, reg3, reg4;
+
+	regmap_read(regmap_pmu, RK3588_PMUGRF_OS_REG2, &reg2);
+	regmap_read(regmap_pmu, RK3588_PMUGRF_OS_REG3, &reg3);
+	regmap_read(regmap_pmu, RK3588_PMUGRF_OS_REG4, &reg4);
+
+	/* lower 3 bits of the DDR type */
+	dfi->ddr_type = FIELD_GET(RK3588_PMUGRF_OS_REG2_DRAMTYPE_INFO, reg2);
+
+	/*
+	 * For version three and higher the upper two bits of the DDR type are
+	 * in RK3588_PMUGRF_OS_REG3
+	 */
+	if (FIELD_GET(RK3588_PMUGRF_OS_REG3_SYSREG_VERSION, reg3) >= 0x3)
+		dfi->ddr_type |= FIELD_GET(RK3588_PMUGRF_OS_REG3_DRAMTYPE_INFO_V3, reg3) << 3;
+
+	dfi->buswidth[0] = FIELD_GET(RK3588_PMUGRF_OS_REG2_BW_CH0, reg2) == 0 ? 4 : 2;
+	dfi->buswidth[1] = FIELD_GET(RK3588_PMUGRF_OS_REG2_BW_CH1, reg2) == 0 ? 4 : 2;
+	dfi->buswidth[2] = FIELD_GET(RK3568_PMUGRF_OS_REG2_BW_CH0, reg4) == 0 ? 4 : 2;
+	dfi->buswidth[3] = FIELD_GET(RK3588_PMUGRF_OS_REG2_BW_CH1, reg4) == 0 ? 4 : 2;
+	dfi->channel_mask = FIELD_GET(RK3588_PMUGRF_OS_REG2_CH_INFO, reg2) |
+			    FIELD_GET(RK3588_PMUGRF_OS_REG2_CH_INFO, reg4) << 2;
+	dfi->max_channels = 4;
+
+	dfi->ddrmon_stride = 0x4000;
+
+	return 0;
 };
 
 static const struct of_device_id rockchip_dfi_id_match[] = {
-	{ .compatible = "rockchip,rk3399-dfi" },
+	{ .compatible = "rockchip,rk3399-dfi", .data = rk3399_dfi_init },
+	{ .compatible = "rockchip,rk3568-dfi", .data = rk3568_dfi_init },
+	{ .compatible = "rockchip,rk3588-dfi", .data = rk3588_dfi_init },
 	{ },
 };
+
 MODULE_DEVICE_TABLE(of, rockchip_dfi_id_match);
 
 static int rockchip_dfi_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
-	struct rockchip_dfi *data;
+	struct rockchip_dfi *dfi;
 	struct devfreq_event_desc *desc;
 	struct device_node *np = pdev->dev.of_node, *node;
+	int (*soc_init)(struct rockchip_dfi *dfi);
+	int ret;
 
-	data = devm_kzalloc(dev, sizeof(struct rockchip_dfi), GFP_KERNEL);
-	if (!data)
-		return -ENOMEM;
+	soc_init = of_device_get_match_data(&pdev->dev);
+	if (!soc_init)
+		return -EINVAL;
 
-	data->regs = devm_platform_ioremap_resource(pdev, 0);
-	if (IS_ERR(data->regs))
-		return PTR_ERR(data->regs);
+	dfi = devm_kzalloc(dev, sizeof(*dfi), GFP_KERNEL);
+	if (!dfi)
+		return -ENOMEM;
 
-	data->clk = devm_clk_get(dev, "pclk_ddr_mon");
-	if (IS_ERR(data->clk))
-		return dev_err_probe(dev, PTR_ERR(data->clk),
-				     "Cannot get the clk pclk_ddr_mon\n");
+	dfi->regs = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(dfi->regs))
+		return PTR_ERR(dfi->regs);
 
-	/* try to find the optional reference to the pmu syscon */
 	node = of_parse_phandle(np, "rockchip,pmu", 0);
-	if (node) {
-		data->regmap_pmu = syscon_node_to_regmap(node);
-		of_node_put(node);
-		if (IS_ERR(data->regmap_pmu))
-			return PTR_ERR(data->regmap_pmu);
-	}
-	data->dev = dev;
+	if (!node)
+		return dev_err_probe(&pdev->dev, -ENODEV, "Can't find pmu_grf registers\n");
 
-	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
-	if (!desc)
-		return -ENOMEM;
+	dfi->regmap_pmu = syscon_node_to_regmap(node);
+	of_node_put(node);
+	if (IS_ERR(dfi->regmap_pmu))
+		return PTR_ERR(dfi->regmap_pmu);
+
+	dfi->dev = dev;
+	mutex_init(&dfi->mutex);
+
+	desc = &dfi->desc;
 	desc->ops = 
&rockchip_dfi_ops; - desc->driver_data = data; + desc->driver_data = dfi; desc->name = np->name; - data->desc = desc; - data->edev = devm_devfreq_event_add_edev(&pdev->dev, desc); - if (IS_ERR(data->edev)) { + ret = soc_init(dfi); + if (ret) + return ret; + + dfi->edev = devm_devfreq_event_add_edev(&pdev->dev, desc); + if (IS_ERR(dfi->edev)) { dev_err(&pdev->dev, "failed to add devfreq-event device\n"); - return PTR_ERR(data->edev); + return PTR_ERR(dfi->edev); } - platform_set_drvdata(pdev, data); + ret = rockchip_ddr_perf_init(dfi); + if (ret) + return ret; + + platform_set_drvdata(pdev, dfi); return 0; } @@ -229,6 +828,7 @@ static struct platform_driver rockchip_dfi_driver = { .driver = { .name = "rockchip-dfi", .of_match_table = rockchip_dfi_id_match, + .suppress_bind_attrs = true, }, }; module_platform_driver(rockchip_dfi_driver); diff --git a/drivers/devfreq/mtk-cci-devfreq.c b/drivers/devfreq/mtk-cci-devfreq.c index 83a73f0ccd80..11bc3d03494c 100644 --- a/drivers/devfreq/mtk-cci-devfreq.c +++ b/drivers/devfreq/mtk-cci-devfreq.c @@ -137,6 +137,8 @@ static int mtk_ccifreq_target(struct device *dev, unsigned long *freq, if (drv->pre_freq == *freq) return 0; + mutex_lock(&drv->reg_lock); + inter_voltage = drv->inter_voltage; cci_pll = clk_get_parent(drv->cci_clk); @@ -144,11 +146,10 @@ static int mtk_ccifreq_target(struct device *dev, unsigned long *freq, opp = devfreq_recommended_opp(dev, &opp_rate, 1); if (IS_ERR(opp)) { dev_err(dev, "failed to find opp for freq: %ld\n", opp_rate); - return PTR_ERR(opp); + ret = PTR_ERR(opp); + goto out_unlock; } - mutex_lock(&drv->reg_lock); - voltage = dev_pm_opp_get_voltage(opp); dev_pm_opp_put(opp); @@ -227,9 +228,9 @@ static int mtk_ccifreq_opp_notifier(struct notifier_block *nb, drv = container_of(nb, struct mtk_ccifreq_drv, opp_nb); if (event == OPP_EVENT_ADJUST_VOLTAGE) { + mutex_lock(&drv->reg_lock); freq = dev_pm_opp_get_freq(opp); - mutex_lock(&drv->reg_lock); /* current opp item is changed */ if (freq == drv->pre_freq) { volt = dev_pm_opp_get_voltage(opp); diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c index daff40702615..fd2c5ffedf41 100644 --- a/drivers/devfreq/rk3399_dmc.c +++ b/drivers/devfreq/rk3399_dmc.c @@ -22,6 +22,7 @@ #include <linux/suspend.h> #include <soc/rockchip/pm_domains.h> +#include <soc/rockchip/rockchip_grf.h> #include <soc/rockchip/rk3399_grf.h> #include <soc/rockchip/rockchip_sip.h> @@ -381,17 +382,16 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev) } regmap_read(data->regmap_pmu, RK3399_PMUGRF_OS_REG2, &val); - ddr_type = (val >> RK3399_PMUGRF_DDRTYPE_SHIFT) & - RK3399_PMUGRF_DDRTYPE_MASK; + ddr_type = FIELD_GET(RK3399_PMUGRF_OS_REG2_DDRTYPE, val); switch (ddr_type) { - case RK3399_PMUGRF_DDRTYPE_DDR3: + case ROCKCHIP_DDRTYPE_DDR3: data->odt_dis_freq = data->ddr3_odt_dis_freq; break; - case RK3399_PMUGRF_DDRTYPE_LPDDR3: + case ROCKCHIP_DDRTYPE_LPDDR3: data->odt_dis_freq = data->lpddr3_odt_dis_freq; break; - case RK3399_PMUGRF_DDRTYPE_LPDDR4: + case ROCKCHIP_DDRTYPE_LPDDR4: data->odt_dis_freq = data->lpddr4_odt_dis_freq; break; default: diff --git a/drivers/opp/core.c b/drivers/opp/core.c index 919cc53bc02e..84f345c69ea5 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -814,6 +814,31 @@ struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev, EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_ceil); /** + * dev_pm_opp_find_level_floor() - Search for a rounded floor level + * @dev: device for which we do this operation + * @level: Start level + * + * Search for the 
matching floor *available* OPP from a starting level + * for a device. + * + * Return: matching *opp and refreshes *level accordingly, else returns + * ERR_PTR in case of error and should be handled using IS_ERR. Error return + * values can be: + * EINVAL: for bad pointer + * ERANGE: no match found for search + * ENODEV: if device not found in list of registered devices + * + * The callers are required to call dev_pm_opp_put() for the returned OPP after + * use. + */ +struct dev_pm_opp *dev_pm_opp_find_level_floor(struct device *dev, + unsigned long *level) +{ + return _find_key_floor(dev, level, 0, true, _read_level, NULL); +} +EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_floor); + +/** * dev_pm_opp_find_bw_ceil() - Search for a rounded ceil bandwidth * @dev: device for which we do this operation * @bw: start bandwidth @@ -1030,7 +1055,7 @@ static int _set_performance_state(struct device *dev, struct device *pd_dev, if (!pd_dev) return 0; - ret = dev_pm_genpd_set_performance_state(pd_dev, pstate); + ret = dev_pm_domain_set_performance_state(pd_dev, pstate); if (ret) { dev_err(dev, "Failed to set performance state of %s: %d (%d)\n", dev_name(pd_dev), pstate, ret); @@ -1051,32 +1076,28 @@ static int _opp_set_required_opps_genpd(struct device *dev, { struct device **genpd_virt_devs = opp_table->genpd_virt_devs ? opp_table->genpd_virt_devs : &dev; - int i, ret = 0; - - /* - * Acquire genpd_virt_dev_lock to make sure we don't use a genpd_dev - * after it is freed from another thread. - */ - mutex_lock(&opp_table->genpd_virt_dev_lock); + int index, target, delta, ret; /* Scaling up? Set required OPPs in normal order, else reverse */ if (!scaling_down) { - for (i = 0; i < opp_table->required_opp_count; i++) { - ret = _set_performance_state(dev, genpd_virt_devs[i], opp, i); - if (ret) - break; - } + index = 0; + target = opp_table->required_opp_count; + delta = 1; } else { - for (i = opp_table->required_opp_count - 1; i >= 0; i--) { - ret = _set_performance_state(dev, genpd_virt_devs[i], opp, i); - if (ret) - break; - } + index = opp_table->required_opp_count - 1; + target = -1; + delta = -1; } - mutex_unlock(&opp_table->genpd_virt_dev_lock); + while (index != target) { + ret = _set_performance_state(dev, genpd_virt_devs[index], opp, index); + if (ret) + return ret; - return ret; + index += delta; + } + + return 0; } /* This is only called for PM domain for now */ @@ -1107,6 +1128,28 @@ void _update_set_required_opps(struct opp_table *opp_table) opp_table->set_required_opps = _opp_set_required_opps_generic; } +static int _set_opp_level(struct device *dev, struct opp_table *opp_table, + struct dev_pm_opp *opp) +{ + unsigned int level = 0; + int ret = 0; + + if (opp) { + if (!opp->level) + return 0; + + level = opp->level; + } + + /* Request a new performance state through the device's PM domain. 
*/ + ret = dev_pm_domain_set_performance_state(dev, level); + if (ret) + dev_err(dev, "Failed to set performance state %u (%d)\n", level, + ret); + + return ret; +} + static void _find_current_opp(struct device *dev, struct opp_table *opp_table) { struct dev_pm_opp *opp = ERR_PTR(-ENODEV); @@ -1154,8 +1197,13 @@ static int _disable_opp_table(struct device *dev, struct opp_table *opp_table) if (opp_table->regulators) regulator_disable(opp_table->regulators[0]); + ret = _set_opp_level(dev, opp_table, NULL); + if (ret) + goto out; + ret = _set_required_opps(dev, opp_table, NULL, false); +out: opp_table->enabled = false; return ret; } @@ -1198,6 +1246,10 @@ static int _set_opp(struct device *dev, struct opp_table *opp_table, return ret; } + ret = _set_opp_level(dev, opp_table, opp); + if (ret) + return ret; + ret = _set_opp_bw(opp_table, opp, dev); if (ret) { dev_err(dev, "Failed to set bw: %d\n", ret); @@ -1241,6 +1293,10 @@ static int _set_opp(struct device *dev, struct opp_table *opp_table, return ret; } + ret = _set_opp_level(dev, opp_table, opp); + if (ret) + return ret; + ret = _set_required_opps(dev, opp_table, opp, false); if (ret) { dev_err(dev, "Failed to set required opps: %d\n", ret); @@ -1410,7 +1466,6 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index) return ERR_PTR(-ENOMEM); mutex_init(&opp_table->lock); - mutex_init(&opp_table->genpd_virt_dev_lock); INIT_LIST_HEAD(&opp_table->dev_list); INIT_LIST_HEAD(&opp_table->lazy); @@ -1446,7 +1501,6 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index) remove_opp_dev: _of_clear_opp_table(opp_table); _remove_opp_dev(opp_dev, opp_table); - mutex_destroy(&opp_table->genpd_virt_dev_lock); mutex_destroy(&opp_table->lock); err: kfree(opp_table); @@ -1614,7 +1668,6 @@ static void _opp_table_kref_release(struct kref *kref) list_for_each_entry_safe(opp_dev, temp, &opp_table->dev_list, node) _remove_opp_dev(opp_dev, opp_table); - mutex_destroy(&opp_table->genpd_virt_dev_lock); mutex_destroy(&opp_table->lock); kfree(opp_table); } @@ -2002,8 +2055,7 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, * _opp_add_v1() - Allocate a OPP based on v1 bindings. * @opp_table: OPP table * @dev: device for which we do this operation - * @freq: Frequency in Hz for this OPP - * @u_volt: Voltage in uVolts for this OPP + * @data: The OPP data for the OPP to add * @dynamic: Dynamically added OPPs. * * This function adds an opp definition to the opp table and returns status. @@ -2021,10 +2073,10 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, * -ENOMEM Memory allocation failure */ int _opp_add_v1(struct opp_table *opp_table, struct device *dev, - unsigned long freq, long u_volt, bool dynamic) + struct dev_pm_opp_data *data, bool dynamic) { struct dev_pm_opp *new_opp; - unsigned long tol; + unsigned long tol, u_volt = data->u_volt; int ret; if (!assert_single_clk(opp_table)) @@ -2035,7 +2087,8 @@ int _opp_add_v1(struct opp_table *opp_table, struct device *dev, return -ENOMEM; /* populate the opp table */ - new_opp->rates[0] = freq; + new_opp->rates[0] = data->freq; + new_opp->level = data->level; tol = u_volt * opp_table->voltage_tolerance_v1 / 100; new_opp->supplies[0].u_volt = u_volt; new_opp->supplies[0].u_volt_min = u_volt - tol; @@ -2064,12 +2117,7 @@ free_opp: return ret; } -/** - * _opp_set_supported_hw() - Set supported platforms - * @dev: Device for which supported-hw has to be set. - * @versions: Array of hierarchy of versions to match. 
- * @count: Number of elements in the array. - * +/* * This is required only for the V2 bindings, and it enables a platform to * specify the hierarchy of versions it supports. OPP layer will then enable * OPPs, which are available for those versions, based on its 'opp-supported-hw' @@ -2092,14 +2140,6 @@ static int _opp_set_supported_hw(struct opp_table *opp_table, return 0; } -/** - * _opp_put_supported_hw() - Releases resources blocked for supported hw - * @opp_table: OPP table returned by _opp_set_supported_hw(). - * - * This is required only for the V2 bindings, and is called for a matching - * _opp_set_supported_hw(). Until this is called, the opp_table structure - * will not be freed. - */ static void _opp_put_supported_hw(struct opp_table *opp_table) { if (opp_table->supported_hw) { @@ -2109,11 +2149,7 @@ static void _opp_put_supported_hw(struct opp_table *opp_table) } } -/** - * _opp_set_prop_name() - Set prop-extn name - * @dev: Device for which the prop-name has to be set. - * @name: name to postfix to properties. - * +/* * This is required only for the V2 bindings, and it enables a platform to * specify the extn to be used for certain property names. The properties to * which the extension will apply are opp-microvolt and opp-microamp. OPP core @@ -2131,14 +2167,6 @@ static int _opp_set_prop_name(struct opp_table *opp_table, const char *name) return 0; } -/** - * _opp_put_prop_name() - Releases resources blocked for prop-name - * @opp_table: OPP table returned by _opp_set_prop_name(). - * - * This is required only for the V2 bindings, and is called for a matching - * _opp_set_prop_name(). Until this is called, the opp_table structure - * will not be freed. - */ static void _opp_put_prop_name(struct opp_table *opp_table) { if (opp_table->prop_name) { @@ -2147,12 +2175,7 @@ static void _opp_put_prop_name(struct opp_table *opp_table) } } -/** - * _opp_set_regulators() - Set regulator names for the device - * @dev: Device for which regulator name is being set. - * @names: Array of pointers to the names of the regulator. - * @count: Number of regulators. - * +/* * In order to support OPP switching, OPP layer needs to know the name of the * device's regulators, as the core would be required to switch voltages as * well. @@ -2214,10 +2237,6 @@ free_regulators: return ret; } -/** - * _opp_put_regulators() - Releases resources blocked for regulator - * @opp_table: OPP table returned from _opp_set_regulators(). - */ static void _opp_put_regulators(struct opp_table *opp_table) { int i; @@ -2249,11 +2268,7 @@ static void _put_clks(struct opp_table *opp_table, int count) opp_table->clks = NULL; } -/** - * _opp_set_clknames() - Set clk names for the device - * @dev: Device for which clk names is being set. - * @names: Clk names. - * +/* * In order to support OPP switching, OPP layer needs to get pointers to the * clocks for the device. Simple cases work fine without using this routine * (i.e. by passing connection-id as NULL), but for a device with multiple @@ -2337,10 +2352,6 @@ free_clks: return ret; } -/** - * _opp_put_clknames() - Releases resources blocked for clks. - * @opp_table: OPP table returned from _opp_set_clknames(). - */ static void _opp_put_clknames(struct opp_table *opp_table) { if (!opp_table->clks) @@ -2352,11 +2363,7 @@ static void _opp_put_clknames(struct opp_table *opp_table) _put_clks(opp_table, opp_table->clk_count); } -/** - * _opp_set_config_regulators_helper() - Register custom set regulator helper. - * @dev: Device for which the helper is getting registered. 
- * @config_regulators: Custom set regulator helper. - * +/* * This is useful to support platforms with multiple regulators per device. * * This must be called before any OPPs are initialized for the device. @@ -2371,20 +2378,13 @@ static int _opp_set_config_regulators_helper(struct opp_table *opp_table, return 0; } -/** - * _opp_put_config_regulators_helper() - Releases resources blocked for - * config_regulators helper. - * @opp_table: OPP table returned from _opp_set_config_regulators_helper(). - * - * Release resources blocked for platform specific config_regulators helper. - */ static void _opp_put_config_regulators_helper(struct opp_table *opp_table) { if (opp_table->config_regulators) opp_table->config_regulators = NULL; } -static void _detach_genpd(struct opp_table *opp_table) +static void _opp_detach_genpd(struct opp_table *opp_table) { int index; @@ -2403,12 +2403,7 @@ static void _detach_genpd(struct opp_table *opp_table) opp_table->genpd_virt_devs = NULL; } -/** - * _opp_attach_genpd - Attach genpd(s) for the device and save virtual device pointer - * @dev: Consumer device for which the genpd is getting attached. - * @names: Null terminated array of pointers containing names of genpd to attach. - * @virt_devs: Pointer to return the array of virtual devices. - * +/* * Multiple generic power domains for a device are supported with the help of * virtual genpd devices, which are created for each consumer device - genpd * pair. These are the device structures which are attached to the power domain @@ -2435,21 +2430,11 @@ static int _opp_attach_genpd(struct opp_table *opp_table, struct device *dev, if (opp_table->genpd_virt_devs) return 0; - /* - * If the genpd's OPP table isn't already initialized, parsing of the - * required-opps fail for dev. We should retry this after genpd's OPP - * table is added. - */ - if (!opp_table->required_opp_count) - return -EPROBE_DEFER; - - mutex_lock(&opp_table->genpd_virt_dev_lock); - opp_table->genpd_virt_devs = kcalloc(opp_table->required_opp_count, sizeof(*opp_table->genpd_virt_devs), GFP_KERNEL); if (!opp_table->genpd_virt_devs) - goto unlock; + return -ENOMEM; while (*name) { if (index >= opp_table->required_opp_count) { @@ -2472,36 +2457,15 @@ static int _opp_attach_genpd(struct opp_table *opp_table, struct device *dev, if (virt_devs) *virt_devs = opp_table->genpd_virt_devs; - mutex_unlock(&opp_table->genpd_virt_dev_lock); return 0; err: - _detach_genpd(opp_table); -unlock: - mutex_unlock(&opp_table->genpd_virt_dev_lock); + _opp_detach_genpd(opp_table); return ret; } -/** - * _opp_detach_genpd() - Detach genpd(s) from the device. - * @opp_table: OPP table returned by _opp_attach_genpd(). - * - * This detaches the genpd(s), resets the virtual device pointers, and puts the - * OPP table. - */ -static void _opp_detach_genpd(struct opp_table *opp_table) -{ - /* - * Acquire genpd_virt_dev_lock to make sure virt_dev isn't getting - * used in parallel. - */ - mutex_lock(&opp_table->genpd_virt_dev_lock); - _detach_genpd(opp_table); - mutex_unlock(&opp_table->genpd_virt_dev_lock); -} - static void _opp_clear_config(struct opp_config_data *data) { if (data->flags & OPP_CONFIG_GENPD) @@ -2642,7 +2606,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_config); /** * dev_pm_opp_clear_config() - Releases resources blocked for OPP configuration. - * @opp_table: OPP table returned from dev_pm_opp_set_config(). + * @token: The token returned by dev_pm_opp_set_config() previously. * * This allows all device OPP configurations to be cleared at once. 
This must be * called once for each call made to dev_pm_opp_set_config(), in order to free @@ -2825,10 +2789,9 @@ unlock: } /** - * dev_pm_opp_add() - Add an OPP table from a table definitions - * @dev: device for which we do this operation - * @freq: Frequency in Hz for this OPP - * @u_volt: Voltage in uVolts for this OPP + * dev_pm_opp_add_dynamic() - Add an OPP table from a table definitions + * @dev: The device for which we do this operation + * @data: The OPP data for the OPP to add * * This function adds an opp definition to the opp table and returns status. * The opp is made available by default and it can be controlled using @@ -2841,7 +2804,7 @@ unlock: * Duplicate OPPs (both freq and volt are same) and !opp->available * -ENOMEM Memory allocation failure */ -int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) +int dev_pm_opp_add_dynamic(struct device *dev, struct dev_pm_opp_data *data) { struct opp_table *opp_table; int ret; @@ -2853,13 +2816,13 @@ int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) /* Fix regulator count for dynamic OPPs */ opp_table->regulator_count = 1; - ret = _opp_add_v1(opp_table, dev, freq, u_volt, true); + ret = _opp_add_v1(opp_table, dev, data, true); if (ret) dev_pm_opp_put_opp_table(opp_table); return ret; } -EXPORT_SYMBOL_GPL(dev_pm_opp_add); +EXPORT_SYMBOL_GPL(dev_pm_opp_add_dynamic); /** * _opp_set_availability() - helper to set the availability of an opp diff --git a/drivers/opp/debugfs.c b/drivers/opp/debugfs.c index 17543c0aa5b6..ec030b19164a 100644 --- a/drivers/opp/debugfs.c +++ b/drivers/opp/debugfs.c @@ -56,7 +56,7 @@ static void opp_debug_create_bw(struct dev_pm_opp *opp, struct dentry *pdentry) { struct dentry *d; - char name[11]; + char name[20]; int i; for (i = 0; i < opp_table->path_count; i++) { diff --git a/drivers/opp/of.c b/drivers/opp/of.c index ada4963c7cfa..81fa27599d58 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -208,9 +208,9 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table, mutex_lock(&opp_table_lock); list_add(&opp_table->lazy, &lazy_opp_tables); mutex_unlock(&opp_table_lock); - } - else + } else { _update_set_required_opps(opp_table); + } goto put_np; @@ -296,24 +296,41 @@ void _of_clear_opp(struct opp_table *opp_table, struct dev_pm_opp *opp) of_node_put(opp->np); } +static int _link_required_opps(struct dev_pm_opp *opp, + struct opp_table *required_table, int index) +{ + struct device_node *np; + + np = of_parse_required_opp(opp->np, index); + if (unlikely(!np)) + return -ENODEV; + + opp->required_opps[index] = _find_opp_of_np(required_table, np); + of_node_put(np); + + if (!opp->required_opps[index]) { + pr_err("%s: Unable to find required OPP node: %pOF (%d)\n", + __func__, opp->np, index); + return -ENODEV; + } + + return 0; +} + /* Populate all required OPPs which are part of "required-opps" list */ static int _of_opp_alloc_required_opps(struct opp_table *opp_table, struct dev_pm_opp *opp) { - struct dev_pm_opp **required_opps; struct opp_table *required_table; - struct device_node *np; int i, ret, count = opp_table->required_opp_count; if (!count) return 0; - required_opps = kcalloc(count, sizeof(*required_opps), GFP_KERNEL); - if (!required_opps) + opp->required_opps = kcalloc(count, sizeof(*opp->required_opps), GFP_KERNEL); + if (!opp->required_opps) return -ENOMEM; - opp->required_opps = required_opps; - for (i = 0; i < count; i++) { required_table = opp_table->required_opp_tables[i]; @@ -321,21 +338,9 @@ static int 
_of_opp_alloc_required_opps(struct opp_table *opp_table, if (IS_ERR_OR_NULL(required_table)) continue; - np = of_parse_required_opp(opp->np, i); - if (unlikely(!np)) { - ret = -ENODEV; - goto free_required_opps; - } - - required_opps[i] = _find_opp_of_np(required_table, np); - of_node_put(np); - - if (!required_opps[i]) { - pr_err("%s: Unable to find required OPP node: %pOF (%d)\n", - __func__, opp->np, i); - ret = -ENODEV; + ret = _link_required_opps(opp, required_table, i); + if (ret) goto free_required_opps; - } } return 0; @@ -350,22 +355,13 @@ free_required_opps: static int lazy_link_required_opps(struct opp_table *opp_table, struct opp_table *new_table, int index) { - struct device_node *required_np; struct dev_pm_opp *opp; + int ret; list_for_each_entry(opp, &opp_table->opp_list, node) { - required_np = of_parse_required_opp(opp->np, index); - if (unlikely(!required_np)) - return -ENODEV; - - opp->required_opps[index] = _find_opp_of_np(new_table, required_np); - of_node_put(required_np); - - if (!opp->required_opps[index]) { - pr_err("%s: Unable to find required OPP node: %pOF (%d)\n", - __func__, opp->np, index); - return -ENODEV; - } + ret = _link_required_opps(opp, new_table, index); + if (ret) + return ret; } return 0; @@ -1079,11 +1075,15 @@ static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table) while (nr) { unsigned long freq = be32_to_cpup(val++) * 1000; unsigned long volt = be32_to_cpup(val++); + struct dev_pm_opp_data data = { + .freq = freq, + .u_volt = volt, + }; - ret = _opp_add_v1(opp_table, dev, freq, volt, false); + ret = _opp_add_v1(opp_table, dev, &data, false); if (ret) { dev_err(dev, "%s: Failed to add OPP %ld (%d)\n", - __func__, freq, ret); + __func__, data.freq, ret); goto remove_static_opp; } nr -= 2; diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h index 8a5ea38f3a3d..08366f90f16b 100644 --- a/drivers/opp/opp.h +++ b/drivers/opp/opp.h @@ -160,7 +160,6 @@ enum opp_table_access { * @rate_clk_single: Currently configured frequency for single clk. * @current_opp: Currently configured OPP for the table. * @suspend_opp: Pointer to OPP to be used during device suspend. - * @genpd_virt_dev_lock: Mutex protecting the genpd virtual device pointers. * @genpd_virt_devs: List of virtual devices for multiple genpd support. * @required_opp_tables: List of device OPP tables that are required by OPPs in * this table. 
@@ -212,7 +211,6 @@ struct opp_table {
 	struct dev_pm_opp *current_opp;
 	struct dev_pm_opp *suspend_opp;
 
-	struct mutex genpd_virt_dev_lock;
 	struct device **genpd_virt_devs;
 	struct opp_table **required_opp_tables;
 	unsigned int required_opp_count;
@@ -251,7 +249,7 @@ struct dev_pm_opp *_opp_allocate(struct opp_table *opp_table);
 void _opp_free(struct dev_pm_opp *opp);
 int _opp_compare_key(struct opp_table *opp_table, struct dev_pm_opp *opp1, struct dev_pm_opp *opp2);
 int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table);
-int _opp_add_v1(struct opp_table *opp_table, struct device *dev, unsigned long freq, long u_volt, bool dynamic);
+int _opp_add_v1(struct opp_table *opp_table, struct device *dev, struct dev_pm_opp_data *data, bool dynamic);
 void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, int last_cpu);
 struct opp_table *_add_opp_table_indexed(struct device *dev, int index, bool getclk);
 void _put_opp_list_kref(struct opp_table *opp_table);
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index 40a2cc649c79..2feed036c1cd 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -892,7 +892,7 @@ static int rapl_write_pl_data(struct rapl_domain *rd, int pl,
 		return -EINVAL;
 
 	if (rd->rpl[pl].locked) {
-		pr_warn("%s:%s:%s locked by BIOS\n", rd->rp->name, rd->name, pl_names[pl]);
+		pr_debug("%s:%s:%s locked by BIOS\n", rd->rp->name, rd->name, pl_names[pl]);
		return -EACCES;
 	}
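
A note on the counter plumbing added to rockchip-dfi above: rockchip_dfi_timer() fires once per second and publishes the accumulated dmc_count totals under write_seqlock(&dfi->count_seqlock), so readers can take a consistent snapshot without ever blocking the timer. The matching read side is not part of the hunks shown here; a typical lockless consumer would follow the standard seqlock retry pattern, sketched below under the assumption that only the total_count snapshot is needed (example_read_totals is a hypothetical helper, not a function from this patch):

	/* Sketch of a seqlock consumer; 'dfi' and its fields are as in this patch. */
	static void example_read_totals(struct rockchip_dfi *dfi, struct dmc_count *res)
	{
		unsigned int seq;

		do {
			seq = read_seqbegin(&dfi->count_seqlock);
			*res = dfi->total_count; /* snapshot published by rockchip_dfi_timer() */
		} while (read_seqretry(&dfi->count_seqlock, seq));
	}

Keeping the hot read path on read_seqbegin()/read_seqretry() is what lets the perf event code stay lock-free while the hrtimer performs the periodic accumulation.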
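dev_pm_opp_find_level_floor(), added in the opp core hunks, follows the usual OPP search conventions: it refreshes *level to the level of the matched OPP and returns a referenced OPP that the caller must drop with dev_pm_opp_put(). A hypothetical caller might look like this (the starting level is an arbitrary example value; the API is declared in <linux/pm_opp.h>):

	/* Hypothetical helper; 'dev' is a device with a registered OPP table. */
	static int example_floor_level(struct device *dev)
	{
		unsigned long level = 3; /* find the highest available level <= 3 */
		struct dev_pm_opp *opp;

		opp = dev_pm_opp_find_level_floor(dev, &level);
		if (IS_ERR(opp))
			return PTR_ERR(opp); /* e.g. -ERANGE when nothing matches */

		/* 'level' was refreshed to the matched OPP's level */
		dev_pm_opp_put(opp);
		return 0;
	}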
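Finally, the dev_pm_opp_add() to dev_pm_opp_add_dynamic() conversion means dynamic OPPs are now described by struct dev_pm_opp_data, which also carries the level field consumed by _set_opp_level(). A minimal caller sketch, using only the .freq, .u_volt and .level members visible in this diff (the numeric values are placeholders, not taken from any real platform):

	/* Hypothetical helper illustrating the new API only. */
	static int example_add_opp(struct device *dev)
	{
		struct dev_pm_opp_data data = {
			.freq = 800000000,	/* Hz */
			.u_volt = 900000,	/* uV */
			.level = 2,		/* optional performance state */
		};

		/* Previously: dev_pm_opp_add(dev, 800000000, 900000); */
		return dev_pm_opp_add_dynamic(dev, &data);
	}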