author | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2022-12-06 12:07:30 +0100
---|---|---
committer | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2022-12-06 12:07:30 +0100
commit | edeba49e39193025cbf1a06ce7a74f136e83664f (patch) |
tree | b33a049462835dfcb6fa69a6956bdd4be1e2caf5 /drivers/cpufreq |
parent | df51f287b5de3b9d4fd39593eafd1f8298d711c7 (diff) |
parent | 8ff150aa6fe252e9b7713cf737c4dc5cbaa263ab (diff) |
Merge tag 'cpufreq-arm-updates-6.2' of git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm
Pull cpufreq ARM updates for 6.2 from Viresh Kumar:
"- Generalize of_perf_domain_get_sharing_cpumask phandle format (Hector
Martin).
- New cpufreq driver for Apple SoC CPU P-states (Hector Martin).
- Lots of Qualcomm cpufreq driver updates, that include CPU clock
provider support, generic cleanups or reorganization, fixed a
potential memleak and the return value of cpufreq_driver->get()
(Manivannan Sadhasivam, and Chen Hui).
- Few updates to Qualcomm cpufreq driver's DT bindings, that include
support for CPU clock provider, fixing missing cache related
properties, and support for QDU1000/QRU1000 (Manivannan Sadhasivam,
Rob Herring, and Melody Olvera).
- Add support for ti,am625 SoC and enable build of ti-cpufreq for
ARCH_K3 (Dave Gerlach, and Vibhore Vardhan).
- tegra186: Use flexible array to simplify memory allocation (Christophe
JAILLET)."
* tag 'cpufreq-arm-updates-6.2' of git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm:
dt-bindings: cpufreq: cpufreq-qcom-hw: Add QDU1000/QRU1000 cpufreq
cpufreq: tegra186: Use flexible array to simplify memory allocation
cpufreq: apple-soc: Add new driver to control Apple SoC CPU P-states
cpufreq: qcom-hw: Add CPU clock provider support
dt-bindings: cpufreq: cpufreq-qcom-hw: Add cpufreq clock provider
cpufreq: qcom-hw: Fix the frequency returned by cpufreq_driver->get()
cpufreq: qcom-hw: Fix memory leak in qcom_cpufreq_hw_read_lut()
arm64: dts: ti: k3-am625-sk: Add 1.4GHz OPP
cpufreq: ti: Enable ti-cpufreq for ARCH_K3
arm64: dts: ti: k3-am625: Introduce operating-points table
cpufreq: dt-platdev: Blacklist ti,am625 SoC
cpufreq: ti-cpufreq: Add support for AM625
dt-bindings: cpufreq: qcom: Add missing cache related properties
cpufreq: qcom-hw: Move soc_data to struct qcom_cpufreq
cpufreq: qcom-hw: Use cached dev pointer in probe()
cpufreq: qcom-hw: Allocate qcom_cpufreq_data during probe
cpufreq: qcom-hw: Remove un-necessary cpumask_empty() check
cpufreq: Generalize of_perf_domain_get_sharing_cpumask phandle format
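The tegra186 commit above folds the clusters array into its parent structure as a flexible array member and sizes a single allocation with struct_size(), as the tegra186-cpufreq.c hunk in the diff below shows. A minimal sketch of that pattern, using simplified stand-in types (example_data, example_cluster, example_alloc()) rather than the driver's real ones:

```c
#include <linux/device.h>
#include <linux/io.h>
#include <linux/overflow.h>
#include <linux/slab.h>

/* Simplified stand-ins for the tegra186 types; illustrative only. */
struct example_cluster {
	unsigned int ref_clk_hz;
};

struct example_data {
	void __iomem *regs;
	struct example_cluster clusters[];	/* must be the last member */
};

static struct example_data *example_alloc(struct device *dev,
					  unsigned int num_clusters)
{
	struct example_data *data;

	/*
	 * One devm allocation now covers the fixed fields plus all cluster
	 * slots; struct_size() computes the total with overflow checking,
	 * replacing the separate devm_kcalloc() the old code needed.
	 */
	data = devm_kzalloc(dev, struct_size(data, clusters, num_clusters),
			    GFP_KERNEL);
	return data;
}
```

Besides dropping an allocation and an error path, this keeps the per-cluster slots contiguous with the fields that describe them.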
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r-- | drivers/cpufreq/Kconfig.arm | 13
-rw-r--r-- | drivers/cpufreq/Makefile | 1
-rw-r--r-- | drivers/cpufreq/apple-soc-cpufreq.c | 352
-rw-r--r-- | drivers/cpufreq/cpufreq-dt-platdev.c | 3
-rw-r--r-- | drivers/cpufreq/mediatek-cpufreq-hw.c | 14
-rw-r--r-- | drivers/cpufreq/qcom-cpufreq-hw.c | 206
-rw-r--r-- | drivers/cpufreq/tegra186-cpufreq.c | 11
-rw-r--r-- | drivers/cpufreq/ti-cpufreq.c | 36
8 files changed, 543 insertions, 93 deletions
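The qcom-cpufreq-hw changes in the diff below register one clock per frequency domain, backed by a recalc_rate callback that reads the hardware, and expose them through a one-cell DT clock provider. The sketch below condenses that pattern under assumed names; example_domain, example_recalc_rate(), EXAMPLE_RATE_REG and EXAMPLE_XO_HZ are placeholders, and the real driver derives the rate from its LMh/DVFS status registers instead.

```c
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/overflow.h>
#include <linux/slab.h>

/* Placeholder register offset and timebase; not the real qcom values. */
#define EXAMPLE_RATE_REG	0x0
#define EXAMPLE_XO_HZ		19200000UL

struct example_domain {
	void __iomem *base;
	struct clk_hw cpu_clk;
};

static unsigned long example_recalc_rate(struct clk_hw *hw,
					 unsigned long parent_rate)
{
	struct example_domain *d = container_of(hw, struct example_domain,
						cpu_clk);

	/* Report whatever the hardware currently runs at, never a cache. */
	return readl_relaxed(d->base + EXAMPLE_RATE_REG) * EXAMPLE_XO_HZ;
}

static const struct clk_ops example_clk_ops = {
	.recalc_rate = example_recalc_rate,
};

static int example_register_cpu_clks(struct device *dev,
				     struct example_domain *domains,
				     unsigned int num_domains)
{
	struct clk_hw_onecell_data *clk_data;
	unsigned int i;
	int ret;

	clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, num_domains),
				GFP_KERNEL);
	if (!clk_data)
		return -ENOMEM;
	clk_data->num = num_domains;

	for (i = 0; i < num_domains; i++) {
		struct clk_init_data init = {
			.ops = &example_clk_ops,
			.flags = CLK_GET_RATE_NOCACHE,
		};

		init.name = devm_kasprintf(dev, GFP_KERNEL,
					   "example_cpufreq%u", i);
		if (!init.name)
			return -ENOMEM;

		domains[i].cpu_clk.init = &init;
		ret = devm_clk_hw_register(dev, &domains[i].cpu_clk);
		if (ret)
			return ret;

		clk_data->hws[i] = &domains[i].cpu_clk;
	}

	/* Lets DT consumers reference the clocks with a one-cell specifier. */
	return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
					   clk_data);
}
```

CLK_GET_RATE_NOCACHE matters here because the rate changes behind the clock framework's back whenever cpufreq or the LMh hardware picks a new performance state.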
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 82e5de1f6f8c..0a0352d8fa45 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -41,6 +41,15 @@ config ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM To compile this driver as a module, choose M here: the module will be called sun50i-cpufreq-nvmem. +config ARM_APPLE_SOC_CPUFREQ + tristate "Apple Silicon SoC CPUFreq support" + depends on ARCH_APPLE || (COMPILE_TEST && 64BIT) + select PM_OPP + default ARCH_APPLE + help + This adds the CPUFreq driver for Apple Silicon machines + (e.g. Apple M1). + config ARM_ARMADA_37XX_CPUFREQ tristate "Armada 37xx CPUFreq support" depends on ARCH_MVEBU && CPUFREQ_DT @@ -340,8 +349,8 @@ config ARM_TEGRA194_CPUFREQ config ARM_TI_CPUFREQ bool "Texas Instruments CPUFreq support" - depends on ARCH_OMAP2PLUS - default ARCH_OMAP2PLUS + depends on ARCH_OMAP2PLUS || ARCH_K3 + default y help This driver enables valid OPPs on the running platform based on values contained within the SoC in use. Enable this in order to diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 49b98c62c5af..32a7029e25ed 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -52,6 +52,7 @@ obj-$(CONFIG_X86_AMD_FREQ_SENSITIVITY) += amd_freq_sensitivity.o ################################################################################## # ARM SoC drivers +obj-$(CONFIG_ARM_APPLE_SOC_CPUFREQ) += apple-soc-cpufreq.o obj-$(CONFIG_ARM_ARMADA_37XX_CPUFREQ) += armada-37xx-cpufreq.o obj-$(CONFIG_ARM_ARMADA_8K_CPUFREQ) += armada-8k-cpufreq.o obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ) += brcmstb-avs-cpufreq.o diff --git a/drivers/cpufreq/apple-soc-cpufreq.c b/drivers/cpufreq/apple-soc-cpufreq.c new file mode 100644 index 000000000000..d1801281cdd9 --- /dev/null +++ b/drivers/cpufreq/apple-soc-cpufreq.c @@ -0,0 +1,352 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Apple SoC CPU cluster performance state driver + * + * Copyright The Asahi Linux Contributors + * + * Based on scpi-cpufreq.c + */ + +#include <linux/bitfield.h> +#include <linux/bitops.h> +#include <linux/cpu.h> +#include <linux/cpufreq.h> +#include <linux/cpumask.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/pm_opp.h> +#include <linux/slab.h> + +#define APPLE_DVFS_CMD 0x20 +#define APPLE_DVFS_CMD_BUSY BIT(31) +#define APPLE_DVFS_CMD_SET BIT(25) +#define APPLE_DVFS_CMD_PS2 GENMASK(16, 12) +#define APPLE_DVFS_CMD_PS1 GENMASK(4, 0) + +/* Same timebase as CPU counter (24MHz) */ +#define APPLE_DVFS_LAST_CHG_TIME 0x38 + +/* + * Apple ran out of bits and had to shift this in T8112... + */ +#define APPLE_DVFS_STATUS 0x50 +#define APPLE_DVFS_STATUS_CUR_PS_T8103 GENMASK(7, 4) +#define APPLE_DVFS_STATUS_CUR_PS_SHIFT_T8103 4 +#define APPLE_DVFS_STATUS_TGT_PS_T8103 GENMASK(3, 0) +#define APPLE_DVFS_STATUS_CUR_PS_T8112 GENMASK(9, 5) +#define APPLE_DVFS_STATUS_CUR_PS_SHIFT_T8112 5 +#define APPLE_DVFS_STATUS_TGT_PS_T8112 GENMASK(4, 0) + +/* + * Div is +1, base clock is 12MHz on existing SoCs. + * For documentation purposes. We use the OPP table to + * get the frequency. 
+ */ +#define APPLE_DVFS_PLL_STATUS 0xc0 +#define APPLE_DVFS_PLL_FACTOR 0xc8 +#define APPLE_DVFS_PLL_FACTOR_MULT GENMASK(31, 16) +#define APPLE_DVFS_PLL_FACTOR_DIV GENMASK(15, 0) + +#define APPLE_DVFS_TRANSITION_TIMEOUT 100 + +struct apple_soc_cpufreq_info { + u64 max_pstate; + u64 cur_pstate_mask; + u64 cur_pstate_shift; +}; + +struct apple_cpu_priv { + struct device *cpu_dev; + void __iomem *reg_base; + const struct apple_soc_cpufreq_info *info; +}; + +static struct cpufreq_driver apple_soc_cpufreq_driver; + +static const struct apple_soc_cpufreq_info soc_t8103_info = { + .max_pstate = 15, + .cur_pstate_mask = APPLE_DVFS_STATUS_CUR_PS_T8103, + .cur_pstate_shift = APPLE_DVFS_STATUS_CUR_PS_SHIFT_T8103, +}; + +static const struct apple_soc_cpufreq_info soc_t8112_info = { + .max_pstate = 31, + .cur_pstate_mask = APPLE_DVFS_STATUS_CUR_PS_T8112, + .cur_pstate_shift = APPLE_DVFS_STATUS_CUR_PS_SHIFT_T8112, +}; + +static const struct apple_soc_cpufreq_info soc_default_info = { + .max_pstate = 15, + .cur_pstate_mask = 0, /* fallback */ +}; + +static const struct of_device_id apple_soc_cpufreq_of_match[] = { + { + .compatible = "apple,t8103-cluster-cpufreq", + .data = &soc_t8103_info, + }, + { + .compatible = "apple,t8112-cluster-cpufreq", + .data = &soc_t8112_info, + }, + { + .compatible = "apple,cluster-cpufreq", + .data = &soc_default_info, + }, + {} +}; + +static unsigned int apple_soc_cpufreq_get_rate(unsigned int cpu) +{ + struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu); + struct apple_cpu_priv *priv = policy->driver_data; + struct cpufreq_frequency_table *p; + unsigned int pstate; + + if (priv->info->cur_pstate_mask) { + u64 reg = readq_relaxed(priv->reg_base + APPLE_DVFS_STATUS); + + pstate = (reg & priv->info->cur_pstate_mask) >> priv->info->cur_pstate_shift; + } else { + /* + * For the fallback case we might not know the layout of DVFS_STATUS, + * so just use the command register value (which ignores boost limitations). 
+ */ + u64 reg = readq_relaxed(priv->reg_base + APPLE_DVFS_CMD); + + pstate = FIELD_GET(APPLE_DVFS_CMD_PS1, reg); + } + + cpufreq_for_each_valid_entry(p, policy->freq_table) + if (p->driver_data == pstate) + return p->frequency; + + dev_err(priv->cpu_dev, "could not find frequency for pstate %d\n", + pstate); + return 0; +} + +static int apple_soc_cpufreq_set_target(struct cpufreq_policy *policy, + unsigned int index) +{ + struct apple_cpu_priv *priv = policy->driver_data; + unsigned int pstate = policy->freq_table[index].driver_data; + u64 reg; + + /* Fallback for newer SoCs */ + if (index > priv->info->max_pstate) + index = priv->info->max_pstate; + + if (readq_poll_timeout_atomic(priv->reg_base + APPLE_DVFS_CMD, reg, + !(reg & APPLE_DVFS_CMD_BUSY), 2, + APPLE_DVFS_TRANSITION_TIMEOUT)) { + return -EIO; + } + + reg &= ~(APPLE_DVFS_CMD_PS1 | APPLE_DVFS_CMD_PS2); + reg |= FIELD_PREP(APPLE_DVFS_CMD_PS1, pstate); + reg |= FIELD_PREP(APPLE_DVFS_CMD_PS2, pstate); + reg |= APPLE_DVFS_CMD_SET; + + writeq_relaxed(reg, priv->reg_base + APPLE_DVFS_CMD); + + return 0; +} + +static unsigned int apple_soc_cpufreq_fast_switch(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + if (apple_soc_cpufreq_set_target(policy, policy->cached_resolved_idx) < 0) + return 0; + + return policy->freq_table[policy->cached_resolved_idx].frequency; +} + +static int apple_soc_cpufreq_find_cluster(struct cpufreq_policy *policy, + void __iomem **reg_base, + const struct apple_soc_cpufreq_info **info) +{ + struct of_phandle_args args; + const struct of_device_id *match; + int ret = 0; + + ret = of_perf_domain_get_sharing_cpumask(policy->cpu, "performance-domains", + "#performance-domain-cells", + policy->cpus, &args); + if (ret < 0) + return ret; + + match = of_match_node(apple_soc_cpufreq_of_match, args.np); + of_node_put(args.np); + if (!match) + return -ENODEV; + + *info = match->data; + + *reg_base = of_iomap(args.np, 0); + if (IS_ERR(*reg_base)) + return PTR_ERR(*reg_base); + + return 0; +} + +static struct freq_attr *apple_soc_cpufreq_hw_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, + NULL, /* Filled in below if boost is enabled */ + NULL, +}; + +static int apple_soc_cpufreq_init(struct cpufreq_policy *policy) +{ + int ret, i; + unsigned int transition_latency; + void __iomem *reg_base; + struct device *cpu_dev; + struct apple_cpu_priv *priv; + const struct apple_soc_cpufreq_info *info; + struct cpufreq_frequency_table *freq_table; + + cpu_dev = get_cpu_device(policy->cpu); + if (!cpu_dev) { + pr_err("failed to get cpu%d device\n", policy->cpu); + return -ENODEV; + } + + ret = dev_pm_opp_of_add_table(cpu_dev); + if (ret < 0) { + dev_err(cpu_dev, "%s: failed to add OPP table: %d\n", __func__, ret); + return ret; + } + + ret = apple_soc_cpufreq_find_cluster(policy, ®_base, &info); + if (ret) { + dev_err(cpu_dev, "%s: failed to get cluster info: %d\n", __func__, ret); + return ret; + } + + ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus); + if (ret) { + dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n", __func__, ret); + goto out_iounmap; + } + + ret = dev_pm_opp_get_opp_count(cpu_dev); + if (ret <= 0) { + dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n"); + ret = -EPROBE_DEFER; + goto out_free_opp; + } + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { + ret = -ENOMEM; + goto out_free_opp; + } + + ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); + if (ret) { + dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret); + goto out_free_priv; + } + + 
/* Get OPP levels (p-state indexes) and stash them in driver_data */ + for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) { + unsigned long rate = freq_table[i].frequency * 1000 + 999; + struct dev_pm_opp *opp = dev_pm_opp_find_freq_floor(cpu_dev, &rate); + + if (IS_ERR(opp)) { + ret = PTR_ERR(opp); + goto out_free_cpufreq_table; + } + freq_table[i].driver_data = dev_pm_opp_get_level(opp); + dev_pm_opp_put(opp); + } + + priv->cpu_dev = cpu_dev; + priv->reg_base = reg_base; + priv->info = info; + policy->driver_data = priv; + policy->freq_table = freq_table; + + transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev); + if (!transition_latency) + transition_latency = CPUFREQ_ETERNAL; + + policy->cpuinfo.transition_latency = transition_latency; + policy->dvfs_possible_from_any_cpu = true; + policy->fast_switch_possible = true; + + if (policy_has_boost_freq(policy)) { + ret = cpufreq_enable_boost_support(); + if (ret) { + dev_warn(cpu_dev, "failed to enable boost: %d\n", ret); + } else { + apple_soc_cpufreq_hw_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs; + apple_soc_cpufreq_driver.boost_enabled = true; + } + } + + return 0; + +out_free_cpufreq_table: + dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); +out_free_priv: + kfree(priv); +out_free_opp: + dev_pm_opp_remove_all_dynamic(cpu_dev); +out_iounmap: + iounmap(reg_base); + return ret; +} + +static int apple_soc_cpufreq_exit(struct cpufreq_policy *policy) +{ + struct apple_cpu_priv *priv = policy->driver_data; + + dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); + dev_pm_opp_remove_all_dynamic(priv->cpu_dev); + iounmap(priv->reg_base); + kfree(priv); + + return 0; +} + +static struct cpufreq_driver apple_soc_cpufreq_driver = { + .name = "apple-cpufreq", + .flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY | + CPUFREQ_NEED_INITIAL_FREQ_CHECK | CPUFREQ_IS_COOLING_DEV, + .verify = cpufreq_generic_frequency_table_verify, + .attr = cpufreq_generic_attr, + .get = apple_soc_cpufreq_get_rate, + .init = apple_soc_cpufreq_init, + .exit = apple_soc_cpufreq_exit, + .target_index = apple_soc_cpufreq_set_target, + .fast_switch = apple_soc_cpufreq_fast_switch, + .register_em = cpufreq_register_em_with_opp, + .attr = apple_soc_cpufreq_hw_attr, +}; + +static int __init apple_soc_cpufreq_module_init(void) +{ + if (!of_machine_is_compatible("apple,arm-platform")) + return -ENODEV; + + return cpufreq_register_driver(&apple_soc_cpufreq_driver); +} +module_init(apple_soc_cpufreq_module_init); + +static void __exit apple_soc_cpufreq_module_exit(void) +{ + cpufreq_unregister_driver(&apple_soc_cpufreq_driver); +} +module_exit(apple_soc_cpufreq_module_exit); + +MODULE_DEVICE_TABLE(of, apple_soc_cpufreq_of_match); +MODULE_AUTHOR("Hector Martin <marcan@marcan.st>"); +MODULE_DESCRIPTION("Apple SoC CPU cluster DVFS driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index 6ac3800db450..8ab672883043 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -103,6 +103,8 @@ static const struct of_device_id allowlist[] __initconst = { static const struct of_device_id blocklist[] __initconst = { { .compatible = "allwinner,sun50i-h6", }, + { .compatible = "apple,arm-platform", }, + { .compatible = "arm,vexpress", }, { .compatible = "calxeda,highbank", }, @@ -160,6 +162,7 @@ static const struct of_device_id blocklist[] __initconst = { { .compatible = "ti,am43", }, { .compatible = "ti,dra7", }, { .compatible = "ti,omap3", }, + { 
.compatible = "ti,am625", }, { .compatible = "qcom,ipq8064", }, { .compatible = "qcom,apq8064", }, diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c index f0e0a35c7f21..f80339779084 100644 --- a/drivers/cpufreq/mediatek-cpufreq-hw.c +++ b/drivers/cpufreq/mediatek-cpufreq-hw.c @@ -160,6 +160,7 @@ static int mtk_cpu_resources_init(struct platform_device *pdev, struct mtk_cpufreq_data *data; struct device *dev = &pdev->dev; struct resource *res; + struct of_phandle_args args; void __iomem *base; int ret, i; int index; @@ -168,11 +169,14 @@ static int mtk_cpu_resources_init(struct platform_device *pdev, if (!data) return -ENOMEM; - index = of_perf_domain_get_sharing_cpumask(policy->cpu, "performance-domains", - "#performance-domain-cells", - policy->cpus); - if (index < 0) - return index; + ret = of_perf_domain_get_sharing_cpumask(policy->cpu, "performance-domains", + "#performance-domain-cells", + policy->cpus, &args); + if (ret < 0) + return ret; + + index = args.args[0]; + of_node_put(args.np); res = platform_get_resource(pdev, IORESOURCE_MEM, index); if (!res) { diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c index 833589bc95e4..340fed35e45d 100644 --- a/drivers/cpufreq/qcom-cpufreq-hw.c +++ b/drivers/cpufreq/qcom-cpufreq-hw.c @@ -4,6 +4,7 @@ */ #include <linux/bitfield.h> +#include <linux/clk-provider.h> #include <linux/cpufreq.h> #include <linux/init.h> #include <linux/interconnect.h> @@ -43,7 +44,6 @@ struct qcom_cpufreq_soc_data { struct qcom_cpufreq_data { void __iomem *base; struct resource *res; - const struct qcom_cpufreq_soc_data *soc_data; /* * Mutex to synchronize between de-init sequence and re-starting LMh @@ -55,12 +55,18 @@ struct qcom_cpufreq_data { bool cancel_throttle; struct delayed_work throttle_work; struct cpufreq_policy *policy; + struct clk_hw cpu_clk; bool per_core_dcvs; struct freq_qos_request throttle_freq_req; }; +static struct { + struct qcom_cpufreq_data *data; + const struct qcom_cpufreq_soc_data *soc_data; +} qcom_cpufreq; + static unsigned long cpu_hw_rate, xo_rate; static bool icc_scaling_enabled; @@ -109,7 +115,7 @@ static int qcom_cpufreq_hw_target_index(struct cpufreq_policy *policy, unsigned int index) { struct qcom_cpufreq_data *data = policy->driver_data; - const struct qcom_cpufreq_soc_data *soc_data = data->soc_data; + const struct qcom_cpufreq_soc_data *soc_data = qcom_cpufreq.soc_data; unsigned long freq = policy->freq_table[index].frequency; unsigned int i; @@ -125,9 +131,37 @@ static int qcom_cpufreq_hw_target_index(struct cpufreq_policy *policy, return 0; } +static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data) +{ + unsigned int lval; + + if (qcom_cpufreq.soc_data->reg_current_vote) + lval = readl_relaxed(data->base + qcom_cpufreq.soc_data->reg_current_vote) & 0x3ff; + else + lval = readl_relaxed(data->base + qcom_cpufreq.soc_data->reg_domain_state) & 0xff; + + return lval * xo_rate; +} + +/* Get the current frequency of the CPU (after throttling) */ static unsigned int qcom_cpufreq_hw_get(unsigned int cpu) { struct qcom_cpufreq_data *data; + struct cpufreq_policy *policy; + + policy = cpufreq_cpu_get_raw(cpu); + if (!policy) + return 0; + + data = policy->driver_data; + + return qcom_lmh_get_throttle_freq(data) / HZ_PER_KHZ; +} + +/* Get the frequency requested by the cpufreq core for the CPU */ +static unsigned int qcom_cpufreq_get_freq(unsigned int cpu) +{ + struct qcom_cpufreq_data *data; const struct qcom_cpufreq_soc_data *soc_data; 
struct cpufreq_policy *policy; unsigned int index; @@ -137,7 +171,7 @@ static unsigned int qcom_cpufreq_hw_get(unsigned int cpu) return 0; data = policy->driver_data; - soc_data = data->soc_data; + soc_data = qcom_cpufreq.soc_data; index = readl_relaxed(data->base + soc_data->reg_perf_state); index = min(index, LUT_MAX_ENTRIES - 1); @@ -149,7 +183,7 @@ static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy, unsigned int target_freq) { struct qcom_cpufreq_data *data = policy->driver_data; - const struct qcom_cpufreq_soc_data *soc_data = data->soc_data; + const struct qcom_cpufreq_soc_data *soc_data = qcom_cpufreq.soc_data; unsigned int index; unsigned int i; @@ -173,7 +207,7 @@ static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev, unsigned long rate; int ret; struct qcom_cpufreq_data *drv_data = policy->driver_data; - const struct qcom_cpufreq_soc_data *soc_data = drv_data->soc_data; + const struct qcom_cpufreq_soc_data *soc_data = qcom_cpufreq.soc_data; table = kcalloc(LUT_MAX_ENTRIES + 1, sizeof(*table), GFP_KERNEL); if (!table) @@ -193,6 +227,7 @@ static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev, } } else if (ret != -ENODEV) { dev_err(cpu_dev, "Invalid opp table in device tree\n"); + kfree(table); return ret; } else { policy->fast_switch_possible = true; @@ -286,18 +321,6 @@ static void qcom_get_related_cpus(int index, struct cpumask *m) } } -static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data) -{ - unsigned int lval; - - if (data->soc_data->reg_current_vote) - lval = readl_relaxed(data->base + data->soc_data->reg_current_vote) & 0x3ff; - else - lval = readl_relaxed(data->base + data->soc_data->reg_domain_state) & 0xff; - - return lval * xo_rate; -} - static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data) { struct cpufreq_policy *policy = data->policy; @@ -341,7 +364,7 @@ static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data) * If h/w throttled frequency is higher than what cpufreq has requested * for, then stop polling and switch back to interrupt mechanism. 
*/ - if (throttled_freq >= qcom_cpufreq_hw_get(cpu)) + if (throttled_freq >= qcom_cpufreq_get_freq(cpu)) enable_irq(data->throttle_irq); else mod_delayed_work(system_highpri_wq, &data->throttle_work, @@ -367,9 +390,9 @@ static irqreturn_t qcom_lmh_dcvs_handle_irq(int irq, void *data) disable_irq_nosync(c_data->throttle_irq); schedule_delayed_work(&c_data->throttle_work, 0); - if (c_data->soc_data->reg_intr_clr) + if (qcom_cpufreq.soc_data->reg_intr_clr) writel_relaxed(GT_IRQ_STATUS, - c_data->base + c_data->soc_data->reg_intr_clr); + c_data->base + qcom_cpufreq.soc_data->reg_intr_clr); return IRQ_HANDLED; } @@ -503,8 +526,6 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy) struct of_phandle_args args; struct device_node *cpu_np; struct device *cpu_dev; - struct resource *res; - void __iomem *base; struct qcom_cpufreq_data *data; int ret, index; @@ -526,51 +547,18 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy) return ret; index = args.args[0]; - - res = platform_get_resource(pdev, IORESOURCE_MEM, index); - if (!res) { - dev_err(dev, "failed to get mem resource %d\n", index); - return -ENODEV; - } - - if (!request_mem_region(res->start, resource_size(res), res->name)) { - dev_err(dev, "failed to request resource %pR\n", res); - return -EBUSY; - } - - base = ioremap(res->start, resource_size(res)); - if (!base) { - dev_err(dev, "failed to map resource %pR\n", res); - ret = -ENOMEM; - goto release_region; - } - - data = kzalloc(sizeof(*data), GFP_KERNEL); - if (!data) { - ret = -ENOMEM; - goto unmap_base; - } - - data->soc_data = of_device_get_match_data(&pdev->dev); - data->base = base; - data->res = res; + data = &qcom_cpufreq.data[index]; /* HW should be in enabled state to proceed */ - if (!(readl_relaxed(base + data->soc_data->reg_enable) & 0x1)) { + if (!(readl_relaxed(data->base + qcom_cpufreq.soc_data->reg_enable) & 0x1)) { dev_err(dev, "Domain-%d cpufreq hardware not enabled\n", index); - ret = -ENODEV; - goto error; + return -ENODEV; } - if (readl_relaxed(base + data->soc_data->reg_dcvs_ctrl) & 0x1) + if (readl_relaxed(data->base + qcom_cpufreq.soc_data->reg_dcvs_ctrl) & 0x1) data->per_core_dcvs = true; qcom_get_related_cpus(index, policy->cpus); - if (cpumask_empty(policy->cpus)) { - dev_err(dev, "Domain-%d failed to get related CPUs\n", index); - ret = -ENOENT; - goto error; - } policy->driver_data = data; policy->dvfs_possible_from_any_cpu = true; @@ -578,14 +566,13 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy) ret = qcom_cpufreq_hw_read_lut(cpu_dev, policy); if (ret) { dev_err(dev, "Domain-%d failed to read LUT\n", index); - goto error; + return ret; } ret = dev_pm_opp_get_opp_count(cpu_dev); if (ret <= 0) { dev_err(cpu_dev, "Failed to add OPPs\n"); - ret = -ENODEV; - goto error; + return -ENODEV; } if (policy_has_boost_freq(policy)) { @@ -594,18 +581,7 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy) dev_warn(cpu_dev, "failed to enable boost: %d\n", ret); } - ret = qcom_cpufreq_hw_lmh_init(policy, index); - if (ret) - goto error; - - return 0; -error: - kfree(data); -unmap_base: - iounmap(base); -release_region: - release_mem_region(res->start, resource_size(res)); - return ret; + return qcom_cpufreq_hw_lmh_init(policy, index); } static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy) @@ -658,20 +634,33 @@ static struct cpufreq_driver cpufreq_qcom_hw_driver = { .ready = qcom_cpufreq_ready, }; +static unsigned long qcom_cpufreq_hw_recalc_rate(struct clk_hw *hw, unsigned long 
parent_rate) +{ + struct qcom_cpufreq_data *data = container_of(hw, struct qcom_cpufreq_data, cpu_clk); + + return qcom_lmh_get_throttle_freq(data); +} + +static const struct clk_ops qcom_cpufreq_hw_clk_ops = { + .recalc_rate = qcom_cpufreq_hw_recalc_rate, +}; + static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev) { + struct clk_hw_onecell_data *clk_data; + struct device *dev = &pdev->dev; struct device *cpu_dev; struct clk *clk; - int ret; + int ret, i, num_domains; - clk = clk_get(&pdev->dev, "xo"); + clk = clk_get(dev, "xo"); if (IS_ERR(clk)) return PTR_ERR(clk); xo_rate = clk_get_rate(clk); clk_put(clk); - clk = clk_get(&pdev->dev, "alternate"); + clk = clk_get(dev, "alternate"); if (IS_ERR(clk)) return PTR_ERR(clk); @@ -689,11 +678,70 @@ static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev) if (ret) return ret; + /* Allocate qcom_cpufreq_data based on the available frequency domains in DT */ + num_domains = of_property_count_elems_of_size(dev->of_node, "reg", sizeof(u32) * 4); + if (num_domains <= 0) + return num_domains; + + qcom_cpufreq.data = devm_kzalloc(dev, sizeof(struct qcom_cpufreq_data) * num_domains, + GFP_KERNEL); + if (!qcom_cpufreq.data) + return -ENOMEM; + + qcom_cpufreq.soc_data = of_device_get_match_data(dev); + + clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, num_domains), GFP_KERNEL); + if (!clk_data) + return -ENOMEM; + + clk_data->num = num_domains; + + for (i = 0; i < num_domains; i++) { + struct qcom_cpufreq_data *data = &qcom_cpufreq.data[i]; + struct clk_init_data clk_init = {}; + struct resource *res; + void __iomem *base; + + base = devm_platform_get_and_ioremap_resource(pdev, i, &res); + if (IS_ERR(base)) { + dev_err(dev, "Failed to map resource %pR\n", res); + return PTR_ERR(base); + } + + data->base = base; + data->res = res; + + /* Register CPU clock for each frequency domain */ + clk_init.name = kasprintf(GFP_KERNEL, "qcom_cpufreq%d", i); + if (!clk_init.name) + return -ENOMEM; + + clk_init.flags = CLK_GET_RATE_NOCACHE; + clk_init.ops = &qcom_cpufreq_hw_clk_ops; + data->cpu_clk.init = &clk_init; + + ret = devm_clk_hw_register(dev, &data->cpu_clk); + if (ret < 0) { + dev_err(dev, "Failed to register clock %d: %d\n", i, ret); + kfree(clk_init.name); + return ret; + } + + clk_data->hws[i] = &data->cpu_clk; + kfree(clk_init.name); + } + + ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_data); + if (ret < 0) { + dev_err(dev, "Failed to add clock provider\n"); + return ret; + } + ret = cpufreq_register_driver(&cpufreq_qcom_hw_driver); if (ret) - dev_err(&pdev->dev, "CPUFreq HW driver failed to register\n"); + dev_err(dev, "CPUFreq HW driver failed to register\n"); else - dev_dbg(&pdev->dev, "QCOM CPUFreq HW driver initialized\n"); + dev_dbg(dev, "QCOM CPUFreq HW driver initialized\n"); return ret; } diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c index 6c88827f4e62..f98f53bf1011 100644 --- a/drivers/cpufreq/tegra186-cpufreq.c +++ b/drivers/cpufreq/tegra186-cpufreq.c @@ -65,8 +65,8 @@ struct tegra186_cpufreq_cluster { struct tegra186_cpufreq_data { void __iomem *regs; - struct tegra186_cpufreq_cluster *clusters; const struct tegra186_cpufreq_cpu *cpus; + struct tegra186_cpufreq_cluster clusters[]; }; static int tegra186_cpufreq_init(struct cpufreq_policy *policy) @@ -221,15 +221,12 @@ static int tegra186_cpufreq_probe(struct platform_device *pdev) struct tegra_bpmp *bpmp; unsigned int i = 0, err; - data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); + data = 
devm_kzalloc(&pdev->dev, + struct_size(data, clusters, TEGRA186_NUM_CLUSTERS), + GFP_KERNEL); if (!data) return -ENOMEM; - data->clusters = devm_kcalloc(&pdev->dev, TEGRA186_NUM_CLUSTERS, - sizeof(*data->clusters), GFP_KERNEL); - if (!data->clusters) - return -ENOMEM; - data->cpus = tegra186_cpus; bpmp = tegra_bpmp_get(&pdev->dev); diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c index f64180dd2005..be4209d97cb3 100644 --- a/drivers/cpufreq/ti-cpufreq.c +++ b/drivers/cpufreq/ti-cpufreq.c @@ -39,6 +39,14 @@ #define OMAP34xx_ProdID_SKUID 0x4830A20C #define OMAP3_SYSCON_BASE (0x48000000 + 0x2000 + 0x270) +#define AM625_EFUSE_K_MPU_OPP 11 +#define AM625_EFUSE_S_MPU_OPP 19 +#define AM625_EFUSE_T_MPU_OPP 20 + +#define AM625_SUPPORT_K_MPU_OPP BIT(0) +#define AM625_SUPPORT_S_MPU_OPP BIT(1) +#define AM625_SUPPORT_T_MPU_OPP BIT(2) + #define VERSION_COUNT 2 struct ti_cpufreq_data; @@ -104,6 +112,25 @@ static unsigned long omap3_efuse_xlate(struct ti_cpufreq_data *opp_data, return BIT(efuse); } +static unsigned long am625_efuse_xlate(struct ti_cpufreq_data *opp_data, + unsigned long efuse) +{ + unsigned long calculated_efuse = AM625_SUPPORT_K_MPU_OPP; + + switch (efuse) { + case AM625_EFUSE_T_MPU_OPP: + calculated_efuse |= AM625_SUPPORT_T_MPU_OPP; + fallthrough; + case AM625_EFUSE_S_MPU_OPP: + calculated_efuse |= AM625_SUPPORT_S_MPU_OPP; + fallthrough; + case AM625_EFUSE_K_MPU_OPP: + calculated_efuse |= AM625_SUPPORT_K_MPU_OPP; + } + + return calculated_efuse; +} + static struct ti_cpufreq_soc_data am3x_soc_data = { .efuse_xlate = amx3_efuse_xlate, .efuse_fallback = AM33XX_800M_ARM_MPU_MAX_FREQ, @@ -198,6 +225,14 @@ static struct ti_cpufreq_soc_data am3517_soc_data = { .multi_regulator = false, }; +static struct ti_cpufreq_soc_data am625_soc_data = { + .efuse_xlate = am625_efuse_xlate, + .efuse_offset = 0x0018, + .efuse_mask = 0x07c0, + .efuse_shift = 0x6, + .rev_offset = 0x0014, + .multi_regulator = false, +}; /** * ti_cpufreq_get_efuse() - Parse and return efuse value present on SoC @@ -301,6 +336,7 @@ static const struct of_device_id ti_cpufreq_of_match[] = { { .compatible = "ti,dra7", .data = &dra7_soc_data }, { .compatible = "ti,omap34xx", .data = &omap34xx_soc_data, }, { .compatible = "ti,omap36xx", .data = &omap36xx_soc_data, }, + { .compatible = "ti,am625", .data = &am625_soc_data, }, /* legacy */ { .compatible = "ti,omap3430", .data = &omap34xx_soc_data, }, { .compatible = "ti,omap3630", .data = &omap36xx_soc_data, }, |
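As a worked example of the AM625 efuse decoding added to ti-cpufreq in the hunk above: each higher speed grade implies the lower ones, so the switch falls through and accumulates OPP-support bits (an efuse value of 19, the "S" grade, yields BIT(1) | BIT(0)). The name am625_efuse_to_opp_mask() is illustrative; the driver's function is am625_efuse_xlate():

```c
#include <linux/bits.h>
#include <linux/compiler.h>

/* Values mirrored from the ti-cpufreq AM625 hunk above. */
#define AM625_EFUSE_K_MPU_OPP		11
#define AM625_EFUSE_S_MPU_OPP		19
#define AM625_EFUSE_T_MPU_OPP		20

#define AM625_SUPPORT_K_MPU_OPP		BIT(0)
#define AM625_SUPPORT_S_MPU_OPP		BIT(1)
#define AM625_SUPPORT_T_MPU_OPP		BIT(2)

static unsigned long am625_efuse_to_opp_mask(unsigned long efuse)
{
	unsigned long mask = AM625_SUPPORT_K_MPU_OPP;	/* baseline grade */

	/* Higher grades fall through to pick up every lower grade's bit. */
	switch (efuse) {
	case AM625_EFUSE_T_MPU_OPP:
		mask |= AM625_SUPPORT_T_MPU_OPP;
		fallthrough;
	case AM625_EFUSE_S_MPU_OPP:
		mask |= AM625_SUPPORT_S_MPU_OPP;
		fallthrough;
	case AM625_EFUSE_K_MPU_OPP:
		mask |= AM625_SUPPORT_K_MPU_OPP;
	}

	return mask;
}
```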