author     Rafael J. Wysocki <rafael.j.wysocki@intel.com>    2022-12-12 15:53:48 +0100
committer  Rafael J. Wysocki <rafael.j.wysocki@intel.com>    2022-12-12 15:53:48 +0100
commit     173c6c5af3277911763e8c737f0178db9a246d69 (patch)
tree       d79a668b1b926f145103b463d8bf457614a46261
parent     dbfa44782787dc90460bae8b500708ec83e0f611 (diff)
parent     04ac14ad3a1a36970f0935146185157704c9695c (diff)
Merge branch 'pm-cpufreq'

Merge cpufreq changes for 6.2-rc1:

 - Generalize of_perf_domain_get_sharing_cpumask phandle format
   (Hector Martin).

 - Add new cpufreq driver for Apple SoC CPU P-states (Hector Martin).

 - Update Qualcomm cpufreq driver, including:
   * CPU clock provider support,
   * generic cleanups and reorganization,
   * potential memleak fix,
   * fix of the return value of cpufreq_driver->get()
   (Manivannan Sadhasivam, Chen Hui).

 - Update Qualcomm cpufreq driver's DT bindings, including:
   * support for CPU clock provider,
   * missing cache-related properties fixes,
   * support for QDU1000/QRU1000
   (Manivannan Sadhasivam, Rob Herring, Melody Olvera).

 - Add support for the ti,am625 SoC and enable building ti-cpufreq for
   ARCH_K3 (Dave Gerlach, Vibhore Vardhan).

 - Use a flexible array to simplify memory allocation in the tegra186
   cpufreq driver (Christophe JAILLET).

 - Convert cpufreq statistics code to use sysfs_emit_at() (ye xingchen).

 - Allow intel_pstate to use no-HWP mode on Sapphire Rapids (Giovanni
   Gherdovich).

 - Add missing pci_dev_put() to the amd_freq_sensitivity cpufreq driver
   (Xiongfeng Wang).

 - Initialize the kobj_unregister completion before calling
   kobject_init_and_add() in the cpufreq core code (Yongqiang Liu).

 - Defer setting boost MSRs in the ACPI cpufreq driver (Stuart Hayes,
   Nathan Chancellor).

 - Make intel_pstate accept an initial EPP value of 0x80 (Srinivas
   Pandruvada).

 - Make the read-only array sys_clk_src in the SPEAr cpufreq driver
   static (Colin Ian King).

 - Make the array speeds in the longhaul cpufreq driver static
   (Colin Ian King).

 - Use the str_enabled_disabled() helper in the ACPI cpufreq driver
   (Andy Shevchenko).

 - Drop a reference to CVS from the cpufreq documentation (Conghui Wang).

* pm-cpufreq: (30 commits)
  cpufreq: Remove CVS version control contents from documentation
  cpufreq: stats: Convert to use sysfs_emit_at() API
  cpufreq: ACPI: Only set boost MSRs on supported CPUs
  dt-bindings: cpufreq: cpufreq-qcom-hw: Add QDU1000/QRU1000 cpufreq
  cpufreq: tegra186: Use flexible array to simplify memory allocation
  cpufreq: intel_pstate: Add Sapphire Rapids support in no-HWP mode
  cpufreq: amd_freq_sensitivity: Add missing pci_dev_put()
  cpufreq: Init completion before kobject_init_and_add()
  cpufreq: apple-soc: Add new driver to control Apple SoC CPU P-states
  cpufreq: qcom-hw: Add CPU clock provider support
  dt-bindings: cpufreq: cpufreq-qcom-hw: Add cpufreq clock provider
  cpufreq: qcom-hw: Fix the frequency returned by cpufreq_driver->get()
  cpufreq: ACPI: Remove unused variables 'acpi_cpufreq_online' and 'ret'
  cpufreq: qcom-hw: Fix memory leak in qcom_cpufreq_hw_read_lut()
  arm64: dts: ti: k3-am625-sk: Add 1.4GHz OPP
  cpufreq: ti: Enable ti-cpufreq for ARCH_K3
  arm64: dts: ti: k3-am625: Introduce operating-points table
  cpufreq: dt-platdev: Blacklist ti,am625 SoC
  cpufreq: ti-cpufreq: Add support for AM625
  dt-bindings: cpufreq: qcom: Add missing cache related properties
  ...
-rw-r--r--  Documentation/cpu-freq/index.rst                                   9
-rw-r--r--  Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml    31
-rw-r--r--  arch/arm64/boot/dts/ti/k3-am625-sk.dts                             9
-rw-r--r--  arch/arm64/boot/dts/ti/k3-am625.dtsi                              51
-rw-r--r--  drivers/cpufreq/Kconfig.arm                                       13
-rw-r--r--  drivers/cpufreq/Makefile                                           1
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c                                    43
-rw-r--r--  drivers/cpufreq/amd_freq_sensitivity.c                             2
-rw-r--r--  drivers/cpufreq/apple-soc-cpufreq.c                              352
-rw-r--r--  drivers/cpufreq/cpufreq-dt-platdev.c                               3
-rw-r--r--  drivers/cpufreq/cpufreq.c                                          2
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c                                   16
-rw-r--r--  drivers/cpufreq/intel_pstate.c                                    10
-rw-r--r--  drivers/cpufreq/longhaul.c                                         4
-rw-r--r--  drivers/cpufreq/mediatek-cpufreq-hw.c                             14
-rw-r--r--  drivers/cpufreq/qcom-cpufreq-hw.c                                206
-rw-r--r--  drivers/cpufreq/spear-cpufreq.c                                    2
-rw-r--r--  drivers/cpufreq/tegra186-cpufreq.c                                11
-rw-r--r--  drivers/cpufreq/ti-cpufreq.c                                      36
-rw-r--r--  include/linux/cpufreq.h                                           28
20 files changed, 679 insertions, 164 deletions
diff --git a/Documentation/cpu-freq/index.rst b/Documentation/cpu-freq/index.rst
index aba7831ab1cb..2fe32dad562a 100644
--- a/Documentation/cpu-freq/index.rst
+++ b/Documentation/cpu-freq/index.rst
@@ -20,18 +20,15 @@ Author: Dominik Brodowski <linux@brodo.de>
Mailing List
------------
-There is a CPU frequency changing CVS commit and general list where
-you can report bugs, problems or submit patches. To post a message,
-send an email to linux-pm@vger.kernel.org.
+There is a CPU frequency general list where you can report bugs,
+problems or submit patches. To post a message, send an email to
+linux-pm@vger.kernel.org.
Links
-----
the FTP archives:
* ftp://ftp.linux.org.uk/pub/linux/cpufreq/
-how to access the CVS repository:
-* http://cvs.arm.linux.org.uk/
-
the CPUFreq Mailing list:
* http://vger.kernel.org/vger-lists.html#linux-pm
diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml b/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml
index 24fa3d87a40b..903b31129f01 100644
--- a/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml
+++ b/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml
@@ -25,6 +25,7 @@ properties:
- description: v2 of CPUFREQ HW (EPSS)
items:
- enum:
+ - qcom,qdu1000-cpufreq-epss
- qcom,sm6375-cpufreq-epss
- qcom,sm8250-cpufreq-epss
- const: qcom,cpufreq-epss
@@ -56,6 +57,9 @@ properties:
'#freq-domain-cells':
const: 1
+ '#clock-cells':
+ const: 1
+
required:
- compatible
- reg
@@ -83,11 +87,16 @@ examples:
enable-method = "psci";
next-level-cache = <&L2_0>;
qcom,freq-domain = <&cpufreq_hw 0>;
+ clocks = <&cpufreq_hw 0>;
L2_0: l2-cache {
compatible = "cache";
+ cache-unified;
+ cache-level = <2>;
next-level-cache = <&L3_0>;
L3_0: l3-cache {
compatible = "cache";
+ cache-unified;
+ cache-level = <3>;
};
};
};
@@ -99,8 +108,11 @@ examples:
enable-method = "psci";
next-level-cache = <&L2_100>;
qcom,freq-domain = <&cpufreq_hw 0>;
+ clocks = <&cpufreq_hw 0>;
L2_100: l2-cache {
compatible = "cache";
+ cache-unified;
+ cache-level = <2>;
next-level-cache = <&L3_0>;
};
};
@@ -112,8 +124,11 @@ examples:
enable-method = "psci";
next-level-cache = <&L2_200>;
qcom,freq-domain = <&cpufreq_hw 0>;
+ clocks = <&cpufreq_hw 0>;
L2_200: l2-cache {
compatible = "cache";
+ cache-unified;
+ cache-level = <2>;
next-level-cache = <&L3_0>;
};
};
@@ -125,8 +140,11 @@ examples:
enable-method = "psci";
next-level-cache = <&L2_300>;
qcom,freq-domain = <&cpufreq_hw 0>;
+ clocks = <&cpufreq_hw 0>;
L2_300: l2-cache {
compatible = "cache";
+ cache-unified;
+ cache-level = <2>;
next-level-cache = <&L3_0>;
};
};
@@ -138,8 +156,11 @@ examples:
enable-method = "psci";
next-level-cache = <&L2_400>;
qcom,freq-domain = <&cpufreq_hw 1>;
+ clocks = <&cpufreq_hw 1>;
L2_400: l2-cache {
compatible = "cache";
+ cache-unified;
+ cache-level = <2>;
next-level-cache = <&L3_0>;
};
};
@@ -151,8 +172,11 @@ examples:
enable-method = "psci";
next-level-cache = <&L2_500>;
qcom,freq-domain = <&cpufreq_hw 1>;
+ clocks = <&cpufreq_hw 1>;
L2_500: l2-cache {
compatible = "cache";
+ cache-unified;
+ cache-level = <2>;
next-level-cache = <&L3_0>;
};
};
@@ -164,8 +188,11 @@ examples:
enable-method = "psci";
next-level-cache = <&L2_600>;
qcom,freq-domain = <&cpufreq_hw 1>;
+ clocks = <&cpufreq_hw 1>;
L2_600: l2-cache {
compatible = "cache";
+ cache-unified;
+ cache-level = <2>;
next-level-cache = <&L3_0>;
};
};
@@ -177,8 +204,11 @@ examples:
enable-method = "psci";
next-level-cache = <&L2_700>;
qcom,freq-domain = <&cpufreq_hw 1>;
+ clocks = <&cpufreq_hw 1>;
L2_700: l2-cache {
compatible = "cache";
+ cache-unified;
+ cache-level = <2>;
next-level-cache = <&L3_0>;
};
};
@@ -197,6 +227,7 @@ examples:
clock-names = "xo", "alternate";
#freq-domain-cells = <1>;
+ #clock-cells = <1>;
};
};
...
diff --git a/arch/arm64/boot/dts/ti/k3-am625-sk.dts b/arch/arm64/boot/dts/ti/k3-am625-sk.dts
index 93a5f0817efc..4620ef5e19bb 100644
--- a/arch/arm64/boot/dts/ti/k3-am625-sk.dts
+++ b/arch/arm64/boot/dts/ti/k3-am625-sk.dts
@@ -31,6 +31,15 @@
bootargs = "console=ttyS2,115200n8 earlycon=ns16550a,mmio32,0x02800000";
};
+ opp-table {
+ /* Add 1.4GHz OPP for am625-sk board. Requires VDD_CORE to be at 0.85V */
+ opp-1400000000 {
+ opp-hz = /bits/ 64 <1400000000>;
+ opp-supported-hw = <0x01 0x0004>;
+ clock-latency-ns = <6000000>;
+ };
+ };
+
memory@80000000 {
device_type = "memory";
/* 2G RAM */
diff --git a/arch/arm64/boot/dts/ti/k3-am625.dtsi b/arch/arm64/boot/dts/ti/k3-am625.dtsi
index 887f31c23fef..cea2cc7de5dd 100644
--- a/arch/arm64/boot/dts/ti/k3-am625.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am625.dtsi
@@ -48,6 +48,8 @@
d-cache-line-size = <64>;
d-cache-sets = <128>;
next-level-cache = <&L2_0>;
+ operating-points-v2 = <&a53_opp_table>;
+ clocks = <&k3_clks 135 0>;
};
cpu1: cpu@1 {
@@ -62,6 +64,8 @@
d-cache-line-size = <64>;
d-cache-sets = <128>;
next-level-cache = <&L2_0>;
+ operating-points-v2 = <&a53_opp_table>;
+ clocks = <&k3_clks 136 0>;
};
cpu2: cpu@2 {
@@ -76,6 +80,8 @@
d-cache-line-size = <64>;
d-cache-sets = <128>;
next-level-cache = <&L2_0>;
+ operating-points-v2 = <&a53_opp_table>;
+ clocks = <&k3_clks 137 0>;
};
cpu3: cpu@3 {
@@ -90,6 +96,51 @@
d-cache-line-size = <64>;
d-cache-sets = <128>;
next-level-cache = <&L2_0>;
+ operating-points-v2 = <&a53_opp_table>;
+ clocks = <&k3_clks 138 0>;
+ };
+ };
+
+ a53_opp_table: opp-table {
+ compatible = "operating-points-v2-ti-cpu";
+ opp-shared;
+ syscon = <&wkup_conf>;
+
+ opp-200000000 {
+ opp-hz = /bits/ 64 <200000000>;
+ opp-supported-hw = <0x01 0x0007>;
+ clock-latency-ns = <6000000>;
+ };
+
+ opp-400000000 {
+ opp-hz = /bits/ 64 <400000000>;
+ opp-supported-hw = <0x01 0x0007>;
+ clock-latency-ns = <6000000>;
+ };
+
+ opp-600000000 {
+ opp-hz = /bits/ 64 <600000000>;
+ opp-supported-hw = <0x01 0x0007>;
+ clock-latency-ns = <6000000>;
+ };
+
+ opp-800000000 {
+ opp-hz = /bits/ 64 <800000000>;
+ opp-supported-hw = <0x01 0x0007>;
+ clock-latency-ns = <6000000>;
+ };
+
+ opp-1000000000 {
+ opp-hz = /bits/ 64 <1000000000>;
+ opp-supported-hw = <0x01 0x0006>;
+ clock-latency-ns = <6000000>;
+ };
+
+ opp-1250000000 {
+ opp-hz = /bits/ 64 <1250000000>;
+ opp-supported-hw = <0x01 0x0004>;
+ clock-latency-ns = <6000000>;
+ opp-suspend;
};
};
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 82e5de1f6f8c..0a0352d8fa45 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -41,6 +41,15 @@ config ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM
To compile this driver as a module, choose M here: the
module will be called sun50i-cpufreq-nvmem.
+config ARM_APPLE_SOC_CPUFREQ
+ tristate "Apple Silicon SoC CPUFreq support"
+ depends on ARCH_APPLE || (COMPILE_TEST && 64BIT)
+ select PM_OPP
+ default ARCH_APPLE
+ help
+ This adds the CPUFreq driver for Apple Silicon machines
+ (e.g. Apple M1).
+
config ARM_ARMADA_37XX_CPUFREQ
tristate "Armada 37xx CPUFreq support"
depends on ARCH_MVEBU && CPUFREQ_DT
@@ -340,8 +349,8 @@ config ARM_TEGRA194_CPUFREQ
config ARM_TI_CPUFREQ
bool "Texas Instruments CPUFreq support"
- depends on ARCH_OMAP2PLUS
- default ARCH_OMAP2PLUS
+ depends on ARCH_OMAP2PLUS || ARCH_K3
+ default y
help
This driver enables valid OPPs on the running platform based on
values contained within the SoC in use. Enable this in order to
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 49b98c62c5af..32a7029e25ed 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -52,6 +52,7 @@ obj-$(CONFIG_X86_AMD_FREQ_SENSITIVITY) += amd_freq_sensitivity.o
##################################################################################
# ARM SoC drivers
+obj-$(CONFIG_ARM_APPLE_SOC_CPUFREQ) += apple-soc-cpufreq.o
obj-$(CONFIG_ARM_ARMADA_37XX_CPUFREQ) += armada-37xx-cpufreq.o
obj-$(CONFIG_ARM_ARMADA_8K_CPUFREQ) += armada-8k-cpufreq.o
obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ) += brcmstb-avs-cpufreq.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 1bb2b90ebb21..78adfb2ffff6 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -19,6 +19,7 @@
#include <linux/compiler.h>
#include <linux/dmi.h>
#include <linux/slab.h>
+#include <linux/string_helpers.h>
#include <linux/acpi.h>
#include <linux/io.h>
@@ -135,8 +136,8 @@ static int set_boost(struct cpufreq_policy *policy, int val)
{
on_each_cpu_mask(policy->cpus, boost_set_msr_each,
(void *)(long)val, 1);
- pr_debug("CPU %*pbl: Core Boosting %sabled.\n",
- cpumask_pr_args(policy->cpus), val ? "en" : "dis");
+ pr_debug("CPU %*pbl: Core Boosting %s.\n",
+ cpumask_pr_args(policy->cpus), str_enabled_disabled(val));
return 0;
}
@@ -535,15 +536,6 @@ static void free_acpi_perf_data(void)
free_percpu(acpi_perf_data);
}
-static int cpufreq_boost_online(unsigned int cpu)
-{
- /*
- * On the CPU_UP path we simply keep the boost-disable flag
- * in sync with the current global state.
- */
- return boost_set_msr(acpi_cpufreq_driver.boost_enabled);
-}
-
static int cpufreq_boost_down_prep(unsigned int cpu)
{
/*
@@ -897,6 +889,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
if (perf->states[0].core_frequency * 1000 != freq_table[0].frequency)
pr_warn(FW_WARN "P-state 0 is not max freq\n");
+ if (acpi_cpufreq_driver.set_boost)
+ set_boost(policy, acpi_cpufreq_driver.boost_enabled);
+
return result;
err_unreg:
@@ -916,6 +911,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
pr_debug("%s\n", __func__);
+ cpufreq_boost_down_prep(policy->cpu);
policy->fast_switch_possible = false;
policy->driver_data = NULL;
acpi_processor_unregister_performance(data->acpi_perf_cpu);
@@ -958,12 +954,8 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
.attr = acpi_cpufreq_attr,
};
-static enum cpuhp_state acpi_cpufreq_online;
-
static void __init acpi_cpufreq_boost_init(void)
{
- int ret;
-
if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA))) {
pr_debug("Boost capabilities not present in the processor\n");
return;
@@ -971,24 +963,6 @@ static void __init acpi_cpufreq_boost_init(void)
acpi_cpufreq_driver.set_boost = set_boost;
acpi_cpufreq_driver.boost_enabled = boost_state(0);
-
- /*
- * This calls the online callback on all online cpu and forces all
- * MSRs to the same value.
- */
- ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "cpufreq/acpi:online",
- cpufreq_boost_online, cpufreq_boost_down_prep);
- if (ret < 0) {
- pr_err("acpi_cpufreq: failed to register hotplug callbacks\n");
- return;
- }
- acpi_cpufreq_online = ret;
-}
-
-static void acpi_cpufreq_boost_exit(void)
-{
- if (acpi_cpufreq_online > 0)
- cpuhp_remove_state_nocalls(acpi_cpufreq_online);
}
static int __init acpi_cpufreq_init(void)
@@ -1032,7 +1006,6 @@ static int __init acpi_cpufreq_init(void)
ret = cpufreq_register_driver(&acpi_cpufreq_driver);
if (ret) {
free_acpi_perf_data();
- acpi_cpufreq_boost_exit();
}
return ret;
}
@@ -1041,8 +1014,6 @@ static void __exit acpi_cpufreq_exit(void)
{
pr_debug("%s\n", __func__);
- acpi_cpufreq_boost_exit();
-
cpufreq_unregister_driver(&acpi_cpufreq_driver);
free_acpi_perf_data();
diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c
index 6448e03bcf48..59b19b9975e8 100644
--- a/drivers/cpufreq/amd_freq_sensitivity.c
+++ b/drivers/cpufreq/amd_freq_sensitivity.c
@@ -125,6 +125,8 @@ static int __init amd_freq_sensitivity_init(void)
if (!pcidev) {
if (!boot_cpu_has(X86_FEATURE_PROC_FEEDBACK))
return -ENODEV;
+ } else {
+ pci_dev_put(pcidev);
}
if (rdmsrl_safe(MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, &val))
diff --git a/drivers/cpufreq/apple-soc-cpufreq.c b/drivers/cpufreq/apple-soc-cpufreq.c
new file mode 100644
index 000000000000..d1801281cdd9
--- /dev/null
+++ b/drivers/cpufreq/apple-soc-cpufreq.c
@@ -0,0 +1,352 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Apple SoC CPU cluster performance state driver
+ *
+ * Copyright The Asahi Linux Contributors
+ *
+ * Based on scpi-cpufreq.c
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+
+#define APPLE_DVFS_CMD 0x20
+#define APPLE_DVFS_CMD_BUSY BIT(31)
+#define APPLE_DVFS_CMD_SET BIT(25)
+#define APPLE_DVFS_CMD_PS2 GENMASK(16, 12)
+#define APPLE_DVFS_CMD_PS1 GENMASK(4, 0)
+
+/* Same timebase as CPU counter (24MHz) */
+#define APPLE_DVFS_LAST_CHG_TIME 0x38
+
+/*
+ * Apple ran out of bits and had to shift this in T8112...
+ */
+#define APPLE_DVFS_STATUS 0x50
+#define APPLE_DVFS_STATUS_CUR_PS_T8103 GENMASK(7, 4)
+#define APPLE_DVFS_STATUS_CUR_PS_SHIFT_T8103 4
+#define APPLE_DVFS_STATUS_TGT_PS_T8103 GENMASK(3, 0)
+#define APPLE_DVFS_STATUS_CUR_PS_T8112 GENMASK(9, 5)
+#define APPLE_DVFS_STATUS_CUR_PS_SHIFT_T8112 5
+#define APPLE_DVFS_STATUS_TGT_PS_T8112 GENMASK(4, 0)
+
+/*
+ * Div is +1, base clock is 12MHz on existing SoCs.
+ * For documentation purposes. We use the OPP table to
+ * get the frequency.
+ */
+#define APPLE_DVFS_PLL_STATUS 0xc0
+#define APPLE_DVFS_PLL_FACTOR 0xc8
+#define APPLE_DVFS_PLL_FACTOR_MULT GENMASK(31, 16)
+#define APPLE_DVFS_PLL_FACTOR_DIV GENMASK(15, 0)
+
+#define APPLE_DVFS_TRANSITION_TIMEOUT 100
+
+struct apple_soc_cpufreq_info {
+ u64 max_pstate;
+ u64 cur_pstate_mask;
+ u64 cur_pstate_shift;
+};
+
+struct apple_cpu_priv {
+ struct device *cpu_dev;
+ void __iomem *reg_base;
+ const struct apple_soc_cpufreq_info *info;
+};
+
+static struct cpufreq_driver apple_soc_cpufreq_driver;
+
+static const struct apple_soc_cpufreq_info soc_t8103_info = {
+ .max_pstate = 15,
+ .cur_pstate_mask = APPLE_DVFS_STATUS_CUR_PS_T8103,
+ .cur_pstate_shift = APPLE_DVFS_STATUS_CUR_PS_SHIFT_T8103,
+};
+
+static const struct apple_soc_cpufreq_info soc_t8112_info = {
+ .max_pstate = 31,
+ .cur_pstate_mask = APPLE_DVFS_STATUS_CUR_PS_T8112,
+ .cur_pstate_shift = APPLE_DVFS_STATUS_CUR_PS_SHIFT_T8112,
+};
+
+static const struct apple_soc_cpufreq_info soc_default_info = {
+ .max_pstate = 15,
+ .cur_pstate_mask = 0, /* fallback */
+};
+
+static const struct of_device_id apple_soc_cpufreq_of_match[] = {
+ {
+ .compatible = "apple,t8103-cluster-cpufreq",
+ .data = &soc_t8103_info,
+ },
+ {
+ .compatible = "apple,t8112-cluster-cpufreq",
+ .data = &soc_t8112_info,
+ },
+ {
+ .compatible = "apple,cluster-cpufreq",
+ .data = &soc_default_info,
+ },
+ {}
+};
+
+static unsigned int apple_soc_cpufreq_get_rate(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
+ struct apple_cpu_priv *priv = policy->driver_data;
+ struct cpufreq_frequency_table *p;
+ unsigned int pstate;
+
+ if (priv->info->cur_pstate_mask) {
+ u64 reg = readq_relaxed(priv->reg_base + APPLE_DVFS_STATUS);
+
+ pstate = (reg & priv->info->cur_pstate_mask) >> priv->info->cur_pstate_shift;
+ } else {
+ /*
+ * For the fallback case we might not know the layout of DVFS_STATUS,
+ * so just use the command register value (which ignores boost limitations).
+ */
+ u64 reg = readq_relaxed(priv->reg_base + APPLE_DVFS_CMD);
+
+ pstate = FIELD_GET(APPLE_DVFS_CMD_PS1, reg);
+ }
+
+ cpufreq_for_each_valid_entry(p, policy->freq_table)
+ if (p->driver_data == pstate)
+ return p->frequency;
+
+ dev_err(priv->cpu_dev, "could not find frequency for pstate %d\n",
+ pstate);
+ return 0;
+}
+
+static int apple_soc_cpufreq_set_target(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ struct apple_cpu_priv *priv = policy->driver_data;
+ unsigned int pstate = policy->freq_table[index].driver_data;
+ u64 reg;
+
+ /* Fallback for newer SoCs */
+ if (index > priv->info->max_pstate)
+ index = priv->info->max_pstate;
+
+ if (readq_poll_timeout_atomic(priv->reg_base + APPLE_DVFS_CMD, reg,
+ !(reg & APPLE_DVFS_CMD_BUSY), 2,
+ APPLE_DVFS_TRANSITION_TIMEOUT)) {
+ return -EIO;
+ }
+
+ reg &= ~(APPLE_DVFS_CMD_PS1 | APPLE_DVFS_CMD_PS2);
+ reg |= FIELD_PREP(APPLE_DVFS_CMD_PS1, pstate);
+ reg |= FIELD_PREP(APPLE_DVFS_CMD_PS2, pstate);
+ reg |= APPLE_DVFS_CMD_SET;
+
+ writeq_relaxed(reg, priv->reg_base + APPLE_DVFS_CMD);
+
+ return 0;
+}
+
+static unsigned int apple_soc_cpufreq_fast_switch(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ if (apple_soc_cpufreq_set_target(policy, policy->cached_resolved_idx) < 0)
+ return 0;
+
+ return policy->freq_table[policy->cached_resolved_idx].frequency;
+}
+
+static int apple_soc_cpufreq_find_cluster(struct cpufreq_policy *policy,
+ void __iomem **reg_base,
+ const struct apple_soc_cpufreq_info **info)
+{
+ struct of_phandle_args args;
+ const struct of_device_id *match;
+ int ret = 0;
+
+ ret = of_perf_domain_get_sharing_cpumask(policy->cpu, "performance-domains",
+ "#performance-domain-cells",
+ policy->cpus, &args);
+ if (ret < 0)
+ return ret;
+
+ match = of_match_node(apple_soc_cpufreq_of_match, args.np);
+ of_node_put(args.np);
+ if (!match)
+ return -ENODEV;
+
+ *info = match->data;
+
+ *reg_base = of_iomap(args.np, 0);
+ if (IS_ERR(*reg_base))
+ return PTR_ERR(*reg_base);
+
+ return 0;
+}
+
+static struct freq_attr *apple_soc_cpufreq_hw_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL, /* Filled in below if boost is enabled */
+ NULL,
+};
+
+static int apple_soc_cpufreq_init(struct cpufreq_policy *policy)
+{
+ int ret, i;
+ unsigned int transition_latency;
+ void __iomem *reg_base;
+ struct device *cpu_dev;
+ struct apple_cpu_priv *priv;
+ const struct apple_soc_cpufreq_info *info;
+ struct cpufreq_frequency_table *freq_table;
+
+ cpu_dev = get_cpu_device(policy->cpu);
+ if (!cpu_dev) {
+ pr_err("failed to get cpu%d device\n", policy->cpu);
+ return -ENODEV;
+ }
+
+ ret = dev_pm_opp_of_add_table(cpu_dev);
+ if (ret < 0) {
+ dev_err(cpu_dev, "%s: failed to add OPP table: %d\n", __func__, ret);
+ return ret;
+ }
+
+ ret = apple_soc_cpufreq_find_cluster(policy, &reg_base, &info);
+ if (ret) {
+ dev_err(cpu_dev, "%s: failed to get cluster info: %d\n", __func__, ret);
+ return ret;
+ }
+
+ ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
+ if (ret) {
+ dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n", __func__, ret);
+ goto out_iounmap;
+ }
+
+ ret = dev_pm_opp_get_opp_count(cpu_dev);
+ if (ret <= 0) {
+ dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n");
+ ret = -EPROBE_DEFER;
+ goto out_free_opp;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto out_free_opp;
+ }
+
+ ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
+ if (ret) {
+ dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
+ goto out_free_priv;
+ }
+
+ /* Get OPP levels (p-state indexes) and stash them in driver_data */
+ for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
+ unsigned long rate = freq_table[i].frequency * 1000 + 999;
+ struct dev_pm_opp *opp = dev_pm_opp_find_freq_floor(cpu_dev, &rate);
+
+ if (IS_ERR(opp)) {
+ ret = PTR_ERR(opp);
+ goto out_free_cpufreq_table;
+ }
+ freq_table[i].driver_data = dev_pm_opp_get_level(opp);
+ dev_pm_opp_put(opp);
+ }
+
+ priv->cpu_dev = cpu_dev;
+ priv->reg_base = reg_base;
+ priv->info = info;
+ policy->driver_data = priv;
+ policy->freq_table = freq_table;
+
+ transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
+ if (!transition_latency)
+ transition_latency = CPUFREQ_ETERNAL;
+
+ policy->cpuinfo.transition_latency = transition_latency;
+ policy->dvfs_possible_from_any_cpu = true;
+ policy->fast_switch_possible = true;
+
+ if (policy_has_boost_freq(policy)) {
+ ret = cpufreq_enable_boost_support();
+ if (ret) {
+ dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
+ } else {
+ apple_soc_cpufreq_hw_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
+ apple_soc_cpufreq_driver.boost_enabled = true;
+ }
+ }
+
+ return 0;
+
+out_free_cpufreq_table:
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
+out_free_priv:
+ kfree(priv);
+out_free_opp:
+ dev_pm_opp_remove_all_dynamic(cpu_dev);
+out_iounmap:
+ iounmap(reg_base);
+ return ret;
+}
+
+static int apple_soc_cpufreq_exit(struct cpufreq_policy *policy)
+{
+ struct apple_cpu_priv *priv = policy->driver_data;
+
+ dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
+ dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
+ iounmap(priv->reg_base);
+ kfree(priv);
+
+ return 0;
+}
+
+static struct cpufreq_driver apple_soc_cpufreq_driver = {
+ .name = "apple-cpufreq",
+ .flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+ CPUFREQ_NEED_INITIAL_FREQ_CHECK | CPUFREQ_IS_COOLING_DEV,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .attr = cpufreq_generic_attr,
+ .get = apple_soc_cpufreq_get_rate,
+ .init = apple_soc_cpufreq_init,
+ .exit = apple_soc_cpufreq_exit,
+ .target_index = apple_soc_cpufreq_set_target,
+ .fast_switch = apple_soc_cpufreq_fast_switch,
+ .register_em = cpufreq_register_em_with_opp,
+ .attr = apple_soc_cpufreq_hw_attr,
+};
+
+static int __init apple_soc_cpufreq_module_init(void)
+{
+ if (!of_machine_is_compatible("apple,arm-platform"))
+ return -ENODEV;
+
+ return cpufreq_register_driver(&apple_soc_cpufreq_driver);
+}
+module_init(apple_soc_cpufreq_module_init);
+
+static void __exit apple_soc_cpufreq_module_exit(void)
+{
+ cpufreq_unregister_driver(&apple_soc_cpufreq_driver);
+}
+module_exit(apple_soc_cpufreq_module_exit);
+
+MODULE_DEVICE_TABLE(of, apple_soc_cpufreq_of_match);
+MODULE_AUTHOR("Hector Martin <marcan@marcan.st>");
+MODULE_DESCRIPTION("Apple SoC CPU cluster DVFS driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 6ac3800db450..8ab672883043 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -103,6 +103,8 @@ static const struct of_device_id allowlist[] __initconst = {
static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "allwinner,sun50i-h6", },
+ { .compatible = "apple,arm-platform", },
+
{ .compatible = "arm,vexpress", },
{ .compatible = "calxeda,highbank", },
@@ -160,6 +162,7 @@ static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "ti,am43", },
{ .compatible = "ti,dra7", },
{ .compatible = "ti,omap3", },
+ { .compatible = "ti,am625", },
{ .compatible = "qcom,ipq8064", },
{ .compatible = "qcom,apq8064", },
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 69b3d61852ac..7e56a42750ea 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1207,6 +1207,7 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
goto err_free_rcpumask;
+ init_completion(&policy->kobj_unregister);
ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
cpufreq_global_kobject, "policy%u", cpu);
if (ret) {
@@ -1245,7 +1246,6 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
init_rwsem(&policy->rwsem);
spin_lock_init(&policy->transition_lock);
init_waitqueue_head(&policy->transition_wait);
- init_completion(&policy->kobj_unregister);
INIT_WORK(&policy->update, handle_update);
policy->cpu = cpu;
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 1570d6f3e75d..55c7ffd37d1c 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -128,25 +128,23 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
ssize_t len = 0;
int i, j, count;
- len += scnprintf(buf + len, PAGE_SIZE - len, " From : To\n");
- len += scnprintf(buf + len, PAGE_SIZE - len, " : ");
+ len += sysfs_emit_at(buf, len, " From : To\n");
+ len += sysfs_emit_at(buf, len, " : ");
for (i = 0; i < stats->state_num; i++) {
if (len >= PAGE_SIZE)
break;
- len += scnprintf(buf + len, PAGE_SIZE - len, "%9u ",
- stats->freq_table[i]);
+ len += sysfs_emit_at(buf, len, "%9u ", stats->freq_table[i]);
}
if (len >= PAGE_SIZE)
return PAGE_SIZE;
- len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
+ len += sysfs_emit_at(buf, len, "\n");
for (i = 0; i < stats->state_num; i++) {
if (len >= PAGE_SIZE)
break;
- len += scnprintf(buf + len, PAGE_SIZE - len, "%9u: ",
- stats->freq_table[i]);
+ len += sysfs_emit_at(buf, len, "%9u: ", stats->freq_table[i]);
for (j = 0; j < stats->state_num; j++) {
if (len >= PAGE_SIZE)
@@ -157,11 +155,11 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
else
count = stats->trans_table[i * stats->max_state + j];
- len += scnprintf(buf + len, PAGE_SIZE - len, "%9u ", count);
+ len += sysfs_emit_at(buf, len, "%9u ", count);
}
if (len >= PAGE_SIZE)
break;
- len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
+ len += sysfs_emit_at(buf, len, "\n");
}
if (len >= PAGE_SIZE) {
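
For reference, the sysfs_emit_at() conversion above follows the usual pattern for building multi-line sysfs output. A minimal sketch of that pattern, with an invented attribute and data table (not part of this patch):

/* Hedged sketch only: a generic show() callback built with sysfs_emit_at().
 * The "freqs" attribute and the freqs[] table are invented for illustration. */
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

static const unsigned int freqs[] = { 600000, 1200000, 1800000 };

static ssize_t freqs_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	ssize_t len = 0;
	int i;

	/* sysfs_emit_at() appends at offset @len and never exceeds PAGE_SIZE. */
	for (i = 0; i < ARRAY_SIZE(freqs); i++)
		len += sysfs_emit_at(buf, len, "%u ", freqs[i]);
	len += sysfs_emit_at(buf, len, "\n");

	return len;
}
static DEVICE_ATTR_RO(freqs);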
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 6ff73c30769f..fd73d6d2b808 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -298,6 +298,7 @@ static int hwp_active __read_mostly;
static int hwp_mode_bdw __read_mostly;
static bool per_cpu_limits __read_mostly;
static bool hwp_boost __read_mostly;
+static bool hwp_forced __read_mostly;
static struct cpufreq_driver *intel_pstate_driver __read_mostly;
@@ -1679,12 +1680,12 @@ static void intel_pstate_update_epp_defaults(struct cpudata *cpudata)
return;
/*
- * If powerup EPP is something other than chipset default 0x80 and
- * - is more performance oriented than 0x80 (default balance_perf EPP)
+ * If the EPP is set by firmware, which means that firmware enabled HWP
+ * - Is equal or less than 0x80 (default balance_perf EPP)
* - But less performance oriented than performance EPP
* then use this as new balance_perf EPP.
*/
- if (cpudata->epp_default < HWP_EPP_BALANCE_PERFORMANCE &&
+ if (hwp_forced && cpudata->epp_default <= HWP_EPP_BALANCE_PERFORMANCE &&
cpudata->epp_default > HWP_EPP_PERFORMANCE) {
epp_values[EPP_INDEX_BALANCE_PERFORMANCE] = cpudata->epp_default;
return;
@@ -2378,6 +2379,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
X86_MATCH(COMETLAKE, core_funcs),
X86_MATCH(ICELAKE_X, core_funcs),
X86_MATCH(TIGERLAKE, core_funcs),
+ X86_MATCH(SAPPHIRERAPIDS_X, core_funcs),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
@@ -3384,7 +3386,7 @@ static int __init intel_pstate_init(void)
id = x86_match_cpu(hwp_support_ids);
if (id) {
- bool hwp_forced = intel_pstate_hwp_is_enabled();
+ hwp_forced = intel_pstate_hwp_is_enabled();
if (hwp_forced)
pr_info("HWP enabled by BIOS\n");
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index 3e000e1a75c6..4c57c6725c13 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -407,10 +407,10 @@ static int guess_fsb(int mult)
{
int speed = cpu_khz / 1000;
int i;
- int speeds[] = { 666, 1000, 1333, 2000 };
+ static const int speeds[] = { 666, 1000, 1333, 2000 };
int f_max, f_min;
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < ARRAY_SIZE(speeds); i++) {
f_max = ((speeds[i] * mult) + 50) / 100;
f_max += (ROUNDING / 2);
f_min = f_max - ROUNDING;
diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c
index f0e0a35c7f21..f80339779084 100644
--- a/drivers/cpufreq/mediatek-cpufreq-hw.c
+++ b/drivers/cpufreq/mediatek-cpufreq-hw.c
@@ -160,6 +160,7 @@ static int mtk_cpu_resources_init(struct platform_device *pdev,
struct mtk_cpufreq_data *data;
struct device *dev = &pdev->dev;
struct resource *res;
+ struct of_phandle_args args;
void __iomem *base;
int ret, i;
int index;
@@ -168,11 +169,14 @@ static int mtk_cpu_resources_init(struct platform_device *pdev,
if (!data)
return -ENOMEM;
- index = of_perf_domain_get_sharing_cpumask(policy->cpu, "performance-domains",
- "#performance-domain-cells",
- policy->cpus);
- if (index < 0)
- return index;
+ ret = of_perf_domain_get_sharing_cpumask(policy->cpu, "performance-domains",
+ "#performance-domain-cells",
+ policy->cpus, &args);
+ if (ret < 0)
+ return ret;
+
+ index = args.args[0];
+ of_node_put(args.np);
res = platform_get_resource(pdev, IORESOURCE_MEM, index);
if (!res) {
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index 833589bc95e4..340fed35e45d 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -4,6 +4,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/clk-provider.h>
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/interconnect.h>
@@ -43,7 +44,6 @@ struct qcom_cpufreq_soc_data {
struct qcom_cpufreq_data {
void __iomem *base;
struct resource *res;
- const struct qcom_cpufreq_soc_data *soc_data;
/*
* Mutex to synchronize between de-init sequence and re-starting LMh
@@ -55,12 +55,18 @@ struct qcom_cpufreq_data {
bool cancel_throttle;
struct delayed_work throttle_work;
struct cpufreq_policy *policy;
+ struct clk_hw cpu_clk;
bool per_core_dcvs;
struct freq_qos_request throttle_freq_req;
};
+static struct {
+ struct qcom_cpufreq_data *data;
+ const struct qcom_cpufreq_soc_data *soc_data;
+} qcom_cpufreq;
+
static unsigned long cpu_hw_rate, xo_rate;
static bool icc_scaling_enabled;
@@ -109,7 +115,7 @@ static int qcom_cpufreq_hw_target_index(struct cpufreq_policy *policy,
unsigned int index)
{
struct qcom_cpufreq_data *data = policy->driver_data;
- const struct qcom_cpufreq_soc_data *soc_data = data->soc_data;
+ const struct qcom_cpufreq_soc_data *soc_data = qcom_cpufreq.soc_data;
unsigned long freq = policy->freq_table[index].frequency;
unsigned int i;
@@ -125,9 +131,37 @@ static int qcom_cpufreq_hw_target_index(struct cpufreq_policy *policy,
return 0;
}
+static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
+{
+ unsigned int lval;
+
+ if (qcom_cpufreq.soc_data->reg_current_vote)
+ lval = readl_relaxed(data->base + qcom_cpufreq.soc_data->reg_current_vote) & 0x3ff;
+ else
+ lval = readl_relaxed(data->base + qcom_cpufreq.soc_data->reg_domain_state) & 0xff;
+
+ return lval * xo_rate;
+}
+
+/* Get the current frequency of the CPU (after throttling) */
static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
{
struct qcom_cpufreq_data *data;
+ struct cpufreq_policy *policy;
+
+ policy = cpufreq_cpu_get_raw(cpu);
+ if (!policy)
+ return 0;
+
+ data = policy->driver_data;
+
+ return qcom_lmh_get_throttle_freq(data) / HZ_PER_KHZ;
+}
+
+/* Get the frequency requested by the cpufreq core for the CPU */
+static unsigned int qcom_cpufreq_get_freq(unsigned int cpu)
+{
+ struct qcom_cpufreq_data *data;
const struct qcom_cpufreq_soc_data *soc_data;
struct cpufreq_policy *policy;
unsigned int index;
@@ -137,7 +171,7 @@ static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
return 0;
data = policy->driver_data;
- soc_data = data->soc_data;
+ soc_data = qcom_cpufreq.soc_data;
index = readl_relaxed(data->base + soc_data->reg_perf_state);
index = min(index, LUT_MAX_ENTRIES - 1);
@@ -149,7 +183,7 @@ static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
{
struct qcom_cpufreq_data *data = policy->driver_data;
- const struct qcom_cpufreq_soc_data *soc_data = data->soc_data;
+ const struct qcom_cpufreq_soc_data *soc_data = qcom_cpufreq.soc_data;
unsigned int index;
unsigned int i;
@@ -173,7 +207,7 @@ static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
unsigned long rate;
int ret;
struct qcom_cpufreq_data *drv_data = policy->driver_data;
- const struct qcom_cpufreq_soc_data *soc_data = drv_data->soc_data;
+ const struct qcom_cpufreq_soc_data *soc_data = qcom_cpufreq.soc_data;
table = kcalloc(LUT_MAX_ENTRIES + 1, sizeof(*table), GFP_KERNEL);
if (!table)
@@ -193,6 +227,7 @@ static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
}
} else if (ret != -ENODEV) {
dev_err(cpu_dev, "Invalid opp table in device tree\n");
+ kfree(table);
return ret;
} else {
policy->fast_switch_possible = true;
@@ -286,18 +321,6 @@ static void qcom_get_related_cpus(int index, struct cpumask *m)
}
}
-static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
-{
- unsigned int lval;
-
- if (data->soc_data->reg_current_vote)
- lval = readl_relaxed(data->base + data->soc_data->reg_current_vote) & 0x3ff;
- else
- lval = readl_relaxed(data->base + data->soc_data->reg_domain_state) & 0xff;
-
- return lval * xo_rate;
-}
-
static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
{
struct cpufreq_policy *policy = data->policy;
@@ -341,7 +364,7 @@ static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
* If h/w throttled frequency is higher than what cpufreq has requested
* for, then stop polling and switch back to interrupt mechanism.
*/
- if (throttled_freq >= qcom_cpufreq_hw_get(cpu))
+ if (throttled_freq >= qcom_cpufreq_get_freq(cpu))
enable_irq(data->throttle_irq);
else
mod_delayed_work(system_highpri_wq, &data->throttle_work,
@@ -367,9 +390,9 @@ static irqreturn_t qcom_lmh_dcvs_handle_irq(int irq, void *data)
disable_irq_nosync(c_data->throttle_irq);
schedule_delayed_work(&c_data->throttle_work, 0);
- if (c_data->soc_data->reg_intr_clr)
+ if (qcom_cpufreq.soc_data->reg_intr_clr)
writel_relaxed(GT_IRQ_STATUS,
- c_data->base + c_data->soc_data->reg_intr_clr);
+ c_data->base + qcom_cpufreq.soc_data->reg_intr_clr);
return IRQ_HANDLED;
}
@@ -503,8 +526,6 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
struct of_phandle_args args;
struct device_node *cpu_np;
struct device *cpu_dev;
- struct resource *res;
- void __iomem *base;
struct qcom_cpufreq_data *data;
int ret, index;
@@ -526,51 +547,18 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
return ret;
index = args.args[0];
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, index);
- if (!res) {
- dev_err(dev, "failed to get mem resource %d\n", index);
- return -ENODEV;
- }
-
- if (!request_mem_region(res->start, resource_size(res), res->name)) {
- dev_err(dev, "failed to request resource %pR\n", res);
- return -EBUSY;
- }
-
- base = ioremap(res->start, resource_size(res));
- if (!base) {
- dev_err(dev, "failed to map resource %pR\n", res);
- ret = -ENOMEM;
- goto release_region;
- }
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data) {
- ret = -ENOMEM;
- goto unmap_base;
- }
-
- data->soc_data = of_device_get_match_data(&pdev->dev);
- data->base = base;
- data->res = res;
+ data = &qcom_cpufreq.data[index];
/* HW should be in enabled state to proceed */
- if (!(readl_relaxed(base + data->soc_data->reg_enable) & 0x1)) {
+ if (!(readl_relaxed(data->base + qcom_cpufreq.soc_data->reg_enable) & 0x1)) {
dev_err(dev, "Domain-%d cpufreq hardware not enabled\n", index);
- ret = -ENODEV;
- goto error;
+ return -ENODEV;
}
- if (readl_relaxed(base + data->soc_data->reg_dcvs_ctrl) & 0x1)
+ if (readl_relaxed(data->base + qcom_cpufreq.soc_data->reg_dcvs_ctrl) & 0x1)
data->per_core_dcvs = true;
qcom_get_related_cpus(index, policy->cpus);
- if (cpumask_empty(policy->cpus)) {
- dev_err(dev, "Domain-%d failed to get related CPUs\n", index);
- ret = -ENOENT;
- goto error;
- }
policy->driver_data = data;
policy->dvfs_possible_from_any_cpu = true;
@@ -578,14 +566,13 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
ret = qcom_cpufreq_hw_read_lut(cpu_dev, policy);
if (ret) {
dev_err(dev, "Domain-%d failed to read LUT\n", index);
- goto error;
+ return ret;
}
ret = dev_pm_opp_get_opp_count(cpu_dev);
if (ret <= 0) {
dev_err(cpu_dev, "Failed to add OPPs\n");
- ret = -ENODEV;
- goto error;
+ return -ENODEV;
}
if (policy_has_boost_freq(policy)) {
@@ -594,18 +581,7 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
}
- ret = qcom_cpufreq_hw_lmh_init(policy, index);
- if (ret)
- goto error;
-
- return 0;
-error:
- kfree(data);
-unmap_base:
- iounmap(base);
-release_region:
- release_mem_region(res->start, resource_size(res));
- return ret;
+ return qcom_cpufreq_hw_lmh_init(policy, index);
}
static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
@@ -658,20 +634,33 @@ static struct cpufreq_driver cpufreq_qcom_hw_driver = {
.ready = qcom_cpufreq_ready,
};
+static unsigned long qcom_cpufreq_hw_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct qcom_cpufreq_data *data = container_of(hw, struct qcom_cpufreq_data, cpu_clk);
+
+ return qcom_lmh_get_throttle_freq(data);
+}
+
+static const struct clk_ops qcom_cpufreq_hw_clk_ops = {
+ .recalc_rate = qcom_cpufreq_hw_recalc_rate,
+};
+
static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
{
+ struct clk_hw_onecell_data *clk_data;
+ struct device *dev = &pdev->dev;
struct device *cpu_dev;
struct clk *clk;
- int ret;
+ int ret, i, num_domains;
- clk = clk_get(&pdev->dev, "xo");
+ clk = clk_get(dev, "xo");
if (IS_ERR(clk))
return PTR_ERR(clk);
xo_rate = clk_get_rate(clk);
clk_put(clk);
- clk = clk_get(&pdev->dev, "alternate");
+ clk = clk_get(dev, "alternate");
if (IS_ERR(clk))
return PTR_ERR(clk);
@@ -689,11 +678,70 @@ static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
if (ret)
return ret;
+ /* Allocate qcom_cpufreq_data based on the available frequency domains in DT */
+ num_domains = of_property_count_elems_of_size(dev->of_node, "reg", sizeof(u32) * 4);
+ if (num_domains <= 0)
+ return num_domains;
+
+ qcom_cpufreq.data = devm_kzalloc(dev, sizeof(struct qcom_cpufreq_data) * num_domains,
+ GFP_KERNEL);
+ if (!qcom_cpufreq.data)
+ return -ENOMEM;
+
+ qcom_cpufreq.soc_data = of_device_get_match_data(dev);
+
+ clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, num_domains), GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ clk_data->num = num_domains;
+
+ for (i = 0; i < num_domains; i++) {
+ struct qcom_cpufreq_data *data = &qcom_cpufreq.data[i];
+ struct clk_init_data clk_init = {};
+ struct resource *res;
+ void __iomem *base;
+
+ base = devm_platform_get_and_ioremap_resource(pdev, i, &res);
+ if (IS_ERR(base)) {
+ dev_err(dev, "Failed to map resource %pR\n", res);
+ return PTR_ERR(base);
+ }
+
+ data->base = base;
+ data->res = res;
+
+ /* Register CPU clock for each frequency domain */
+ clk_init.name = kasprintf(GFP_KERNEL, "qcom_cpufreq%d", i);
+ if (!clk_init.name)
+ return -ENOMEM;
+
+ clk_init.flags = CLK_GET_RATE_NOCACHE;
+ clk_init.ops = &qcom_cpufreq_hw_clk_ops;
+ data->cpu_clk.init = &clk_init;
+
+ ret = devm_clk_hw_register(dev, &data->cpu_clk);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register clock %d: %d\n", i, ret);
+ kfree(clk_init.name);
+ return ret;
+ }
+
+ clk_data->hws[i] = &data->cpu_clk;
+ kfree(clk_init.name);
+ }
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_data);
+ if (ret < 0) {
+ dev_err(dev, "Failed to add clock provider\n");
+ return ret;
+ }
+
ret = cpufreq_register_driver(&cpufreq_qcom_hw_driver);
if (ret)
- dev_err(&pdev->dev, "CPUFreq HW driver failed to register\n");
+ dev_err(dev, "CPUFreq HW driver failed to register\n");
else
- dev_dbg(&pdev->dev, "QCOM CPUFreq HW driver initialized\n");
+ dev_dbg(dev, "QCOM CPUFreq HW driver initialized\n");
return ret;
}
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index 7d0d62a06bf3..c6fdf019dbde 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -39,7 +39,7 @@ static struct clk *spear1340_cpu_get_possible_parent(unsigned long newfreq)
* In SPEAr1340, cpu clk's parent sys clk can take input from
* following sources
*/
- const char *sys_clk_src[] = {
+ static const char * const sys_clk_src[] = {
"sys_syn_clk",
"pll1_clk",
"pll2_clk",
diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c
index 6c88827f4e62..f98f53bf1011 100644
--- a/drivers/cpufreq/tegra186-cpufreq.c
+++ b/drivers/cpufreq/tegra186-cpufreq.c
@@ -65,8 +65,8 @@ struct tegra186_cpufreq_cluster {
struct tegra186_cpufreq_data {
void __iomem *regs;
- struct tegra186_cpufreq_cluster *clusters;
const struct tegra186_cpufreq_cpu *cpus;
+ struct tegra186_cpufreq_cluster clusters[];
};
static int tegra186_cpufreq_init(struct cpufreq_policy *policy)
@@ -221,15 +221,12 @@ static int tegra186_cpufreq_probe(struct platform_device *pdev)
struct tegra_bpmp *bpmp;
unsigned int i = 0, err;
- data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(&pdev->dev,
+ struct_size(data, clusters, TEGRA186_NUM_CLUSTERS),
+ GFP_KERNEL);
if (!data)
return -ENOMEM;
- data->clusters = devm_kcalloc(&pdev->dev, TEGRA186_NUM_CLUSTERS,
- sizeof(*data->clusters), GFP_KERNEL);
- if (!data->clusters)
- return -ENOMEM;
-
data->cpus = tegra186_cpus;
bpmp = tegra_bpmp_get(&pdev->dev);
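
The tegra186 change above replaces a second devm_kcalloc() with a C99 flexible array member sized via struct_size(), so the clusters live in the same allocation as the parent structure. A minimal sketch of that idiom, with invented names (struct demo, NUM_ITEMS):

/* Hedged sketch only: flexible-array allocation with struct_size().
 * struct demo, struct demo_item and NUM_ITEMS are invented for illustration. */
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

#define NUM_ITEMS 4

struct demo_item {
	u32 id;
};

struct demo {
	unsigned long base;
	struct demo_item items[];	/* flexible array member, must be last */
};

static struct demo *demo_alloc(void)
{
	struct demo *d;

	/* One allocation covers the header plus NUM_ITEMS trailing elements;
	 * struct_size() guards against multiplication/addition overflow. */
	d = kzalloc(struct_size(d, items, NUM_ITEMS), GFP_KERNEL);
	return d;
}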
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index f64180dd2005..be4209d97cb3 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -39,6 +39,14 @@
#define OMAP34xx_ProdID_SKUID 0x4830A20C
#define OMAP3_SYSCON_BASE (0x48000000 + 0x2000 + 0x270)
+#define AM625_EFUSE_K_MPU_OPP 11
+#define AM625_EFUSE_S_MPU_OPP 19
+#define AM625_EFUSE_T_MPU_OPP 20
+
+#define AM625_SUPPORT_K_MPU_OPP BIT(0)
+#define AM625_SUPPORT_S_MPU_OPP BIT(1)
+#define AM625_SUPPORT_T_MPU_OPP BIT(2)
+
#define VERSION_COUNT 2
struct ti_cpufreq_data;
@@ -104,6 +112,25 @@ static unsigned long omap3_efuse_xlate(struct ti_cpufreq_data *opp_data,
return BIT(efuse);
}
+static unsigned long am625_efuse_xlate(struct ti_cpufreq_data *opp_data,
+ unsigned long efuse)
+{
+ unsigned long calculated_efuse = AM625_SUPPORT_K_MPU_OPP;
+
+ switch (efuse) {
+ case AM625_EFUSE_T_MPU_OPP:
+ calculated_efuse |= AM625_SUPPORT_T_MPU_OPP;
+ fallthrough;
+ case AM625_EFUSE_S_MPU_OPP:
+ calculated_efuse |= AM625_SUPPORT_S_MPU_OPP;
+ fallthrough;
+ case AM625_EFUSE_K_MPU_OPP:
+ calculated_efuse |= AM625_SUPPORT_K_MPU_OPP;
+ }
+
+ return calculated_efuse;
+}
+
static struct ti_cpufreq_soc_data am3x_soc_data = {
.efuse_xlate = amx3_efuse_xlate,
.efuse_fallback = AM33XX_800M_ARM_MPU_MAX_FREQ,
@@ -198,6 +225,14 @@ static struct ti_cpufreq_soc_data am3517_soc_data = {
.multi_regulator = false,
};
+static struct ti_cpufreq_soc_data am625_soc_data = {
+ .efuse_xlate = am625_efuse_xlate,
+ .efuse_offset = 0x0018,
+ .efuse_mask = 0x07c0,
+ .efuse_shift = 0x6,
+ .rev_offset = 0x0014,
+ .multi_regulator = false,
+};
/**
* ti_cpufreq_get_efuse() - Parse and return efuse value present on SoC
@@ -301,6 +336,7 @@ static const struct of_device_id ti_cpufreq_of_match[] = {
{ .compatible = "ti,dra7", .data = &dra7_soc_data },
{ .compatible = "ti,omap34xx", .data = &omap34xx_soc_data, },
{ .compatible = "ti,omap36xx", .data = &omap36xx_soc_data, },
+ { .compatible = "ti,am625", .data = &am625_soc_data, },
/* legacy */
{ .compatible = "ti,omap3430", .data = &omap34xx_soc_data, },
{ .compatible = "ti,omap3630", .data = &omap36xx_soc_data, },
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index d5595d57f4e5..6a94a6eaad27 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -1110,10 +1110,10 @@ cpufreq_table_set_inefficient(struct cpufreq_policy *policy,
}
static inline int parse_perf_domain(int cpu, const char *list_name,
- const char *cell_name)
+ const char *cell_name,
+ struct of_phandle_args *args)
{
struct device_node *cpu_np;
- struct of_phandle_args args;
int ret;
cpu_np = of_cpu_device_node_get(cpu);
@@ -1121,41 +1121,44 @@ static inline int parse_perf_domain(int cpu, const char *list_name,
return -ENODEV;
ret = of_parse_phandle_with_args(cpu_np, list_name, cell_name, 0,
- &args);
+ args);
if (ret < 0)
return ret;
of_node_put(cpu_np);
- return args.args[0];
+ return 0;
}
static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_name,
- const char *cell_name, struct cpumask *cpumask)
+ const char *cell_name, struct cpumask *cpumask,
+ struct of_phandle_args *pargs)
{
- int target_idx;
int cpu, ret;
+ struct of_phandle_args args;
- ret = parse_perf_domain(pcpu, list_name, cell_name);
+ ret = parse_perf_domain(pcpu, list_name, cell_name, pargs);
if (ret < 0)
return ret;
- target_idx = ret;
cpumask_set_cpu(pcpu, cpumask);
for_each_possible_cpu(cpu) {
if (cpu == pcpu)
continue;
- ret = parse_perf_domain(cpu, list_name, cell_name);
+ ret = parse_perf_domain(cpu, list_name, cell_name, &args);
if (ret < 0)
continue;
- if (target_idx == ret)
+ if (pargs->np == args.np && pargs->args_count == args.args_count &&
+ !memcmp(pargs->args, args.args, sizeof(args.args[0]) * args.args_count))
cpumask_set_cpu(cpu, cpumask);
+
+ of_node_put(args.np);
}
- return target_idx;
+ return 0;
}
#else
static inline int cpufreq_boost_trigger_state(int state)
@@ -1185,7 +1188,8 @@ cpufreq_table_set_inefficient(struct cpufreq_policy *policy,
}
static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_name,
- const char *cell_name, struct cpumask *cpumask)
+ const char *cell_name, struct cpumask *cpumask,
+ struct of_phandle_args *pargs)
{
return -EOPNOTSUPP;
}
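
As a usage note on the include/linux/cpufreq.h rework above: of_perf_domain_get_sharing_cpumask() no longer returns a domain index; it fills a struct of_phandle_args, and the caller extracts args.args[0] and drops the node reference itself, as the mediatek and apple drivers in this series do. A hedged sketch of such a caller, with an invented function name:

/* Hedged sketch only: caller of the reworked helper; my_domain_init() is invented. */
#include <linux/cpufreq.h>
#include <linux/of.h>

static int my_domain_init(struct cpufreq_policy *policy)
{
	struct of_phandle_args args;
	int index, ret;

	ret = of_perf_domain_get_sharing_cpumask(policy->cpu,
						 "performance-domains",
						 "#performance-domain-cells",
						 policy->cpus, &args);
	if (ret < 0)
		return ret;

	index = args.args[0];	/* domain index encoded in the phandle cells */
	of_node_put(args.np);	/* the helper hands a node reference back to the caller */

	/* ... use @index to look up the matching register region ... */

	return 0;
}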