author     Linus Torvalds <torvalds@linux-foundation.org>  2024-07-16 15:54:03 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2024-07-16 15:54:03 -0700
commit     41906248d0d78e3a620a86cf715f6f630912a4eb
tree       837ea1344255b01f03af50dd8acf99baed46bc1f
parent     15114e8fb58ffd574da83951e89cb5ab0055cc1e
parent     a02bed4183c48d42a2855a4e70867b6239c45770
Merge tag 'pm-6.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull power management updates from Rafael Wysocki:
 "These add a new cpufreq driver for Loongson-3, add support for new
  features in the intel_pstate (Lunar Lake and Arrow Lake platforms, OOB
  mode for Emerald Rapids, highest performance change interrupt),
  amd-pstate (fast CPPC) and sun50i (Allwinner H700 speed bin) cpufreq
  drivers, simplify the cpufreq driver interface, simplify the teo
  cpuidle governor, adjust the pm-graph utility for a new version of
  Python, address issues and clean up code.

  Specifics:

   - Add Loongson-3 CPUFreq driver support (Huacai Chen)

   - Add support for the Arrow Lake and Lunar Lake platforms and the
     out-of-band (OOB) mode on Emerald Rapids to the intel_pstate cpufreq
     driver, make it support the highest performance change interrupt and
     clean it up (Srinivas Pandruvada)

   - Switch cpufreq to new Intel CPU model defines (Tony Luck)

   - Simplify the cpufreq driver interface by switching the .exit()
     driver callback to the void return data type (Lizhe, Viresh Kumar)

   - Make cpufreq_boost_enabled() return bool (Dhruva Gole)

   - Add fast CPPC support to the amd-pstate cpufreq driver, address
     multiple assorted issues in it and clean it up (Perry Yuan, Mario
     Limonciello, Dhananjay Ugwekar, Meng Li, Xiaojian Du)

   - Add Allwinner H700 speed bin to the sun50i cpufreq driver (Ryan
     Walklin)

   - Fix memory leaks and of_node_put() usage in the sun50i and
     qcom-nvmem cpufreq drivers (Javier Carrasco)

   - Clean up the sti and dt-platdev cpufreq drivers (Jeff Johnson,
     Raphael Gallais-Pou)

   - Fix deferred probe handling in the TI cpufreq driver and wrong
     return values of ti_opp_supply_probe(), and add OPP tables for the
     AM62Ax and AM62Px SoCs to it (Bryan Brattlof, Primoz Fiser)

   - Avoid overflow of target_freq in .fast_switch() in the SCMI cpufreq
     driver (Jagadeesh Kona)

   - Use dev_err_probe() in every error path in probe in the Mediatek
     cpufreq driver (Nícolas Prado)

   - Fix kernel-doc param for longhaul_setstate in the longhaul cpufreq
     driver (Yang Li)

   - Fix system resume handling in the CPPC cpufreq driver (Riwen Lu)

   - Improve the teo cpuidle governor and clean up leftover comments
     from the menu cpuidle governor (Christian Loehle)

   - Clean up a comment typo in the teo cpuidle governor (Atul Kumar
     Pant)

   - Add missing MODULE_DESCRIPTION() macro to cpuidle haltpoll (Jeff
     Johnson)

   - Switch the intel_idle driver to new Intel CPU model defines (Tony
     Luck)

   - Switch the Intel RAPL driver to new Intel CPU model defines (Tony
     Luck)

   - Simplify if condition in the idle_inject driver (Thorsten Blum)

   - Fix missing cleanup on error in _opp_attach_genpd() (Viresh Kumar)

   - Introduce an OF helper function to inform if required-opps is used
     and drop a redundant in-parameter to _set_opp_level() (Ulf Hansson)

   - Update pm-graph to v5.12 which includes fixes and major code revamp
     for python3.12 (Todd Brandt)

   - Address several assorted issues in the cpupower utility (Roman
     Storozhenko)"

* tag 'pm-6.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (77 commits)
  cpufreq: sti: fix build warning
  cpufreq: mediatek: Use dev_err_probe in every error path in probe
  cpufreq: Add Loongson-3 CPUFreq driver support
  cpufreq: Make cpufreq_driver->exit() return void
  cpufreq/amd-pstate: Fix the scaling_max_freq setting on shared memory CPPC systems
  cpufreq/amd-pstate-ut: Convert nominal_freq to khz during comparisons
  cpufreq: pcc: Remove empty exit() callback
  cpufreq: loongson2: Remove empty exit() callback
  cpufreq: nforce2: Remove empty exit() callback
  cpupower: fix lib default installation path
  cpufreq: docs: Add missing scaling_available_frequencies description
  cpuidle: teo: Don't count non-existent intercepts
  cpupower: Disable direct build of the 'bench' subproject
  cpuidle: teo: Remove recent intercepts metric
  Revert: "cpuidle: teo: Introduce util-awareness"
  cpufreq: make cpufreq_boost_enabled() return bool
  cpufreq: intel_pstate: Support highest performance change interrupt
  x86/cpufeatures: Add HWP highest perf change feature flag
  Documentation: cpufreq: amd-pstate: update doc for Per CPU boost control method
  cpufreq: amd-pstate: Cap the CPPC.max_perf to nominal_perf if CPB is off
  ...
-rw-r--r--  Documentation/admin-guide/pm/amd-pstate.rst  18
-rw-r--r--  Documentation/admin-guide/pm/cpufreq.rst  4
-rw-r--r--  MAINTAINERS  1
-rw-r--r--  arch/x86/include/asm/cpufeatures.h  2
-rw-r--r--  arch/x86/include/asm/msr-index.h  2
-rw-r--r--  arch/x86/kernel/cpu/scattered.c  1
-rw-r--r--  drivers/cpufreq/Kconfig  12
-rw-r--r--  drivers/cpufreq/Kconfig.x86  1
-rw-r--r--  drivers/cpufreq/Makefile  1
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c  6
-rw-r--r--  drivers/cpufreq/amd-pstate-ut.c  12
-rw-r--r--  drivers/cpufreq/amd-pstate.c  357
-rw-r--r--  drivers/cpufreq/amd-pstate.h  2
-rw-r--r--  drivers/cpufreq/apple-soc-cpufreq.c  4
-rw-r--r--  drivers/cpufreq/bmips-cpufreq.c  4
-rw-r--r--  drivers/cpufreq/cppc_cpufreq.c  12
-rw-r--r--  drivers/cpufreq/cpufreq-dt-platdev.c  1
-rw-r--r--  drivers/cpufreq/cpufreq-dt.c  3
-rw-r--r--  drivers/cpufreq/cpufreq-nforce2.c  6
-rw-r--r--  drivers/cpufreq/cpufreq.c  50
-rw-r--r--  drivers/cpufreq/e_powersaver.c  3
-rw-r--r--  drivers/cpufreq/intel_pstate.c  131
-rw-r--r--  drivers/cpufreq/longhaul.c  5
-rw-r--r--  drivers/cpufreq/loongson2_cpufreq.c  6
-rw-r--r--  drivers/cpufreq/loongson3_cpufreq.c  395
-rw-r--r--  drivers/cpufreq/mediatek-cpufreq-hw.c  4
-rw-r--r--  drivers/cpufreq/mediatek-cpufreq.c  76
-rw-r--r--  drivers/cpufreq/omap-cpufreq.c  3
-rw-r--r--  drivers/cpufreq/pasemi-cpufreq.c  6
-rw-r--r--  drivers/cpufreq/pcc-cpufreq.c  6
-rw-r--r--  drivers/cpufreq/powernow-k6.c  5
-rw-r--r--  drivers/cpufreq/powernow-k7.c  3
-rw-r--r--  drivers/cpufreq/powernow-k8.c  6
-rw-r--r--  drivers/cpufreq/powernv-cpufreq.c  4
-rw-r--r--  drivers/cpufreq/ppc_cbe_cpufreq.c  3
-rw-r--r--  drivers/cpufreq/qcom-cpufreq-hw.c  4
-rw-r--r--  drivers/cpufreq/qcom-cpufreq-nvmem.c  12
-rw-r--r--  drivers/cpufreq/qoriq-cpufreq.c  4
-rw-r--r--  drivers/cpufreq/scmi-cpufreq.c  8
-rw-r--r--  drivers/cpufreq/scpi-cpufreq.c  4
-rw-r--r--  drivers/cpufreq/sh-cpufreq.c  4
-rw-r--r--  drivers/cpufreq/sparc-us2e-cpufreq.c  3
-rw-r--r--  drivers/cpufreq/sparc-us3-cpufreq.c  3
-rw-r--r--  drivers/cpufreq/speedstep-centrino.c  18
-rw-r--r--  drivers/cpufreq/sti-cpufreq.c  3
-rw-r--r--  drivers/cpufreq/sun50i-cpufreq-nvmem.c  30
-rw-r--r--  drivers/cpufreq/tegra194-cpufreq.c  4
-rw-r--r--  drivers/cpufreq/ti-cpufreq.c  96
-rw-r--r--  drivers/cpufreq/vexpress-spc-cpufreq.c  5
-rw-r--r--  drivers/cpuidle/cpuidle-haltpoll.c  1
-rw-r--r--  drivers/cpuidle/governors/menu.c  17
-rw-r--r--  drivers/cpuidle/governors/teo.c  194
-rw-r--r--  drivers/idle/intel_idle.c  116
-rw-r--r--  drivers/opp/core.c  15
-rw-r--r--  drivers/opp/of.c  32
-rw-r--r--  drivers/opp/ti-opp-supply.c  6
-rw-r--r--  drivers/powercap/idle_inject.c  2
-rw-r--r--  drivers/powercap/intel_rapl_common.c  120
-rw-r--r--  drivers/powercap/intel_rapl_msr.c  16
-rw-r--r--  include/linux/cpufreq.h  8
-rw-r--r--  include/linux/pm_opp.h  6
-rw-r--r--  tools/power/cpupower/Makefile  47
-rw-r--r--  tools/power/cpupower/README  160
-rw-r--r--  tools/power/cpupower/bench/Makefile  5
-rw-r--r--  tools/power/cpupower/man/cpupower-monitor.1  13
-rw-r--r--  tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c  2
-rwxr-xr-x  tools/power/pm-graph/bootgraph.py  16
-rwxr-xr-x  tools/power/pm-graph/sleepgraph.py  1098
68 files changed, 1958 insertions, 1269 deletions
diff --git a/Documentation/admin-guide/pm/amd-pstate.rst b/Documentation/admin-guide/pm/amd-pstate.rst
index 1e0d101b020a..d0324d44f548 100644
--- a/Documentation/admin-guide/pm/amd-pstate.rst
+++ b/Documentation/admin-guide/pm/amd-pstate.rst
@@ -281,6 +281,22 @@ integer values defined between 0 to 255 when EPP feature is enabled by platform
firmware, if EPP feature is disabled, driver will ignore the written value
This attribute is read-write.
+``boost``
+The `boost` sysfs attribute provides control over the CPU core
+performance boost, allowing users to manage the maximum frequency limitation
+of the CPU. This attribute can be used to enable or disable the boost feature
+on individual CPUs.
+
+When the boost feature is enabled, the CPU can dynamically increase its frequency
+beyond the base frequency, providing enhanced performance for demanding workloads.
+On the other hand, disabling the boost feature restricts the CPU to operate at the
+base frequency, which may be desirable in certain scenarios to prioritize power
+efficiency or manage temperature.
+
+To manipulate the `boost` attribute, users can write a value of `0` to disable the
+boost or `1` to enable it, for the respective CPU using the sysfs path
+`/sys/devices/system/cpu/cpuX/cpufreq/boost`, where `X` represents the CPU number.
+
Other performance and frequency values can be read back from
``/sys/devices/system/cpu/cpuX/acpi_cppc/``, see :ref:`cppc_sysfs`.
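For illustration, the per-CPU boost control documented above can be toggled by writing "0" or "1" to the sysfs attribute. A minimal userspace sketch (the choice of cpu0 and the error handling are illustrative assumptions, not part of the patch):

    #include <stdio.h>

    int main(void)
    {
            /* Per-CPU boost attribute described in the documentation above */
            FILE *f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/boost", "w");

            if (!f) {
                    perror("cpufreq boost attribute");
                    return 1;
            }
            fputs("1\n", f);        /* "1" enables boost, "0" disables it */
            return fclose(f) ? 1 : 0;
    }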
@@ -406,7 +422,7 @@ control its functionality at the system level. They are located in the
``/sys/devices/system/cpu/amd_pstate/`` directory and affect all CPUs.
``status``
- Operation mode of the driver: "active", "passive" or "disable".
+ Operation mode of the driver: "active", "passive", "guided" or "disable".
"active"
The driver is functional and in the ``active mode``
diff --git a/Documentation/admin-guide/pm/cpufreq.rst b/Documentation/admin-guide/pm/cpufreq.rst
index 6adb7988e0eb..fe1be4ad88cb 100644
--- a/Documentation/admin-guide/pm/cpufreq.rst
+++ b/Documentation/admin-guide/pm/cpufreq.rst
@@ -267,6 +267,10 @@ are the following:
``related_cpus``
List of all (online and offline) CPUs belonging to this policy.
+``scaling_available_frequencies``
+ List of available frequencies of the CPUs belonging to this policy
+ (in kHz).
+
``scaling_available_governors``
List of ``CPUFreq`` scaling governors present in the kernel that can
be attached to this policy or (if the |intel_pstate| scaling driver is
be attached to this policy.
diff --git a/MAINTAINERS b/MAINTAINERS
index bb39d2ae6045..73d6d4979bc9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -13040,6 +13040,7 @@ F: Documentation/arch/loongarch/
F: Documentation/translations/zh_CN/arch/loongarch/
F: arch/loongarch/
F: drivers/*/*loongarch*
+F: drivers/cpufreq/loongson3_cpufreq.c
LOONGSON GPIO DRIVER
M: Yinbo Zhu <zhuyinbo@loongson.cn>
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index b51e88d01b21..dd4682857c12 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -361,6 +361,7 @@
#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* "hwp_act_window" HWP Activity Window */
#define X86_FEATURE_HWP_EPP (14*32+10) /* "hwp_epp" HWP Energy Perf. Preference */
#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* "hwp_pkg_req" HWP Package Level Request */
+#define X86_FEATURE_HWP_HIGHEST_PERF_CHANGE (14*32+15) /* HWP Highest perf change */
#define X86_FEATURE_HFI (14*32+19) /* "hfi" Hardware Feedback Interface */
/* AMD SVM Feature Identification, CPUID level 0x8000000a (EDX), word 15 */
@@ -471,6 +472,7 @@
#define X86_FEATURE_BHI_CTRL (21*32+ 2) /* BHI_DIS_S HW control available */
#define X86_FEATURE_CLEAR_BHB_HW (21*32+ 3) /* BHI_DIS_S HW control enabled */
#define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* Clear branch history at vmexit using SW loop */
+#define X86_FEATURE_FAST_CPPC (21*32 + 5) /* AMD Fast CPPC */
/*
* BUG word(s)
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 01342963011e..e5069ddac222 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -783,6 +783,8 @@
#define MSR_K7_HWCR_IRPERF_EN BIT_ULL(MSR_K7_HWCR_IRPERF_EN_BIT)
#define MSR_K7_FID_VID_CTL 0xc0010041
#define MSR_K7_FID_VID_STATUS 0xc0010042
+#define MSR_K7_HWCR_CPB_DIS_BIT 25
+#define MSR_K7_HWCR_CPB_DIS BIT_ULL(MSR_K7_HWCR_CPB_DIS_BIT)
/* K6 MSRs */
#define MSR_K6_WHCR 0xc0000082
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index af5aa2c754c2..c84c30188fdf 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -45,6 +45,7 @@ static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_HW_PSTATE, CPUID_EDX, 7, 0x80000007, 0 },
{ X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 },
{ X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 },
+ { X86_FEATURE_FAST_CPPC, CPUID_EDX, 15, 0x80000007, 0 },
{ X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 },
{ X86_FEATURE_SMBA, CPUID_EBX, 2, 0x80000020, 0 },
{ X86_FEATURE_BMEC, CPUID_EBX, 3, 0x80000020, 0 },
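The new scattered.c entry maps X86_FEATURE_FAST_CPPC to CPUID leaf 0x80000007, EDX bit 15. As a hedged aside, the same bit can be probed directly from userspace; this small program is illustrative only and is not part of the patch:

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            /* Leaf 0x80000007, EDX bit 15: AMD fast CPPC, as registered above */
            if (!__get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx))
                    return 1;

            printf("fast CPPC: %s\n",
                   (edx & (1u << 15)) ? "supported" : "not supported");
            return 0;
    }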
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 94e55c40970a..10cda6f2fe1d 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -262,6 +262,18 @@ config LOONGSON2_CPUFREQ
If in doubt, say N.
endif
+if LOONGARCH
+config LOONGSON3_CPUFREQ
+ tristate "Loongson3 CPUFreq Driver"
+ help
+ This option adds a CPUFreq driver for Loongson processors which
+ support software configurable cpu frequency.
+
+ Loongson-3 family processors support this feature.
+
+ If in doubt, say N.
+endif
+
if SPARC64
config SPARC_US3_CPUFREQ
tristate "UltraSPARC-III CPU Frequency driver"
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index 438c9e75a04d..97c2d4f15d76 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -71,6 +71,7 @@ config X86_AMD_PSTATE_DEFAULT_MODE
config X86_AMD_PSTATE_UT
tristate "selftest for AMD Processor P-State driver"
depends on X86 && ACPI_PROCESSOR
+ depends on X86_AMD_PSTATE
default n
help
This kernel module is used for testing. It's safe to say M here.
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 8d141c71b016..0f184031dd12 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -103,6 +103,7 @@ obj-$(CONFIG_POWERNV_CPUFREQ) += powernv-cpufreq.o
# Other platform drivers
obj-$(CONFIG_BMIPS_CPUFREQ) += bmips-cpufreq.o
obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o
+obj-$(CONFIG_LOONGSON3_CPUFREQ) += loongson3_cpufreq.o
obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o
obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o
obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 4ac3a35dcd98..a8ca625a98b8 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -50,8 +50,6 @@ enum {
#define AMD_MSR_RANGE (0x7)
#define HYGON_MSR_RANGE (0x7)
-#define MSR_K7_HWCR_CPB_DIS (1ULL << 25)
-
struct acpi_cpufreq_data {
unsigned int resume;
unsigned int cpu_feature;
@@ -908,7 +906,7 @@ err_free:
return result;
}
-static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+static void acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
struct acpi_cpufreq_data *data = policy->driver_data;
@@ -921,8 +919,6 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
free_cpumask_var(data->freqdomain_cpus);
kfree(policy->freq_table);
kfree(data);
-
- return 0;
}
static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
diff --git a/drivers/cpufreq/amd-pstate-ut.c b/drivers/cpufreq/amd-pstate-ut.c
index fc275d41d51e..66b73c308ce6 100644
--- a/drivers/cpufreq/amd-pstate-ut.c
+++ b/drivers/cpufreq/amd-pstate-ut.c
@@ -202,6 +202,7 @@ static void amd_pstate_ut_check_freq(u32 index)
int cpu = 0;
struct cpufreq_policy *policy = NULL;
struct amd_cpudata *cpudata = NULL;
+ u32 nominal_freq_khz;
for_each_possible_cpu(cpu) {
policy = cpufreq_cpu_get(cpu);
@@ -209,13 +210,14 @@ static void amd_pstate_ut_check_freq(u32 index)
break;
cpudata = policy->driver_data;
- if (!((cpudata->max_freq >= cpudata->nominal_freq) &&
- (cpudata->nominal_freq > cpudata->lowest_nonlinear_freq) &&
+ nominal_freq_khz = cpudata->nominal_freq*1000;
+ if (!((cpudata->max_freq >= nominal_freq_khz) &&
+ (nominal_freq_khz > cpudata->lowest_nonlinear_freq) &&
(cpudata->lowest_nonlinear_freq > cpudata->min_freq) &&
(cpudata->min_freq > 0))) {
amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
pr_err("%s cpu%d max=%d >= nominal=%d > lowest_nonlinear=%d > min=%d > 0, the formula is incorrect!\n",
- __func__, cpu, cpudata->max_freq, cpudata->nominal_freq,
+ __func__, cpu, cpudata->max_freq, nominal_freq_khz,
cpudata->lowest_nonlinear_freq, cpudata->min_freq);
goto skip_test;
}
@@ -229,13 +231,13 @@ static void amd_pstate_ut_check_freq(u32 index)
if (cpudata->boost_supported) {
if ((policy->max == cpudata->max_freq) ||
- (policy->max == cpudata->nominal_freq))
+ (policy->max == nominal_freq_khz))
amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
else {
amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
pr_err("%s cpu%d policy_max=%d should be equal cpu_max=%d or cpu_nominal=%d !\n",
__func__, cpu, policy->max, cpudata->max_freq,
- cpudata->nominal_freq);
+ nominal_freq_khz);
goto skip_test;
}
} else {
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 9ad62dbe8bfb..68c616b572f2 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -51,6 +51,7 @@
#define AMD_PSTATE_TRANSITION_LATENCY 20000
#define AMD_PSTATE_TRANSITION_DELAY 1000
+#define AMD_PSTATE_FAST_CPPC_TRANSITION_DELAY 600
#define CPPC_HIGHEST_PERF_PERFORMANCE 196
#define CPPC_HIGHEST_PERF_DEFAULT 166
@@ -85,15 +86,6 @@ struct quirk_entry {
u32 lowest_freq;
};
-/*
- * TODO: We need more time to fine tune processors with shared memory solution
- * with community together.
- *
- * There are some performance drops on the CPU benchmarks which reports from
- * Suse. We are co-working with them to fine tune the shared memory solution. So
- * we disable it by default to go acpi-cpufreq on these processors and add a
- * module parameter to be able to enable it manually for debugging.
- */
static struct cpufreq_driver *current_pstate_driver;
static struct cpufreq_driver amd_pstate_driver;
static struct cpufreq_driver amd_pstate_epp_driver;
@@ -157,7 +149,7 @@ static int __init dmi_matched_7k62_bios_bug(const struct dmi_system_id *dmi)
* broken BIOS lack of nominal_freq and lowest_freq capabilities
* definition in ACPI tables
*/
- if (boot_cpu_has(X86_FEATURE_ZEN2)) {
+ if (cpu_feature_enabled(X86_FEATURE_ZEN2)) {
quirks = dmi->driver_data;
pr_info("Overriding nominal and lowest frequencies for %s\n", dmi->ident);
return 1;
@@ -199,7 +191,7 @@ static s16 amd_pstate_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached)
u64 epp;
int ret;
- if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
if (!cppc_req_cached) {
epp = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
&cppc_req_cached);
@@ -247,12 +239,32 @@ static int amd_pstate_get_energy_pref_index(struct amd_cpudata *cpudata)
return index;
}
+static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
+ u32 des_perf, u32 max_perf, bool fast_switch)
+{
+ if (fast_switch)
+ wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached));
+ else
+ wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
+ READ_ONCE(cpudata->cppc_req_cached));
+}
+
+DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf);
+
+static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata,
+ u32 min_perf, u32 des_perf,
+ u32 max_perf, bool fast_switch)
+{
+ static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
+ max_perf, fast_switch);
+}
+
static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
{
int ret;
struct cppc_perf_ctrls perf_ctrls;
- if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
u64 value = READ_ONCE(cpudata->cppc_req_cached);
value &= ~GENMASK_ULL(31, 24);
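The hunk above moves pstate_update_perf() and the amd_pstate_update_perf static call ahead of amd_pstate_set_epp() so the EPP path can reuse the same dispatcher. A minimal sketch of the static-call pattern involved; the helper name example_select_backend() and the exact place where static_call_update() is invoked are assumptions for illustration, not quotes from this patch:

    #include <linux/static_call.h>

    /* Default target: the MSR-based fast path defined in the hunk above */
    DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf);

    static void example_select_backend(bool has_cppc_msr)
    {
            /* Shared-memory (non-MSR) systems retarget the dispatcher */
            if (!has_cppc_msr)
                    static_call_update(amd_pstate_update_perf, cppc_update_perf);
    }

    /*
     * All callers then go through one call site, as the wrapper above does:
     *   static_call(amd_pstate_update_perf)(cpudata, min, des, max, fast_switch);
     */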
@@ -263,6 +275,9 @@ static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
if (!ret)
cpudata->epp_cached = epp;
} else {
+ amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
+ cpudata->max_limit_perf, false);
+
perf_ctrls.energy_perf = epp;
ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
if (ret) {
@@ -281,10 +296,8 @@ static int amd_pstate_set_energy_pref_index(struct amd_cpudata *cpudata,
int epp = -EINVAL;
int ret;
- if (!pref_index) {
- pr_debug("EPP pref_index is invalid\n");
- return -EINVAL;
- }
+ if (!pref_index)
+ epp = cpudata->epp_default;
if (epp == -EINVAL)
epp = epp_values[pref_index];
@@ -452,16 +465,6 @@ static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
return static_call(amd_pstate_init_perf)(cpudata);
}
-static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
- u32 des_perf, u32 max_perf, bool fast_switch)
-{
- if (fast_switch)
- wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached));
- else
- wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
- READ_ONCE(cpudata->cppc_req_cached));
-}
-
static void cppc_update_perf(struct amd_cpudata *cpudata,
u32 min_perf, u32 des_perf,
u32 max_perf, bool fast_switch)
@@ -475,16 +478,6 @@ static void cppc_update_perf(struct amd_cpudata *cpudata,
cppc_set_perf(cpudata->cpu, &perf_ctrls);
}
-DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf);
-
-static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata,
- u32 min_perf, u32 des_perf,
- u32 max_perf, bool fast_switch)
-{
- static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
- max_perf, fast_switch);
-}
-
static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
{
u64 aperf, mperf, tsc;
@@ -521,7 +514,10 @@ static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
u32 des_perf, u32 max_perf, bool fast_switch, int gov_flags)
{
+ unsigned long max_freq;
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpudata->cpu);
u64 prev = READ_ONCE(cpudata->cppc_req_cached);
+ u32 nominal_perf = READ_ONCE(cpudata->nominal_perf);
u64 value = prev;
min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
@@ -530,6 +526,9 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
cpudata->max_limit_perf);
des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
+ max_freq = READ_ONCE(cpudata->max_limit_freq);
+ policy->cur = div_u64(des_perf * max_freq, max_perf);
+
if ((cppc_state == AMD_PSTATE_GUIDED) && (gov_flags & CPUFREQ_GOV_DYNAMIC_SWITCHING)) {
min_perf = des_perf;
des_perf = 0;
@@ -541,6 +540,10 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
value &= ~AMD_CPPC_DES_PERF(~0L);
value |= AMD_CPPC_DES_PERF(des_perf);
+ /* limit the max perf when core performance boost feature is disabled */
+ if (!cpudata->boost_supported)
+ max_perf = min_t(unsigned long, nominal_perf, max_perf);
+
value &= ~AMD_CPPC_MAX_PERF(~0L);
value |= AMD_CPPC_MAX_PERF(max_perf);
@@ -651,10 +654,9 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
unsigned long capacity)
{
unsigned long max_perf, min_perf, des_perf,
- cap_perf, lowest_nonlinear_perf, max_freq;
+ cap_perf, lowest_nonlinear_perf;
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
struct amd_cpudata *cpudata = policy->driver_data;
- unsigned int target_freq;
if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
amd_pstate_update_min_max_limit(policy);
@@ -662,7 +664,6 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
cap_perf = READ_ONCE(cpudata->highest_perf);
lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
- max_freq = READ_ONCE(cpudata->max_freq);
des_perf = cap_perf;
if (target_perf < capacity)
@@ -680,51 +681,111 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
max_perf = min_perf;
des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
- target_freq = div_u64(des_perf * max_freq, max_perf);
- policy->cur = target_freq;
amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true,
policy->governor->flags);
cpufreq_cpu_put(policy);
}
-static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
+static int amd_pstate_cpu_boost_update(struct cpufreq_policy *policy, bool on)
{
struct amd_cpudata *cpudata = policy->driver_data;
+ struct cppc_perf_ctrls perf_ctrls;
+ u32 highest_perf, nominal_perf, nominal_freq, max_freq;
int ret;
- if (!cpudata->boost_supported) {
- pr_err("Boost mode is not supported by this processor or SBIOS\n");
- return -EINVAL;
+ highest_perf = READ_ONCE(cpudata->highest_perf);
+ nominal_perf = READ_ONCE(cpudata->nominal_perf);
+ nominal_freq = READ_ONCE(cpudata->nominal_freq);
+ max_freq = READ_ONCE(cpudata->max_freq);
+
+ if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ u64 value = READ_ONCE(cpudata->cppc_req_cached);
+
+ value &= ~GENMASK_ULL(7, 0);
+ value |= on ? highest_perf : nominal_perf;
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
+
+ wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+ } else {
+ perf_ctrls.max_perf = on ? highest_perf : nominal_perf;
+ ret = cppc_set_perf(cpudata->cpu, &perf_ctrls);
+ if (ret) {
+ cpufreq_cpu_release(policy);
+ pr_debug("Failed to set max perf on CPU:%d. ret:%d\n",
+ cpudata->cpu, ret);
+ return ret;
+ }
}
- if (state)
- policy->cpuinfo.max_freq = cpudata->max_freq;
- else
- policy->cpuinfo.max_freq = cpudata->nominal_freq * 1000;
+ if (on)
+ policy->cpuinfo.max_freq = max_freq;
+ else if (policy->cpuinfo.max_freq > nominal_freq * 1000)
+ policy->cpuinfo.max_freq = nominal_freq * 1000;
policy->max = policy->cpuinfo.max_freq;
- ret = freq_qos_update_request(&cpudata->req[1],
- policy->cpuinfo.max_freq);
- if (ret < 0)
- return ret;
+ if (cppc_state == AMD_PSTATE_PASSIVE) {
+ ret = freq_qos_update_request(&cpudata->req[1], policy->cpuinfo.max_freq);
+ if (ret < 0)
+ pr_debug("Failed to update freq constraint: CPU%d\n", cpudata->cpu);
+ }
- return 0;
+ return ret < 0 ? ret : 0;
}
-static void amd_pstate_boost_init(struct amd_cpudata *cpudata)
+static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
{
- u32 highest_perf, nominal_perf;
+ struct amd_cpudata *cpudata = policy->driver_data;
+ int ret;
- highest_perf = READ_ONCE(cpudata->highest_perf);
- nominal_perf = READ_ONCE(cpudata->nominal_perf);
+ if (!cpudata->boost_supported) {
+ pr_err("Boost mode is not supported by this processor or SBIOS\n");
+ return -EOPNOTSUPP;
+ }
+ mutex_lock(&amd_pstate_driver_lock);
+ ret = amd_pstate_cpu_boost_update(policy, state);
+ WRITE_ONCE(cpudata->boost_state, !ret ? state : false);
+ policy->boost_enabled = !ret ? state : false;
+ refresh_frequency_limits(policy);
+ mutex_unlock(&amd_pstate_driver_lock);
- if (highest_perf <= nominal_perf)
- return;
+ return ret;
+}
- cpudata->boost_supported = true;
+static int amd_pstate_init_boost_support(struct amd_cpudata *cpudata)
+{
+ u64 boost_val;
+ int ret = -1;
+
+ /*
+ * If the platform has no CPB support or it is disabled, initialize the current
+ * driver's boost_enabled state to false; this is not an error for the cpufreq core to handle.
+ */
+ if (!cpu_feature_enabled(X86_FEATURE_CPB)) {
+ pr_debug_once("Boost CPB capabilities not present in the processor\n");
+ ret = 0;
+ goto exit_err;
+ }
+
+ /* at least one CPU supports CPB, even if others fail later on to set up */
current_pstate_driver->boost_enabled = true;
+
+ ret = rdmsrl_on_cpu(cpudata->cpu, MSR_K7_HWCR, &boost_val);
+ if (ret) {
+ pr_err_once("failed to read initial CPU boost state!\n");
+ ret = -EIO;
+ goto exit_err;
+ }
+
+ if (!(boost_val & MSR_K7_HWCR_CPB_DIS))
+ cpudata->boost_supported = true;
+
+ return 0;
+
+exit_err:
+ cpudata->boost_supported = false;
+ return ret;
}
static void amd_perf_ctl_reset(unsigned int cpu)
@@ -753,7 +814,7 @@ static int amd_pstate_get_highest_perf(int cpu, u32 *highest_perf)
{
int ret;
- if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
u64 cap1;
ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
@@ -849,8 +910,12 @@ static u32 amd_pstate_get_transition_delay_us(unsigned int cpu)
u32 transition_delay_ns;
transition_delay_ns = cppc_get_transition_latency(cpu);
- if (transition_delay_ns == CPUFREQ_ETERNAL)
- return AMD_PSTATE_TRANSITION_DELAY;
+ if (transition_delay_ns == CPUFREQ_ETERNAL) {
+ if (cpu_feature_enabled(X86_FEATURE_FAST_CPPC))
+ return AMD_PSTATE_FAST_CPPC_TRANSITION_DELAY;
+ else
+ return AMD_PSTATE_TRANSITION_DELAY;
+ }
return transition_delay_ns / NSEC_PER_USEC;
}
@@ -921,12 +986,30 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
WRITE_ONCE(cpudata->nominal_freq, nominal_freq);
WRITE_ONCE(cpudata->max_freq, max_freq);
+ /**
+ * Below values need to be initialized correctly, otherwise driver will fail to load
+ * max_freq is calculated according to (nominal_freq * highest_perf)/nominal_perf
+ * lowest_nonlinear_freq is a value between [min_freq, nominal_freq]
+ * Check _CPC in ACPI table objects if any values are incorrect
+ */
+ if (min_freq <= 0 || max_freq <= 0 || nominal_freq <= 0 || min_freq > max_freq) {
+ pr_err("min_freq(%d) or max_freq(%d) or nominal_freq(%d) value is incorrect\n",
+ min_freq, max_freq, nominal_freq * 1000);
+ return -EINVAL;
+ }
+
+ if (lowest_nonlinear_freq <= min_freq || lowest_nonlinear_freq > nominal_freq * 1000) {
+ pr_err("lowest_nonlinear_freq(%d) value is out of range [min_freq(%d), nominal_freq(%d)]\n",
+ lowest_nonlinear_freq, min_freq, nominal_freq * 1000);
+ return -EINVAL;
+ }
+
return 0;
}
static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
{
- int min_freq, max_freq, nominal_freq, ret;
+ int min_freq, max_freq, ret;
struct device *dev;
struct amd_cpudata *cpudata;
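To make the new sanity check concrete with hypothetical numbers: a part reporting nominal_freq = 2800 MHz, nominal_perf = 166 and highest_perf = 211 yields max_freq = 2800 * 211 / 166, roughly 3559 MHz, and a lowest_nonlinear_freq of, say, 1800 MHz passes because it falls inside [min_freq, nominal_freq]; a zero or inverted value in any of these fields now makes amd_pstate_init_freq() fail with -EINVAL instead of being caught later in the CPU init callbacks.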
@@ -955,18 +1038,12 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
if (ret)
goto free_cpudata1;
+ ret = amd_pstate_init_boost_support(cpudata);
+ if (ret)
+ goto free_cpudata1;
+
min_freq = READ_ONCE(cpudata->min_freq);
max_freq = READ_ONCE(cpudata->max_freq);
- nominal_freq = READ_ONCE(cpudata->nominal_freq);
-
- if (min_freq <= 0 || max_freq <= 0 ||
- nominal_freq <= 0 || min_freq > max_freq) {
- dev_err(dev,
- "min_freq(%d) or max_freq(%d) or nominal_freq (%d) value is incorrect, check _CPC in ACPI tables\n",
- min_freq, max_freq, nominal_freq);
- ret = -EINVAL;
- goto free_cpudata1;
- }
policy->cpuinfo.transition_latency = amd_pstate_get_transition_latency(policy->cpu);
policy->transition_delay_us = amd_pstate_get_transition_delay_us(policy->cpu);
@@ -977,10 +1054,12 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
policy->cpuinfo.min_freq = min_freq;
policy->cpuinfo.max_freq = max_freq;
+ policy->boost_enabled = READ_ONCE(cpudata->boost_supported);
+
/* It will be updated by governor */
policy->cur = policy->cpuinfo.min_freq;
- if (boot_cpu_has(X86_FEATURE_CPPC))
+ if (cpu_feature_enabled(X86_FEATURE_CPPC))
policy->fast_switch_possible = true;
ret = freq_qos_add_request(&policy->constraints, &cpudata->req[0],
@@ -1002,7 +1081,6 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
policy->driver_data = cpudata;
- amd_pstate_boost_init(cpudata);
if (!current_pstate_driver->adjust_perf)
current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;
@@ -1015,7 +1093,7 @@ free_cpudata1:
return ret;
}
-static int amd_pstate_cpu_exit(struct cpufreq_policy *policy)
+static void amd_pstate_cpu_exit(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
@@ -1023,8 +1101,6 @@ static int amd_pstate_cpu_exit(struct cpufreq_policy *policy)
freq_qos_remove_request(&cpudata->req[0]);
policy->fast_switch_possible = false;
kfree(cpudata);
-
- return 0;
}
static int amd_pstate_cpu_resume(struct cpufreq_policy *policy)
@@ -1213,7 +1289,7 @@ static int amd_pstate_change_mode_without_dvr_change(int mode)
cppc_state = mode;
- if (boot_cpu_has(X86_FEATURE_CPPC) || cppc_state == AMD_PSTATE_ACTIVE)
+ if (cpu_feature_enabled(X86_FEATURE_CPPC) || cppc_state == AMD_PSTATE_ACTIVE)
return 0;
for_each_present_cpu(cpu) {
@@ -1386,7 +1462,7 @@ static bool amd_pstate_acpi_pm_profile_undefined(void)
static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
{
- int min_freq, max_freq, nominal_freq, ret;
+ int min_freq, max_freq, ret;
struct amd_cpudata *cpudata;
struct device *dev;
u64 value;
@@ -1417,17 +1493,12 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
if (ret)
goto free_cpudata1;
+ ret = amd_pstate_init_boost_support(cpudata);
+ if (ret)
+ goto free_cpudata1;
+
min_freq = READ_ONCE(cpudata->min_freq);
max_freq = READ_ONCE(cpudata->max_freq);
- nominal_freq = READ_ONCE(cpudata->nominal_freq);
- if (min_freq <= 0 || max_freq <= 0 ||
- nominal_freq <= 0 || min_freq > max_freq) {
- dev_err(dev,
- "min_freq(%d) or max_freq(%d) or nominal_freq(%d) value is incorrect, check _CPC in ACPI tables\n",
- min_freq, max_freq, nominal_freq);
- ret = -EINVAL;
- goto free_cpudata1;
- }
policy->cpuinfo.min_freq = min_freq;
policy->cpuinfo.max_freq = max_freq;
@@ -1436,11 +1507,13 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
policy->driver_data = cpudata;
- cpudata->epp_cached = amd_pstate_get_epp(cpudata, 0);
+ cpudata->epp_cached = cpudata->epp_default = amd_pstate_get_epp(cpudata, 0);
policy->min = policy->cpuinfo.min_freq;
policy->max = policy->cpuinfo.max_freq;
+ policy->boost_enabled = READ_ONCE(cpudata->boost_supported);
+
/*
* Set the policy to provide a valid fallback value in case
* the default cpufreq governor is neither powersave nor performance.
@@ -1451,7 +1524,7 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
else
policy->policy = CPUFREQ_POLICY_POWERSAVE;
- if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
if (ret)
return ret;
@@ -1462,7 +1535,6 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
return ret;
WRITE_ONCE(cpudata->cppc_cap1_cached, value);
}
- amd_pstate_boost_init(cpudata);
return 0;
@@ -1471,7 +1543,7 @@ free_cpudata1:
return ret;
}
-static int amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
+static void amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
@@ -1481,7 +1553,6 @@ static int amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
}
pr_debug("CPU %d exiting\n", policy->cpu);
- return 0;
}
static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
@@ -1541,7 +1612,7 @@ static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
epp = 0;
/* Set initial EPP value */
- if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
value &= ~GENMASK_ULL(31, 24);
value |= (u64)epp << 24;
}
@@ -1564,6 +1635,12 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
amd_pstate_epp_update_limit(policy);
+ /*
+ * policy->cur is never updated with the amd_pstate_epp driver, but it
+ * is used as a stale frequency value. So, keep it within limits.
+ */
+ policy->cur = policy->min;
+
return 0;
}
@@ -1580,7 +1657,7 @@ static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
value = READ_ONCE(cpudata->cppc_req_cached);
max_perf = READ_ONCE(cpudata->highest_perf);
- if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
} else {
perf_ctrls.max_perf = max_perf;
@@ -1614,7 +1691,7 @@ static void amd_pstate_epp_offline(struct cpufreq_policy *policy)
value = READ_ONCE(cpudata->cppc_req_cached);
mutex_lock(&amd_pstate_limits_lock);
- if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
cpudata->epp_policy = CPUFREQ_POLICY_UNKNOWN;
/* Set max perf same as min perf */
@@ -1718,6 +1795,7 @@ static struct cpufreq_driver amd_pstate_epp_driver = {
.suspend = amd_pstate_epp_suspend,
.resume = amd_pstate_epp_resume,
.update_limits = amd_pstate_update_limits,
+ .set_boost = amd_pstate_set_boost,
.name = "amd-pstate-epp",
.attr = amd_pstate_epp_attr,
};
@@ -1741,6 +1819,46 @@ static int __init amd_pstate_set_driver(int mode_idx)
return -EINVAL;
}
+/**
+ * CPPC function is not supported for family ID 17H with model_ID ranging from 0x10 to 0x2F.
+ * Show a debug message that helps to check whether the CPU has CPPC support, to diagnose driver loading issues.
+ */
+static bool amd_cppc_supported(void)
+{
+ struct cpuinfo_x86 *c = &cpu_data(0);
+ bool warn = false;
+
+ if ((boot_cpu_data.x86 == 0x17) && (boot_cpu_data.x86_model < 0x30)) {
+ pr_debug_once("CPPC feature is not supported by the processor\n");
+ return false;
+ }
+
+ /*
+ * If the CPPC feature is disabled in the BIOS for processors that support MSR-based CPPC,
+ * the AMD Pstate driver may not function correctly.
+ * Check the CPPC flag and display a warning message if the platform supports CPPC.
+ * Note: the check below will not abort the driver registration process because
+ * it is added for debugging purposes.
+ */
+ if (!cpu_feature_enabled(X86_FEATURE_CPPC)) {
+ if (cpu_feature_enabled(X86_FEATURE_ZEN1) || cpu_feature_enabled(X86_FEATURE_ZEN2)) {
+ if (c->x86_model > 0x60 && c->x86_model < 0xaf)
+ warn = true;
+ } else if (cpu_feature_enabled(X86_FEATURE_ZEN3) || cpu_feature_enabled(X86_FEATURE_ZEN4)) {
+ if ((c->x86_model > 0x10 && c->x86_model < 0x1F) ||
+ (c->x86_model > 0x40 && c->x86_model < 0xaf))
+ warn = true;
+ } else if (cpu_feature_enabled(X86_FEATURE_ZEN5)) {
+ warn = true;
+ }
+ }
+
+ if (warn)
+ pr_warn_once("The CPPC feature is supported but currently disabled by the BIOS.\n"
+ "Please enable it if your BIOS has the CPPC option.\n");
+ return true;
+}
+
static int __init amd_pstate_init(void)
{
struct device *dev_root;
@@ -1749,6 +1867,11 @@ static int __init amd_pstate_init(void)
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
return -ENODEV;
+ /* show debug message only if CPPC is not supported */
+ if (!amd_cppc_supported())
+ return -EOPNOTSUPP;
+
+ /* show warning message when BIOS broken or ACPI disabled */
if (!acpi_cpc_valid()) {
pr_warn_once("the _CPC object is not present in SBIOS or ACPI disabled\n");
return -ENODEV;
@@ -1763,35 +1886,43 @@ static int __init amd_pstate_init(void)
/* check if this machine need CPPC quirks */
dmi_check_system(amd_pstate_quirks_table);
- switch (cppc_state) {
- case AMD_PSTATE_UNDEFINED:
+ /*
+ * determine the driver mode from the command line or kernel config.
+ * If no command line input is provided, cppc_state will be AMD_PSTATE_UNDEFINED.
+ * command line options will override the kernel config settings.
+ */
+
+ if (cppc_state == AMD_PSTATE_UNDEFINED) {
/* Disable on the following configs by default:
* 1. Undefined platforms
* 2. Server platforms
- * 3. Shared memory designs
*/
if (amd_pstate_acpi_pm_profile_undefined() ||
- amd_pstate_acpi_pm_profile_server() ||
- !boot_cpu_has(X86_FEATURE_CPPC)) {
+ amd_pstate_acpi_pm_profile_server()) {
pr_info("driver load is disabled, boot with specific mode to enable this\n");
return -ENODEV;
}
- ret = amd_pstate_set_driver(CONFIG_X86_AMD_PSTATE_DEFAULT_MODE);
- if (ret)
- return ret;
- break;
+ /* get driver mode from kernel config option [1:4] */
+ cppc_state = CONFIG_X86_AMD_PSTATE_DEFAULT_MODE;
+ }
+
+ switch (cppc_state) {
case AMD_PSTATE_DISABLE:
+ pr_info("driver load is disabled, boot with specific mode to enable this\n");
return -ENODEV;
case AMD_PSTATE_PASSIVE:
case AMD_PSTATE_ACTIVE:
case AMD_PSTATE_GUIDED:
+ ret = amd_pstate_set_driver(cppc_state);
+ if (ret)
+ return ret;
break;
default:
return -EINVAL;
}
/* capability check */
- if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
pr_debug("AMD CPPC MSR based functionality is supported\n");
if (cppc_state != AMD_PSTATE_ACTIVE)
current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;
@@ -1805,13 +1936,15 @@ static int __init amd_pstate_init(void)
/* enable amd pstate feature */
ret = amd_pstate_enable(true);
if (ret) {
- pr_err("failed to enable with return %d\n", ret);
+ pr_err("failed to enable driver mode(%d)\n", cppc_state);
return ret;
}
ret = cpufreq_register_driver(current_pstate_driver);
- if (ret)
+ if (ret) {
pr_err("failed to register with return %d\n", ret);
+ goto disable_driver;
+ }
dev_root = bus_get_dev_root(&cpu_subsys);
if (dev_root) {
@@ -1827,6 +1960,8 @@ static int __init amd_pstate_init(void)
global_attr_free:
cpufreq_unregister_driver(current_pstate_driver);
+disable_driver:
+ amd_pstate_enable(false);
return ret;
}
device_initcall(amd_pstate_init);
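As the reworked init sequence above notes, the operating mode can still be forced from the kernel command line (for example amd_pstate=passive, amd_pstate=active or amd_pstate=guided), and such an option overrides the CONFIG_X86_AMD_PSTATE_DEFAULT_MODE build-time default.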
diff --git a/drivers/cpufreq/amd-pstate.h b/drivers/cpufreq/amd-pstate.h
index e6a28e7f4dbf..cc8bb2bc325a 100644
--- a/drivers/cpufreq/amd-pstate.h
+++ b/drivers/cpufreq/amd-pstate.h
@@ -99,6 +99,8 @@ struct amd_cpudata {
u32 policy;
u64 cppc_cap1_cached;
bool suspended;
+ s16 epp_default;
+ bool boost_state;
};
#endif /* _LINUX_AMD_PSTATE_H */
diff --git a/drivers/cpufreq/apple-soc-cpufreq.c b/drivers/cpufreq/apple-soc-cpufreq.c
index 021f423705e1..af34c22fa273 100644
--- a/drivers/cpufreq/apple-soc-cpufreq.c
+++ b/drivers/cpufreq/apple-soc-cpufreq.c
@@ -305,7 +305,7 @@ out_iounmap:
return ret;
}
-static int apple_soc_cpufreq_exit(struct cpufreq_policy *policy)
+static void apple_soc_cpufreq_exit(struct cpufreq_policy *policy)
{
struct apple_cpu_priv *priv = policy->driver_data;
@@ -313,8 +313,6 @@ static int apple_soc_cpufreq_exit(struct cpufreq_policy *policy)
dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
iounmap(priv->reg_base);
kfree(priv);
-
- return 0;
}
static struct cpufreq_driver apple_soc_cpufreq_driver = {
diff --git a/drivers/cpufreq/bmips-cpufreq.c b/drivers/cpufreq/bmips-cpufreq.c
index 39221a9a187a..17a4c174553d 100644
--- a/drivers/cpufreq/bmips-cpufreq.c
+++ b/drivers/cpufreq/bmips-cpufreq.c
@@ -121,11 +121,9 @@ static int bmips_cpufreq_target_index(struct cpufreq_policy *policy,
return 0;
}
-static int bmips_cpufreq_exit(struct cpufreq_policy *policy)
+static void bmips_cpufreq_exit(struct cpufreq_policy *policy)
{
kfree(policy->freq_table);
-
- return 0;
}
static int bmips_cpufreq_init(struct cpufreq_policy *policy)
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 15f1d41920a3..bafa32dd375d 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -291,15 +291,10 @@ static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
struct cppc_cpudata *cpu_data = policy->driver_data;
unsigned int cpu = policy->cpu;
struct cpufreq_freqs freqs;
- u32 desired_perf;
int ret = 0;
- desired_perf = cppc_khz_to_perf(&cpu_data->perf_caps, target_freq);
- /* Return if it is exactly the same perf */
- if (desired_perf == cpu_data->perf_ctrls.desired_perf)
- return ret;
-
- cpu_data->perf_ctrls.desired_perf = desired_perf;
+ cpu_data->perf_ctrls.desired_perf =
+ cppc_khz_to_perf(&cpu_data->perf_caps, target_freq);
freqs.old = policy->cur;
freqs.new = target_freq;
@@ -688,7 +683,7 @@ out:
return ret;
}
-static int cppc_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+static void cppc_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
struct cppc_cpudata *cpu_data = policy->driver_data;
struct cppc_perf_caps *caps = &cpu_data->perf_caps;
@@ -705,7 +700,6 @@ static int cppc_cpufreq_cpu_exit(struct cpufreq_policy *policy)
caps->lowest_perf, cpu, ret);
cppc_cpufreq_put_cpu_data(policy);
- return 0;
}
static inline u64 get_delta(u64 t1, u64 t0)
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index c74dd1e01e0d..cac379ba006d 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -233,4 +233,5 @@ create_pdev:
sizeof(struct cpufreq_dt_platform_data)));
}
core_initcall(cpufreq_dt_platdev_init);
+MODULE_DESCRIPTION("Generic DT based cpufreq platdev driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 907e22632fda..6532c4d71338 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -157,10 +157,9 @@ static int cpufreq_offline(struct cpufreq_policy *policy)
return 0;
}
-static int cpufreq_exit(struct cpufreq_policy *policy)
+static void cpufreq_exit(struct cpufreq_policy *policy)
{
clk_put(policy->clk);
- return 0;
}
static struct cpufreq_driver dt_cpufreq_driver = {
diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c
index f7a7bcf6f52e..fedad1081973 100644
--- a/drivers/cpufreq/cpufreq-nforce2.c
+++ b/drivers/cpufreq/cpufreq-nforce2.c
@@ -359,11 +359,6 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy)
return 0;
}
-static int nforce2_cpu_exit(struct cpufreq_policy *policy)
-{
- return 0;
-}
-
static struct cpufreq_driver nforce2_driver = {
.name = "nforce2",
.flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
@@ -371,7 +366,6 @@ static struct cpufreq_driver nforce2_driver = {
.target = nforce2_target,
.get = nforce2_get,
.init = nforce2_cpu_init,
- .exit = nforce2_cpu_exit,
};
#ifdef MODULE
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 9e5060b27864..04fc786dd2c0 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -608,16 +608,15 @@ EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
static ssize_t show_boost(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
+ return sysfs_emit(buf, "%d\n", cpufreq_driver->boost_enabled);
}
static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
- int ret, enable;
+ bool enable;
- ret = sscanf(buf, "%d", &enable);
- if (ret != 1 || enable < 0 || enable > 1)
+ if (kstrtobool(buf, &enable))
return -EINVAL;
if (cpufreq_boost_trigger_state(enable)) {
@@ -641,10 +640,10 @@ static ssize_t show_local_boost(struct cpufreq_policy *policy, char *buf)
static ssize_t store_local_boost(struct cpufreq_policy *policy,
const char *buf, size_t count)
{
- int ret, enable;
+ int ret;
+ bool enable;
- ret = kstrtoint(buf, 10, &enable);
- if (ret || enable < 0 || enable > 1)
+ if (kstrtobool(buf, &enable))
return -EINVAL;
if (!cpufreq_driver->boost_enabled)
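A side effect of switching both boost knobs to kstrtobool() is that, in addition to "1"/"0", the usual boolean spellings accepted by that helper (such as "y"/"n" and "on"/"off") now count as valid input.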
@@ -739,7 +738,7 @@ static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor)
static ssize_t show_##file_name \
(struct cpufreq_policy *policy, char *buf) \
{ \
- return sprintf(buf, "%u\n", policy->object); \
+ return sysfs_emit(buf, "%u\n", policy->object); \
}
show_one(cpuinfo_min_freq, cpuinfo.min_freq);
@@ -760,11 +759,11 @@ static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
freq = arch_freq_get_on_cpu(policy->cpu);
if (freq)
- ret = sprintf(buf, "%u\n", freq);
+ ret = sysfs_emit(buf, "%u\n", freq);
else if (cpufreq_driver->setpolicy && cpufreq_driver->get)
- ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
+ ret = sysfs_emit(buf, "%u\n", cpufreq_driver->get(policy->cpu));
else
- ret = sprintf(buf, "%u\n", policy->cur);
+ ret = sysfs_emit(buf, "%u\n", policy->cur);
return ret;
}
@@ -798,9 +797,9 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
unsigned int cur_freq = __cpufreq_get(policy);
if (cur_freq)
- return sprintf(buf, "%u\n", cur_freq);
+ return sysfs_emit(buf, "%u\n", cur_freq);
- return sprintf(buf, "<unknown>\n");
+ return sysfs_emit(buf, "<unknown>\n");
}
/*
@@ -809,12 +808,11 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
- return sprintf(buf, "powersave\n");
+ return sysfs_emit(buf, "powersave\n");
else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
- return sprintf(buf, "performance\n");
+ return sysfs_emit(buf, "performance\n");
else if (policy->governor)
- return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
- policy->governor->name);
+ return sysfs_emit(buf, "%s\n", policy->governor->name);
return -EINVAL;
}
@@ -873,7 +871,7 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
struct cpufreq_governor *t;
if (!has_target()) {
- i += sprintf(buf, "performance powersave");
+ i += sysfs_emit(buf, "performance powersave");
goto out;
}
@@ -882,11 +880,11 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
- (CPUFREQ_NAME_LEN + 2)))
break;
- i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
+ i += sysfs_emit_at(buf, i, "%s ", t->name);
}
mutex_unlock(&cpufreq_governor_mutex);
out:
- i += sprintf(&buf[i], "\n");
+ i += sysfs_emit_at(buf, i, "\n");
return i;
}
@@ -896,7 +894,7 @@ ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
unsigned int cpu;
for_each_cpu(cpu, mask) {
- i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u ", cpu);
+ i += sysfs_emit_at(buf, i, "%u ", cpu);
if (i >= (PAGE_SIZE - 5))
break;
}
@@ -904,7 +902,7 @@ ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
/* Remove the extra space at the end */
i--;
- i += sprintf(&buf[i], "\n");
+ i += sysfs_emit_at(buf, i, "\n");
return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
@@ -947,7 +945,7 @@ static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
if (!policy->governor || !policy->governor->show_setspeed)
- return sprintf(buf, "<unsupported>\n");
+ return sysfs_emit(buf, "<unsupported>\n");
return policy->governor->show_setspeed(policy, buf);
}
@@ -961,8 +959,8 @@ static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
int ret;
ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
if (!ret)
- return sprintf(buf, "%u\n", limit);
- return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
+ return sysfs_emit(buf, "%u\n", limit);
+ return sysfs_emit(buf, "%u\n", policy->cpuinfo.max_freq);
}
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
@@ -2876,7 +2874,7 @@ int cpufreq_enable_boost_support(void)
}
EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
-int cpufreq_boost_enabled(void)
+bool cpufreq_boost_enabled(void)
{
return cpufreq_driver->boost_enabled;
}
diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c
index ab93bce8ae77..6e958b09e1b5 100644
--- a/drivers/cpufreq/e_powersaver.c
+++ b/drivers/cpufreq/e_powersaver.c
@@ -360,14 +360,13 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
return 0;
}
-static int eps_cpu_exit(struct cpufreq_policy *policy)
+static void eps_cpu_exit(struct cpufreq_policy *policy)
{
unsigned int cpu = policy->cpu;
/* Bye */
kfree(eps_cpu[cpu]);
eps_cpu[cpu] = NULL;
- return 0;
}
static struct cpufreq_driver eps_driver = {
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index c31914a9876f..392a8000b238 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -300,6 +300,7 @@ static struct cpufreq_driver *intel_pstate_driver __read_mostly;
#define HYBRID_SCALING_FACTOR 78741
#define HYBRID_SCALING_FACTOR_MTL 80000
+#define HYBRID_SCALING_FACTOR_LNL 86957
static int hybrid_scaling_factor = HYBRID_SCALING_FACTOR;
@@ -1625,17 +1626,24 @@ static void intel_pstate_notify_work(struct work_struct *work)
static DEFINE_SPINLOCK(hwp_notify_lock);
static cpumask_t hwp_intr_enable_mask;
+#define HWP_GUARANTEED_PERF_CHANGE_STATUS BIT(0)
+#define HWP_HIGHEST_PERF_CHANGE_STATUS BIT(3)
+
void notify_hwp_interrupt(void)
{
unsigned int this_cpu = smp_processor_id();
+ u64 value, status_mask;
unsigned long flags;
- u64 value;
- if (!hwp_active || !boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
+ if (!hwp_active || !cpu_feature_enabled(X86_FEATURE_HWP_NOTIFY))
return;
+ status_mask = HWP_GUARANTEED_PERF_CHANGE_STATUS;
+ if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE))
+ status_mask |= HWP_HIGHEST_PERF_CHANGE_STATUS;
+
rdmsrl_safe(MSR_HWP_STATUS, &value);
- if (!(value & 0x01))
+ if (!(value & status_mask))
return;
spin_lock_irqsave(&hwp_notify_lock, flags);
@@ -1659,7 +1667,7 @@ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
{
bool cancel_work;
- if (!boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
+ if (!cpu_feature_enabled(X86_FEATURE_HWP_NOTIFY))
return;
/* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
@@ -1673,17 +1681,25 @@ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
cancel_delayed_work_sync(&cpudata->hwp_notify_work);
}
+#define HWP_GUARANTEED_PERF_CHANGE_REQ BIT(0)
+#define HWP_HIGHEST_PERF_CHANGE_REQ BIT(2)
+
static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
{
- /* Enable HWP notification interrupt for guaranteed performance change */
+ /* Enable HWP notification interrupt for performance change */
if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) {
+ u64 interrupt_mask = HWP_GUARANTEED_PERF_CHANGE_REQ;
+
spin_lock_irq(&hwp_notify_lock);
INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work);
cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask);
spin_unlock_irq(&hwp_notify_lock);
+ if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE))
+ interrupt_mask |= HWP_HIGHEST_PERF_CHANGE_REQ;
+
/* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
- wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x01);
+ wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, interrupt_mask);
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
}
}
@@ -2367,54 +2383,54 @@ static const struct pstate_funcs knl_funcs = {
.get_val = core_get_val,
};
-#define X86_MATCH(model, policy) \
- X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
- X86_FEATURE_APERFMPERF, &policy)
+#define X86_MATCH(vfm, policy) \
+ X86_MATCH_VFM_FEATURE(vfm, X86_FEATURE_APERFMPERF, &policy)
static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
- X86_MATCH(SANDYBRIDGE, core_funcs),
- X86_MATCH(SANDYBRIDGE_X, core_funcs),
- X86_MATCH(ATOM_SILVERMONT, silvermont_funcs),
- X86_MATCH(IVYBRIDGE, core_funcs),
- X86_MATCH(HASWELL, core_funcs),
- X86_MATCH(BROADWELL, core_funcs),
- X86_MATCH(IVYBRIDGE_X, core_funcs),
- X86_MATCH(HASWELL_X, core_funcs),
- X86_MATCH(HASWELL_L, core_funcs),
- X86_MATCH(HASWELL_G, core_funcs),
- X86_MATCH(BROADWELL_G, core_funcs),
- X86_MATCH(ATOM_AIRMONT, airmont_funcs),
- X86_MATCH(SKYLAKE_L, core_funcs),
- X86_MATCH(BROADWELL_X, core_funcs),
- X86_MATCH(SKYLAKE, core_funcs),
- X86_MATCH(BROADWELL_D, core_funcs),
- X86_MATCH(XEON_PHI_KNL, knl_funcs),
- X86_MATCH(XEON_PHI_KNM, knl_funcs),
- X86_MATCH(ATOM_GOLDMONT, core_funcs),
- X86_MATCH(ATOM_GOLDMONT_PLUS, core_funcs),
- X86_MATCH(SKYLAKE_X, core_funcs),
- X86_MATCH(COMETLAKE, core_funcs),
- X86_MATCH(ICELAKE_X, core_funcs),
- X86_MATCH(TIGERLAKE, core_funcs),
- X86_MATCH(SAPPHIRERAPIDS_X, core_funcs),
- X86_MATCH(EMERALDRAPIDS_X, core_funcs),
+ X86_MATCH(INTEL_SANDYBRIDGE, core_funcs),
+ X86_MATCH(INTEL_SANDYBRIDGE_X, core_funcs),
+ X86_MATCH(INTEL_ATOM_SILVERMONT, silvermont_funcs),
+ X86_MATCH(INTEL_IVYBRIDGE, core_funcs),
+ X86_MATCH(INTEL_HASWELL, core_funcs),
+ X86_MATCH(INTEL_BROADWELL, core_funcs),
+ X86_MATCH(INTEL_IVYBRIDGE_X, core_funcs),
+ X86_MATCH(INTEL_HASWELL_X, core_funcs),
+ X86_MATCH(INTEL_HASWELL_L, core_funcs),
+ X86_MATCH(INTEL_HASWELL_G, core_funcs),
+ X86_MATCH(INTEL_BROADWELL_G, core_funcs),
+ X86_MATCH(INTEL_ATOM_AIRMONT, airmont_funcs),
+ X86_MATCH(INTEL_SKYLAKE_L, core_funcs),
+ X86_MATCH(INTEL_BROADWELL_X, core_funcs),
+ X86_MATCH(INTEL_SKYLAKE, core_funcs),
+ X86_MATCH(INTEL_BROADWELL_D, core_funcs),
+ X86_MATCH(INTEL_XEON_PHI_KNL, knl_funcs),
+ X86_MATCH(INTEL_XEON_PHI_KNM, knl_funcs),
+ X86_MATCH(INTEL_ATOM_GOLDMONT, core_funcs),
+ X86_MATCH(INTEL_ATOM_GOLDMONT_PLUS, core_funcs),
+ X86_MATCH(INTEL_SKYLAKE_X, core_funcs),
+ X86_MATCH(INTEL_COMETLAKE, core_funcs),
+ X86_MATCH(INTEL_ICELAKE_X, core_funcs),
+ X86_MATCH(INTEL_TIGERLAKE, core_funcs),
+ X86_MATCH(INTEL_SAPPHIRERAPIDS_X, core_funcs),
+ X86_MATCH(INTEL_EMERALDRAPIDS_X, core_funcs),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
#ifdef CONFIG_ACPI
static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
- X86_MATCH(BROADWELL_D, core_funcs),
- X86_MATCH(BROADWELL_X, core_funcs),
- X86_MATCH(SKYLAKE_X, core_funcs),
- X86_MATCH(ICELAKE_X, core_funcs),
- X86_MATCH(SAPPHIRERAPIDS_X, core_funcs),
+ X86_MATCH(INTEL_BROADWELL_D, core_funcs),
+ X86_MATCH(INTEL_BROADWELL_X, core_funcs),
+ X86_MATCH(INTEL_SKYLAKE_X, core_funcs),
+ X86_MATCH(INTEL_ICELAKE_X, core_funcs),
+ X86_MATCH(INTEL_SAPPHIRERAPIDS_X, core_funcs),
+ X86_MATCH(INTEL_EMERALDRAPIDS_X, core_funcs),
{}
};
#endif
static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
- X86_MATCH(KABYLAKE, core_funcs),
+ X86_MATCH(INTEL_KABYLAKE, core_funcs),
{}
};
@@ -2699,13 +2715,11 @@ static int intel_pstate_cpu_offline(struct cpufreq_policy *policy)
return intel_cpufreq_cpu_offline(policy);
}
-static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
+static void intel_pstate_cpu_exit(struct cpufreq_policy *policy)
{
pr_debug("CPU %d exiting\n", policy->cpu);
policy->fast_switch_possible = false;
-
- return 0;
}
static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
@@ -3036,7 +3050,7 @@ pstate_exit:
return ret;
}
-static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+static void intel_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
struct freq_qos_request *req;
@@ -3046,7 +3060,7 @@ static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy)
freq_qos_remove_request(req);
kfree(req);
- return intel_pstate_cpu_exit(policy);
+ intel_pstate_cpu_exit(policy);
}
static int intel_cpufreq_suspend(struct cpufreq_policy *policy)
@@ -3350,14 +3364,13 @@ static inline void intel_pstate_request_control_from_smm(void) {}
#define INTEL_PSTATE_HWP_BROADWELL 0x01
-#define X86_MATCH_HWP(model, hwp_mode) \
- X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
- X86_FEATURE_HWP, hwp_mode)
+#define X86_MATCH_HWP(vfm, hwp_mode) \
+ X86_MATCH_VFM_FEATURE(vfm, X86_FEATURE_HWP, hwp_mode)
static const struct x86_cpu_id hwp_support_ids[] __initconst = {
- X86_MATCH_HWP(BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL),
- X86_MATCH_HWP(BROADWELL_D, INTEL_PSTATE_HWP_BROADWELL),
- X86_MATCH_HWP(ANY, 0),
+ X86_MATCH_HWP(INTEL_BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL),
+ X86_MATCH_HWP(INTEL_BROADWELL_D, INTEL_PSTATE_HWP_BROADWELL),
+ X86_MATCH_HWP(INTEL_ANY, 0),
{}
};
@@ -3390,15 +3403,19 @@ static const struct x86_cpu_id intel_epp_default[] = {
* which can result in one core turbo frequency for
* AlderLake Mobile CPUs.
*/
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, HWP_SET_DEF_BALANCE_PERF_EPP(102)),
- X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)),
- X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE,
- HWP_EPP_BALANCE_POWERSAVE, 115, 16)),
+ X86_MATCH_VFM(INTEL_ALDERLAKE_L, HWP_SET_DEF_BALANCE_PERF_EPP(102)),
+ X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)),
+ X86_MATCH_VFM(INTEL_METEORLAKE_L, HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE,
+ 179, 64, 16)),
+ X86_MATCH_VFM(INTEL_ARROWLAKE, HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE,
+ 179, 64, 16)),
{}
};
static const struct x86_cpu_id intel_hybrid_scaling_factor[] = {
- X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, HYBRID_SCALING_FACTOR_MTL),
+ X86_MATCH_VFM(INTEL_METEORLAKE_L, HYBRID_SCALING_FACTOR_MTL),
+ X86_MATCH_VFM(INTEL_ARROWLAKE, HYBRID_SCALING_FACTOR_MTL),
+ X86_MATCH_VFM(INTEL_LUNARLAKE_M, HYBRID_SCALING_FACTOR_LNL),
{}
};
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index 4c57c6725c13..bd6fe8638d39 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -236,8 +236,9 @@ static void do_powersaver(int cx_address, unsigned int mults_index,
}
/**
- * longhaul_set_cpu_frequency()
- * @mults_index : bitpattern of the new multiplier.
+ * longhaul_setstate()
+ * @policy: cpufreq_policy structure containing the current policy.
+ * @table_index: index of the frequency within the cpufreq_frequency_table.
*
* Sets a new clock ratio.
*/
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index afc59b292153..6a8e97896d38 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -85,18 +85,12 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
return 0;
}
-static int loongson2_cpufreq_exit(struct cpufreq_policy *policy)
-{
- return 0;
-}
-
static struct cpufreq_driver loongson2_cpufreq_driver = {
.name = "loongson2",
.init = loongson2_cpufreq_cpu_init,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = loongson2_cpufreq_target,
.get = cpufreq_generic_get,
- .exit = loongson2_cpufreq_exit,
.attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/loongson3_cpufreq.c b/drivers/cpufreq/loongson3_cpufreq.c
new file mode 100644
index 000000000000..5f79b6de127c
--- /dev/null
+++ b/drivers/cpufreq/loongson3_cpufreq.c
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * CPUFreq driver for the Loongson-3 processors.
+ *
+ * All revisions of the Loongson-3 processor support the cpu_has_scalefreq feature.
+ *
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2024 Loongson Technology Corporation Limited
+ */
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/units.h>
+
+#include <asm/idle.h>
+#include <asm/loongarch.h>
+#include <asm/loongson.h>
+
+/* Message */
+union smc_message {
+ u32 value;
+ struct {
+ u32 id : 4;
+ u32 info : 4;
+ u32 val : 16;
+ u32 cmd : 6;
+ u32 extra : 1;
+ u32 complete : 1;
+ };
+};
+
+/* Command return values */
+#define CMD_OK 0 /* No error */
+#define CMD_ERROR 1 /* Regular error */
+#define CMD_NOCMD 2 /* Command not supported */
+#define CMD_INVAL 3 /* Invalid Parameter */
+
+/* Version commands */
+/*
+ * CMD_GET_VERSION - Get interface version
+ * Input: none
+ * Output: version
+ */
+#define CMD_GET_VERSION 0x1
+
+/* Feature commands */
+/*
+ * CMD_GET_FEATURE - Get feature state
+ * Input: feature ID
+ * Output: feature flag
+ */
+#define CMD_GET_FEATURE 0x2
+
+/*
+ * CMD_SET_FEATURE - Set feature state
+ * Input: feature ID, feature flag
+ * output: none
+ */
+#define CMD_SET_FEATURE 0x3
+
+/* Feature IDs */
+#define FEATURE_SENSOR 0
+#define FEATURE_FAN 1
+#define FEATURE_DVFS 2
+
+/* Sensor feature flags */
+#define FEATURE_SENSOR_ENABLE BIT(0)
+#define FEATURE_SENSOR_SAMPLE BIT(1)
+
+/* Fan feature flags */
+#define FEATURE_FAN_ENABLE BIT(0)
+#define FEATURE_FAN_AUTO BIT(1)
+
+/* DVFS feature flags */
+#define FEATURE_DVFS_ENABLE BIT(0)
+#define FEATURE_DVFS_BOOST BIT(1)
+#define FEATURE_DVFS_AUTO BIT(2)
+#define FEATURE_DVFS_SINGLE_BOOST BIT(3)
+
+/* Sensor commands */
+/*
+ * CMD_GET_SENSOR_NUM - Get number of sensors
+ * Input: none
+ * Output: number
+ */
+#define CMD_GET_SENSOR_NUM 0x4
+
+/*
+ * CMD_GET_SENSOR_STATUS - Get sensor status
+ * Input: sensor ID, type
+ * Output: sensor status
+ */
+#define CMD_GET_SENSOR_STATUS 0x5
+
+/* Sensor types */
+#define SENSOR_INFO_TYPE 0
+#define SENSOR_INFO_TYPE_TEMP 1
+
+/* Fan commands */
+/*
+ * CMD_GET_FAN_NUM - Get number of fans
+ * Input: none
+ * Output: number
+ */
+#define CMD_GET_FAN_NUM 0x6
+
+/*
+ * CMD_GET_FAN_INFO - Get fan status
+ * Input: fan ID, type
+ * Output: fan info
+ */
+#define CMD_GET_FAN_INFO 0x7
+
+/*
+ * CMD_SET_FAN_INFO - Set fan status
+ * Input: fan ID, type, value
+ * Output: none
+ */
+#define CMD_SET_FAN_INFO 0x8
+
+/* Fan types */
+#define FAN_INFO_TYPE_LEVEL 0
+
+/* DVFS commands */
+/*
+ * CMD_GET_FREQ_LEVEL_NUM - Get number of freq levels
+ * Input: CPU ID
+ * Output: number
+ */
+#define CMD_GET_FREQ_LEVEL_NUM 0x9
+
+/*
+ * CMD_GET_FREQ_BOOST_LEVEL - Get the first boost level
+ * Input: CPU ID
+ * Output: number
+ */
+#define CMD_GET_FREQ_BOOST_LEVEL 0x10
+
+/*
+ * CMD_GET_FREQ_LEVEL_INFO - Get freq level info
+ * Input: CPU ID, level ID
+ * Output: level info
+ */
+#define CMD_GET_FREQ_LEVEL_INFO 0x11
+
+/*
+ * CMD_GET_FREQ_INFO - Get freq info
+ * Input: CPU ID, type
+ * Output: freq info
+ */
+#define CMD_GET_FREQ_INFO 0x12
+
+/*
+ * CMD_SET_FREQ_INFO - Set freq info
+ * Input: CPU ID, type, value
+ * Output: none
+ */
+#define CMD_SET_FREQ_INFO 0x13
+
+/* Freq types */
+#define FREQ_INFO_TYPE_FREQ 0
+#define FREQ_INFO_TYPE_LEVEL 1
+
+#define FREQ_MAX_LEVEL 16
+
+struct loongson3_freq_data {
+ unsigned int def_freq_level;
+ struct cpufreq_frequency_table table[];
+};
+
+static struct mutex cpufreq_mutex[MAX_PACKAGES];
+static struct cpufreq_driver loongson3_cpufreq_driver;
+static DEFINE_PER_CPU(struct loongson3_freq_data *, freq_data);
+
+static inline int do_service_request(u32 id, u32 info, u32 cmd, u32 val, u32 extra)
+{
+ int retries;
+ unsigned int cpu = smp_processor_id();
+ unsigned int package = cpu_data[cpu].package;
+ union smc_message msg, last;
+
+ mutex_lock(&cpufreq_mutex[package]);
+
+ last.value = iocsr_read32(LOONGARCH_IOCSR_SMCMBX);
+ if (!last.complete) {
+ mutex_unlock(&cpufreq_mutex[package]);
+ return -EPERM;
+ }
+
+ msg.id = id;
+ msg.info = info;
+ msg.cmd = cmd;
+ msg.val = val;
+ msg.extra = extra;
+ msg.complete = 0;
+
+ iocsr_write32(msg.value, LOONGARCH_IOCSR_SMCMBX);
+ iocsr_write32(iocsr_read32(LOONGARCH_IOCSR_MISC_FUNC) | IOCSR_MISC_FUNC_SOFT_INT,
+ LOONGARCH_IOCSR_MISC_FUNC);
+
+ for (retries = 0; retries < 10000; retries++) {
+ msg.value = iocsr_read32(LOONGARCH_IOCSR_SMCMBX);
+ if (msg.complete)
+ break;
+
+ usleep_range(8, 12);
+ }
+
+ if (!msg.complete || msg.cmd != CMD_OK) {
+ mutex_unlock(&cpufreq_mutex[package]);
+ return -EPERM;
+ }
+
+ mutex_unlock(&cpufreq_mutex[package]);
+
+ return msg.val;
+}
+
+static unsigned int loongson3_cpufreq_get(unsigned int cpu)
+{
+ int ret;
+
+ ret = do_service_request(cpu, FREQ_INFO_TYPE_FREQ, CMD_GET_FREQ_INFO, 0, 0);
+
+ return ret * KILO;
+}
+
+static int loongson3_cpufreq_target(struct cpufreq_policy *policy, unsigned int index)
+{
+ int ret;
+
+ ret = do_service_request(cpu_data[policy->cpu].core,
+ FREQ_INFO_TYPE_LEVEL, CMD_SET_FREQ_INFO, index, 0);
+
+ return (ret >= 0) ? 0 : ret;
+}
+
+static int configure_freq_table(int cpu)
+{
+ int i, ret, boost_level, max_level, freq_level;
+ struct platform_device *pdev = cpufreq_get_driver_data();
+ struct loongson3_freq_data *data;
+
+ if (per_cpu(freq_data, cpu))
+ return 0;
+
+ ret = do_service_request(cpu, 0, CMD_GET_FREQ_LEVEL_NUM, 0, 0);
+ if (ret < 0)
+ return ret;
+ max_level = ret;
+
+ ret = do_service_request(cpu, 0, CMD_GET_FREQ_BOOST_LEVEL, 0, 0);
+ if (ret < 0)
+ return ret;
+ boost_level = ret;
+
+ freq_level = min(max_level, FREQ_MAX_LEVEL);
+ data = devm_kzalloc(&pdev->dev, struct_size(data, table, freq_level + 1), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->def_freq_level = boost_level - 1;
+
+ for (i = 0; i < freq_level; i++) {
+ ret = do_service_request(cpu, FREQ_INFO_TYPE_FREQ, CMD_GET_FREQ_LEVEL_INFO, i, 0);
+ if (ret < 0) {
+ devm_kfree(&pdev->dev, data);
+ return ret;
+ }
+
+ data->table[i].frequency = ret * KILO;
+ data->table[i].flags = (i >= boost_level) ? CPUFREQ_BOOST_FREQ : 0;
+ }
+
+ data->table[freq_level].flags = 0;
+ data->table[freq_level].frequency = CPUFREQ_TABLE_END;
+
+ per_cpu(freq_data, cpu) = data;
+
+ return 0;
+}
+
+static int loongson3_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ int i, ret, cpu = policy->cpu;
+
+ ret = configure_freq_table(cpu);
+ if (ret < 0)
+ return ret;
+
+ policy->cpuinfo.transition_latency = 10000;
+ policy->freq_table = per_cpu(freq_data, cpu)->table;
+ policy->suspend_freq = policy->freq_table[per_cpu(freq_data, cpu)->def_freq_level].frequency;
+ cpumask_copy(policy->cpus, topology_sibling_cpumask(cpu));
+
+ for_each_cpu(i, policy->cpus) {
+ if (i != cpu)
+ per_cpu(freq_data, i) = per_cpu(freq_data, cpu);
+ }
+
+ if (policy_has_boost_freq(policy)) {
+ ret = cpufreq_enable_boost_support();
+ if (ret < 0) {
+ pr_warn("cpufreq: Failed to enable boost: %d\n", ret);
+ return ret;
+ }
+ loongson3_cpufreq_driver.boost_enabled = true;
+ }
+
+ return 0;
+}
+
+static void loongson3_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ int cpu = policy->cpu;
+
+ loongson3_cpufreq_target(policy, per_cpu(freq_data, cpu)->def_freq_level);
+}
+
+static int loongson3_cpufreq_cpu_online(struct cpufreq_policy *policy)
+{
+ return 0;
+}
+
+static int loongson3_cpufreq_cpu_offline(struct cpufreq_policy *policy)
+{
+ return 0;
+}
+
+static struct cpufreq_driver loongson3_cpufreq_driver = {
+ .name = "loongson3",
+ .flags = CPUFREQ_CONST_LOOPS,
+ .init = loongson3_cpufreq_cpu_init,
+ .exit = loongson3_cpufreq_cpu_exit,
+ .online = loongson3_cpufreq_cpu_online,
+ .offline = loongson3_cpufreq_cpu_offline,
+ .get = loongson3_cpufreq_get,
+ .target_index = loongson3_cpufreq_target,
+ .attr = cpufreq_generic_attr,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .suspend = cpufreq_generic_suspend,
+};
+
+static int loongson3_cpufreq_probe(struct platform_device *pdev)
+{
+ int i, ret;
+
+ for (i = 0; i < MAX_PACKAGES; i++)
+ devm_mutex_init(&pdev->dev, &cpufreq_mutex[i]);
+
+ ret = do_service_request(0, 0, CMD_GET_VERSION, 0, 0);
+ if (ret <= 0)
+ return -EPERM;
+
+ ret = do_service_request(FEATURE_DVFS, 0, CMD_SET_FEATURE,
+ FEATURE_DVFS_ENABLE | FEATURE_DVFS_BOOST, 0);
+ if (ret < 0)
+ return -EPERM;
+
+ loongson3_cpufreq_driver.driver_data = pdev;
+
+ ret = cpufreq_register_driver(&loongson3_cpufreq_driver);
+ if (ret)
+ return ret;
+
+ pr_info("cpufreq: Loongson-3 CPU frequency driver.\n");
+
+ return 0;
+}
+
+static void loongson3_cpufreq_remove(struct platform_device *pdev)
+{
+ cpufreq_unregister_driver(&loongson3_cpufreq_driver);
+}
+
+static struct platform_device_id cpufreq_id_table[] = {
+ { "loongson3_cpufreq", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, cpufreq_id_table);
+
+static struct platform_driver loongson3_platform_driver = {
+ .driver = {
+ .name = "loongson3_cpufreq",
+ },
+ .id_table = cpufreq_id_table,
+ .probe = loongson3_cpufreq_probe,
+ .remove_new = loongson3_cpufreq_remove,
+};
+module_platform_driver(loongson3_platform_driver);
+
+MODULE_AUTHOR("Huacai Chen <chenhuacai@loongson.cn>");
+MODULE_DESCRIPTION("CPUFreq driver for Loongson-3 processors");
+MODULE_LICENSE("GPL");
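As an illustration of the mailbox word layout used by do_service_request() above, here is a self-contained userspace sketch that mirrors union smc_message; it assumes the usual LSB-first bitfield allocation on little-endian LoongArch, under which a CMD_GET_FREQ_INFO request for CPU 2 packs to 0x12000002:

#include <stdio.h>
#include <stdint.h>

/* Mirror of union smc_message, assuming LSB-first bitfield layout. */
union smc_message {
	uint32_t value;
	struct {
		uint32_t id : 4;	/* CPU or feature ID */
		uint32_t info : 4;	/* info/type field */
		uint32_t val : 16;	/* command argument */
		uint32_t cmd : 6;	/* command code */
		uint32_t extra : 1;
		uint32_t complete : 1;	/* set by firmware when the reply is ready */
	};
};

int main(void)
{
	union smc_message msg = { .value = 0 };

	msg.id = 2;		/* target CPU 2 */
	msg.info = 0;		/* FREQ_INFO_TYPE_FREQ */
	msg.cmd = 0x12;		/* CMD_GET_FREQ_INFO */

	printf("mailbox word: 0x%08x\n", msg.value);	/* 0x12000002 with this layout */
	return 0;
}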
diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c
index 8d097dcddda4..8925e096d5b9 100644
--- a/drivers/cpufreq/mediatek-cpufreq-hw.c
+++ b/drivers/cpufreq/mediatek-cpufreq-hw.c
@@ -260,7 +260,7 @@ static int mtk_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
return 0;
}
-static int mtk_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
+static void mtk_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
{
struct mtk_cpufreq_data *data = policy->driver_data;
struct resource *res = data->res;
@@ -270,8 +270,6 @@ static int mtk_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
writel_relaxed(0x0, data->reg_bases[REG_FREQ_ENABLE]);
iounmap(base);
release_mem_region(res->start, resource_size(res));
-
- return 0;
}
static void mtk_cpufreq_register_em(struct cpufreq_policy *policy)
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index 518606adf14e..3a1aadaa723c 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -390,28 +390,23 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
int ret;
cpu_dev = get_cpu_device(cpu);
- if (!cpu_dev) {
- dev_err(cpu_dev, "failed to get cpu%d device\n", cpu);
- return -ENODEV;
- }
+ if (!cpu_dev)
+ return dev_err_probe(cpu_dev, -ENODEV, "failed to get cpu%d device\n", cpu);
info->cpu_dev = cpu_dev;
info->ccifreq_bound = false;
if (info->soc_data->ccifreq_supported) {
info->cci_dev = of_get_cci(info->cpu_dev);
- if (IS_ERR(info->cci_dev)) {
- ret = PTR_ERR(info->cci_dev);
- dev_err(cpu_dev, "cpu%d: failed to get cci device\n", cpu);
- return -ENODEV;
- }
+ if (IS_ERR(info->cci_dev))
+ return dev_err_probe(cpu_dev, PTR_ERR(info->cci_dev),
+ "cpu%d: failed to get cci device\n",
+ cpu);
}
info->cpu_clk = clk_get(cpu_dev, "cpu");
- if (IS_ERR(info->cpu_clk)) {
- ret = PTR_ERR(info->cpu_clk);
- return dev_err_probe(cpu_dev, ret,
+ if (IS_ERR(info->cpu_clk))
+ return dev_err_probe(cpu_dev, PTR_ERR(info->cpu_clk),
"cpu%d: failed to get cpu clk\n", cpu);
- }
info->inter_clk = clk_get(cpu_dev, "intermediate");
if (IS_ERR(info->inter_clk)) {
@@ -431,7 +426,7 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
ret = regulator_enable(info->proc_reg);
if (ret) {
- dev_warn(cpu_dev, "cpu%d: failed to enable vproc\n", cpu);
+ dev_err_probe(cpu_dev, ret, "cpu%d: failed to enable vproc\n", cpu);
goto out_free_proc_reg;
}
@@ -439,14 +434,17 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
info->sram_reg = regulator_get_optional(cpu_dev, "sram");
if (IS_ERR(info->sram_reg)) {
ret = PTR_ERR(info->sram_reg);
- if (ret == -EPROBE_DEFER)
+ if (ret == -EPROBE_DEFER) {
+ dev_err_probe(cpu_dev, ret,
+ "cpu%d: Failed to get sram regulator\n", cpu);
goto out_disable_proc_reg;
+ }
info->sram_reg = NULL;
} else {
ret = regulator_enable(info->sram_reg);
if (ret) {
- dev_warn(cpu_dev, "cpu%d: failed to enable vsram\n", cpu);
+ dev_err_probe(cpu_dev, ret, "cpu%d: failed to enable vsram\n", cpu);
goto out_free_sram_reg;
}
}
@@ -454,31 +452,34 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
/* Get OPP-sharing information from "operating-points-v2" bindings */
ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, &info->cpus);
if (ret) {
- dev_err(cpu_dev,
+ dev_err_probe(cpu_dev, ret,
"cpu%d: failed to get OPP-sharing information\n", cpu);
goto out_disable_sram_reg;
}
ret = dev_pm_opp_of_cpumask_add_table(&info->cpus);
if (ret) {
- dev_warn(cpu_dev, "cpu%d: no OPP table\n", cpu);
+ dev_err_probe(cpu_dev, ret, "cpu%d: no OPP table\n", cpu);
goto out_disable_sram_reg;
}
ret = clk_prepare_enable(info->cpu_clk);
- if (ret)
+ if (ret) {
+ dev_err_probe(cpu_dev, ret, "cpu%d: failed to enable cpu clk\n", cpu);
goto out_free_opp_table;
+ }
ret = clk_prepare_enable(info->inter_clk);
- if (ret)
+ if (ret) {
+ dev_err_probe(cpu_dev, ret, "cpu%d: failed to enable inter clk\n", cpu);
goto out_disable_mux_clock;
+ }
if (info->soc_data->ccifreq_supported) {
info->vproc_on_boot = regulator_get_voltage(info->proc_reg);
if (info->vproc_on_boot < 0) {
- ret = info->vproc_on_boot;
- dev_err(info->cpu_dev,
- "invalid Vproc value: %d\n", info->vproc_on_boot);
+ ret = dev_err_probe(info->cpu_dev, info->vproc_on_boot,
+ "invalid Vproc value\n");
goto out_disable_inter_clock;
}
}
@@ -487,8 +488,8 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
rate = clk_get_rate(info->inter_clk);
opp = dev_pm_opp_find_freq_ceil(cpu_dev, &rate);
if (IS_ERR(opp)) {
- dev_err(cpu_dev, "cpu%d: failed to get intermediate opp\n", cpu);
- ret = PTR_ERR(opp);
+ ret = dev_err_probe(cpu_dev, PTR_ERR(opp),
+ "cpu%d: failed to get intermediate opp\n", cpu);
goto out_disable_inter_clock;
}
info->intermediate_voltage = dev_pm_opp_get_voltage(opp);
@@ -501,7 +502,7 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
info->opp_nb.notifier_call = mtk_cpufreq_opp_notifier;
ret = dev_pm_opp_register_notifier(cpu_dev, &info->opp_nb);
if (ret) {
- dev_err(cpu_dev, "cpu%d: failed to register opp notifier\n", cpu);
+ dev_err_probe(cpu_dev, ret, "cpu%d: failed to register opp notifier\n", cpu);
goto out_disable_inter_clock;
}
@@ -599,13 +600,11 @@ static int mtk_cpufreq_init(struct cpufreq_policy *policy)
return 0;
}
-static int mtk_cpufreq_exit(struct cpufreq_policy *policy)
+static void mtk_cpufreq_exit(struct cpufreq_policy *policy)
{
struct mtk_cpu_dvfs_info *info = policy->driver_data;
dev_pm_opp_free_cpufreq_table(info->cpu_dev, &policy->freq_table);
-
- return 0;
}
static struct cpufreq_driver mtk_cpufreq_driver = {
@@ -629,11 +628,9 @@ static int mtk_cpufreq_probe(struct platform_device *pdev)
int cpu, ret;
data = dev_get_platdata(&pdev->dev);
- if (!data) {
- dev_err(&pdev->dev,
- "failed to get mtk cpufreq platform data\n");
- return -ENODEV;
- }
+ if (!data)
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "failed to get mtk cpufreq platform data\n");
for_each_possible_cpu(cpu) {
info = mtk_cpu_dvfs_info_lookup(cpu);
@@ -642,25 +639,22 @@ static int mtk_cpufreq_probe(struct platform_device *pdev)
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info) {
- ret = -ENOMEM;
+ ret = dev_err_probe(&pdev->dev, -ENOMEM,
+ "Failed to allocate dvfs_info\n");
goto release_dvfs_info_list;
}
info->soc_data = data;
ret = mtk_cpu_dvfs_info_init(info, cpu);
- if (ret) {
- dev_err(&pdev->dev,
- "failed to initialize dvfs info for cpu%d\n",
- cpu);
+ if (ret)
goto release_dvfs_info_list;
- }
list_add(&info->list_head, &dvfs_info_list);
}
ret = cpufreq_register_driver(&mtk_cpufreq_driver);
if (ret) {
- dev_err(&pdev->dev, "failed to register mtk cpufreq driver\n");
+ dev_err_probe(&pdev->dev, ret, "failed to register mtk cpufreq driver\n");
goto release_dvfs_info_list;
}
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index 895690856665..3458d5cc9b7f 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -135,11 +135,10 @@ static int omap_cpu_init(struct cpufreq_policy *policy)
return 0;
}
-static int omap_cpu_exit(struct cpufreq_policy *policy)
+static void omap_cpu_exit(struct cpufreq_policy *policy)
{
freq_table_free();
clk_put(policy->clk);
- return 0;
}
static struct cpufreq_driver omap_driver = {
diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
index 039a66bbe1be..ee925b53b6b9 100644
--- a/drivers/cpufreq/pasemi-cpufreq.c
+++ b/drivers/cpufreq/pasemi-cpufreq.c
@@ -204,21 +204,19 @@ out:
return err;
}
-static int pas_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+static void pas_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
/*
* We don't support CPU hotplug. Don't unmap after the system
* has already made it to a running state.
*/
if (system_state >= SYSTEM_RUNNING)
- return 0;
+ return;
if (sdcasr_mapbase)
iounmap(sdcasr_mapbase);
if (sdcpwr_mapbase)
iounmap(sdcpwr_mapbase);
-
- return 0;
}
static int pas_cpufreq_target(struct cpufreq_policy *policy,
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 6f8b5ea7aeae..771efbf51a48 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -562,18 +562,12 @@ out:
return result;
}
-static int pcc_cpufreq_cpu_exit(struct cpufreq_policy *policy)
-{
- return 0;
-}
-
static struct cpufreq_driver pcc_cpufreq_driver = {
.flags = CPUFREQ_CONST_LOOPS,
.get = pcc_get_freq,
.verify = pcc_cpufreq_verify,
.target = pcc_cpufreq_target,
.init = pcc_cpufreq_cpu_init,
- .exit = pcc_cpufreq_cpu_exit,
.name = "pcc-cpufreq",
};
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
index 41eefef95d87..f0a4a6c31204 100644
--- a/drivers/cpufreq/powernow-k6.c
+++ b/drivers/cpufreq/powernow-k6.c
@@ -219,7 +219,7 @@ have_busfreq:
}
-static int powernow_k6_cpu_exit(struct cpufreq_policy *policy)
+static void powernow_k6_cpu_exit(struct cpufreq_policy *policy)
{
unsigned int i;
@@ -234,10 +234,9 @@ static int powernow_k6_cpu_exit(struct cpufreq_policy *policy)
cpufreq_freq_transition_begin(policy, &freqs);
powernow_k6_target(policy, i);
cpufreq_freq_transition_end(policy, &freqs, 0);
- break;
+ return;
}
}
- return 0;
}
static unsigned int powernow_k6_get(unsigned int cpu)
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
index 5d515fc34836..4271446c8725 100644
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -644,7 +644,7 @@ static int powernow_cpu_init(struct cpufreq_policy *policy)
return 0;
}
-static int powernow_cpu_exit(struct cpufreq_policy *policy)
+static void powernow_cpu_exit(struct cpufreq_policy *policy)
{
#ifdef CONFIG_X86_POWERNOW_K7_ACPI
if (acpi_processor_perf) {
@@ -655,7 +655,6 @@ static int powernow_cpu_exit(struct cpufreq_policy *policy)
#endif
kfree(powernow_table);
- return 0;
}
static struct cpufreq_driver powernow_driver = {
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index b10f7a1b77f1..a01170f7d01c 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -1089,13 +1089,13 @@ err_out:
return -ENODEV;
}
-static int powernowk8_cpu_exit(struct cpufreq_policy *pol)
+static void powernowk8_cpu_exit(struct cpufreq_policy *pol)
{
struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
int cpu;
if (!data)
- return -EINVAL;
+ return;
powernow_k8_cpu_exit_acpi(data);
@@ -1104,8 +1104,6 @@ static int powernowk8_cpu_exit(struct cpufreq_policy *pol)
/* pol->cpus will be empty here, use related_cpus instead. */
for_each_cpu(cpu, pol->related_cpus)
per_cpu(powernow_data, cpu) = NULL;
-
- return 0;
}
static void query_values_on_cpu(void *_err)
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index fddbd1ea1635..50c62929f7ca 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -874,7 +874,7 @@ static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
return 0;
}
-static int powernv_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+static void powernv_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
struct powernv_smp_call_data freq_data;
struct global_pstate_info *gpstates = policy->driver_data;
@@ -886,8 +886,6 @@ static int powernv_cpufreq_cpu_exit(struct cpufreq_policy *policy)
del_timer_sync(&gpstates->timer);
kfree(policy->driver_data);
-
- return 0;
}
static int powernv_cpufreq_reboot_notifier(struct notifier_block *nb,
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c
index 88afc49941b7..5ee4c7bfdcc5 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq.c
+++ b/drivers/cpufreq/ppc_cbe_cpufreq.c
@@ -113,10 +113,9 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
return 0;
}
-static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+static void cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
cbe_cpufreq_pmi_policy_exit(policy);
- return 0;
}
static int cbe_cpufreq_target(struct cpufreq_policy *policy,
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index ec8df5496a0c..370fe6a0104b 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -573,7 +573,7 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
return qcom_cpufreq_hw_lmh_init(policy, index);
}
-static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
+static void qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
{
struct device *cpu_dev = get_cpu_device(policy->cpu);
struct qcom_cpufreq_data *data = policy->driver_data;
@@ -583,8 +583,6 @@ static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
qcom_cpufreq_hw_lmh_exit(data);
kfree(policy->freq_table);
kfree(data);
-
- return 0;
}
static void qcom_cpufreq_ready(struct cpufreq_policy *policy)
diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
index 0a46b5d49d32..939702dfa73f 100644
--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
@@ -456,7 +456,6 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
{
struct qcom_cpufreq_drv *drv;
struct nvmem_cell *speedbin_nvmem;
- struct device_node *np;
struct device *cpu_dev;
char pvs_name_buffer[] = "speedXX-pvsXX-vXX";
char *pvs_name = pvs_name_buffer;
@@ -468,16 +467,15 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
if (!cpu_dev)
return -ENODEV;
- np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
+ struct device_node *np __free(device_node) =
+ dev_pm_opp_of_get_opp_desc_node(cpu_dev);
if (!np)
return -ENOENT;
ret = of_device_is_compatible(np, "operating-points-v2-kryo-cpu") ||
of_device_is_compatible(np, "operating-points-v2-krait-cpu");
- if (!ret) {
- of_node_put(np);
+ if (!ret)
return -ENOENT;
- }
drv = devm_kzalloc(&pdev->dev, struct_size(drv, cpus, num_possible_cpus()),
GFP_KERNEL);
@@ -503,7 +501,6 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
}
nvmem_cell_put(speedbin_nvmem);
}
- of_node_put(np);
for_each_possible_cpu(cpu) {
struct device **virt_devs = NULL;
@@ -639,7 +636,7 @@ MODULE_DEVICE_TABLE(of, qcom_cpufreq_match_list);
*/
static int __init qcom_cpufreq_init(void)
{
- struct device_node *np = of_find_node_by_path("/");
+ struct device_node *np __free(device_node) = of_find_node_by_path("/");
const struct of_device_id *match;
int ret;
@@ -647,7 +644,6 @@ static int __init qcom_cpufreq_init(void)
return -ENODEV;
match = of_match_node(qcom_cpufreq_match_list, np);
- of_node_put(np);
if (!match)
return -ENODEV;
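The qcom-nvmem changes above (and the sun50i ones further down) replace manual of_node_put() calls with the scope-based cleanup helper from linux/cleanup.h. A minimal sketch of the pattern, using a hypothetical node path and compatible string and assuming the device_node cleanup class defined in linux/of.h:

#include <linux/cleanup.h>
#include <linux/of.h>

static int example_lookup(void)
{
	/* of_node_put() runs automatically when np goes out of scope. */
	struct device_node *np __free(device_node) =
		of_find_node_by_path("/hypothetical-node");

	if (!np)
		return -ENOENT;

	/* Early returns no longer need explicit of_node_put() calls. */
	if (!of_device_is_compatible(np, "vendor,hypothetical"))
		return -ENODEV;

	return 0;
}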
diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c
index 0aecaecbb0e6..3519bf34d397 100644
--- a/drivers/cpufreq/qoriq-cpufreq.c
+++ b/drivers/cpufreq/qoriq-cpufreq.c
@@ -225,7 +225,7 @@ err_np:
return -ENODEV;
}
-static int qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+static void qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
struct cpu_data *data = policy->driver_data;
@@ -233,8 +233,6 @@ static int qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy)
kfree(data->table);
kfree(data);
policy->driver_data = NULL;
-
- return 0;
}
static int qoriq_cpufreq_target(struct cpufreq_policy *policy,
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 3b4f6bfb2f4c..5892c73e129d 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -63,9 +63,9 @@ static unsigned int scmi_cpufreq_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
{
struct scmi_data *priv = policy->driver_data;
+ unsigned long freq = target_freq;
- if (!perf_ops->freq_set(ph, priv->domain_id,
- target_freq * 1000, true))
+ if (!perf_ops->freq_set(ph, priv->domain_id, freq * 1000, true))
return target_freq;
return 0;
@@ -308,7 +308,7 @@ out_free_priv:
return ret;
}
-static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
+static void scmi_cpufreq_exit(struct cpufreq_policy *policy)
{
struct scmi_data *priv = policy->driver_data;
@@ -316,8 +316,6 @@ static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
free_cpumask_var(priv->opp_shared_cpus);
kfree(priv);
-
- return 0;
}
static void scmi_cpufreq_register_em(struct cpufreq_policy *policy)
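The scmi_cpufreq_fast_switch() change above widens target_freq to unsigned long before the multiplication: target_freq is a 32-bit kHz value while the SCMI perf interface takes a rate in Hz, and 5,000,000 kHz * 1000 = 5,000,000,000 Hz exceeds UINT_MAX (4,294,967,295), wrapping to 705,032,704 if the multiply is done in 32 bits. An illustrative userspace sketch of the difference, assuming an LP64 target:

#include <stdio.h>

int main(void)
{
	unsigned int khz = 5000000;	/* hypothetical 5 GHz request, in kHz */

	unsigned long wrapped = khz * 1000;			/* 32-bit multiply wraps first */
	unsigned long widened = (unsigned long)khz * 1000;	/* widen before multiplying */

	printf("wrapped: %lu Hz, widened: %lu Hz\n", wrapped, widened);
	return 0;
}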
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index d33be56983ed..8d73e6e8be2a 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -167,7 +167,7 @@ out_free_opp:
return ret;
}
-static int scpi_cpufreq_exit(struct cpufreq_policy *policy)
+static void scpi_cpufreq_exit(struct cpufreq_policy *policy)
{
struct scpi_data *priv = policy->driver_data;
@@ -175,8 +175,6 @@ static int scpi_cpufreq_exit(struct cpufreq_policy *policy)
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
kfree(priv);
-
- return 0;
}
static struct cpufreq_driver scpi_cpufreq_driver = {
diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c
index b8704232c27b..aa74036d0420 100644
--- a/drivers/cpufreq/sh-cpufreq.c
+++ b/drivers/cpufreq/sh-cpufreq.c
@@ -135,14 +135,12 @@ static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy)
return 0;
}
-static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+static void sh_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
unsigned int cpu = policy->cpu;
struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
clk_put(cpuclk);
-
- return 0;
}
static struct cpufreq_driver sh_cpufreq_driver = {
diff --git a/drivers/cpufreq/sparc-us2e-cpufreq.c b/drivers/cpufreq/sparc-us2e-cpufreq.c
index 2783d3d55fce..8a0cd5312a59 100644
--- a/drivers/cpufreq/sparc-us2e-cpufreq.c
+++ b/drivers/cpufreq/sparc-us2e-cpufreq.c
@@ -296,10 +296,9 @@ static int us2e_freq_cpu_init(struct cpufreq_policy *policy)
return 0;
}
-static int us2e_freq_cpu_exit(struct cpufreq_policy *policy)
+static void us2e_freq_cpu_exit(struct cpufreq_policy *policy)
{
us2e_freq_target(policy, 0);
- return 0;
}
static struct cpufreq_driver cpufreq_us2e_driver = {
diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
index 6c3657679a88..b50f9d13e6d2 100644
--- a/drivers/cpufreq/sparc-us3-cpufreq.c
+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
@@ -140,10 +140,9 @@ static int us3_freq_cpu_init(struct cpufreq_policy *policy)
return 0;
}
-static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
+static void us3_freq_cpu_exit(struct cpufreq_policy *policy)
{
us3_freq_target(policy, 0);
- return 0;
}
static struct cpufreq_driver cpufreq_us3_driver = {
diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
index 75b10ecdb60f..3fafedb983b5 100644
--- a/drivers/cpufreq/speedstep-centrino.c
+++ b/drivers/cpufreq/speedstep-centrino.c
@@ -400,16 +400,12 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
return 0;
}
-static int centrino_cpu_exit(struct cpufreq_policy *policy)
+static void centrino_cpu_exit(struct cpufreq_policy *policy)
{
unsigned int cpu = policy->cpu;
- if (!per_cpu(centrino_model, cpu))
- return -ENODEV;
-
- per_cpu(centrino_model, cpu) = NULL;
-
- return 0;
+ if (per_cpu(centrino_model, cpu))
+ per_cpu(centrino_model, cpu) = NULL;
}
/**
@@ -520,10 +516,10 @@ static struct cpufreq_driver centrino_driver = {
* or ASCII model IDs.
*/
static const struct x86_cpu_id centrino_ids[] = {
- X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, 9, X86_FEATURE_EST, NULL),
- X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, 13, X86_FEATURE_EST, NULL),
- X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 15, 3, X86_FEATURE_EST, NULL),
- X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 15, 4, X86_FEATURE_EST, NULL),
+ X86_MATCH_VFM_FEATURE(IFM( 6, 9), X86_FEATURE_EST, NULL),
+ X86_MATCH_VFM_FEATURE(IFM( 6, 13), X86_FEATURE_EST, NULL),
+ X86_MATCH_VFM_FEATURE(IFM(15, 3), X86_FEATURE_EST, NULL),
+ X86_MATCH_VFM_FEATURE(IFM(15, 4), X86_FEATURE_EST, NULL),
{}
};
diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c
index 9c542e723a15..8e2e703c3865 100644
--- a/drivers/cpufreq/sti-cpufreq.c
+++ b/drivers/cpufreq/sti-cpufreq.c
@@ -18,7 +18,7 @@
#include <linux/regmap.h>
#define VERSION_ELEMENTS 3
-#define MAX_PCODE_NAME_LEN 7
+#define MAX_PCODE_NAME_LEN 16
#define VERSION_SHIFT 28
#define HW_INFO_INDEX 1
@@ -293,6 +293,7 @@ module_init(sti_cpufreq_init);
static const struct of_device_id __maybe_unused sti_cpufreq_of_match[] = {
{ .compatible = "st,stih407" },
{ .compatible = "st,stih410" },
+ { .compatible = "st,stih418" },
{ },
};
MODULE_DEVICE_TABLE(of, sti_cpufreq_of_match);
diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
index 0b882765cd66..95ac8d46c156 100644
--- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c
+++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
@@ -91,6 +91,9 @@ static u32 sun50i_h616_efuse_xlate(u32 speedbin)
case 0x5d00:
value = 0;
break;
+ case 0x6c00:
+ value = 5;
+ break;
default:
pr_warn("sun50i-cpufreq-nvmem: unknown speed bin 0x%x, using default bin 0\n",
speedbin & 0xffff);
@@ -131,26 +134,24 @@ static const struct of_device_id cpu_opp_match_list[] = {
static bool dt_has_supported_hw(void)
{
bool has_opp_supported_hw = false;
- struct device_node *np, *opp;
struct device *cpu_dev;
cpu_dev = get_cpu_device(0);
if (!cpu_dev)
return false;
- np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
+ struct device_node *np __free(device_node) =
+ dev_pm_opp_of_get_opp_desc_node(cpu_dev);
if (!np)
return false;
- for_each_child_of_node(np, opp) {
+ for_each_child_of_node_scoped(np, opp) {
if (of_find_property(opp, "opp-supported-hw", NULL)) {
has_opp_supported_hw = true;
break;
}
}
- of_node_put(np);
-
return has_opp_supported_hw;
}
@@ -165,7 +166,6 @@ static int sun50i_cpufreq_get_efuse(void)
const struct sunxi_cpufreq_data *opp_data;
struct nvmem_cell *speedbin_nvmem;
const struct of_device_id *match;
- struct device_node *np;
struct device *cpu_dev;
u32 *speedbin;
int ret;
@@ -174,19 +174,18 @@ static int sun50i_cpufreq_get_efuse(void)
if (!cpu_dev)
return -ENODEV;
- np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
+ struct device_node *np __free(device_node) =
+ dev_pm_opp_of_get_opp_desc_node(cpu_dev);
if (!np)
return -ENOENT;
match = of_match_node(cpu_opp_match_list, np);
- if (!match) {
- of_node_put(np);
+ if (!match)
return -ENOENT;
- }
+
opp_data = match->data;
speedbin_nvmem = of_nvmem_cell_get(np, NULL);
- of_node_put(np);
if (IS_ERR(speedbin_nvmem))
return dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem),
"Could not get nvmem cell\n");
@@ -301,14 +300,9 @@ MODULE_DEVICE_TABLE(of, sun50i_cpufreq_match_list);
static const struct of_device_id *sun50i_cpufreq_match_node(void)
{
- const struct of_device_id *match;
- struct device_node *np;
-
- np = of_find_node_by_path("/");
- match = of_match_node(sun50i_cpufreq_match_list, np);
- of_node_put(np);
+ struct device_node *np __free(device_node) = of_find_node_by_path("/");
- return match;
+ return of_match_node(sun50i_cpufreq_match_list, np);
}
/*
diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
index 59865ea455a8..07ea7ed61b68 100644
--- a/drivers/cpufreq/tegra194-cpufreq.c
+++ b/drivers/cpufreq/tegra194-cpufreq.c
@@ -551,14 +551,12 @@ static int tegra194_cpufreq_offline(struct cpufreq_policy *policy)
return 0;
}
-static int tegra194_cpufreq_exit(struct cpufreq_policy *policy)
+static void tegra194_cpufreq_exit(struct cpufreq_policy *policy)
{
struct device *cpu_dev = get_cpu_device(policy->cpu);
dev_pm_opp_remove_all_dynamic(cpu_dev);
dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
-
- return 0;
}
static int tegra194_cpufreq_set_target(struct cpufreq_policy *policy,
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index 714ed53753fa..4d3f27958fbd 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -47,6 +47,35 @@
#define AM625_SUPPORT_S_MPU_OPP BIT(1)
#define AM625_SUPPORT_T_MPU_OPP BIT(2)
+enum {
+ AM62A7_EFUSE_M_MPU_OPP = 13,
+ AM62A7_EFUSE_N_MPU_OPP,
+ AM62A7_EFUSE_O_MPU_OPP,
+ AM62A7_EFUSE_P_MPU_OPP,
+ AM62A7_EFUSE_Q_MPU_OPP,
+ AM62A7_EFUSE_R_MPU_OPP,
+ AM62A7_EFUSE_S_MPU_OPP,
+ /*
+ * The V, U, and T speed grade numbering is out of order
+ * to align with the AM625 more uniformly. I promise I know
+ * my ABCs ;)
+ */
+ AM62A7_EFUSE_V_MPU_OPP,
+ AM62A7_EFUSE_U_MPU_OPP,
+ AM62A7_EFUSE_T_MPU_OPP,
+};
+
+#define AM62A7_SUPPORT_N_MPU_OPP BIT(0)
+#define AM62A7_SUPPORT_R_MPU_OPP BIT(1)
+#define AM62A7_SUPPORT_V_MPU_OPP BIT(2)
+
+#define AM62P5_EFUSE_O_MPU_OPP 15
+#define AM62P5_EFUSE_S_MPU_OPP 19
+#define AM62P5_EFUSE_U_MPU_OPP 21
+
+#define AM62P5_SUPPORT_O_MPU_OPP BIT(0)
+#define AM62P5_SUPPORT_U_MPU_OPP BIT(2)
+
#define VERSION_COUNT 2
struct ti_cpufreq_data;
@@ -112,6 +141,49 @@ static unsigned long omap3_efuse_xlate(struct ti_cpufreq_data *opp_data,
return BIT(efuse);
}
+static unsigned long am62p5_efuse_xlate(struct ti_cpufreq_data *opp_data,
+ unsigned long efuse)
+{
+ unsigned long calculated_efuse = AM62P5_SUPPORT_O_MPU_OPP;
+
+ switch (efuse) {
+ case AM62P5_EFUSE_U_MPU_OPP:
+ case AM62P5_EFUSE_S_MPU_OPP:
+ calculated_efuse |= AM62P5_SUPPORT_U_MPU_OPP;
+ fallthrough;
+ case AM62P5_EFUSE_O_MPU_OPP:
+ calculated_efuse |= AM62P5_SUPPORT_O_MPU_OPP;
+ }
+
+ return calculated_efuse;
+}
+
+static unsigned long am62a7_efuse_xlate(struct ti_cpufreq_data *opp_data,
+ unsigned long efuse)
+{
+ unsigned long calculated_efuse = AM62A7_SUPPORT_N_MPU_OPP;
+
+ switch (efuse) {
+ case AM62A7_EFUSE_V_MPU_OPP:
+ case AM62A7_EFUSE_U_MPU_OPP:
+ case AM62A7_EFUSE_T_MPU_OPP:
+ case AM62A7_EFUSE_S_MPU_OPP:
+ calculated_efuse |= AM62A7_SUPPORT_V_MPU_OPP;
+ fallthrough;
+ case AM62A7_EFUSE_R_MPU_OPP:
+ case AM62A7_EFUSE_Q_MPU_OPP:
+ case AM62A7_EFUSE_P_MPU_OPP:
+ case AM62A7_EFUSE_O_MPU_OPP:
+ calculated_efuse |= AM62A7_SUPPORT_R_MPU_OPP;
+ fallthrough;
+ case AM62A7_EFUSE_N_MPU_OPP:
+ case AM62A7_EFUSE_M_MPU_OPP:
+ calculated_efuse |= AM62A7_SUPPORT_N_MPU_OPP;
+ }
+
+ return calculated_efuse;
+}
+
static unsigned long am625_efuse_xlate(struct ti_cpufreq_data *opp_data,
unsigned long efuse)
{
@@ -234,6 +306,24 @@ static struct ti_cpufreq_soc_data am625_soc_data = {
.multi_regulator = false,
};
+static struct ti_cpufreq_soc_data am62a7_soc_data = {
+ .efuse_xlate = am62a7_efuse_xlate,
+ .efuse_offset = 0x0,
+ .efuse_mask = 0x07c0,
+ .efuse_shift = 0x6,
+ .rev_offset = 0x0014,
+ .multi_regulator = false,
+};
+
+static struct ti_cpufreq_soc_data am62p5_soc_data = {
+ .efuse_xlate = am62p5_efuse_xlate,
+ .efuse_offset = 0x0,
+ .efuse_mask = 0x07c0,
+ .efuse_shift = 0x6,
+ .rev_offset = 0x0014,
+ .multi_regulator = false,
+};
+
/**
* ti_cpufreq_get_efuse() - Parse and return efuse value present on SoC
* @opp_data: pointer to ti_cpufreq_data context
@@ -337,8 +427,8 @@ static const struct of_device_id ti_cpufreq_of_match[] = {
{ .compatible = "ti,omap34xx", .data = &omap34xx_soc_data, },
{ .compatible = "ti,omap36xx", .data = &omap36xx_soc_data, },
{ .compatible = "ti,am625", .data = &am625_soc_data, },
- { .compatible = "ti,am62a7", .data = &am625_soc_data, },
- { .compatible = "ti,am62p5", .data = &am625_soc_data, },
+ { .compatible = "ti,am62a7", .data = &am62a7_soc_data, },
+ { .compatible = "ti,am62p5", .data = &am62p5_soc_data, },
/* legacy */
{ .compatible = "ti,omap3430", .data = &omap34xx_soc_data, },
{ .compatible = "ti,omap3630", .data = &omap36xx_soc_data, },
@@ -417,7 +507,7 @@ static int ti_cpufreq_probe(struct platform_device *pdev)
ret = dev_pm_opp_set_config(opp_data->cpu_dev, &config);
if (ret < 0) {
- dev_err(opp_data->cpu_dev, "Failed to set OPP config\n");
+ dev_err_probe(opp_data->cpu_dev, ret, "Failed to set OPP config\n");
goto fail_put_node;
}
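To make the fallthrough cascades in am62a7_efuse_xlate() and am62p5_efuse_xlate() above concrete with example fuse readings: an AM62A7 part fused to AM62A7_EFUSE_S_MPU_OPP falls through all three groups and reports AM62A7_SUPPORT_V_MPU_OPP | AM62A7_SUPPORT_R_MPU_OPP | AM62A7_SUPPORT_N_MPU_OPP (0b111), whereas one fused to AM62A7_EFUSE_M_MPU_OPP only hits the last group and reports AM62A7_SUPPORT_N_MPU_OPP (0b001); each fuse value thus enables its own OPP group plus every lower group.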
diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c
index 9ac4ea50b874..3fadf536c429 100644
--- a/drivers/cpufreq/vexpress-spc-cpufreq.c
+++ b/drivers/cpufreq/vexpress-spc-cpufreq.c
@@ -447,7 +447,7 @@ static int ve_spc_cpufreq_init(struct cpufreq_policy *policy)
return 0;
}
-static int ve_spc_cpufreq_exit(struct cpufreq_policy *policy)
+static void ve_spc_cpufreq_exit(struct cpufreq_policy *policy)
{
struct device *cpu_dev;
@@ -455,11 +455,10 @@ static int ve_spc_cpufreq_exit(struct cpufreq_policy *policy)
if (!cpu_dev) {
pr_err("%s: failed to get cpu%d device\n", __func__,
policy->cpu);
- return -ENODEV;
+ return;
}
put_cluster_clk_and_freq_table(cpu_dev, policy->related_cpus);
- return 0;
}
static struct cpufreq_driver ve_spc_cpufreq_driver = {
diff --git a/drivers/cpuidle/cpuidle-haltpoll.c b/drivers/cpuidle/cpuidle-haltpoll.c
index d8515d5c0853..bcd03e893a0a 100644
--- a/drivers/cpuidle/cpuidle-haltpoll.c
+++ b/drivers/cpuidle/cpuidle-haltpoll.c
@@ -141,5 +141,6 @@ static void __exit haltpoll_exit(void)
module_init(haltpoll_init);
module_exit(haltpoll_exit);
+MODULE_DESCRIPTION("cpuidle driver for haltpoll governor");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marcelo Tosatti <mtosatti@redhat.com>");
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index b96e3da0fedd..f3c9d49f0f2a 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -14,8 +14,6 @@
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
-#include <linux/sched.h>
-#include <linux/sched/loadavg.h>
#include <linux/sched/stat.h>
#include <linux/math64.h>
@@ -95,16 +93,11 @@
* state, and thus the less likely a busy CPU will hit such a deep
* C state.
*
- * Two factors are used in determing this multiplier:
- * a value of 10 is added for each point of "per cpu load average" we have.
- * a value of 5 points is added for each process that is waiting for
- * IO on this CPU.
- * (these values are experimentally determined)
- *
- * The load average factor gives a longer term (few seconds) input to the
- * decision, while the iowait value gives a cpu local instantanious input.
- * The iowait factor may look low, but realize that this is also already
- * represented in the system load average.
+ * Currently there is only one value determining the factor:
+ * 10 points are added for each process that is waiting for IO on this CPU.
+ * (This value was experimentally determined.)
+ * Utilization is no longer a factor as it was shown that it never contributed
+ * significantly to the performance multiplier in the first place.
*
*/
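Condensed to code, the rule stated in the rewritten comment amounts to the following sketch, assuming a helper along the lines of menu.c's performance_multiplier() that receives the per-CPU iowait count (treat the exact name and signature as assumptions):

/*
 * Sketch of the multiplier rule described above: one base point plus
 * ten points per task waiting for IO on this CPU.
 */
static unsigned int performance_multiplier(unsigned long nr_iowaiters)
{
	return 1 + 10 * nr_iowaiters;
}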
diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c
index 7244f71c59c5..f2992f92d8db 100644
--- a/drivers/cpuidle/governors/teo.c
+++ b/drivers/cpuidle/governors/teo.c
@@ -2,13 +2,8 @@
/*
* Timer events oriented CPU idle governor
*
- * TEO governor:
* Copyright (C) 2018 - 2021 Intel Corporation
* Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
- *
- * Util-awareness mechanism:
- * Copyright (C) 2022 Arm Ltd.
- * Author: Kajetan Puchalski <kajetan.puchalski@arm.com>
*/
/**
@@ -59,16 +54,12 @@
* shallower than the one whose bin is fallen into by the sleep length (these
* situations are referred to as "intercepts" below).
*
- * In addition to the metrics described above, the governor counts recent
- * intercepts (that is, intercepts that have occurred during the last
- * %NR_RECENT invocations of it for the given CPU) for each bin.
- *
* In order to select an idle state for a CPU, the governor takes the following
* steps (modulo the possible latency constraint that must be taken into account
* too):
*
* 1. Find the deepest CPU idle state whose target residency does not exceed
- * the current sleep length (the candidate idle state) and compute 3 sums as
+ * the current sleep length (the candidate idle state) and compute 2 sums as
* follows:
*
* - The sum of the "hits" and "intercepts" metrics for the candidate state
@@ -81,20 +72,15 @@
* idle long enough to avoid being intercepted if the sleep length had been
* equal to the current one).
*
- * - The sum of the numbers of recent intercepts for all of the idle states
- * shallower than the candidate one.
- *
- * 2. If the second sum is greater than the first one or the third sum is
- * greater than %NR_RECENT / 2, the CPU is likely to wake up early, so look
- * for an alternative idle state to select.
+ * 2. If the second sum is greater than the first one the CPU is likely to wake
+ * up early, so look for an alternative idle state to select.
*
* - Traverse the idle states shallower than the candidate one in the
* descending order.
*
- * - For each of them compute the sum of the "intercepts" metrics and the sum
- * of the numbers of recent intercepts over all of the idle states between
- * it and the candidate one (including the former and excluding the
- * latter).
+ * - For each of them compute the sum of the "intercepts" metrics over all
+ * of the idle states between it and the candidate one (including the
+ * former and excluding the latter).
*
* - If each of these sums that needs to be taken into account (because the
* check related to it has indicated that the CPU is likely to wake up
@@ -104,79 +90,31 @@
* select the given idle state instead of the candidate one.
*
* 3. By default, select the candidate state.
- *
- * Util-awareness mechanism:
- *
- * The idea behind the util-awareness extension is that there are two distinct
- * scenarios for the CPU which should result in two different approaches to idle
- * state selection - utilized and not utilized.
- *
- * In this case, 'utilized' means that the average runqueue util of the CPU is
- * above a certain threshold.
- *
- * When the CPU is utilized while going into idle, more likely than not it will
- * be woken up to do more work soon and so a shallower idle state should be
- * selected to minimise latency and maximise performance. When the CPU is not
- * being utilized, the usual metrics-based approach to selecting the deepest
- * available idle state should be preferred to take advantage of the power
- * saving.
- *
- * In order to achieve this, the governor uses a utilization threshold.
- * The threshold is computed per-CPU as a percentage of the CPU's capacity
- * by bit shifting the capacity value. Based on testing, the shift of 6 (~1.56%)
- * seems to be getting the best results.
- *
- * Before selecting the next idle state, the governor compares the current CPU
- * util to the precomputed util threshold. If it's below, it defaults to the
- * TEO metrics mechanism. If it's above, the closest shallower idle state will
- * be selected instead, as long as is not a polling state.
*/
#include <linux/cpuidle.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
-#include <linux/sched.h>
#include <linux/sched/clock.h>
-#include <linux/sched/topology.h>
#include <linux/tick.h>
#include "gov.h"
/*
- * The number of bits to shift the CPU's capacity by in order to determine
- * the utilized threshold.
- *
- * 6 was chosen based on testing as the number that achieved the best balance
- * of power and performance on average.
- *
- * The resulting threshold is high enough to not be triggered by background
- * noise and low enough to react quickly when activity starts to ramp up.
- */
-#define UTIL_THRESHOLD_SHIFT 6
-
-/*
* The PULSE value is added to metrics when they grow and the DECAY_SHIFT value
* is used for decreasing metrics on a regular basis.
*/
#define PULSE 1024
#define DECAY_SHIFT 3
-/*
- * Number of the most recent idle duration values to take into consideration for
- * the detection of recent early wakeup patterns.
- */
-#define NR_RECENT 9
-
/**
* struct teo_bin - Metrics used by the TEO cpuidle governor.
* @intercepts: The "intercepts" metric.
* @hits: The "hits" metric.
- * @recent: The number of recent "intercepts".
*/
struct teo_bin {
unsigned int intercepts;
unsigned int hits;
- unsigned int recent;
};
/**
@@ -185,42 +123,19 @@ struct teo_bin {
* @sleep_length_ns: Time till the closest timer event (at the selection time).
* @state_bins: Idle state data bins for this CPU.
* @total: Grand total of the "intercepts" and "hits" metrics for all bins.
- * @next_recent_idx: Index of the next @recent_idx entry to update.
- * @recent_idx: Indices of bins corresponding to recent "intercepts".
* @tick_hits: Number of "hits" after TICK_NSEC.
- * @util_threshold: Threshold above which the CPU is considered utilized
*/
struct teo_cpu {
s64 time_span_ns;
s64 sleep_length_ns;
struct teo_bin state_bins[CPUIDLE_STATE_MAX];
unsigned int total;
- int next_recent_idx;
- int recent_idx[NR_RECENT];
unsigned int tick_hits;
- unsigned long util_threshold;
};
static DEFINE_PER_CPU(struct teo_cpu, teo_cpus);
/**
- * teo_cpu_is_utilized - Check if the CPU's util is above the threshold
- * @cpu: Target CPU
- * @cpu_data: Governor CPU data for the target CPU
- */
-#ifdef CONFIG_SMP
-static bool teo_cpu_is_utilized(int cpu, struct teo_cpu *cpu_data)
-{
- return sched_cpu_util(cpu) > cpu_data->util_threshold;
-}
-#else
-static bool teo_cpu_is_utilized(int cpu, struct teo_cpu *cpu_data)
-{
- return false;
-}
-#endif
-
-/**
* teo_update - Update CPU metrics after wakeup.
* @drv: cpuidle driver containing state data.
* @dev: Target CPU.
@@ -286,13 +201,6 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
}
}
- i = cpu_data->next_recent_idx++;
- if (cpu_data->next_recent_idx >= NR_RECENT)
- cpu_data->next_recent_idx = 0;
-
- if (cpu_data->recent_idx[i] >= 0)
- cpu_data->state_bins[cpu_data->recent_idx[i]].recent--;
-
/*
* If the deepest state's target residency is below the tick length,
* make a record of it to help teo_select() decide whether or not
@@ -319,14 +227,10 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* Otherwise, update the "intercepts" metric for the bin fallen into by
* the measured idle duration.
*/
- if (idx_timer == idx_duration) {
+ if (idx_timer == idx_duration)
cpu_data->state_bins[idx_timer].hits += PULSE;
- cpu_data->recent_idx[i] = -1;
- } else {
+ else
cpu_data->state_bins[idx_duration].intercepts += PULSE;
- cpu_data->state_bins[idx_duration].recent++;
- cpu_data->recent_idx[i] = idx_duration;
- }
end:
cpu_data->total += PULSE;
@@ -379,14 +283,11 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
unsigned int tick_intercept_sum = 0;
unsigned int idx_intercept_sum = 0;
unsigned int intercept_sum = 0;
- unsigned int idx_recent_sum = 0;
- unsigned int recent_sum = 0;
unsigned int idx_hit_sum = 0;
unsigned int hit_sum = 0;
int constraint_idx = 0;
int idx0 = 0, idx = -1;
- bool alt_intercepts, alt_recent;
- bool cpu_utilized;
+ int prev_intercept_idx;
s64 duration_ns;
int i;
@@ -411,32 +312,6 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
if (!dev->states_usage[0].disable)
idx = 0;
- cpu_utilized = teo_cpu_is_utilized(dev->cpu, cpu_data);
- /*
- * If the CPU is being utilized over the threshold and there are only 2
- * states to choose from, the metrics need not be considered, so choose
- * the shallowest non-polling state and exit.
- */
- if (drv->state_count < 3 && cpu_utilized) {
- /*
- * If state 0 is enabled and it is not a polling one, select it
- * right away unless the scheduler tick has been stopped, in
- * which case care needs to be taken to leave the CPU in a deep
- * enough state in case it is not woken up any time soon after
- * all. If state 1 is disabled, though, state 0 must be used
- * anyway.
- */
- if ((!idx && !(drv->states[0].flags & CPUIDLE_FLAG_POLLING) &&
- teo_state_ok(0, drv)) || dev->states_usage[1].disable) {
- idx = 0;
- goto out_tick;
- }
- /* Assume that state 1 is not a polling one and use it. */
- idx = 1;
- duration_ns = drv->states[1].target_residency_ns;
- goto end;
- }
-
/* Compute the sums of metrics for early wakeup pattern detection. */
for (i = 1; i < drv->state_count; i++) {
struct teo_bin *prev_bin = &cpu_data->state_bins[i-1];
@@ -448,7 +323,6 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
*/
intercept_sum += prev_bin->intercepts;
hit_sum += prev_bin->hits;
- recent_sum += prev_bin->recent;
if (dev->states_usage[i].disable)
continue;
@@ -464,7 +338,6 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
/* Save the sums for the current state. */
idx_intercept_sum = intercept_sum;
idx_hit_sum = hit_sum;
- idx_recent_sum = recent_sum;
}
/* Avoid unnecessary overhead. */
@@ -489,37 +362,29 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* If the sum of the intercepts metric for all of the idle states
* shallower than the current candidate one (idx) is greater than the
* sum of the intercepts and hits metrics for the candidate state and
- * all of the deeper states, or the sum of the numbers of recent
- * intercepts over all of the states shallower than the candidate one
- * is greater than a half of the number of recent events taken into
- * account, a shallower idle state is likely to be a better choice.
+ * all of the deeper states, a shallower idle state is likely to be a
+ * better choice.
*/
- alt_intercepts = 2 * idx_intercept_sum > cpu_data->total - idx_hit_sum;
- alt_recent = idx_recent_sum > NR_RECENT / 2;
- if (alt_recent || alt_intercepts) {
+ prev_intercept_idx = idx;
+ if (2 * idx_intercept_sum > cpu_data->total - idx_hit_sum) {
int first_suitable_idx = idx;
/*
* Look for the deepest idle state whose target residency had
* not exceeded the idle duration in over a half of the relevant
- * cases (both with respect to intercepts overall and with
- * respect to the recent intercepts only) in the past.
+ * cases in the past.
*
* Take the possible duration limitation present if the tick
* has been stopped already into account.
*/
intercept_sum = 0;
- recent_sum = 0;
for (i = idx - 1; i >= 0; i--) {
struct teo_bin *bin = &cpu_data->state_bins[i];
intercept_sum += bin->intercepts;
- recent_sum += bin->recent;
- if ((!alt_recent || 2 * recent_sum > idx_recent_sum) &&
- (!alt_intercepts ||
- 2 * intercept_sum > idx_intercept_sum)) {
+ if (2 * intercept_sum > idx_intercept_sum) {
/*
* Use the current state unless it is too
* shallow or disabled, in which case take the
@@ -552,6 +417,15 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
first_suitable_idx = i;
}
}
+ if (!idx && prev_intercept_idx) {
+ /*
+ * We have to query the sleep length here otherwise we don't
+ * know after wakeup if our guess was correct.
+ */
+ duration_ns = tick_nohz_get_sleep_length(&delta_tick);
+ cpu_data->sleep_length_ns = duration_ns;
+ goto out_tick;
+ }
/*
* If there is a latency constraint, it may be necessary to select an
@@ -561,18 +435,6 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
idx = constraint_idx;
/*
- * If the CPU is being utilized over the threshold, choose a shallower
- * non-polling state to improve latency, unless the scheduler tick has
- * been stopped already and the shallower state's target residency is
- * not sufficiently large.
- */
- if (cpu_utilized) {
- i = teo_find_shallower_state(drv, dev, idx, KTIME_MAX, true);
- if (teo_state_ok(i, drv))
- idx = i;
- }
-
- /*
* Skip the timers check if state 0 is the current candidate one,
* because an immediate non-timer wakeup is expected in that case.
*/
@@ -592,7 +454,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
cpu_data->sleep_length_ns = duration_ns;
/*
- * If the closest expected timer is before the terget residency of the
+ * If the closest expected timer is before the target residency of the
* candidate state, a shallower one needs to be found.
*/
if (drv->states[idx].target_residency_ns > duration_ns) {
@@ -667,14 +529,8 @@ static int teo_enable_device(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{
struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
- unsigned long max_capacity = arch_scale_cpu_capacity(dev->cpu);
- int i;
memset(cpu_data, 0, sizeof(*cpu_data));
- cpu_data->util_threshold = max_capacity >> UTIL_THRESHOLD_SHIFT;
-
- for (i = 0; i < NR_RECENT; i++)
- cpu_data->recent_idx[i] = -1;
return 0;
}
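
The simplified check above can be illustrated outside the kernel. Below is a
minimal standalone sketch (toy numbers, not kernel code): a shallower state is
preferred only when the intercepts recorded for states shallower than the
candidate outweigh the intercepts plus hits recorded for the candidate and all
deeper states. In teo_select() the same comparison is written with running
sums as 2 * idx_intercept_sum > cpu_data->total - idx_hit_sum, as in the hunk
above.

/* Standalone illustration of the teo intercepts check (toy data, not kernel code). */
#include <stdio.h>

#define NR_STATES 4

int main(void)
{
	/* Hypothetical per-bin wakeup statistics for four idle states. */
	unsigned int intercepts[NR_STATES] = { 12, 30, 8, 5 };
	unsigned int hits[NR_STATES]       = {  2,  6, 9, 40 };
	int idx = 3;	/* candidate state picked from the expected sleep length */
	unsigned int shallow_intercepts = 0, deep_intercepts_and_hits = 0;
	int i;

	for (i = 0; i < idx; i++)
		shallow_intercepts += intercepts[i];
	for (i = idx; i < NR_STATES; i++)
		deep_intercepts_and_hits += intercepts[i] + hits[i];

	if (shallow_intercepts > deep_intercepts_and_hits)
		printf("look for a state shallower than %d\n", idx);
	else
		printf("keep candidate state %d\n", idx);

	return 0;
}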
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index e486027f8b07..9aab7abc2ae9 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -1494,53 +1494,53 @@ static const struct idle_cpu idle_cpu_srf __initconst = {
};
static const struct x86_cpu_id intel_idle_ids[] __initconst = {
- X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP, &idle_cpu_nhx),
- X86_MATCH_INTEL_FAM6_MODEL(NEHALEM, &idle_cpu_nehalem),
- X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_G, &idle_cpu_nehalem),
- X86_MATCH_INTEL_FAM6_MODEL(WESTMERE, &idle_cpu_nehalem),
- X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EP, &idle_cpu_nhx),
- X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EX, &idle_cpu_nhx),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_BONNELL, &idle_cpu_atom),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_BONNELL_MID, &idle_cpu_lincroft),
- X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EX, &idle_cpu_nhx),
- X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE, &idle_cpu_snb),
- X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X, &idle_cpu_snx),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_SALTWELL, &idle_cpu_atom),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, &idle_cpu_byt),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID, &idle_cpu_tangier),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, &idle_cpu_cht),
- X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE, &idle_cpu_ivb),
- X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X, &idle_cpu_ivt),
- X86_MATCH_INTEL_FAM6_MODEL(HASWELL, &idle_cpu_hsw),
- X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, &idle_cpu_hsx),
- X86_MATCH_INTEL_FAM6_MODEL(HASWELL_L, &idle_cpu_hsw),
- X86_MATCH_INTEL_FAM6_MODEL(HASWELL_G, &idle_cpu_hsw),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_D, &idle_cpu_avn),
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL, &idle_cpu_bdw),
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G, &idle_cpu_bdw),
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, &idle_cpu_bdx),
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, &idle_cpu_bdx),
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, &idle_cpu_skl),
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, &idle_cpu_skl),
- X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, &idle_cpu_skl),
- X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &idle_cpu_skl),
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &idle_cpu_skx),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &idle_cpu_icx),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &idle_cpu_icx),
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &idle_cpu_adl),
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &idle_cpu_adl_l),
- X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, &idle_cpu_mtl_l),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, &idle_cpu_gmt),
- X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &idle_cpu_spr),
- X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &idle_cpu_spr),
- X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &idle_cpu_knl),
- X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &idle_cpu_knl),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &idle_cpu_bxt),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &idle_cpu_bxt),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &idle_cpu_dnv),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &idle_cpu_snr),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT, &idle_cpu_grr),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, &idle_cpu_srf),
+ X86_MATCH_VFM(INTEL_NEHALEM_EP, &idle_cpu_nhx),
+ X86_MATCH_VFM(INTEL_NEHALEM, &idle_cpu_nehalem),
+ X86_MATCH_VFM(INTEL_NEHALEM_G, &idle_cpu_nehalem),
+ X86_MATCH_VFM(INTEL_WESTMERE, &idle_cpu_nehalem),
+ X86_MATCH_VFM(INTEL_WESTMERE_EP, &idle_cpu_nhx),
+ X86_MATCH_VFM(INTEL_NEHALEM_EX, &idle_cpu_nhx),
+ X86_MATCH_VFM(INTEL_ATOM_BONNELL, &idle_cpu_atom),
+ X86_MATCH_VFM(INTEL_ATOM_BONNELL_MID, &idle_cpu_lincroft),
+ X86_MATCH_VFM(INTEL_WESTMERE_EX, &idle_cpu_nhx),
+ X86_MATCH_VFM(INTEL_SANDYBRIDGE, &idle_cpu_snb),
+ X86_MATCH_VFM(INTEL_SANDYBRIDGE_X, &idle_cpu_snx),
+ X86_MATCH_VFM(INTEL_ATOM_SALTWELL, &idle_cpu_atom),
+ X86_MATCH_VFM(INTEL_ATOM_SILVERMONT, &idle_cpu_byt),
+ X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_MID, &idle_cpu_tangier),
+ X86_MATCH_VFM(INTEL_ATOM_AIRMONT, &idle_cpu_cht),
+ X86_MATCH_VFM(INTEL_IVYBRIDGE, &idle_cpu_ivb),
+ X86_MATCH_VFM(INTEL_IVYBRIDGE_X, &idle_cpu_ivt),
+ X86_MATCH_VFM(INTEL_HASWELL, &idle_cpu_hsw),
+ X86_MATCH_VFM(INTEL_HASWELL_X, &idle_cpu_hsx),
+ X86_MATCH_VFM(INTEL_HASWELL_L, &idle_cpu_hsw),
+ X86_MATCH_VFM(INTEL_HASWELL_G, &idle_cpu_hsw),
+ X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_D, &idle_cpu_avn),
+ X86_MATCH_VFM(INTEL_BROADWELL, &idle_cpu_bdw),
+ X86_MATCH_VFM(INTEL_BROADWELL_G, &idle_cpu_bdw),
+ X86_MATCH_VFM(INTEL_BROADWELL_X, &idle_cpu_bdx),
+ X86_MATCH_VFM(INTEL_BROADWELL_D, &idle_cpu_bdx),
+ X86_MATCH_VFM(INTEL_SKYLAKE_L, &idle_cpu_skl),
+ X86_MATCH_VFM(INTEL_SKYLAKE, &idle_cpu_skl),
+ X86_MATCH_VFM(INTEL_KABYLAKE_L, &idle_cpu_skl),
+ X86_MATCH_VFM(INTEL_KABYLAKE, &idle_cpu_skl),
+ X86_MATCH_VFM(INTEL_SKYLAKE_X, &idle_cpu_skx),
+ X86_MATCH_VFM(INTEL_ICELAKE_X, &idle_cpu_icx),
+ X86_MATCH_VFM(INTEL_ICELAKE_D, &idle_cpu_icx),
+ X86_MATCH_VFM(INTEL_ALDERLAKE, &idle_cpu_adl),
+ X86_MATCH_VFM(INTEL_ALDERLAKE_L, &idle_cpu_adl_l),
+ X86_MATCH_VFM(INTEL_METEORLAKE_L, &idle_cpu_mtl_l),
+ X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, &idle_cpu_gmt),
+ X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &idle_cpu_spr),
+ X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &idle_cpu_spr),
+ X86_MATCH_VFM(INTEL_XEON_PHI_KNL, &idle_cpu_knl),
+ X86_MATCH_VFM(INTEL_XEON_PHI_KNM, &idle_cpu_knl),
+ X86_MATCH_VFM(INTEL_ATOM_GOLDMONT, &idle_cpu_bxt),
+ X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_PLUS, &idle_cpu_bxt),
+ X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_D, &idle_cpu_dnv),
+ X86_MATCH_VFM(INTEL_ATOM_TREMONT_D, &idle_cpu_snr),
+ X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, &idle_cpu_grr),
+ X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, &idle_cpu_srf),
{}
};
@@ -1990,27 +1990,27 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
{
int cstate;
- switch (boot_cpu_data.x86_model) {
- case INTEL_FAM6_IVYBRIDGE_X:
+ switch (boot_cpu_data.x86_vfm) {
+ case INTEL_IVYBRIDGE_X:
ivt_idle_state_table_update();
break;
- case INTEL_FAM6_ATOM_GOLDMONT:
- case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
+ case INTEL_ATOM_GOLDMONT:
+ case INTEL_ATOM_GOLDMONT_PLUS:
bxt_idle_state_table_update();
break;
- case INTEL_FAM6_SKYLAKE:
+ case INTEL_SKYLAKE:
sklh_idle_state_table_update();
break;
- case INTEL_FAM6_SKYLAKE_X:
+ case INTEL_SKYLAKE_X:
skx_idle_state_table_update();
break;
- case INTEL_FAM6_SAPPHIRERAPIDS_X:
- case INTEL_FAM6_EMERALDRAPIDS_X:
+ case INTEL_SAPPHIRERAPIDS_X:
+ case INTEL_EMERALDRAPIDS_X:
spr_idle_state_table_update();
break;
- case INTEL_FAM6_ALDERLAKE:
- case INTEL_FAM6_ALDERLAKE_L:
- case INTEL_FAM6_ATOM_GRACEMONT:
+ case INTEL_ALDERLAKE:
+ case INTEL_ALDERLAKE_L:
+ case INTEL_ATOM_GRACEMONT:
adl_idle_state_table_update();
break;
}
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index cb4611fe1b5b..5f4598246a87 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -1102,8 +1102,7 @@ static int _set_required_opps(struct device *dev, struct opp_table *opp_table,
return 0;
}
-static int _set_opp_level(struct device *dev, struct opp_table *opp_table,
- struct dev_pm_opp *opp)
+static int _set_opp_level(struct device *dev, struct dev_pm_opp *opp)
{
unsigned int level = 0;
int ret = 0;
@@ -1171,7 +1170,7 @@ static int _disable_opp_table(struct device *dev, struct opp_table *opp_table)
if (opp_table->regulators)
regulator_disable(opp_table->regulators[0]);
- ret = _set_opp_level(dev, opp_table, NULL);
+ ret = _set_opp_level(dev, NULL);
if (ret)
goto out;
@@ -1220,7 +1219,7 @@ static int _set_opp(struct device *dev, struct opp_table *opp_table,
return ret;
}
- ret = _set_opp_level(dev, opp_table, opp);
+ ret = _set_opp_level(dev, opp);
if (ret)
return ret;
@@ -1267,7 +1266,7 @@ static int _set_opp(struct device *dev, struct opp_table *opp_table,
return ret;
}
- ret = _set_opp_level(dev, opp_table, opp);
+ ret = _set_opp_level(dev, opp);
if (ret)
return ret;
@@ -2443,8 +2442,10 @@ static int _opp_attach_genpd(struct opp_table *opp_table, struct device *dev,
* Cross check it again and fix if required.
*/
gdev = dev_to_genpd_dev(virt_dev);
- if (IS_ERR(gdev))
- return PTR_ERR(gdev);
+ if (IS_ERR(gdev)) {
+ ret = PTR_ERR(gdev);
+ goto err;
+ }
genpd_table = _find_opp_table(gdev);
if (!IS_ERR(genpd_table)) {
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index 282eb5966fd0..55c8cfef97d4 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -1444,6 +1444,38 @@ put_required_np:
EXPORT_SYMBOL_GPL(of_get_required_opp_performance_state);
/**
+ * dev_pm_opp_of_has_required_opp - Find out if a required-opps exists.
+ * @dev: The device to investigate.
+ *
+ * Returns true if the device's node has an "operating-points-v2" property and
+ * if the corresponding node for the opp-table describes opp nodes that use the
+ * "required-opps" property.
+ *
+ * Return: True if a required-opps is present, else false.
+ */
+bool dev_pm_opp_of_has_required_opp(struct device *dev)
+{
+ struct device_node *opp_np, *np;
+ int count;
+
+ opp_np = _opp_of_get_opp_desc_node(dev->of_node, 0);
+ if (!opp_np)
+ return false;
+
+ np = of_get_next_available_child(opp_np, NULL);
+ of_node_put(opp_np);
+ if (!np) {
+ dev_warn(dev, "Empty OPP table\n");
+ return false;
+ }
+
+ count = of_count_phandle_with_args(np, "required-opps", NULL);
+ of_node_put(np);
+
+ return count > 0;
+}
+
+/**
* dev_pm_opp_get_of_node() - Gets the DT node corresponding to an opp
* @opp: opp for which DT node has to be returned for
*
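
For reference, here is a hedged sketch of how a platform driver might consume
the new helper (the device and function names are made up, and this would only
build as part of a kernel module):

/* Hypothetical consumer of dev_pm_opp_of_has_required_opp() (illustration only). */
#include <linux/device.h>
#include <linux/pm_opp.h>

static int foo_setup_perf_states(struct device *dev)
{
	/*
	 * Skip the required-opps/genpd plumbing entirely when the device's
	 * OPP table does not reference any required-opps.
	 */
	if (!dev_pm_opp_of_has_required_opp(dev))
		return 0;

	/* ... attach power domains and handle performance states here ... */
	return 0;
}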
diff --git a/drivers/opp/ti-opp-supply.c b/drivers/opp/ti-opp-supply.c
index e3b97cd1fbbf..ec0056a4bb13 100644
--- a/drivers/opp/ti-opp-supply.c
+++ b/drivers/opp/ti-opp-supply.c
@@ -393,10 +393,12 @@ static int ti_opp_supply_probe(struct platform_device *pdev)
}
ret = dev_pm_opp_set_config_regulators(cpu_dev, ti_opp_config_regulators);
- if (ret < 0)
+ if (ret < 0) {
_free_optimized_voltages(dev, &opp_data);
+ return ret;
+ }
- return ret;
+ return 0;
}
static struct platform_driver ti_opp_supply_driver = {
diff --git a/drivers/powercap/idle_inject.c b/drivers/powercap/idle_inject.c
index e18a2cc4e46a..bafc59904ed3 100644
--- a/drivers/powercap/idle_inject.c
+++ b/drivers/powercap/idle_inject.c
@@ -127,7 +127,7 @@ static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
struct idle_inject_device *ii_dev =
container_of(timer, struct idle_inject_device, timer);
- if (!ii_dev->update || (ii_dev->update && ii_dev->update()))
+ if (!ii_dev->update || ii_dev->update())
idle_inject_wakeup(ii_dev);
duration_us = READ_ONCE(ii_dev->run_duration_us);
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index aac0744011a3..3cffa6c79538 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -1222,66 +1222,66 @@ static const struct rapl_defaults rapl_defaults_amd = {
};
static const struct x86_cpu_id rapl_ids[] __initconst = {
- X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X, &rapl_defaults_core),
-
- X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X, &rapl_defaults_core),
-
- X86_MATCH_INTEL_FAM6_MODEL(HASWELL, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(HASWELL_L, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(HASWELL_G, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, &rapl_defaults_hsw_server),
-
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, &rapl_defaults_hsw_server),
-
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &rapl_defaults_hsw_server),
- X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_NNPI, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &rapl_defaults_hsw_server),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &rapl_defaults_hsw_server),
- X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &rapl_defaults_spr_server),
- X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &rapl_defaults_spr_server),
- X86_MATCH_INTEL_FAM6_MODEL(LUNARLAKE_M, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(ARROWLAKE_H, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(ARROWLAKE, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(LAKEFIELD, &rapl_defaults_core),
-
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, &rapl_defaults_byt),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, &rapl_defaults_cht),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID, &rapl_defaults_tng),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT_MID, &rapl_defaults_ann),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, &rapl_defaults_core),
-
- X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &rapl_defaults_hsw_server),
- X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &rapl_defaults_hsw_server),
+ X86_MATCH_VFM(INTEL_SANDYBRIDGE, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_SANDYBRIDGE_X, &rapl_defaults_core),
+
+ X86_MATCH_VFM(INTEL_IVYBRIDGE, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_IVYBRIDGE_X, &rapl_defaults_core),
+
+ X86_MATCH_VFM(INTEL_HASWELL, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_HASWELL_L, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_HASWELL_G, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_HASWELL_X, &rapl_defaults_hsw_server),
+
+ X86_MATCH_VFM(INTEL_BROADWELL, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_BROADWELL_G, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_BROADWELL_D, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_BROADWELL_X, &rapl_defaults_hsw_server),
+
+ X86_MATCH_VFM(INTEL_SKYLAKE, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_SKYLAKE_L, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_SKYLAKE_X, &rapl_defaults_hsw_server),
+ X86_MATCH_VFM(INTEL_KABYLAKE_L, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_KABYLAKE, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_CANNONLAKE_L, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_ICELAKE_L, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_ICELAKE, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_ICELAKE_NNPI, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_ICELAKE_X, &rapl_defaults_hsw_server),
+ X86_MATCH_VFM(INTEL_ICELAKE_D, &rapl_defaults_hsw_server),
+ X86_MATCH_VFM(INTEL_COMETLAKE_L, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_COMETLAKE, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_TIGERLAKE_L, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_TIGERLAKE, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_ROCKETLAKE, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_ALDERLAKE, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_ALDERLAKE_L, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_P, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_S, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_METEORLAKE, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_METEORLAKE_L, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &rapl_defaults_spr_server),
+ X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &rapl_defaults_spr_server),
+ X86_MATCH_VFM(INTEL_LUNARLAKE_M, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_ARROWLAKE_H, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_ARROWLAKE, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_LAKEFIELD, &rapl_defaults_core),
+
+ X86_MATCH_VFM(INTEL_ATOM_SILVERMONT, &rapl_defaults_byt),
+ X86_MATCH_VFM(INTEL_ATOM_AIRMONT, &rapl_defaults_cht),
+ X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_MID, &rapl_defaults_tng),
+ X86_MATCH_VFM(INTEL_ATOM_AIRMONT_MID, &rapl_defaults_ann),
+ X86_MATCH_VFM(INTEL_ATOM_GOLDMONT, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_PLUS, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_D, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_ATOM_TREMONT, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_ATOM_TREMONT_D, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_ATOM_TREMONT_L, &rapl_defaults_core),
+
+ X86_MATCH_VFM(INTEL_XEON_PHI_KNL, &rapl_defaults_hsw_server),
+ X86_MATCH_VFM(INTEL_XEON_PHI_KNM, &rapl_defaults_hsw_server),
X86_MATCH_VENDOR_FAM(AMD, 0x17, &rapl_defaults_amd),
X86_MATCH_VENDOR_FAM(AMD, 0x19, &rapl_defaults_amd),
diff --git a/drivers/powercap/intel_rapl_msr.c b/drivers/powercap/intel_rapl_msr.c
index 35cb152fa9aa..733a36f67fbc 100644
--- a/drivers/powercap/intel_rapl_msr.c
+++ b/drivers/powercap/intel_rapl_msr.c
@@ -139,14 +139,14 @@ static int rapl_msr_write_raw(int cpu, struct reg_action *ra)
/* List of verified CPUs. */
static const struct x86_cpu_id pl4_support_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_TIGERLAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_ALDERLAKE, NULL),
+ X86_MATCH_VFM(INTEL_ALDERLAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, NULL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE, NULL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_P, NULL),
+ X86_MATCH_VFM(INTEL_METEORLAKE, NULL),
+ X86_MATCH_VFM(INTEL_METEORLAKE_L, NULL),
{}
};
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 20f7e98ee8af..d4d2f4d1d7cb 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -396,7 +396,7 @@ struct cpufreq_driver {
int (*online)(struct cpufreq_policy *policy);
int (*offline)(struct cpufreq_policy *policy);
- int (*exit)(struct cpufreq_policy *policy);
+ void (*exit)(struct cpufreq_policy *policy);
int (*suspend)(struct cpufreq_policy *policy);
int (*resume)(struct cpufreq_policy *policy);
@@ -785,7 +785,7 @@ ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);
#ifdef CONFIG_CPU_FREQ
int cpufreq_boost_trigger_state(int state);
-int cpufreq_boost_enabled(void);
+bool cpufreq_boost_enabled(void);
int cpufreq_enable_boost_support(void);
bool policy_has_boost_freq(struct cpufreq_policy *policy);
@@ -1164,9 +1164,9 @@ static inline int cpufreq_boost_trigger_state(int state)
{
return 0;
}
-static inline int cpufreq_boost_enabled(void)
+static inline bool cpufreq_boost_enabled(void)
{
- return 0;
+ return false;
}
static inline int cpufreq_enable_boost_support(void)
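
With .exit() now returning void, a driver's teardown callback just cleans up
and reports nothing back. A hedged sketch of what a converted callback looks
like (driver name and per-policy data are illustrative, not taken from this
series):

/* Illustrative cpufreq driver using the new void .exit() signature. */
#include <linux/cpufreq.h>
#include <linux/slab.h>

static void foo_cpufreq_exit(struct cpufreq_policy *policy)
{
	/* Release per-policy data; there is no status left to propagate. */
	kfree(policy->driver_data);
	policy->driver_data = NULL;
}

static struct cpufreq_driver foo_cpufreq_driver = {
	.name = "foo-cpufreq",
	.exit = foo_cpufreq_exit,
	/* ... .init, .target_index and friends elided ... */
};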
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index dd7c8441af42..6424692c30b7 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -474,6 +474,7 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpuma
struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev);
struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp);
int of_get_required_opp_performance_state(struct device_node *np, int index);
+bool dev_pm_opp_of_has_required_opp(struct device *dev);
int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table);
int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus);
int dev_pm_opp_calc_power(struct device *dev, unsigned long *uW,
@@ -552,6 +553,11 @@ static inline int of_get_required_opp_performance_state(struct device_node *np,
return -EOPNOTSUPP;
}
+static inline bool dev_pm_opp_of_has_required_opp(struct device *dev)
+{
+ return false;
+}
+
static inline int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table)
{
return -EOPNOTSUPP;
diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile
index b53753dee02f..6c02f401069e 100644
--- a/tools/power/cpupower/Makefile
+++ b/tools/power/cpupower/Makefile
@@ -67,6 +67,7 @@ LANGUAGES = de fr it cs pt ka
bindir ?= /usr/bin
sbindir ?= /usr/sbin
mandir ?= /usr/man
+libdir ?= /usr/lib
includedir ?= /usr/include
localedir ?= /usr/share/locale
docdir ?= /usr/share/doc/packages/cpupower
@@ -94,15 +95,6 @@ RANLIB = $(CROSS)ranlib
HOSTCC = gcc
MKDIR = mkdir
-# 64bit library detection
-include ../../scripts/Makefile.arch
-
-ifeq ($(IS_64_BIT), 1)
-libdir ?= /usr/lib64
-else
-libdir ?= /usr/lib
-endif
-
# Now we set up the build system
#
@@ -332,4 +324,39 @@ uninstall:
rm -f $(DESTDIR)${localedir}/$$HLANG/LC_MESSAGES/cpupower.mo; \
done;
-.PHONY: all utils libcpupower update-po create-gmo install-lib install-tools install-man install-gmo install uninstall clean
+help:
+ @echo 'Building targets:'
+ @echo ' all - Default target. Could be omitted. Put build artifacts'
+ @echo ' to "O" cmdline option dir (default: current dir)'
+ @echo ' install - Install previously built project files from the output'
+ @echo ' dir defined by "O" cmdline option (default: current dir)'
+ @echo ' to the install dir defined by "DESTDIR" cmdline or'
+ @echo ' Makefile config block option (default: "")'
+ @echo ' install-lib - Install previously built library binary from the output'
+ @echo ' dir defined by "O" cmdline option (default: current dir)'
+ @echo ' and library headers from "lib/" for userspace to the install'
+ @echo ' dir defined by "DESTDIR" cmdline (default: "")'
+ @echo ' install-tools - Install previously built "cpupower" util from the output'
+ @echo ' dir defined by "O" cmdline option (default: current dir) and'
+ @echo ' "cpupower-completion.sh" script from the src dir to the'
+ @echo ' install dir defined by "DESTDIR" cmdline or Makefile'
+ @echo ' config block option (default: "")'
+ @echo ' install-man - Install man pages from the "man" src subdir to the'
+ @echo ' install dir defined by "DESTDIR" cmdline or Makefile'
+ @echo ' config block option (default: "")'
+ @echo ' install-gmo - Install previously built language files from the output'
+ @echo ' dir defined by "O" cmdline option (default: current dir)'
+ @echo ' to the install dir defined by "DESTDIR" cmdline or Makefile'
+ @echo ' config block option (default: "")'
+ @echo ' install-bench - Install previously built "cpufreq-bench" util files from the'
+ @echo ' output dir defined by "O" cmdline option (default: current dir)'
+ @echo ' to the install dir defined by "DESTDIR" cmdline or Makefile'
+ @echo ' config block option (default: "")'
+ @echo ''
+ @echo 'Cleaning targets:'
+ @echo ' clean - Clean build artifacts from the dir defined by "O" cmdline'
+ @echo ' option (default: current dir)'
+ @echo ' uninstall - Remove previously installed files from the dir defined by "DESTDIR"'
+ @echo ' cmdline or Makefile config block option (default: "")'
+
+.PHONY: all utils libcpupower update-po create-gmo install-lib install-tools install-man install-gmo install uninstall clean help
diff --git a/tools/power/cpupower/README b/tools/power/cpupower/README
index 1c68f47663b2..2678ed81d311 100644
--- a/tools/power/cpupower/README
+++ b/tools/power/cpupower/README
@@ -22,16 +22,156 @@ interfaces [depending on configuration, see below].
compilation and installation
----------------------------
-make
-su
-make install
-
-should suffice on most systems. It builds libcpupower to put in
-/usr/lib; cpupower, cpufreq-bench_plot.sh to put in /usr/bin; and
-cpufreq-bench to put in /usr/sbin. If you want to set up the paths
-differently and/or want to configure the package to your specific
-needs, you need to open "Makefile" with an editor of your choice and
-edit the block marked CONFIGURATION.
+There are two output directories: one for the build output and another for
+the installation of the build results, that is, the utility, library,
+man pages, etc.
+
+default directory
+-----------------
+
+In the case of the default directory, the build and install process requires
+no additional parameters:
+
+build
+-----
+
+$ make
+
+The output directory for the 'make' command is the current directory and
+its subdirs in the kernel tree:
+tools/power/cpupower
+
+install
+-------
+
+$ sudo make install
+
+The 'make install' command puts the targets into the default system dirs:
+
+-----------------------------------------------------------------------
+| Installing file | System dir |
+-----------------------------------------------------------------------
+| libcpupower | /usr/lib |
+-----------------------------------------------------------------------
+| cpupower | /usr/bin |
+-----------------------------------------------------------------------
+| cpufreq-bench_plot.sh | /usr/bin |
+-----------------------------------------------------------------------
+| man pages | /usr/man |
+-----------------------------------------------------------------------
+
+In other words, it makes the build results available system-wide, enabling
+any user to simply start using them without any additional steps.
+
+custom directory
+----------------
+
+There are two make command-line variables, 'O' and 'DESTDIR', that set up
+the appropriate dirs:
+'O' - build directory
+'DESTDIR' - installation directory. This variable can also be set up in
+the 'CONFIGURATION' block of the "Makefile".
+
+build
+-----
+
+$ make O=<your_custom_build_catalog>
+
+Example:
+$ make O=/home/hedin/prj/cpupower/build
+
+install
+-------
+
+$ make O=<your_custom_build_catalog> DESTDIR=<your_custom_install_catalog>
+
+Example:
+$ make O=/home/hedin/prj/cpupower/build DESTDIR=/home/hedin/prj/cpupower \
+> install
+
+Notice that both variables 'O' and 'DESTDIR' have been provided. The reason
+is that the build results are saved in the custom output dir defined by the
+'O' variable. So, this dir is the source for the installation step. If only
+'DESTDIR' were provided then the 'install' target would assume that the
+build directory is the current one, build everything there and install
+from the current dir.
+
+The files will be installed to the following dirs:
+
+-----------------------------------------------------------------------
+| Installing file | System dir |
+-----------------------------------------------------------------------
+| libcpupower | ${DESTDIR}/usr/lib |
+-----------------------------------------------------------------------
+| cpupower | ${DESTDIR}/usr/bin |
+-----------------------------------------------------------------------
+| cpufreq-bench_plot.sh | ${DESTDIR}/usr/bin |
+-----------------------------------------------------------------------
+| man pages | ${DESTDIR}/usr/man |
+-----------------------------------------------------------------------
+
+If you look at the table for the default 'make' output dirs you will
+notice that the only difference with the non-default case is the
+${DESTDIR} prefix. So, the structure of the output dirs remains the same
+regardless of the root output directory.
+
+
+clean and uninstall
+-------------------
+
+The 'clean' target is intended for cleaning the build catalog of build results.
+The 'uninstall' target is intended for removing installed files from the
+installation directory.
+
+default directory
+-----------------
+
+This case is a straightforward one:
+$ make clean
+$ make uninstall
+
+custom directory
+----------------
+
+Use 'O' command line variable to remove previously built files from the
+build dir:
+$ make O=<your_custom_build_catalog> clean
+
+Example:
+$ make O=/home/hedin/prj/cpupower/build clean
+
+Use 'DESTDIR' command line variable to uninstall previously installed files
+from the given dir:
+$ make DESTDIR=<your_custom_install_catalog> uninstall
+
+Example:
+make DESTDIR=/home/hedin/prj/cpupower uninstall
+
+
+running the tool
+----------------
+
+default directory
+-----------------
+
+$ sudo cpupower
+
+custom directory
+----------------
+
+When it comes to running the utility from the custom build catalog, things
+become a little bit complicated, as the 'just run' approach doesn't work.
+Assuming that the current dir is '<your_custom_install_catalog>/usr',
+issuing the following command:
+
+$ sudo ./bin/cpupower
+will produce the following error output:
+./bin/cpupower: error while loading shared libraries: libcpupower.so.1:
+cannot open shared object file: No such file or directory
+
+The issue is that the binary cannot find the 'libcpupower' library. So, we
+have to point it to the lib dir:
+$ sudo LD_LIBRARY_PATH=lib64/ ./bin/cpupower
THANKS
diff --git a/tools/power/cpupower/bench/Makefile b/tools/power/cpupower/bench/Makefile
index a4b902f9e1c4..34e5894476eb 100644
--- a/tools/power/cpupower/bench/Makefile
+++ b/tools/power/cpupower/bench/Makefile
@@ -1,4 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
+ifeq ($(MAKELEVEL),0)
+$(error This Makefile is not intended to be run standalone, but only as a part \
+of the main one in the parent dir)
+endif
+
OUTPUT := ./
ifeq ("$(origin O)", "command line")
ifneq ($(O),)
diff --git a/tools/power/cpupower/man/cpupower-monitor.1 b/tools/power/cpupower/man/cpupower-monitor.1
index 8ee737eefa5c..89af019f8dc4 100644
--- a/tools/power/cpupower/man/cpupower-monitor.1
+++ b/tools/power/cpupower/man/cpupower-monitor.1
@@ -81,11 +81,6 @@ Measure idle and frequency characteristics of an arbitrary command/workload.
The executable \fBcommand\fP is forked and upon its exit, statistics gathered since it was
forked are displayed.
.RE
-.PP
-\-v
-.RS 4
-Increase verbosity if the binary was compiled with the DEBUG option set.
-.RE
.SH MONITOR DESCRIPTIONS
.SS "Idle_Stats"
@@ -172,9 +167,11 @@ displayed.
"BIOS and Kernel Developer’s Guide (BKDG) for AMD Family 14h Processors"
https://support.amd.com/us/Processor_TechDocs/43170.pdf
-"Intel® Turbo Boost Technology
-in Intel® Core™ Microarchitecture (Nehalem) Based Processors"
-http://download.intel.com/design/processor/applnots/320354.pdf
+"What Is Intel® Turbo Boost Technology?"
+https://www.intel.com/content/www/us/en/gaming/resources/turbo-boost.html
+
+"Power Management - Technology Overview"
+https://cdrdv2.intel.com/v1/dl/getContent/637748
"Intel® 64 and IA-32 Architectures Software Developer's Manual
Volume 3B: System Programming Guide"
diff --git a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
index 075e766ff1f3..f746099b5dac 100644
--- a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
+++ b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
@@ -35,7 +35,7 @@ static unsigned int avail_monitors;
static char *progname;
enum operation_mode_e { list = 1, show, show_all };
-static int mode;
+static enum operation_mode_e mode;
static int interval = 1;
static char *show_monitors_param;
static struct cpupower_topology cpu_top;
diff --git a/tools/power/pm-graph/bootgraph.py b/tools/power/pm-graph/bootgraph.py
index f96f50e0c336..8a3ef94fe88f 100755
--- a/tools/power/pm-graph/bootgraph.py
+++ b/tools/power/pm-graph/bootgraph.py
@@ -77,12 +77,12 @@ class SystemValues(aslib.SystemValues):
fp.close()
self.testdir = datetime.now().strftime('boot-%y%m%d-%H%M%S')
def kernelVersion(self, msg):
- m = re.match('^[Ll]inux *[Vv]ersion *(?P<v>\S*) .*', msg)
+ m = re.match(r'^[Ll]inux *[Vv]ersion *(?P<v>\S*) .*', msg)
if m:
return m.group('v')
return 'unknown'
def checkFtraceKernelVersion(self):
- m = re.match('^(?P<x>[0-9]*)\.(?P<y>[0-9]*)\.(?P<z>[0-9]*).*', self.kernel)
+ m = re.match(r'^(?P<x>[0-9]*)\.(?P<y>[0-9]*)\.(?P<z>[0-9]*).*', self.kernel)
if m:
val = tuple(map(int, m.groups()))
if val >= (4, 10, 0):
@@ -324,7 +324,7 @@ def parseKernelLog():
idx = line.find('[')
if idx > 1:
line = line[idx:]
- m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
+ m = re.match(r'[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if(not m):
continue
ktime = float(m.group('ktime'))
@@ -332,24 +332,24 @@ def parseKernelLog():
break
msg = m.group('msg')
data.dmesgtext.append(line)
- if(ktime == 0.0 and re.match('^Linux version .*', msg)):
+ if(ktime == 0.0 and re.match(r'^Linux version .*', msg)):
if(not sysvals.stamp['kernel']):
sysvals.stamp['kernel'] = sysvals.kernelVersion(msg)
continue
- m = re.match('.* setting system clock to (?P<d>[0-9\-]*)[ A-Z](?P<t>[0-9:]*) UTC.*', msg)
+ m = re.match(r'.* setting system clock to (?P<d>[0-9\-]*)[ A-Z](?P<t>[0-9:]*) UTC.*', msg)
if(m):
bt = datetime.strptime(m.group('d')+' '+m.group('t'), '%Y-%m-%d %H:%M:%S')
bt = bt - timedelta(seconds=int(ktime))
data.boottime = bt.strftime('%Y-%m-%d_%H:%M:%S')
sysvals.stamp['time'] = bt.strftime('%B %d %Y, %I:%M:%S %p')
continue
- m = re.match('^calling *(?P<f>.*)\+.* @ (?P<p>[0-9]*)', msg)
+ m = re.match(r'^calling *(?P<f>.*)\+.* @ (?P<p>[0-9]*)', msg)
if(m):
func = m.group('f')
pid = int(m.group('p'))
devtemp[func] = (ktime, pid)
continue
- m = re.match('^initcall *(?P<f>.*)\+.* returned (?P<r>.*) after (?P<t>.*) usecs', msg)
+ m = re.match(r'^initcall *(?P<f>.*)\+.* returned (?P<r>.*) after (?P<t>.*) usecs', msg)
if(m):
data.valid = True
data.end = ktime
@@ -359,7 +359,7 @@ def parseKernelLog():
data.newAction(phase, f, pid, start, ktime, int(r), int(t))
del devtemp[f]
continue
- if(re.match('^Freeing unused kernel .*', msg)):
+ if(re.match(r'^Freeing unused kernel .*', msg)):
data.tUserMode = ktime
data.dmesg['kernel']['end'] = ktime
data.dmesg['user']['start'] = ktime
diff --git a/tools/power/pm-graph/sleepgraph.py b/tools/power/pm-graph/sleepgraph.py
index 40ad221e8881..ef87e63c05c7 100755
--- a/tools/power/pm-graph/sleepgraph.py
+++ b/tools/power/pm-graph/sleepgraph.py
@@ -86,7 +86,7 @@ def ascii(text):
# store system values and test parameters
class SystemValues:
title = 'SleepGraph'
- version = '5.11'
+ version = '5.12'
ansi = False
rs = 0
display = ''
@@ -420,11 +420,11 @@ class SystemValues:
return value.format(**args)
def setOutputFile(self):
if self.dmesgfile != '':
- m = re.match('(?P<name>.*)_dmesg\.txt.*', self.dmesgfile)
+ m = re.match(r'(?P<name>.*)_dmesg\.txt.*', self.dmesgfile)
if(m):
self.htmlfile = m.group('name')+'.html'
if self.ftracefile != '':
- m = re.match('(?P<name>.*)_ftrace\.txt.*', self.ftracefile)
+ m = re.match(r'(?P<name>.*)_ftrace\.txt.*', self.ftracefile)
if(m):
self.htmlfile = m.group('name')+'.html'
def systemInfo(self, info):
@@ -464,15 +464,15 @@ class SystemValues:
if os.path.exists('/proc/cpuinfo'):
with open('/proc/cpuinfo', 'r') as fp:
for line in fp:
- if re.match('^processor[ \t]*:[ \t]*[0-9]*', line):
+ if re.match(r'^processor[ \t]*:[ \t]*[0-9]*', line):
self.cpucount += 1
if os.path.exists('/proc/meminfo'):
with open('/proc/meminfo', 'r') as fp:
for line in fp:
- m = re.match('^MemTotal:[ \t]*(?P<sz>[0-9]*) *kB', line)
+ m = re.match(r'^MemTotal:[ \t]*(?P<sz>[0-9]*) *kB', line)
if m:
self.memtotal = int(m.group('sz'))
- m = re.match('^MemFree:[ \t]*(?P<sz>[0-9]*) *kB', line)
+ m = re.match(r'^MemFree:[ \t]*(?P<sz>[0-9]*) *kB', line)
if m:
self.memfree = int(m.group('sz'))
if os.path.exists('/etc/os-release'):
@@ -539,7 +539,7 @@ class SystemValues:
idx = line.find('[')
if idx > 1:
line = line[idx:]
- m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
+ m = re.match(r'[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if(m):
ktime = m.group('ktime')
break
@@ -553,7 +553,7 @@ class SystemValues:
idx = line.find('[')
if idx > 1:
line = line[idx:]
- m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
+ m = re.match(r'[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if(not m):
continue
ktime = float(m.group('ktime'))
@@ -636,11 +636,11 @@ class SystemValues:
# now process the args
for arg in sorted(args):
arglist[arg] = ''
- m = re.match('.* '+arg+'=(?P<arg>.*) ', data);
+ m = re.match(r'.* '+arg+'=(?P<arg>.*) ', data);
if m:
arglist[arg] = m.group('arg')
else:
- m = re.match('.* '+arg+'=(?P<arg>.*)', data);
+ m = re.match(r'.* '+arg+'=(?P<arg>.*)', data);
if m:
arglist[arg] = m.group('arg')
out = fmt.format(**arglist)
@@ -989,7 +989,7 @@ class SystemValues:
m = re.match(tp.ftrace_line_fmt, line)
if(not m or 'device_pm_callback_start' not in line):
continue
- m = re.match('.*: (?P<drv>.*) (?P<d>.*), parent: *(?P<p>.*), .*', m.group('msg'));
+ m = re.match(r'.*: (?P<drv>.*) (?P<d>.*), parent: *(?P<p>.*), .*', m.group('msg'));
if(not m):
continue
dev = m.group('d')
@@ -999,7 +999,7 @@ class SystemValues:
# now get the syspath for each target device
for dirname, dirnames, filenames in os.walk('/sys/devices'):
- if(re.match('.*/power', dirname) and 'async' in filenames):
+ if(re.match(r'.*/power', dirname) and 'async' in filenames):
dev = dirname.split('/')[-2]
if dev in props and (not props[dev].syspath or len(dirname) < len(props[dev].syspath)):
props[dev].syspath = dirname[:-6]
@@ -1143,12 +1143,12 @@ class SystemValues:
elif value and os.path.exists(file):
fp = open(file, 'r+')
if fmt == 'radio':
- m = re.match('.*\[(?P<v>.*)\].*', fp.read())
+ m = re.match(r'.*\[(?P<v>.*)\].*', fp.read())
if m:
self.cfgdef[file] = m.group('v')
elif fmt == 'acpi':
line = fp.read().strip().split('\n')[-1]
- m = re.match('.* (?P<v>[0-9A-Fx]*) .*', line)
+ m = re.match(r'.* (?P<v>[0-9A-Fx]*) .*', line)
if m:
self.cfgdef[file] = m.group('v')
else:
@@ -1173,7 +1173,7 @@ class SystemValues:
fp = Popen([cmd, '-v'], stdout=PIPE, stderr=PIPE).stderr
out = ascii(fp.read()).strip()
fp.close()
- if re.match('turbostat version .*', out):
+ if re.match(r'turbostat version .*', out):
self.vprint(out)
return True
return False
@@ -1181,33 +1181,33 @@ class SystemValues:
cmd = self.getExec('turbostat')
rawout = keyline = valline = ''
fullcmd = '%s -q -S echo freeze > %s' % (cmd, self.powerfile)
- fp = Popen(['sh', '-c', fullcmd], stdout=PIPE, stderr=PIPE).stderr
- for line in fp:
+ fp = Popen(['sh', '-c', fullcmd], stdout=PIPE, stderr=PIPE)
+ for line in fp.stderr:
line = ascii(line)
rawout += line
if keyline and valline:
continue
- if re.match('(?i)Avg_MHz.*', line):
+ if re.match(r'(?i)Avg_MHz.*', line):
keyline = line.strip().split()
elif keyline:
valline = line.strip().split()
- fp.close()
+ fp.wait()
if not keyline or not valline or len(keyline) != len(valline):
errmsg = 'unrecognized turbostat output:\n'+rawout.strip()
self.vprint(errmsg)
if not self.verbose:
pprint(errmsg)
- return ''
+ return (fp.returncode, '')
if self.verbose:
pprint(rawout.strip())
out = []
for key in keyline:
idx = keyline.index(key)
val = valline[idx]
- if key == 'SYS%LPI' and not s0ixready and re.match('^[0\.]*$', val):
+ if key == 'SYS%LPI' and not s0ixready and re.match(r'^[0\.]*$', val):
continue
out.append('%s=%s' % (key, val))
- return '|'.join(out)
+ return (fp.returncode, '|'.join(out))
def netfixon(self, net='both'):
cmd = self.getExec('netfix')
if not cmd:
@@ -1232,7 +1232,7 @@ class SystemValues:
except:
return ''
for line in reversed(w.split('\n')):
- m = re.match(' *(?P<dev>.*): (?P<stat>[0-9a-f]*) .*', line)
+ m = re.match(r' *(?P<dev>.*): (?P<stat>[0-9a-f]*) .*', line)
if not m or (dev and dev != m.group('dev')):
continue
return m.group('dev')
@@ -1261,14 +1261,14 @@ class SystemValues:
return
arr = msg.split()
for j in range(len(arr)):
- if re.match('^[0-9,\-\.]*$', arr[j]):
- arr[j] = '[0-9,\-\.]*'
+ if re.match(r'^[0-9,\-\.]*$', arr[j]):
+ arr[j] = r'[0-9,\-\.]*'
else:
arr[j] = arr[j]\
- .replace('\\', '\\\\').replace(']', '\]').replace('[', '\[')\
- .replace('.', '\.').replace('+', '\+').replace('*', '\*')\
- .replace('(', '\(').replace(')', '\)').replace('}', '\}')\
- .replace('{', '\{')
+ .replace('\\', r'\\\\').replace(']', r'\]').replace('[', r'\[')\
+ .replace('.', r'\.').replace('+', r'\+').replace('*', r'\*')\
+ .replace('(', r'\(').replace(')', r'\)').replace('}', r'\}')\
+ .replace('{', r'\{')
mstr = ' *'.join(arr)
entry = {
'line': msg,
@@ -1340,7 +1340,7 @@ class SystemValues:
fp = Popen(xset.format('q').split(' '), stdout=PIPE).stdout
ret = 'unknown'
for line in fp:
- m = re.match('[\s]*Monitor is (?P<m>.*)', ascii(line))
+ m = re.match(r'[\s]*Monitor is (?P<m>.*)', ascii(line))
if(m and len(m.group('m')) >= 2):
out = m.group('m').lower()
ret = out[3:] if out[0:2] == 'in' else out
@@ -1566,7 +1566,7 @@ class Data:
i += 1
if tp.stampInfo(line, sysvals):
continue
- m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
+ m = re.match(r'[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if not m:
continue
t = float(m.group('ktime'))
@@ -1574,7 +1574,7 @@ class Data:
continue
dir = 'suspend' if t < self.tSuspended else 'resume'
msg = m.group('msg')
- if re.match('capability: warning: .*', msg):
+ if re.match(r'capability: warning: .*', msg):
continue
for err in self.errlist:
if re.match(self.errlist[err], msg):
@@ -1679,8 +1679,8 @@ class Data:
ubiquitous = False
if kprobename in dtf and 'ub' in dtf[kprobename]:
ubiquitous = True
- mc = re.match('\(.*\) *(?P<args>.*)', cdata)
- mr = re.match('\((?P<caller>\S*).* arg1=(?P<ret>.*)', rdata)
+ mc = re.match(r'\(.*\) *(?P<args>.*)', cdata)
+ mr = re.match(r'\((?P<caller>\S*).* arg1=(?P<ret>.*)', rdata)
if mc and mr:
c = mr.group('caller').split('+')[0]
a = mc.group('args').strip()
@@ -1997,7 +1997,7 @@ class Data:
list = self.dmesg[phase]['list']
mydev = ''
for devname in sorted(list):
- if name == devname or re.match('^%s\[(?P<num>[0-9]*)\]$' % name, devname):
+ if name == devname or re.match(r'^%s\[(?P<num>[0-9]*)\]$' % name, devname):
mydev = devname
if mydev:
return list[mydev]
@@ -2099,7 +2099,7 @@ class Data:
for dev in sorted(list):
pdev = list[dev]['par']
pid = list[dev]['pid']
- if(pid < 0 or re.match('[0-9]*-[0-9]*\.[0-9]*[\.0-9]*\:[\.0-9]*$', pdev)):
+ if(pid < 0 or re.match(r'[0-9]*-[0-9]*\.[0-9]*[\.0-9]*\:[\.0-9]*$', pdev)):
continue
if pdev and pdev not in real and pdev not in rootlist:
rootlist.append(pdev)
@@ -2190,26 +2190,26 @@ class Data:
if 'resume_complete' in dm:
dm['resume_complete']['end'] = time
def initcall_debug_call(self, line, quick=False):
- m = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) .* (?P<f>.*)\: '+\
- 'PM: *calling .* @ (?P<n>.*), parent: (?P<p>.*)', line)
+ m = re.match(r'.*(\[ *)(?P<t>[0-9\.]*)(\]) .* (?P<f>.*)\: '+\
+ r'PM: *calling .* @ (?P<n>.*), parent: (?P<p>.*)', line)
if not m:
- m = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) .* (?P<f>.*)\: '+\
- 'calling .* @ (?P<n>.*), parent: (?P<p>.*)', line)
+ m = re.match(r'.*(\[ *)(?P<t>[0-9\.]*)(\]) .* (?P<f>.*)\: '+\
+ r'calling .* @ (?P<n>.*), parent: (?P<p>.*)', line)
if not m:
- m = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) calling '+\
- '(?P<f>.*)\+ @ (?P<n>.*), parent: (?P<p>.*)', line)
+ m = re.match(r'.*(\[ *)(?P<t>[0-9\.]*)(\]) calling '+\
+ r'(?P<f>.*)\+ @ (?P<n>.*), parent: (?P<p>.*)', line)
if m:
return True if quick else m.group('t', 'f', 'n', 'p')
return False if quick else ('', '', '', '')
def initcall_debug_return(self, line, quick=False):
- m = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) .* (?P<f>.*)\: PM: '+\
- '.* returned (?P<r>[0-9]*) after (?P<dt>[0-9]*) usecs', line)
+ m = re.match(r'.*(\[ *)(?P<t>[0-9\.]*)(\]) .* (?P<f>.*)\: PM: '+\
+ r'.* returned (?P<r>[0-9]*) after (?P<dt>[0-9]*) usecs', line)
if not m:
- m = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) .* (?P<f>.*)\: '+\
- '.* returned (?P<r>[0-9]*) after (?P<dt>[0-9]*) usecs', line)
+ m = re.match(r'.*(\[ *)(?P<t>[0-9\.]*)(\]) .* (?P<f>.*)\: '+\
+ r'.* returned (?P<r>[0-9]*) after (?P<dt>[0-9]*) usecs', line)
if not m:
- m = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) call '+\
- '(?P<f>.*)\+ returned .* after (?P<dt>.*) usecs', line)
+ m = re.match(r'.*(\[ *)(?P<t>[0-9\.]*)(\]) call '+\
+ r'(?P<f>.*)\+ returned .* after (?P<dt>.*) usecs', line)
if m:
return True if quick else m.group('t', 'f', 'dt')
return False if quick else ('', '', '')
@@ -2294,28 +2294,28 @@ class FTraceLine:
if not m and not d:
return
# is this a trace event
- if(d == 'traceevent' or re.match('^ *\/\* *(?P<msg>.*) \*\/ *$', m)):
+ if(d == 'traceevent' or re.match(r'^ *\/\* *(?P<msg>.*) \*\/ *$', m)):
if(d == 'traceevent'):
# nop format trace event
msg = m
else:
# function_graph format trace event
- em = re.match('^ *\/\* *(?P<msg>.*) \*\/ *$', m)
+ em = re.match(r'^ *\/\* *(?P<msg>.*) \*\/ *$', m)
msg = em.group('msg')
- emm = re.match('^(?P<call>.*?): (?P<msg>.*)', msg)
+ emm = re.match(r'^(?P<call>.*?): (?P<msg>.*)', msg)
if(emm):
self.name = emm.group('msg')
self.type = emm.group('call')
else:
self.name = msg
- km = re.match('^(?P<n>.*)_cal$', self.type)
+ km = re.match(r'^(?P<n>.*)_cal$', self.type)
if km:
self.fcall = True
self.fkprobe = True
self.type = km.group('n')
return
- km = re.match('^(?P<n>.*)_ret$', self.type)
+ km = re.match(r'^(?P<n>.*)_ret$', self.type)
if km:
self.freturn = True
self.fkprobe = True
@@ -2327,7 +2327,7 @@ class FTraceLine:
if(d):
self.length = float(d)/1000000
# the indentation determines the depth
- match = re.match('^(?P<d> *)(?P<o>.*)$', m)
+ match = re.match(r'^(?P<d> *)(?P<o>.*)$', m)
if(not match):
return
self.depth = self.getDepth(match.group('d'))
@@ -2337,7 +2337,7 @@ class FTraceLine:
self.freturn = True
if(len(m) > 1):
# includes comment with function name
- match = re.match('^} *\/\* *(?P<n>.*) *\*\/$', m)
+ match = re.match(r'^} *\/\* *(?P<n>.*) *\*\/$', m)
if(match):
self.name = match.group('n').strip()
# function call
@@ -2345,13 +2345,13 @@ class FTraceLine:
self.fcall = True
# function call with children
if(m[-1] == '{'):
- match = re.match('^(?P<n>.*) *\(.*', m)
+ match = re.match(r'^(?P<n>.*) *\(.*', m)
if(match):
self.name = match.group('n').strip()
# function call with no children (leaf)
elif(m[-1] == ';'):
self.freturn = True
- match = re.match('^(?P<n>.*) *\(.*', m)
+ match = re.match(r'^(?P<n>.*) *\(.*', m)
if(match):
self.name = match.group('n').strip()
# something else (possibly a trace marker)
@@ -2385,7 +2385,7 @@ class FTraceLine:
return False
else:
if(self.type == 'suspend_resume' and
- re.match('suspend_enter\[.*\] begin', self.name)):
+ re.match(r'suspend_enter\[.*\] begin', self.name)):
return True
return False
def endMarker(self):
@@ -2398,7 +2398,7 @@ class FTraceLine:
return False
else:
if(self.type == 'suspend_resume' and
- re.match('thaw_processes\[.*\] end', self.name)):
+ re.match(r'thaw_processes\[.*\] end', self.name)):
return True
return False
@@ -2976,30 +2976,30 @@ class Timeline:
# Description:
# A list of values describing the properties of these test runs
class TestProps:
- stampfmt = '# [a-z]*-(?P<m>[0-9]{2})(?P<d>[0-9]{2})(?P<y>[0-9]{2})-'+\
- '(?P<H>[0-9]{2})(?P<M>[0-9]{2})(?P<S>[0-9]{2})'+\
- ' (?P<host>.*) (?P<mode>.*) (?P<kernel>.*)$'
- wififmt = '^# wifi *(?P<d>\S*) *(?P<s>\S*) *(?P<t>[0-9\.]+).*'
- tstatfmt = '^# turbostat (?P<t>\S*)'
- testerrfmt = '^# enter_sleep_error (?P<e>.*)'
- sysinfofmt = '^# sysinfo .*'
- cmdlinefmt = '^# command \| (?P<cmd>.*)'
- kparamsfmt = '^# kparams \| (?P<kp>.*)'
- devpropfmt = '# Device Properties: .*'
- pinfofmt = '# platform-(?P<val>[a-z,A-Z,0-9,_]*): (?P<info>.*)'
- tracertypefmt = '# tracer: (?P<t>.*)'
- firmwarefmt = '# fwsuspend (?P<s>[0-9]*) fwresume (?P<r>[0-9]*)$'
- procexecfmt = 'ps - (?P<ps>.*)$'
- procmultifmt = '@(?P<n>[0-9]*)\|(?P<ps>.*)$'
+ stampfmt = r'# [a-z]*-(?P<m>[0-9]{2})(?P<d>[0-9]{2})(?P<y>[0-9]{2})-'+\
+ r'(?P<H>[0-9]{2})(?P<M>[0-9]{2})(?P<S>[0-9]{2})'+\
+ r' (?P<host>.*) (?P<mode>.*) (?P<kernel>.*)$'
+ wififmt = r'^# wifi *(?P<d>\S*) *(?P<s>\S*) *(?P<t>[0-9\.]+).*'
+ tstatfmt = r'^# turbostat (?P<t>\S*)'
+ testerrfmt = r'^# enter_sleep_error (?P<e>.*)'
+ sysinfofmt = r'^# sysinfo .*'
+ cmdlinefmt = r'^# command \| (?P<cmd>.*)'
+ kparamsfmt = r'^# kparams \| (?P<kp>.*)'
+ devpropfmt = r'# Device Properties: .*'
+ pinfofmt = r'# platform-(?P<val>[a-z,A-Z,0-9,_]*): (?P<info>.*)'
+ tracertypefmt = r'# tracer: (?P<t>.*)'
+ firmwarefmt = r'# fwsuspend (?P<s>[0-9]*) fwresume (?P<r>[0-9]*)$'
+ procexecfmt = r'ps - (?P<ps>.*)$'
+ procmultifmt = r'@(?P<n>[0-9]*)\|(?P<ps>.*)$'
ftrace_line_fmt_fg = \
- '^ *(?P<time>[0-9\.]*) *\| *(?P<cpu>[0-9]*)\)'+\
- ' *(?P<proc>.*)-(?P<pid>[0-9]*) *\|'+\
- '[ +!#\*@$]*(?P<dur>[0-9\.]*) .*\| (?P<msg>.*)'
+ r'^ *(?P<time>[0-9\.]*) *\| *(?P<cpu>[0-9]*)\)'+\
+ r' *(?P<proc>.*)-(?P<pid>[0-9]*) *\|'+\
+ r'[ +!#\*@$]*(?P<dur>[0-9\.]*) .*\| (?P<msg>.*)'
ftrace_line_fmt_nop = \
- ' *(?P<proc>.*)-(?P<pid>[0-9]*) *\[(?P<cpu>[0-9]*)\] *'+\
- '(?P<flags>\S*) *(?P<time>[0-9\.]*): *'+\
- '(?P<msg>.*)'
- machinesuspend = 'machine_suspend\[.*'
+ r' *(?P<proc>.*)-(?P<pid>[0-9]*) *\[(?P<cpu>[0-9]*)\] *'+\
+ r'(?P<flags>\S*) *(?P<time>[0-9\.]*): *'+\
+ r'(?P<msg>.*)'
+ machinesuspend = r'machine_suspend\[.*'
multiproclist = dict()
multiproctime = 0.0
multiproccnt = 0
@@ -3081,14 +3081,14 @@ class TestProps:
sv.hostname = data.stamp['host']
sv.suspendmode = data.stamp['mode']
if sv.suspendmode == 'freeze':
- self.machinesuspend = 'timekeeping_freeze\[.*'
+ self.machinesuspend = r'timekeeping_freeze\[.*'
else:
- self.machinesuspend = 'machine_suspend\[.*'
+ self.machinesuspend = r'machine_suspend\[.*'
if sv.suspendmode == 'command' and sv.ftracefile != '':
modes = ['on', 'freeze', 'standby', 'mem', 'disk']
fp = sv.openlog(sv.ftracefile, 'r')
for line in fp:
- m = re.match('.* machine_suspend\[(?P<mode>.*)\]', line)
+ m = re.match(r'.* machine_suspend\[(?P<mode>.*)\]', line)
if m and m.group('mode') in ['1', '2', '3', '4']:
sv.suspendmode = modes[int(m.group('mode'))]
data.stamp['mode'] = sv.suspendmode
@@ -3401,9 +3401,9 @@ def loadTraceLog():
for i in range(len(blk)):
if 'SUSPEND START' in blk[i][3]:
first.append(i)
- elif re.match('.* timekeeping_freeze.*begin', blk[i][3]):
+ elif re.match(r'.* timekeeping_freeze.*begin', blk[i][3]):
last.append(i)
- elif re.match('.* timekeeping_freeze.*end', blk[i][3]):
+ elif re.match(r'.* timekeeping_freeze.*end', blk[i][3]):
first.append(i)
elif 'RESUME COMPLETE' in blk[i][3]:
last.append(i)
@@ -3514,28 +3514,28 @@ def parseTraceLog(live=False):
if(t.fevent):
if(t.type == 'suspend_resume'):
# suspend_resume trace events have two types, begin and end
- if(re.match('(?P<name>.*) begin$', t.name)):
+ if(re.match(r'(?P<name>.*) begin$', t.name)):
isbegin = True
- elif(re.match('(?P<name>.*) end$', t.name)):
+ elif(re.match(r'(?P<name>.*) end$', t.name)):
isbegin = False
else:
continue
if '[' in t.name:
- m = re.match('(?P<name>.*)\[.*', t.name)
+ m = re.match(r'(?P<name>.*)\[.*', t.name)
else:
- m = re.match('(?P<name>.*) .*', t.name)
+ m = re.match(r'(?P<name>.*) .*', t.name)
name = m.group('name')
# ignore these events
if(name.split('[')[0] in tracewatch):
continue
# -- phase changes --
# start of kernel suspend
- if(re.match('suspend_enter\[.*', t.name)):
+ if(re.match(r'suspend_enter\[.*', t.name)):
if(isbegin and data.tKernSus == 0):
data.tKernSus = t.time
continue
# suspend_prepare start
- elif(re.match('dpm_prepare\[.*', t.name)):
+ elif(re.match(r'dpm_prepare\[.*', t.name)):
if isbegin and data.first_suspend_prepare:
data.first_suspend_prepare = False
if data.tKernSus == 0:
@@ -3544,15 +3544,15 @@ def parseTraceLog(live=False):
phase = data.setPhase('suspend_prepare', t.time, isbegin)
continue
# suspend start
- elif(re.match('dpm_suspend\[.*', t.name)):
+ elif(re.match(r'dpm_suspend\[.*', t.name)):
phase = data.setPhase('suspend', t.time, isbegin)
continue
# suspend_late start
- elif(re.match('dpm_suspend_late\[.*', t.name)):
+ elif(re.match(r'dpm_suspend_late\[.*', t.name)):
phase = data.setPhase('suspend_late', t.time, isbegin)
continue
# suspend_noirq start
- elif(re.match('dpm_suspend_noirq\[.*', t.name)):
+ elif(re.match(r'dpm_suspend_noirq\[.*', t.name)):
phase = data.setPhase('suspend_noirq', t.time, isbegin)
continue
# suspend_machine/resume_machine
@@ -3589,19 +3589,19 @@ def parseTraceLog(live=False):
data.tResumed = t.time
continue
# resume_noirq start
- elif(re.match('dpm_resume_noirq\[.*', t.name)):
+ elif(re.match(r'dpm_resume_noirq\[.*', t.name)):
phase = data.setPhase('resume_noirq', t.time, isbegin)
continue
# resume_early start
- elif(re.match('dpm_resume_early\[.*', t.name)):
+ elif(re.match(r'dpm_resume_early\[.*', t.name)):
phase = data.setPhase('resume_early', t.time, isbegin)
continue
# resume start
- elif(re.match('dpm_resume\[.*', t.name)):
+ elif(re.match(r'dpm_resume\[.*', t.name)):
phase = data.setPhase('resume', t.time, isbegin)
continue
# resume complete start
- elif(re.match('dpm_complete\[.*', t.name)):
+ elif(re.match(r'dpm_complete\[.*', t.name)):
phase = data.setPhase('resume_complete', t.time, isbegin)
continue
# skip trace events inside devices calls
@@ -3635,7 +3635,7 @@ def parseTraceLog(live=False):
elif(t.type == 'device_pm_callback_start'):
if phase not in data.dmesg:
continue
- m = re.match('(?P<drv>.*) (?P<d>.*), parent: *(?P<p>.*), .*',\
+ m = re.match(r'(?P<drv>.*) (?P<d>.*), parent: *(?P<p>.*), .*',\
t.name);
if(not m):
continue
@@ -3650,7 +3650,7 @@ def parseTraceLog(live=False):
elif(t.type == 'device_pm_callback_end'):
if phase not in data.dmesg:
continue
- m = re.match('(?P<drv>.*) (?P<d>.*), err.*', t.name);
+ m = re.match(r'(?P<drv>.*) (?P<d>.*), err.*', t.name);
if(not m):
continue
n = m.group('d')
@@ -3904,24 +3904,24 @@ def loadKernelLog():
line = line[idx:]
if tp.stampInfo(line, sysvals):
continue
- m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
+ m = re.match(r'[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if(not m):
continue
msg = m.group("msg")
- if re.match('PM: Syncing filesystems.*', msg) or \
- re.match('PM: suspend entry.*', msg):
+ if re.match(r'PM: Syncing filesystems.*', msg) or \
+ re.match(r'PM: suspend entry.*', msg):
if(data):
testruns.append(data)
data = Data(len(testruns))
tp.parseStamp(data, sysvals)
if(not data):
continue
- m = re.match('.* *(?P<k>[0-9]\.[0-9]{2}\.[0-9]-.*) .*', msg)
+ m = re.match(r'.* *(?P<k>[0-9]\.[0-9]{2}\.[0-9]-.*) .*', msg)
if(m):
sysvals.stamp['kernel'] = m.group('k')
- m = re.match('PM: Preparing system for (?P<m>.*) sleep', msg)
+ m = re.match(r'PM: Preparing system for (?P<m>.*) sleep', msg)
if not m:
- m = re.match('PM: Preparing system for sleep \((?P<m>.*)\)', msg)
+ m = re.match(r'PM: Preparing system for sleep \((?P<m>.*)\)', msg)
if m:
sysvals.stamp['mode'] = sysvals.suspendmode = m.group('m')
data.dmesgtext.append(line)
@@ -3984,7 +3984,7 @@ def parseKernelLog(data):
'resume_machine': ['[PM: ]*Timekeeping suspended for.*',
'ACPI: Low-level resume complete.*',
'ACPI: resume from mwait',
- 'Suspended for [0-9\.]* seconds'],
+ r'Suspended for [0-9\.]* seconds'],
'resume_noirq': ['PM: resume from suspend-to-idle',
'ACPI: Waking up from system sleep state.*'],
'resume_early': ['PM: noirq resume of devices complete after.*',
@@ -3993,7 +3993,7 @@ def parseKernelLog(data):
'PM: early restore of devices complete after.*'],
'resume_complete': ['PM: resume of devices complete after.*',
'PM: restore of devices complete after.*'],
- 'post_resume': ['.*Restarting tasks \.\.\..*'],
+ 'post_resume': [r'.*Restarting tasks \.\.\..*'],
}
# action table (expected events that occur and show up in dmesg)
@@ -4021,7 +4021,7 @@ def parseKernelLog(data):
actions = dict()
for line in data.dmesgtext:
# parse each dmesg line into the time and message
- m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
+ m = re.match(r'[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if(m):
val = m.group('ktime')
try:
@@ -4145,26 +4145,26 @@ def parseKernelLog(data):
if(a in actions and actions[a][-1]['begin'] == actions[a][-1]['end']):
actions[a][-1]['end'] = ktime
# now look for CPU on/off events
- if(re.match('Disabling non-boot CPUs .*', msg)):
+ if(re.match(r'Disabling non-boot CPUs .*', msg)):
# start of first cpu suspend
cpu_start = ktime
- elif(re.match('Enabling non-boot CPUs .*', msg)):
+ elif(re.match(r'Enabling non-boot CPUs .*', msg)):
# start of first cpu resume
cpu_start = ktime
- elif(re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg) \
- or re.match('psci: CPU(?P<cpu>[0-9]*) killed.*', msg)):
+ elif(re.match(r'smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg) \
+ or re.match(r'psci: CPU(?P<cpu>[0-9]*) killed.*', msg)):
# end of a cpu suspend, start of the next
- m = re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg)
+ m = re.match(r'smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg)
if(not m):
- m = re.match('psci: CPU(?P<cpu>[0-9]*) killed.*', msg)
+ m = re.match(r'psci: CPU(?P<cpu>[0-9]*) killed.*', msg)
cpu = 'CPU'+m.group('cpu')
if(cpu not in actions):
actions[cpu] = []
actions[cpu].append({'begin': cpu_start, 'end': ktime})
cpu_start = ktime
- elif(re.match('CPU(?P<cpu>[0-9]*) is up', msg)):
+ elif(re.match(r'CPU(?P<cpu>[0-9]*) is up', msg)):
# end of a cpu resume, start of the next
- m = re.match('CPU(?P<cpu>[0-9]*) is up', msg)
+ m = re.match(r'CPU(?P<cpu>[0-9]*) is up', msg)
cpu = 'CPU'+m.group('cpu')
if(cpu not in actions):
actions[cpu] = []
@@ -4343,7 +4343,8 @@ def createHTMLSummarySimple(testruns, htmlfile, title):
list[mode]['data'].append([data['host'], data['kernel'],
data['time'], tVal[0], tVal[1], data['url'], res,
data['issues'], data['sus_worst'], data['sus_worsttime'],
- data['res_worst'], data['res_worsttime'], pkgpc10, syslpi, wifi])
+ data['res_worst'], data['res_worsttime'], pkgpc10, syslpi, wifi,
+ (data['fullmode'] if 'fullmode' in data else mode)])
idx = len(list[mode]['data']) - 1
if res.startswith('fail in'):
res = 'fail'
@@ -4449,7 +4450,7 @@ def createHTMLSummarySimple(testruns, htmlfile, title):
elif idx == iMed[i]:
tHigh[i] = ' id="%smed" class=medval title="Median"' % tag
html += td.format("%d" % (list[mode]['data'].index(d) + 1)) # row
- html += td.format(mode) # mode
+ html += td.format(d[15]) # mode
html += td.format(d[0]) # host
html += td.format(d[1]) # kernel
html += td.format(d[2]) # time
@@ -5061,6 +5062,7 @@ def addCSS(hf, sv, testcount=1, kerror=False, extra=''):
def addScriptCode(hf, testruns):
t0 = testruns[0].start * 1000
tMax = testruns[-1].end * 1000
+ hf.write('<script type="text/javascript">\n');
# create an array in javascript memory with the device details
detail = ' var devtable = [];\n'
for data in testruns:
@@ -5068,384 +5070,383 @@ def addScriptCode(hf, testruns):
detail += ' devtable[%d] = "%s";\n' % (data.testnumber, topo)
detail += ' var bounds = [%f,%f];\n' % (t0, tMax)
# add the code which will manipulate the data in the browser
- script_code = \
- '<script type="text/javascript">\n'+detail+\
- ' var resolution = -1;\n'\
- ' var dragval = [0, 0];\n'\
- ' function redrawTimescale(t0, tMax, tS) {\n'\
- ' var rline = \'<div class="t" style="left:0;border-left:1px solid black;border-right:0;">\';\n'\
- ' var tTotal = tMax - t0;\n'\
- ' var list = document.getElementsByClassName("tblock");\n'\
- ' for (var i = 0; i < list.length; i++) {\n'\
- ' var timescale = list[i].getElementsByClassName("timescale")[0];\n'\
- ' var m0 = t0 + (tTotal*parseFloat(list[i].style.left)/100);\n'\
- ' var mTotal = tTotal*parseFloat(list[i].style.width)/100;\n'\
- ' var mMax = m0 + mTotal;\n'\
- ' var html = "";\n'\
- ' var divTotal = Math.floor(mTotal/tS) + 1;\n'\
- ' if(divTotal > 1000) continue;\n'\
- ' var divEdge = (mTotal - tS*(divTotal-1))*100/mTotal;\n'\
- ' var pos = 0.0, val = 0.0;\n'\
- ' for (var j = 0; j < divTotal; j++) {\n'\
- ' var htmlline = "";\n'\
- ' var mode = list[i].id[5];\n'\
- ' if(mode == "s") {\n'\
- ' pos = 100 - (((j)*tS*100)/mTotal) - divEdge;\n'\
- ' val = (j-divTotal+1)*tS;\n'\
- ' if(j == divTotal - 1)\n'\
- ' htmlline = \'<div class="t" style="right:\'+pos+\'%"><cS>S&rarr;</cS></div>\';\n'\
- ' else\n'\
- ' htmlline = \'<div class="t" style="right:\'+pos+\'%">\'+val+\'ms</div>\';\n'\
- ' } else {\n'\
- ' pos = 100 - (((j)*tS*100)/mTotal);\n'\
- ' val = (j)*tS;\n'\
- ' htmlline = \'<div class="t" style="right:\'+pos+\'%">\'+val+\'ms</div>\';\n'\
- ' if(j == 0)\n'\
- ' if(mode == "r")\n'\
- ' htmlline = rline+"<cS>&larr;R</cS></div>";\n'\
- ' else\n'\
- ' htmlline = rline+"<cS>0ms</div>";\n'\
- ' }\n'\
- ' html += htmlline;\n'\
- ' }\n'\
- ' timescale.innerHTML = html;\n'\
- ' }\n'\
- ' }\n'\
- ' function zoomTimeline() {\n'\
- ' var dmesg = document.getElementById("dmesg");\n'\
- ' var zoombox = document.getElementById("dmesgzoombox");\n'\
- ' var left = zoombox.scrollLeft;\n'\
- ' var val = parseFloat(dmesg.style.width);\n'\
- ' var newval = 100;\n'\
- ' var sh = window.outerWidth / 2;\n'\
- ' if(this.id == "zoomin") {\n'\
- ' newval = val * 1.2;\n'\
- ' if(newval > 910034) newval = 910034;\n'\
- ' dmesg.style.width = newval+"%";\n'\
- ' zoombox.scrollLeft = ((left + sh) * newval / val) - sh;\n'\
- ' } else if (this.id == "zoomout") {\n'\
- ' newval = val / 1.2;\n'\
- ' if(newval < 100) newval = 100;\n'\
- ' dmesg.style.width = newval+"%";\n'\
- ' zoombox.scrollLeft = ((left + sh) * newval / val) - sh;\n'\
- ' } else {\n'\
- ' zoombox.scrollLeft = 0;\n'\
- ' dmesg.style.width = "100%";\n'\
- ' }\n'\
- ' var tS = [10000, 5000, 2000, 1000, 500, 200, 100, 50, 20, 10, 5, 2, 1];\n'\
- ' var t0 = bounds[0];\n'\
- ' var tMax = bounds[1];\n'\
- ' var tTotal = tMax - t0;\n'\
- ' var wTotal = tTotal * 100.0 / newval;\n'\
- ' var idx = 7*window.innerWidth/1100;\n'\
- ' for(var i = 0; (i < tS.length)&&((wTotal / tS[i]) < idx); i++);\n'\
- ' if(i >= tS.length) i = tS.length - 1;\n'\
- ' if(tS[i] == resolution) return;\n'\
- ' resolution = tS[i];\n'\
- ' redrawTimescale(t0, tMax, tS[i]);\n'\
- ' }\n'\
- ' function deviceName(title) {\n'\
- ' var name = title.slice(0, title.indexOf(" ("));\n'\
- ' return name;\n'\
- ' }\n'\
- ' function deviceHover() {\n'\
- ' var name = deviceName(this.title);\n'\
- ' var dmesg = document.getElementById("dmesg");\n'\
- ' var dev = dmesg.getElementsByClassName("thread");\n'\
- ' var cpu = -1;\n'\
- ' if(name.match("CPU_ON\[[0-9]*\]"))\n'\
- ' cpu = parseInt(name.slice(7));\n'\
- ' else if(name.match("CPU_OFF\[[0-9]*\]"))\n'\
- ' cpu = parseInt(name.slice(8));\n'\
- ' for (var i = 0; i < dev.length; i++) {\n'\
- ' dname = deviceName(dev[i].title);\n'\
- ' var cname = dev[i].className.slice(dev[i].className.indexOf("thread"));\n'\
- ' if((cpu >= 0 && dname.match("CPU_O[NF]*\\\[*"+cpu+"\\\]")) ||\n'\
- ' (name == dname))\n'\
- ' {\n'\
- ' dev[i].className = "hover "+cname;\n'\
- ' } else {\n'\
- ' dev[i].className = cname;\n'\
- ' }\n'\
- ' }\n'\
- ' }\n'\
- ' function deviceUnhover() {\n'\
- ' var dmesg = document.getElementById("dmesg");\n'\
- ' var dev = dmesg.getElementsByClassName("thread");\n'\
- ' for (var i = 0; i < dev.length; i++) {\n'\
- ' dev[i].className = dev[i].className.slice(dev[i].className.indexOf("thread"));\n'\
- ' }\n'\
- ' }\n'\
- ' function deviceTitle(title, total, cpu) {\n'\
- ' var prefix = "Total";\n'\
- ' if(total.length > 3) {\n'\
- ' prefix = "Average";\n'\
- ' total[1] = (total[1]+total[3])/2;\n'\
- ' total[2] = (total[2]+total[4])/2;\n'\
- ' }\n'\
- ' var devtitle = document.getElementById("devicedetailtitle");\n'\
- ' var name = deviceName(title);\n'\
- ' if(cpu >= 0) name = "CPU"+cpu;\n'\
- ' var driver = "";\n'\
- ' var tS = "<t2>(</t2>";\n'\
- ' var tR = "<t2>)</t2>";\n'\
- ' if(total[1] > 0)\n'\
- ' tS = "<t2>("+prefix+" Suspend:</t2><t0> "+total[1].toFixed(3)+" ms</t0> ";\n'\
- ' if(total[2] > 0)\n'\
- ' tR = " <t2>"+prefix+" Resume:</t2><t0> "+total[2].toFixed(3)+" ms<t2>)</t2></t0>";\n'\
- ' var s = title.indexOf("{");\n'\
- ' var e = title.indexOf("}");\n'\
- ' if((s >= 0) && (e >= 0))\n'\
- ' driver = title.slice(s+1, e) + " <t1>@</t1> ";\n'\
- ' if(total[1] > 0 && total[2] > 0)\n'\
- ' devtitle.innerHTML = "<t0>"+driver+name+"</t0> "+tS+tR;\n'\
- ' else\n'\
- ' devtitle.innerHTML = "<t0>"+title+"</t0>";\n'\
- ' return name;\n'\
- ' }\n'\
- ' function deviceDetail() {\n'\
- ' var devinfo = document.getElementById("devicedetail");\n'\
- ' devinfo.style.display = "block";\n'\
- ' var name = deviceName(this.title);\n'\
- ' var cpu = -1;\n'\
- ' if(name.match("CPU_ON\[[0-9]*\]"))\n'\
- ' cpu = parseInt(name.slice(7));\n'\
- ' else if(name.match("CPU_OFF\[[0-9]*\]"))\n'\
- ' cpu = parseInt(name.slice(8));\n'\
- ' var dmesg = document.getElementById("dmesg");\n'\
- ' var dev = dmesg.getElementsByClassName("thread");\n'\
- ' var idlist = [];\n'\
- ' var pdata = [[]];\n'\
- ' if(document.getElementById("devicedetail1"))\n'\
- ' pdata = [[], []];\n'\
- ' var pd = pdata[0];\n'\
- ' var total = [0.0, 0.0, 0.0];\n'\
- ' for (var i = 0; i < dev.length; i++) {\n'\
- ' dname = deviceName(dev[i].title);\n'\
- ' if((cpu >= 0 && dname.match("CPU_O[NF]*\\\[*"+cpu+"\\\]")) ||\n'\
- ' (name == dname))\n'\
- ' {\n'\
- ' idlist[idlist.length] = dev[i].id;\n'\
- ' var tidx = 1;\n'\
- ' if(dev[i].id[0] == "a") {\n'\
- ' pd = pdata[0];\n'\
- ' } else {\n'\
- ' if(pdata.length == 1) pdata[1] = [];\n'\
- ' if(total.length == 3) total[3]=total[4]=0.0;\n'\
- ' pd = pdata[1];\n'\
- ' tidx = 3;\n'\
- ' }\n'\
- ' var info = dev[i].title.split(" ");\n'\
- ' var pname = info[info.length-1];\n'\
- ' pd[pname] = parseFloat(info[info.length-3].slice(1));\n'\
- ' total[0] += pd[pname];\n'\
- ' if(pname.indexOf("suspend") >= 0)\n'\
- ' total[tidx] += pd[pname];\n'\
- ' else\n'\
- ' total[tidx+1] += pd[pname];\n'\
- ' }\n'\
- ' }\n'\
- ' var devname = deviceTitle(this.title, total, cpu);\n'\
- ' var left = 0.0;\n'\
- ' for (var t = 0; t < pdata.length; t++) {\n'\
- ' pd = pdata[t];\n'\
- ' devinfo = document.getElementById("devicedetail"+t);\n'\
- ' var phases = devinfo.getElementsByClassName("phaselet");\n'\
- ' for (var i = 0; i < phases.length; i++) {\n'\
- ' if(phases[i].id in pd) {\n'\
- ' var w = 100.0*pd[phases[i].id]/total[0];\n'\
- ' var fs = 32;\n'\
- ' if(w < 8) fs = 4*w | 0;\n'\
- ' var fs2 = fs*3/4;\n'\
- ' phases[i].style.width = w+"%";\n'\
- ' phases[i].style.left = left+"%";\n'\
- ' phases[i].title = phases[i].id+" "+pd[phases[i].id]+" ms";\n'\
- ' left += w;\n'\
- ' var time = "<t4 style=\\"font-size:"+fs+"px\\">"+pd[phases[i].id]+" ms<br></t4>";\n'\
- ' var pname = "<t3 style=\\"font-size:"+fs2+"px\\">"+phases[i].id.replace(new RegExp("_", "g"), " ")+"</t3>";\n'\
- ' phases[i].innerHTML = time+pname;\n'\
- ' } else {\n'\
- ' phases[i].style.width = "0%";\n'\
- ' phases[i].style.left = left+"%";\n'\
- ' }\n'\
- ' }\n'\
- ' }\n'\
- ' if(typeof devstats !== \'undefined\')\n'\
- ' callDetail(this.id, this.title);\n'\
- ' var cglist = document.getElementById("callgraphs");\n'\
- ' if(!cglist) return;\n'\
- ' var cg = cglist.getElementsByClassName("atop");\n'\
- ' if(cg.length < 10) return;\n'\
- ' for (var i = 0; i < cg.length; i++) {\n'\
- ' cgid = cg[i].id.split("x")[0]\n'\
- ' if(idlist.indexOf(cgid) >= 0) {\n'\
- ' cg[i].style.display = "block";\n'\
- ' } else {\n'\
- ' cg[i].style.display = "none";\n'\
- ' }\n'\
- ' }\n'\
- ' }\n'\
- ' function callDetail(devid, devtitle) {\n'\
- ' if(!(devid in devstats) || devstats[devid].length < 1)\n'\
- ' return;\n'\
- ' var list = devstats[devid];\n'\
- ' var tmp = devtitle.split(" ");\n'\
- ' var name = tmp[0], phase = tmp[tmp.length-1];\n'\
- ' var dd = document.getElementById(phase);\n'\
- ' var total = parseFloat(tmp[1].slice(1));\n'\
- ' var mlist = [];\n'\
- ' var maxlen = 0;\n'\
- ' var info = []\n'\
- ' for(var i in list) {\n'\
- ' if(list[i][0] == "@") {\n'\
- ' info = list[i].split("|");\n'\
- ' continue;\n'\
- ' }\n'\
- ' var tmp = list[i].split("|");\n'\
- ' var t = parseFloat(tmp[0]), f = tmp[1], c = parseInt(tmp[2]);\n'\
- ' var p = (t*100.0/total).toFixed(2);\n'\
- ' mlist[mlist.length] = [f, c, t.toFixed(2), p+"%"];\n'\
- ' if(f.length > maxlen)\n'\
- ' maxlen = f.length;\n'\
- ' }\n'\
- ' var pad = 5;\n'\
- ' if(mlist.length == 0) pad = 30;\n'\
- ' var html = \'<div style="padding-top:\'+pad+\'px"><t3> <b>\'+name+\':</b>\';\n'\
- ' if(info.length > 2)\n'\
- ' html += " start=<b>"+info[1]+"</b>, end=<b>"+info[2]+"</b>";\n'\
- ' if(info.length > 3)\n'\
- ' html += ", length<i>(w/o overhead)</i>=<b>"+info[3]+" ms</b>";\n'\
- ' if(info.length > 4)\n'\
- ' html += ", return=<b>"+info[4]+"</b>";\n'\
- ' html += "</t3></div>";\n'\
- ' if(mlist.length > 0) {\n'\
- ' html += \'<table class=fstat style="padding-top:\'+(maxlen*5)+\'px;"><tr><th>Function</th>\';\n'\
- ' for(var i in mlist)\n'\
- ' html += "<td class=vt>"+mlist[i][0]+"</td>";\n'\
- ' html += "</tr><tr><th>Calls</th>";\n'\
- ' for(var i in mlist)\n'\
- ' html += "<td>"+mlist[i][1]+"</td>";\n'\
- ' html += "</tr><tr><th>Time(ms)</th>";\n'\
- ' for(var i in mlist)\n'\
- ' html += "<td>"+mlist[i][2]+"</td>";\n'\
- ' html += "</tr><tr><th>Percent</th>";\n'\
- ' for(var i in mlist)\n'\
- ' html += "<td>"+mlist[i][3]+"</td>";\n'\
- ' html += "</tr></table>";\n'\
- ' }\n'\
- ' dd.innerHTML = html;\n'\
- ' var height = (maxlen*5)+100;\n'\
- ' dd.style.height = height+"px";\n'\
- ' document.getElementById("devicedetail").style.height = height+"px";\n'\
- ' }\n'\
- ' function callSelect() {\n'\
- ' var cglist = document.getElementById("callgraphs");\n'\
- ' if(!cglist) return;\n'\
- ' var cg = cglist.getElementsByClassName("atop");\n'\
- ' for (var i = 0; i < cg.length; i++) {\n'\
- ' if(this.id == cg[i].id) {\n'\
- ' cg[i].style.display = "block";\n'\
- ' } else {\n'\
- ' cg[i].style.display = "none";\n'\
- ' }\n'\
- ' }\n'\
- ' }\n'\
- ' function devListWindow(e) {\n'\
- ' var win = window.open();\n'\
- ' var html = "<title>"+e.target.innerHTML+"</title>"+\n'\
- ' "<style type=\\"text/css\\">"+\n'\
- ' " ul {list-style-type:circle;padding-left:10px;margin-left:10px;}"+\n'\
- ' "</style>"\n'\
- ' var dt = devtable[0];\n'\
- ' if(e.target.id != "devlist1")\n'\
- ' dt = devtable[1];\n'\
- ' win.document.write(html+dt);\n'\
- ' }\n'\
- ' function errWindow() {\n'\
- ' var range = this.id.split("_");\n'\
- ' var idx1 = parseInt(range[0]);\n'\
- ' var idx2 = parseInt(range[1]);\n'\
- ' var win = window.open();\n'\
- ' var log = document.getElementById("dmesglog");\n'\
- ' var title = "<title>dmesg log</title>";\n'\
- ' var text = log.innerHTML.split("\\n");\n'\
- ' var html = "";\n'\
- ' for(var i = 0; i < text.length; i++) {\n'\
- ' if(i == idx1) {\n'\
- ' html += "<e id=target>"+text[i]+"</e>\\n";\n'\
- ' } else if(i > idx1 && i <= idx2) {\n'\
- ' html += "<e>"+text[i]+"</e>\\n";\n'\
- ' } else {\n'\
- ' html += text[i]+"\\n";\n'\
- ' }\n'\
- ' }\n'\
- ' win.document.write("<style>e{color:red}</style>"+title+"<pre>"+html+"</pre>");\n'\
- ' win.location.hash = "#target";\n'\
- ' win.document.close();\n'\
- ' }\n'\
- ' function logWindow(e) {\n'\
- ' var name = e.target.id.slice(4);\n'\
- ' var win = window.open();\n'\
- ' var log = document.getElementById(name+"log");\n'\
- ' var title = "<title>"+document.title.split(" ")[0]+" "+name+" log</title>";\n'\
- ' win.document.write(title+"<pre>"+log.innerHTML+"</pre>");\n'\
- ' win.document.close();\n'\
- ' }\n'\
- ' function onMouseDown(e) {\n'\
- ' dragval[0] = e.clientX;\n'\
- ' dragval[1] = document.getElementById("dmesgzoombox").scrollLeft;\n'\
- ' document.onmousemove = onMouseMove;\n'\
- ' }\n'\
- ' function onMouseMove(e) {\n'\
- ' var zoombox = document.getElementById("dmesgzoombox");\n'\
- ' zoombox.scrollLeft = dragval[1] + dragval[0] - e.clientX;\n'\
- ' }\n'\
- ' function onMouseUp(e) {\n'\
- ' document.onmousemove = null;\n'\
- ' }\n'\
- ' function onKeyPress(e) {\n'\
- ' var c = e.charCode;\n'\
- ' if(c != 42 && c != 43 && c != 45) return;\n'\
- ' var click = document.createEvent("Events");\n'\
- ' click.initEvent("click", true, false);\n'\
- ' if(c == 43) \n'\
- ' document.getElementById("zoomin").dispatchEvent(click);\n'\
- ' else if(c == 45)\n'\
- ' document.getElementById("zoomout").dispatchEvent(click);\n'\
- ' else if(c == 42)\n'\
- ' document.getElementById("zoomdef").dispatchEvent(click);\n'\
- ' }\n'\
- ' window.addEventListener("resize", function () {zoomTimeline();});\n'\
- ' window.addEventListener("load", function () {\n'\
- ' var dmesg = document.getElementById("dmesg");\n'\
- ' dmesg.style.width = "100%"\n'\
- ' dmesg.onmousedown = onMouseDown;\n'\
- ' document.onmouseup = onMouseUp;\n'\
- ' document.onkeypress = onKeyPress;\n'\
- ' document.getElementById("zoomin").onclick = zoomTimeline;\n'\
- ' document.getElementById("zoomout").onclick = zoomTimeline;\n'\
- ' document.getElementById("zoomdef").onclick = zoomTimeline;\n'\
- ' var list = document.getElementsByClassName("err");\n'\
- ' for (var i = 0; i < list.length; i++)\n'\
- ' list[i].onclick = errWindow;\n'\
- ' var list = document.getElementsByClassName("logbtn");\n'\
- ' for (var i = 0; i < list.length; i++)\n'\
- ' list[i].onclick = logWindow;\n'\
- ' list = document.getElementsByClassName("devlist");\n'\
- ' for (var i = 0; i < list.length; i++)\n'\
- ' list[i].onclick = devListWindow;\n'\
- ' var dev = dmesg.getElementsByClassName("thread");\n'\
- ' for (var i = 0; i < dev.length; i++) {\n'\
- ' dev[i].onclick = deviceDetail;\n'\
- ' dev[i].onmouseover = deviceHover;\n'\
- ' dev[i].onmouseout = deviceUnhover;\n'\
- ' }\n'\
- ' var dev = dmesg.getElementsByClassName("srccall");\n'\
- ' for (var i = 0; i < dev.length; i++)\n'\
- ' dev[i].onclick = callSelect;\n'\
- ' zoomTimeline();\n'\
- ' });\n'\
- '</script>\n'
+ hf.write(detail);
+ script_code = r""" var resolution = -1;
+ var dragval = [0, 0];
+ function redrawTimescale(t0, tMax, tS) {
+ var rline = '<div class="t" style="left:0;border-left:1px solid black;border-right:0;">';
+ var tTotal = tMax - t0;
+ var list = document.getElementsByClassName("tblock");
+ for (var i = 0; i < list.length; i++) {
+ var timescale = list[i].getElementsByClassName("timescale")[0];
+ var m0 = t0 + (tTotal*parseFloat(list[i].style.left)/100);
+ var mTotal = tTotal*parseFloat(list[i].style.width)/100;
+ var mMax = m0 + mTotal;
+ var html = "";
+ var divTotal = Math.floor(mTotal/tS) + 1;
+ if(divTotal > 1000) continue;
+ var divEdge = (mTotal - tS*(divTotal-1))*100/mTotal;
+ var pos = 0.0, val = 0.0;
+ for (var j = 0; j < divTotal; j++) {
+ var htmlline = "";
+ var mode = list[i].id[5];
+ if(mode == "s") {
+ pos = 100 - (((j)*tS*100)/mTotal) - divEdge;
+ val = (j-divTotal+1)*tS;
+ if(j == divTotal - 1)
+ htmlline = '<div class="t" style="right:'+pos+'%"><cS>S&rarr;</cS></div>';
+ else
+ htmlline = '<div class="t" style="right:'+pos+'%">'+val+'ms</div>';
+ } else {
+ pos = 100 - (((j)*tS*100)/mTotal);
+ val = (j)*tS;
+ htmlline = '<div class="t" style="right:'+pos+'%">'+val+'ms</div>';
+ if(j == 0)
+ if(mode == "r")
+ htmlline = rline+"<cS>&larr;R</cS></div>";
+ else
+ htmlline = rline+"<cS>0ms</div>";
+ }
+ html += htmlline;
+ }
+ timescale.innerHTML = html;
+ }
+ }
+ function zoomTimeline() {
+ var dmesg = document.getElementById("dmesg");
+ var zoombox = document.getElementById("dmesgzoombox");
+ var left = zoombox.scrollLeft;
+ var val = parseFloat(dmesg.style.width);
+ var newval = 100;
+ var sh = window.outerWidth / 2;
+ if(this.id == "zoomin") {
+ newval = val * 1.2;
+ if(newval > 910034) newval = 910034;
+ dmesg.style.width = newval+"%";
+ zoombox.scrollLeft = ((left + sh) * newval / val) - sh;
+ } else if (this.id == "zoomout") {
+ newval = val / 1.2;
+ if(newval < 100) newval = 100;
+ dmesg.style.width = newval+"%";
+ zoombox.scrollLeft = ((left + sh) * newval / val) - sh;
+ } else {
+ zoombox.scrollLeft = 0;
+ dmesg.style.width = "100%";
+ }
+ var tS = [10000, 5000, 2000, 1000, 500, 200, 100, 50, 20, 10, 5, 2, 1];
+ var t0 = bounds[0];
+ var tMax = bounds[1];
+ var tTotal = tMax - t0;
+ var wTotal = tTotal * 100.0 / newval;
+ var idx = 7*window.innerWidth/1100;
+ for(var i = 0; (i < tS.length)&&((wTotal / tS[i]) < idx); i++);
+ if(i >= tS.length) i = tS.length - 1;
+ if(tS[i] == resolution) return;
+ resolution = tS[i];
+ redrawTimescale(t0, tMax, tS[i]);
+ }
+ function deviceName(title) {
+ var name = title.slice(0, title.indexOf(" ("));
+ return name;
+ }
+ function deviceHover() {
+ var name = deviceName(this.title);
+ var dmesg = document.getElementById("dmesg");
+ var dev = dmesg.getElementsByClassName("thread");
+ var cpu = -1;
+ if(name.match("CPU_ON\[[0-9]*\]"))
+ cpu = parseInt(name.slice(7));
+ else if(name.match("CPU_OFF\[[0-9]*\]"))
+ cpu = parseInt(name.slice(8));
+ for (var i = 0; i < dev.length; i++) {
+ dname = deviceName(dev[i].title);
+ var cname = dev[i].className.slice(dev[i].className.indexOf("thread"));
+ if((cpu >= 0 && dname.match("CPU_O[NF]*\\[*"+cpu+"\\]")) ||
+ (name == dname))
+ {
+ dev[i].className = "hover "+cname;
+ } else {
+ dev[i].className = cname;
+ }
+ }
+ }
+ function deviceUnhover() {
+ var dmesg = document.getElementById("dmesg");
+ var dev = dmesg.getElementsByClassName("thread");
+ for (var i = 0; i < dev.length; i++) {
+ dev[i].className = dev[i].className.slice(dev[i].className.indexOf("thread"));
+ }
+ }
+ function deviceTitle(title, total, cpu) {
+ var prefix = "Total";
+ if(total.length > 3) {
+ prefix = "Average";
+ total[1] = (total[1]+total[3])/2;
+ total[2] = (total[2]+total[4])/2;
+ }
+ var devtitle = document.getElementById("devicedetailtitle");
+ var name = deviceName(title);
+ if(cpu >= 0) name = "CPU"+cpu;
+ var driver = "";
+ var tS = "<t2>(</t2>";
+ var tR = "<t2>)</t2>";
+ if(total[1] > 0)
+ tS = "<t2>("+prefix+" Suspend:</t2><t0> "+total[1].toFixed(3)+" ms</t0> ";
+ if(total[2] > 0)
+ tR = " <t2>"+prefix+" Resume:</t2><t0> "+total[2].toFixed(3)+" ms<t2>)</t2></t0>";
+ var s = title.indexOf("{");
+ var e = title.indexOf("}");
+ if((s >= 0) && (e >= 0))
+ driver = title.slice(s+1, e) + " <t1>@</t1> ";
+ if(total[1] > 0 && total[2] > 0)
+ devtitle.innerHTML = "<t0>"+driver+name+"</t0> "+tS+tR;
+ else
+ devtitle.innerHTML = "<t0>"+title+"</t0>";
+ return name;
+ }
+ function deviceDetail() {
+ var devinfo = document.getElementById("devicedetail");
+ devinfo.style.display = "block";
+ var name = deviceName(this.title);
+ var cpu = -1;
+ if(name.match("CPU_ON\[[0-9]*\]"))
+ cpu = parseInt(name.slice(7));
+ else if(name.match("CPU_OFF\[[0-9]*\]"))
+ cpu = parseInt(name.slice(8));
+ var dmesg = document.getElementById("dmesg");
+ var dev = dmesg.getElementsByClassName("thread");
+ var idlist = [];
+ var pdata = [[]];
+ if(document.getElementById("devicedetail1"))
+ pdata = [[], []];
+ var pd = pdata[0];
+ var total = [0.0, 0.0, 0.0];
+ for (var i = 0; i < dev.length; i++) {
+ dname = deviceName(dev[i].title);
+ if((cpu >= 0 && dname.match("CPU_O[NF]*\\[*"+cpu+"\\]")) ||
+ (name == dname))
+ {
+ idlist[idlist.length] = dev[i].id;
+ var tidx = 1;
+ if(dev[i].id[0] == "a") {
+ pd = pdata[0];
+ } else {
+ if(pdata.length == 1) pdata[1] = [];
+ if(total.length == 3) total[3]=total[4]=0.0;
+ pd = pdata[1];
+ tidx = 3;
+ }
+ var info = dev[i].title.split(" ");
+ var pname = info[info.length-1];
+ pd[pname] = parseFloat(info[info.length-3].slice(1));
+ total[0] += pd[pname];
+ if(pname.indexOf("suspend") >= 0)
+ total[tidx] += pd[pname];
+ else
+ total[tidx+1] += pd[pname];
+ }
+ }
+ var devname = deviceTitle(this.title, total, cpu);
+ var left = 0.0;
+ for (var t = 0; t < pdata.length; t++) {
+ pd = pdata[t];
+ devinfo = document.getElementById("devicedetail"+t);
+ var phases = devinfo.getElementsByClassName("phaselet");
+ for (var i = 0; i < phases.length; i++) {
+ if(phases[i].id in pd) {
+ var w = 100.0*pd[phases[i].id]/total[0];
+ var fs = 32;
+ if(w < 8) fs = 4*w | 0;
+ var fs2 = fs*3/4;
+ phases[i].style.width = w+"%";
+ phases[i].style.left = left+"%";
+ phases[i].title = phases[i].id+" "+pd[phases[i].id]+" ms";
+ left += w;
+ var time = "<t4 style=\"font-size:"+fs+"px\">"+pd[phases[i].id]+" ms<br></t4>";
+ var pname = "<t3 style=\"font-size:"+fs2+"px\">"+phases[i].id.replace(new RegExp("_", "g"), " ")+"</t3>";
+ phases[i].innerHTML = time+pname;
+ } else {
+ phases[i].style.width = "0%";
+ phases[i].style.left = left+"%";
+ }
+ }
+ }
+ if(typeof devstats !== 'undefined')
+ callDetail(this.id, this.title);
+ var cglist = document.getElementById("callgraphs");
+ if(!cglist) return;
+ var cg = cglist.getElementsByClassName("atop");
+ if(cg.length < 10) return;
+ for (var i = 0; i < cg.length; i++) {
+ cgid = cg[i].id.split("x")[0]
+ if(idlist.indexOf(cgid) >= 0) {
+ cg[i].style.display = "block";
+ } else {
+ cg[i].style.display = "none";
+ }
+ }
+ }
+ function callDetail(devid, devtitle) {
+ if(!(devid in devstats) || devstats[devid].length < 1)
+ return;
+ var list = devstats[devid];
+ var tmp = devtitle.split(" ");
+ var name = tmp[0], phase = tmp[tmp.length-1];
+ var dd = document.getElementById(phase);
+ var total = parseFloat(tmp[1].slice(1));
+ var mlist = [];
+ var maxlen = 0;
+ var info = []
+ for(var i in list) {
+ if(list[i][0] == "@") {
+ info = list[i].split("|");
+ continue;
+ }
+ var tmp = list[i].split("|");
+ var t = parseFloat(tmp[0]), f = tmp[1], c = parseInt(tmp[2]);
+ var p = (t*100.0/total).toFixed(2);
+ mlist[mlist.length] = [f, c, t.toFixed(2), p+"%"];
+ if(f.length > maxlen)
+ maxlen = f.length;
+ }
+ var pad = 5;
+ if(mlist.length == 0) pad = 30;
+ var html = '<div style="padding-top:'+pad+'px"><t3> <b>'+name+':</b>';
+ if(info.length > 2)
+ html += " start=<b>"+info[1]+"</b>, end=<b>"+info[2]+"</b>";
+ if(info.length > 3)
+ html += ", length<i>(w/o overhead)</i>=<b>"+info[3]+" ms</b>";
+ if(info.length > 4)
+ html += ", return=<b>"+info[4]+"</b>";
+ html += "</t3></div>";
+ if(mlist.length > 0) {
+ html += '<table class=fstat style="padding-top:'+(maxlen*5)+'px;"><tr><th>Function</th>';
+ for(var i in mlist)
+ html += "<td class=vt>"+mlist[i][0]+"</td>";
+ html += "</tr><tr><th>Calls</th>";
+ for(var i in mlist)
+ html += "<td>"+mlist[i][1]+"</td>";
+ html += "</tr><tr><th>Time(ms)</th>";
+ for(var i in mlist)
+ html += "<td>"+mlist[i][2]+"</td>";
+ html += "</tr><tr><th>Percent</th>";
+ for(var i in mlist)
+ html += "<td>"+mlist[i][3]+"</td>";
+ html += "</tr></table>";
+ }
+ dd.innerHTML = html;
+ var height = (maxlen*5)+100;
+ dd.style.height = height+"px";
+ document.getElementById("devicedetail").style.height = height+"px";
+ }
+ function callSelect() {
+ var cglist = document.getElementById("callgraphs");
+ if(!cglist) return;
+ var cg = cglist.getElementsByClassName("atop");
+ for (var i = 0; i < cg.length; i++) {
+ if(this.id == cg[i].id) {
+ cg[i].style.display = "block";
+ } else {
+ cg[i].style.display = "none";
+ }
+ }
+ }
+ function devListWindow(e) {
+ var win = window.open();
+ var html = "<title>"+e.target.innerHTML+"</title>"+
+ "<style type=\"text/css\">"+
+ " ul {list-style-type:circle;padding-left:10px;margin-left:10px;}"+
+ "</style>"
+ var dt = devtable[0];
+ if(e.target.id != "devlist1")
+ dt = devtable[1];
+ win.document.write(html+dt);
+ }
+ function errWindow() {
+ var range = this.id.split("_");
+ var idx1 = parseInt(range[0]);
+ var idx2 = parseInt(range[1]);
+ var win = window.open();
+ var log = document.getElementById("dmesglog");
+ var title = "<title>dmesg log</title>";
+ var text = log.innerHTML.split("\n");
+ var html = "";
+ for(var i = 0; i < text.length; i++) {
+ if(i == idx1) {
+ html += "<e id=target>"+text[i]+"</e>\n";
+ } else if(i > idx1 && i <= idx2) {
+ html += "<e>"+text[i]+"</e>\n";
+ } else {
+ html += text[i]+"\n";
+ }
+ }
+ win.document.write("<style>e{color:red}</style>"+title+"<pre>"+html+"</pre>");
+ win.location.hash = "#target";
+ win.document.close();
+ }
+ function logWindow(e) {
+ var name = e.target.id.slice(4);
+ var win = window.open();
+ var log = document.getElementById(name+"log");
+ var title = "<title>"+document.title.split(" ")[0]+" "+name+" log</title>";
+ win.document.write(title+"<pre>"+log.innerHTML+"</pre>");
+ win.document.close();
+ }
+ function onMouseDown(e) {
+ dragval[0] = e.clientX;
+ dragval[1] = document.getElementById("dmesgzoombox").scrollLeft;
+ document.onmousemove = onMouseMove;
+ }
+ function onMouseMove(e) {
+ var zoombox = document.getElementById("dmesgzoombox");
+ zoombox.scrollLeft = dragval[1] + dragval[0] - e.clientX;
+ }
+ function onMouseUp(e) {
+ document.onmousemove = null;
+ }
+ function onKeyPress(e) {
+ var c = e.charCode;
+ if(c != 42 && c != 43 && c != 45) return;
+ var click = document.createEvent("Events");
+ click.initEvent("click", true, false);
+ if(c == 43)
+ document.getElementById("zoomin").dispatchEvent(click);
+ else if(c == 45)
+ document.getElementById("zoomout").dispatchEvent(click);
+ else if(c == 42)
+ document.getElementById("zoomdef").dispatchEvent(click);
+ }
+ window.addEventListener("resize", function () {zoomTimeline();});
+ window.addEventListener("load", function () {
+ var dmesg = document.getElementById("dmesg");
+ dmesg.style.width = "100%"
+ dmesg.onmousedown = onMouseDown;
+ document.onmouseup = onMouseUp;
+ document.onkeypress = onKeyPress;
+ document.getElementById("zoomin").onclick = zoomTimeline;
+ document.getElementById("zoomout").onclick = zoomTimeline;
+ document.getElementById("zoomdef").onclick = zoomTimeline;
+ var list = document.getElementsByClassName("err");
+ for (var i = 0; i < list.length; i++)
+ list[i].onclick = errWindow;
+ var list = document.getElementsByClassName("logbtn");
+ for (var i = 0; i < list.length; i++)
+ list[i].onclick = logWindow;
+ list = document.getElementsByClassName("devlist");
+ for (var i = 0; i < list.length; i++)
+ list[i].onclick = devListWindow;
+ var dev = dmesg.getElementsByClassName("thread");
+ for (var i = 0; i < dev.length; i++) {
+ dev[i].onclick = deviceDetail;
+ dev[i].onmouseover = deviceHover;
+ dev[i].onmouseout = deviceUnhover;
+ }
+ var dev = dmesg.getElementsByClassName("srccall");
+ for (var i = 0; i < dev.length; i++)
+ dev[i].onclick = callSelect;
+ zoomTimeline();
+ });
+</script> """
hf.write(script_code);
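
Rewriting the embedded JavaScript as one raw triple-quoted string removes the per-line quoting, concatenation and doubled backslashes of the old version: inside an r"""...""" block, backslashes and quotes pass through to the output untouched. A small standalone sketch of the property being relied on:

    # in a raw string the backslash stays literal, so JS such as split("\n")
    # can be written exactly as it must appear in the generated page
    snippet = r'''var text = log.innerHTML.split("\n");'''
    print(snippet)   # -> var text = log.innerHTML.split("\n");
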
# Function: executeSuspend
@@ -5524,7 +5525,9 @@ def executeSuspend(quiet=False):
if ((mode == 'freeze') or (sv.memmode == 's2idle')) \
and sv.haveTurbostat():
# execution will pause here
- turbo = sv.turbostat(s0ixready)
+ retval, turbo = sv.turbostat(s0ixready)
+ if retval != 0:
+ tdata['error'] = 'turbostat returned %d' % retval
if turbo:
tdata['turbo'] = turbo
else:
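
With this hunk the call site expects sysvals.turbostat() to return the tool's exit status alongside its parsed output, so a failing run is recorded in the test data rather than silently ignored. A rough sketch of a wrapper with that shape (the command line and options here are illustrative, not taken from the patch):

    import subprocess

    def run_turbostat(cmd=('turbostat', '--quiet', 'sleep', '1')):
        # return (exit status, stdout) so the caller can log failures;
        # needs the turbostat utility installed and normally root privileges
        p = subprocess.run(cmd, capture_output=True, text=True)
        return p.returncode, p.stdout

    retval, turbo = run_turbostat()
    if retval != 0:
        print('turbostat returned %d' % retval)
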
@@ -5532,6 +5535,7 @@ def executeSuspend(quiet=False):
pf.write(mode)
# execution will pause here
try:
+ pf.flush()
pf.close()
except Exception as e:
tdata['error'] = str(e)
@@ -5633,7 +5637,7 @@ def deviceInfo(output=''):
tgtval = 'runtime_status'
lines = dict()
for dirname, dirnames, filenames in os.walk('/sys/devices'):
- if(not re.match('.*/power', dirname) or
+ if(not re.match(r'.*/power', dirname) or
'control' not in filenames or
tgtval not in filenames):
continue
@@ -5702,6 +5706,40 @@ def getModes():
fp.close()
return modes
+def dmidecode_backup(out, fatal=False):
+ cpath, spath, info = '/proc/cpuinfo', '/sys/class/dmi/id', {
+ 'bios-vendor': 'bios_vendor',
+ 'bios-version': 'bios_version',
+ 'bios-release-date': 'bios_date',
+ 'system-manufacturer': 'sys_vendor',
+ 'system-product-name': 'product_name',
+ 'system-version': 'product_version',
+ 'system-serial-number': 'product_serial',
+ 'baseboard-manufacturer': 'board_vendor',
+ 'baseboard-product-name': 'board_name',
+ 'baseboard-version': 'board_version',
+ 'baseboard-serial-number': 'board_serial',
+ 'chassis-manufacturer': 'chassis_vendor',
+ 'chassis-version': 'chassis_version',
+ 'chassis-serial-number': 'chassis_serial',
+ }
+ for key in info:
+ if key not in out:
+ val = sysvals.getVal(os.path.join(spath, info[key])).strip()
+ if val and val.lower() != 'to be filled by o.e.m.':
+ out[key] = val
+ if 'processor-version' not in out and os.path.exists(cpath):
+ with open(cpath, 'r') as fp:
+ for line in fp:
+ m = re.match(r'^model\s*name\s*\:\s*(?P<c>.*)', line)
+ if m:
+ out['processor-version'] = m.group('c').strip()
+ break
+ if fatal and len(out) < 1:
+ doError('dmidecode failed to get info from %s or %s' % \
+ (sysvals.mempath, spath))
+ return out
+
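
When /dev/mem is absent or unreadable, the new dmidecode_backup() pulls the same identifiers straight from /sys/class/dmi/id (and the CPU model from /proc/cpuinfo). A trimmed standalone sketch of the sysfs side, with only a couple of keys from the table above:

    import os

    def dmi_from_sysfs(spath='/sys/class/dmi/id'):
        # subset of the dmidecode-key -> sysfs-attribute mapping used above
        keys = {'bios-version': 'bios_version', 'system-product-name': 'product_name'}
        out = dict()
        for key, attr in keys.items():
            path = os.path.join(spath, attr)
            if os.path.exists(path):
                val = open(path).read().strip()
                if val and val.lower() != 'to be filled by o.e.m.':
                    out[key] = val
        return out

    print(dmi_from_sysfs())
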
# Function: dmidecode
# Description:
# Read the bios tables and pull out system info
@@ -5712,6 +5750,8 @@ def getModes():
# A dict object with all available key/values
def dmidecode(mempath, fatal=False):
out = dict()
+ if(not (os.path.exists(mempath) and os.access(mempath, os.R_OK))):
+ return dmidecode_backup(out, fatal)
# the list of values to retrieve, with hardcoded (type, idx)
info = {
@@ -5727,24 +5767,14 @@ def dmidecode(mempath, fatal=False):
'baseboard-version': (2, 6),
'baseboard-serial-number': (2, 7),
'chassis-manufacturer': (3, 4),
- 'chassis-type': (3, 5),
'chassis-version': (3, 6),
'chassis-serial-number': (3, 7),
'processor-manufacturer': (4, 7),
'processor-version': (4, 16),
}
- if(not os.path.exists(mempath)):
- if(fatal):
- doError('file does not exist: %s' % mempath)
- return out
- if(not os.access(mempath, os.R_OK)):
- if(fatal):
- doError('file is not readable: %s' % mempath)
- return out
# by default use legacy scan, but try to use EFI first
- memaddr = 0xf0000
- memsize = 0x10000
+ memaddr, memsize = 0xf0000, 0x10000
for ep in ['/sys/firmware/efi/systab', '/proc/efi/systab']:
if not os.path.exists(ep) or not os.access(ep, os.R_OK):
continue
@@ -5765,11 +5795,7 @@ def dmidecode(mempath, fatal=False):
fp.seek(memaddr)
buf = fp.read(memsize)
except:
- if(fatal):
- doError('DMI table is unreachable, sorry')
- else:
- pprint('WARNING: /dev/mem is not readable, ignoring DMI data')
- return out
+ return dmidecode_backup(out, fatal)
fp.close()
# search for either an SM table or DMI table
@@ -5785,10 +5811,7 @@ def dmidecode(mempath, fatal=False):
break
i += 16
if base == 0 and length == 0 and num == 0:
- if(fatal):
- doError('Neither SMBIOS nor DMI were found')
- else:
- return out
+ return dmidecode_backup(out, fatal)
# read in the SM or DMI table
try:
@@ -5796,11 +5819,7 @@ def dmidecode(mempath, fatal=False):
fp.seek(base)
buf = fp.read(length)
except:
- if(fatal):
- doError('DMI table is unreachable, sorry')
- else:
- pprint('WARNING: /dev/mem is not readable, ignoring DMI data')
- return out
+ return dmidecode_backup(out, fatal)
fp.close()
# scan the table for the values we want
@@ -6272,7 +6291,10 @@ def find_in_html(html, start, end, firstonly=True):
return out
def data_from_html(file, outpath, issues, fulldetail=False):
- html = open(file, 'r').read()
+ try:
+ html = open(file, 'r').read()
+ except:
+ html = ascii(open(file, 'rb').read())
sysvals.htmlfile = os.path.relpath(file, outpath)
# extract general info
suspend = find_in_html(html, 'Kernel Suspend', 'ms')
@@ -6290,7 +6312,7 @@ def data_from_html(file, outpath, issues, fulldetail=False):
tstr = dt.strftime('%Y/%m/%d %H:%M:%S')
error = find_in_html(html, '<table class="testfail"><tr><td>', '</td>')
if error:
- m = re.match('[a-z0-9]* failed in (?P<p>\S*).*', error)
+ m = re.match(r'[a-z0-9]* failed in (?P<p>\S*).*', error)
if m:
result = 'fail in %s' % m.group('p')
else:
@@ -6307,8 +6329,9 @@ def data_from_html(file, outpath, issues, fulldetail=False):
d.end = 999999999
d.dmesgtext = log.split('\n')
tp = d.extractErrorInfo()
- for msg in tp.msglist:
- sysvals.errorSummary(issues, msg)
+ if len(issues) < 100:
+ for msg in tp.msglist:
+ sysvals.errorSummary(issues, msg)
if stmp[2] == 'freeze':
extra = d.turbostatInfo()
elist = dict()
@@ -6325,6 +6348,11 @@ def data_from_html(file, outpath, issues, fulldetail=False):
line = find_in_html(log, '# netfix ', '\n')
if line:
extra['netfix'] = line
+ line = find_in_html(log, '# command ', '\n')
+ if line:
+ m = re.match(r'.* -m (?P<m>\S*).*', line)
+ if m:
+ extra['fullmode'] = m.group('m')
low = find_in_html(html, 'freeze time: <b>', ' ms</b>')
for lowstr in ['waking', '+']:
if not low:
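
The 'fullmode' value added here is recovered from the '# command' stamp that sleepgraph writes into its own log, so the summary table (the new d[15] column earlier in the patch) can show the mode exactly as it was requested on the command line. A minimal illustration of the extraction, with a made-up sample line:

    import re

    line = 'sleepgraph -m mem -rtcwake 15'     # illustrative logged command line
    m = re.match(r'.* -m (?P<m>\S*).*', line)
    print(m.group('m') if m else 'unknown')    # -> mem
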
@@ -6334,7 +6362,7 @@ def data_from_html(file, outpath, issues, fulldetail=False):
if lowstr == '+':
issue = 'S2LOOPx%d' % len(low.split('+'))
else:
- m = re.match('.*waking *(?P<n>[0-9]*) *times.*', low)
+ m = re.match(r'.*waking *(?P<n>[0-9]*) *times.*', low)
issue = 'S2WAKEx%s' % m.group('n') if m else 'S2WAKExNaN'
match = [i for i in issues if i['match'] == issue]
if len(match) > 0:
@@ -6352,10 +6380,10 @@ def data_from_html(file, outpath, issues, fulldetail=False):
# extract device info
devices = dict()
for line in html.split('\n'):
- m = re.match(' *<div id=\"[a,0-9]*\" *title=\"(?P<title>.*)\" class=\"thread.*', line)
+ m = re.match(r' *<div id=\"[a,0-9]*\" *title=\"(?P<title>.*)\" class=\"thread.*', line)
if not m or 'thread kth' in line or 'thread sec' in line:
continue
- m = re.match('(?P<n>.*) \((?P<t>[0-9,\.]*) ms\) (?P<p>.*)', m.group('title'))
+ m = re.match(r'(?P<n>.*) \((?P<t>[0-9,\.]*) ms\) (?P<p>.*)', m.group('title'))
if not m:
continue
name, time, phase = m.group('n'), m.group('t'), m.group('p')
@@ -6416,9 +6444,9 @@ def genHtml(subdir, force=False):
for filename in filenames:
file = os.path.join(dirname, filename)
if sysvals.usable(file):
- if(re.match('.*_dmesg.txt', filename)):
+ if(re.match(r'.*_dmesg.txt', filename)):
sysvals.dmesgfile = file
- elif(re.match('.*_ftrace.txt', filename)):
+ elif(re.match(r'.*_ftrace.txt', filename)):
sysvals.ftracefile = file
sysvals.setOutputFile()
if (sysvals.dmesgfile or sysvals.ftracefile) and sysvals.htmlfile and \
@@ -6441,7 +6469,7 @@ def runSummary(subdir, local=True, genhtml=False):
desc = {'host':[],'mode':[],'kernel':[]}
for dirname, dirnames, filenames in os.walk(subdir):
for filename in filenames:
- if(not re.match('.*.html', filename)):
+ if(not re.match(r'.*.html', filename)):
continue
data = data_from_html(os.path.join(dirname, filename), outpath, issues)
if(not data):