Diffstat (limited to 'drivers')
861 files changed, 34021 insertions, 10296 deletions
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index 39ea5cfa8326..61ca4afe83dc 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -77,6 +77,7 @@ obj-$(CONFIG_ACPI_TINY_POWER_BUTTON) += tiny-power-button.o obj-$(CONFIG_ACPI_FAN) += fan.o fan-objs := fan_core.o fan-objs += fan_attr.o +fan-$(CONFIG_HWMON) += fan_hwmon.o obj-$(CONFIG_ACPI_VIDEO) += video.o obj-$(CONFIG_ACPI_TAD) += acpi_tad.o diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c index 09a87fa222c7..eaa70b23dd0b 100644 --- a/drivers/acpi/ac.c +++ b/drivers/acpi/ac.c @@ -112,7 +112,7 @@ static int get_ac_property(struct power_supply *psy, return 0; } -static enum power_supply_property ac_props[] = { +static const enum power_supply_property ac_props[] = { POWER_SUPPLY_PROP_ONLINE, }; diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c index bd1ad07f0290..350d3a892889 100644 --- a/drivers/acpi/acpi_pad.c +++ b/drivers/acpi/acpi_pad.c @@ -25,6 +25,10 @@ #define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad" #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator" #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80 + +#define ACPI_PROCESSOR_AGGREGATOR_STATUS_SUCCESS 0 +#define ACPI_PROCESSOR_AGGREGATOR_STATUS_NO_ACTION 1 + static DEFINE_MUTEX(isolated_cpus_lock); static DEFINE_MUTEX(round_robin_lock); @@ -382,16 +386,23 @@ static void acpi_pad_handle_notify(acpi_handle handle) .length = 4, .pointer = (void *)&idle_cpus, }; + u32 status; mutex_lock(&isolated_cpus_lock); num_cpus = acpi_pad_pur(handle); if (num_cpus < 0) { - mutex_unlock(&isolated_cpus_lock); - return; + /* The ACPI specification says that if no action was performed when + * processing the _PUR object, _OST should still be evaluated, albeit + * with a different status code. + */ + status = ACPI_PROCESSOR_AGGREGATOR_STATUS_NO_ACTION; + } else { + status = ACPI_PROCESSOR_AGGREGATOR_STATUS_SUCCESS; + acpi_pad_idle_cpus(num_cpus); } - acpi_pad_idle_cpus(num_cpus); + idle_cpus = acpi_pad_idle_cpus_num(); - acpi_evaluate_ost(handle, ACPI_PROCESSOR_AGGREGATOR_NOTIFY, 0, &param); + acpi_evaluate_ost(handle, ACPI_PROCESSOR_AGGREGATOR_NOTIFY, status, &param); mutex_unlock(&isolated_cpus_lock); } diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c index 7a0dd35d62c9..9916cc7ced39 100644 --- a/drivers/acpi/acpi_processor.c +++ b/drivers/acpi/acpi_processor.c @@ -35,6 +35,17 @@ EXPORT_PER_CPU_SYMBOL(processors); struct acpi_processor_errata errata __read_mostly; EXPORT_SYMBOL_GPL(errata); +acpi_handle acpi_get_processor_handle(int cpu) +{ + struct acpi_processor *pr; + + pr = per_cpu(processors, cpu); + if (pr) + return pr->handle; + + return NULL; +} + static int acpi_processor_errata_piix4(struct pci_dev *dev) { u8 value1 = 0; @@ -183,20 +194,44 @@ static void __init acpi_pcc_cpufreq_init(void) {} #endif /* CONFIG_X86 */ /* Initialization */ +static DEFINE_PER_CPU(void *, processor_device_array); + +static int acpi_processor_set_per_cpu(struct acpi_processor *pr, + struct acpi_device *device) +{ + BUG_ON(pr->id >= nr_cpu_ids); + + /* + * Buggy BIOS check. + * ACPI id of processors can be reported wrongly by the BIOS. + * Don't trust it blindly + */ + if (per_cpu(processor_device_array, pr->id) != NULL && + per_cpu(processor_device_array, pr->id) != device) { + dev_warn(&device->dev, + "BIOS reported wrong ACPI id %d for the processor\n", + pr->id); + return -EINVAL; + } + /* + * processor_device_array is not cleared on errors to allow buggy BIOS + * checks.
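The buggy-BIOS guard in acpi_processor_set_per_cpu() above reduces to a claim-once table keyed by CPU id: the first device to claim an id wins, a repeat claim by the same device is idempotent, and a conflicting claim is rejected while the slot stays populated so later duplicates are still caught. A minimal userspace sketch of that logic (a plain array stands in for per_cpu() and opaque pointers for struct acpi_device; all names are illustrative, not kernel API):

/*
 * Claim-once table sketch: the slot is intentionally left populated on
 * a conflict so that further duplicates from buggy firmware keep being
 * detected.
 */
#include <stdio.h>

#define NR_CPU_IDS 8

static const void *processor_device_array[NR_CPU_IDS];

static int set_per_cpu(unsigned int id, const void *device)
{
	if (id >= NR_CPU_IDS)
		return -1;

	/* A different device already claimed this id: buggy firmware. */
	if (processor_device_array[id] != NULL &&
	    processor_device_array[id] != device) {
		fprintf(stderr, "BIOS reported wrong ACPI id %u\n", id);
		return -1;
	}

	processor_device_array[id] = device;
	return 0;
}

int main(void)
{
	int dev_a, dev_b;

	printf("%d\n", set_per_cpu(1, &dev_a));	/* 0: first claim wins */
	printf("%d\n", set_per_cpu(1, &dev_a));	/* 0: same device is fine */
	printf("%d\n", set_per_cpu(1, &dev_b));	/* -1: conflicting claim */
	return 0;
}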
+ */ + per_cpu(processor_device_array, pr->id) = device; + per_cpu(processors, pr->id) = pr; + + return 0; +} + #ifdef CONFIG_ACPI_HOTPLUG_CPU -static int acpi_processor_hotadd_init(struct acpi_processor *pr) +static int acpi_processor_hotadd_init(struct acpi_processor *pr, + struct acpi_device *device) { - unsigned long long sta; - acpi_status status; int ret; if (invalid_phys_cpuid(pr->phys_id)) return -ENODEV; - status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta); - if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_PRESENT)) - return -ENODEV; - cpu_maps_update_begin(); cpus_write_lock(); @@ -204,19 +239,26 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr) if (ret) goto out; + ret = acpi_processor_set_per_cpu(pr, device); + if (ret) { + acpi_unmap_cpu(pr->id); + goto out; + } + ret = arch_register_cpu(pr->id); if (ret) { + /* Leave the processor device array in place to detect buggy bios */ + per_cpu(processors, pr->id) = NULL; acpi_unmap_cpu(pr->id); goto out; } /* - * CPU got hot-added, but cpu_data is not initialized yet. Set a flag - * to delay cpu_idle/throttling initialization and do it when the CPU - * gets online for the first time. + * CPU got hot-added, but cpu_data is not initialized yet. Do + * cpu_idle/throttling initialization when the CPU gets online for + * the first time. */ pr_info("CPU%d has been hot-added\n", pr->id); - pr->flags.need_hotplug_init = 1; out: cpus_write_unlock(); @@ -224,7 +266,8 @@ out: return ret; } #else -static inline int acpi_processor_hotadd_init(struct acpi_processor *pr) +static inline int acpi_processor_hotadd_init(struct acpi_processor *pr, + struct acpi_device *device) { return -ENODEV; } @@ -239,6 +282,7 @@ static int acpi_processor_get_info(struct acpi_device *device) acpi_status status = AE_OK; static int cpu0_initialized; unsigned long long value; + int ret; acpi_processor_errata(); @@ -315,19 +359,19 @@ static int acpi_processor_get_info(struct acpi_device *device) } /* - * Extra Processor objects may be enumerated on MP systems with - * less than the max # of CPUs. They should be ignored _iff - * they are physically not present. - * - * NOTE: Even if the processor has a cpuid, it may not be present - * because cpuid <-> apicid mapping is persistent now. + * This code is not called unless we know the CPU is present and + * enabled. The two paths are: + * a) Initially present CPUs on architectures that do not defer + * their arch_register_cpu() calls until this point. + * b) Hotplugged CPUs (enabled bit in _STA has transitioned from not + * enabled to enabled) */ - if (invalid_logical_cpuid(pr->id) || !cpu_present(pr->id)) { - int ret = acpi_processor_hotadd_init(pr); - - if (ret) - return ret; - } + if (!get_cpu_device(pr->id)) + ret = acpi_processor_hotadd_init(pr, device); + else + ret = acpi_processor_set_per_cpu(pr, device); + if (ret) + return ret; /* * On some boxes several processors use the same processor bus id. @@ -372,8 +416,6 @@ static int acpi_processor_get_info(struct acpi_device *device) * (cpu_data(cpu)) values, like CPU feature flags, family, model, etc. * Such things have to be put in and set up by the processor driver's .probe(). 
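The reworked acpi_processor_get_info() above now chooses between exactly two paths: a CPU whose device is already registered only needs its per-CPU data bound, while anything else goes through the hot-add path. A compilable toy model of that dispatch, with stub functions standing in for the kernel helpers (the names and the registered-CPU table are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for get_cpu_device(): which CPUs already have a device. */
static const bool cpu_device_registered[4] = { true, true, false, false };

static int hotadd_init(unsigned int cpu)
{
	printf("cpu%u: hot-add path (map, register, bind)\n", cpu);
	return 0;
}

static int set_per_cpu(unsigned int cpu)
{
	printf("cpu%u: already registered, bind per-CPU data only\n", cpu);
	return 0;
}

static int get_info(unsigned int cpu)
{
	/* Mirrors: !get_cpu_device(pr->id) ? hotadd : set_per_cpu */
	return cpu_device_registered[cpu] ? set_per_cpu(cpu)
					  : hotadd_init(cpu);
}

int main(void)
{
	for (unsigned int cpu = 0; cpu < 4; cpu++)
		get_info(cpu);
	return 0;
}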
*/ -static DEFINE_PER_CPU(void *, processor_device_array); - static int acpi_processor_add(struct acpi_device *device, const struct acpi_device_id *id) { @@ -400,39 +442,17 @@ static int acpi_processor_add(struct acpi_device *device, result = acpi_processor_get_info(device); if (result) /* Processor is not physically present or unavailable */ - return 0; - - BUG_ON(pr->id >= nr_cpu_ids); - - /* - * Buggy BIOS check. - * ACPI id of processors can be reported wrongly by the BIOS. - * Don't trust it blindly - */ - if (per_cpu(processor_device_array, pr->id) != NULL && - per_cpu(processor_device_array, pr->id) != device) { - dev_warn(&device->dev, - "BIOS reported wrong ACPI id %d for the processor\n", - pr->id); - /* Give up, but do not abort the namespace scan. */ - goto err; - } - /* - * processor_device_array is not cleared on errors to allow buggy BIOS - * checks. - */ - per_cpu(processor_device_array, pr->id) = device; - per_cpu(processors, pr->id) = pr; + goto err_clear_driver_data; dev = get_cpu_device(pr->id); if (!dev) { result = -ENODEV; - goto err; + goto err_clear_per_cpu; } result = acpi_bind_one(dev, device); if (result) - goto err; + goto err_clear_per_cpu; pr->dev = dev; @@ -443,10 +463,11 @@ static int acpi_processor_add(struct acpi_device *device, dev_err(dev, "Processor driver could not be attached\n"); acpi_unbind_one(dev); - err: - free_cpumask_var(pr->throttling.shared_cpu_map); - device->driver_data = NULL; + err_clear_per_cpu: per_cpu(processors, pr->id) = NULL; + err_clear_driver_data: + device->driver_data = NULL; + free_cpumask_var(pr->throttling.shared_cpu_map); err_free_pr: kfree(pr); return result; @@ -454,7 +475,7 @@ static int acpi_processor_add(struct acpi_device *device, #ifdef CONFIG_ACPI_HOTPLUG_CPU /* Removal */ -static void acpi_processor_remove(struct acpi_device *device) +static void acpi_processor_post_eject(struct acpi_device *device) { struct acpi_processor *pr; @@ -476,10 +497,6 @@ static void acpi_processor_remove(struct acpi_device *device) device_release_driver(pr->dev); acpi_unbind_one(pr->dev); - /* Clean up. */ - per_cpu(processor_device_array, pr->id) = NULL; - per_cpu(processors, pr->id) = NULL; - cpu_maps_update_begin(); cpus_write_lock(); @@ -487,6 +504,10 @@ static void acpi_processor_remove(struct acpi_device *device) arch_unregister_cpu(pr->id); acpi_unmap_cpu(pr->id); + /* Clean up. 
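The relabeled error path in acpi_processor_add() earlier in this hunk follows the standard kernel unwind idiom: resources are released in exact reverse order of acquisition, with one goto label per step. A self-contained sketch of the pattern (malloc/free model the real allocations; the failure injection and label names are illustrative):

#include <stdio.h>
#include <stdlib.h>

static int do_add(int fail_step)
{
	void *pr, *mask;

	pr = malloc(16);		/* like kzalloc(sizeof(*pr)) */
	if (!pr)
		return -1;

	mask = malloc(16);		/* like zalloc_cpumask_var() */
	if (!mask)
		goto err_free_pr;

	if (fail_step == 1)		/* e.g. get_info() failed */
		goto err_free_mask;

	printf("added\n");
	free(mask);			/* toy only; the driver keeps these */
	free(pr);
	return 0;

err_free_mask:				/* unwind newest-first */
	free(mask);
err_free_pr:
	free(pr);
	return -1;
}

int main(void)
{
	do_add(0);
	do_add(1);
	return 0;
}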
*/ + per_cpu(processor_device_array, pr->id) = NULL; + per_cpu(processors, pr->id) = NULL; + cpus_write_unlock(); cpu_maps_update_done(); @@ -598,9 +619,9 @@ static bool __init acpi_early_processor_osc(void) void __init acpi_early_processor_control_setup(void) { if (acpi_early_processor_osc()) { - pr_info("_OSC evaluated successfully for all CPUs\n"); + pr_debug("_OSC evaluated successfully for all CPUs\n"); } else { - pr_info("_OSC evaluation for CPUs failed, trying _PDC\n"); + pr_debug("_OSC evaluation for CPUs failed, trying _PDC\n"); acpi_early_processor_set_pdc(); } } @@ -622,7 +643,7 @@ static struct acpi_scan_handler processor_handler = { .ids = processor_device_ids, .attach = acpi_processor_add, #ifdef CONFIG_ACPI_HOTPLUG_CPU - .detach = acpi_processor_remove, + .post_eject = acpi_processor_post_eject, #endif .hotplug = { .enabled = true, diff --git a/drivers/acpi/acpi_tad.c b/drivers/acpi/acpi_tad.c index 1d670dbe4d1d..b831cb8e53dc 100644 --- a/drivers/acpi/acpi_tad.c +++ b/drivers/acpi/acpi_tad.c @@ -27,6 +27,7 @@ #include <linux/pm_runtime.h> #include <linux/suspend.h> +MODULE_DESCRIPTION("ACPI Time and Alarm (TAD) Device Driver"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Rafael J. Wysocki"); diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c index 1fda30388297..8274a17872ed 100644 --- a/drivers/acpi/acpi_video.c +++ b/drivers/acpi/acpi_video.c @@ -1128,8 +1128,8 @@ static int acpi_video_bus_get_one_device(struct acpi_device *device, void *arg) return -ENOMEM; } - strcpy(acpi_device_name(device), ACPI_VIDEO_DEVICE_NAME); - strcpy(acpi_device_class(device), ACPI_VIDEO_CLASS); + strscpy(acpi_device_name(device), ACPI_VIDEO_DEVICE_NAME); + strscpy(acpi_device_class(device), ACPI_VIDEO_CLASS); data->device_id = device_id; data->video = video; @@ -2010,8 +2010,8 @@ static int acpi_video_bus_add(struct acpi_device *device) } video->device = device; - strcpy(acpi_device_name(device), ACPI_VIDEO_BUS_NAME); - strcpy(acpi_device_class(device), ACPI_VIDEO_CLASS); + strscpy(acpi_device_name(device), ACPI_VIDEO_BUS_NAME); + strscpy(acpi_device_class(device), ACPI_VIDEO_CLASS); device->driver_data = video; acpi_video_bus_find_cap(video); diff --git a/drivers/acpi/arm64/Makefile b/drivers/acpi/arm64/Makefile index 726944648c9b..05ecde9eaabe 100644 --- a/drivers/acpi/arm64/Makefile +++ b/drivers/acpi/arm64/Makefile @@ -1,8 +1,10 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_ACPI_AGDI) += agdi.o -obj-$(CONFIG_ACPI_IORT) += iort.o -obj-$(CONFIG_ACPI_GTDT) += gtdt.o obj-$(CONFIG_ACPI_APMT) += apmt.o +obj-$(CONFIG_ACPI_FFH) += ffh.o +obj-$(CONFIG_ACPI_GTDT) += gtdt.o +obj-$(CONFIG_ACPI_IORT) += iort.o +obj-$(CONFIG_ACPI_PROCESSOR_IDLE) += cpuidle.o obj-$(CONFIG_ARM_AMBA) += amba.o obj-y += dma.o init.o obj-y += thermal_cpufreq.o diff --git a/drivers/acpi/arm64/amba.c b/drivers/acpi/arm64/amba.c index e1f0bbb8f393..1350083bce5f 100644 --- a/drivers/acpi/arm64/amba.c +++ b/drivers/acpi/arm64/amba.c @@ -27,11 +27,7 @@ static const struct acpi_device_id amba_id_list[] = { static void amba_register_dummy_clk(void) { - static struct clk *amba_dummy_clk; - - /* If clock already registered */ - if (amba_dummy_clk) - return; + struct clk *amba_dummy_clk; amba_dummy_clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, 0, 0); clk_register_clkdev(amba_dummy_clk, "apb_pclk", NULL); diff --git a/drivers/acpi/arm64/cpuidle.c b/drivers/acpi/arm64/cpuidle.c new file mode 100644 index 000000000000..801f9c450142 --- /dev/null +++ b/drivers/acpi/arm64/cpuidle.c @@ -0,0 +1,70 @@ +// 
SPDX-License-Identifier: GPL-2.0-only +/* + * ARM64 CPU idle arch support + * + * Copyright (C) 2014 ARM Ltd. + * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> + */ + +#include <linux/acpi.h> +#include <linux/cpuidle.h> +#include <linux/cpu_pm.h> +#include <linux/psci.h> +#include <acpi/processor.h> + +#define ARM64_LPI_IS_RETENTION_STATE(arch_flags) (!(arch_flags)) + +static int psci_acpi_cpu_init_idle(unsigned int cpu) +{ + int i, count; + struct acpi_lpi_state *lpi; + struct acpi_processor *pr = per_cpu(processors, cpu); + + if (unlikely(!pr || !pr->flags.has_lpi)) + return -EINVAL; + + /* + * If the PSCI cpu_suspend function hook has not been initialized + * idle states must not be enabled, so bail out + */ + if (!psci_ops.cpu_suspend) + return -EOPNOTSUPP; + + count = pr->power.count - 1; + if (count <= 0) + return -ENODEV; + + for (i = 0; i < count; i++) { + u32 state; + + lpi = &pr->power.lpi_states[i + 1]; + /* + * Only bits[31:0] represent a PSCI power_state while + * bits[63:32] must be 0x0 as per ARM ACPI FFH Specification + */ + state = lpi->address; + if (!psci_power_state_is_valid(state)) { + pr_warn("Invalid PSCI power state %#x\n", state); + return -EINVAL; + } + } + + return 0; +} + +int acpi_processor_ffh_lpi_probe(unsigned int cpu) +{ + return psci_acpi_cpu_init_idle(cpu); +} + +__cpuidle int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi) +{ + u32 state = lpi->address; + + if (ARM64_LPI_IS_RETENTION_STATE(lpi->arch_flags)) + return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM_RCU(psci_cpu_suspend_enter, + lpi->index, state); + else + return CPU_PM_CPU_IDLE_ENTER_PARAM_RCU(psci_cpu_suspend_enter, + lpi->index, state); +} diff --git a/drivers/acpi/arm64/ffh.c b/drivers/acpi/arm64/ffh.c new file mode 100644 index 000000000000..877edc6557e9 --- /dev/null +++ b/drivers/acpi/arm64/ffh.c @@ -0,0 +1,107 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include <linux/acpi.h> +#include <linux/arm-smccc.h> +#include <linux/slab.h> + +/* + * Implements ARM64 specific callbacks to support ACPI FFH Operation Region as + * specified in https://developer.arm.com/docs/den0048/latest + */ +struct acpi_ffh_data { + struct acpi_ffh_info info; + void (*invoke_ffh_fn)(unsigned long a0, unsigned long a1, + unsigned long a2, unsigned long a3, + unsigned long a4, unsigned long a5, + unsigned long a6, unsigned long a7, + struct arm_smccc_res *args, + struct arm_smccc_quirk *res); + void (*invoke_ffh64_fn)(const struct arm_smccc_1_2_regs *args, + struct arm_smccc_1_2_regs *res); +}; + +int acpi_ffh_address_space_arch_setup(void *handler_ctxt, void **region_ctxt) +{ + enum arm_smccc_conduit conduit; + struct acpi_ffh_data *ffh_ctxt; + + if (arm_smccc_get_version() < ARM_SMCCC_VERSION_1_2) + return -EOPNOTSUPP; + + conduit = arm_smccc_1_1_get_conduit(); + if (conduit == SMCCC_CONDUIT_NONE) { + pr_err("%s: invalid SMCCC conduit\n", __func__); + return -EOPNOTSUPP; + } + + ffh_ctxt = kzalloc(sizeof(*ffh_ctxt), GFP_KERNEL); + if (!ffh_ctxt) + return -ENOMEM; + + if (conduit == SMCCC_CONDUIT_SMC) { + ffh_ctxt->invoke_ffh_fn = __arm_smccc_smc; + ffh_ctxt->invoke_ffh64_fn = arm_smccc_1_2_smc; + } else { + ffh_ctxt->invoke_ffh_fn = __arm_smccc_hvc; + ffh_ctxt->invoke_ffh64_fn = arm_smccc_1_2_hvc; + } + + memcpy(ffh_ctxt, handler_ctxt, sizeof(ffh_ctxt->info)); + + *region_ctxt = ffh_ctxt; + return AE_OK; +} + +static bool acpi_ffh_smccc_owner_allowed(u32 fid) +{ + int owner = ARM_SMCCC_OWNER_NUM(fid); + + if (owner == ARM_SMCCC_OWNER_STANDARD || + owner == ARM_SMCCC_OWNER_SIP || owner == 
ARM_SMCCC_OWNER_OEM) + return true; + + return false; +} + +int acpi_ffh_address_space_arch_handler(acpi_integer *value, void *region_context) +{ + int ret = 0; + struct acpi_ffh_data *ffh_ctxt = region_context; + + if (ffh_ctxt->info.offset == 0) { + /* SMC/HVC 32bit call */ + struct arm_smccc_res res; + u32 a[8] = { 0 }, *ptr = (u32 *)value; + + if (!ARM_SMCCC_IS_FAST_CALL(*ptr) || ARM_SMCCC_IS_64(*ptr) || + !acpi_ffh_smccc_owner_allowed(*ptr) || + ffh_ctxt->info.length > 32) { + ret = AE_ERROR; + } else { + int idx, len = ffh_ctxt->info.length >> 2; + + for (idx = 0; idx < len; idx++) + a[idx] = *(ptr + idx); + + ffh_ctxt->invoke_ffh_fn(a[0], a[1], a[2], a[3], a[4], + a[5], a[6], a[7], &res, NULL); + memcpy(value, &res, sizeof(res)); + } + + } else if (ffh_ctxt->info.offset == 1) { + /* SMC/HVC 64bit call */ + struct arm_smccc_1_2_regs *r = (struct arm_smccc_1_2_regs *)value; + + if (!ARM_SMCCC_IS_FAST_CALL(r->a0) || !ARM_SMCCC_IS_64(r->a0) || + !acpi_ffh_smccc_owner_allowed(r->a0) || + ffh_ctxt->info.length > sizeof(*r)) { + ret = AE_ERROR; + } else { + ffh_ctxt->invoke_ffh64_fn(r, r); + memcpy(value, r, ffh_ctxt->info.length); + } + } else { + ret = AE_ERROR; + } + + return ret; +} diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index b379401ff1c2..da3a879d638a 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -38,9 +38,10 @@ /* Battery power unit: 0 means mW, 1 means mA */ #define ACPI_BATTERY_POWER_UNIT_MA 1 -#define ACPI_BATTERY_STATE_DISCHARGING 0x1 -#define ACPI_BATTERY_STATE_CHARGING 0x2 -#define ACPI_BATTERY_STATE_CRITICAL 0x4 +#define ACPI_BATTERY_STATE_DISCHARGING 0x1 +#define ACPI_BATTERY_STATE_CHARGING 0x2 +#define ACPI_BATTERY_STATE_CRITICAL 0x4 +#define ACPI_BATTERY_STATE_CHARGE_LIMITING 0x8 #define MAX_STRING_LENGTH 64 @@ -155,7 +156,7 @@ static int acpi_battery_get_state(struct acpi_battery *battery); static int acpi_battery_is_charged(struct acpi_battery *battery) { - /* charging, discharging or critical low */ + /* charging, discharging, critical low or charge limited */ if (battery->state != 0) return 0; @@ -215,6 +216,8 @@ static int acpi_battery_get_property(struct power_supply *psy, val->intval = acpi_battery_handle_discharging(battery); else if (battery->state & ACPI_BATTERY_STATE_CHARGING) val->intval = POWER_SUPPLY_STATUS_CHARGING; + else if (battery->state & ACPI_BATTERY_STATE_CHARGE_LIMITING) + val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING; else if (acpi_battery_is_charged(battery)) val->intval = POWER_SUPPLY_STATUS_FULL; else @@ -308,7 +311,7 @@ static int acpi_battery_get_property(struct power_supply *psy, return ret; } -static enum power_supply_property charge_battery_props[] = { +static const enum power_supply_property charge_battery_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_TECHNOLOGY, @@ -326,7 +329,7 @@ static enum power_supply_property charge_battery_props[] = { POWER_SUPPLY_PROP_SERIAL_NUMBER, }; -static enum power_supply_property charge_battery_full_cap_broken_props[] = { +static const enum power_supply_property charge_battery_full_cap_broken_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_TECHNOLOGY, @@ -340,7 +343,7 @@ static enum power_supply_property charge_battery_full_cap_broken_props[] = { POWER_SUPPLY_PROP_SERIAL_NUMBER, }; -static enum power_supply_property energy_battery_props[] = { +static const enum power_supply_property energy_battery_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_TECHNOLOGY, @@ 
-358,7 +361,7 @@ static enum power_supply_property energy_battery_props[] = { POWER_SUPPLY_PROP_SERIAL_NUMBER, }; -static enum power_supply_property energy_battery_full_cap_broken_props[] = { +static const enum power_supply_property energy_battery_full_cap_broken_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_TECHNOLOGY, @@ -661,7 +664,7 @@ static ssize_t acpi_battery_alarm_show(struct device *dev, { struct acpi_battery *battery = to_acpi_battery(dev_get_drvdata(dev)); - return sprintf(buf, "%d\n", battery->alarm * 1000); + return sysfs_emit(buf, "%d\n", battery->alarm * 1000); } static ssize_t acpi_battery_alarm_store(struct device *dev, @@ -678,12 +681,18 @@ static ssize_t acpi_battery_alarm_store(struct device *dev, return count; } -static const struct device_attribute alarm_attr = { +static struct device_attribute alarm_attr = { .attr = {.name = "alarm", .mode = 0644}, .show = acpi_battery_alarm_show, .store = acpi_battery_alarm_store, }; +static struct attribute *acpi_battery_attrs[] = { + &alarm_attr.attr, + NULL +}; +ATTRIBUTE_GROUPS(acpi_battery); + /* * The Battery Hooking API * @@ -756,6 +765,21 @@ end: } EXPORT_SYMBOL_GPL(battery_hook_register); +static void devm_battery_hook_unregister(void *data) +{ + struct acpi_battery_hook *hook = data; + + battery_hook_unregister(hook); +} + +int devm_battery_hook_register(struct device *dev, struct acpi_battery_hook *hook) +{ + battery_hook_register(hook); + + return devm_add_action_or_reset(dev, devm_battery_hook_unregister, hook); +} +EXPORT_SYMBOL_GPL(devm_battery_hook_register); + /* * This function gets called right after the battery sysfs * attributes have been added, so that the drivers that @@ -823,7 +847,10 @@ static void __exit battery_hook_exit(void) static int sysfs_add_battery(struct acpi_battery *battery) { - struct power_supply_config psy_cfg = { .drv_data = battery, }; + struct power_supply_config psy_cfg = { + .drv_data = battery, + .attr_grp = acpi_battery_groups, + }; bool full_cap_broken = false; if (!ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity) && @@ -868,7 +895,7 @@ static int sysfs_add_battery(struct acpi_battery *battery) return result; } battery_hook_add_battery(battery); - return device_create_file(&battery->bat->dev, &alarm_attr); + return 0; } static void sysfs_remove_battery(struct acpi_battery *battery) @@ -879,7 +906,6 @@ static void sysfs_remove_battery(struct acpi_battery *battery) return; } battery_hook_remove_battery(battery); - device_remove_file(&battery->bat->dev, &alarm_attr); power_supply_unregister(battery->bat); battery->bat = NULL; mutex_unlock(&battery->sysfs_lock); diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 787eca838410..bdbd60ae8897 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -329,6 +329,8 @@ static void acpi_bus_osc_negotiate_platform_control(void) capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PPC_OST_SUPPORT; if (IS_ENABLED(CONFIG_ACPI_THERMAL)) capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_FAST_THERMAL_SAMPLING_SUPPORT; + if (IS_ENABLED(CONFIG_ACPI_BATTERY)) + capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_BATTERY_CHARGE_LIMITING_SUPPORT; capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_HOTPLUG_OST_SUPPORT; capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PCLPI_SUPPORT; diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index 1d857978f5f4..dd3d3082c8c7 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c @@ -160,6 +160,7 @@ show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf); show_cppc_data(cppc_get_perf_caps, 
cppc_perf_caps, lowest_perf); show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf); show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf); +show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, guaranteed_perf); show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_freq); show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq); @@ -196,6 +197,7 @@ static struct attribute *cppc_attrs[] = { &highest_perf.attr, &lowest_perf.attr, &lowest_nonlinear_perf.attr, + &guaranteed_perf.attr, &nominal_perf.attr, &nominal_freq.attr, &lowest_freq.attr, @@ -1837,7 +1839,7 @@ static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private) dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) { u16 val = (u16)get_unaligned((const u16 *) (dmi_data + DMI_PROCESSOR_MAX_SPEED)); - *mhz = val > *mhz ? val : *mhz; + *mhz = umax(val, *mhz); } } diff --git a/drivers/acpi/fan.h b/drivers/acpi/fan.h index f89d19c922dc..db25a3898af7 100644 --- a/drivers/acpi/fan.h +++ b/drivers/acpi/fan.h @@ -10,6 +10,8 @@ #ifndef _ACPI_FAN_H_ #define _ACPI_FAN_H_ +#include <linux/kconfig.h> + #define ACPI_FAN_DEVICE_IDS \ {"INT3404", }, /* Fan */ \ {"INTC1044", }, /* Fan for Tiger Lake generation */ \ @@ -57,4 +59,11 @@ struct acpi_fan { int acpi_fan_get_fst(struct acpi_device *device, struct acpi_fan_fst *fst); int acpi_fan_create_attributes(struct acpi_device *device); void acpi_fan_delete_attributes(struct acpi_device *device); + +#if IS_REACHABLE(CONFIG_HWMON) +int devm_acpi_fan_create_hwmon(struct acpi_device *device); +#else +static inline int devm_acpi_fan_create_hwmon(struct acpi_device *device) { return 0; }; +#endif + #endif diff --git a/drivers/acpi/fan_core.c b/drivers/acpi/fan_core.c index ff72e4ef8738..7cea4495f19b 100644 --- a/drivers/acpi/fan_core.c +++ b/drivers/acpi/fan_core.c @@ -336,6 +336,10 @@ static int acpi_fan_probe(struct platform_device *pdev) if (result) return result; + result = devm_acpi_fan_create_hwmon(device); + if (result) + return result; + result = acpi_fan_create_attributes(device); if (result) return result; diff --git a/drivers/acpi/fan_hwmon.c b/drivers/acpi/fan_hwmon.c new file mode 100644 index 000000000000..bd0d31a398fa --- /dev/null +++ b/drivers/acpi/fan_hwmon.c @@ -0,0 +1,170 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * hwmon interface for the ACPI Fan driver. + * + * Copyright (C) 2024 Armin Wolf <W_Armin@gmx.de> + */ + +#include <linux/acpi.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/hwmon.h> +#include <linux/limits.h> +#include <linux/types.h> +#include <linux/units.h> + +#include "fan.h" + +/* Returned when the ACPI fan does not support speed reporting */ +#define FAN_SPEED_UNAVAILABLE U32_MAX +#define FAN_POWER_UNAVAILABLE U32_MAX + +static struct acpi_fan_fps *acpi_fan_get_current_fps(struct acpi_fan *fan, u64 control) +{ + unsigned int i; + + for (i = 0; i < fan->fps_count; i++) { + if (fan->fps[i].control == control) + return &fan->fps[i]; + } + + return NULL; +} + +static umode_t acpi_fan_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type, + u32 attr, int channel) +{ + const struct acpi_fan *fan = drvdata; + unsigned int i; + + switch (type) { + case hwmon_fan: + switch (attr) { + case hwmon_fan_input: + return 0444; + case hwmon_fan_target: + /* + * When in fine grain control mode, not every fan control value + * has an associated fan performance state. 
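acpi_fan_get_current_fps() above is a plain linear scan that maps the fan's current control value back to its performance-state entry; with fine-grained control there may be no matching entry at all, which is why the target and power attributes are hidden in that mode. A standalone sketch of the lookup (struct reduced to the two fields it touches; table contents invented):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct fps {
	uint64_t control;
	uint64_t speed;		/* RPM */
};

static const struct fps table[] = {
	{ .control = 0,   .speed = 0    },
	{ .control = 50,  .speed = 2400 },
	{ .control = 100, .speed = 4800 },
};

static const struct fps *current_fps(uint64_t control)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].control == control)
			return &table[i];

	return NULL;	/* fine-grained control value between states */
}

int main(void)
{
	const struct fps *f = current_fps(50);

	printf("%llu rpm\n", f ? (unsigned long long)f->speed : 0ULL);
	return 0;
}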
+ */ + if (fan->fif.fine_grain_ctrl) + return 0; + + return 0444; + default: + return 0; + } + case hwmon_power: + switch (attr) { + case hwmon_power_input: + /* + * When in fine grain control mode, not every fan control value + * has an associated fan performance state. + */ + if (fan->fif.fine_grain_ctrl) + return 0; + + /* + * When all fan performance states contain no valid power data, + * the associated attribute should not be created. + */ + for (i = 0; i < fan->fps_count; i++) { + if (fan->fps[i].power != FAN_POWER_UNAVAILABLE) + return 0444; + } + + return 0; + default: + return 0; + } + default: + return 0; + } +} + +static int acpi_fan_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, + int channel, long *val) +{ + struct acpi_device *adev = to_acpi_device(dev->parent); + struct acpi_fan *fan = dev_get_drvdata(dev); + struct acpi_fan_fps *fps; + struct acpi_fan_fst fst; + int ret; + + ret = acpi_fan_get_fst(adev, &fst); + if (ret < 0) + return ret; + + switch (type) { + case hwmon_fan: + switch (attr) { + case hwmon_fan_input: + if (fst.speed == FAN_SPEED_UNAVAILABLE) + return -ENODEV; + + if (fst.speed > LONG_MAX) + return -EOVERFLOW; + + *val = fst.speed; + return 0; + case hwmon_fan_target: + fps = acpi_fan_get_current_fps(fan, fst.control); + if (!fps) + return -EIO; + + if (fps->speed > LONG_MAX) + return -EOVERFLOW; + + *val = fps->speed; + return 0; + default: + return -EOPNOTSUPP; + } + case hwmon_power: + switch (attr) { + case hwmon_power_input: + fps = acpi_fan_get_current_fps(fan, fst.control); + if (!fps) + return -EIO; + + if (fps->power == FAN_POWER_UNAVAILABLE) + return -ENODEV; + + if (fps->power > LONG_MAX / MICROWATT_PER_MILLIWATT) + return -EOVERFLOW; + + *val = fps->power * MICROWATT_PER_MILLIWATT; + return 0; + default: + return -EOPNOTSUPP; + } + default: + return -EOPNOTSUPP; + } +} + +static const struct hwmon_ops acpi_fan_hwmon_ops = { + .is_visible = acpi_fan_hwmon_is_visible, + .read = acpi_fan_hwmon_read, +}; + +static const struct hwmon_channel_info * const acpi_fan_hwmon_info[] = { + HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT | HWMON_F_TARGET), + HWMON_CHANNEL_INFO(power, HWMON_P_INPUT), + NULL +}; + +static const struct hwmon_chip_info acpi_fan_hwmon_chip_info = { + .ops = &acpi_fan_hwmon_ops, + .info = acpi_fan_hwmon_info, +}; + +int devm_acpi_fan_create_hwmon(struct acpi_device *device) +{ + struct acpi_fan *fan = acpi_driver_data(device); + struct device *hdev; + + hdev = devm_hwmon_device_register_with_info(&device->dev, "acpi_fan", fan, + &acpi_fan_hwmon_chip_info, NULL); + return PTR_ERR_OR_ZERO(hdev); +} diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c index 2c8ccc91ebe6..febd9e51350b 100644 --- a/drivers/acpi/numa/hmat.c +++ b/drivers/acpi/numa/hmat.c @@ -408,7 +408,7 @@ static __init void hmat_update_target(unsigned int tgt_pxm, unsigned int init_px if (target && target->processor_pxm == init_pxm) { hmat_update_target_access(target, type, value, ACCESS_COORDINATE_LOCAL); - /* If the node has a CPU, update access 1 */ + /* If the node has a CPU, update access ACCESS_COORDINATE_CPU */ if (node_state(pxm_to_node(init_pxm), N_CPU)) hmat_update_target_access(target, type, value, ACCESS_COORDINATE_CPU); @@ -948,7 +948,7 @@ static int hmat_set_default_dram_perf(void) target = find_mem_target(pxm); if (!target) continue; - attrs = &target->coord[1]; + attrs = &target->coord[ACCESS_COORDINATE_CPU]; rc = mt_set_default_dram_perf(nid, attrs, "ACPI HMAT"); if (rc) return rc; @@ -975,7 +975,7 @@ static int
hmat_calculate_adistance(struct notifier_block *self, hmat_update_target_attrs(target, p_nodes, ACCESS_COORDINATE_CPU); mutex_unlock(&target_lock); - perf = &target->coord[1]; + perf = &target->coord[ACCESS_COORDINATE_CPU]; if (mt_perf_to_adistance(perf, adist)) return NOTIFY_OK; diff --git a/drivers/acpi/platform_profile.c b/drivers/acpi/platform_profile.c index 4a9704730224..d2f7fd7743a1 100644 --- a/drivers/acpi/platform_profile.c +++ b/drivers/acpi/platform_profile.c @@ -217,4 +217,5 @@ int platform_profile_remove(void) EXPORT_SYMBOL_GPL(platform_profile_remove); MODULE_AUTHOR("Mark Pearson <markpearson@lenovo.com>"); +MODULE_DESCRIPTION("ACPI platform profile sysfs interface"); MODULE_LICENSE("GPL"); diff --git a/drivers/acpi/pmic/intel_pmic.c b/drivers/acpi/pmic/intel_pmic.c index f20dbda1a831..134e9ca8eaa2 100644 --- a/drivers/acpi/pmic/intel_pmic.c +++ b/drivers/acpi/pmic/intel_pmic.c @@ -31,7 +31,7 @@ struct intel_pmic_opregion { static struct intel_pmic_opregion *intel_pmic_opregion; -static int pmic_get_reg_bit(int address, struct pmic_table *table, +static int pmic_get_reg_bit(int address, const struct pmic_table *table, int count, int *reg, int *bit) { int i; diff --git a/drivers/acpi/pmic/intel_pmic.h b/drivers/acpi/pmic/intel_pmic.h index d956b03a6ca0..006f0780ffab 100644 --- a/drivers/acpi/pmic/intel_pmic.h +++ b/drivers/acpi/pmic/intel_pmic.h @@ -21,9 +21,9 @@ struct intel_pmic_opregion_data { u32 reg_address, u32 value, u32 mask); int (*lpat_raw_to_temp)(struct acpi_lpat_conversion_table *lpat_table, int raw); - struct pmic_table *power_table; + const struct pmic_table *power_table; int power_table_count; - struct pmic_table *thermal_table; + const struct pmic_table *thermal_table; int thermal_table_count; /* For generic exec_mipi_pmic_seq_element handling */ int pmic_i2c_address; diff --git a/drivers/acpi/pmic/intel_pmic_bxtwc.c b/drivers/acpi/pmic/intel_pmic_bxtwc.c index e247615189fa..c332afbf82bd 100644 --- a/drivers/acpi/pmic/intel_pmic_bxtwc.c +++ b/drivers/acpi/pmic/intel_pmic_bxtwc.c @@ -24,7 +24,7 @@ #define VSWITCH1_OUTPUT BIT(4) #define VUSBPHY_CHARGE BIT(1) -static struct pmic_table power_table[] = { +static const struct pmic_table power_table[] = { { .address = 0x0, .reg = 0x63, @@ -177,7 +177,7 @@ static struct pmic_table power_table[] = { } /* MOFF -> MODEMCTRL Bit 0 */ }; -static struct pmic_table thermal_table[] = { +static const struct pmic_table thermal_table[] = { { .address = 0x00, .reg = 0x4F39 diff --git a/drivers/acpi/pmic/intel_pmic_bytcrc.c b/drivers/acpi/pmic/intel_pmic_bytcrc.c index 2b09f8da5400..b4c21a75294a 100644 --- a/drivers/acpi/pmic/intel_pmic_bytcrc.c +++ b/drivers/acpi/pmic/intel_pmic_bytcrc.c @@ -16,7 +16,7 @@ #define PMIC_A0LOCK_REG 0xc5 -static struct pmic_table power_table[] = { +static const struct pmic_table power_table[] = { /* { .address = 0x00, .reg = ??, @@ -134,7 +134,7 @@ static struct pmic_table power_table[] = { }, /* V105 -> V1P05S, L2 SRAM */ }; -static struct pmic_table thermal_table[] = { +static const struct pmic_table thermal_table[] = { { .address = 0x00, .reg = 0x75 diff --git a/drivers/acpi/pmic/intel_pmic_chtdc_ti.c b/drivers/acpi/pmic/intel_pmic_chtdc_ti.c index c84ef3d15181..ecb36fbc1e7f 100644 --- a/drivers/acpi/pmic/intel_pmic_chtdc_ti.c +++ b/drivers/acpi/pmic/intel_pmic_chtdc_ti.c @@ -8,18 +8,22 @@ */ #include <linux/acpi.h> +#include <linux/bits.h> #include <linux/init.h> #include <linux/mfd/intel_soc_pmic.h> #include <linux/platform_device.h> +#include <asm/byteorder.h> #include "intel_pmic.h" /* 
registers stored in 16bit BE (high:low, total 10bit) */ +#define PMIC_REG_MASK GENMASK(9, 0) + #define CHTDC_TI_VBAT 0x54 #define CHTDC_TI_DIETEMP 0x56 #define CHTDC_TI_BPTHERM 0x58 #define CHTDC_TI_GPADC 0x5a -static struct pmic_table chtdc_ti_power_table[] = { +static const struct pmic_table chtdc_ti_power_table[] = { { .address = 0x00, .reg = 0x41 }, /* LDO1 */ { .address = 0x04, .reg = 0x42 }, /* LDO2 */ { .address = 0x08, .reg = 0x43 }, /* LDO3 */ @@ -35,7 +39,7 @@ static struct pmic_table chtdc_ti_power_table[] = { { .address = 0x30, .reg = 0x4e }, /* LD14 */ }; -static struct pmic_table chtdc_ti_thermal_table[] = { +static const struct pmic_table chtdc_ti_thermal_table[] = { { .address = 0x00, .reg = CHTDC_TI_GPADC @@ -73,7 +77,7 @@ static int chtdc_ti_pmic_get_power(struct regmap *regmap, int reg, int bit, if (regmap_read(regmap, reg, &data)) return -EIO; - *value = data & 1; + *value = data & BIT(0); return 0; } @@ -85,13 +89,12 @@ static int chtdc_ti_pmic_update_power(struct regmap *regmap, int reg, int bit, static int chtdc_ti_pmic_get_raw_temp(struct regmap *regmap, int reg) { - u8 buf[2]; + __be16 buf; - if (regmap_bulk_read(regmap, reg, buf, 2)) + if (regmap_bulk_read(regmap, reg, &buf, sizeof(buf))) return -EIO; - /* stored in big-endian */ - return ((buf[0] & 0x03) << 8) | buf[1]; + return be16_to_cpu(buf) & PMIC_REG_MASK; } static const struct intel_pmic_opregion_data chtdc_ti_pmic_opregion_data = { diff --git a/drivers/acpi/pmic/intel_pmic_chtwc.c b/drivers/acpi/pmic/intel_pmic_chtwc.c index f2c42f4c79ca..81caede51ca2 100644 --- a/drivers/acpi/pmic/intel_pmic_chtwc.c +++ b/drivers/acpi/pmic/intel_pmic_chtwc.c @@ -70,7 +70,7 @@ * "regulator: whiskey_cove: implements Whiskey Cove pmic VRF support" * https://github.com/intel-aero/meta-intel-aero/blob/master/recipes-kernel/linux/linux-yocto/0019-regulator-whiskey_cove-implements-WhiskeyCove-pmic-V.patch */ -static struct pmic_table power_table[] = { +static const struct pmic_table power_table[] = { { .address = 0x0, .reg = CHT_WC_V1P8A_CTRL, @@ -236,11 +236,12 @@ static int intel_cht_wc_exec_mipi_pmic_seq_element(struct regmap *regmap, u32 reg_address, u32 value, u32 mask) { + struct device *dev = regmap_get_device(regmap); u32 address; if (i2c_client_address > 0xff || reg_address > 0xff) { - pr_warn("%s warning addresses too big client 0x%x reg 0x%x\n", - __func__, i2c_client_address, reg_address); + dev_warn(dev, "warning addresses too big client 0x%x reg 0x%x\n", + i2c_client_address, reg_address); return -ERANGE; } diff --git a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c index 61bbe4c24d87..49bda5e0c8aa 100644 --- a/drivers/acpi/pmic/intel_pmic_xpower.c +++ b/drivers/acpi/pmic/intel_pmic_xpower.c @@ -26,7 +26,7 @@ #define AXP288_ADC_TS_CURRENT_ON_ONDEMAND (2 << 0) #define AXP288_ADC_TS_CURRENT_ON (3 << 0) -static struct pmic_table power_table[] = { +static const struct pmic_table power_table[] = { { .address = 0x00, .reg = 0x13, @@ -129,7 +129,7 @@ static struct pmic_table power_table[] = { }; /* TMP0 - TMP5 are the same, all from GPADC */ -static struct pmic_table thermal_table[] = { +static const struct pmic_table thermal_table[] = { { .address = 0x00, .reg = XPOWER_GPADC_LOW @@ -255,7 +255,7 @@ static int intel_xpower_pmic_get_raw_temp(struct regmap *regmap, int reg) if (ret) return ret; - ret = regmap_bulk_read(regmap, AXP288_GP_ADC_H, buf, 2); + ret = regmap_bulk_read(regmap, AXP288_GP_ADC_H, buf, sizeof(buf)); if (ret == 0) ret = (buf[0] << 4) + ((buf[1] >> 4) & 0x0f); @@ -274,11 
+274,12 @@ static int intel_xpower_exec_mipi_pmic_seq_element(struct regmap *regmap, u16 i2c_address, u32 reg_address, u32 value, u32 mask) { + struct device *dev = regmap_get_device(regmap); int ret; if (i2c_address != 0x34) { - pr_err("%s: Unexpected i2c-addr: 0x%02x (reg-addr 0x%x value 0x%x mask 0x%x)\n", - __func__, i2c_address, reg_address, value, mask); + dev_err(dev, "Unexpected i2c-addr: 0x%02x (reg-addr 0x%x value 0x%x mask 0x%x)\n", + i2c_address, reg_address, value, mask); return -ENXIO; } diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c index b203cfe28550..b04b684f3190 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c @@ -90,7 +90,8 @@ static int map_gicc_mpidr(struct acpi_subtable_header *entry, struct acpi_madt_generic_interrupt *gicc = container_of(entry, struct acpi_madt_generic_interrupt, header); - if (!acpi_gicc_is_usable(gicc)) + if (!(gicc->flags & + (ACPI_MADT_ENABLED | ACPI_MADT_GICC_ONLINE_CAPABLE))) return -ENODEV; /* device_declaration means Device object in DSDT, in the diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index 67db60eda370..cb52dd000b95 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c @@ -33,7 +33,6 @@ MODULE_AUTHOR("Paul Diefenbaugh"); MODULE_DESCRIPTION("ACPI Processor Driver"); MODULE_LICENSE("GPL"); -static int acpi_processor_start(struct device *dev); static int acpi_processor_stop(struct device *dev); static const struct acpi_device_id processor_device_ids[] = { @@ -47,7 +46,6 @@ static struct device_driver acpi_processor_driver = { .name = "processor", .bus = &cpu_subsys, .acpi_match_table = processor_device_ids, - .probe = acpi_processor_start, .remove = acpi_processor_stop, }; @@ -115,12 +113,9 @@ static int acpi_soft_cpu_online(unsigned int cpu) * CPU got physically hotplugged and onlined for the first time: * Initialize missing things. 
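The map_gicc_mpidr() change above widens the usability test: a GICC entry now qualifies if it is either enabled or merely online-capable, matching the hotplug-aware enumeration elsewhere in this series. A tiny model of the flag check (the bit positions follow the MADT GICC flag layout, but treat the exact values here as assumptions to be checked against ACPICA's actbl2.h):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ACPI_MADT_ENABLED		(1u << 0)
#define ACPI_MADT_GICC_ONLINE_CAPABLE	(1u << 3)	/* assumed bit */

static bool gicc_usable(uint32_t flags)
{
	return flags & (ACPI_MADT_ENABLED | ACPI_MADT_GICC_ONLINE_CAPABLE);
}

int main(void)
{
	printf("%d\n", gicc_usable(ACPI_MADT_ENABLED));			/* 1 */
	printf("%d\n", gicc_usable(ACPI_MADT_GICC_ONLINE_CAPABLE));	/* 1 */
	printf("%d\n", gicc_usable(0));					/* 0 */
	return 0;
}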
*/ - if (pr->flags.need_hotplug_init) { + if (!pr->flags.previously_online) { int ret; - pr_info("Will online and init hotplugged CPU: %d\n", - pr->id); - pr->flags.need_hotplug_init = 0; ret = __acpi_processor_start(device); WARN(ret, "Failed to start CPU: %d\n", pr->id); } else { @@ -167,9 +162,6 @@ static int __acpi_processor_start(struct acpi_device *device) if (!pr) return -ENODEV; - if (pr->flags.need_hotplug_init) - return 0; - result = acpi_cppc_processor_probe(pr); if (result && !IS_ENABLED(CONFIG_ACPI_CPU_FREQ_PSS)) dev_dbg(&device->dev, "CPPC data invalid or not present\n"); @@ -185,32 +177,21 @@ static int __acpi_processor_start(struct acpi_device *device) status = acpi_install_notify_handler(device->handle, ACPI_DEVICE_NOTIFY, acpi_processor_notify, device); - if (ACPI_SUCCESS(status)) - return 0; + if (!ACPI_SUCCESS(status)) { + result = -ENODEV; + goto err_thermal_exit; + } + pr->flags.previously_online = 1; - result = -ENODEV; - acpi_processor_thermal_exit(pr, device); + return 0; +err_thermal_exit: + acpi_processor_thermal_exit(pr, device); err_power_exit: acpi_processor_power_exit(pr); return result; } -static int acpi_processor_start(struct device *dev) -{ - struct acpi_device *device = ACPI_COMPANION(dev); - int ret; - - if (!device) - return -ENODEV; - - /* Protect against concurrent CPU hotplug operations */ - cpu_hotplug_disable(); - ret = __acpi_processor_start(device); - cpu_hotplug_enable(); - return ret; -} - static int acpi_processor_stop(struct device *dev) { struct acpi_device *device = ACPI_COMPANION(dev); @@ -279,9 +260,9 @@ static int __init acpi_processor_driver_init(void) if (result < 0) return result; - result = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, - "acpi/cpu-drv:online", - acpi_soft_cpu_online, NULL); + result = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, + "acpi/cpu-drv:online", + acpi_soft_cpu_online, NULL); if (result < 0) goto err; hp_online = result; diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index bd6a7857ce05..831fa4a12159 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -16,7 +16,6 @@ #include <linux/acpi.h> #include <linux/dmi.h> #include <linux/sched.h> /* need_resched() */ -#include <linux/sort.h> #include <linux/tick.h> #include <linux/cpuidle.h> #include <linux/cpu.h> @@ -386,25 +385,24 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr, acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1); } -static int acpi_cst_latency_cmp(const void *a, const void *b) +static void acpi_cst_latency_sort(struct acpi_processor_cx *states, size_t length) { - const struct acpi_processor_cx *x = a, *y = b; + int i, j, k; - if (!(x->valid && y->valid)) - return 0; - if (x->latency > y->latency) - return 1; - if (x->latency < y->latency) - return -1; - return 0; -} -static void acpi_cst_latency_swap(void *a, void *b, int n) -{ - struct acpi_processor_cx *x = a, *y = b; + for (i = 1; i < length; i++) { + if (!states[i].valid) + continue; - if (!(x->valid && y->valid)) - return; - swap(x->latency, y->latency); + for (j = i - 1, k = i; j >= 0; j--) { + if (!states[j].valid) + continue; + + if (states[j].latency > states[k].latency) + swap(states[j].latency, states[k].latency); + + k = j; + } + } } static int acpi_processor_power_verify(struct acpi_processor *pr) @@ -449,10 +447,7 @@ static int acpi_processor_power_verify(struct acpi_processor *pr) if (buggy_latency) { pr_notice("FW issue: working around C-state latencies out of order\n"); - sort(&pr->power.states[1], 
max_cstate, - sizeof(struct acpi_processor_cx), - acpi_cst_latency_cmp, - acpi_cst_latency_swap); + acpi_cst_latency_sort(&pr->power.states[1], max_cstate); } lapic_timer_propagate_broadcast(pr); diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index b5bf8b81a050..df5d5a554b38 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -525,6 +525,20 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = { }, }, { + /* Asus Vivobook Pro N6506MU */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_BOARD_NAME, "N6506MU"), + }, + }, + { + /* Asus Vivobook Pro N6506MJ */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_BOARD_NAME, "N6506MJ"), + }, + }, + { /* LG Electronics 17U70P */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"), diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c index dc8164b182dc..7a0914055fca 100644 --- a/drivers/acpi/sbs.c +++ b/drivers/acpi/sbs.c @@ -77,7 +77,6 @@ struct acpi_battery { u16 spec; u8 id; u8 present:1; - u8 have_sysfs_alarm:1; }; #define to_acpi_battery(x) power_supply_get_drvdata(x) @@ -241,11 +240,11 @@ static int acpi_sbs_battery_get_property(struct power_supply *psy, return 0; } -static enum power_supply_property sbs_ac_props[] = { +static const enum power_supply_property sbs_ac_props[] = { POWER_SUPPLY_PROP_ONLINE, }; -static enum power_supply_property sbs_charge_battery_props[] = { +static const enum power_supply_property sbs_charge_battery_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_TECHNOLOGY, @@ -263,7 +262,7 @@ static enum power_supply_property sbs_charge_battery_props[] = { POWER_SUPPLY_PROP_MANUFACTURER, }; -static enum power_supply_property sbs_energy_battery_props[] = { +static const enum power_supply_property sbs_energy_battery_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_TECHNOLOGY, @@ -462,12 +461,18 @@ static ssize_t acpi_battery_alarm_store(struct device *dev, return count; } -static const struct device_attribute alarm_attr = { +static struct device_attribute alarm_attr = { .attr = {.name = "alarm", .mode = 0644}, .show = acpi_battery_alarm_show, .store = acpi_battery_alarm_store, }; +static struct attribute *acpi_battery_attrs[] = { + &alarm_attr.attr, + NULL +}; +ATTRIBUTE_GROUPS(acpi_battery); + /* -------------------------------------------------------------------------- Driver Interface -------------------------------------------------------------------------- */ @@ -518,7 +523,10 @@ static int acpi_battery_read(struct acpi_battery *battery) static int acpi_battery_add(struct acpi_sbs *sbs, int id) { struct acpi_battery *battery = &sbs->battery[id]; - struct power_supply_config psy_cfg = { .drv_data = battery, }; + struct power_supply_config psy_cfg = { + .drv_data = battery, + .attr_grp = acpi_battery_groups, + }; int result; battery->id = id; @@ -548,10 +556,6 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id) goto end; } - result = device_create_file(&battery->bat->dev, &alarm_attr); - if (result) - goto end; - battery->have_sysfs_alarm = 1; end: pr_info("%s [%s]: Battery Slot [%s] (battery %s)\n", ACPI_SBS_DEVICE_NAME, acpi_device_bid(sbs->device), @@ -563,11 +567,8 @@ static void acpi_battery_remove(struct acpi_sbs *sbs, int id) { struct acpi_battery *battery = &sbs->battery[id]; - if (battery->bat) { - if (battery->have_sysfs_alarm) - device_remove_file(&battery->bat->dev, &alarm_attr); + if (battery->bat) 
power_supply_unregister(battery->bat); - } } static int acpi_charger_add(struct acpi_sbs *sbs) diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 503773707e01..0aa20623525a 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -243,13 +243,17 @@ static int acpi_scan_try_to_offline(struct acpi_device *device) return 0; } -static int acpi_scan_check_and_detach(struct acpi_device *adev, void *check) +#define ACPI_SCAN_CHECK_FLAG_STATUS BIT(0) +#define ACPI_SCAN_CHECK_FLAG_EJECT BIT(1) + +static int acpi_scan_check_and_detach(struct acpi_device *adev, void *p) { struct acpi_scan_handler *handler = adev->handler; + uintptr_t flags = (uintptr_t)p; - acpi_dev_for_each_child_reverse(adev, acpi_scan_check_and_detach, check); + acpi_dev_for_each_child_reverse(adev, acpi_scan_check_and_detach, p); - if (check) { + if (flags & ACPI_SCAN_CHECK_FLAG_STATUS) { acpi_bus_get_status(adev); /* * Skip devices that are still there and take the enabled @@ -269,8 +273,6 @@ static int acpi_scan_check_and_detach(struct acpi_device *adev, void *check) if (handler) { if (handler->detach) handler->detach(adev); - - adev->handler = NULL; } else { device_release_driver(&adev->dev); } @@ -280,6 +282,28 @@ static int acpi_scan_check_and_detach(struct acpi_device *adev, void *check) */ acpi_device_set_power(adev, ACPI_STATE_D3_COLD); adev->flags.initialized = false; + + /* For eject this is deferred to acpi_bus_post_eject() */ + if (!(flags & ACPI_SCAN_CHECK_FLAG_EJECT)) { + adev->handler = NULL; + acpi_device_clear_enumerated(adev); + } + return 0; +} + +static int acpi_bus_post_eject(struct acpi_device *adev, void *not_used) +{ + struct acpi_scan_handler *handler = adev->handler; + + acpi_dev_for_each_child_reverse(adev, acpi_bus_post_eject, NULL); + + if (handler) { + if (handler->post_eject) + handler->post_eject(adev); + + adev->handler = NULL; + } + acpi_device_clear_enumerated(adev); return 0; @@ -287,7 +311,9 @@ static int acpi_scan_check_and_detach(struct acpi_device *adev, void *check) static void acpi_scan_check_subtree(struct acpi_device *adev) { - acpi_scan_check_and_detach(adev, (void *)true); + uintptr_t flags = ACPI_SCAN_CHECK_FLAG_STATUS; + + acpi_scan_check_and_detach(adev, (void *)flags); } static int acpi_scan_hot_remove(struct acpi_device *device) @@ -295,6 +321,7 @@ static int acpi_scan_hot_remove(struct acpi_device *device) acpi_handle handle = device->handle; unsigned long long sta; acpi_status status; + uintptr_t flags = ACPI_SCAN_CHECK_FLAG_EJECT; if (device->handler && device->handler->hotplug.demand_offline) { if (!acpi_scan_is_offline(device, true)) @@ -307,7 +334,7 @@ static int acpi_scan_hot_remove(struct acpi_device *device) acpi_handle_debug(handle, "Ejecting\n"); - acpi_bus_trim(device); + acpi_scan_check_and_detach(device, (void *)flags); acpi_evaluate_lck(handle, 0); /* @@ -330,6 +357,8 @@ static int acpi_scan_hot_remove(struct acpi_device *device) } else if (sta & ACPI_STA_DEVICE_ENABLED) { acpi_handle_warn(handle, "Eject incomplete - status 0x%llx\n", sta); + } else { + acpi_bus_post_eject(device, NULL); } return 0; @@ -2596,7 +2625,9 @@ EXPORT_SYMBOL(acpi_bus_scan); */ void acpi_bus_trim(struct acpi_device *adev) { - acpi_scan_check_and_detach(adev, NULL); + uintptr_t flags = 0; + + acpi_scan_check_and_detach(adev, (void *)flags); } EXPORT_SYMBOL_GPL(acpi_bus_trim); diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c index b976e5fc3fbc..9e1b01c35070 100644 --- a/drivers/acpi/tables.c +++ b/drivers/acpi/tables.c @@ -198,6 +198,20 @@ void 
acpi_table_print_madt_entry(struct acpi_subtable_header *header) } break; + case ACPI_MADT_TYPE_MULTIPROC_WAKEUP: + { + struct acpi_madt_multiproc_wakeup *p = + (struct acpi_madt_multiproc_wakeup *)header; + u64 reset_vector = 0; + + if (p->version >= ACPI_MADT_MP_WAKEUP_VERSION_V1) + reset_vector = p->reset_vector; + + pr_debug("MP Wakeup (version[%d], mailbox[%#llx], reset[%#llx])\n", + p->version, p->mailbox_address, reset_vector); + } + break; + case ACPI_MADT_TYPE_CORE_PIC: { struct acpi_madt_core_pic *p = (struct acpi_madt_core_pic *)header; diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index 2cc3821b2b16..c11cbe5b6eaa 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -540,6 +540,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = { }, }, { + .callback = video_detect_force_native, + /* Apple MacBook Air 9,1 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir9,1"), + }, + }, + { /* https://bugzilla.redhat.com/show_bug.cgi?id=1217249 */ .callback = video_detect_force_native, /* Apple MacBook Pro 12,1 */ @@ -550,6 +558,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = { }, { .callback = video_detect_force_native, + /* Apple MacBook Pro 16,2 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro16,2"), + }, + }, + { + .callback = video_detect_force_native, /* Dell Inspiron N4010 */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), diff --git a/drivers/acpi/x86/lpss.c b/drivers/acpi/x86/lpss.c index 148e29c2c526..258440b899a9 100644 --- a/drivers/acpi/x86/lpss.c +++ b/drivers/acpi/x86/lpss.c @@ -338,8 +338,8 @@ static const struct lpss_device_desc bsw_spi_dev_desc = { }; static const struct x86_cpu_id lpss_cpu_ids[] = { - X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, NULL), - X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, NULL), + X86_MATCH_VFM(INTEL_ATOM_SILVERMONT, NULL), + X86_MATCH_VFM(INTEL_ATOM_AIRMONT, NULL), {} }; diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c index 2fe0934dcd64..ab2b5fa83e1f 100644 --- a/drivers/acpi/x86/utils.c +++ b/drivers/acpi/x86/utils.c @@ -45,37 +45,37 @@ struct override_status_id { unsigned long long status; }; -#define ENTRY(status, hid, uid, path, cpu_model, dmi...) { \ +#define ENTRY(status, hid, uid, path, cpu_vfm, dmi...) { \ { { hid, }, {} }, \ - { X86_MATCH_INTEL_FAM6_MODEL(cpu_model, NULL), {} }, \ + { X86_MATCH_VFM(cpu_vfm, NULL), {} }, \ { { .matches = dmi }, {} }, \ uid, \ path, \ status, \ } -#define PRESENT_ENTRY_HID(hid, uid, cpu_model, dmi...) \ - ENTRY(ACPI_STA_DEFAULT, hid, uid, NULL, cpu_model, dmi) +#define PRESENT_ENTRY_HID(hid, uid, cpu_vfm, dmi...) \ + ENTRY(ACPI_STA_DEFAULT, hid, uid, NULL, cpu_vfm, dmi) -#define NOT_PRESENT_ENTRY_HID(hid, uid, cpu_model, dmi...) \ - ENTRY(0, hid, uid, NULL, cpu_model, dmi) +#define NOT_PRESENT_ENTRY_HID(hid, uid, cpu_vfm, dmi...) \ + ENTRY(0, hid, uid, NULL, cpu_vfm, dmi) -#define PRESENT_ENTRY_PATH(path, cpu_model, dmi...) \ - ENTRY(ACPI_STA_DEFAULT, "", NULL, path, cpu_model, dmi) +#define PRESENT_ENTRY_PATH(path, cpu_vfm, dmi...) \ + ENTRY(ACPI_STA_DEFAULT, "", NULL, path, cpu_vfm, dmi) -#define NOT_PRESENT_ENTRY_PATH(path, cpu_model, dmi...) \ - ENTRY(0, "", NULL, path, cpu_model, dmi) +#define NOT_PRESENT_ENTRY_PATH(path, cpu_vfm, dmi...) 
\ + ENTRY(0, "", NULL, path, cpu_vfm, dmi) static const struct override_status_id override_status_ids[] = { /* * Bay / Cherry Trail PWM directly poked by GPU driver in win10, * but Linux uses a separate PWM driver, harmless if not used. */ - PRESENT_ENTRY_HID("80860F09", "1", ATOM_SILVERMONT, {}), - PRESENT_ENTRY_HID("80862288", "1", ATOM_AIRMONT, {}), + PRESENT_ENTRY_HID("80860F09", "1", INTEL_ATOM_SILVERMONT, {}), + PRESENT_ENTRY_HID("80862288", "1", INTEL_ATOM_AIRMONT, {}), /* The Xiaomi Mi Pad 2 uses PWM2 for touchkeys backlight control */ - PRESENT_ENTRY_HID("80862289", "2", ATOM_AIRMONT, { + PRESENT_ENTRY_HID("80862289", "2", INTEL_ATOM_AIRMONT, { DMI_MATCH(DMI_SYS_VENDOR, "Xiaomi Inc"), DMI_MATCH(DMI_PRODUCT_NAME, "Mipad2"), }), @@ -84,18 +84,18 @@ static const struct override_status_id override_status_ids[] = { * The INT0002 device is necessary to clear wakeup interrupt sources * on Cherry Trail devices, without it we get nobody cared IRQ msgs. */ - PRESENT_ENTRY_HID("INT0002", "1", ATOM_AIRMONT, {}), + PRESENT_ENTRY_HID("INT0002", "1", INTEL_ATOM_AIRMONT, {}), /* * On the Dell Venue 11 Pro 7130 and 7139, the DSDT hides * the touchscreen ACPI device until a certain time * after _SB.PCI0.GFX0.LCD.LCD1._ON gets called has passed * *and* _STA has been called at least 3 times since. */ - PRESENT_ENTRY_HID("SYNA7500", "1", HASWELL_L, { + PRESENT_ENTRY_HID("SYNA7500", "1", INTEL_HASWELL_L, { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7130"), }), - PRESENT_ENTRY_HID("SYNA7500", "1", HASWELL_L, { + PRESENT_ENTRY_HID("SYNA7500", "1", INTEL_HASWELL_L, { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7139"), }), @@ -104,7 +104,7 @@ static const struct override_status_id override_status_ids[] = { * The Dell XPS 15 9550 has a SMO8110 accelerometer / * HDD freefall sensor which is wrongly marked as not present. */ - PRESENT_ENTRY_HID("SMO8810", "1", SKYLAKE, { + PRESENT_ENTRY_HID("SMO8810", "1", INTEL_SKYLAKE, { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "XPS 15 9550"), }), @@ -121,19 +121,19 @@ static const struct override_status_id override_status_ids[] = { * was copy-pasted from the GPD win, so it has a disabled KIOX000A * node which we should not enable, thus we also check the BIOS date. */ - PRESENT_ENTRY_HID("KIOX000A", "1", ATOM_AIRMONT, { + PRESENT_ENTRY_HID("KIOX000A", "1", INTEL_ATOM_AIRMONT, { DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), DMI_MATCH(DMI_BOARD_NAME, "Default string"), DMI_MATCH(DMI_PRODUCT_NAME, "Default string"), DMI_MATCH(DMI_BIOS_DATE, "02/21/2017") }), - PRESENT_ENTRY_HID("KIOX000A", "1", ATOM_AIRMONT, { + PRESENT_ENTRY_HID("KIOX000A", "1", INTEL_ATOM_AIRMONT, { DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), DMI_MATCH(DMI_BOARD_NAME, "Default string"), DMI_MATCH(DMI_PRODUCT_NAME, "Default string"), DMI_MATCH(DMI_BIOS_DATE, "03/20/2017") }), - PRESENT_ENTRY_HID("KIOX000A", "1", ATOM_AIRMONT, { + PRESENT_ENTRY_HID("KIOX000A", "1", INTEL_ATOM_AIRMONT, { DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), DMI_MATCH(DMI_BOARD_NAME, "Default string"), DMI_MATCH(DMI_PRODUCT_NAME, "Default string"), @@ -146,7 +146,7 @@ static const struct override_status_id override_status_ids[] = { * method sets a GPIO causing the PCI wifi card to turn off. * See above remark about uniqueness of the DMI match. 
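Each override entry above only takes effect when the HID, the CPU model match, and every populated DMI field agree. A toy model of that table walk (this is not the kernel's dmi_first_match(); the entries, the NULL-as-wildcard convention, and the 0x0f status value are all illustrative):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct override {
	const char *hid;
	const char *vendor;	/* NULL acts as a wildcard */
	const char *product;
	unsigned long long status;
};

static const struct override overrides[] = {
	{ "80862289", "Xiaomi Inc", "Mipad2", 0x0f /* like ACPI_STA_DEFAULT */ },
	{ "MAGN0001", "LENOVO", NULL, 0 },
};

static int lookup(const char *hid, const char *vendor, const char *product,
		  unsigned long long *status)
{
	for (size_t i = 0; i < sizeof(overrides) / sizeof(overrides[0]); i++) {
		const struct override *o = &overrides[i];

		if (strcmp(o->hid, hid))
			continue;
		if (o->vendor && strcmp(o->vendor, vendor))
			continue;
		if (o->product && strcmp(o->product, product))
			continue;

		*status = o->status;
		return 1;
	}

	return 0;
}

int main(void)
{
	unsigned long long sta;

	if (lookup("80862289", "Xiaomi Inc", "Mipad2", &sta))
		printf("override status 0x%llx\n", sta);
	return 0;
}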
*/ - NOT_PRESENT_ENTRY_PATH("\\_SB_.PCI0.SDHB.BRC1", ATOM_AIRMONT, { + NOT_PRESENT_ENTRY_PATH("\\_SB_.PCI0.SDHB.BRC1", INTEL_ATOM_AIRMONT, { DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"), DMI_EXACT_MATCH(DMI_BOARD_SERIAL, "Default string"), @@ -158,7 +158,7 @@ static const struct override_status_id override_status_ids[] = { * as both ACCL0001 and MAGN0001. As we can only ever register an * i2c client for one of them, ignore MAGN0001. */ - NOT_PRESENT_ENTRY_HID("MAGN0001", "1", ATOM_SILVERMONT, { + NOT_PRESENT_ENTRY_HID("MAGN0001", "1", INTEL_ATOM_SILVERMONT, { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_FAMILY, "YOGATablet2"), }), diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index bb4d30d377ae..67ce756f8dfc 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -1024,7 +1024,6 @@ EXPORT_SYMBOL_GPL(ata_scsi_dma_need_drain); int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim, struct ata_device *dev) { - struct request_queue *q = sdev->request_queue; int depth = 1; if (!ata_id_has_unload(dev->id)) @@ -1038,7 +1037,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim, sdev->sector_size = ATA_SECT_SIZE; /* set DMA padding */ - blk_queue_update_dma_pad(q, ATA_DMA_PAD_SZ - 1); + lim->dma_pad_mask = ATA_DMA_PAD_SZ - 1; /* make room for appending the drain */ lim->max_segments--; diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c index 3cb455a32d92..1b85e8bf4ef9 100644 --- a/drivers/ata/pata_macio.c +++ b/drivers/ata/pata_macio.c @@ -816,7 +816,7 @@ static int pata_macio_device_configure(struct scsi_device *sdev, /* OHare has issues with non cache aligned DMA on some chipsets */ if (priv->kind == controller_ohare) { lim->dma_alignment = 31; - blk_queue_update_dma_pad(sdev->request_queue, 31); + lim->dma_pad_mask = 31; /* Tell the world about it */ ata_dev_info(dev, "OHare alignment limits applied\n"); @@ -831,7 +831,7 @@ static int pata_macio_device_configure(struct scsi_device *sdev, if (priv->kind == controller_sh_ata6 || priv->kind == controller_k2_ata6) { /* Allright these are bad, apply restrictions */ lim->dma_alignment = 15; - blk_queue_update_dma_pad(sdev->request_queue, 15); + lim->dma_pad_mask = 15; /* We enable MWI and hack cache line size directly here, this * is specific to this chipset and not normal values, we happen diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index c61ecb0c2ae2..b57326fd48d4 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -95,6 +95,7 @@ void unregister_cpu(struct cpu *cpu) { int logical_cpu = cpu->dev.id; + set_cpu_enabled(logical_cpu, false); unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu)); device_unregister(&cpu->dev); @@ -273,6 +274,13 @@ static ssize_t print_cpus_offline(struct device *dev, } static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL); +static ssize_t print_cpus_enabled(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(cpu_enabled_mask)); +} +static DEVICE_ATTR(enabled, 0444, print_cpus_enabled, NULL); + static ssize_t print_cpus_isolated(struct device *dev, struct device_attribute *attr, char *buf) { @@ -413,6 +421,7 @@ int register_cpu(struct cpu *cpu, int num) register_cpu_under_node(num, cpu_to_node(num)); dev_pm_qos_expose_latency_limit(&cpu->dev, PM_QOS_RESUME_LATENCY_NO_CONSTRAINT); + set_cpu_enabled(num, true); return 0; } @@ -494,6 +503,7 @@ static struct 
attribute *cpu_root_attrs[] = { &cpu_attrs[2].attr.attr, &dev_attr_kernel_max.attr, &dev_attr_offline.attr, + &dev_attr_enabled.attr, &dev_attr_isolated.attr, #ifdef CONFIG_NO_HZ_FULL &dev_attr_nohz_full.attr, @@ -558,7 +568,7 @@ static void __init cpu_dev_register_generic(void) for_each_present_cpu(i) { ret = arch_register_cpu(i); - if (ret) + if (ret && ret != -EPROBE_DEFER) pr_warn("register_cpu %d failed (%d)\n", i, ret); } } diff --git a/drivers/base/regmap/regcache-maple.c b/drivers/base/regmap/regcache-maple.c index e42433404854..f0df2da6d522 100644 --- a/drivers/base/regmap/regcache-maple.c +++ b/drivers/base/regmap/regcache-maple.c @@ -132,9 +132,9 @@ static int regcache_maple_drop(struct regmap *map, unsigned int min, lower_index = mas.index; lower_last = min -1; - lower = kmemdup(entry, ((min - mas.index) * - sizeof(unsigned long)), - map->alloc_flags); + lower = kmemdup_array(entry, + min - mas.index, sizeof(*lower), + map->alloc_flags); if (!lower) { ret = -ENOMEM; goto out_unlocked; @@ -145,10 +145,9 @@ static int regcache_maple_drop(struct regmap *map, unsigned int min, upper_index = max + 1; upper_last = mas.last; - upper = kmemdup(&entry[max - mas.index + 1], - ((mas.last - max) * - sizeof(unsigned long)), - map->alloc_flags); + upper = kmemdup_array(&entry[max - mas.index + 1], + mas.last - max, sizeof(*upper), + map->alloc_flags); if (!upper) { ret = -ENOMEM; goto out_unlocked; diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c index 2e41cb12b8e2..7ec1ec605335 100644 --- a/drivers/base/regmap/regcache.c +++ b/drivers/base/regmap/regcache.c @@ -170,8 +170,8 @@ int regcache_init(struct regmap *map, const struct regmap_config *config) * a copy of it. */ if (config->reg_defaults) { - tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults * - sizeof(struct reg_default), GFP_KERNEL); + tmp_buf = kmemdup_array(config->reg_defaults, map->num_reg_defaults, + sizeof(*map->reg_defaults), GFP_KERNEL); if (!tmp_buf) return -ENOMEM; map->reg_defaults = tmp_buf; @@ -407,7 +407,7 @@ out: * have gone out of sync, force writes of all the paging * registers. 
*/ - rb_for_each(node, 0, &map->range_tree, rbtree_all) { + rb_for_each(node, NULL, &map->range_tree, rbtree_all) { struct regmap_range_node *this = rb_entry(node, struct regmap_range_node, node); diff --git a/drivers/base/regmap/regmap-ac97.c b/drivers/base/regmap/regmap-ac97.c index b9f76bdf74a9..a561971c459c 100644 --- a/drivers/base/regmap/regmap-ac97.c +++ b/drivers/base/regmap/regmap-ac97.c @@ -86,4 +86,5 @@ struct regmap *__devm_regmap_init_ac97(struct snd_ac97 *ac97, } EXPORT_SYMBOL_GPL(__devm_regmap_init_ac97); +MODULE_DESCRIPTION("Register map access API - AC'97 support"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c index a905e955bbfc..c9b39a02278e 100644 --- a/drivers/base/regmap/regmap-i2c.c +++ b/drivers/base/regmap/regmap-i2c.c @@ -397,4 +397,5 @@ struct regmap *__devm_regmap_init_i2c(struct i2c_client *i2c, } EXPORT_SYMBOL_GPL(__devm_regmap_init_i2c); +MODULE_DESCRIPTION("Register map access API - I2C support"); MODULE_LICENSE("GPL"); diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index 45fd13ef13fc..d3ec1345b5b5 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c @@ -305,8 +305,8 @@ static inline int read_sub_irq_data(struct regmap_irq_chip_data *data, unsigned int b) { const struct regmap_irq_chip *chip = data->chip; + const struct regmap_irq_sub_irq_map *subreg; struct regmap *map = data->map; - struct regmap_irq_sub_irq_map *subreg; unsigned int reg; int i, ret = 0; diff --git a/drivers/base/regmap/regmap-kunit.c b/drivers/base/regmap/regmap-kunit.c index be32cd4e84da..d790c7df5cac 100644 --- a/drivers/base/regmap/regmap-kunit.c +++ b/drivers/base/regmap/regmap-kunit.c @@ -145,9 +145,9 @@ static struct regmap *gen_regmap(struct kunit *test, const struct regmap_test_param *param = test->param_value; struct regmap_test_priv *priv = test->priv; unsigned int *buf; - struct regmap *ret; + struct regmap *ret = ERR_PTR(-ENOMEM); size_t size; - int i; + int i, error; struct reg_default *defaults; config->cache_type = param->cache; @@ -163,7 +163,7 @@ static struct regmap *gen_regmap(struct kunit *test, config->max_register += (BLOCK_TEST_SIZE * config->reg_stride); } - size = (config->max_register + 1) * sizeof(unsigned int); + size = array_size(config->max_register + 1, sizeof(*buf)); buf = kmalloc(size, GFP_KERNEL); if (!buf) return ERR_PTR(-ENOMEM); @@ -172,15 +172,17 @@ static struct regmap *gen_regmap(struct kunit *test, *data = kzalloc(sizeof(**data), GFP_KERNEL); if (!(*data)) - return ERR_PTR(-ENOMEM); + goto out_free; (*data)->vals = buf; if (config->num_reg_defaults) { - defaults = kcalloc(config->num_reg_defaults, - sizeof(struct reg_default), - GFP_KERNEL); + defaults = kunit_kcalloc(test, + config->num_reg_defaults, + sizeof(struct reg_default), + GFP_KERNEL); if (!defaults) - return ERR_PTR(-ENOMEM); + goto out_free; + config->reg_defaults = defaults; for (i = 0; i < config->num_reg_defaults; i++) { @@ -190,12 +192,19 @@ static struct regmap *gen_regmap(struct kunit *test, } ret = regmap_init_ram(priv->dev, config, *data); - if (IS_ERR(ret)) { - kfree(buf); - kfree(*data); - } else { - kunit_add_action(test, regmap_exit_action, ret); - } + if (IS_ERR(ret)) + goto out_free; + + /* This calls regmap_exit() on failure, which frees buf and *data */ + error = kunit_add_action_or_reset(test, regmap_exit_action, ret); + if (error) + ret = ERR_PTR(error); + + return ret; + +out_free: + kfree(buf); + kfree(*data); return ret; } @@ -295,6 +304,77 
@@ static void bulk_read(struct kunit *test) KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]); } +static void multi_write(struct kunit *test) +{ + struct regmap *map; + struct regmap_config config; + struct regmap_ram_data *data; + struct reg_sequence sequence[BLOCK_TEST_SIZE]; + unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE]; + int i; + + config = test_regmap_config; + + map = gen_regmap(test, &config, &data); + KUNIT_ASSERT_FALSE(test, IS_ERR(map)); + if (IS_ERR(map)) + return; + + get_random_bytes(&val, sizeof(val)); + + /* + * Data written via the multi API can be read back with single + * reads. + */ + for (i = 0; i < BLOCK_TEST_SIZE; i++) { + sequence[i].reg = i; + sequence[i].def = val[i]; + sequence[i].delay_us = 0; + } + KUNIT_EXPECT_EQ(test, 0, + regmap_multi_reg_write(map, sequence, BLOCK_TEST_SIZE)); + for (i = 0; i < BLOCK_TEST_SIZE; i++) + KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval[i])); + + KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val)); + + /* If using a cache the cache satisfied the read */ + for (i = 0; i < BLOCK_TEST_SIZE; i++) + KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]); +} + +static void multi_read(struct kunit *test) +{ + struct regmap *map; + struct regmap_config config; + struct regmap_ram_data *data; + unsigned int regs[BLOCK_TEST_SIZE]; + unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE]; + int i; + + config = test_regmap_config; + + map = gen_regmap(test, &config, &data); + KUNIT_ASSERT_FALSE(test, IS_ERR(map)); + if (IS_ERR(map)) + return; + + get_random_bytes(&val, sizeof(val)); + + /* Data written as single writes can be read via the multi API */ + for (i = 0; i < BLOCK_TEST_SIZE; i++) { + regs[i] = i; + KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, val[i])); + } + KUNIT_EXPECT_EQ(test, 0, + regmap_multi_reg_read(map, regs, rval, BLOCK_TEST_SIZE)); + KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val)); + + /* If using a cache the cache satisfied the read */ + for (i = 0; i < BLOCK_TEST_SIZE; i++) + KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]); +} + static void read_bypassed(struct kunit *test) { const struct regmap_test_param *param = test->param_value; @@ -759,10 +839,9 @@ static void stress_insert(struct kunit *test) if (IS_ERR(map)) return; - vals = kunit_kcalloc(test, sizeof(unsigned long), config.max_register, - GFP_KERNEL); + buf_sz = array_size(sizeof(*vals), config.max_register); + vals = kunit_kmalloc(test, buf_sz, GFP_KERNEL); KUNIT_ASSERT_FALSE(test, vals == NULL); - buf_sz = sizeof(unsigned long) * config.max_register; get_random_bytes(vals, buf_sz); @@ -1497,16 +1576,17 @@ static struct regmap *gen_raw_regmap(struct kunit *test, struct regmap_test_priv *priv = test->priv; const struct regmap_test_param *param = test->param_value; u16 *buf; - struct regmap *ret; - size_t size = (config->max_register + 1) * config->reg_bits / 8; - int i; + struct regmap *ret = ERR_PTR(-ENOMEM); + int i, error; struct reg_default *defaults; + size_t size; config->cache_type = param->cache; config->val_format_endian = param->val_endian; config->disable_locking = config->cache_type == REGCACHE_RBTREE || config->cache_type == REGCACHE_MAPLE; + size = array_size(config->max_register + 1, BITS_TO_BYTES(config->reg_bits)); buf = kmalloc(size, GFP_KERNEL); if (!buf) return ERR_PTR(-ENOMEM); @@ -1515,15 +1595,16 @@ static struct regmap *gen_raw_regmap(struct kunit *test, *data = kzalloc(sizeof(**data), GFP_KERNEL); if (!(*data)) - return ERR_PTR(-ENOMEM); + goto 
out_free; (*data)->vals = (void *)buf; config->num_reg_defaults = config->max_register + 1; - defaults = kcalloc(config->num_reg_defaults, - sizeof(struct reg_default), - GFP_KERNEL); + defaults = kunit_kcalloc(test, + config->num_reg_defaults, + sizeof(struct reg_default), + GFP_KERNEL); if (!defaults) - return ERR_PTR(-ENOMEM); + goto out_free; config->reg_defaults = defaults; for (i = 0; i < config->num_reg_defaults; i++) { @@ -1536,7 +1617,8 @@ static struct regmap *gen_raw_regmap(struct kunit *test, defaults[i].def = be16_to_cpu(buf[i]); break; default: - return ERR_PTR(-EINVAL); + ret = ERR_PTR(-EINVAL); + goto out_free; } } @@ -1548,12 +1630,19 @@ static struct regmap *gen_raw_regmap(struct kunit *test, config->num_reg_defaults = 0; ret = regmap_init_raw_ram(priv->dev, config, *data); - if (IS_ERR(ret)) { - kfree(buf); - kfree(*data); - } else { - kunit_add_action(test, regmap_exit_action, ret); - } + if (IS_ERR(ret)) + goto out_free; + + /* This calls regmap_exit() on failure, which frees buf and *data */ + error = kunit_add_action_or_reset(test, regmap_exit_action, ret); + if (error) + ret = ERR_PTR(error); + + return ret; + +out_free: + kfree(buf); + kfree(*data); return ret; } @@ -1597,7 +1686,7 @@ static void raw_read_defaults(struct kunit *test) if (IS_ERR(map)) return; - val_len = sizeof(*rval) * (config.max_register + 1); + val_len = array_size(sizeof(*rval), config.max_register + 1); rval = kunit_kmalloc(test, val_len, GFP_KERNEL); KUNIT_ASSERT_TRUE(test, rval != NULL); if (!rval) @@ -1887,6 +1976,8 @@ static struct kunit_case regmap_test_cases[] = { KUNIT_CASE_PARAM(read_bypassed_volatile, real_cache_types_gen_params), KUNIT_CASE_PARAM(bulk_write, regcache_types_gen_params), KUNIT_CASE_PARAM(bulk_read, regcache_types_gen_params), + KUNIT_CASE_PARAM(multi_write, regcache_types_gen_params), + KUNIT_CASE_PARAM(multi_read, regcache_types_gen_params), KUNIT_CASE_PARAM(write_readonly, regcache_types_gen_params), KUNIT_CASE_PARAM(read_writeonly, regcache_types_gen_params), KUNIT_CASE_PARAM(reg_defaults, regcache_types_gen_params), @@ -1958,4 +2049,5 @@ static struct kunit_suite regmap_test_suite = { }; kunit_test_suite(regmap_test_suite); +MODULE_DESCRIPTION("Regmap KUnit tests"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/base/regmap/regmap-ram.c b/drivers/base/regmap/regmap-ram.c index 5b4cbf982a11..4e5b4518ce4d 100644 --- a/drivers/base/regmap/regmap-ram.c +++ b/drivers/base/regmap/regmap-ram.c @@ -83,4 +83,5 @@ struct regmap *__regmap_init_ram(struct device *dev, } EXPORT_SYMBOL_GPL(__regmap_init_ram); +MODULE_DESCRIPTION("Register map access API - Memory region"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/base/regmap/regmap-raw-ram.c b/drivers/base/regmap/regmap-raw-ram.c index 69eabfb89eda..76c98814fb8a 100644 --- a/drivers/base/regmap/regmap-raw-ram.c +++ b/drivers/base/regmap/regmap-raw-ram.c @@ -142,4 +142,5 @@ struct regmap *__regmap_init_raw_ram(struct device *dev, } EXPORT_SYMBOL_GPL(__regmap_init_raw_ram); +MODULE_DESCRIPTION("Register map access API - Memory region with raw access"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/base/regmap/regmap-sccb.c b/drivers/base/regmap/regmap-sccb.c index 986af26d88c2..12bbbb03e5f2 100644 --- a/drivers/base/regmap/regmap-sccb.c +++ b/drivers/base/regmap/regmap-sccb.c @@ -125,4 +125,5 @@ struct regmap *__devm_regmap_init_sccb(struct i2c_client *i2c, } EXPORT_SYMBOL_GPL(__devm_regmap_init_sccb); +MODULE_DESCRIPTION("Register map access API - SCCB support"); MODULE_LICENSE("GPL v2"); diff --git 
a/drivers/base/regmap/regmap-slimbus.c b/drivers/base/regmap/regmap-slimbus.c index 8075db788b39..54eb7d227cf4 100644 --- a/drivers/base/regmap/regmap-slimbus.c +++ b/drivers/base/regmap/regmap-slimbus.c @@ -68,4 +68,5 @@ struct regmap *__devm_regmap_init_slimbus(struct slim_device *slimbus, } EXPORT_SYMBOL_GPL(__devm_regmap_init_slimbus); +MODULE_DESCRIPTION("Register map access API - SLIMbus support"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/base/regmap/regmap-spi-avmm.c b/drivers/base/regmap/regmap-spi-avmm.c index 4c2b94b3e30b..d86a06cadcdb 100644 --- a/drivers/base/regmap/regmap-spi-avmm.c +++ b/drivers/base/regmap/regmap-spi-avmm.c @@ -710,4 +710,5 @@ struct regmap *__devm_regmap_init_spi_avmm(struct spi_device *spi, } EXPORT_SYMBOL_GPL(__devm_regmap_init_spi_avmm); +MODULE_DESCRIPTION("Register map access API - SPI AVMM support"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c index 094cf2a2ca3c..14b1d88997cb 100644 --- a/drivers/base/regmap/regmap-spi.c +++ b/drivers/base/regmap/regmap-spi.c @@ -122,8 +122,7 @@ static const struct regmap_bus *regmap_get_spi_bus(struct spi_device *spi, return ERR_PTR(-ENOMEM); max_msg_size = spi_max_message_size(spi); - reg_reserve_size = config->reg_bits / BITS_PER_BYTE - + config->pad_bits / BITS_PER_BYTE; + reg_reserve_size = (config->reg_bits + config->pad_bits) / BITS_PER_BYTE; if (max_size + reg_reserve_size > max_msg_size) max_size -= reg_reserve_size; diff --git a/drivers/base/regmap/regmap-spmi.c b/drivers/base/regmap/regmap-spmi.c index cdf12d2aa3a1..347bfe9544ce 100644 --- a/drivers/base/regmap/regmap-spmi.c +++ b/drivers/base/regmap/regmap-spmi.c @@ -222,4 +222,5 @@ struct regmap *__devm_regmap_init_spmi_ext(struct spmi_device *sdev, } EXPORT_SYMBOL_GPL(__devm_regmap_init_spmi_ext); +MODULE_DESCRIPTION("Register map access API - SPMI support"); MODULE_LICENSE("GPL"); diff --git a/drivers/base/regmap/regmap-w1.c b/drivers/base/regmap/regmap-w1.c index 3a8b402db852..29fd24f9c7ed 100644 --- a/drivers/base/regmap/regmap-w1.c +++ b/drivers/base/regmap/regmap-w1.c @@ -234,4 +234,5 @@ struct regmap *__devm_regmap_init_w1(struct device *w1_dev, } EXPORT_SYMBOL_GPL(__devm_regmap_init_w1); +MODULE_DESCRIPTION("Register map access API - W1 (1-Wire) support"); MODULE_LICENSE("GPL"); diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 0a34dd3c4f38..bfc6bc1eb3a4 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -2347,7 +2347,7 @@ out: } else { void *wval; - wval = kmemdup(val, val_count * val_bytes, map->alloc_flags); + wval = kmemdup_array(val, val_count, val_bytes, map->alloc_flags); if (!wval) return -ENOMEM; @@ -3101,8 +3101,53 @@ int regmap_fields_read(struct regmap_field *field, unsigned int id, } EXPORT_SYMBOL_GPL(regmap_fields_read); +static int _regmap_bulk_read(struct regmap *map, unsigned int reg, + unsigned int *regs, void *val, size_t val_count) +{ + u32 *u32 = val; + u16 *u16 = val; + u8 *u8 = val; + int ret, i; + + map->lock(map->lock_arg); + + for (i = 0; i < val_count; i++) { + unsigned int ival; + + if (regs) { + if (!IS_ALIGNED(regs[i], map->reg_stride)) { + ret = -EINVAL; + goto out; + } + ret = _regmap_read(map, regs[i], &ival); + } else { + ret = _regmap_read(map, reg + regmap_get_offset(map, i), &ival); + } + if (ret != 0) + goto out; + + switch (map->format.val_bytes) { + case 4: + u32[i] = ival; + break; + case 2: + u16[i] = ival; + break; + case 1: + u8[i] = ival; + break; + default: + ret = -EINVAL; + 
goto out; + } + } +out: + map->unlock(map->lock_arg); + return ret; +} + /** - * regmap_bulk_read() - Read multiple registers from the device + * regmap_bulk_read() - Read multiple sequential registers from the device * * @map: Register map to read from * @reg: First register to be read from @@ -3132,47 +3177,35 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, for (i = 0; i < val_count * val_bytes; i += val_bytes) map->format.parse_inplace(val + i); } else { - u32 *u32 = val; - u16 *u16 = val; - u8 *u8 = val; - - map->lock(map->lock_arg); - - for (i = 0; i < val_count; i++) { - unsigned int ival; - - ret = _regmap_read(map, reg + regmap_get_offset(map, i), - &ival); - if (ret != 0) - goto out; - - switch (map->format.val_bytes) { - case 4: - u32[i] = ival; - break; - case 2: - u16[i] = ival; - break; - case 1: - u8[i] = ival; - break; - default: - ret = -EINVAL; - goto out; - } - } - -out: - map->unlock(map->lock_arg); + ret = _regmap_bulk_read(map, reg, NULL, val, val_count); } - if (!ret) trace_regmap_bulk_read(map, reg, val, val_bytes * val_count); - return ret; } EXPORT_SYMBOL_GPL(regmap_bulk_read); +/** + * regmap_multi_reg_read() - Read multiple non-sequential registers from the device + * + * @map: Register map to read from + * @regs: Array of registers to read from + * @val: Pointer to store read value, in native register size for device + * @val_count: Number of registers to read + * + * A value of zero will be returned on success, a negative errno will + * be returned in error cases. + */ +int regmap_multi_reg_read(struct regmap *map, unsigned int *regs, void *val, + size_t val_count) +{ + if (val_count == 0) + return -EINVAL; + + return _regmap_bulk_read(map, 0, regs, val, val_count); +} +EXPORT_SYMBOL_GPL(regmap_multi_reg_read); + static int _regmap_update_bits(struct regmap *map, unsigned int reg, unsigned int mask, unsigned int val, bool *change, bool force_write) diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index 5b9d4aaebb81..ed209f4f2798 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -354,6 +354,15 @@ config VIRTIO_BLK This is the virtual block driver for virtio. It can be used with QEMU based VMMs (like KVM or Xen). Say Y or M. +config BLK_DEV_RUST_NULL + tristate "Rust null block driver (Experimental)" + depends on RUST + help + This is the Rust implementation of the null block driver. For now it + is only a minimal stub. + + If unsure, say N. 
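As a usage illustration for the regmap_multi_reg_read() helper added in the regmap.c hunk above, here is a minimal caller sketch; it is not part of the patch. The device pointer, regmap handle, and register addresses are hypothetical, and the unsigned int buffer assumes a map with 4-byte values, per the "native register size" contract in the kernel-doc; the implementation also requires every address to be aligned to the map's reg_stride, else it returns -EINVAL.

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/regmap.h>

/* Hypothetical caller: fetch three scattered status registers in one call. */
static int example_read_scattered(struct device *dev, struct regmap *map)
{
	unsigned int regs[] = { 0x04, 0x10, 0x3c };	/* non-sequential addresses */
	unsigned int vals[ARRAY_SIZE(regs)];		/* 4-byte native values */
	int ret;

	ret = regmap_multi_reg_read(map, regs, vals, ARRAY_SIZE(regs));
	if (ret)
		dev_err(dev, "multi-register read failed: %d\n", ret);

	return ret;
}

Unlike regmap_bulk_read(), which walks a contiguous range from a starting register, this variant takes an explicit address array, and the shared _regmap_bulk_read() helper performs the whole sequence under a single lock acquisition, so a cached map can satisfy the reads without touching the bus.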
+ config BLK_DEV_RBD tristate "Rados block device (RBD)" depends on INET && BLOCK diff --git a/drivers/block/Makefile b/drivers/block/Makefile index 101612cba303..1105a2d4fdcb 100644 --- a/drivers/block/Makefile +++ b/drivers/block/Makefile @@ -9,6 +9,9 @@ # needed for trace events ccflags-y += -I$(src) +obj-$(CONFIG_BLK_DEV_RUST_NULL) += rnull_mod.o +rnull_mod-y := rnull.o + obj-$(CONFIG_MAC_FLOPPY) += swim3.o obj-$(CONFIG_BLK_DEV_SWIM) += swim_mod.o obj-$(CONFIG_BLK_DEV_FD) += floppy.o diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c index a25414228e47..49ced65bef4c 100644 --- a/drivers/block/amiflop.c +++ b/drivers/block/amiflop.c @@ -232,6 +232,7 @@ static DEFINE_MUTEX(amiflop_mutex); static unsigned long int fd_def_df0 = FD_DD_3; /* default for df0 if it doesn't identify */ module_param(fd_def_df0, ulong, 0); +MODULE_DESCRIPTION("Amiga floppy driver"); MODULE_LICENSE("GPL"); /* @@ -1776,10 +1777,13 @@ static const struct blk_mq_ops amiflop_mq_ops = { static int fd_alloc_disk(int drive, int system) { + struct queue_limits lim = { + .features = BLK_FEAT_ROTATIONAL, + }; struct gendisk *disk; int err; - disk = blk_mq_alloc_disk(&unit[drive].tag_set, NULL, NULL); + disk = blk_mq_alloc_disk(&unit[drive].tag_set, &lim, NULL); if (IS_ERR(disk)) return PTR_ERR(disk); diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c index b6dac8cee70f..2028795ec61c 100644 --- a/drivers/block/aoe/aoeblk.c +++ b/drivers/block/aoe/aoeblk.c @@ -337,6 +337,7 @@ aoeblk_gdalloc(void *vp) struct queue_limits lim = { .max_hw_sectors = aoe_maxsectors, .io_opt = SZ_2M, + .features = BLK_FEAT_ROTATIONAL, }; ulong flags; int late = 0; diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c index cacc4ba942a8..4ba98c6654be 100644 --- a/drivers/block/ataflop.c +++ b/drivers/block/ataflop.c @@ -1992,9 +1992,12 @@ static const struct blk_mq_ops ataflop_mq_ops = { static int ataflop_alloc_disk(unsigned int drive, unsigned int type) { + struct queue_limits lim = { + .features = BLK_FEAT_ROTATIONAL, + }; struct gendisk *disk; - disk = blk_mq_alloc_disk(&unit[drive].tag_set, NULL, NULL); + disk = blk_mq_alloc_disk(&unit[drive].tag_set, &lim, NULL); if (IS_ERR(disk)) return PTR_ERR(disk); @@ -2197,4 +2200,5 @@ static void __exit atari_floppy_exit(void) module_init(atari_floppy_init) module_exit(atari_floppy_exit) +MODULE_DESCRIPTION("Atari floppy driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/block/brd.c b/drivers/block/brd.c index 558d8e670566..2fd1ed101748 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -296,6 +296,7 @@ static int max_part = 1; module_param(max_part, int, 0444); MODULE_PARM_DESC(max_part, "Num Minors to reserve between devices"); +MODULE_DESCRIPTION("Ram backed block device driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR); MODULE_ALIAS("rd"); @@ -335,6 +336,8 @@ static int brd_alloc(int i) .max_hw_discard_sectors = UINT_MAX, .max_discard_segments = 1, .discard_granularity = PAGE_SIZE, + .features = BLK_FEAT_SYNCHRONOUS | + BLK_FEAT_NOWAIT, }; list_for_each_entry(brd, &brd_devices, brd_list) @@ -366,10 +369,6 @@ static int brd_alloc(int i) strscpy(disk->disk_name, buf, DISK_NAME_LEN); set_capacity(disk, rd_size * 2); - /* Tell the block layer that this is not a rotational device */ - blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue); - blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, disk->queue); - blk_queue_flag_set(QUEUE_FLAG_NOWAIT, disk->queue); err = add_disk(disk); if (err) goto out_cleanup_disk; diff --git 
a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 113b441d4d36..f92673f05c7a 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -2697,6 +2697,9 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig * connect. */ .max_hw_sectors = DRBD_MAX_BIO_SIZE_SAFE >> 8, + .features = BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA | + BLK_FEAT_ROTATIONAL | + BLK_FEAT_STABLE_WRITES, }; device = minor_to_device(minor); @@ -2735,9 +2738,6 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig sprintf(disk->disk_name, "drbd%d", minor); disk->private_data = device; - blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, disk->queue); - blk_queue_write_cache(disk->queue, true, true); - device->md_io.page = alloc_page(GFP_KERNEL); if (!device->md_io.page) goto out_no_io_page; diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 25c9d85667f1..3affb538b989 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -4516,7 +4516,8 @@ static bool floppy_available(int drive) static int floppy_alloc_disk(unsigned int drive, unsigned int type) { struct queue_limits lim = { - .max_hw_sectors = 64, + .max_hw_sectors = 64, + .features = BLK_FEAT_ROTATIONAL, }; struct gendisk *disk; @@ -5016,6 +5017,7 @@ module_param(floppy, charp, 0); module_param(FLOPPY_IRQ, int, 0); module_param(FLOPPY_DMA, int, 0); MODULE_AUTHOR("Alain L. Knaff"); +MODULE_DESCRIPTION("Normal floppy disk support"); MODULE_LICENSE("GPL"); /* This doesn't actually get used other than for module information */ diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 1153721bc7c2..78a7bb28defe 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -211,13 +211,10 @@ static void __loop_update_dio(struct loop_device *lo, bool dio) if (lo->lo_state == Lo_bound) blk_mq_freeze_queue(lo->lo_queue); lo->use_dio = use_dio; - if (use_dio) { - blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, lo->lo_queue); + if (use_dio) lo->lo_flags |= LO_FLAGS_DIRECT_IO; - } else { - blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue); + else lo->lo_flags &= ~LO_FLAGS_DIRECT_IO; - } if (lo->lo_state == Lo_bound) blk_mq_unfreeze_queue(lo->lo_queue); } @@ -939,24 +936,6 @@ static void loop_free_idle_workers_timer(struct timer_list *timer) return loop_free_idle_workers(lo, false); } -static void loop_update_rotational(struct loop_device *lo) -{ - struct file *file = lo->lo_backing_file; - struct inode *file_inode = file->f_mapping->host; - struct block_device *file_bdev = file_inode->i_sb->s_bdev; - struct request_queue *q = lo->lo_queue; - bool nonrot = true; - - /* not all filesystems (e.g. 
tmpfs) have a sb->s_bdev */ - if (file_bdev) - nonrot = bdev_nonrot(file_bdev); - - if (nonrot) - blk_queue_flag_set(QUEUE_FLAG_NONROT, q); - else - blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); -} - /** * loop_set_status_from_info - configure device from loop_info * @lo: struct loop_device to configure @@ -998,17 +977,40 @@ loop_set_status_from_info(struct loop_device *lo, return 0; } -static int loop_reconfigure_limits(struct loop_device *lo, unsigned short bsize, - bool update_discard_settings) +static unsigned short loop_default_blocksize(struct loop_device *lo, + struct block_device *backing_bdev) { + /* In case of direct I/O, match underlying block size */ + if ((lo->lo_backing_file->f_flags & O_DIRECT) && backing_bdev) + return bdev_logical_block_size(backing_bdev); + return SECTOR_SIZE; +} + +static int loop_reconfigure_limits(struct loop_device *lo, unsigned short bsize) +{ + struct file *file = lo->lo_backing_file; + struct inode *inode = file->f_mapping->host; + struct block_device *backing_bdev = NULL; struct queue_limits lim; + if (S_ISBLK(inode->i_mode)) + backing_bdev = I_BDEV(inode); + else if (inode->i_sb->s_bdev) + backing_bdev = inode->i_sb->s_bdev; + + if (!bsize) + bsize = loop_default_blocksize(lo, backing_bdev); + lim = queue_limits_start_update(lo->lo_queue); lim.logical_block_size = bsize; lim.physical_block_size = bsize; lim.io_min = bsize; - if (update_discard_settings) - loop_config_discard(lo, &lim); + lim.features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_ROTATIONAL); + if (file->f_op->fsync && !(lo->lo_flags & LO_FLAGS_READ_ONLY)) + lim.features |= BLK_FEAT_WRITE_CACHE; + if (backing_bdev && !bdev_nonrot(backing_bdev)) + lim.features |= BLK_FEAT_ROTATIONAL; + loop_config_discard(lo, &lim); return queue_limits_commit_update(lo->lo_queue, &lim); } @@ -1017,12 +1019,10 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode, const struct loop_config *config) { struct file *file = fget(config->fd); - struct inode *inode; struct address_space *mapping; int error; loff_t size; bool partscan; - unsigned short bsize; bool is_loop; if (!file) @@ -1055,19 +1055,12 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode, goto out_unlock; mapping = file->f_mapping; - inode = mapping->host; if ((config->info.lo_flags & ~LOOP_CONFIGURE_SETTABLE_FLAGS) != 0) { error = -EINVAL; goto out_unlock; } - if (config->block_size) { - error = blk_validate_block_size(config->block_size); - if (error) - goto out_unlock; - } - error = loop_set_status_from_info(lo, &config->info); if (error) goto out_unlock; @@ -1098,22 +1091,10 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode, lo->old_gfp_mask = mapping_gfp_mask(mapping); mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); - if (!(lo->lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync) - blk_queue_write_cache(lo->lo_queue, true, false); - - if (config->block_size) - bsize = config->block_size; - else if ((lo->lo_backing_file->f_flags & O_DIRECT) && inode->i_sb->s_bdev) - /* In case of direct I/O, match underlying block size */ - bsize = bdev_logical_block_size(inode->i_sb->s_bdev); - else - bsize = 512; - - error = loop_reconfigure_limits(lo, bsize, true); - if (WARN_ON_ONCE(error)) + error = loop_reconfigure_limits(lo, config->block_size); + if (error) goto out_unlock; - loop_update_rotational(lo); loop_update_dio(lo); loop_sysfs_init(lo); @@ -1154,22 +1135,12 @@ out_putf: return error; } -static void __loop_clr_fd(struct loop_device *lo, bool release) +static void 
__loop_clr_fd(struct loop_device *lo) { + struct queue_limits lim; struct file *filp; gfp_t gfp = lo->old_gfp_mask; - if (test_bit(QUEUE_FLAG_WC, &lo->lo_queue->queue_flags)) - blk_queue_write_cache(lo->lo_queue, false, false); - - /* - * Freeze the request queue when unbinding on a live file descriptor and - * thus an open device. When called from ->release we are guaranteed - * that there is no I/O in progress already. - */ - if (!release) - blk_mq_freeze_queue(lo->lo_queue); - spin_lock_irq(&lo->lo_lock); filp = lo->lo_backing_file; lo->lo_backing_file = NULL; @@ -1179,7 +1150,14 @@ static void __loop_clr_fd(struct loop_device *lo, bool release) lo->lo_offset = 0; lo->lo_sizelimit = 0; memset(lo->lo_file_name, 0, LO_NAME_SIZE); - loop_reconfigure_limits(lo, 512, false); + + /* reset the block size to the default */ + lim = queue_limits_start_update(lo->lo_queue); + lim.logical_block_size = SECTOR_SIZE; + lim.physical_block_size = SECTOR_SIZE; + lim.io_min = SECTOR_SIZE; + queue_limits_commit_update(lo->lo_queue, &lim); + invalidate_disk(lo->lo_disk); loop_sysfs_exit(lo); /* let user-space know about this change */ @@ -1187,8 +1165,6 @@ static void __loop_clr_fd(struct loop_device *lo, bool release) mapping_set_gfp_mask(filp->f_mapping, gfp); /* This is safe: open() is still holding a reference. */ module_put(THIS_MODULE); - if (!release) - blk_mq_unfreeze_queue(lo->lo_queue); disk_force_media_change(lo->lo_disk); @@ -1203,11 +1179,7 @@ static void __loop_clr_fd(struct loop_device *lo, bool release) * must be at least one and it can only become zero when the * current holder is released. */ - if (!release) - mutex_lock(&lo->lo_disk->open_mutex); err = bdev_disk_changed(lo->lo_disk, false); - if (!release) - mutex_unlock(&lo->lo_disk->open_mutex); if (err) pr_warn("%s: partition scan of loop%d failed (rc=%d)\n", __func__, lo->lo_number, err); @@ -1256,24 +1228,16 @@ static int loop_clr_fd(struct loop_device *lo) return -ENXIO; } /* - * If we've explicitly asked to tear down the loop device, - * and it has an elevated reference count, set it for auto-teardown when - * the last reference goes away. This stops $!~#$@ udev from - * preventing teardown because it decided that it needs to run blkid on - * the loopback device whenever they appear. xfstests is notorious for - * failing tests because blkid via udev races with a losetup - * <dev>/do something like mkfs/losetup -d <dev> causing the losetup -d - * command to fail with EBUSY. + * Mark the device for removing the backing device on last close. + * If we are the only opener, also switch the state to rundown here to + * prevent new openers from coming in. */
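To make the new teardown semantics concrete, a small userspace sketch (illustrative only, not part of the patch; it assumes /dev/loop0 is already bound to a backing file):

#include <fcntl.h>
#include <linux/loop.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/loop0", O_RDWR);	/* hypothetical, already-bound device */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* No longer fails with EBUSY on an open device: marks LO_FLAGS_AUTOCLEAR. */
	if (ioctl(fd, LOOP_CLR_FD, 0) < 0)
		perror("LOOP_CLR_FD");

	/*
	 * If we were the sole opener, the state is already Lo_rundown and new
	 * opens fail with ENXIO; otherwise the detach is deferred until the
	 * last holder closes. Either way __loop_clr_fd() runs on final release.
	 */
	close(fd);
	return 0;
}

This is what makes the old udev/blkid race moot: losetup -d no longer fails with EBUSY when a scanner briefly holds the device; the detach simply happens at last close.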
- if (disk_openers(lo->lo_disk) > 1) { - lo->lo_flags |= LO_FLAGS_AUTOCLEAR; - loop_global_unlock(lo, true); - return 0; - } - lo->lo_state = Lo_rundown; + + lo->lo_flags |= LO_FLAGS_AUTOCLEAR; + if (disk_openers(lo->lo_disk) == 1) + lo->lo_state = Lo_rundown; loop_global_unlock(lo, true); - __loop_clr_fd(lo, false); return 0; } @@ -1500,10 +1464,6 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg) if (lo->lo_state != Lo_bound) return -ENXIO; - err = blk_validate_block_size(arg); - if (err) - return err; - if (lo->lo_queue->limits.logical_block_size == arg) return 0; @@ -1511,7 +1471,7 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg) invalidate_bdev(lo->lo_device); blk_mq_freeze_queue(lo->lo_queue); - err = loop_reconfigure_limits(lo, arg, false); + err = loop_reconfigure_limits(lo, arg); loop_update_dio(lo); blk_mq_unfreeze_queue(lo->lo_queue); @@ -1740,25 +1700,43 @@ static int lo_compat_ioctl(struct block_device *bdev, blk_mode_t mode, } #endif +static int lo_open(struct gendisk *disk, blk_mode_t mode) +{ + struct loop_device *lo = disk->private_data; + int err; + + err = mutex_lock_killable(&lo->lo_mutex); + if (err) + return err; + + if (lo->lo_state == Lo_deleting || lo->lo_state == Lo_rundown) + err = -ENXIO; + mutex_unlock(&lo->lo_mutex); + return err; +} + static void lo_release(struct gendisk *disk) { struct loop_device *lo = disk->private_data; + bool need_clear = false; if (disk_openers(disk) > 0) return; + /* + * Clear the backing device information if this is the last close of + * a device that's been marked for auto clear, or on which LOOP_CLR_FD + * has been called. + */ mutex_lock(&lo->lo_mutex); - if (lo->lo_state == Lo_bound && (lo->lo_flags & LO_FLAGS_AUTOCLEAR)) { + if (lo->lo_state == Lo_bound && (lo->lo_flags & LO_FLAGS_AUTOCLEAR)) lo->lo_state = Lo_rundown; - mutex_unlock(&lo->lo_mutex); - /* - * In autoclear mode, stop the loop thread - * and remove configuration after last close. - */ - __loop_clr_fd(lo, true); - return; - } + + need_clear = (lo->lo_state == Lo_rundown); mutex_unlock(&lo->lo_mutex); + + if (need_clear) + __loop_clr_fd(lo); } static void lo_free_disk(struct gendisk *disk) @@ -1775,6 +1753,7 @@ static void lo_free_disk(struct gendisk *disk) static const struct block_device_operations lo_fops = { .owner = THIS_MODULE, + .open = lo_open, .release = lo_release, .ioctl = lo_ioctl, #ifdef CONFIG_COMPAT @@ -1853,6 +1832,7 @@ static const struct kernel_param_ops loop_hw_qdepth_param_ops = { device_param_cb(hw_queue_depth, &loop_hw_qdepth_param_ops, &hw_queue_depth, 0444); MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: " __stringify(LOOP_DEFAULT_HW_Q_DEPTH)); +MODULE_DESCRIPTION("Loopback device support"); MODULE_LICENSE("GPL"); MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR); @@ -2060,14 +2040,6 @@ static int loop_add(int i) lo->lo_queue = lo->lo_disk->queue; /* - * By default, we do buffer IO, so it doesn't make sense to enable - * merge because the I/O submitted to backing file is handled page by - * page. For directio mode, merge does help to dispatch bigger request - * to underlayer disk. We will enable merge once directio is enabled. - */ - blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue); - - /* * Disable partition scanning by default. The in-kernel partition * scanning can be requested individually per-device during its setup.
Userspace can always add and remove partitions from all diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 43a187609ef7..c6ef0546ffc9 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -3485,8 +3485,6 @@ skip_create_disk: goto start_service_thread; /* Set device limits. */ - blk_queue_flag_set(QUEUE_FLAG_NONROT, dd->queue); - blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, dd->queue); dma_set_max_seg_size(&dd->pdev->dev, 0x400000); /* Set the capacity of the device in 512 byte sectors. */ diff --git a/drivers/block/n64cart.c b/drivers/block/n64cart.c index 27b2187e7a6d..b9fdeff31caf 100644 --- a/drivers/block/n64cart.c +++ b/drivers/block/n64cart.c @@ -150,8 +150,6 @@ static int __init n64cart_probe(struct platform_device *pdev) set_capacity(disk, size >> SECTOR_SHIFT); set_disk_ro(disk, 1); - blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue); - err = add_disk(disk); if (err) goto out_cleanup_disk; diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index b87aa80a46dd..41a90150b501 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -342,6 +342,14 @@ static int __nbd_set_size(struct nbd_device *nbd, loff_t bytesize, lim.max_hw_discard_sectors = UINT_MAX; else lim.max_hw_discard_sectors = 0; + if (!(nbd->config->flags & NBD_FLAG_SEND_FLUSH)) { + lim.features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA); + } else if (nbd->config->flags & NBD_FLAG_SEND_FUA) { + lim.features |= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA; + } else { + lim.features |= BLK_FEAT_WRITE_CACHE; + lim.features &= ~BLK_FEAT_FUA; + } lim.logical_block_size = blksize; lim.physical_block_size = blksize; error = queue_limits_commit_update(nbd->disk->queue, &lim); @@ -1279,19 +1287,10 @@ static void nbd_bdev_reset(struct nbd_device *nbd) static void nbd_parse_flags(struct nbd_device *nbd) { - struct nbd_config *config = nbd->config; - if (config->flags & NBD_FLAG_READ_ONLY) + if (nbd->config->flags & NBD_FLAG_READ_ONLY) set_disk_ro(nbd->disk, true); else set_disk_ro(nbd->disk, false); - if (config->flags & NBD_FLAG_SEND_FLUSH) { - if (config->flags & NBD_FLAG_SEND_FUA) - blk_queue_write_cache(nbd->disk->queue, true, true); - else - blk_queue_write_cache(nbd->disk->queue, true, false); - } - else - blk_queue_write_cache(nbd->disk->queue, false, false); } static void send_disconnects(struct nbd_device *nbd) @@ -1801,7 +1800,7 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs) { struct queue_limits lim = { .max_hw_sectors = 65536, - .max_user_sectors = 256, + .io_opt = 256 << SECTOR_SHIFT, .max_segments = USHRT_MAX, .max_segment_size = UINT_MAX, }; @@ -1861,11 +1860,6 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs) goto out_err_disk; } - /* - * Tell the block layer that we are not a rotational device - */ - blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue); - mutex_init(&nbd->config_lock); refcount_set(&nbd->config_refs, 0); /* diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c index 75f189e42f88..2f0431e42c49 100644 --- a/drivers/block/null_blk/main.c +++ b/drivers/block/null_blk/main.c @@ -77,7 +77,7 @@ enum { NULL_IRQ_TIMER = 2, }; -static bool g_virt_boundary = false; +static bool g_virt_boundary; module_param_named(virt_boundary, g_virt_boundary, bool, 0444); MODULE_PARM_DESC(virt_boundary, "Require a virtual boundary for the device. Default: False"); @@ -227,7 +227,7 @@ MODULE_PARM_DESC(mbps, "Cache size in MiB for memory-backed device. 
Default: 0 ( static bool g_fua = true; module_param_named(fua, g_fua, bool, 0444); -MODULE_PARM_DESC(zoned, "Enable/disable FUA support when cache_size is used. Default: true"); +MODULE_PARM_DESC(fua, "Enable/disable FUA support when cache_size is used. Default: true"); static unsigned int g_mbps; module_param_named(mbps, g_mbps, uint, 0444); @@ -262,6 +262,10 @@ module_param_named(zone_append_max_sectors, g_zone_append_max_sectors, int, 0444 MODULE_PARM_DESC(zone_append_max_sectors, "Maximum size of a zone append command (in 512B sectors). Specify 0 for zone append emulation"); +static bool g_zone_full; +module_param_named(zone_full, g_zone_full, bool, S_IRUGO); +MODULE_PARM_DESC(zone_full, "Initialize the sequential write required zones of a zoned device to be full. Default: false"); + static struct nullb_device *null_alloc_dev(void); static void null_free_dev(struct nullb_device *dev); static void null_del_dev(struct nullb *nullb); @@ -458,6 +462,7 @@ NULLB_DEVICE_ATTR(zone_nr_conv, uint, NULL); NULLB_DEVICE_ATTR(zone_max_open, uint, NULL); NULLB_DEVICE_ATTR(zone_max_active, uint, NULL); NULLB_DEVICE_ATTR(zone_append_max_sectors, uint, NULL); +NULLB_DEVICE_ATTR(zone_full, bool, NULL); NULLB_DEVICE_ATTR(virt_boundary, bool, NULL); NULLB_DEVICE_ATTR(no_sched, bool, NULL); NULLB_DEVICE_ATTR(shared_tags, bool, NULL); @@ -610,6 +615,7 @@ static struct configfs_attribute *nullb_device_attrs[] = { &nullb_device_attr_zone_append_max_sectors, &nullb_device_attr_zone_readonly, &nullb_device_attr_zone_offline, + &nullb_device_attr_zone_full, &nullb_device_attr_virt_boundary, &nullb_device_attr_no_sched, &nullb_device_attr_shared_tags, @@ -700,7 +706,7 @@ static ssize_t memb_group_features_show(struct config_item *item, char *page) "shared_tags,size,submit_queues,use_per_node_hctx," "virt_boundary,zoned,zone_capacity,zone_max_active," "zone_max_open,zone_nr_conv,zone_offline,zone_readonly," - "zone_size,zone_append_max_sectors\n"); + "zone_size,zone_append_max_sectors,zone_full\n"); } CONFIGFS_ATTR_RO(memb_group_, features); @@ -781,6 +787,7 @@ static struct nullb_device *null_alloc_dev(void) dev->zone_max_open = g_zone_max_open; dev->zone_max_active = g_zone_max_active; dev->zone_append_max_sectors = g_zone_append_max_sectors; + dev->zone_full = g_zone_full; dev->virt_boundary = g_virt_boundary; dev->no_sched = g_no_sched; dev->shared_tags = g_shared_tags; @@ -1824,9 +1831,6 @@ static int null_validate_conf(struct nullb_device *dev) dev->queue_mode = NULL_Q_MQ; } - if (blk_validate_block_size(dev->blocksize)) - return -EINVAL; - if (dev->use_per_node_hctx) { if (dev->submit_queues != nr_online_nodes) dev->submit_queues = nr_online_nodes; @@ -1928,6 +1932,13 @@ static int null_add_dev(struct nullb_device *dev) goto out_cleanup_tags; } + if (dev->cache_size > 0) { + set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags); + lim.features |= BLK_FEAT_WRITE_CACHE; + if (dev->fua) + lim.features |= BLK_FEAT_FUA; + } + nullb->disk = blk_mq_alloc_disk(nullb->tag_set, &lim, nullb); if (IS_ERR(nullb->disk)) { rv = PTR_ERR(nullb->disk); @@ -1940,13 +1951,7 @@ static int null_add_dev(struct nullb_device *dev) nullb_setup_bwtimer(nullb); } - if (dev->cache_size > 0) { - set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags); - blk_queue_write_cache(nullb->q, true, dev->fua); - } - nullb->q->queuedata = nullb; - blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q); rv = ida_alloc(&nullb_indexes, GFP_KERNEL); if (rv < 0) diff --git a/drivers/block/null_blk/null_blk.h b/drivers/block/null_blk/null_blk.h index 
3234e6c85eed..a7bb32f73ec3 100644 --- a/drivers/block/null_blk/null_blk.h +++ b/drivers/block/null_blk/null_blk.h @@ -101,6 +101,7 @@ struct nullb_device { bool memory_backed; /* if data is stored in memory */ bool discard; /* if support discard */ bool zoned; /* if device is zoned */ + bool zone_full; /* Initialize zones to be full */ bool virt_boundary; /* virtual boundary on/off for the device */ bool no_sched; /* no IO scheduler for the device */ bool shared_tags; /* share tag set between devices for blk-mq */ diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c index f118d304f310..9bc768b2ca56 100644 --- a/drivers/block/null_blk/zoned.c +++ b/drivers/block/null_blk/zoned.c @@ -145,7 +145,7 @@ int null_init_zoned_dev(struct nullb_device *dev, zone = &dev->zones[i]; null_init_zone_lock(dev, zone); - zone->start = zone->wp = sector; + zone->start = sector; if (zone->start + dev->zone_size_sects > dev_capacity_sects) zone->len = dev_capacity_sects - zone->start; else @@ -153,12 +153,18 @@ int null_init_zoned_dev(struct nullb_device *dev, zone->capacity = min_t(sector_t, zone->len, zone_capacity_sects); zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ; - zone->cond = BLK_ZONE_COND_EMPTY; + if (dev->zone_full) { + zone->cond = BLK_ZONE_COND_FULL; + zone->wp = zone->start + zone->capacity; + } else { + zone->cond = BLK_ZONE_COND_EMPTY; + zone->wp = zone->start; + } sector += dev->zone_size_sects; } - lim->zoned = true; + lim->features |= BLK_FEAT_ZONED; lim->chunk_sectors = dev->zone_size_sects; lim->max_zone_append_sectors = dev->zone_append_max_sectors; lim->max_open_zones = dev->zone_max_open; @@ -171,9 +177,6 @@ int null_register_zoned_dev(struct nullb *nullb) struct request_queue *q = nullb->q; struct gendisk *disk = nullb->disk; - blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q); - disk->nr_zones = bdev_nr_zones(disk->part0); - pr_info("%s: using %s zone append\n", disk->disk_name, queue_emulates_zone_append(q) ?
"emulated" : "native"); diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 8a2ce8070010..7cece5884b9c 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -2622,6 +2622,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev) struct queue_limits lim = { .max_hw_sectors = PACKET_MAX_SECTORS, .logical_block_size = CD_FRAMESIZE, + .features = BLK_FEAT_ROTATIONAL, }; int idx; int ret = -ENOMEM; diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c index b810ac0a5c4b..ff45ed766469 100644 --- a/drivers/block/ps3disk.c +++ b/drivers/block/ps3disk.c @@ -388,9 +388,9 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev) .max_segments = -1, .max_segment_size = dev->bounce_size, .dma_alignment = dev->blk_size - 1, + .features = BLK_FEAT_WRITE_CACHE | + BLK_FEAT_ROTATIONAL, }; - - struct request_queue *queue; struct gendisk *gendisk; if (dev->blk_size < 512) { @@ -447,10 +447,6 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev) goto fail_free_tag_set; } - queue = gendisk->queue; - - blk_queue_write_cache(queue, true, false); - priv->gendisk = gendisk; gendisk->major = ps3disk_major; gendisk->first_minor = devidx * PS3DISK_MINORS; diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 26ff5cd2bf0a..008e850555f4 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -4949,14 +4949,12 @@ static const struct blk_mq_ops rbd_mq_ops = { static int rbd_init_disk(struct rbd_device *rbd_dev) { struct gendisk *disk; - struct request_queue *q; unsigned int objset_bytes = rbd_dev->layout.object_size * rbd_dev->layout.stripe_count; struct queue_limits lim = { .max_hw_sectors = objset_bytes >> SECTOR_SHIFT, - .max_user_sectors = objset_bytes >> SECTOR_SHIFT, + .io_opt = objset_bytes, .io_min = rbd_dev->opts->alloc_size, - .io_opt = rbd_dev->opts->alloc_size, .max_segments = USHRT_MAX, .max_segment_size = UINT_MAX, }; @@ -4980,12 +4978,14 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) lim.max_write_zeroes_sectors = objset_bytes >> SECTOR_SHIFT; } + if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC)) + lim.features |= BLK_FEAT_STABLE_WRITES; + disk = blk_mq_alloc_disk(&rbd_dev->tag_set, &lim, rbd_dev); if (IS_ERR(disk)) { err = PTR_ERR(disk); goto out_tag_set; } - q = disk->queue; snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d", rbd_dev->dev_id); @@ -4997,13 +4997,6 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) disk->minors = RBD_MINORS_PER_MAJOR; disk->fops = &rbd_bd_ops; disk->private_data = rbd_dev; - - blk_queue_flag_set(QUEUE_FLAG_NONROT, q); - /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */ - - if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC)) - blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q); - rbd_dev->disk = disk; return 0; diff --git a/drivers/block/rnbd/rnbd-clt-sysfs.c b/drivers/block/rnbd/rnbd-clt-sysfs.c index 39887556cf95..6ea7c12e3a87 100644 --- a/drivers/block/rnbd/rnbd-clt-sysfs.c +++ b/drivers/block/rnbd/rnbd-clt-sysfs.c @@ -475,7 +475,7 @@ void rnbd_clt_remove_dev_symlink(struct rnbd_clt_dev *dev) } } -static struct kobj_type rnbd_dev_ktype = { +static const struct kobj_type rnbd_dev_ktype = { .sysfs_ops = &kobj_sysfs_ops, .default_groups = rnbd_dev_groups, }; diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c index b7ffe03c6160..c34695d2eea7 100644 --- a/drivers/block/rnbd/rnbd-clt.c +++ b/drivers/block/rnbd/rnbd-clt.c @@ -1352,10 +1352,6 @@ static int rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev, if (dev->access_mode == 
RNBD_ACCESS_RO) set_disk_ro(dev->gd, true); - /* - * Network device does not need rotational - */ - blk_queue_flag_set(QUEUE_FLAG_NONROT, dev->queue); err = add_disk(dev->gd); if (err) put_disk(dev->gd); @@ -1389,18 +1385,18 @@ static int rnbd_client_setup_device(struct rnbd_clt_dev *dev, le32_to_cpu(rsp->max_discard_sectors); } + if (rsp->cache_policy & RNBD_WRITEBACK) { + lim.features |= BLK_FEAT_WRITE_CACHE; + if (rsp->cache_policy & RNBD_FUA) + lim.features |= BLK_FEAT_FUA; + } + dev->gd = blk_mq_alloc_disk(&dev->sess->tag_set, &lim, dev); if (IS_ERR(dev->gd)) return PTR_ERR(dev->gd); dev->queue = dev->gd->queue; rnbd_init_mq_hw_queues(dev); - blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, dev->queue); - blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, dev->queue); - blk_queue_write_cache(dev->queue, - !!(rsp->cache_policy & RNBD_WRITEBACK), - !!(rsp->cache_policy & RNBD_FUA)); - return rnbd_clt_setup_gen_disk(dev, rsp, idx); } diff --git a/drivers/block/rnbd/rnbd-srv-sysfs.c b/drivers/block/rnbd/rnbd-srv-sysfs.c index cba6ba43c2c2..64780094442c 100644 --- a/drivers/block/rnbd/rnbd-srv-sysfs.c +++ b/drivers/block/rnbd/rnbd-srv-sysfs.c @@ -33,7 +33,7 @@ static void rnbd_srv_dev_release(struct kobject *kobj) kfree(dev); } -static struct kobj_type dev_ktype = { +static const struct kobj_type dev_ktype = { .sysfs_ops = &kobj_sysfs_ops, .release = rnbd_srv_dev_release }; @@ -184,7 +184,7 @@ static void rnbd_srv_sess_dev_release(struct kobject *kobj) rnbd_destroy_sess_dev(sess_dev, sess_dev->keep_id); } -static struct kobj_type rnbd_srv_sess_dev_ktype = { +static const struct kobj_type rnbd_srv_sess_dev_ktype = { .sysfs_ops = &kobj_sysfs_ops, .release = rnbd_srv_sess_dev_release, }; diff --git a/drivers/block/rnull.rs b/drivers/block/rnull.rs new file mode 100644 index 000000000000..b0227cf9ddd3 --- /dev/null +++ b/drivers/block/rnull.rs @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! This is a Rust implementation of the C null block driver. +//! +//! Supported features: +//! +//! - blk-mq interface +//! - direct completion +//! - block size 4k +//! +//! The driver is not configurable. + +use kernel::{ + alloc::flags, + block::mq::{ + self, + gen_disk::{self, GenDisk}, + Operations, TagSet, + }, + error::Result, + new_mutex, pr_info, + prelude::*, + sync::{Arc, Mutex}, + types::ARef, +}; + +module! { + type: NullBlkModule, + name: "rnull_mod", + author: "Andreas Hindborg", + license: "GPL v2", +} + +struct NullBlkModule { + _disk: Pin<Box<Mutex<GenDisk<NullBlkDevice>>>>, +} + +impl kernel::Module for NullBlkModule { + fn init(_module: &'static ThisModule) -> Result<Self> { + pr_info!("Rust null_blk loaded\n"); + let tagset = Arc::pin_init(TagSet::new(1, 256, 1), flags::GFP_KERNEL)?; + + let disk = gen_disk::GenDiskBuilder::new() + .capacity_sectors(4096 << 11) + .logical_block_size(4096)? + .physical_block_size(4096)? + .rotational(false) + .build(format_args!("rnullb{}", 0), tagset)?; + + let disk = Box::pin_init(new_mutex!(disk, "nullb:disk"), flags::GFP_KERNEL)?; + + Ok(Self { _disk: disk }) + } +} + +struct NullBlkDevice; + +#[vtable] +impl Operations for NullBlkDevice { + #[inline(always)] + fn queue_rq(rq: ARef<mq::Request<Self>>, _is_last: bool) -> Result { + mq::Request::end_ok(rq) + .map_err(|_e| kernel::error::code::EIO) + // We take no refcounts on the request, so we expect to be able to + // end the request. The request reference must be unique at this + // point, and so `end_ok` cannot fail. 
+ .expect("Fatal error - expected to be able to end request"); + + Ok(()) + } + + fn commit_rqs() {} +} diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c index 5286cb8e0824..2d38331ee667 100644 --- a/drivers/block/sunvdc.c +++ b/drivers/block/sunvdc.c @@ -791,6 +791,7 @@ static int probe_disk(struct vdc_port *port) .seg_boundary_mask = PAGE_SIZE - 1, .max_segment_size = PAGE_SIZE, .max_segments = port->ring_cookies, + .features = BLK_FEAT_ROTATIONAL, }; struct request_queue *q; struct gendisk *g; diff --git a/drivers/block/swim.c b/drivers/block/swim.c index 6731678f3a41..126f151c4f2c 100644 --- a/drivers/block/swim.c +++ b/drivers/block/swim.c @@ -787,6 +787,9 @@ static void swim_cleanup_floppy_disk(struct floppy_state *fs) static int swim_floppy_init(struct swim_priv *swd) { + struct queue_limits lim = { + .features = BLK_FEAT_ROTATIONAL, + }; int err; int drive; struct swim __iomem *base = swd->base; @@ -820,7 +823,7 @@ static int swim_floppy_init(struct swim_priv *swd) goto exit_put_disks; swd->unit[drive].disk = - blk_mq_alloc_disk(&swd->unit[drive].tag_set, NULL, + blk_mq_alloc_disk(&swd->unit[drive].tag_set, &lim, &swd->unit[drive]); if (IS_ERR(swd->unit[drive].disk)) { blk_mq_free_tag_set(&swd->unit[drive].tag_set); diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c index a04756ac778e..90be1017f7bf 100644 --- a/drivers/block/swim3.c +++ b/drivers/block/swim3.c @@ -1189,6 +1189,9 @@ static int swim3_add_device(struct macio_dev *mdev, int index) static int swim3_attach(struct macio_dev *mdev, const struct of_device_id *match) { + struct queue_limits lim = { + .features = BLK_FEAT_ROTATIONAL, + }; struct floppy_state *fs; struct gendisk *disk; int rc; @@ -1210,7 +1213,7 @@ static int swim3_attach(struct macio_dev *mdev, if (rc) goto out_unregister; - disk = blk_mq_alloc_disk(&fs->tag_set, NULL, fs); + disk = blk_mq_alloc_disk(&fs->tag_set, &lim, fs); if (IS_ERR(disk)) { rc = PTR_ERR(disk); goto out_free_tag_set; diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index 4e159948c912..6fb799ec0d88 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -248,8 +248,6 @@ static int ublk_dev_param_zoned_validate(const struct ublk_device *ub) static void ublk_dev_param_zoned_apply(struct ublk_device *ub) { - blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ub->ub_disk->queue); - ub->ub_disk->nr_zones = ublk_get_nr_zones(ub); } @@ -484,16 +482,8 @@ static inline unsigned ublk_pos_to_tag(loff_t pos) static void ublk_dev_param_basic_apply(struct ublk_device *ub) { - struct request_queue *q = ub->ub_disk->queue; const struct ublk_param_basic *p = &ub->params.basic; - blk_queue_write_cache(q, p->attrs & UBLK_ATTR_VOLATILE_CACHE, - p->attrs & UBLK_ATTR_FUA); - if (p->attrs & UBLK_ATTR_ROTATIONAL) - blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); - else - blk_queue_flag_set(QUEUE_FLAG_NONROT, q); - if (p->attrs & UBLK_ATTR_READ_ONLY) set_disk_ro(ub->ub_disk, true); @@ -2204,12 +2194,21 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd) if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) return -EOPNOTSUPP; - lim.zoned = true; + lim.features |= BLK_FEAT_ZONED; lim.max_active_zones = p->max_active_zones; lim.max_open_zones = p->max_open_zones; lim.max_zone_append_sectors = p->max_zone_append_sectors; } + if (ub->params.basic.attrs & UBLK_ATTR_VOLATILE_CACHE) { + lim.features |= BLK_FEAT_WRITE_CACHE; + if (ub->params.basic.attrs & UBLK_ATTR_FUA) + lim.features |= BLK_FEAT_FUA; + } + + if (ub->params.basic.attrs & UBLK_ATTR_ROTATIONAL) + 
lim.features |= BLK_FEAT_ROTATIONAL; + if (wait_for_completion_interruptible(&ub->completion) != 0) return -EINTR; @@ -3017,4 +3016,5 @@ module_param_cb(ublks_max, &ublk_max_ublks_ops, &ublks_max, 0644); MODULE_PARM_DESC(ublks_max, "max number of ublk devices allowed to add(default: 64)"); MODULE_AUTHOR("Ming Lei <ming.lei@redhat.com>"); +MODULE_DESCRIPTION("Userspace block device"); MODULE_LICENSE("GPL"); diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 2351f411fa46..e3147a611151 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -728,7 +728,7 @@ static int virtblk_read_zoned_limits(struct virtio_blk *vblk, dev_dbg(&vdev->dev, "probing host-managed zoned device\n"); - lim->zoned = true; + lim->features |= BLK_FEAT_ZONED; virtio_cread(vdev, struct virtio_blk_config, zoned.max_open_zones, &v); @@ -1089,14 +1089,6 @@ static int virtblk_get_cache_mode(struct virtio_device *vdev) return writeback; } -static void virtblk_update_cache_mode(struct virtio_device *vdev) -{ - u8 writeback = virtblk_get_cache_mode(vdev); - struct virtio_blk *vblk = vdev->priv; - - blk_queue_write_cache(vblk->disk->queue, writeback, false); -} - static const char *const virtblk_cache_types[] = { "write through", "write back" }; @@ -1108,6 +1100,7 @@ cache_type_store(struct device *dev, struct device_attribute *attr, struct gendisk *disk = dev_to_disk(dev); struct virtio_blk *vblk = disk->private_data; struct virtio_device *vdev = vblk->vdev; + struct queue_limits lim; int i; BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE)); @@ -1116,7 +1109,17 @@ cache_type_store(struct device *dev, struct device_attribute *attr, return i; virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i); - virtblk_update_cache_mode(vdev); + + lim = queue_limits_start_update(disk->queue); + if (virtblk_get_cache_mode(vdev)) + lim.features |= BLK_FEAT_WRITE_CACHE; + else + lim.features &= ~BLK_FEAT_WRITE_CACHE; + blk_mq_freeze_queue(disk->queue); + i = queue_limits_commit_update(disk->queue, &lim); + blk_mq_unfreeze_queue(disk->queue); + if (i) + return i; return count; } @@ -1247,7 +1250,7 @@ static int virtblk_read_limits(struct virtio_blk *vblk, struct queue_limits *lim) { struct virtio_device *vdev = vblk->vdev; - u32 v, blk_size, max_size, sg_elems, opt_io_size; + u32 v, max_size, sg_elems, opt_io_size; u32 max_discard_segs = 0; u32 discard_granularity = 0; u16 min_io_size; @@ -1286,46 +1289,36 @@ static int virtblk_read_limits(struct virtio_blk *vblk, lim->max_segment_size = max_size; /* Host can optionally specify the block size of the device */ - err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE, + virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE, struct virtio_blk_config, blk_size, - &blk_size); - if (!err) { - err = blk_validate_block_size(blk_size); - if (err) { - dev_err(&vdev->dev, - "virtio_blk: invalid block size: 0x%x\n", - blk_size); - return err; - } - - lim->logical_block_size = blk_size; - } else - blk_size = lim->logical_block_size; + &lim->logical_block_size); /* Use topology information if available */ err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY, struct virtio_blk_config, physical_block_exp, &physical_block_exp); if (!err && physical_block_exp) - lim->physical_block_size = blk_size * (1 << physical_block_exp); + lim->physical_block_size = + lim->logical_block_size * (1 << physical_block_exp); err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY, struct virtio_blk_config, alignment_offset, &alignment_offset); if (!err && 
alignment_offset) - lim->alignment_offset = blk_size * alignment_offset; + lim->alignment_offset = + lim->logical_block_size * alignment_offset; err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY, struct virtio_blk_config, min_io_size, &min_io_size); if (!err && min_io_size) - lim->io_min = blk_size * min_io_size; + lim->io_min = lim->logical_block_size * min_io_size; err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY, struct virtio_blk_config, opt_io_size, &opt_io_size); if (!err && opt_io_size) - lim->io_opt = blk_size * opt_io_size; + lim->io_opt = lim->logical_block_size * opt_io_size; if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) { virtio_cread(vdev, struct virtio_blk_config, @@ -1419,7 +1412,7 @@ static int virtblk_read_limits(struct virtio_blk *vblk, lim->discard_granularity = discard_granularity << SECTOR_SHIFT; else - lim->discard_granularity = blk_size; + lim->discard_granularity = lim->logical_block_size; } if (virtio_has_feature(vdev, VIRTIO_BLK_F_ZONED)) { @@ -1448,7 +1441,10 @@ static int virtblk_read_limits(struct virtio_blk *vblk, static int virtblk_probe(struct virtio_device *vdev) { struct virtio_blk *vblk; - struct queue_limits lim = { }; + struct queue_limits lim = { + .features = BLK_FEAT_ROTATIONAL, + .logical_block_size = SECTOR_SIZE, + }; int err, index; unsigned int queue_depth; @@ -1512,6 +1508,9 @@ static int virtblk_probe(struct virtio_device *vdev) if (err) goto out_free_tags; + if (virtblk_get_cache_mode(vdev)) + lim.features |= BLK_FEAT_WRITE_CACHE; + vblk->disk = blk_mq_alloc_disk(&vblk->tag_set, &lim, vblk); if (IS_ERR(vblk->disk)) { err = PTR_ERR(vblk->disk); @@ -1527,9 +1526,6 @@ static int virtblk_probe(struct virtio_device *vdev) vblk->disk->fops = &virtblk_fops; vblk->index = index; - /* configure queue flush support */ - virtblk_update_cache_mode(vdev); - /* If disk is read-only in the host, the guest should obey */ if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO)) set_disk_ro(vblk->disk, 1); @@ -1541,8 +1537,8 @@ static int virtblk_probe(struct virtio_device *vdev) * All steps that follow use the VQs therefore they need to be * placed after the virtio_device_ready() call above. */ - if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && lim.zoned) { - blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, vblk->disk->queue); + if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && + (lim.features & BLK_FEAT_ZONED)) { err = blk_revalidate_disk_zones(vblk->disk); if (err) goto out_cleanup_disk; diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index 944576d582fb..838064593f62 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -1563,5 +1563,6 @@ static void __exit xen_blkif_fini(void) module_exit(xen_blkif_fini); +MODULE_DESCRIPTION("Virtual block device back-end driver"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_ALIAS("xen-backend:vbd"); diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index fd7c0ff2139c..59ce113b882a 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -788,6 +788,11 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri * A barrier request a superset of FUA, so we can * implement it the same way. (It's also a FLUSH+FUA, * since it is guaranteed ordered WRT previous writes.) + * + * Note that we can end up here with a FUA write and the + * flags cleared. This happens when the flag was + * run-time disabled after a failing I/O, and we'll + * simply submit it as a normal write.
*/ if (info->feature_flush && info->feature_fua) ring_req->operation = @@ -795,8 +800,6 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri else if (info->feature_flush) ring_req->operation = BLKIF_OP_FLUSH_DISKCACHE; - else - ring_req->operation = 0; } ring_req->u.rw.nr_segments = num_grant; if (unlikely(require_extra_req)) { @@ -887,16 +890,6 @@ static inline void flush_requests(struct blkfront_ring_info *rinfo) notify_remote_via_irq(rinfo->irq); } -static inline bool blkif_request_flush_invalid(struct request *req, - struct blkfront_info *info) -{ - return (blk_rq_is_passthrough(req) || - ((req_op(req) == REQ_OP_FLUSH) && - !info->feature_flush) || - ((req->cmd_flags & REQ_FUA) && - !info->feature_fua)); -} - static blk_status_t blkif_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *qd) { @@ -908,12 +901,22 @@ static blk_status_t blkif_queue_rq(struct blk_mq_hw_ctx *hctx, rinfo = get_rinfo(info, qid); blk_mq_start_request(qd->rq); spin_lock_irqsave(&rinfo->ring_lock, flags); - if (RING_FULL(&rinfo->ring)) - goto out_busy; - if (blkif_request_flush_invalid(qd->rq, rinfo->dev_info)) - goto out_err; + /* + * Check if the backend actually supports flushes. + * + * While the block layer won't send us flushes if we don't claim to + * support them, the Xen protocol allows the backend to revoke support + * at any time. That is of course a really bad idea and dangerous, but + * has been allowed for 10+ years. In that case we simply clear the + * flags, and directly return here for an empty flush and ignore the + * FUA flag later on. + */ + if (unlikely(req_op(qd->rq) == REQ_OP_FLUSH && !info->feature_flush)) + goto complete; + if (RING_FULL(&rinfo->ring)) + goto out_busy; if (blkif_queue_request(qd->rq, rinfo)) goto out_busy; @@ -921,14 +924,14 @@ static blk_status_t blkif_queue_rq(struct blk_mq_hw_ctx *hctx, spin_unlock_irqrestore(&rinfo->ring_lock, flags); return BLK_STS_OK; -out_err: - spin_unlock_irqrestore(&rinfo->ring_lock, flags); - return BLK_STS_IOERR; - out_busy: blk_mq_stop_hw_queue(hctx); spin_unlock_irqrestore(&rinfo->ring_lock, flags); return BLK_STS_DEV_RESOURCE; +complete: + spin_unlock_irqrestore(&rinfo->ring_lock, flags); + blk_mq_end_request(qd->rq, BLK_STS_OK); + return BLK_STS_OK; } static void blkif_complete_rq(struct request *rq) @@ -956,6 +959,12 @@ static void blkif_set_queue_limits(const struct blkfront_info *info, lim->max_secure_erase_sectors = UINT_MAX; } + if (info->feature_flush) { + lim->features |= BLK_FEAT_WRITE_CACHE; + if (info->feature_fua) + lim->features |= BLK_FEAT_FUA; + } + /* Hard sector size and max sectors impersonate the equiv. hardware. */ lim->logical_block_size = info->sector_size; lim->physical_block_size = info->physical_sector_size; @@ -984,8 +993,6 @@ static const char *flush_info(struct blkfront_info *info) static void xlvbd_flush(struct blkfront_info *info) { - blk_queue_write_cache(info->rq, info->feature_flush ? true : false, - info->feature_fua ? true : false); pr_info("blkfront: %s: %s %s %s %s %s %s %s\n", info->gd->disk_name, flush_info(info), "persistent grants:", info->feature_persistent ? 
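Editor's note: the ublk, virtio-blk, and xen-blkfront hunks above are all instances of one conversion in this series: write-cache, FUA, rotational, and zoned state now live in the features word of struct queue_limits, declared before blk_mq_alloc_disk() or committed with queue_limits_commit_update(), rather than being toggled afterwards with blk_queue_write_cache()/blk_queue_flag_set(). A minimal sketch of the resulting driver pattern; my_alloc_disk() and my_set_write_cache() are hypothetical helpers, not kernel APIs:

	static struct gendisk *my_alloc_disk(struct blk_mq_tag_set *set,
					     void *data, bool wc, bool fua)
	{
		/* declare features up front instead of flipping queue flags later */
		struct queue_limits lim = {
			.features = BLK_FEAT_ROTATIONAL,
		};

		if (wc) {
			lim.features |= BLK_FEAT_WRITE_CACHE;
			if (fua)
				lim.features |= BLK_FEAT_FUA;
		}
		return blk_mq_alloc_disk(set, &lim, data);
	}

	/*
	 * Runtime feature changes go through a frozen-queue limits update,
	 * as in cache_type_store() above.
	 */
	static int my_set_write_cache(struct gendisk *disk, bool enable)
	{
		struct queue_limits lim;
		int err;

		lim = queue_limits_start_update(disk->queue);
		if (enable)
			lim.features |= BLK_FEAT_WRITE_CACHE;
		else
			lim.features &= ~BLK_FEAT_WRITE_CACHE;
		blk_mq_freeze_queue(disk->queue);
		err = queue_limits_commit_update(disk->queue, &lim);
		blk_mq_unfreeze_queue(disk->queue);
		return err;
	}

The point of the design is that feature state gets validated and applied atomically with the rest of the limits instead of racing with in-flight I/O.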
@@ -1063,8 +1070,7 @@ static char *encode_disk_name(char *ptr, unsigned int n) } static int xlvbd_alloc_gendisk(blkif_sector_t capacity, - struct blkfront_info *info, u16 sector_size, - unsigned int physical_sector_size) + struct blkfront_info *info) { struct queue_limits lim = {}; struct gendisk *gd; @@ -1139,7 +1145,6 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity, err = PTR_ERR(gd); goto out_free_tag_set; } - blk_queue_flag_set(QUEUE_FLAG_VIRT, gd->queue); strcpy(gd->disk_name, DEV_NAME); ptr = encode_disk_name(gd->disk_name + sizeof(DEV_NAME) - 1, offset); @@ -1159,8 +1164,6 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity, info->rq = gd->queue; info->gd = gd; - info->sector_size = sector_size; - info->physical_sector_size = physical_sector_size; xlvbd_flush(info); @@ -1605,8 +1608,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) blkif_req(req)->error = BLK_STS_NOTSUPP; info->feature_discard = 0; info->feature_secdiscard = 0; - blk_queue_max_discard_sectors(rq, 0); - blk_queue_max_secure_erase_sectors(rq, 0); + blk_queue_disable_discard(rq); + blk_queue_disable_secure_erase(rq); } break; case BLKIF_OP_FLUSH_DISKCACHE: @@ -1627,7 +1630,6 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) blkif_req(req)->error = BLK_STS_OK; info->feature_fua = 0; info->feature_flush = 0; - xlvbd_flush(info); } fallthrough; case BLKIF_OP_READ: @@ -2315,8 +2317,6 @@ static void blkfront_gather_backend_features(struct blkfront_info *info) static void blkfront_connect(struct blkfront_info *info) { unsigned long long sectors; - unsigned long sector_size; - unsigned int physical_sector_size; int err, i; struct blkfront_ring_info *rinfo; @@ -2355,7 +2355,7 @@ static void blkfront_connect(struct blkfront_info *info) err = xenbus_gather(XBT_NIL, info->xbdev->otherend, "sectors", "%llu", §ors, "info", "%u", &info->vdisk_info, - "sector-size", "%lu", §or_size, + "sector-size", "%lu", &info->sector_size, NULL); if (err) { xenbus_dev_fatal(info->xbdev, err, @@ -2369,9 +2369,9 @@ static void blkfront_connect(struct blkfront_info *info) * provide this. Assume physical sector size to be the same as * sector_size in that case. 
*/ - physical_sector_size = xenbus_read_unsigned(info->xbdev->otherend, + info->physical_sector_size = xenbus_read_unsigned(info->xbdev->otherend, "physical-sector-size", - sector_size); + info->sector_size); blkfront_gather_backend_features(info); for_each_rinfo(info, rinfo, i) { err = blkfront_setup_indirect(rinfo); @@ -2383,8 +2383,7 @@ static void blkfront_connect(struct blkfront_info *info) } } - err = xlvbd_alloc_gendisk(sectors, info, sector_size, - physical_sector_size); + err = xlvbd_alloc_gendisk(sectors, info); if (err) { xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s", info->xbdev->otherend); diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c index 7c5f4e4d9b50..4b7219be1bb8 100644 --- a/drivers/block/z2ram.c +++ b/drivers/block/z2ram.c @@ -409,4 +409,5 @@ static void __exit z2_exit(void) module_init(z2_init); module_exit(z2_exit); +MODULE_DESCRIPTION("Amiga Zorro II ramdisk driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 3acd7006ad2c..efcb8d9d274c 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -2208,6 +2208,8 @@ static int zram_add(void) #if ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE .max_write_zeroes_sectors = UINT_MAX, #endif + .features = BLK_FEAT_STABLE_WRITES | + BLK_FEAT_SYNCHRONOUS, }; struct zram *zram; int ret, device_id; @@ -2245,10 +2247,6 @@ static int zram_add(void) /* Actual capacity set using sysfs (/sys/block/zram<id>/disksize */ set_capacity(zram->disk, 0); - /* zram devices sort of resembles non-rotational disks */ - blk_queue_flag_set(QUEUE_FLAG_NONROT, zram->disk->queue); - blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, zram->disk->queue); - blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, zram->disk->queue); ret = device_add_disk(NULL, zram->disk, zram_disk_groups); if (ret) goto out_cleanup_disk; diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c index 5b6805d87fcf..dd3c0626c72d 100644 --- a/drivers/bluetooth/btintel_pcie.c +++ b/drivers/bluetooth/btintel_pcie.c @@ -382,7 +382,7 @@ static int btintel_pcie_recv_frame(struct btintel_pcie_data *data, /* The first 4 bytes indicates the Intel PCIe specific packet type */ pdata = skb_pull_data(skb, BTINTEL_PCIE_HCI_TYPE_LEN); - if (!data) { + if (!pdata) { bt_dev_err(hdev, "Corrupted packet received"); ret = -EILSEQ; goto exit_error; diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c index 9d0c7e278114..9bfa9a6ad56c 100644 --- a/drivers/bluetooth/btnxpuart.c +++ b/drivers/bluetooth/btnxpuart.c @@ -281,7 +281,7 @@ static u8 crc8_table[CRC8_TABLE_SIZE]; /* Default configurations */ #define DEFAULT_H2C_WAKEUP_MODE WAKEUP_METHOD_BREAK -#define DEFAULT_PS_MODE PS_MODE_DISABLE +#define DEFAULT_PS_MODE PS_MODE_ENABLE #define FW_INIT_BAUDRATE HCI_NXP_PRI_BAUDRATE static struct sk_buff *nxp_drv_send_cmd(struct hci_dev *hdev, u16 opcode, diff --git a/drivers/bluetooth/hci_bcm4377.c b/drivers/bluetooth/hci_bcm4377.c index 0c2f15235b4c..d90858ea2fe5 100644 --- a/drivers/bluetooth/hci_bcm4377.c +++ b/drivers/bluetooth/hci_bcm4377.c @@ -495,6 +495,10 @@ struct bcm4377_data; * extended scanning * broken_mws_transport_config: Set to true if the chip erroneously claims to * support MWS Transport Configuration + * broken_le_ext_adv_report_phy: Set to true if this chip stuffs flags inside + * reserved bits of Primary/Secondary_PHY inside + * LE Extended Advertising Report events which + * have to be ignored * send_calibration: Optional callback to send calibration data * 
send_ptb: Callback to send "PTB" regulatory/calibration data */ @@ -513,6 +517,7 @@ struct bcm4377_hw { unsigned long broken_ext_scan : 1; unsigned long broken_mws_transport_config : 1; unsigned long broken_le_coded : 1; + unsigned long broken_le_ext_adv_report_phy : 1; int (*send_calibration)(struct bcm4377_data *bcm4377); int (*send_ptb)(struct bcm4377_data *bcm4377, @@ -716,7 +721,7 @@ static void bcm4377_handle_ack(struct bcm4377_data *bcm4377, ring->events[msgid] = NULL; } - bitmap_release_region(ring->msgids, msgid, ring->n_entries); + bitmap_release_region(ring->msgids, msgid, 0); unlock: spin_unlock_irqrestore(&ring->lock, flags); @@ -2373,6 +2378,8 @@ static int bcm4377_probe(struct pci_dev *pdev, const struct pci_device_id *id) set_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &hdev->quirks); if (bcm4377->hw->broken_le_coded) set_bit(HCI_QUIRK_BROKEN_LE_CODED, &hdev->quirks); + if (bcm4377->hw->broken_le_ext_adv_report_phy) + set_bit(HCI_QUIRK_FIXUP_LE_EXT_ADV_REPORT_PHY, &hdev->quirks); pci_set_drvdata(pdev, bcm4377); hci_set_drvdata(hdev, bcm4377); @@ -2477,6 +2484,7 @@ static const struct bcm4377_hw bcm4377_hw_variants[] = { .clear_pciecfg_subsystem_ctrl_bit19 = true, .broken_mws_transport_config = true, .broken_le_coded = true, + .broken_le_ext_adv_report_phy = true, .send_calibration = bcm4387_send_calibration, .send_ptb = bcm4378_send_ptb, }, diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 0c9c9ee56592..9a0bc86f9aac 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -2450,15 +2450,27 @@ static void qca_serdev_shutdown(struct device *dev) struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev); struct hci_uart *hu = &qcadev->serdev_hu; struct hci_dev *hdev = hu->hdev; - struct qca_data *qca = hu->priv; const u8 ibs_wake_cmd[] = { 0xFD }; const u8 edl_reset_soc_cmd[] = { 0x01, 0x00, 0xFC, 0x01, 0x05 }; if (qcadev->btsoc_type == QCA_QCA6390) { - if (test_bit(QCA_BT_OFF, &qca->flags) || - !test_bit(HCI_RUNNING, &hdev->flags)) + /* The purpose of sending the VSC is to reset the SoC into an + * initial state, and that state will ensure that the next + * hdev->setup() succeeds. + * If HCI_QUIRK_NON_PERSISTENT_SETUP is set, it means that + * hdev->setup() can do its job regardless of SoC state, so + * we don't need to send the VSC. + * If HCI_SETUP is set, it means that hdev->setup() was never + * invoked and the SoC is already in the initial state, so + * we don't need to send the VSC either. + */ + if (test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks) || + hci_dev_test_flag(hdev, HCI_SETUP)) return; + /* The serdev must be in the open state when control logic + * arrives here, which also fixes the use-after-free issue + * caused by the serdev being flushed or written to after it is closed.
+ */ serdev_device_write_flush(serdev); ret = serdev_device_write_buf(serdev, ibs_wake_cmd, sizeof(ibs_wake_cmd)); diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c index 1e29ba76615d..ac6c7e4900f4 100644 --- a/drivers/bus/sunxi-rsb.c +++ b/drivers/bus/sunxi-rsb.c @@ -457,7 +457,7 @@ static void regmap_sunxi_rsb_free_ctx(void *context) kfree(ctx); } -static struct regmap_bus regmap_sunxi_rsb = { +static const struct regmap_bus regmap_sunxi_rsb = { .reg_write = regmap_sunxi_rsb_reg_write, .reg_read = regmap_sunxi_rsb_reg_read, .free_context = regmap_sunxi_rsb_free_ctx, diff --git a/drivers/bus/ts-nbus.c b/drivers/bus/ts-nbus.c index baf22a82c47a..b8af44c5cdbd 100644 --- a/drivers/bus/ts-nbus.c +++ b/drivers/bus/ts-nbus.c @@ -294,7 +294,7 @@ static int ts_nbus_probe(struct platform_device *pdev) state.duty_cycle = state.period; state.enabled = true; - ret = pwm_apply_state(pwm, &state); + ret = pwm_apply_might_sleep(pwm, &state); if (ret < 0) return dev_err_probe(dev, ret, "failed to configure PWM\n"); diff --git a/drivers/bus/vexpress-config.c b/drivers/bus/vexpress-config.c index d2c7ada90186..64ee920721ee 100644 --- a/drivers/bus/vexpress-config.c +++ b/drivers/bus/vexpress-config.c @@ -414,4 +414,5 @@ static struct platform_driver vexpress_syscfg_driver = { .probe = vexpress_syscfg_probe, }; module_platform_driver(vexpress_syscfg_driver); +MODULE_DESCRIPTION("Versatile Express configuration bus"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/cache/Kconfig b/drivers/cache/Kconfig index 9345ce4976d7..94abd8f632a7 100644 --- a/drivers/cache/Kconfig +++ b/drivers/cache/Kconfig @@ -14,4 +14,13 @@ config SIFIVE_CCACHE help Support for the composable cache controller on SiFive platforms. +config STARFIVE_STARLINK_CACHE + bool "StarFive StarLink Cache controller" + depends on RISCV + depends on ARCH_STARFIVE + select RISCV_DMA_NONCOHERENT + select RISCV_NONSTANDARD_CACHE_OPS + help + Support for the StarLink cache controller IP from StarFive. + endmenu diff --git a/drivers/cache/Makefile b/drivers/cache/Makefile index 7657cff3bd6c..55c5e851034d 100644 --- a/drivers/cache/Makefile +++ b/drivers/cache/Makefile @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_AX45MP_L2_CACHE) += ax45mp_cache.o -obj-$(CONFIG_SIFIVE_CCACHE) += sifive_ccache.o +obj-$(CONFIG_AX45MP_L2_CACHE) += ax45mp_cache.o +obj-$(CONFIG_SIFIVE_CCACHE) += sifive_ccache.o +obj-$(CONFIG_STARFIVE_STARLINK_CACHE) += starfive_starlink_cache.o diff --git a/drivers/cache/starfive_starlink_cache.c b/drivers/cache/starfive_starlink_cache.c new file mode 100644 index 000000000000..24c7d078ca22 --- /dev/null +++ b/drivers/cache/starfive_starlink_cache.c @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Cache Management Operations for StarFive's Starlink cache controller + * + * Copyright (C) 2024 Shanghai StarFive Technology Co., Ltd. 
+ * + * Author: Joshua Yeong <joshua.yeong@starfivetech.com> + */ + +#include <linux/bitfield.h> +#include <linux/cacheflush.h> +#include <linux/iopoll.h> +#include <linux/of_address.h> + +#include <asm/dma-noncoherent.h> + +#define STARLINK_CACHE_FLUSH_START_ADDR 0x0 +#define STARLINK_CACHE_FLUSH_END_ADDR 0x8 +#define STARLINK_CACHE_FLUSH_CTL 0x10 +#define STARLINK_CACHE_ALIGN 0x40 + +#define STARLINK_CACHE_ADDRESS_RANGE_MASK GENMASK(39, 0) +#define STARLINK_CACHE_FLUSH_CTL_MODE_MASK GENMASK(2, 1) +#define STARLINK_CACHE_FLUSH_CTL_ENABLE_MASK BIT(0) + +#define STARLINK_CACHE_FLUSH_CTL_CLEAN_INVALIDATE 0 +#define STARLINK_CACHE_FLUSH_CTL_MAKE_INVALIDATE 1 +#define STARLINK_CACHE_FLUSH_CTL_CLEAN_SHARED 2 +#define STARLINK_CACHE_FLUSH_POLL_DELAY_US 1 +#define STARLINK_CACHE_FLUSH_TIMEOUT_US 5000000 + +static void __iomem *starlink_cache_base; + +static void starlink_cache_flush_complete(void) +{ + volatile void __iomem *ctl = starlink_cache_base + STARLINK_CACHE_FLUSH_CTL; + u64 v; + int ret; + + ret = readq_poll_timeout_atomic(ctl, v, !(v & STARLINK_CACHE_FLUSH_CTL_ENABLE_MASK), + STARLINK_CACHE_FLUSH_POLL_DELAY_US, + STARLINK_CACHE_FLUSH_TIMEOUT_US); + if (ret) + WARN(1, "StarFive Starlink cache flush operation timeout\n"); +} + +static void starlink_cache_dma_cache_wback(phys_addr_t paddr, unsigned long size) +{ + writeq(FIELD_PREP(STARLINK_CACHE_ADDRESS_RANGE_MASK, paddr), + starlink_cache_base + STARLINK_CACHE_FLUSH_START_ADDR); + writeq(FIELD_PREP(STARLINK_CACHE_ADDRESS_RANGE_MASK, paddr + size), + starlink_cache_base + STARLINK_CACHE_FLUSH_END_ADDR); + + mb(); + writeq(FIELD_PREP(STARLINK_CACHE_FLUSH_CTL_MODE_MASK, + STARLINK_CACHE_FLUSH_CTL_CLEAN_SHARED), + starlink_cache_base + STARLINK_CACHE_FLUSH_CTL); + + starlink_cache_flush_complete(); +} + +static void starlink_cache_dma_cache_invalidate(phys_addr_t paddr, unsigned long size) +{ + writeq(FIELD_PREP(STARLINK_CACHE_ADDRESS_RANGE_MASK, paddr), + starlink_cache_base + STARLINK_CACHE_FLUSH_START_ADDR); + writeq(FIELD_PREP(STARLINK_CACHE_ADDRESS_RANGE_MASK, paddr + size), + starlink_cache_base + STARLINK_CACHE_FLUSH_END_ADDR); + + mb(); + writeq(FIELD_PREP(STARLINK_CACHE_FLUSH_CTL_MODE_MASK, + STARLINK_CACHE_FLUSH_CTL_MAKE_INVALIDATE), + starlink_cache_base + STARLINK_CACHE_FLUSH_CTL); + + starlink_cache_flush_complete(); +} + +static void starlink_cache_dma_cache_wback_inv(phys_addr_t paddr, unsigned long size) +{ + writeq(FIELD_PREP(STARLINK_CACHE_ADDRESS_RANGE_MASK, paddr), + starlink_cache_base + STARLINK_CACHE_FLUSH_START_ADDR); + writeq(FIELD_PREP(STARLINK_CACHE_ADDRESS_RANGE_MASK, paddr + size), + starlink_cache_base + STARLINK_CACHE_FLUSH_END_ADDR); + + mb(); + writeq(FIELD_PREP(STARLINK_CACHE_FLUSH_CTL_MODE_MASK, + STARLINK_CACHE_FLUSH_CTL_CLEAN_INVALIDATE), + starlink_cache_base + STARLINK_CACHE_FLUSH_CTL); + + starlink_cache_flush_complete(); +} + +static const struct riscv_nonstd_cache_ops starlink_cache_ops = { + .wback = &starlink_cache_dma_cache_wback, + .inv = &starlink_cache_dma_cache_invalidate, + .wback_inv = &starlink_cache_dma_cache_wback_inv, +}; + +static const struct of_device_id starlink_cache_ids[] = { + { .compatible = "starfive,jh8100-starlink-cache" }, + { /* sentinel */ } +}; + +static int __init starlink_cache_init(void) +{ + struct device_node *np; + u32 block_size; + int ret; + + np = of_find_matching_node(NULL, starlink_cache_ids); + if (!of_device_is_available(np)) + return -ENODEV; + + ret = of_property_read_u32(np, "cache-block-size", &block_size); + if (ret) + return ret; + + if (block_size % 
STARLINK_CACHE_ALIGN) + return -EINVAL; + + starlink_cache_base = of_iomap(np, 0); + if (!starlink_cache_base) + return -ENOMEM; + + riscv_cbom_block_size = block_size; + riscv_noncoherent_supported(); + riscv_noncoherent_register_cache_ops(&starlink_cache_ops); + + return 0; +} +arch_initcall(starlink_cache_init); diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index 20c90ebb3a3f..49e4829b7264 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -3708,4 +3708,5 @@ static void __exit cdrom_exit(void) module_init(cdrom_init); module_exit(cdrom_exit); +MODULE_DESCRIPTION("Uniform CD-ROM driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c index eefdd422ad8e..71cfe7a85913 100644 --- a/drivers/cdrom/gdrom.c +++ b/drivers/cdrom/gdrom.c @@ -744,6 +744,7 @@ static int probe_gdrom(struct platform_device *devptr) .max_segments = 1, /* set a large max size to get most from DMA */ .max_segment_size = 0x40000, + .features = BLK_FEAT_ROTATIONAL, }; int err; diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index d51fc8321d41..da32e8ed0830 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c @@ -269,8 +269,13 @@ hpet_read(struct file *file, char __user *buf, size_t count, loff_t * ppos) if (!devp->hd_ireqfreq) return -EIO; - if (count < sizeof(unsigned long)) - return -EINVAL; + if (in_compat_syscall()) { + if (count < sizeof(compat_ulong_t)) + return -EINVAL; + } else { + if (count < sizeof(unsigned long)) + return -EINVAL; + } add_wait_queue(&devp->hd_waitqueue, &wait); @@ -294,9 +299,16 @@ hpet_read(struct file *file, char __user *buf, size_t count, loff_t * ppos) schedule(); } - retval = put_user(data, (unsigned long __user *)buf); - if (!retval) - retval = sizeof(unsigned long); + if (in_compat_syscall()) { + retval = put_user(data, (compat_ulong_t __user *)buf); + if (!retval) + retval = sizeof(compat_ulong_t); + } else { + retval = put_user(data, (unsigned long __user *)buf); + if (!retval) + retval = sizeof(unsigned long); + } + out: __set_current_state(TASK_RUNNING); remove_wait_queue(&devp->hd_waitqueue, &wait); @@ -651,12 +663,24 @@ struct compat_hpet_info { unsigned short hi_timer; }; +/* 32-bit types would lead to different command codes which should be + * translated into 64-bit ones before being passed to hpet_ioctl_common + */ +#define COMPAT_HPET_INFO _IOR('h', 0x03, struct compat_hpet_info) +#define COMPAT_HPET_IRQFREQ _IOW('h', 0x6, compat_ulong_t) + static long hpet_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct hpet_info info; int err; + if (cmd == COMPAT_HPET_INFO) + cmd = HPET_INFO; + + if (cmd == COMPAT_HPET_IRQFREQ) + cmd = HPET_IRQFREQ; + mutex_lock(&hpet_mutex); err = hpet_ioctl_common(file->private_data, cmd, arg, &info); mutex_unlock(&hpet_mutex); diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile index 4c695b0388f3..9bb142c75243 100644 --- a/drivers/char/tpm/Makefile +++ b/drivers/char/tpm/Makefile @@ -16,8 +16,8 @@ tpm-y += eventlog/common.o tpm-y += eventlog/tpm1.o tpm-y += eventlog/tpm2.o tpm-y += tpm-buf.o +tpm-y += tpm2-sessions.o -tpm-$(CONFIG_TCG_TPM2_HMAC) += tpm2-sessions.o tpm-$(CONFIG_ACPI) += tpm_ppi.o eventlog/acpi.o tpm-$(CONFIG_EFI) += eventlog/efi.o tpm-$(CONFIG_OF) += eventlog/of.o diff --git a/drivers/char/tpm/eventlog/common.c b/drivers/char/tpm/eventlog/common.c index 639c3f395a5a..4c0bbba64ee5 100644 --- a/drivers/char/tpm/eventlog/common.c +++ b/drivers/char/tpm/eventlog/common.c @@ -47,6 +47,8 @@ static int
tpm_bios_measurements_open(struct inode *inode, if (!err) { seq = file->private_data; seq->private = chip; + } else { + put_device(&chip->dev); } return err; diff --git a/drivers/char/tpm/tpm2-sessions.c b/drivers/char/tpm/tpm2-sessions.c index 907ac9956a78..2281d55df545 100644 --- a/drivers/char/tpm/tpm2-sessions.c +++ b/drivers/char/tpm/tpm2-sessions.c @@ -83,9 +83,6 @@ #define AES_KEY_BYTES AES_KEYSIZE_128 #define AES_KEY_BITS (AES_KEY_BYTES*8) -static int tpm2_create_primary(struct tpm_chip *chip, u32 hierarchy, - u32 *handle, u8 *name); - /* * This is the structure that carries all the auth information (like * session handle, nonces, session key and auth) from use to use it is @@ -148,6 +145,7 @@ struct tpm2_auth { u8 name[AUTH_MAX_NAMES][2 + SHA512_DIGEST_SIZE]; }; +#ifdef CONFIG_TCG_TPM2_HMAC /* * Name Size based on TPM algorithm (assumes no hash bigger than 255) */ @@ -163,6 +161,226 @@ static u8 name_size(const u8 *name) return size_map[alg] + 2; } +static int tpm2_parse_read_public(char *name, struct tpm_buf *buf) +{ + struct tpm_header *head = (struct tpm_header *)buf->data; + off_t offset = TPM_HEADER_SIZE; + u32 tot_len = be32_to_cpu(head->length); + u32 val; + + /* we're starting after the header so adjust the length */ + tot_len -= TPM_HEADER_SIZE; + + /* skip public */ + val = tpm_buf_read_u16(buf, &offset); + if (val > tot_len) + return -EINVAL; + offset += val; + /* name */ + val = tpm_buf_read_u16(buf, &offset); + if (val != name_size(&buf->data[offset])) + return -EINVAL; + memcpy(name, &buf->data[offset], val); + /* forget the rest */ + return 0; +} + +static int tpm2_read_public(struct tpm_chip *chip, u32 handle, char *name) +{ + struct tpm_buf buf; + int rc; + + rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_READ_PUBLIC); + if (rc) + return rc; + + tpm_buf_append_u32(&buf, handle); + rc = tpm_transmit_cmd(chip, &buf, 0, "read public"); + if (rc == TPM2_RC_SUCCESS) + rc = tpm2_parse_read_public(name, &buf); + + tpm_buf_destroy(&buf); + + return rc; +} +#endif /* CONFIG_TCG_TPM2_HMAC */ + +/** + * tpm_buf_append_name() - add a handle area to the buffer + * @chip: the TPM chip structure + * @buf: The buffer to be appended + * @handle: The handle to be appended + * @name: The name of the handle (may be NULL) + * + * In order to compute session HMACs, we need to know the names of the + * objects pointed to by the handles. For most objects, this is simply + * the actual 4 byte handle or an empty buf (in these cases @name + * should be NULL) but for volatile objects, permanent objects and NV + * areas, the name is defined as the hash (according to the name + * algorithm which should be set to sha256) of the public area to + * which the two byte algorithm id has been appended. For these + * objects, the @name pointer should point to this. If a name is + * required but @name is NULL, then TPM2_ReadPublic() will be called + * on the handle to obtain the name. + * + * As with most tpm_buf operations, success is assumed because failure + * will be caused by an incorrect programming model and indicated by a + * kernel message. 
+ */ +void tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf, + u32 handle, u8 *name) +{ +#ifdef CONFIG_TCG_TPM2_HMAC + enum tpm2_mso_type mso = tpm2_handle_mso(handle); + struct tpm2_auth *auth; + int slot; +#endif + + if (!tpm2_chip_auth(chip)) { + tpm_buf_append_u32(buf, handle); + /* count the number of handles in the upper bits of flags */ + buf->handles++; + return; + } + +#ifdef CONFIG_TCG_TPM2_HMAC + slot = (tpm_buf_length(buf) - TPM_HEADER_SIZE) / 4; + if (slot >= AUTH_MAX_NAMES) { + dev_err(&chip->dev, "TPM: too many handles\n"); + return; + } + auth = chip->auth; + WARN(auth->session != tpm_buf_length(buf), + "name added in wrong place\n"); + tpm_buf_append_u32(buf, handle); + auth->session += 4; + + if (mso == TPM2_MSO_PERSISTENT || + mso == TPM2_MSO_VOLATILE || + mso == TPM2_MSO_NVRAM) { + if (!name) + tpm2_read_public(chip, handle, auth->name[slot]); + } else { + if (name) + dev_err(&chip->dev, "TPM: Handle does not require name but one is specified\n"); + } + + auth->name_h[slot] = handle; + if (name) + memcpy(auth->name[slot], name, name_size(name)); +#endif +} +EXPORT_SYMBOL_GPL(tpm_buf_append_name); + +/** + * tpm_buf_append_hmac_session() - Append a TPM session element + * @chip: the TPM chip structure + * @buf: The buffer to be appended + * @attributes: The session attributes + * @passphrase: The session authority (NULL if none) + * @passphrase_len: The length of the session authority (0 if none) + * + * This fills in a session structure in the TPM command buffer, except + * for the HMAC which cannot be computed until the command buffer is + * complete. The type of session is controlled by the @attributes, + * the main ones of which are TPM2_SA_CONTINUE_SESSION which means the + * session won't terminate after tpm_buf_check_hmac_response(), + * TPM2_SA_DECRYPT which means this buffer's first parameter should be + * encrypted with a session key and TPM2_SA_ENCRYPT, which means the + * response buffer's first parameter needs to be decrypted (confusing, + * but the defines are written from the point of view of the TPM). + * + * Any session appended by this command must be finalized by calling + * tpm_buf_fill_hmac_session(), otherwise the HMAC will be incorrect + * and the TPM will reject the command. + * + * As with most tpm_buf operations, success is assumed because failure + * will be caused by an incorrect programming model and indicated by a + * kernel message.
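+ *
+ * Editor's sketch, not part of this patch: a typical caller strings the
+ * session helpers together roughly as below. The ordinal (TPM2_CC_UNSEAL),
+ * blob_handle, and passphrase are placeholder assumptions:
+ *
+ *	rc = tpm2_start_auth_session(chip);
+ *	if (rc)
+ *		return rc;
+ *	rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_UNSEAL);
+ *	if (rc) {
+ *		tpm2_end_auth_session(chip);
+ *		return rc;
+ *	}
+ *	tpm_buf_append_name(chip, &buf, blob_handle, NULL);
+ *	tpm_buf_append_hmac_session(chip, &buf, TPM2_SA_CONTINUE_SESSION,
+ *				    passphrase, passphrase_len);
+ *	tpm_buf_fill_hmac_session(chip, &buf);
+ *	rc = tpm_transmit_cmd(chip, &buf, 0, "unsealing");
+ *	rc = tpm_buf_check_hmac_response(chip, &buf, rc);
+ *	tpm_buf_destroy(&buf);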
+ */ +void tpm_buf_append_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf, + u8 attributes, u8 *passphrase, + int passphrase_len) +{ +#ifdef CONFIG_TCG_TPM2_HMAC + u8 nonce[SHA256_DIGEST_SIZE]; + struct tpm2_auth *auth; + u32 len; +#endif + + if (!tpm2_chip_auth(chip)) { + /* offset tells us where the sessions area begins */ + int offset = buf->handles * 4 + TPM_HEADER_SIZE; + u32 len = 9 + passphrase_len; + + if (tpm_buf_length(buf) != offset) { + /* not the first session so update the existing length */ + len += get_unaligned_be32(&buf->data[offset]); + put_unaligned_be32(len, &buf->data[offset]); + } else { + tpm_buf_append_u32(buf, len); + } + /* auth handle */ + tpm_buf_append_u32(buf, TPM2_RS_PW); + /* nonce */ + tpm_buf_append_u16(buf, 0); + /* attributes */ + tpm_buf_append_u8(buf, 0); + /* passphrase */ + tpm_buf_append_u16(buf, passphrase_len); + tpm_buf_append(buf, passphrase, passphrase_len); + return; + } + +#ifdef CONFIG_TCG_TPM2_HMAC + /* + * The Architecture Guide requires us to strip trailing zeros + * before computing the HMAC + */ + while (passphrase && passphrase_len > 0 && passphrase[passphrase_len - 1] == '\0') + passphrase_len--; + + auth = chip->auth; + auth->attrs = attributes; + auth->passphrase_len = passphrase_len; + if (passphrase_len) + memcpy(auth->passphrase, passphrase, passphrase_len); + + if (auth->session != tpm_buf_length(buf)) { + /* we're not the first session */ + len = get_unaligned_be32(&buf->data[auth->session]); + if (4 + len + auth->session != tpm_buf_length(buf)) { + WARN(1, "session length mismatch, cannot append"); + return; + } + + /* add our new session */ + len += 9 + 2 * SHA256_DIGEST_SIZE; + put_unaligned_be32(len, &buf->data[auth->session]); + } else { + tpm_buf_append_u32(buf, 9 + 2 * SHA256_DIGEST_SIZE); + } + + /* random number for our nonce */ + get_random_bytes(nonce, sizeof(nonce)); + memcpy(auth->our_nonce, nonce, sizeof(nonce)); + tpm_buf_append_u32(buf, auth->handle); + /* our new nonce */ + tpm_buf_append_u16(buf, SHA256_DIGEST_SIZE); + tpm_buf_append(buf, nonce, SHA256_DIGEST_SIZE); + tpm_buf_append_u8(buf, auth->attrs); + /* and put a placeholder for the hmac */ + tpm_buf_append_u16(buf, SHA256_DIGEST_SIZE); + tpm_buf_append(buf, nonce, SHA256_DIGEST_SIZE); +#endif +} +EXPORT_SYMBOL_GPL(tpm_buf_append_hmac_session); + +#ifdef CONFIG_TCG_TPM2_HMAC + +static int tpm2_create_primary(struct tpm_chip *chip, u32 hierarchy, + u32 *handle, u8 *name); + /* * It turns out the crypto hmac(sha256) is hard for us to consume * because it assumes a fixed key and the TPM seems to change the key @@ -344,82 +562,6 @@ static void tpm_buf_append_salt(struct tpm_buf *buf, struct tpm_chip *chip) } /** - * tpm_buf_append_hmac_session() - Append a TPM session element - * @chip: the TPM chip structure - * @buf: The buffer to be appended - * @attributes: The session attributes - * @passphrase: The session authority (NULL if none) - * @passphrase_len: The length of the session authority (0 if none) - * - * This fills in a session structure in the TPM command buffer, except - * for the HMAC which cannot be computed until the command buffer is - * complete. 
The type of session is controlled by the @attributes, - * the main ones of which are TPM2_SA_CONTINUE_SESSION which means the - * session won't terminate after tpm_buf_check_hmac_response(), - * TPM2_SA_DECRYPT which means this buffers first parameter should be - * encrypted with a session key and TPM2_SA_ENCRYPT, which means the - * response buffer's first parameter needs to be decrypted (confusing, - * but the defines are written from the point of view of the TPM). - * - * Any session appended by this command must be finalized by calling - * tpm_buf_fill_hmac_session() otherwise the HMAC will be incorrect - * and the TPM will reject the command. - * - * As with most tpm_buf operations, success is assumed because failure - * will be caused by an incorrect programming model and indicated by a - * kernel message. - */ -void tpm_buf_append_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf, - u8 attributes, u8 *passphrase, - int passphrase_len) -{ - u8 nonce[SHA256_DIGEST_SIZE]; - u32 len; - struct tpm2_auth *auth = chip->auth; - - /* - * The Architecture Guide requires us to strip trailing zeros - * before computing the HMAC - */ - while (passphrase && passphrase_len > 0 - && passphrase[passphrase_len - 1] == '\0') - passphrase_len--; - - auth->attrs = attributes; - auth->passphrase_len = passphrase_len; - if (passphrase_len) - memcpy(auth->passphrase, passphrase, passphrase_len); - - if (auth->session != tpm_buf_length(buf)) { - /* we're not the first session */ - len = get_unaligned_be32(&buf->data[auth->session]); - if (4 + len + auth->session != tpm_buf_length(buf)) { - WARN(1, "session length mismatch, cannot append"); - return; - } - - /* add our new session */ - len += 9 + 2 * SHA256_DIGEST_SIZE; - put_unaligned_be32(len, &buf->data[auth->session]); - } else { - tpm_buf_append_u32(buf, 9 + 2 * SHA256_DIGEST_SIZE); - } - - /* random number for our nonce */ - get_random_bytes(nonce, sizeof(nonce)); - memcpy(auth->our_nonce, nonce, sizeof(nonce)); - tpm_buf_append_u32(buf, auth->handle); - /* our new nonce */ - tpm_buf_append_u16(buf, SHA256_DIGEST_SIZE); - tpm_buf_append(buf, nonce, SHA256_DIGEST_SIZE); - tpm_buf_append_u8(buf, auth->attrs); - /* and put a placeholder for the hmac */ - tpm_buf_append_u16(buf, SHA256_DIGEST_SIZE); - tpm_buf_append(buf, nonce, SHA256_DIGEST_SIZE); -} -EXPORT_SYMBOL(tpm_buf_append_hmac_session); - -/** * tpm_buf_fill_hmac_session() - finalize the session HMAC * @chip: the TPM chip structure * @buf: The buffer to be appended @@ -449,6 +591,9 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf) u8 cphash[SHA256_DIGEST_SIZE]; struct sha256_state sctx; + if (!auth) + return; + /* save the command code in BE format */ auth->ordinal = head->ordinal; @@ -567,104 +712,6 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf) } EXPORT_SYMBOL(tpm_buf_fill_hmac_session); -static int tpm2_parse_read_public(char *name, struct tpm_buf *buf) -{ - struct tpm_header *head = (struct tpm_header *)buf->data; - off_t offset = TPM_HEADER_SIZE; - u32 tot_len = be32_to_cpu(head->length); - u32 val; - - /* we're starting after the header so adjust the length */ - tot_len -= TPM_HEADER_SIZE; - - /* skip public */ - val = tpm_buf_read_u16(buf, &offset); - if (val > tot_len) - return -EINVAL; - offset += val; - /* name */ - val = tpm_buf_read_u16(buf, &offset); - if (val != name_size(&buf->data[offset])) - return -EINVAL; - memcpy(name, &buf->data[offset], val); - /* forget the rest */ - return 0; -} - -static int 
tpm2_read_public(struct tpm_chip *chip, u32 handle, char *name) -{ - struct tpm_buf buf; - int rc; - - rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_READ_PUBLIC); - if (rc) - return rc; - - tpm_buf_append_u32(&buf, handle); - rc = tpm_transmit_cmd(chip, &buf, 0, "read public"); - if (rc == TPM2_RC_SUCCESS) - rc = tpm2_parse_read_public(name, &buf); - - tpm_buf_destroy(&buf); - - return rc; -} - -/** - * tpm_buf_append_name() - add a handle area to the buffer - * @chip: the TPM chip structure - * @buf: The buffer to be appended - * @handle: The handle to be appended - * @name: The name of the handle (may be NULL) - * - * In order to compute session HMACs, we need to know the names of the - * objects pointed to by the handles. For most objects, this is simply - * the actual 4 byte handle or an empty buf (in these cases @name - * should be NULL) but for volatile objects, permanent objects and NV - * areas, the name is defined as the hash (according to the name - * algorithm which should be set to sha256) of the public area to - * which the two byte algorithm id has been appended. For these - * objects, the @name pointer should point to this. If a name is - * required but @name is NULL, then TPM2_ReadPublic() will be called - * on the handle to obtain the name. - * - * As with most tpm_buf operations, success is assumed because failure - * will be caused by an incorrect programming model and indicated by a - * kernel message. - */ -void tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf, - u32 handle, u8 *name) -{ - enum tpm2_mso_type mso = tpm2_handle_mso(handle); - struct tpm2_auth *auth = chip->auth; - int slot; - - slot = (tpm_buf_length(buf) - TPM_HEADER_SIZE)/4; - if (slot >= AUTH_MAX_NAMES) { - dev_err(&chip->dev, "TPM: too many handles\n"); - return; - } - WARN(auth->session != tpm_buf_length(buf), - "name added in wrong place\n"); - tpm_buf_append_u32(buf, handle); - auth->session += 4; - - if (mso == TPM2_MSO_PERSISTENT || - mso == TPM2_MSO_VOLATILE || - mso == TPM2_MSO_NVRAM) { - if (!name) - tpm2_read_public(chip, handle, auth->name[slot]); - } else { - if (name) - dev_err(&chip->dev, "TPM: Handle does not require name but one is specified\n"); - } - - auth->name_h[slot] = handle; - if (name) - memcpy(auth->name[slot], name, name_size(name)); -} -EXPORT_SYMBOL(tpm_buf_append_name); - /** * tpm_buf_check_hmac_response() - check the TPM return HMAC for correctness * @chip: the TPM chip structure @@ -705,6 +752,9 @@ int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf, u32 cc = be32_to_cpu(auth->ordinal); int parm_len, len, i, handles; + if (!auth) + return rc; + if (auth->session >= TPM_HEADER_SIZE) { WARN(1, "tpm session not filled correctly\n"); goto out; @@ -824,8 +874,13 @@ EXPORT_SYMBOL(tpm_buf_check_hmac_response); */ void tpm2_end_auth_session(struct tpm_chip *chip) { - tpm2_flush_context(chip, chip->auth->handle); - memzero_explicit(chip->auth, sizeof(*chip->auth)); + struct tpm2_auth *auth = chip->auth; + + if (!auth) + return; + + tpm2_flush_context(chip, auth->handle); + memzero_explicit(auth, sizeof(*auth)); } EXPORT_SYMBOL(tpm2_end_auth_session); @@ -907,6 +962,11 @@ int tpm2_start_auth_session(struct tpm_chip *chip) int rc; u32 null_key; + if (!auth) { + dev_warn_once(&chip->dev, "auth session is not active\n"); + return 0; + } + rc = tpm2_load_null(chip, &null_key); if (rc) goto out; @@ -1301,3 +1361,4 @@ int tpm2_sessions_init(struct tpm_chip *chip) return rc; } +#endif /* CONFIG_TCG_TPM2_HMAC */ diff --git 
a/drivers/char/tpm/tpm_tis_spi_main.c b/drivers/char/tpm/tpm_tis_spi_main.c index c9eca24bbad4..61b42c83ced8 100644 --- a/drivers/char/tpm/tpm_tis_spi_main.c +++ b/drivers/char/tpm/tpm_tis_spi_main.c @@ -318,6 +318,7 @@ static void tpm_tis_spi_remove(struct spi_device *dev) } static const struct spi_device_id tpm_tis_spi_id[] = { + { "attpm20p", (unsigned long)tpm_tis_spi_probe }, { "st33htpm-spi", (unsigned long)tpm_tis_spi_probe }, { "slb9670", (unsigned long)tpm_tis_spi_probe }, { "tpm_tis_spi", (unsigned long)tpm_tis_spi_probe }, diff --git a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c index ba504e19d420..62d876e150e1 100644 --- a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c +++ b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c @@ -29,6 +29,7 @@ static const struct mtk_gate mfg_clks[] = { static const struct mtk_clk_desc mfg_desc = { .clks = mfg_clks, .num_clks = ARRAY_SIZE(mfg_clks), + .need_runtime_pm = true, }; static const struct of_device_id of_match_clk_mt8183_mfg[] = { diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c index bd37ab4d1a9b..ba1d1c495bc2 100644 --- a/drivers/clk/mediatek/clk-mtk.c +++ b/drivers/clk/mediatek/clk-mtk.c @@ -496,14 +496,16 @@ static int __mtk_clk_simple_probe(struct platform_device *pdev, } - devm_pm_runtime_enable(&pdev->dev); - /* - * Do a pm_runtime_resume_and_get() to workaround a possible - * deadlock between clk_register() and the genpd framework. - */ - r = pm_runtime_resume_and_get(&pdev->dev); - if (r) - return r; + if (mcd->need_runtime_pm) { + devm_pm_runtime_enable(&pdev->dev); + /* + * Do a pm_runtime_resume_and_get() to work around a possible + * deadlock between clk_register() and the genpd framework. + */ + r = pm_runtime_resume_and_get(&pdev->dev); + if (r) + return r; + } /* Calculate how many clk_hw_onecell_data entries to allocate */ num_clks = mcd->num_clks + mcd->num_composite_clks; @@ -585,7 +587,8 @@ static int __mtk_clk_simple_probe(struct platform_device *pdev, goto unregister_clks; } - pm_runtime_put(&pdev->dev); + if (mcd->need_runtime_pm) + pm_runtime_put(&pdev->dev); return r; @@ -618,7 +621,8 @@ free_base: if (mcd->shared_io && base) iounmap(base); - pm_runtime_put(&pdev->dev); + if (mcd->need_runtime_pm) + pm_runtime_put(&pdev->dev); return r; } diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h index 22096501a60a..c17fe1c2d732 100644 --- a/drivers/clk/mediatek/clk-mtk.h +++ b/drivers/clk/mediatek/clk-mtk.h @@ -237,6 +237,8 @@ struct mtk_clk_desc { int (*clk_notifier_func)(struct device *dev, struct clk *clk); unsigned int mfg_clk_idx; + + bool need_runtime_pm; }; int mtk_clk_pdev_probe(struct platform_device *pdev); diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig index 1bb51a058872..46369edfc07a 100644 --- a/drivers/clk/qcom/Kconfig +++ b/drivers/clk/qcom/Kconfig @@ -827,6 +827,14 @@ config SM_CAMCC_8550 Support for the camera clock controller on SM8550 devices. Say Y if you want to support camera devices and camera functionality. +config SM_CAMCC_8650 + tristate "SM8650 Camera Clock Controller" + depends on ARM64 || COMPILE_TEST + select SM_GCC_8650 + help + Support for the camera clock controller on SM8650 devices. + Say Y if you want to support camera devices and camera functionality.
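Editor's note: the clk-mtk.c hunks above make the genpd deadlock workaround opt-in per controller through the new need_runtime_pm field instead of unconditional. A controller that still needs the power domain held during registration flags it in its descriptor, as clk-mt8183-mfgcfg.c now does; a hypothetical descriptor (the gate table name my_mfg_clks is assumed) would look like:

	static const struct mtk_clk_desc my_mfg_desc = {
		.clks = my_mfg_clks,
		.num_clks = ARRAY_SIZE(my_mfg_clks),
		/* keep the power domain up while the clks register */
		.need_runtime_pm = true,
	};

Controllers that leave the flag unset now skip devm_pm_runtime_enable() and the pm_runtime_resume_and_get()/pm_runtime_put() pair entirely.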
+ config SM_DISPCC_6115 tristate "SM6115 Display Clock Controller" depends on ARM64 || COMPILE_TEST diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile index dec5b6db6860..28bffa1eb8dd 100644 --- a/drivers/clk/qcom/Makefile +++ b/drivers/clk/qcom/Makefile @@ -109,6 +109,7 @@ obj-$(CONFIG_SM_CAMCC_6350) += camcc-sm6350.o obj-$(CONFIG_SM_CAMCC_8250) += camcc-sm8250.o obj-$(CONFIG_SM_CAMCC_8450) += camcc-sm8450.o obj-$(CONFIG_SM_CAMCC_8550) += camcc-sm8550.o +obj-$(CONFIG_SM_CAMCC_8650) += camcc-sm8650.o obj-$(CONFIG_SM_DISPCC_6115) += dispcc-sm6115.o obj-$(CONFIG_SM_DISPCC_6125) += dispcc-sm6125.o obj-$(CONFIG_SM_DISPCC_6350) += dispcc-sm6350.o diff --git a/drivers/clk/qcom/apss-ipq-pll.c b/drivers/clk/qcom/apss-ipq-pll.c index 5f7f537e4ecb..e8632db2c542 100644 --- a/drivers/clk/qcom/apss-ipq-pll.c +++ b/drivers/clk/qcom/apss-ipq-pll.c @@ -70,7 +70,6 @@ static struct clk_alpha_pll ipq_pll_stromer_plus = { static const struct alpha_pll_config ipq5018_pll_config = { .l = 0x2a, .config_ctl_val = 0x4001075b, - .config_ctl_hi_val = 0x304, .main_output_mask = BIT(0), .aux_output_mask = BIT(1), .early_output_mask = BIT(3), @@ -84,7 +83,6 @@ static const struct alpha_pll_config ipq5018_pll_config = { static const struct alpha_pll_config ipq5332_pll_config = { .l = 0x2d, .config_ctl_val = 0x4001075b, - .config_ctl_hi_val = 0x304, .main_output_mask = BIT(0), .aux_output_mask = BIT(1), .early_output_mask = BIT(3), diff --git a/drivers/clk/qcom/camcc-sm8650.c b/drivers/clk/qcom/camcc-sm8650.c new file mode 100644 index 000000000000..1b28e086e519 --- /dev/null +++ b/drivers/clk/qcom/camcc-sm8650.c @@ -0,0 +1,3591 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include <linux/clk-provider.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/regmap.h> + +#include <dt-bindings/clock/qcom,sm8650-camcc.h> + +#include "clk-alpha-pll.h" +#include "clk-branch.h" +#include "clk-rcg.h" +#include "clk-regmap.h" +#include "common.h" +#include "gdsc.h" +#include "reset.h" + +enum { + DT_IFACE, + DT_BI_TCXO, + DT_BI_TCXO_AO, + DT_SLEEP_CLK, +}; + +enum { + P_BI_TCXO, + P_BI_TCXO_AO, + P_CAM_CC_PLL0_OUT_EVEN, + P_CAM_CC_PLL0_OUT_MAIN, + P_CAM_CC_PLL0_OUT_ODD, + P_CAM_CC_PLL1_OUT_EVEN, + P_CAM_CC_PLL2_OUT_EVEN, + P_CAM_CC_PLL2_OUT_MAIN, + P_CAM_CC_PLL3_OUT_EVEN, + P_CAM_CC_PLL4_OUT_EVEN, + P_CAM_CC_PLL5_OUT_EVEN, + P_CAM_CC_PLL6_OUT_EVEN, + P_CAM_CC_PLL7_OUT_EVEN, + P_CAM_CC_PLL8_OUT_EVEN, + P_CAM_CC_PLL9_OUT_EVEN, + P_CAM_CC_PLL9_OUT_ODD, + P_CAM_CC_PLL10_OUT_EVEN, + P_SLEEP_CLK, +}; + +static const struct pll_vco lucid_ole_vco[] = { + { 249600000, 2300000000, 0 }, +}; + +static const struct pll_vco rivian_ole_vco[] = { + { 777000000, 1285000000, 0 }, +}; + +static const struct alpha_pll_config cam_cc_pll0_config = { + .l = 0x3e, + .alpha = 0x8000, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x82aa299c, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000003, + .test_ctl_hi1_val = 0x00009000, + .test_ctl_hi2_val = 0x00000034, + .user_ctl_val = 0x00008400, + .user_ctl_hi_val = 0x00000005, +}; + +static struct clk_alpha_pll cam_cc_pll0 = { + .offset = 0x0, + .vco_table = lucid_ole_vco, + .num_vco = ARRAY_SIZE(lucid_ole_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll0", + 
.parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_evo_ops, + }, + }, +}; + +static const struct clk_div_table post_div_table_cam_cc_pll0_out_even[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv cam_cc_pll0_out_even = { + .offset = 0x0, + .post_div_shift = 10, + .post_div_table = post_div_table_cam_cc_pll0_out_even, + .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_even), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll0_out_even", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_pll0.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_lucid_ole_ops, + }, +}; + +static const struct clk_div_table post_div_table_cam_cc_pll0_out_odd[] = { + { 0x2, 3 }, + { } +}; + +static struct clk_alpha_pll_postdiv cam_cc_pll0_out_odd = { + .offset = 0x0, + .post_div_shift = 14, + .post_div_table = post_div_table_cam_cc_pll0_out_odd, + .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_odd), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll0_out_odd", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_pll0.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_lucid_ole_ops, + }, +}; + +static const struct alpha_pll_config cam_cc_pll1_config = { + .l = 0x31, + .alpha = 0x7aaa, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x82aa299c, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000003, + .test_ctl_hi1_val = 0x00009000, + .test_ctl_hi2_val = 0x00000034, + .user_ctl_val = 0x00000400, + .user_ctl_hi_val = 0x00000005, +}; + +static struct clk_alpha_pll cam_cc_pll1 = { + .offset = 0x1000, + .vco_table = lucid_ole_vco, + .num_vco = ARRAY_SIZE(lucid_ole_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll1", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_evo_ops, + }, + }, +}; + +static const struct clk_div_table post_div_table_cam_cc_pll1_out_even[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv cam_cc_pll1_out_even = { + .offset = 0x1000, + .post_div_shift = 10, + .post_div_table = post_div_table_cam_cc_pll1_out_even, + .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll1_out_even), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll1_out_even", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_pll1.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_lucid_ole_ops, + }, +}; + +static const struct alpha_pll_config cam_cc_pll2_config = { + .l = 0x32, + .alpha = 0x0, + .config_ctl_val = 0x10000030, + .config_ctl_hi_val = 0x80890263, + .config_ctl_hi1_val = 0x00000217, + .user_ctl_val = 0x00000001, + .user_ctl_hi_val = 0x00000000, +}; + +static struct clk_alpha_pll cam_cc_pll2 = { + .offset = 0x2000, + .vco_table = rivian_ole_vco, + .num_vco = ARRAY_SIZE(rivian_ole_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_RIVIAN_EVO], + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll2", + .parent_data = &(const 
struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_rivian_evo_ops, + }, + }, +}; + +static const struct alpha_pll_config cam_cc_pll3_config = { + .l = 0x30, + .alpha = 0x8aaa, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x82aa299c, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000003, + .test_ctl_hi1_val = 0x00009000, + .test_ctl_hi2_val = 0x00000034, + .user_ctl_val = 0x00000400, + .user_ctl_hi_val = 0x00000005, +}; + +static struct clk_alpha_pll cam_cc_pll3 = { + .offset = 0x3000, + .vco_table = lucid_ole_vco, + .num_vco = ARRAY_SIZE(lucid_ole_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll3", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_evo_ops, + }, + }, +}; + +static const struct clk_div_table post_div_table_cam_cc_pll3_out_even[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv cam_cc_pll3_out_even = { + .offset = 0x3000, + .post_div_shift = 10, + .post_div_table = post_div_table_cam_cc_pll3_out_even, + .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll3_out_even), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll3_out_even", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_pll3.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_lucid_ole_ops, + }, +}; + +static const struct alpha_pll_config cam_cc_pll4_config = { + .l = 0x30, + .alpha = 0x8aaa, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x82aa299c, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000003, + .test_ctl_hi1_val = 0x00009000, + .test_ctl_hi2_val = 0x00000034, + .user_ctl_val = 0x00000400, + .user_ctl_hi_val = 0x00000005, +}; + +static struct clk_alpha_pll cam_cc_pll4 = { + .offset = 0x4000, + .vco_table = lucid_ole_vco, + .num_vco = ARRAY_SIZE(lucid_ole_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll4", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_evo_ops, + }, + }, +}; + +static const struct clk_div_table post_div_table_cam_cc_pll4_out_even[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv cam_cc_pll4_out_even = { + .offset = 0x4000, + .post_div_shift = 10, + .post_div_table = post_div_table_cam_cc_pll4_out_even, + .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll4_out_even), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll4_out_even", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_pll4.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_lucid_ole_ops, + }, +}; + +static const struct alpha_pll_config cam_cc_pll5_config = { + .l = 0x30, + .alpha = 0x8aaa, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x82aa299c, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000003, + .test_ctl_hi1_val = 0x00009000, + .test_ctl_hi2_val = 0x00000034, + .user_ctl_val = 0x00000400, + .user_ctl_hi_val = 0x00000005, +}; + +static struct clk_alpha_pll 
cam_cc_pll5 = { + .offset = 0x5000, + .vco_table = lucid_ole_vco, + .num_vco = ARRAY_SIZE(lucid_ole_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll5", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_evo_ops, + }, + }, +}; + +static const struct clk_div_table post_div_table_cam_cc_pll5_out_even[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv cam_cc_pll5_out_even = { + .offset = 0x5000, + .post_div_shift = 10, + .post_div_table = post_div_table_cam_cc_pll5_out_even, + .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll5_out_even), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll5_out_even", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_pll5.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_lucid_ole_ops, + }, +}; + +static const struct alpha_pll_config cam_cc_pll6_config = { + .l = 0x30, + .alpha = 0x8aaa, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x82aa299c, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000003, + .test_ctl_hi1_val = 0x00009000, + .test_ctl_hi2_val = 0x00000034, + .user_ctl_val = 0x00000400, + .user_ctl_hi_val = 0x00000005, +}; + +static struct clk_alpha_pll cam_cc_pll6 = { + .offset = 0x6000, + .vco_table = lucid_ole_vco, + .num_vco = ARRAY_SIZE(lucid_ole_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll6", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_evo_ops, + }, + }, +}; + +static const struct clk_div_table post_div_table_cam_cc_pll6_out_even[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv cam_cc_pll6_out_even = { + .offset = 0x6000, + .post_div_shift = 10, + .post_div_table = post_div_table_cam_cc_pll6_out_even, + .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll6_out_even), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll6_out_even", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_pll6.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_lucid_ole_ops, + }, +}; + +static const struct alpha_pll_config cam_cc_pll7_config = { + .l = 0x30, + .alpha = 0x8aaa, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x82aa299c, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000003, + .test_ctl_hi1_val = 0x00009000, + .test_ctl_hi2_val = 0x00000034, + .user_ctl_val = 0x00000400, + .user_ctl_hi_val = 0x00000005, +}; + +static struct clk_alpha_pll cam_cc_pll7 = { + .offset = 0x7000, + .vco_table = lucid_ole_vco, + .num_vco = ARRAY_SIZE(lucid_ole_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll7", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_evo_ops, + }, + }, +}; + +static const struct clk_div_table post_div_table_cam_cc_pll7_out_even[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv 
cam_cc_pll7_out_even = { + .offset = 0x7000, + .post_div_shift = 10, + .post_div_table = post_div_table_cam_cc_pll7_out_even, + .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll7_out_even), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll7_out_even", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_pll7.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_lucid_ole_ops, + }, +}; + +static const struct alpha_pll_config cam_cc_pll8_config = { + .l = 0x14, + .alpha = 0xd555, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x82aa299c, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000003, + .test_ctl_hi1_val = 0x00009000, + .test_ctl_hi2_val = 0x00000034, + .user_ctl_val = 0x00000400, + .user_ctl_hi_val = 0x00000005, +}; + +static struct clk_alpha_pll cam_cc_pll8 = { + .offset = 0x8000, + .vco_table = lucid_ole_vco, + .num_vco = ARRAY_SIZE(lucid_ole_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll8", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_evo_ops, + }, + }, +}; + +static const struct clk_div_table post_div_table_cam_cc_pll8_out_even[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv cam_cc_pll8_out_even = { + .offset = 0x8000, + .post_div_shift = 10, + .post_div_table = post_div_table_cam_cc_pll8_out_even, + .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll8_out_even), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll8_out_even", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_pll8.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_lucid_ole_ops, + }, +}; + +static const struct alpha_pll_config cam_cc_pll9_config = { + .l = 0x32, + .alpha = 0x0, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x82aa299c, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000003, + .test_ctl_hi1_val = 0x00009000, + .test_ctl_hi2_val = 0x00000034, + .user_ctl_val = 0x00008400, + .user_ctl_hi_val = 0x00000005, +}; + +static struct clk_alpha_pll cam_cc_pll9 = { + .offset = 0x9000, + .vco_table = lucid_ole_vco, + .num_vco = ARRAY_SIZE(lucid_ole_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll9", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_evo_ops, + }, + }, +}; + +static const struct clk_div_table post_div_table_cam_cc_pll9_out_even[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv cam_cc_pll9_out_even = { + .offset = 0x9000, + .post_div_shift = 10, + .post_div_table = post_div_table_cam_cc_pll9_out_even, + .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll9_out_even), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll9_out_even", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_pll9.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_lucid_ole_ops, + }, +}; + +static const struct 
clk_div_table post_div_table_cam_cc_pll9_out_odd[] = { + { 0x2, 3 }, + { } +}; + +static struct clk_alpha_pll_postdiv cam_cc_pll9_out_odd = { + .offset = 0x9000, + .post_div_shift = 14, + .post_div_table = post_div_table_cam_cc_pll9_out_odd, + .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll9_out_odd), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll9_out_odd", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_pll9.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_lucid_ole_ops, + }, +}; + +static const struct alpha_pll_config cam_cc_pll10_config = { + .l = 0x30, + .alpha = 0x8aaa, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x82aa299c, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000003, + .test_ctl_hi1_val = 0x00009000, + .test_ctl_hi2_val = 0x00000034, + .user_ctl_val = 0x00000400, + .user_ctl_hi_val = 0x00000005, +}; + +static struct clk_alpha_pll cam_cc_pll10 = { + .offset = 0xa000, + .vco_table = lucid_ole_vco, + .num_vco = ARRAY_SIZE(lucid_ole_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll10", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_evo_ops, + }, + }, +}; + +static const struct clk_div_table post_div_table_cam_cc_pll10_out_even[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv cam_cc_pll10_out_even = { + .offset = 0xa000, + .post_div_shift = 10, + .post_div_table = post_div_table_cam_cc_pll10_out_even, + .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll10_out_even), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll10_out_even", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_pll10.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_lucid_ole_ops, + }, +}; + +static const struct parent_map cam_cc_parent_map_0[] = { + { P_BI_TCXO, 0 }, + { P_CAM_CC_PLL0_OUT_MAIN, 1 }, + { P_CAM_CC_PLL0_OUT_EVEN, 2 }, + { P_CAM_CC_PLL0_OUT_ODD, 3 }, + { P_CAM_CC_PLL9_OUT_ODD, 4 }, + { P_CAM_CC_PLL9_OUT_EVEN, 5 }, +}; + +static const struct clk_parent_data cam_cc_parent_data_0[] = { + { .index = DT_BI_TCXO }, + { .hw = &cam_cc_pll0.clkr.hw }, + { .hw = &cam_cc_pll0_out_even.clkr.hw }, + { .hw = &cam_cc_pll0_out_odd.clkr.hw }, + { .hw = &cam_cc_pll9_out_odd.clkr.hw }, + { .hw = &cam_cc_pll9_out_even.clkr.hw }, +}; + +static const struct parent_map cam_cc_parent_map_1[] = { + { P_BI_TCXO, 0 }, + { P_CAM_CC_PLL2_OUT_EVEN, 3 }, + { P_CAM_CC_PLL2_OUT_MAIN, 5 }, +}; + +static const struct clk_parent_data cam_cc_parent_data_1[] = { + { .index = DT_BI_TCXO }, + { .hw = &cam_cc_pll2.clkr.hw }, + { .hw = &cam_cc_pll2.clkr.hw }, +}; + +static const struct parent_map cam_cc_parent_map_2[] = { + { P_BI_TCXO, 0 }, + { P_CAM_CC_PLL8_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data cam_cc_parent_data_2[] = { + { .index = DT_BI_TCXO }, + { .hw = &cam_cc_pll8_out_even.clkr.hw }, +}; + +static const struct parent_map cam_cc_parent_map_3[] = { + { P_BI_TCXO, 0 }, + { P_CAM_CC_PLL3_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data cam_cc_parent_data_3[] = { + { .index = DT_BI_TCXO }, + { .hw = &cam_cc_pll3_out_even.clkr.hw }, +}; + +static const struct 
parent_map cam_cc_parent_map_4[] = { + { P_BI_TCXO, 0 }, + { P_CAM_CC_PLL4_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data cam_cc_parent_data_4[] = { + { .index = DT_BI_TCXO }, + { .hw = &cam_cc_pll4_out_even.clkr.hw }, +}; + +static const struct parent_map cam_cc_parent_map_5[] = { + { P_BI_TCXO, 0 }, + { P_CAM_CC_PLL5_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data cam_cc_parent_data_5[] = { + { .index = DT_BI_TCXO }, + { .hw = &cam_cc_pll5_out_even.clkr.hw }, +}; + +static const struct parent_map cam_cc_parent_map_6[] = { + { P_BI_TCXO, 0 }, + { P_CAM_CC_PLL1_OUT_EVEN, 4 }, +}; + +static const struct clk_parent_data cam_cc_parent_data_6[] = { + { .index = DT_BI_TCXO }, + { .hw = &cam_cc_pll1_out_even.clkr.hw }, +}; + +static const struct parent_map cam_cc_parent_map_7[] = { + { P_BI_TCXO, 0 }, + { P_CAM_CC_PLL6_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data cam_cc_parent_data_7[] = { + { .index = DT_BI_TCXO }, + { .hw = &cam_cc_pll6_out_even.clkr.hw }, +}; + +static const struct parent_map cam_cc_parent_map_8[] = { + { P_BI_TCXO, 0 }, + { P_CAM_CC_PLL7_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data cam_cc_parent_data_8[] = { + { .index = DT_BI_TCXO }, + { .hw = &cam_cc_pll7_out_even.clkr.hw }, +}; + +static const struct parent_map cam_cc_parent_map_9[] = { + { P_BI_TCXO, 0 }, + { P_CAM_CC_PLL10_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data cam_cc_parent_data_9[] = { + { .index = DT_BI_TCXO }, + { .hw = &cam_cc_pll10_out_even.clkr.hw }, +}; + +static const struct parent_map cam_cc_parent_map_10[] = { + { P_SLEEP_CLK, 0 }, +}; + +static const struct clk_parent_data cam_cc_parent_data_10[] = { + { .index = DT_SLEEP_CLK }, +}; + +static const struct parent_map cam_cc_parent_map_11_ao[] = { + { P_BI_TCXO_AO, 0 }, +}; + +static const struct clk_parent_data cam_cc_parent_data_11_ao[] = { + { .index = DT_BI_TCXO_AO }, +}; + +static const struct freq_tbl ftbl_cam_cc_bps_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(200000000, P_CAM_CC_PLL8_OUT_EVEN, 1, 0, 0), + F(400000000, P_CAM_CC_PLL8_OUT_EVEN, 1, 0, 0), + F(480000000, P_CAM_CC_PLL8_OUT_EVEN, 1, 0, 0), + F(785000000, P_CAM_CC_PLL8_OUT_EVEN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_bps_clk_src = { + .cmd_rcgr = 0x10050, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_2, + .freq_tbl = ftbl_cam_cc_bps_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_bps_clk_src", + .parent_data = cam_cc_parent_data_2, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_2), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_camnoc_axi_rt_clk_src[] = { + F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0), + F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_camnoc_axi_rt_clk_src = { + .cmd_rcgr = 0x1325c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_camnoc_axi_rt_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_camnoc_axi_rt_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_cci_0_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(37500000, P_CAM_CC_PLL0_OUT_EVEN, 16, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_cci_0_clk_src = { + .cmd_rcgr = 0x131cc, + .mnd_width = 8, + .hid_width = 5, + .parent_map = 
cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_cci_0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cci_0_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_cci_1_clk_src = { + .cmd_rcgr = 0x131e8, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_cci_0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cci_1_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_cci_2_clk_src = { + .cmd_rcgr = 0x13204, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_cci_0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cci_2_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_cphy_rx_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0), + F(480000000, P_CAM_CC_PLL0_OUT_MAIN, 2.5, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_cphy_rx_clk_src = { + .cmd_rcgr = 0x1104c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cphy_rx_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_cre_clk_src[] = { + F(200000000, P_CAM_CC_PLL0_OUT_ODD, 2, 0, 0), + F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0), + F(480000000, P_CAM_CC_PLL9_OUT_EVEN, 1, 0, 0), + F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_cre_clk_src = { + .cmd_rcgr = 0x13144, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_cre_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cre_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_csi0phytimer_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = { + .cmd_rcgr = 0x150e0, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csi0phytimer_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = { + .cmd_rcgr = 0x15104, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csi1phytimer_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = 
CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = { + .cmd_rcgr = 0x15124, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csi2phytimer_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = { + .cmd_rcgr = 0x15144, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csi3phytimer_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_csi4phytimer_clk_src = { + .cmd_rcgr = 0x15164, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csi4phytimer_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_csi5phytimer_clk_src = { + .cmd_rcgr = 0x15184, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csi5phytimer_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_csi6phytimer_clk_src = { + .cmd_rcgr = 0x151a4, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csi6phytimer_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_csi7phytimer_clk_src = { + .cmd_rcgr = 0x151c4, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csi7phytimer_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_csid_clk_src[] = { + F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0), + F(480000000, P_CAM_CC_PLL0_OUT_MAIN, 2.5, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_csid_clk_src = { + .cmd_rcgr = 0x13238, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_csid_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csid_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_fast_ahb_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0), + 
F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_fast_ahb_clk_src = { + .cmd_rcgr = 0x10018, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_fast_ahb_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_fast_ahb_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_icp_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0), + F(480000000, P_CAM_CC_PLL9_OUT_EVEN, 1, 0, 0), + F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_icp_clk_src = { + .cmd_rcgr = 0x131a4, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_icp_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_icp_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_ife_0_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(466000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0), + F(594000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0), + F(675000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0), + F(785000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_ife_0_clk_src = { + .cmd_rcgr = 0x11018, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_3, + .freq_tbl = ftbl_cam_cc_ife_0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_0_clk_src", + .parent_data = cam_cc_parent_data_3, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_3), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_ife_1_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(466000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0), + F(594000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0), + F(675000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0), + F(785000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_ife_1_clk_src = { + .cmd_rcgr = 0x12018, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_4, + .freq_tbl = ftbl_cam_cc_ife_1_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_1_clk_src", + .parent_data = cam_cc_parent_data_4, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_4), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_ife_2_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(466000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0), + F(594000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0), + F(675000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0), + F(785000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_ife_2_clk_src = { + .cmd_rcgr = 0x12068, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_5, + .freq_tbl = ftbl_cam_cc_ife_2_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_2_clk_src", + .parent_data = cam_cc_parent_data_5, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_5), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_ife_lite_clk_src = { + .cmd_rcgr = 0x13000, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + 
.freq_tbl = ftbl_cam_cc_csid_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_lite_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = { + .cmd_rcgr = 0x13028, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_csid_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_lite_csid_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_ipe_nps_clk_src[] = { + F(475000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0), + F(575000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0), + F(675000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0), + F(825000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_ipe_nps_clk_src = { + .cmd_rcgr = 0x10094, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_6, + .freq_tbl = ftbl_cam_cc_ipe_nps_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ipe_nps_clk_src", + .parent_data = cam_cc_parent_data_6, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_6), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_jpeg_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(200000000, P_CAM_CC_PLL0_OUT_ODD, 2, 0, 0), + F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0), + F(480000000, P_CAM_CC_PLL9_OUT_EVEN, 1, 0, 0), + F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_jpeg_clk_src = { + .cmd_rcgr = 0x13168, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_jpeg_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_jpeg_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(24000000, P_CAM_CC_PLL2_OUT_EVEN, 10, 1, 4), + F(68571429, P_CAM_CC_PLL2_OUT_MAIN, 14, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_mclk0_clk_src = { + .cmd_rcgr = 0x15000, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_1, + .freq_tbl = ftbl_cam_cc_mclk0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_mclk0_clk_src", + .parent_data = cam_cc_parent_data_1, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_mclk1_clk_src = { + .cmd_rcgr = 0x1501c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_1, + .freq_tbl = ftbl_cam_cc_mclk0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_mclk1_clk_src", + .parent_data = cam_cc_parent_data_1, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_mclk2_clk_src = { + .cmd_rcgr = 0x15038, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_1, + .freq_tbl = ftbl_cam_cc_mclk0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_mclk2_clk_src", + .parent_data = 
cam_cc_parent_data_1, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_mclk3_clk_src = { + .cmd_rcgr = 0x15054, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_1, + .freq_tbl = ftbl_cam_cc_mclk0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_mclk3_clk_src", + .parent_data = cam_cc_parent_data_1, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_mclk4_clk_src = { + .cmd_rcgr = 0x15070, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_1, + .freq_tbl = ftbl_cam_cc_mclk0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_mclk4_clk_src", + .parent_data = cam_cc_parent_data_1, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_mclk5_clk_src = { + .cmd_rcgr = 0x1508c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_1, + .freq_tbl = ftbl_cam_cc_mclk0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_mclk5_clk_src", + .parent_data = cam_cc_parent_data_1, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_mclk6_clk_src = { + .cmd_rcgr = 0x150a8, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_1, + .freq_tbl = ftbl_cam_cc_mclk0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_mclk6_clk_src", + .parent_data = cam_cc_parent_data_1, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_mclk7_clk_src = { + .cmd_rcgr = 0x150c4, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_1, + .freq_tbl = ftbl_cam_cc_mclk0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_mclk7_clk_src", + .parent_data = cam_cc_parent_data_1, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_qdss_debug_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(75000000, P_CAM_CC_PLL0_OUT_EVEN, 8, 0, 0), + F(150000000, P_CAM_CC_PLL0_OUT_EVEN, 4, 0, 0), + F(300000000, P_CAM_CC_PLL0_OUT_MAIN, 4, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_qdss_debug_clk_src = { + .cmd_rcgr = 0x1329c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_qdss_debug_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_qdss_debug_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_sfe_0_clk_src[] = { + F(466000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0), + F(594000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0), + F(675000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0), + F(785000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_sfe_0_clk_src = { + .cmd_rcgr = 0x1306c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_7, + .freq_tbl = ftbl_cam_cc_sfe_0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = 
"cam_cc_sfe_0_clk_src", + .parent_data = cam_cc_parent_data_7, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_7), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_sfe_1_clk_src[] = { + F(466000000, P_CAM_CC_PLL7_OUT_EVEN, 1, 0, 0), + F(594000000, P_CAM_CC_PLL7_OUT_EVEN, 1, 0, 0), + F(675000000, P_CAM_CC_PLL7_OUT_EVEN, 1, 0, 0), + F(785000000, P_CAM_CC_PLL7_OUT_EVEN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_sfe_1_clk_src = { + .cmd_rcgr = 0x130bc, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_8, + .freq_tbl = ftbl_cam_cc_sfe_1_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_sfe_1_clk_src", + .parent_data = cam_cc_parent_data_8, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_8), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_sfe_2_clk_src[] = { + F(466000000, P_CAM_CC_PLL10_OUT_EVEN, 1, 0, 0), + F(594000000, P_CAM_CC_PLL10_OUT_EVEN, 1, 0, 0), + F(675000000, P_CAM_CC_PLL10_OUT_EVEN, 1, 0, 0), + F(785000000, P_CAM_CC_PLL10_OUT_EVEN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_sfe_2_clk_src = { + .cmd_rcgr = 0x1310c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_9, + .freq_tbl = ftbl_cam_cc_sfe_2_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_sfe_2_clk_src", + .parent_data = cam_cc_parent_data_9, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_9), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_sleep_clk_src[] = { + F(32000, P_SLEEP_CLK, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_sleep_clk_src = { + .cmd_rcgr = 0x132f0, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_10, + .freq_tbl = ftbl_cam_cc_sleep_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_sleep_clk_src", + .parent_data = cam_cc_parent_data_10, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_10), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_slow_ahb_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(80000000, P_CAM_CC_PLL0_OUT_EVEN, 7.5, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_slow_ahb_clk_src = { + .cmd_rcgr = 0x10034, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_slow_ahb_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_slow_ahb_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_xo_clk_src[] = { + F(19200000, P_BI_TCXO_AO, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_xo_clk_src = { + .cmd_rcgr = 0x132d4, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_11_ao, + .freq_tbl = ftbl_cam_cc_xo_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_xo_clk_src", + .parent_data = cam_cc_parent_data_11_ao, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_11_ao), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_branch cam_cc_bps_ahb_clk = { + .halt_reg = 0x1004c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1004c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_bps_ahb_clk", + .parent_hws = 
(const struct clk_hw*[]) { + &cam_cc_slow_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_bps_clk = { + .halt_reg = 0x10068, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x10068, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_bps_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_bps_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_bps_fast_ahb_clk = { + .halt_reg = 0x10030, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x10030, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_bps_fast_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_fast_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_bps_shift_clk = { + .halt_reg = 0x10078, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x10078, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_bps_shift_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_xo_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_camnoc_axi_nrt_clk = { + .halt_reg = 0x13284, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13284, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_camnoc_axi_nrt_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_camnoc_axi_rt_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_camnoc_axi_rt_clk = { + .halt_reg = 0x13274, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13274, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_camnoc_axi_rt_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_camnoc_axi_rt_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_camnoc_dcd_xo_clk = { + .halt_reg = 0x13290, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13290, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_camnoc_dcd_xo_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_xo_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_camnoc_xo_clk = { + .halt_reg = 0x13294, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13294, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_camnoc_xo_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_xo_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cci_0_clk = { + .halt_reg = 0x131e4, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x131e4, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cci_0_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_cci_0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + 
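
The PLL and RCG tables above are tightly coupled: each Lucid EVO/OLE alpha PLL runs at ref * (l + alpha / 2^16) from the 19.2 MHz TCXO parent, its _out_even/_out_odd postdiv divides by the second column of the matching clk_div_table entry, and the freq_tbl rates are chosen to land on those outputs (the tables are also mutually consistent with a 1200 MHz cam_cc_pll0, configured earlier in the file). A minimal standalone sketch of that decoding; the 16-bit alpha width is an assumption from the Lucid register layout, and the lucid_pll_rate() helper is illustrative, not code from this patch:

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative helper (not part of this patch): decode a Lucid EVO/OLE
 * alpha-PLL config into its VCO rate, rate = ref * (l + alpha / 2^16),
 * with ref = 19.2 MHz (the DT_BI_TCXO index every PLL above parents to).
 */
static uint64_t lucid_pll_rate(uint64_t ref, uint32_t l, uint32_t alpha)
{
	return ref * l + ((ref * alpha) >> 16);
}

int main(void)
{
	const uint64_t ref = 19200000;	/* 19.2 MHz TCXO */

	/*
	 * cam_cc_pll3_config: l = 0x30, alpha = 0x8aaa -> ~932 MHz VCO;
	 * post_div_table_cam_cc_pll3_out_even { 0x1, 2 } halves that to
	 * ~466 MHz, the lowest PLL3 rate in ftbl_cam_cc_ife_0_clk_src.
	 */
	printf("pll3 %llu out_even %llu\n",
	       (unsigned long long)lucid_pll_rate(ref, 0x30, 0x8aaa),
	       (unsigned long long)(lucid_pll_rate(ref, 0x30, 0x8aaa) / 2));

	/*
	 * cam_cc_pll9_config: l = 0x32, alpha = 0 -> exactly 960 MHz;
	 * out_even (/2) provides the 480 MHz source used by e.g.
	 * ftbl_cam_cc_icp_clk_src, and out_odd ({ 0x2, 3 }) divides by 3.
	 */
	printf("pll9 %llu\n", (unsigned long long)lucid_pll_rate(ref, 0x32, 0));

	return 0;
}

The half-integer pre-dividers in the tables (for example the 2.5 in ftbl_cam_cc_cphy_rx_clk_src) are legal because the qcom F() macro stores the divider field as 2 * div - 1; and since the IFE/SFE RCGs pair CLK_SET_RATE_PARENT with a dedicated per-block PLL, their higher table rates (594/675/785 MHz at a divider of 1) are reached by re-rating that PLL rather than by dividing.
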
+static struct clk_branch cam_cc_cci_1_clk = { + .halt_reg = 0x13200, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13200, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cci_1_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_cci_1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cci_2_clk = { + .halt_reg = 0x1321c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1321c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cci_2_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_cci_2_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_core_ahb_clk = { + .halt_reg = 0x132d0, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x132d0, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_core_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_slow_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cpas_ahb_clk = { + .halt_reg = 0x13220, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13220, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cpas_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_slow_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cpas_bps_clk = { + .halt_reg = 0x10074, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x10074, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cpas_bps_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_bps_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cpas_cre_clk = { + .halt_reg = 0x13160, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13160, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cpas_cre_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_cre_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cpas_fast_ahb_clk = { + .halt_reg = 0x1322c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1322c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cpas_fast_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_fast_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cpas_ife_0_clk = { + .halt_reg = 0x1103c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1103c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cpas_ife_0_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_ife_0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cpas_ife_1_clk = { + .halt_reg = 0x1203c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1203c, + .enable_mask = BIT(0), + .hw.init = &(const struct 
clk_init_data) { + .name = "cam_cc_cpas_ife_1_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_ife_1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cpas_ife_2_clk = { + .halt_reg = 0x1208c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1208c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cpas_ife_2_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_ife_2_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cpas_ife_lite_clk = { + .halt_reg = 0x13024, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13024, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cpas_ife_lite_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_ife_lite_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cpas_ipe_nps_clk = { + .halt_reg = 0x100b8, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x100b8, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cpas_ipe_nps_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_ipe_nps_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cpas_sbi_clk = { + .halt_reg = 0x10104, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x10104, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cpas_sbi_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_ife_0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cpas_sfe_0_clk = { + .halt_reg = 0x13090, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13090, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cpas_sfe_0_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_sfe_0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cpas_sfe_1_clk = { + .halt_reg = 0x130e0, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x130e0, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cpas_sfe_1_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_sfe_1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cpas_sfe_2_clk = { + .halt_reg = 0x13130, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13130, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cpas_sfe_2_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_sfe_2_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cre_ahb_clk = { + .halt_reg = 0x13164, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13164, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cre_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_slow_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = 
CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cre_clk = { + .halt_reg = 0x1315c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1315c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cre_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_cre_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csi0phytimer_clk = { + .halt_reg = 0x150f8, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x150f8, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csi0phytimer_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_csi0phytimer_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csi1phytimer_clk = { + .halt_reg = 0x1511c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1511c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csi1phytimer_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_csi1phytimer_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csi2phytimer_clk = { + .halt_reg = 0x1513c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1513c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csi2phytimer_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_csi2phytimer_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csi3phytimer_clk = { + .halt_reg = 0x1515c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1515c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csi3phytimer_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_csi3phytimer_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csi4phytimer_clk = { + .halt_reg = 0x1517c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1517c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csi4phytimer_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_csi4phytimer_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csi5phytimer_clk = { + .halt_reg = 0x1519c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1519c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csi5phytimer_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_csi5phytimer_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csi6phytimer_clk = { + .halt_reg = 0x151bc, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x151bc, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csi6phytimer_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_csi6phytimer_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csi7phytimer_clk = { 
+ .halt_reg = 0x151dc, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x151dc, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csi7phytimer_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_csi7phytimer_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csid_clk = { + .halt_reg = 0x13250, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13250, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csid_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_csid_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csid_csiphy_rx_clk = { + .halt_reg = 0x15100, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x15100, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csid_csiphy_rx_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_cphy_rx_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csiphy0_clk = { + .halt_reg = 0x150fc, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x150fc, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csiphy0_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_cphy_rx_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csiphy1_clk = { + .halt_reg = 0x15120, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x15120, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csiphy1_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_cphy_rx_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csiphy2_clk = { + .halt_reg = 0x15140, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x15140, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csiphy2_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_cphy_rx_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csiphy3_clk = { + .halt_reg = 0x15160, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x15160, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csiphy3_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_cphy_rx_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csiphy4_clk = { + .halt_reg = 0x15180, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x15180, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csiphy4_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_cphy_rx_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csiphy5_clk = { + .halt_reg = 0x151a0, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x151a0, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csiphy5_clk", + 
.parent_hws = (const struct clk_hw*[]) { + &cam_cc_cphy_rx_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csiphy6_clk = { + .halt_reg = 0x151c0, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x151c0, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csiphy6_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_cphy_rx_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csiphy7_clk = { + .halt_reg = 0x151e0, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x151e0, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csiphy7_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_cphy_rx_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_icp_ahb_clk = { + .halt_reg = 0x131c8, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x131c8, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_icp_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_slow_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_icp_clk = { + .halt_reg = 0x131bc, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x131bc, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_icp_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_icp_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_0_clk = { + .halt_reg = 0x11030, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x11030, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_0_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_ife_0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_0_fast_ahb_clk = { + .halt_reg = 0x11048, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x11048, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_0_fast_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_fast_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_0_shift_clk = { + .halt_reg = 0x11064, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x11064, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_0_shift_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_xo_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_1_clk = { + .halt_reg = 0x12030, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x12030, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_1_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_ife_1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch 
cam_cc_ife_1_fast_ahb_clk = { + .halt_reg = 0x12048, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x12048, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_1_fast_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_fast_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_1_shift_clk = { + .halt_reg = 0x1204c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x1204c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_1_shift_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_xo_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_2_clk = { + .halt_reg = 0x12080, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x12080, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_2_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_ife_2_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_2_fast_ahb_clk = { + .halt_reg = 0x12098, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x12098, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_2_fast_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_fast_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_2_shift_clk = { + .halt_reg = 0x1209c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x1209c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_2_shift_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_xo_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_lite_ahb_clk = { + .halt_reg = 0x13050, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13050, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_lite_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_slow_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_lite_clk = { + .halt_reg = 0x13018, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13018, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_lite_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_ife_lite_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_lite_cphy_rx_clk = { + .halt_reg = 0x1304c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1304c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_lite_cphy_rx_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_cphy_rx_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_lite_csid_clk = { + .halt_reg = 0x13040, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13040, + .enable_mask = 
BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_lite_csid_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_ife_lite_csid_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ipe_nps_ahb_clk = { + .halt_reg = 0x100d0, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x100d0, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ipe_nps_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_slow_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ipe_nps_clk = { + .halt_reg = 0x100ac, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x100ac, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ipe_nps_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_ipe_nps_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ipe_nps_fast_ahb_clk = { + .halt_reg = 0x100d4, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x100d4, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ipe_nps_fast_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_fast_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ipe_pps_clk = { + .halt_reg = 0x100bc, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x100bc, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ipe_pps_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_ipe_nps_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ipe_pps_fast_ahb_clk = { + .halt_reg = 0x100d8, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x100d8, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ipe_pps_fast_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_fast_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ipe_shift_clk = { + .halt_reg = 0x100dc, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x100dc, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ipe_shift_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_xo_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_jpeg_1_clk = { + .halt_reg = 0x1318c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1318c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_jpeg_1_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_jpeg_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_jpeg_clk = { + .halt_reg = 0x13180, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13180, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_jpeg_clk", + .parent_hws = (const struct clk_hw*[]) { + 
&cam_cc_jpeg_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_mclk0_clk = { + .halt_reg = 0x15018, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x15018, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_mclk0_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_mclk0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_mclk1_clk = { + .halt_reg = 0x15034, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x15034, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_mclk1_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_mclk1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_mclk2_clk = { + .halt_reg = 0x15050, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x15050, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_mclk2_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_mclk2_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_mclk3_clk = { + .halt_reg = 0x1506c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1506c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_mclk3_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_mclk3_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_mclk4_clk = { + .halt_reg = 0x15088, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x15088, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_mclk4_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_mclk4_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_mclk5_clk = { + .halt_reg = 0x150a4, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x150a4, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_mclk5_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_mclk5_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_mclk6_clk = { + .halt_reg = 0x150c0, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x150c0, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_mclk6_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_mclk6_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_mclk7_clk = { + .halt_reg = 0x150dc, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x150dc, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_mclk7_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_mclk7_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_qdss_debug_clk = { + .halt_reg = 0x132b4, + .halt_check = BRANCH_HALT, + .clkr = { + 
.enable_reg = 0x132b4, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_qdss_debug_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_qdss_debug_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_qdss_debug_xo_clk = { + .halt_reg = 0x132b8, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x132b8, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_qdss_debug_xo_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_xo_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_sbi_clk = { + .halt_reg = 0x100f8, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x100f8, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_sbi_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_ife_0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_sbi_fast_ahb_clk = { + .halt_reg = 0x10108, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x10108, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_sbi_fast_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_fast_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_sbi_shift_clk = { + .halt_reg = 0x1010c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x1010c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_sbi_shift_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_xo_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_sfe_0_clk = { + .halt_reg = 0x13084, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13084, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_sfe_0_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_sfe_0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_sfe_0_fast_ahb_clk = { + .halt_reg = 0x1309c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1309c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_sfe_0_fast_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_fast_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_sfe_0_shift_clk = { + .halt_reg = 0x130a0, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x130a0, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_sfe_0_shift_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_xo_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_sfe_1_clk = { + .halt_reg = 0x130d4, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x130d4, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_sfe_1_clk", + .parent_hws = (const struct clk_hw*[]) { + 
&cam_cc_sfe_1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_sfe_1_fast_ahb_clk = { + .halt_reg = 0x130ec, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x130ec, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_sfe_1_fast_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_fast_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_sfe_1_shift_clk = { + .halt_reg = 0x130f0, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x130f0, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_sfe_1_shift_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_xo_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_sfe_2_clk = { + .halt_reg = 0x13124, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13124, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_sfe_2_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_sfe_2_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_sfe_2_fast_ahb_clk = { + .halt_reg = 0x1313c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1313c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_sfe_2_fast_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_fast_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_sfe_2_shift_clk = { + .halt_reg = 0x13140, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x13140, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_sfe_2_shift_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_xo_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_titan_top_shift_clk = { + .halt_reg = 0x1330c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x1330c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_titan_top_shift_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_xo_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct gdsc cam_cc_titan_top_gdsc = { + .gdscr = 0x132bc, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, + .pd = { + .name = "cam_cc_titan_top_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE, +}; + +static struct gdsc cam_cc_bps_gdsc = { + .gdscr = 0x10004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, + .pd = { + .name = "cam_cc_bps_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .parent = &cam_cc_titan_top_gdsc.pd, + .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE, +}; + +static struct gdsc cam_cc_ife_0_gdsc = { + .gdscr = 0x11004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, + .pd = { + .name = "cam_cc_ife_0_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .parent = &cam_cc_titan_top_gdsc.pd, + .flags = POLL_CFG_GDSCR 
| RETAIN_FF_ENABLE, +}; + +static struct gdsc cam_cc_ife_1_gdsc = { + .gdscr = 0x12004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, + .pd = { + .name = "cam_cc_ife_1_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .parent = &cam_cc_titan_top_gdsc.pd, + .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE, +}; + +static struct gdsc cam_cc_ife_2_gdsc = { + .gdscr = 0x12054, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, + .pd = { + .name = "cam_cc_ife_2_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .parent = &cam_cc_titan_top_gdsc.pd, + .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE, +}; + +static struct gdsc cam_cc_ipe_0_gdsc = { + .gdscr = 0x10080, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, + .pd = { + .name = "cam_cc_ipe_0_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .parent = &cam_cc_titan_top_gdsc.pd, + .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE, +}; + +static struct gdsc cam_cc_sbi_gdsc = { + .gdscr = 0x100e4, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, + .pd = { + .name = "cam_cc_sbi_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .parent = &cam_cc_titan_top_gdsc.pd, + .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE, +}; + +static struct gdsc cam_cc_sfe_0_gdsc = { + .gdscr = 0x13058, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, + .pd = { + .name = "cam_cc_sfe_0_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .parent = &cam_cc_titan_top_gdsc.pd, + .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE, +}; + +static struct gdsc cam_cc_sfe_1_gdsc = { + .gdscr = 0x130a8, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, + .pd = { + .name = "cam_cc_sfe_1_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .parent = &cam_cc_titan_top_gdsc.pd, + .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE, +}; + +static struct gdsc cam_cc_sfe_2_gdsc = { + .gdscr = 0x130f8, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, + .pd = { + .name = "cam_cc_sfe_2_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .parent = &cam_cc_titan_top_gdsc.pd, + .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE, +}; + +static struct clk_regmap *cam_cc_sm8650_clocks[] = { + [CAM_CC_BPS_AHB_CLK] = &cam_cc_bps_ahb_clk.clkr, + [CAM_CC_BPS_CLK] = &cam_cc_bps_clk.clkr, + [CAM_CC_BPS_CLK_SRC] = &cam_cc_bps_clk_src.clkr, + [CAM_CC_BPS_FAST_AHB_CLK] = &cam_cc_bps_fast_ahb_clk.clkr, + [CAM_CC_BPS_SHIFT_CLK] = &cam_cc_bps_shift_clk.clkr, + [CAM_CC_CAMNOC_AXI_NRT_CLK] = &cam_cc_camnoc_axi_nrt_clk.clkr, + [CAM_CC_CAMNOC_AXI_RT_CLK] = &cam_cc_camnoc_axi_rt_clk.clkr, + [CAM_CC_CAMNOC_AXI_RT_CLK_SRC] = &cam_cc_camnoc_axi_rt_clk_src.clkr, + [CAM_CC_CAMNOC_DCD_XO_CLK] = &cam_cc_camnoc_dcd_xo_clk.clkr, + [CAM_CC_CAMNOC_XO_CLK] = &cam_cc_camnoc_xo_clk.clkr, + [CAM_CC_CCI_0_CLK] = &cam_cc_cci_0_clk.clkr, + [CAM_CC_CCI_0_CLK_SRC] = &cam_cc_cci_0_clk_src.clkr, + [CAM_CC_CCI_1_CLK] = &cam_cc_cci_1_clk.clkr, + [CAM_CC_CCI_1_CLK_SRC] = &cam_cc_cci_1_clk_src.clkr, + [CAM_CC_CCI_2_CLK] = &cam_cc_cci_2_clk.clkr, + [CAM_CC_CCI_2_CLK_SRC] = &cam_cc_cci_2_clk_src.clkr, + [CAM_CC_CORE_AHB_CLK] = &cam_cc_core_ahb_clk.clkr, + [CAM_CC_CPAS_AHB_CLK] = &cam_cc_cpas_ahb_clk.clkr, + [CAM_CC_CPAS_BPS_CLK] = &cam_cc_cpas_bps_clk.clkr, + [CAM_CC_CPAS_CRE_CLK] = &cam_cc_cpas_cre_clk.clkr, + [CAM_CC_CPAS_FAST_AHB_CLK] = &cam_cc_cpas_fast_ahb_clk.clkr, + [CAM_CC_CPAS_IFE_0_CLK] = &cam_cc_cpas_ife_0_clk.clkr, + [CAM_CC_CPAS_IFE_1_CLK] = &cam_cc_cpas_ife_1_clk.clkr, + [CAM_CC_CPAS_IFE_2_CLK] = &cam_cc_cpas_ife_2_clk.clkr, + 
[CAM_CC_CPAS_IFE_LITE_CLK] = &cam_cc_cpas_ife_lite_clk.clkr, + [CAM_CC_CPAS_IPE_NPS_CLK] = &cam_cc_cpas_ipe_nps_clk.clkr, + [CAM_CC_CPAS_SBI_CLK] = &cam_cc_cpas_sbi_clk.clkr, + [CAM_CC_CPAS_SFE_0_CLK] = &cam_cc_cpas_sfe_0_clk.clkr, + [CAM_CC_CPAS_SFE_1_CLK] = &cam_cc_cpas_sfe_1_clk.clkr, + [CAM_CC_CPAS_SFE_2_CLK] = &cam_cc_cpas_sfe_2_clk.clkr, + [CAM_CC_CPHY_RX_CLK_SRC] = &cam_cc_cphy_rx_clk_src.clkr, + [CAM_CC_CRE_AHB_CLK] = &cam_cc_cre_ahb_clk.clkr, + [CAM_CC_CRE_CLK] = &cam_cc_cre_clk.clkr, + [CAM_CC_CRE_CLK_SRC] = &cam_cc_cre_clk_src.clkr, + [CAM_CC_CSI0PHYTIMER_CLK] = &cam_cc_csi0phytimer_clk.clkr, + [CAM_CC_CSI0PHYTIMER_CLK_SRC] = &cam_cc_csi0phytimer_clk_src.clkr, + [CAM_CC_CSI1PHYTIMER_CLK] = &cam_cc_csi1phytimer_clk.clkr, + [CAM_CC_CSI1PHYTIMER_CLK_SRC] = &cam_cc_csi1phytimer_clk_src.clkr, + [CAM_CC_CSI2PHYTIMER_CLK] = &cam_cc_csi2phytimer_clk.clkr, + [CAM_CC_CSI2PHYTIMER_CLK_SRC] = &cam_cc_csi2phytimer_clk_src.clkr, + [CAM_CC_CSI3PHYTIMER_CLK] = &cam_cc_csi3phytimer_clk.clkr, + [CAM_CC_CSI3PHYTIMER_CLK_SRC] = &cam_cc_csi3phytimer_clk_src.clkr, + [CAM_CC_CSI4PHYTIMER_CLK] = &cam_cc_csi4phytimer_clk.clkr, + [CAM_CC_CSI4PHYTIMER_CLK_SRC] = &cam_cc_csi4phytimer_clk_src.clkr, + [CAM_CC_CSI5PHYTIMER_CLK] = &cam_cc_csi5phytimer_clk.clkr, + [CAM_CC_CSI5PHYTIMER_CLK_SRC] = &cam_cc_csi5phytimer_clk_src.clkr, + [CAM_CC_CSI6PHYTIMER_CLK] = &cam_cc_csi6phytimer_clk.clkr, + [CAM_CC_CSI6PHYTIMER_CLK_SRC] = &cam_cc_csi6phytimer_clk_src.clkr, + [CAM_CC_CSI7PHYTIMER_CLK] = &cam_cc_csi7phytimer_clk.clkr, + [CAM_CC_CSI7PHYTIMER_CLK_SRC] = &cam_cc_csi7phytimer_clk_src.clkr, + [CAM_CC_CSID_CLK] = &cam_cc_csid_clk.clkr, + [CAM_CC_CSID_CLK_SRC] = &cam_cc_csid_clk_src.clkr, + [CAM_CC_CSID_CSIPHY_RX_CLK] = &cam_cc_csid_csiphy_rx_clk.clkr, + [CAM_CC_CSIPHY0_CLK] = &cam_cc_csiphy0_clk.clkr, + [CAM_CC_CSIPHY1_CLK] = &cam_cc_csiphy1_clk.clkr, + [CAM_CC_CSIPHY2_CLK] = &cam_cc_csiphy2_clk.clkr, + [CAM_CC_CSIPHY3_CLK] = &cam_cc_csiphy3_clk.clkr, + [CAM_CC_CSIPHY4_CLK] = &cam_cc_csiphy4_clk.clkr, + [CAM_CC_CSIPHY5_CLK] = &cam_cc_csiphy5_clk.clkr, + [CAM_CC_CSIPHY6_CLK] = &cam_cc_csiphy6_clk.clkr, + [CAM_CC_CSIPHY7_CLK] = &cam_cc_csiphy7_clk.clkr, + [CAM_CC_FAST_AHB_CLK_SRC] = &cam_cc_fast_ahb_clk_src.clkr, + [CAM_CC_ICP_AHB_CLK] = &cam_cc_icp_ahb_clk.clkr, + [CAM_CC_ICP_CLK] = &cam_cc_icp_clk.clkr, + [CAM_CC_ICP_CLK_SRC] = &cam_cc_icp_clk_src.clkr, + [CAM_CC_IFE_0_CLK] = &cam_cc_ife_0_clk.clkr, + [CAM_CC_IFE_0_CLK_SRC] = &cam_cc_ife_0_clk_src.clkr, + [CAM_CC_IFE_0_FAST_AHB_CLK] = &cam_cc_ife_0_fast_ahb_clk.clkr, + [CAM_CC_IFE_0_SHIFT_CLK] = &cam_cc_ife_0_shift_clk.clkr, + [CAM_CC_IFE_1_CLK] = &cam_cc_ife_1_clk.clkr, + [CAM_CC_IFE_1_CLK_SRC] = &cam_cc_ife_1_clk_src.clkr, + [CAM_CC_IFE_1_FAST_AHB_CLK] = &cam_cc_ife_1_fast_ahb_clk.clkr, + [CAM_CC_IFE_1_SHIFT_CLK] = &cam_cc_ife_1_shift_clk.clkr, + [CAM_CC_IFE_2_CLK] = &cam_cc_ife_2_clk.clkr, + [CAM_CC_IFE_2_CLK_SRC] = &cam_cc_ife_2_clk_src.clkr, + [CAM_CC_IFE_2_FAST_AHB_CLK] = &cam_cc_ife_2_fast_ahb_clk.clkr, + [CAM_CC_IFE_2_SHIFT_CLK] = &cam_cc_ife_2_shift_clk.clkr, + [CAM_CC_IFE_LITE_AHB_CLK] = &cam_cc_ife_lite_ahb_clk.clkr, + [CAM_CC_IFE_LITE_CLK] = &cam_cc_ife_lite_clk.clkr, + [CAM_CC_IFE_LITE_CLK_SRC] = &cam_cc_ife_lite_clk_src.clkr, + [CAM_CC_IFE_LITE_CPHY_RX_CLK] = &cam_cc_ife_lite_cphy_rx_clk.clkr, + [CAM_CC_IFE_LITE_CSID_CLK] = &cam_cc_ife_lite_csid_clk.clkr, + [CAM_CC_IFE_LITE_CSID_CLK_SRC] = &cam_cc_ife_lite_csid_clk_src.clkr, + [CAM_CC_IPE_NPS_AHB_CLK] = &cam_cc_ipe_nps_ahb_clk.clkr, + [CAM_CC_IPE_NPS_CLK] = &cam_cc_ipe_nps_clk.clkr, + 
[CAM_CC_IPE_NPS_CLK_SRC] = &cam_cc_ipe_nps_clk_src.clkr, + [CAM_CC_IPE_NPS_FAST_AHB_CLK] = &cam_cc_ipe_nps_fast_ahb_clk.clkr, + [CAM_CC_IPE_PPS_CLK] = &cam_cc_ipe_pps_clk.clkr, + [CAM_CC_IPE_PPS_FAST_AHB_CLK] = &cam_cc_ipe_pps_fast_ahb_clk.clkr, + [CAM_CC_IPE_SHIFT_CLK] = &cam_cc_ipe_shift_clk.clkr, + [CAM_CC_JPEG_1_CLK] = &cam_cc_jpeg_1_clk.clkr, + [CAM_CC_JPEG_CLK] = &cam_cc_jpeg_clk.clkr, + [CAM_CC_JPEG_CLK_SRC] = &cam_cc_jpeg_clk_src.clkr, + [CAM_CC_MCLK0_CLK] = &cam_cc_mclk0_clk.clkr, + [CAM_CC_MCLK0_CLK_SRC] = &cam_cc_mclk0_clk_src.clkr, + [CAM_CC_MCLK1_CLK] = &cam_cc_mclk1_clk.clkr, + [CAM_CC_MCLK1_CLK_SRC] = &cam_cc_mclk1_clk_src.clkr, + [CAM_CC_MCLK2_CLK] = &cam_cc_mclk2_clk.clkr, + [CAM_CC_MCLK2_CLK_SRC] = &cam_cc_mclk2_clk_src.clkr, + [CAM_CC_MCLK3_CLK] = &cam_cc_mclk3_clk.clkr, + [CAM_CC_MCLK3_CLK_SRC] = &cam_cc_mclk3_clk_src.clkr, + [CAM_CC_MCLK4_CLK] = &cam_cc_mclk4_clk.clkr, + [CAM_CC_MCLK4_CLK_SRC] = &cam_cc_mclk4_clk_src.clkr, + [CAM_CC_MCLK5_CLK] = &cam_cc_mclk5_clk.clkr, + [CAM_CC_MCLK5_CLK_SRC] = &cam_cc_mclk5_clk_src.clkr, + [CAM_CC_MCLK6_CLK] = &cam_cc_mclk6_clk.clkr, + [CAM_CC_MCLK6_CLK_SRC] = &cam_cc_mclk6_clk_src.clkr, + [CAM_CC_MCLK7_CLK] = &cam_cc_mclk7_clk.clkr, + [CAM_CC_MCLK7_CLK_SRC] = &cam_cc_mclk7_clk_src.clkr, + [CAM_CC_PLL0] = &cam_cc_pll0.clkr, + [CAM_CC_PLL0_OUT_EVEN] = &cam_cc_pll0_out_even.clkr, + [CAM_CC_PLL0_OUT_ODD] = &cam_cc_pll0_out_odd.clkr, + [CAM_CC_PLL1] = &cam_cc_pll1.clkr, + [CAM_CC_PLL1_OUT_EVEN] = &cam_cc_pll1_out_even.clkr, + [CAM_CC_PLL2] = &cam_cc_pll2.clkr, + [CAM_CC_PLL3] = &cam_cc_pll3.clkr, + [CAM_CC_PLL3_OUT_EVEN] = &cam_cc_pll3_out_even.clkr, + [CAM_CC_PLL4] = &cam_cc_pll4.clkr, + [CAM_CC_PLL4_OUT_EVEN] = &cam_cc_pll4_out_even.clkr, + [CAM_CC_PLL5] = &cam_cc_pll5.clkr, + [CAM_CC_PLL5_OUT_EVEN] = &cam_cc_pll5_out_even.clkr, + [CAM_CC_PLL6] = &cam_cc_pll6.clkr, + [CAM_CC_PLL6_OUT_EVEN] = &cam_cc_pll6_out_even.clkr, + [CAM_CC_PLL7] = &cam_cc_pll7.clkr, + [CAM_CC_PLL7_OUT_EVEN] = &cam_cc_pll7_out_even.clkr, + [CAM_CC_PLL8] = &cam_cc_pll8.clkr, + [CAM_CC_PLL8_OUT_EVEN] = &cam_cc_pll8_out_even.clkr, + [CAM_CC_PLL9] = &cam_cc_pll9.clkr, + [CAM_CC_PLL9_OUT_EVEN] = &cam_cc_pll9_out_even.clkr, + [CAM_CC_PLL9_OUT_ODD] = &cam_cc_pll9_out_odd.clkr, + [CAM_CC_PLL10] = &cam_cc_pll10.clkr, + [CAM_CC_PLL10_OUT_EVEN] = &cam_cc_pll10_out_even.clkr, + [CAM_CC_QDSS_DEBUG_CLK] = &cam_cc_qdss_debug_clk.clkr, + [CAM_CC_QDSS_DEBUG_CLK_SRC] = &cam_cc_qdss_debug_clk_src.clkr, + [CAM_CC_QDSS_DEBUG_XO_CLK] = &cam_cc_qdss_debug_xo_clk.clkr, + [CAM_CC_SBI_CLK] = &cam_cc_sbi_clk.clkr, + [CAM_CC_SBI_FAST_AHB_CLK] = &cam_cc_sbi_fast_ahb_clk.clkr, + [CAM_CC_SBI_SHIFT_CLK] = &cam_cc_sbi_shift_clk.clkr, + [CAM_CC_SFE_0_CLK] = &cam_cc_sfe_0_clk.clkr, + [CAM_CC_SFE_0_CLK_SRC] = &cam_cc_sfe_0_clk_src.clkr, + [CAM_CC_SFE_0_FAST_AHB_CLK] = &cam_cc_sfe_0_fast_ahb_clk.clkr, + [CAM_CC_SFE_0_SHIFT_CLK] = &cam_cc_sfe_0_shift_clk.clkr, + [CAM_CC_SFE_1_CLK] = &cam_cc_sfe_1_clk.clkr, + [CAM_CC_SFE_1_CLK_SRC] = &cam_cc_sfe_1_clk_src.clkr, + [CAM_CC_SFE_1_FAST_AHB_CLK] = &cam_cc_sfe_1_fast_ahb_clk.clkr, + [CAM_CC_SFE_1_SHIFT_CLK] = &cam_cc_sfe_1_shift_clk.clkr, + [CAM_CC_SFE_2_CLK] = &cam_cc_sfe_2_clk.clkr, + [CAM_CC_SFE_2_CLK_SRC] = &cam_cc_sfe_2_clk_src.clkr, + [CAM_CC_SFE_2_FAST_AHB_CLK] = &cam_cc_sfe_2_fast_ahb_clk.clkr, + [CAM_CC_SFE_2_SHIFT_CLK] = &cam_cc_sfe_2_shift_clk.clkr, + [CAM_CC_SLEEP_CLK_SRC] = &cam_cc_sleep_clk_src.clkr, + [CAM_CC_SLOW_AHB_CLK_SRC] = &cam_cc_slow_ahb_clk_src.clkr, + [CAM_CC_TITAN_TOP_SHIFT_CLK] = &cam_cc_titan_top_shift_clk.clkr, + 
[CAM_CC_XO_CLK_SRC] = &cam_cc_xo_clk_src.clkr, +}; + +static struct gdsc *cam_cc_sm8650_gdscs[] = { + [CAM_CC_TITAN_TOP_GDSC] = &cam_cc_titan_top_gdsc, + [CAM_CC_BPS_GDSC] = &cam_cc_bps_gdsc, + [CAM_CC_IFE_0_GDSC] = &cam_cc_ife_0_gdsc, + [CAM_CC_IFE_1_GDSC] = &cam_cc_ife_1_gdsc, + [CAM_CC_IFE_2_GDSC] = &cam_cc_ife_2_gdsc, + [CAM_CC_IPE_0_GDSC] = &cam_cc_ipe_0_gdsc, + [CAM_CC_SBI_GDSC] = &cam_cc_sbi_gdsc, + [CAM_CC_SFE_0_GDSC] = &cam_cc_sfe_0_gdsc, + [CAM_CC_SFE_1_GDSC] = &cam_cc_sfe_1_gdsc, + [CAM_CC_SFE_2_GDSC] = &cam_cc_sfe_2_gdsc, +}; + +static const struct qcom_reset_map cam_cc_sm8650_resets[] = { + [CAM_CC_BPS_BCR] = { 0x10000 }, + [CAM_CC_DRV_BCR] = { 0x13310 }, + [CAM_CC_ICP_BCR] = { 0x131a0 }, + [CAM_CC_IFE_0_BCR] = { 0x11000 }, + [CAM_CC_IFE_1_BCR] = { 0x12000 }, + [CAM_CC_IFE_2_BCR] = { 0x12050 }, + [CAM_CC_IPE_0_BCR] = { 0x1007c }, + [CAM_CC_QDSS_DEBUG_BCR] = { 0x13298 }, + [CAM_CC_SBI_BCR] = { 0x100e0 }, + [CAM_CC_SFE_0_BCR] = { 0x13054 }, + [CAM_CC_SFE_1_BCR] = { 0x130a4 }, + [CAM_CC_SFE_2_BCR] = { 0x130f4 }, +}; + +static const struct regmap_config cam_cc_sm8650_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x1603c, + .fast_io = true, +}; + +static struct qcom_cc_desc cam_cc_sm8650_desc = { + .config = &cam_cc_sm8650_regmap_config, + .clks = cam_cc_sm8650_clocks, + .num_clks = ARRAY_SIZE(cam_cc_sm8650_clocks), + .resets = cam_cc_sm8650_resets, + .num_resets = ARRAY_SIZE(cam_cc_sm8650_resets), + .gdscs = cam_cc_sm8650_gdscs, + .num_gdscs = ARRAY_SIZE(cam_cc_sm8650_gdscs), +}; + +static const struct of_device_id cam_cc_sm8650_match_table[] = { + { .compatible = "qcom,sm8650-camcc" }, + { } +}; +MODULE_DEVICE_TABLE(of, cam_cc_sm8650_match_table); + +static int cam_cc_sm8650_probe(struct platform_device *pdev) +{ + struct regmap *regmap; + int ret; + + ret = devm_pm_runtime_enable(&pdev->dev); + if (ret) + return ret; + + ret = pm_runtime_resume_and_get(&pdev->dev); + if (ret) + return ret; + + regmap = qcom_cc_map(pdev, &cam_cc_sm8650_desc); + if (IS_ERR(regmap)) { + pm_runtime_put(&pdev->dev); + return PTR_ERR(regmap); + } + + clk_lucid_ole_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config); + clk_lucid_ole_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config); + clk_rivian_evo_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config); + clk_lucid_ole_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config); + clk_lucid_ole_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config); + clk_lucid_ole_pll_configure(&cam_cc_pll5, regmap, &cam_cc_pll5_config); + clk_lucid_ole_pll_configure(&cam_cc_pll6, regmap, &cam_cc_pll6_config); + clk_lucid_ole_pll_configure(&cam_cc_pll7, regmap, &cam_cc_pll7_config); + clk_lucid_ole_pll_configure(&cam_cc_pll8, regmap, &cam_cc_pll8_config); + clk_lucid_ole_pll_configure(&cam_cc_pll9, regmap, &cam_cc_pll9_config); + clk_lucid_ole_pll_configure(&cam_cc_pll10, regmap, &cam_cc_pll10_config); + + /* Keep clocks always enabled */ + qcom_branch_set_clk_en(regmap, 0x13318); /* CAM_CC_DRV_AHB_CLK */ + qcom_branch_set_clk_en(regmap, 0x13314); /* CAM_CC_DRV_XO_CLK */ + qcom_branch_set_clk_en(regmap, 0x132ec); /* CAM_CC_GDSC_CLK */ + qcom_branch_set_clk_en(regmap, 0x13308); /* CAM_CC_SLEEP_CLK */ + + ret = qcom_cc_really_probe(pdev, &cam_cc_sm8650_desc, regmap); + + pm_runtime_put(&pdev->dev); + + return ret; +} + +static struct platform_driver cam_cc_sm8650_driver = { + .probe = cam_cc_sm8650_probe, + .driver = { + .name = "camcc-sm8650", + .of_match_table = cam_cc_sm8650_match_table, + }, +}; + 
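+/*
+ * The probe above follows the usual pattern for Qualcomm clock controllers:
+ * power the block up via runtime PM, map the regmap, program the PLLs, force
+ * a few housekeeping branches on and hand over to qcom_cc_really_probe().
+ * Consumers then simply go through the common clk API; a hypothetical sensor
+ * driver (device and rate made up purely for illustration) would do:
+ *
+ *	struct clk *mclk = devm_clk_get(dev, NULL);
+ *
+ *	if (IS_ERR(mclk))
+ *		return PTR_ERR(mclk);
+ *	clk_set_rate(mclk, 19200000);
+ *	return clk_prepare_enable(mclk);
+ */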
+module_platform_driver(cam_cc_sm8650_driver); + +MODULE_DESCRIPTION("QTI CAMCC SM8650 Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c index d4227909d1fe..c51647e37df8 100644 --- a/drivers/clk/qcom/clk-alpha-pll.c +++ b/drivers/clk/qcom/clk-alpha-pll.c @@ -2574,6 +2574,9 @@ static int clk_alpha_pll_stromer_plus_set_rate(struct clk_hw *hw, regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll), a >> ALPHA_BITWIDTH); + regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll), + PLL_ALPHA_EN, PLL_ALPHA_EN); + regmap_write(pll->clkr.regmap, PLL_MODE(pll), PLL_BYPASSNL); /* Wait five micro seconds or more */ diff --git a/drivers/clk/qcom/gcc-ipq9574.c b/drivers/clk/qcom/gcc-ipq9574.c index 0a3f846695b8..f8b9a1e93bef 100644 --- a/drivers/clk/qcom/gcc-ipq9574.c +++ b/drivers/clk/qcom/gcc-ipq9574.c @@ -2140,9 +2140,10 @@ static struct clk_rcg2 pcnoc_bfdcd_clk_src = { static struct clk_branch gcc_crypto_axi_clk = { .halt_reg = 0x16010, + .halt_check = BRANCH_HALT_VOTED, .clkr = { - .enable_reg = 0x16010, - .enable_mask = BIT(0), + .enable_reg = 0xb004, + .enable_mask = BIT(15), .hw.init = &(const struct clk_init_data) { .name = "gcc_crypto_axi_clk", .parent_hws = (const struct clk_hw *[]) { @@ -2156,9 +2157,10 @@ static struct clk_branch gcc_crypto_axi_clk = { static struct clk_branch gcc_crypto_ahb_clk = { .halt_reg = 0x16014, + .halt_check = BRANCH_HALT_VOTED, .clkr = { - .enable_reg = 0x16014, - .enable_mask = BIT(0), + .enable_reg = 0xb004, + .enable_mask = BIT(16), .hw.init = &(const struct clk_init_data) { .name = "gcc_crypto_ahb_clk", .parent_hws = (const struct clk_hw *[]) { diff --git a/drivers/clk/qcom/gcc-sm6350.c b/drivers/clk/qcom/gcc-sm6350.c index cf4a7b6e0b23..0559a33faf00 100644 --- a/drivers/clk/qcom/gcc-sm6350.c +++ b/drivers/clk/qcom/gcc-sm6350.c @@ -100,8 +100,8 @@ static struct clk_alpha_pll gpll6 = { .enable_mask = BIT(6), .hw.init = &(struct clk_init_data){ .name = "gpll6", - .parent_hws = (const struct clk_hw*[]){ - &gpll0.clkr.hw, + .parent_data = &(const struct clk_parent_data){ + .fw_name = "bi_tcxo", }, .num_parents = 1, .ops = &clk_alpha_pll_fixed_fabia_ops, @@ -124,7 +124,7 @@ static struct clk_alpha_pll_postdiv gpll6_out_even = { .clkr.hw.init = &(struct clk_init_data){ .name = "gpll6_out_even", .parent_hws = (const struct clk_hw*[]){ - &gpll0.clkr.hw, + &gpll6.clkr.hw, }, .num_parents = 1, .ops = &clk_alpha_pll_postdiv_fabia_ops, @@ -139,8 +139,8 @@ static struct clk_alpha_pll gpll7 = { .enable_mask = BIT(7), .hw.init = &(struct clk_init_data){ .name = "gpll7", - .parent_hws = (const struct clk_hw*[]){ - &gpll0.clkr.hw, + .parent_data = &(const struct clk_parent_data){ + .fw_name = "bi_tcxo", }, .num_parents = 1, .ops = &clk_alpha_pll_fixed_fabia_ops, diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c index df9618ab7eea..fa5fe4c2a2ee 100644 --- a/drivers/clk/qcom/gdsc.c +++ b/drivers/clk/qcom/gdsc.c @@ -363,6 +363,43 @@ static int gdsc_disable(struct generic_pm_domain *domain) return 0; } +static int gdsc_set_hwmode(struct generic_pm_domain *domain, struct device *dev, bool mode) +{ + struct gdsc *sc = domain_to_gdsc(domain); + int ret; + + ret = gdsc_hwctrl(sc, mode); + if (ret) + return ret; + + /* + * Wait for the GDSC to go through a power down and + * up cycle. If we poll the status register before the + * power cycle is finished we might read incorrect values. + */ + udelay(1); + + /* + * When the GDSC is switched to HW mode, HW can disable the GDSC. 
+ * When the GDSC is switched back to SW mode, the GDSC will be enabled + * again, hence we need to poll for GDSC to complete the power up. + */ + if (!mode) + return gdsc_poll_status(sc, GDSC_ON); + + return 0; +} + +static bool gdsc_get_hwmode(struct generic_pm_domain *domain, struct device *dev) +{ + struct gdsc *sc = domain_to_gdsc(domain); + u32 val; + + regmap_read(sc->regmap, sc->gdscr, &val); + + return !!(val & HW_CONTROL_MASK); +} + static int gdsc_init(struct gdsc *sc) { u32 mask, val; @@ -451,6 +488,10 @@ static int gdsc_init(struct gdsc *sc) sc->pd.power_off = gdsc_disable; if (!sc->pd.power_on) sc->pd.power_on = gdsc_enable; + if (sc->flags & HW_CTRL_TRIGGER) { + sc->pd.set_hwmode_dev = gdsc_set_hwmode; + sc->pd.get_hwmode_dev = gdsc_get_hwmode; + } ret = pm_genpd_init(&sc->pd, NULL, !on); if (ret) diff --git a/drivers/clk/qcom/gdsc.h b/drivers/clk/qcom/gdsc.h index 803512688336..1e2779b823d1 100644 --- a/drivers/clk/qcom/gdsc.h +++ b/drivers/clk/qcom/gdsc.h @@ -67,6 +67,7 @@ struct gdsc { #define ALWAYS_ON BIT(6) #define RETAIN_FF_ENABLE BIT(7) #define NO_RET_PERIPH BIT(8) +#define HW_CTRL_TRIGGER BIT(9) struct reset_controller_dev *rcdev; unsigned int *resets; unsigned int reset_count; diff --git a/drivers/clk/qcom/videocc-sc7280.c b/drivers/clk/qcom/videocc-sc7280.c index cdd59c6f60df..d55613a47ff7 100644 --- a/drivers/clk/qcom/videocc-sc7280.c +++ b/drivers/clk/qcom/videocc-sc7280.c @@ -236,7 +236,7 @@ static struct gdsc mvs0_gdsc = { .name = "mvs0_gdsc", }, .pwrsts = PWRSTS_OFF_ON, - .flags = HW_CTRL | RETAIN_FF_ENABLE, + .flags = HW_CTRL_TRIGGER | RETAIN_FF_ENABLE, }; static struct gdsc mvsc_gdsc = { diff --git a/drivers/clk/qcom/videocc-sm8250.c b/drivers/clk/qcom/videocc-sm8250.c index 016b596e03b3..cac10ccd362e 100644 --- a/drivers/clk/qcom/videocc-sm8250.c +++ b/drivers/clk/qcom/videocc-sm8250.c @@ -293,7 +293,7 @@ static struct gdsc mvs0_gdsc = { .pd = { .name = "mvs0_gdsc", }, - .flags = HW_CTRL, + .flags = HW_CTRL_TRIGGER, .pwrsts = PWRSTS_OFF_ON, }; @@ -302,7 +302,7 @@ static struct gdsc mvs1_gdsc = { .pd = { .name = "mvs1_gdsc", }, - .flags = HW_CTRL, + .flags = HW_CTRL_TRIGGER, .pwrsts = PWRSTS_OFF_ON, }; diff --git a/drivers/clk/qcom/videocc-sm8550.c b/drivers/clk/qcom/videocc-sm8550.c index d73f747d2474..c601c35e6724 100644 --- a/drivers/clk/qcom/videocc-sm8550.c +++ b/drivers/clk/qcom/videocc-sm8550.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include <linux/clk-provider.h> @@ -10,7 +10,7 @@ #include <linux/pm_runtime.h> #include <linux/regmap.h> -#include <dt-bindings/clock/qcom,sm8450-videocc.h> +#include <dt-bindings/clock/qcom,sm8650-videocc.h> #include "clk-alpha-pll.h" #include "clk-branch.h" @@ -35,7 +35,7 @@ static const struct pll_vco lucid_ole_vco[] = { { 249600000, 2300000000, 0 }, }; -static const struct alpha_pll_config video_cc_pll0_config = { +static struct alpha_pll_config video_cc_pll0_config = { .l = 0x25, .alpha = 0x8000, .config_ctl_val = 0x20485699, @@ -66,7 +66,7 @@ static struct clk_alpha_pll video_cc_pll0 = { }, }; -static const struct alpha_pll_config video_cc_pll1_config = { +static struct alpha_pll_config video_cc_pll1_config = { .l = 0x36, .alpha = 0xb000, .config_ctl_val = 0x20485699, @@ -117,6 +117,14 @@ static const struct clk_parent_data video_cc_parent_data_1[] = { { .hw = &video_cc_pll1.clkr.hw }, }; +static const struct parent_map video_cc_parent_map_2[] = { + { P_BI_TCXO, 0 }, +}; + +static const struct clk_parent_data video_cc_parent_data_2[] = { + { .index = DT_BI_TCXO }, +}; + static const struct freq_tbl ftbl_video_cc_mvs0_clk_src[] = { F(720000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0), F(1014000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0), @@ -126,6 +134,16 @@ static const struct freq_tbl ftbl_video_cc_mvs0_clk_src[] = { { } }; +static const struct freq_tbl ftbl_video_cc_mvs0_clk_src_sm8650[] = { + F(588000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0), + F(900000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0), + F(1140000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0), + F(1305000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0), + F(1440000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0), + F(1600000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0), + { } +}; + static struct clk_rcg2 video_cc_mvs0_clk_src = { .cmd_rcgr = 0x8000, .mnd_width = 0, @@ -149,6 +167,15 @@ static const struct freq_tbl ftbl_video_cc_mvs1_clk_src[] = { { } }; +static const struct freq_tbl ftbl_video_cc_mvs1_clk_src_sm8650[] = { + F(840000000, P_VIDEO_CC_PLL1_OUT_MAIN, 1, 0, 0), + F(1110000000, P_VIDEO_CC_PLL1_OUT_MAIN, 1, 0, 0), + F(1350000000, P_VIDEO_CC_PLL1_OUT_MAIN, 1, 0, 0), + F(1500000000, P_VIDEO_CC_PLL1_OUT_MAIN, 1, 0, 0), + F(1650000000, P_VIDEO_CC_PLL1_OUT_MAIN, 1, 0, 0), + { } +}; + static struct clk_rcg2 video_cc_mvs1_clk_src = { .cmd_rcgr = 0x8018, .mnd_width = 0, @@ -164,6 +191,26 @@ static struct clk_rcg2 video_cc_mvs1_clk_src = { }, }; +static const struct freq_tbl ftbl_video_cc_xo_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + { } +}; + +static struct clk_rcg2 video_cc_xo_clk_src = { + .cmd_rcgr = 0x810c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = video_cc_parent_map_2, + .freq_tbl = ftbl_video_cc_xo_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "video_cc_xo_clk_src", + .parent_data = video_cc_parent_data_2, + .num_parents = ARRAY_SIZE(video_cc_parent_data_2), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + static struct clk_regmap_div video_cc_mvs0_div_clk_src = { .reg = 0x80c4, .shift = 0, @@ -244,6 +291,26 @@ static struct clk_branch video_cc_mvs0_clk = { }, }; +static struct clk_branch video_cc_mvs0_shift_clk = { + .halt_reg = 0x8128, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x8128, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x8128, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "video_cc_mvs0_shift_clk", + .parent_hws = (const struct clk_hw*[]) { + &video_cc_xo_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = 
&clk_branch2_ops, + }, + }, +}; + static struct clk_branch video_cc_mvs0c_clk = { .halt_reg = 0x8064, .halt_check = BRANCH_HALT, @@ -262,6 +329,26 @@ static struct clk_branch video_cc_mvs0c_clk = { }, }; +static struct clk_branch video_cc_mvs0c_shift_clk = { + .halt_reg = 0x812c, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x812c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x812c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "video_cc_mvs0c_shift_clk", + .parent_hws = (const struct clk_hw*[]) { + &video_cc_xo_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + static struct clk_branch video_cc_mvs1_clk = { .halt_reg = 0x80e0, .halt_check = BRANCH_HALT_SKIP, @@ -282,6 +369,26 @@ static struct clk_branch video_cc_mvs1_clk = { }, }; +static struct clk_branch video_cc_mvs1_shift_clk = { + .halt_reg = 0x8130, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x8130, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x8130, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "video_cc_mvs1_shift_clk", + .parent_hws = (const struct clk_hw*[]) { + &video_cc_xo_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + static struct clk_branch video_cc_mvs1c_clk = { .halt_reg = 0x8090, .halt_check = BRANCH_HALT, @@ -300,6 +407,26 @@ static struct clk_branch video_cc_mvs1c_clk = { }, }; +static struct clk_branch video_cc_mvs1c_shift_clk = { + .halt_reg = 0x8134, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x8134, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x8134, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "video_cc_mvs1c_shift_clk", + .parent_hws = (const struct clk_hw*[]) { + &video_cc_xo_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + static struct gdsc video_cc_mvs0c_gdsc = { .gdscr = 0x804c, .en_rest_wait_val = 0x2, @@ -363,6 +490,7 @@ static struct clk_regmap *video_cc_sm8550_clocks[] = { [VIDEO_CC_MVS1C_DIV2_DIV_CLK_SRC] = &video_cc_mvs1c_div2_div_clk_src.clkr, [VIDEO_CC_PLL0] = &video_cc_pll0.clkr, [VIDEO_CC_PLL1] = &video_cc_pll1.clkr, + [VIDEO_CC_XO_CLK_SRC] = NULL, }; static struct gdsc *video_cc_sm8550_gdscs[] = { @@ -380,6 +508,7 @@ static const struct qcom_reset_map video_cc_sm8550_resets[] = { [CVP_VIDEO_CC_MVS1C_BCR] = { 0x8074 }, [VIDEO_CC_MVS0C_CLK_ARES] = { .reg = 0x8064, .bit = 2, .udelay = 1000 }, [VIDEO_CC_MVS1C_CLK_ARES] = { .reg = 0x8090, .bit = 2, .udelay = 1000 }, + [VIDEO_CC_XO_CLK_ARES] = { .reg = 0x8124, .bit = 2, .udelay = 100 }, }; static const struct regmap_config video_cc_sm8550_regmap_config = { @@ -402,6 +531,7 @@ static struct qcom_cc_desc video_cc_sm8550_desc = { static const struct of_device_id video_cc_sm8550_match_table[] = { { .compatible = "qcom,sm8550-videocc" }, + { .compatible = "qcom,sm8650-videocc" }, { } }; MODULE_DEVICE_TABLE(of, video_cc_sm8550_match_table); @@ -410,6 +540,7 @@ static int video_cc_sm8550_probe(struct platform_device *pdev) { struct regmap *regmap; int ret; + u32 sleep_clk_offset = 0x8140; ret = devm_pm_runtime_enable(&pdev->dev); if (ret) @@ -425,12 +556,27 @@ static int video_cc_sm8550_probe(struct platform_device *pdev) return PTR_ERR(regmap); } + if (of_device_is_compatible(pdev->dev.of_node, "qcom,sm8650-videocc")) { + sleep_clk_offset = 0x8150; + video_cc_pll0_config.l = 0x1e; + video_cc_pll0_config.alpha = 0xa000; + 
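+		/* PLL1 and the MVS0/MVS1 frequency tables likewise take SM8650-specific values */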
video_cc_pll1_config.l = 0x2b; + video_cc_pll1_config.alpha = 0xc000; + video_cc_mvs0_clk_src.freq_tbl = ftbl_video_cc_mvs0_clk_src_sm8650; + video_cc_mvs1_clk_src.freq_tbl = ftbl_video_cc_mvs1_clk_src_sm8650; + video_cc_sm8550_clocks[VIDEO_CC_MVS0_SHIFT_CLK] = &video_cc_mvs0_shift_clk.clkr; + video_cc_sm8550_clocks[VIDEO_CC_MVS0C_SHIFT_CLK] = &video_cc_mvs0c_shift_clk.clkr; + video_cc_sm8550_clocks[VIDEO_CC_MVS1_SHIFT_CLK] = &video_cc_mvs1_shift_clk.clkr; + video_cc_sm8550_clocks[VIDEO_CC_MVS1C_SHIFT_CLK] = &video_cc_mvs1c_shift_clk.clkr; + video_cc_sm8550_clocks[VIDEO_CC_XO_CLK_SRC] = &video_cc_xo_clk_src.clkr; + } + clk_lucid_ole_pll_configure(&video_cc_pll0, regmap, &video_cc_pll0_config); clk_lucid_ole_pll_configure(&video_cc_pll1, regmap, &video_cc_pll1_config); /* Keep some clocks always-on */ qcom_branch_set_clk_en(regmap, 0x80f4); /* VIDEO_CC_AHB_CLK */ - qcom_branch_set_clk_en(regmap, 0x8140); /* VIDEO_CC_SLEEP_CLK */ + qcom_branch_set_clk_en(regmap, sleep_clk_offset); /* VIDEO_CC_SLEEP_CLK */ qcom_branch_set_clk_en(regmap, 0x8124); /* VIDEO_CC_XO_CLK */ ret = qcom_cc_really_probe(pdev, &video_cc_sm8550_desc, regmap); diff --git a/drivers/clk/sunxi-ng/ccu_common.c b/drivers/clk/sunxi-ng/ccu_common.c index ac0091b4ce24..be375ce0149c 100644 --- a/drivers/clk/sunxi-ng/ccu_common.c +++ b/drivers/clk/sunxi-ng/ccu_common.c @@ -132,7 +132,6 @@ static int sunxi_ccu_probe(struct sunxi_ccu *ccu, struct device *dev, for (i = 0; i < desc->hw_clks->num ; i++) { struct clk_hw *hw = desc->hw_clks->hws[i]; - struct ccu_common *common = hw_to_ccu_common(hw); const char *name; if (!hw) @@ -147,14 +146,21 @@ static int sunxi_ccu_probe(struct sunxi_ccu *ccu, struct device *dev, pr_err("Couldn't register clock %d - %s\n", i, name); goto err_clk_unreg; } + } + + for (i = 0; i < desc->num_ccu_clks; i++) { + struct ccu_common *cclk = desc->ccu_clks[i]; + + if (!cclk) + continue; - if (common->max_rate) - clk_hw_set_rate_range(hw, common->min_rate, - common->max_rate); + if (cclk->max_rate) + clk_hw_set_rate_range(&cclk->hw, cclk->min_rate, + cclk->max_rate); else - WARN(common->min_rate, + WARN(cclk->min_rate, "No max_rate, ignoring min_rate of clock %d - %s\n", - i, name); + i, clk_hw_get_name(&cclk->hw)); } ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 34faa0320ece..95dd4660b5b6 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -134,6 +134,16 @@ config RDA_TIMER help Enables the support for the RDA Micro timer driver. +config REALTEK_OTTO_TIMER + bool "Clocksource/timer for the Realtek Otto platform" if COMPILE_TEST + select TIMER_OF + help + This driver adds support for the timers found in the Realtek RTL83xx + and RTL93xx SoC series. This includes chips such as RTL8380, RTL8381 + and RTL8382, as well as chips from the RTL839x series, such as RTL8390, + RTL8391, RTL8392, RTL8393 and RTL8396, and chips of the RTL930x series + such as RTL9301, RTL9302 or RTL9303. 
+ config SUN4I_TIMER bool "Sun4i timer driver" if COMPILE_TEST depends on HAS_IOMEM diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index 4bb856e4df55..22743785299e 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -59,6 +59,7 @@ obj-$(CONFIG_MILBEAUT_TIMER) += timer-milbeaut.o obj-$(CONFIG_SPRD_TIMER) += timer-sprd.o obj-$(CONFIG_NPCM7XX_TIMER) += timer-npcm7xx.o obj-$(CONFIG_RDA_TIMER) += timer-rda.o +obj-$(CONFIG_REALTEK_OTTO_TIMER) += timer-rtl-otto.o obj-$(CONFIG_ARC_TIMERS) += arc_timer.o obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index 5bb43cc1a8df..aeafc74181f0 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -1556,7 +1556,7 @@ static int __init arch_timer_mem_frame_register(struct arch_timer_mem_frame *frame) { void __iomem *base; - int ret, irq = 0; + int ret, irq; if (arch_timer_mem_use_virtual) irq = frame->virt_irq; diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c index ab1c8c2b66b8..a05cfaab5f84 100644 --- a/drivers/clocksource/arm_global_timer.c +++ b/drivers/clocksource/arm_global_timer.c @@ -343,7 +343,7 @@ static int __init global_timer_of_register(struct device_node *np) { struct clk *gt_clk; static unsigned long gt_clk_rate; - int err = 0; + int err; /* * In A9 r2p0 the comparators for each processor with the global timer diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c index b3ae38f36720..110347707ff9 100644 --- a/drivers/clocksource/mips-gic-timer.c +++ b/drivers/clocksource/mips-gic-timer.c @@ -19,6 +19,7 @@ static DEFINE_PER_CPU(struct clock_event_device, gic_clockevent_device); static int gic_timer_irq; static unsigned int gic_frequency; +static unsigned int gic_count_width; static bool __read_mostly gic_clock_unstable; static void gic_clocksource_unstable(char *reason); @@ -186,18 +187,21 @@ static void gic_clocksource_unstable(char *reason) static int __init __gic_clocksource_init(void) { - unsigned int count_width; int ret; /* Set clocksource mask. */ - count_width = read_gic_config() & GIC_CONFIG_COUNTBITS; - count_width >>= __ffs(GIC_CONFIG_COUNTBITS); - count_width *= 4; - count_width += 32; - gic_clocksource.mask = CLOCKSOURCE_MASK(count_width); + gic_count_width = read_gic_config() & GIC_CONFIG_COUNTBITS; + gic_count_width >>= __ffs(GIC_CONFIG_COUNTBITS); + gic_count_width *= 4; + gic_count_width += 32; + gic_clocksource.mask = CLOCKSOURCE_MASK(gic_count_width); /* Calculate a somewhat reasonable rating value. */ - gic_clocksource.rating = 200 + gic_frequency / 10000000; + if (mips_cm_revision() >= CM_REV_CM3 || !IS_ENABLED(CONFIG_CPU_FREQ)) + gic_clocksource.rating = 300; /* Good when frequency is stable */ + else + gic_clocksource.rating = 200; + gic_clocksource.rating += clamp(gic_frequency / 10000000, 0, 99); ret = clocksource_register_hz(&gic_clocksource, gic_frequency); if (ret < 0) @@ -260,7 +264,7 @@ static int __init gic_clocksource_of_init(struct device_node *node) if (mips_cm_revision() >= CM_REV_CM3 || !IS_ENABLED(CONFIG_CPU_FREQ)) { sched_clock_register(mips_cm_is64 ? 
gic_read_count_64 : gic_read_count_2x32, - 64, gic_frequency); + gic_count_width, gic_frequency); } return 0; diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index 26919556ef5f..b72b36e0abed 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c @@ -528,6 +528,7 @@ static void sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta) static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id) { struct sh_cmt_channel *ch = dev_id; + unsigned long flags; /* clear flags */ sh_cmt_write_cmcsr(ch, sh_cmt_read_cmcsr(ch) & @@ -558,6 +559,8 @@ static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id) ch->flags &= ~FLAG_SKIPEVENT; + raw_spin_lock_irqsave(&ch->lock, flags); + if (ch->flags & FLAG_REPROGRAM) { ch->flags &= ~FLAG_REPROGRAM; sh_cmt_clock_event_program_verify(ch, 1); @@ -570,6 +573,8 @@ static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id) ch->flags &= ~FLAG_IRQCONTEXT; + raw_spin_unlock_irqrestore(&ch->lock, flags); + return IRQ_HANDLED; } @@ -780,12 +785,18 @@ static int sh_cmt_clock_event_next(unsigned long delta, struct clock_event_device *ced) { struct sh_cmt_channel *ch = ced_to_sh_cmt(ced); + unsigned long flags; BUG_ON(!clockevent_state_oneshot(ced)); + + raw_spin_lock_irqsave(&ch->lock, flags); + if (likely(ch->flags & FLAG_IRQCONTEXT)) ch->next_match_value = delta - 1; else - sh_cmt_set_next(ch, delta - 1); + __sh_cmt_set_next(ch, delta - 1); + + raw_spin_unlock_irqrestore(&ch->lock, flags); return 0; } diff --git a/drivers/clocksource/timer-rtl-otto.c b/drivers/clocksource/timer-rtl-otto.c new file mode 100644 index 000000000000..8a3068b36e75 --- /dev/null +++ b/drivers/clocksource/timer-rtl-otto.c @@ -0,0 +1,291 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/clk.h> +#include <linux/clockchips.h> +#include <linux/cpu.h> +#include <linux/cpuhotplug.h> +#include <linux/cpumask.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/jiffies.h> +#include <linux/printk.h> +#include <linux/sched_clock.h> +#include "timer-of.h" + +#define RTTM_DATA 0x0 +#define RTTM_CNT 0x4 +#define RTTM_CTRL 0x8 +#define RTTM_INT 0xc + +#define RTTM_CTRL_ENABLE BIT(28) +#define RTTM_INT_PENDING BIT(16) +#define RTTM_INT_ENABLE BIT(20) + +/* + * The Otto platform provides multiple 28 bit timers/counters with the following + * operating logic. If enabled, the timer counts up. Per timer one can set a + * maximum counter value as an end marker. If the end marker is reached, the + * timer fires an interrupt. If the timer "overflows" by reaching the end marker + * or by adding 1 to 0x0fffffff, the counter is reset to 0. When this happens + * and the timer is in operating mode COUNTER, it stops. In mode TIMER it will + * continue to count up. + */ +#define RTTM_CTRL_COUNTER 0 +#define RTTM_CTRL_TIMER BIT(24) + +#define RTTM_BIT_COUNT 28 +#define RTTM_MIN_DELTA 8 +#define RTTM_MAX_DELTA CLOCKSOURCE_MASK(28) + +/* + * The timers are derived from the LXB clock frequency. Usually this is a fixed + * multiple of the 25 MHz oscillator. The 930X SOC is an exception to that: + * its LXB clock has only dividers and uses the 2.45 GHz switch PLL as its + * base. The only meaningful frequencies we can achieve from that are 175.000 + * MHz and 153.125 MHz. The greatest common divisor of all these possible + * speeds is 3125000 Hz. Pin the timers to this 3.125 MHz reference frequency. 
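+ * (Check: 25000000 = 2^6 * 5^8, 175000000 = 2^6 * 5^8 * 7 and 153125000 = 2^3 * 5^8 * 7^2, so the common divisor is indeed 2^3 * 5^8 = 3125000.)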
+ */ +#define RTTM_TICKS_PER_SEC 3125000 + +struct rttm_cs { + struct timer_of to; + struct clocksource cs; +}; + +/* Simple internal register functions */ +static inline void rttm_set_counter(void __iomem *base, unsigned int counter) +{ + iowrite32(counter, base + RTTM_CNT); +} + +static inline unsigned int rttm_get_counter(void __iomem *base) +{ + return ioread32(base + RTTM_CNT); +} + +static inline void rttm_set_period(void __iomem *base, unsigned int period) +{ + iowrite32(period, base + RTTM_DATA); +} + +static inline void rttm_disable_timer(void __iomem *base) +{ + iowrite32(0, base + RTTM_CTRL); +} + +static inline void rttm_enable_timer(void __iomem *base, u32 mode, u32 divisor) +{ + iowrite32(RTTM_CTRL_ENABLE | mode | divisor, base + RTTM_CTRL); +} + +static inline void rttm_ack_irq(void __iomem *base) +{ + iowrite32(ioread32(base + RTTM_INT) | RTTM_INT_PENDING, base + RTTM_INT); +} + +static inline void rttm_enable_irq(void __iomem *base) +{ + iowrite32(RTTM_INT_ENABLE, base + RTTM_INT); +} + +static inline void rttm_disable_irq(void __iomem *base) +{ + iowrite32(0, base + RTTM_INT); +} + +/* Aggregated control functions for kernel clock framework */ +#define RTTM_DEBUG(base) \ + pr_debug("------------- %d %p\n", \ + smp_processor_id(), base) + +static irqreturn_t rttm_timer_interrupt(int irq, void *dev_id) +{ + struct clock_event_device *clkevt = dev_id; + struct timer_of *to = to_timer_of(clkevt); + + rttm_ack_irq(to->of_base.base); + RTTM_DEBUG(to->of_base.base); + clkevt->event_handler(clkevt); + + return IRQ_HANDLED; +} + +static void rttm_stop_timer(void __iomem *base) +{ + rttm_disable_timer(base); + rttm_ack_irq(base); +} + +static void rttm_start_timer(struct timer_of *to, u32 mode) +{ + rttm_set_counter(to->of_base.base, 0); + rttm_enable_timer(to->of_base.base, mode, to->of_clk.rate / RTTM_TICKS_PER_SEC); +} + +static int rttm_next_event(unsigned long delta, struct clock_event_device *clkevt) +{ + struct timer_of *to = to_timer_of(clkevt); + + RTTM_DEBUG(to->of_base.base); + rttm_stop_timer(to->of_base.base); + rttm_set_period(to->of_base.base, delta); + rttm_start_timer(to, RTTM_CTRL_COUNTER); + + return 0; +} + +static int rttm_state_oneshot(struct clock_event_device *clkevt) +{ + struct timer_of *to = to_timer_of(clkevt); + + RTTM_DEBUG(to->of_base.base); + rttm_stop_timer(to->of_base.base); + rttm_set_period(to->of_base.base, RTTM_TICKS_PER_SEC / HZ); + rttm_start_timer(to, RTTM_CTRL_COUNTER); + + return 0; +} + +static int rttm_state_periodic(struct clock_event_device *clkevt) +{ + struct timer_of *to = to_timer_of(clkevt); + + RTTM_DEBUG(to->of_base.base); + rttm_stop_timer(to->of_base.base); + rttm_set_period(to->of_base.base, RTTM_TICKS_PER_SEC / HZ); + rttm_start_timer(to, RTTM_CTRL_TIMER); + + return 0; +} + +static int rttm_state_shutdown(struct clock_event_device *clkevt) +{ + struct timer_of *to = to_timer_of(clkevt); + + RTTM_DEBUG(to->of_base.base); + rttm_stop_timer(to->of_base.base); + + return 0; +} + +static void rttm_setup_timer(void __iomem *base) +{ + RTTM_DEBUG(base); + rttm_stop_timer(base); + rttm_set_period(base, 0); +} + +static u64 rttm_read_clocksource(struct clocksource *cs) +{ + struct rttm_cs *rcs = container_of(cs, struct rttm_cs, cs); + + return rttm_get_counter(rcs->to.of_base.base); +} + +/* Module initialization part. 
*/ +static DEFINE_PER_CPU(struct timer_of, rttm_to) = { + .flags = TIMER_OF_BASE | TIMER_OF_CLOCK | TIMER_OF_IRQ, + .of_irq = { + .flags = IRQF_PERCPU | IRQF_TIMER, + .handler = rttm_timer_interrupt, + }, + .clkevt = { + .rating = 400, + .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, + .set_state_periodic = rttm_state_periodic, + .set_state_shutdown = rttm_state_shutdown, + .set_state_oneshot = rttm_state_oneshot, + .set_next_event = rttm_next_event + }, +}; + +static int rttm_enable_clocksource(struct clocksource *cs) +{ + struct rttm_cs *rcs = container_of(cs, struct rttm_cs, cs); + + rttm_disable_irq(rcs->to.of_base.base); + rttm_setup_timer(rcs->to.of_base.base); + rttm_enable_timer(rcs->to.of_base.base, RTTM_CTRL_TIMER, + rcs->to.of_clk.rate / RTTM_TICKS_PER_SEC); + + return 0; +} + +struct rttm_cs rttm_cs = { + .to = { + .flags = TIMER_OF_BASE | TIMER_OF_CLOCK, + }, + .cs = { + .name = "realtek_otto_timer", + .rating = 400, + .mask = CLOCKSOURCE_MASK(RTTM_BIT_COUNT), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, + .read = rttm_read_clocksource, + } +}; + +static u64 notrace rttm_read_clock(void) +{ + return rttm_get_counter(rttm_cs.to.of_base.base); +} + +static int rttm_cpu_starting(unsigned int cpu) +{ + struct timer_of *to = per_cpu_ptr(&rttm_to, cpu); + + RTTM_DEBUG(to->of_base.base); + to->clkevt.cpumask = cpumask_of(cpu); + irq_force_affinity(to->of_irq.irq, to->clkevt.cpumask); + clockevents_config_and_register(&to->clkevt, RTTM_TICKS_PER_SEC, + RTTM_MIN_DELTA, RTTM_MAX_DELTA); + rttm_enable_irq(to->of_base.base); + + return 0; +} + +static int __init rttm_probe(struct device_node *np) +{ + unsigned int cpu, cpu_rollback; + struct timer_of *to; + unsigned int clkidx = num_possible_cpus(); + + /* Use the first n timers as per CPU clock event generators */ + for_each_possible_cpu(cpu) { + to = per_cpu_ptr(&rttm_to, cpu); + to->of_irq.index = to->of_base.index = cpu; + if (timer_of_init(np, to)) { + pr_err("setup of timer %d failed\n", cpu); + goto rollback; + } + rttm_setup_timer(to->of_base.base); + } + + /* Activate the (n+1)'th timer as a stable CPU clocksource. 
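+ * The first num_possible_cpus() timers are claimed above as per CPU clock event devices; the next free timer runs continuously and can therefore serve as a monotonic reference.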
*/ + to = &rttm_cs.to; + to->of_base.index = clkidx; + timer_of_init(np, to); + if (rttm_cs.to.of_base.base && rttm_cs.to.of_clk.rate) { + rttm_enable_clocksource(&rttm_cs.cs); + clocksource_register_hz(&rttm_cs.cs, RTTM_TICKS_PER_SEC); + sched_clock_register(rttm_read_clock, RTTM_BIT_COUNT, RTTM_TICKS_PER_SEC); + } else + pr_err("setup of timer %d as clocksource failed\n", clkidx); + + return cpuhp_setup_state(CPUHP_AP_REALTEK_TIMER_STARTING, + "timer/realtek:online", + rttm_cpu_starting, NULL); +rollback: + pr_err("timer registration failed\n"); + for_each_possible_cpu(cpu_rollback) { + if (cpu_rollback == cpu) + break; + to = per_cpu_ptr(&rttm_to, cpu_rollback); + timer_of_cleanup(to); + } + + return -EINVAL; +} + +TIMER_OF_DECLARE(otto_timer, "realtek,otto-timer", rttm_probe); diff --git a/drivers/counter/stm32-timer-cnt.c b/drivers/counter/stm32-timer-cnt.c index 0664ef969f79..186e73d6ccb4 100644 --- a/drivers/counter/stm32-timer-cnt.c +++ b/drivers/counter/stm32-timer-cnt.c @@ -465,7 +465,7 @@ static int stm32_count_events_configure(struct counter_device *counter) ret = stm32_count_capture_configure(counter, event_node->channel, true); if (ret) return ret; - dier |= TIM_DIER_CC_IE(event_node->channel); + dier |= TIM_DIER_CCxIE(event_node->channel + 1); break; default: /* should never reach this path */ @@ -478,7 +478,7 @@ static int stm32_count_events_configure(struct counter_device *counter) /* check for disabled capture events */ for (i = 0 ; i < priv->nchannels; i++) { - if (!(dier & TIM_DIER_CC_IE(i))) { + if (!(dier & TIM_DIER_CCxIE(i + 1))) { ret = stm32_count_capture_configure(counter, i, false); if (ret) return ret; diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 94e55c40970a..10cda6f2fe1d 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -262,6 +262,18 @@ config LOONGSON2_CPUFREQ If in doubt, say N. endif +if LOONGARCH +config LOONGSON3_CPUFREQ + tristate "Loongson3 CPUFreq Driver" + help + This option adds a CPUFreq driver for Loongson processors which + support software-configurable CPU frequency. + + Loongson-3 family processors support this feature. + + If in doubt, say N. +endif + if SPARC64 config SPARC_US3_CPUFREQ tristate "UltraSPARC-III CPU Frequency driver" diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86 index 438c9e75a04d..97c2d4f15d76 100644 --- a/drivers/cpufreq/Kconfig.x86 +++ b/drivers/cpufreq/Kconfig.x86 @@ -71,6 +71,7 @@ config X86_AMD_PSTATE_DEFAULT_MODE config X86_AMD_PSTATE_UT tristate "selftest for AMD Processor P-State driver" depends on X86 && ACPI_PROCESSOR + depends on X86_AMD_PSTATE default n help This kernel module is used for testing. It's safe to say M here. 
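Back in timer-rtl-otto.c above, the 28 bit counter pinned to the 3.125 MHz reference wraps after 2^28 / 3125000, roughly 85.9 seconds, which bounds both the clocksource mask and the largest oneshot delta (RTTM_MAX_DELTA). A standalone check of that arithmetic (plain C, illustration only, not part of the patch):

#include <stdio.h>

int main(void)
{
	const double ticks_per_sec = 3125000.0;			/* RTTM_TICKS_PER_SEC */
	const unsigned long max_count = (1UL << 28) - 1;	/* CLOCKSOURCE_MASK(28) */

	/* Wrap interval of the free-running 28 bit timer */
	printf("wrap after %.1f s\n", max_count / ticks_per_sec);	/* prints 85.9 */
	return 0;
}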
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 8d141c71b016..0f184031dd12 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -103,6 +103,7 @@ obj-$(CONFIG_POWERNV_CPUFREQ) += powernv-cpufreq.o # Other platform drivers obj-$(CONFIG_BMIPS_CPUFREQ) += bmips-cpufreq.o obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o +obj-$(CONFIG_LOONGSON3_CPUFREQ) += loongson3_cpufreq.o obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index 37f1cdf46d29..a8ca625a98b8 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -50,8 +50,6 @@ enum { #define AMD_MSR_RANGE (0x7) #define HYGON_MSR_RANGE (0x7) -#define MSR_K7_HWCR_CPB_DIS (1ULL << 25) - struct acpi_cpufreq_data { unsigned int resume; unsigned int cpu_feature; @@ -890,8 +888,10 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) if (perf->states[0].core_frequency * 1000 != freq_table[0].frequency) pr_warn(FW_WARN "P-state 0 is not max freq\n"); - if (acpi_cpufreq_driver.set_boost) + if (acpi_cpufreq_driver.set_boost) { set_boost(policy, acpi_cpufreq_driver.boost_enabled); + policy->boost_enabled = acpi_cpufreq_driver.boost_enabled; + } return result; @@ -906,7 +906,7 @@ err_free: return result; } -static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy) +static void acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy) { struct acpi_cpufreq_data *data = policy->driver_data; @@ -919,8 +919,6 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy) free_cpumask_var(data->freqdomain_cpus); kfree(policy->freq_table); kfree(data); - - return 0; } static int acpi_cpufreq_resume(struct cpufreq_policy *policy) diff --git a/drivers/cpufreq/amd-pstate-ut.c b/drivers/cpufreq/amd-pstate-ut.c index fc275d41d51e..66b73c308ce6 100644 --- a/drivers/cpufreq/amd-pstate-ut.c +++ b/drivers/cpufreq/amd-pstate-ut.c @@ -202,6 +202,7 @@ static void amd_pstate_ut_check_freq(u32 index) int cpu = 0; struct cpufreq_policy *policy = NULL; struct amd_cpudata *cpudata = NULL; + u32 nominal_freq_khz; for_each_possible_cpu(cpu) { policy = cpufreq_cpu_get(cpu); @@ -209,13 +210,14 @@ static void amd_pstate_ut_check_freq(u32 index) break; cpudata = policy->driver_data; - if (!((cpudata->max_freq >= cpudata->nominal_freq) && - (cpudata->nominal_freq > cpudata->lowest_nonlinear_freq) && + nominal_freq_khz = cpudata->nominal_freq*1000; + if (!((cpudata->max_freq >= nominal_freq_khz) && + (nominal_freq_khz > cpudata->lowest_nonlinear_freq) && (cpudata->lowest_nonlinear_freq > cpudata->min_freq) && (cpudata->min_freq > 0))) { amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL; pr_err("%s cpu%d max=%d >= nominal=%d > lowest_nonlinear=%d > min=%d > 0, the formula is incorrect!\n", - __func__, cpu, cpudata->max_freq, cpudata->nominal_freq, + __func__, cpu, cpudata->max_freq, nominal_freq_khz, cpudata->lowest_nonlinear_freq, cpudata->min_freq); goto skip_test; } @@ -229,13 +231,13 @@ static void amd_pstate_ut_check_freq(u32 index) if (cpudata->boost_supported) { if ((policy->max == cpudata->max_freq) || - (policy->max == cpudata->nominal_freq)) + (policy->max == nominal_freq_khz)) amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS; else { amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL; pr_err("%s cpu%d policy_max=%d should be equal cpu_max=%d or cpu_nominal=%d !\n", 
__func__, cpu, policy->max, cpudata->max_freq, - cpudata->nominal_freq); + nominal_freq_khz); goto skip_test; } } else { diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c index 9ad62dbe8bfb..68c616b572f2 100644 --- a/drivers/cpufreq/amd-pstate.c +++ b/drivers/cpufreq/amd-pstate.c @@ -51,6 +51,7 @@ #define AMD_PSTATE_TRANSITION_LATENCY 20000 #define AMD_PSTATE_TRANSITION_DELAY 1000 +#define AMD_PSTATE_FAST_CPPC_TRANSITION_DELAY 600 #define CPPC_HIGHEST_PERF_PERFORMANCE 196 #define CPPC_HIGHEST_PERF_DEFAULT 166 @@ -85,15 +86,6 @@ struct quirk_entry { u32 lowest_freq; }; -/* - * TODO: We need more time to fine tune processors with shared memory solution - * with community together. - * - * There are some performance drops on the CPU benchmarks which reports from - * Suse. We are co-working with them to fine tune the shared memory solution. So - * we disable it by default to go acpi-cpufreq on these processors and add a - * module parameter to be able to enable it manually for debugging. - */ static struct cpufreq_driver *current_pstate_driver; static struct cpufreq_driver amd_pstate_driver; static struct cpufreq_driver amd_pstate_epp_driver; @@ -157,7 +149,7 @@ static int __init dmi_matched_7k62_bios_bug(const struct dmi_system_id *dmi) * broken BIOS lack of nominal_freq and lowest_freq capabilities * definition in ACPI tables */ - if (boot_cpu_has(X86_FEATURE_ZEN2)) { + if (cpu_feature_enabled(X86_FEATURE_ZEN2)) { quirks = dmi->driver_data; pr_info("Overriding nominal and lowest frequencies for %s\n", dmi->ident); return 1; @@ -199,7 +191,7 @@ static s16 amd_pstate_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached) u64 epp; int ret; - if (boot_cpu_has(X86_FEATURE_CPPC)) { + if (cpu_feature_enabled(X86_FEATURE_CPPC)) { if (!cppc_req_cached) { epp = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &cppc_req_cached); @@ -247,12 +239,32 @@ static int amd_pstate_get_energy_pref_index(struct amd_cpudata *cpudata) return index; } +static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf, + u32 des_perf, u32 max_perf, bool fast_switch) +{ + if (fast_switch) + wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached)); + else + wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, + READ_ONCE(cpudata->cppc_req_cached)); +} + +DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf); + +static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata, + u32 min_perf, u32 des_perf, + u32 max_perf, bool fast_switch) +{ + static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf, + max_perf, fast_switch); +} + static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp) { int ret; struct cppc_perf_ctrls perf_ctrls; - if (boot_cpu_has(X86_FEATURE_CPPC)) { + if (cpu_feature_enabled(X86_FEATURE_CPPC)) { u64 value = READ_ONCE(cpudata->cppc_req_cached); value &= ~GENMASK_ULL(31, 24); @@ -263,6 +275,9 @@ static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp) if (!ret) cpudata->epp_cached = epp; } else { + amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U, + cpudata->max_limit_perf, false); + perf_ctrls.energy_perf = epp; ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1); if (ret) { @@ -281,10 +296,8 @@ static int amd_pstate_set_energy_pref_index(struct amd_cpudata *cpudata, int epp = -EINVAL; int ret; - if (!pref_index) { - pr_debug("EPP pref_index is invalid\n"); - return -EINVAL; - } + if (!pref_index) + epp = cpudata->epp_default; if (epp == -EINVAL) epp = epp_values[pref_index]; @@ -452,16 +465,6 @@ static inline int 
amd_pstate_init_perf(struct amd_cpudata *cpudata) return static_call(amd_pstate_init_perf)(cpudata); } -static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf, - u32 des_perf, u32 max_perf, bool fast_switch) -{ - if (fast_switch) - wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached)); - else - wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, - READ_ONCE(cpudata->cppc_req_cached)); -} - static void cppc_update_perf(struct amd_cpudata *cpudata, u32 min_perf, u32 des_perf, u32 max_perf, bool fast_switch) @@ -475,16 +478,6 @@ static void cppc_update_perf(struct amd_cpudata *cpudata, cppc_set_perf(cpudata->cpu, &perf_ctrls); } -DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf); - -static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata, - u32 min_perf, u32 des_perf, - u32 max_perf, bool fast_switch) -{ - static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf, - max_perf, fast_switch); -} - static inline bool amd_pstate_sample(struct amd_cpudata *cpudata) { u64 aperf, mperf, tsc; @@ -521,7 +514,10 @@ static inline bool amd_pstate_sample(struct amd_cpudata *cpudata) static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf, u32 des_perf, u32 max_perf, bool fast_switch, int gov_flags) { + unsigned long max_freq; + struct cpufreq_policy *policy = cpufreq_cpu_get(cpudata->cpu); u64 prev = READ_ONCE(cpudata->cppc_req_cached); + u32 nominal_perf = READ_ONCE(cpudata->nominal_perf); u64 value = prev; min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf, @@ -530,6 +526,9 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf, cpudata->max_limit_perf); des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf); + max_freq = READ_ONCE(cpudata->max_limit_freq); + policy->cur = div_u64(des_perf * max_freq, max_perf); + if ((cppc_state == AMD_PSTATE_GUIDED) && (gov_flags & CPUFREQ_GOV_DYNAMIC_SWITCHING)) { min_perf = des_perf; des_perf = 0; @@ -541,6 +540,10 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf, value &= ~AMD_CPPC_DES_PERF(~0L); value |= AMD_CPPC_DES_PERF(des_perf); + /* limit the max perf when core performance boost feature is disabled */ + if (!cpudata->boost_supported) + max_perf = min_t(unsigned long, nominal_perf, max_perf); + value &= ~AMD_CPPC_MAX_PERF(~0L); value |= AMD_CPPC_MAX_PERF(max_perf); @@ -651,10 +654,9 @@ static void amd_pstate_adjust_perf(unsigned int cpu, unsigned long capacity) { unsigned long max_perf, min_perf, des_perf, - cap_perf, lowest_nonlinear_perf, max_freq; + cap_perf, lowest_nonlinear_perf; struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); struct amd_cpudata *cpudata = policy->driver_data; - unsigned int target_freq; if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq) amd_pstate_update_min_max_limit(policy); @@ -662,7 +664,6 @@ static void amd_pstate_adjust_perf(unsigned int cpu, cap_perf = READ_ONCE(cpudata->highest_perf); lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf); - max_freq = READ_ONCE(cpudata->max_freq); des_perf = cap_perf; if (target_perf < capacity) @@ -680,51 +681,111 @@ static void amd_pstate_adjust_perf(unsigned int cpu, max_perf = min_perf; des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf); - target_freq = div_u64(des_perf * max_freq, max_perf); - policy->cur = target_freq; amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true, policy->governor->flags); cpufreq_cpu_put(policy); } -static int amd_pstate_set_boost(struct cpufreq_policy 
*policy, int state) +static int amd_pstate_cpu_boost_update(struct cpufreq_policy *policy, bool on) { struct amd_cpudata *cpudata = policy->driver_data; + struct cppc_perf_ctrls perf_ctrls; + u32 highest_perf, nominal_perf, nominal_freq, max_freq; int ret; - if (!cpudata->boost_supported) { - pr_err("Boost mode is not supported by this processor or SBIOS\n"); - return -EINVAL; + highest_perf = READ_ONCE(cpudata->highest_perf); + nominal_perf = READ_ONCE(cpudata->nominal_perf); + nominal_freq = READ_ONCE(cpudata->nominal_freq); + max_freq = READ_ONCE(cpudata->max_freq); + + if (boot_cpu_has(X86_FEATURE_CPPC)) { + u64 value = READ_ONCE(cpudata->cppc_req_cached); + + value &= ~GENMASK_ULL(7, 0); + value |= on ? highest_perf : nominal_perf; + WRITE_ONCE(cpudata->cppc_req_cached, value); + + wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value); + } else { + perf_ctrls.max_perf = on ? highest_perf : nominal_perf; + ret = cppc_set_perf(cpudata->cpu, &perf_ctrls); + if (ret) { + cpufreq_cpu_release(policy); + pr_debug("Failed to set max perf on CPU:%d. ret:%d\n", + cpudata->cpu, ret); + return ret; + } } - if (state) - policy->cpuinfo.max_freq = cpudata->max_freq; - else - policy->cpuinfo.max_freq = cpudata->nominal_freq * 1000; + if (on) + policy->cpuinfo.max_freq = max_freq; + else if (policy->cpuinfo.max_freq > nominal_freq * 1000) + policy->cpuinfo.max_freq = nominal_freq * 1000; policy->max = policy->cpuinfo.max_freq; - ret = freq_qos_update_request(&cpudata->req[1], - policy->cpuinfo.max_freq); - if (ret < 0) - return ret; + if (cppc_state == AMD_PSTATE_PASSIVE) { + ret = freq_qos_update_request(&cpudata->req[1], policy->cpuinfo.max_freq); + if (ret < 0) + pr_debug("Failed to update freq constraint: CPU%d\n", cpudata->cpu); + } - return 0; + return ret < 0 ? ret : 0; } -static void amd_pstate_boost_init(struct amd_cpudata *cpudata) +static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state) { - u32 highest_perf, nominal_perf; + struct amd_cpudata *cpudata = policy->driver_data; + int ret; - highest_perf = READ_ONCE(cpudata->highest_perf); - nominal_perf = READ_ONCE(cpudata->nominal_perf); + if (!cpudata->boost_supported) { + pr_err("Boost mode is not supported by this processor or SBIOS\n"); + return -EOPNOTSUPP; + } + mutex_lock(&amd_pstate_driver_lock); + ret = amd_pstate_cpu_boost_update(policy, state); + WRITE_ONCE(cpudata->boost_state, !ret ? state : false); + policy->boost_enabled = !ret ? state : false; + refresh_frequency_limits(policy); + mutex_unlock(&amd_pstate_driver_lock); - if (highest_perf <= nominal_perf) - return; + return ret; +} - cpudata->boost_supported = true; +static int amd_pstate_init_boost_support(struct amd_cpudata *cpudata) +{ + u64 boost_val; + int ret = -1; + + /* + * If the platform has no CPB support, or CPB is disabled, initialize the driver's + * boost_enabled state to false; this is not an error for the cpufreq core to handle.
+ */ + if (!cpu_feature_enabled(X86_FEATURE_CPB)) { + pr_debug_once("Boost CPB capabilities not present in the processor\n"); + ret = 0; + goto exit_err; + } + + /* at least one CPU supports CPB, even if others fail to set it up later */ current_pstate_driver->boost_enabled = true; + + ret = rdmsrl_on_cpu(cpudata->cpu, MSR_K7_HWCR, &boost_val); + if (ret) { + pr_err_once("failed to read initial CPU boost state!\n"); + ret = -EIO; + goto exit_err; + } + + if (!(boost_val & MSR_K7_HWCR_CPB_DIS)) + cpudata->boost_supported = true; + + return 0; + +exit_err: + cpudata->boost_supported = false; + return ret; } static void amd_perf_ctl_reset(unsigned int cpu) @@ -753,7 +814,7 @@ static int amd_pstate_get_highest_perf(int cpu, u32 *highest_perf) { int ret; - if (boot_cpu_has(X86_FEATURE_CPPC)) { + if (cpu_feature_enabled(X86_FEATURE_CPPC)) { u64 cap1; ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1); @@ -849,8 +910,12 @@ static u32 amd_pstate_get_transition_delay_us(unsigned int cpu) u32 transition_delay_ns; transition_delay_ns = cppc_get_transition_latency(cpu); - if (transition_delay_ns == CPUFREQ_ETERNAL) - return AMD_PSTATE_TRANSITION_DELAY; + if (transition_delay_ns == CPUFREQ_ETERNAL) { + if (cpu_feature_enabled(X86_FEATURE_FAST_CPPC)) + return AMD_PSTATE_FAST_CPPC_TRANSITION_DELAY; + else + return AMD_PSTATE_TRANSITION_DELAY; + } return transition_delay_ns / NSEC_PER_USEC; } @@ -921,12 +986,30 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata) WRITE_ONCE(cpudata->nominal_freq, nominal_freq); WRITE_ONCE(cpudata->max_freq, max_freq); + /* + * The values below must be initialized correctly, otherwise the driver will fail to load. + * max_freq is calculated according to (nominal_freq * highest_perf)/nominal_perf. + * lowest_nonlinear_freq is a value in the [min_freq, nominal_freq] range. + * Check the _CPC objects in the ACPI tables if any value is incorrect. + */ + if (min_freq <= 0 || max_freq <= 0 || nominal_freq <= 0 || min_freq > max_freq) { + pr_err("min_freq(%d) or max_freq(%d) or nominal_freq(%d) value is incorrect\n", + min_freq, max_freq, nominal_freq * 1000); + return -EINVAL; + } + + if (lowest_nonlinear_freq <= min_freq || lowest_nonlinear_freq > nominal_freq * 1000) { + pr_err("lowest_nonlinear_freq(%d) value is out of range [min_freq(%d), nominal_freq(%d)]\n", + lowest_nonlinear_freq, min_freq, nominal_freq * 1000); + return -EINVAL; + } + return 0; } static int amd_pstate_cpu_init(struct cpufreq_policy *policy) { - int min_freq, max_freq, nominal_freq, ret; + int min_freq, max_freq, ret; struct device *dev; struct amd_cpudata *cpudata; @@ -955,18 +1038,12 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy) if (ret) goto free_cpudata1; + ret = amd_pstate_init_boost_support(cpudata); + if (ret) + goto free_cpudata1; + min_freq = READ_ONCE(cpudata->min_freq); max_freq = READ_ONCE(cpudata->max_freq); - nominal_freq = READ_ONCE(cpudata->nominal_freq); - - if (min_freq <= 0 || max_freq <= 0 || - nominal_freq <= 0 || min_freq > max_freq) { - dev_err(dev, - "min_freq(%d) or max_freq(%d) or nominal_freq (%d) value is incorrect, check _CPC in ACPI tables\n", - min_freq, max_freq, nominal_freq); - ret = -EINVAL; - goto free_cpudata1; - } policy->cpuinfo.transition_latency = amd_pstate_get_transition_latency(policy->cpu); policy->transition_delay_us = amd_pstate_get_transition_delay_us(policy->cpu); @@ -977,10 +1054,12 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy) policy->cpuinfo.min_freq = min_freq; policy->cpuinfo.max_freq = max_freq; +
policy->boost_enabled = READ_ONCE(cpudata->boost_supported); + /* It will be updated by governor */ policy->cur = policy->cpuinfo.min_freq; - if (boot_cpu_has(X86_FEATURE_CPPC)) + if (cpu_feature_enabled(X86_FEATURE_CPPC)) policy->fast_switch_possible = true; ret = freq_qos_add_request(&policy->constraints, &cpudata->req[0], @@ -1002,7 +1081,6 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy) policy->driver_data = cpudata; - amd_pstate_boost_init(cpudata); if (!current_pstate_driver->adjust_perf) current_pstate_driver->adjust_perf = amd_pstate_adjust_perf; @@ -1015,7 +1093,7 @@ free_cpudata1: return ret; } -static int amd_pstate_cpu_exit(struct cpufreq_policy *policy) +static void amd_pstate_cpu_exit(struct cpufreq_policy *policy) { struct amd_cpudata *cpudata = policy->driver_data; @@ -1023,8 +1101,6 @@ static int amd_pstate_cpu_exit(struct cpufreq_policy *policy) freq_qos_remove_request(&cpudata->req[0]); policy->fast_switch_possible = false; kfree(cpudata); - - return 0; } static int amd_pstate_cpu_resume(struct cpufreq_policy *policy) @@ -1213,7 +1289,7 @@ static int amd_pstate_change_mode_without_dvr_change(int mode) cppc_state = mode; - if (boot_cpu_has(X86_FEATURE_CPPC) || cppc_state == AMD_PSTATE_ACTIVE) + if (cpu_feature_enabled(X86_FEATURE_CPPC) || cppc_state == AMD_PSTATE_ACTIVE) return 0; for_each_present_cpu(cpu) { @@ -1386,7 +1462,7 @@ static bool amd_pstate_acpi_pm_profile_undefined(void) static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy) { - int min_freq, max_freq, nominal_freq, ret; + int min_freq, max_freq, ret; struct amd_cpudata *cpudata; struct device *dev; u64 value; @@ -1417,17 +1493,12 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy) if (ret) goto free_cpudata1; + ret = amd_pstate_init_boost_support(cpudata); + if (ret) + goto free_cpudata1; + min_freq = READ_ONCE(cpudata->min_freq); max_freq = READ_ONCE(cpudata->max_freq); - nominal_freq = READ_ONCE(cpudata->nominal_freq); - if (min_freq <= 0 || max_freq <= 0 || - nominal_freq <= 0 || min_freq > max_freq) { - dev_err(dev, - "min_freq(%d) or max_freq(%d) or nominal_freq(%d) value is incorrect, check _CPC in ACPI tables\n", - min_freq, max_freq, nominal_freq); - ret = -EINVAL; - goto free_cpudata1; - } policy->cpuinfo.min_freq = min_freq; policy->cpuinfo.max_freq = max_freq; @@ -1436,11 +1507,13 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy) policy->driver_data = cpudata; - cpudata->epp_cached = amd_pstate_get_epp(cpudata, 0); + cpudata->epp_cached = cpudata->epp_default = amd_pstate_get_epp(cpudata, 0); policy->min = policy->cpuinfo.min_freq; policy->max = policy->cpuinfo.max_freq; + policy->boost_enabled = READ_ONCE(cpudata->boost_supported); + /* * Set the policy to provide a valid fallback value in case * the default cpufreq governor is neither powersave nor performance. 
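Both branches of amd_pstate_cpu_boost_update() above express the same idea: the max_perf field sits in bits 7:0 of the cached MSR_AMD_CPPC_REQ image, and toggling boost swaps highest_perf for nominal_perf in that field before the value is written back. Here is a self-contained userspace model of that read-modify-write; the 0xff mask mirrors the GENMASK_ULL(7, 0) in the patch, while the cached value and perf numbers are invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define CPPC_REQ_MAX_PERF_MASK	0xffULL		/* mirrors GENMASK_ULL(7, 0) */

/* Return a new CPPC request image with the max_perf byte replaced. */
static uint64_t cppc_req_set_max_perf(uint64_t req, uint8_t max_perf)
{
	req &= ~CPPC_REQ_MAX_PERF_MASK;
	req |= max_perf;
	return req;
}

int main(void)
{
	/* hypothetical cached request: min_perf and des_perf in higher bytes */
	uint64_t cppc_req_cached = 0x1010a0ULL;
	uint8_t highest_perf = 0xe6, nominal_perf = 0xa0;

	printf("boost on:  %#llx\n", (unsigned long long)
	       cppc_req_set_max_perf(cppc_req_cached, highest_perf));
	printf("boost off: %#llx\n", (unsigned long long)
	       cppc_req_set_max_perf(cppc_req_cached, nominal_perf));
	return 0;
}

The same pattern shows up in amd_pstate_set_epp(), which masks bits 31:24 instead; keeping a cached copy of the request value lets the driver rewrite one field without re-reading the MSR first.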
@@ -1451,7 +1524,7 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy) else policy->policy = CPUFREQ_POLICY_POWERSAVE; - if (boot_cpu_has(X86_FEATURE_CPPC)) { + if (cpu_feature_enabled(X86_FEATURE_CPPC)) { ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value); if (ret) return ret; @@ -1462,7 +1535,6 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy) return ret; WRITE_ONCE(cpudata->cppc_cap1_cached, value); } - amd_pstate_boost_init(cpudata); return 0; @@ -1471,7 +1543,7 @@ free_cpudata1: return ret; } -static int amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy) +static void amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy) { struct amd_cpudata *cpudata = policy->driver_data; @@ -1481,7 +1553,6 @@ static int amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy) } pr_debug("CPU %d exiting\n", policy->cpu); - return 0; } static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy) @@ -1541,7 +1612,7 @@ static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy) epp = 0; /* Set initial EPP value */ - if (boot_cpu_has(X86_FEATURE_CPPC)) { + if (cpu_feature_enabled(X86_FEATURE_CPPC)) { value &= ~GENMASK_ULL(31, 24); value |= (u64)epp << 24; } @@ -1564,6 +1635,12 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy) amd_pstate_epp_update_limit(policy); + /* + * policy->cur is never updated with the amd_pstate_epp driver, but it + * is used as a stale frequency value. So, keep it within limits. + */ + policy->cur = policy->min; + return 0; } @@ -1580,7 +1657,7 @@ static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata) value = READ_ONCE(cpudata->cppc_req_cached); max_perf = READ_ONCE(cpudata->highest_perf); - if (boot_cpu_has(X86_FEATURE_CPPC)) { + if (cpu_feature_enabled(X86_FEATURE_CPPC)) { wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value); } else { perf_ctrls.max_perf = max_perf; @@ -1614,7 +1691,7 @@ static void amd_pstate_epp_offline(struct cpufreq_policy *policy) value = READ_ONCE(cpudata->cppc_req_cached); mutex_lock(&amd_pstate_limits_lock); - if (boot_cpu_has(X86_FEATURE_CPPC)) { + if (cpu_feature_enabled(X86_FEATURE_CPPC)) { cpudata->epp_policy = CPUFREQ_POLICY_UNKNOWN; /* Set max perf same as min perf */ @@ -1718,6 +1795,7 @@ static struct cpufreq_driver amd_pstate_epp_driver = { .suspend = amd_pstate_epp_suspend, .resume = amd_pstate_epp_resume, .update_limits = amd_pstate_update_limits, + .set_boost = amd_pstate_set_boost, .name = "amd-pstate-epp", .attr = amd_pstate_epp_attr, }; @@ -1741,6 +1819,46 @@ static int __init amd_pstate_set_driver(int mode_idx) return -EINVAL; } +/* + * CPPC is not supported on family 17h CPUs with model IDs ranging from 0x10 to 0x2F; + * show a debug message that helps to check whether such a CPU has CPPC support, for + * diagnosing driver loading issues. + */ +static bool amd_cppc_supported(void) +{ + struct cpuinfo_x86 *c = &cpu_data(0); + bool warn = false; + + if ((boot_cpu_data.x86 == 0x17) && (boot_cpu_data.x86_model < 0x30)) { + pr_debug_once("CPPC feature is not supported by the processor\n"); + return false; + } + + /* + * If the CPPC feature is disabled in the BIOS for processors that support MSR-based CPPC, + * the AMD Pstate driver may not function correctly. + * Check the CPPC flag and display a warning message if the platform supports CPPC. + * Note: the check below will not abort the driver registration process; + * it exists only for debugging purposes.
+ */ + if (!cpu_feature_enabled(X86_FEATURE_CPPC)) { + if (cpu_feature_enabled(X86_FEATURE_ZEN1) || cpu_feature_enabled(X86_FEATURE_ZEN2)) { + if (c->x86_model > 0x60 && c->x86_model < 0xaf) + warn = true; + } else if (cpu_feature_enabled(X86_FEATURE_ZEN3) || cpu_feature_enabled(X86_FEATURE_ZEN4)) { + if ((c->x86_model > 0x10 && c->x86_model < 0x1F) || + (c->x86_model > 0x40 && c->x86_model < 0xaf)) + warn = true; + } else if (cpu_feature_enabled(X86_FEATURE_ZEN5)) { + warn = true; + } + } + + if (warn) + pr_warn_once("The CPPC feature is supported but currently disabled by the BIOS.\n" + "Please enable it if your BIOS has the CPPC option.\n"); + return true; +} + static int __init amd_pstate_init(void) { struct device *dev_root; @@ -1749,6 +1867,11 @@ static int __init amd_pstate_init(void) if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) return -ENODEV; + /* show debug message only if CPPC is not supported */ + if (!amd_cppc_supported()) + return -EOPNOTSUPP; + + /* show warning message when BIOS broken or ACPI disabled */ if (!acpi_cpc_valid()) { pr_warn_once("the _CPC object is not present in SBIOS or ACPI disabled\n"); return -ENODEV; @@ -1763,35 +1886,43 @@ static int __init amd_pstate_init(void) /* check if this machine need CPPC quirks */ dmi_check_system(amd_pstate_quirks_table); - switch (cppc_state) { - case AMD_PSTATE_UNDEFINED: + /* + * determine the driver mode from the command line or kernel config. + * If no command line input is provided, cppc_state will be AMD_PSTATE_UNDEFINED. + * command line options will override the kernel config settings. + */ + + if (cppc_state == AMD_PSTATE_UNDEFINED) { /* Disable on the following configs by default: * 1. Undefined platforms * 2. Server platforms - * 3. Shared memory designs */ if (amd_pstate_acpi_pm_profile_undefined() || - amd_pstate_acpi_pm_profile_server() || - !boot_cpu_has(X86_FEATURE_CPPC)) { + amd_pstate_acpi_pm_profile_server()) { pr_info("driver load is disabled, boot with specific mode to enable this\n"); return -ENODEV; } - ret = amd_pstate_set_driver(CONFIG_X86_AMD_PSTATE_DEFAULT_MODE); - if (ret) - return ret; - break; + /* get driver mode from kernel config option [1:4] */ + cppc_state = CONFIG_X86_AMD_PSTATE_DEFAULT_MODE; + } + + switch (cppc_state) { case AMD_PSTATE_DISABLE: + pr_info("driver load is disabled, boot with specific mode to enable this\n"); return -ENODEV; case AMD_PSTATE_PASSIVE: case AMD_PSTATE_ACTIVE: case AMD_PSTATE_GUIDED: + ret = amd_pstate_set_driver(cppc_state); + if (ret) + return ret; break; default: return -EINVAL; } /* capability check */ - if (boot_cpu_has(X86_FEATURE_CPPC)) { + if (cpu_feature_enabled(X86_FEATURE_CPPC)) { pr_debug("AMD CPPC MSR based functionality is supported\n"); if (cppc_state != AMD_PSTATE_ACTIVE) current_pstate_driver->adjust_perf = amd_pstate_adjust_perf; @@ -1805,13 +1936,15 @@ static int __init amd_pstate_init(void) /* enable amd pstate feature */ ret = amd_pstate_enable(true); if (ret) { - pr_err("failed to enable with return %d\n", ret); + pr_err("failed to enable driver mode(%d)\n", cppc_state); return ret; } ret = cpufreq_register_driver(current_pstate_driver); - if (ret) + if (ret) { pr_err("failed to register with return %d\n", ret); + goto disable_driver; + } dev_root = bus_get_dev_root(&cpu_subsys); if (dev_root) { @@ -1827,6 +1960,8 @@ static int __init amd_pstate_init(void) global_attr_free: cpufreq_unregister_driver(current_pstate_driver); +disable_driver: + amd_pstate_enable(false); return ret; } device_initcall(amd_pstate_init); diff --git 
a/drivers/cpufreq/amd-pstate.h b/drivers/cpufreq/amd-pstate.h index e6a28e7f4dbf..cc8bb2bc325a 100644 --- a/drivers/cpufreq/amd-pstate.h +++ b/drivers/cpufreq/amd-pstate.h @@ -99,6 +99,8 @@ struct amd_cpudata { u32 policy; u64 cppc_cap1_cached; bool suspended; + s16 epp_default; + bool boost_state; }; #endif /* _LINUX_AMD_PSTATE_H */ diff --git a/drivers/cpufreq/apple-soc-cpufreq.c b/drivers/cpufreq/apple-soc-cpufreq.c index 021f423705e1..af34c22fa273 100644 --- a/drivers/cpufreq/apple-soc-cpufreq.c +++ b/drivers/cpufreq/apple-soc-cpufreq.c @@ -305,7 +305,7 @@ out_iounmap: return ret; } -static int apple_soc_cpufreq_exit(struct cpufreq_policy *policy) +static void apple_soc_cpufreq_exit(struct cpufreq_policy *policy) { struct apple_cpu_priv *priv = policy->driver_data; @@ -313,8 +313,6 @@ static int apple_soc_cpufreq_exit(struct cpufreq_policy *policy) dev_pm_opp_remove_all_dynamic(priv->cpu_dev); iounmap(priv->reg_base); kfree(priv); - - return 0; } static struct cpufreq_driver apple_soc_cpufreq_driver = { diff --git a/drivers/cpufreq/bmips-cpufreq.c b/drivers/cpufreq/bmips-cpufreq.c index 39221a9a187a..17a4c174553d 100644 --- a/drivers/cpufreq/bmips-cpufreq.c +++ b/drivers/cpufreq/bmips-cpufreq.c @@ -121,11 +121,9 @@ static int bmips_cpufreq_target_index(struct cpufreq_policy *policy, return 0; } -static int bmips_cpufreq_exit(struct cpufreq_policy *policy) +static void bmips_cpufreq_exit(struct cpufreq_policy *policy) { kfree(policy->freq_table); - - return 0; } static int bmips_cpufreq_init(struct cpufreq_policy *policy) diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index 15f1d41920a3..bafa32dd375d 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c @@ -291,15 +291,10 @@ static int cppc_cpufreq_set_target(struct cpufreq_policy *policy, struct cppc_cpudata *cpu_data = policy->driver_data; unsigned int cpu = policy->cpu; struct cpufreq_freqs freqs; - u32 desired_perf; int ret = 0; - desired_perf = cppc_khz_to_perf(&cpu_data->perf_caps, target_freq); - /* Return if it is exactly the same perf */ - if (desired_perf == cpu_data->perf_ctrls.desired_perf) - return ret; - - cpu_data->perf_ctrls.desired_perf = desired_perf; + cpu_data->perf_ctrls.desired_perf = + cppc_khz_to_perf(&cpu_data->perf_caps, target_freq); freqs.old = policy->cur; freqs.new = target_freq; @@ -688,7 +683,7 @@ out: return ret; } -static int cppc_cpufreq_cpu_exit(struct cpufreq_policy *policy) +static void cppc_cpufreq_cpu_exit(struct cpufreq_policy *policy) { struct cppc_cpudata *cpu_data = policy->driver_data; struct cppc_perf_caps *caps = &cpu_data->perf_caps; @@ -705,7 +700,6 @@ static int cppc_cpufreq_cpu_exit(struct cpufreq_policy *policy) caps->lowest_perf, cpu, ret); cppc_cpufreq_put_cpu_data(policy); - return 0; } static inline u64 get_delta(u64 t1, u64 t0) diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index c74dd1e01e0d..cac379ba006d 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -233,4 +233,5 @@ create_pdev: sizeof(struct cpufreq_dt_platform_data))); } core_initcall(cpufreq_dt_platdev_init); +MODULE_DESCRIPTION("Generic DT based cpufreq platdev driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index 907e22632fda..6532c4d71338 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c @@ -157,10 +157,9 @@ static int cpufreq_offline(struct cpufreq_policy *policy) return 0; } -static int 
cpufreq_exit(struct cpufreq_policy *policy) +static void cpufreq_exit(struct cpufreq_policy *policy) { clk_put(policy->clk); - return 0; } static struct cpufreq_driver dt_cpufreq_driver = { diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c index f7a7bcf6f52e..fedad1081973 100644 --- a/drivers/cpufreq/cpufreq-nforce2.c +++ b/drivers/cpufreq/cpufreq-nforce2.c @@ -359,11 +359,6 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy) return 0; } -static int nforce2_cpu_exit(struct cpufreq_policy *policy) -{ - return 0; -} - static struct cpufreq_driver nforce2_driver = { .name = "nforce2", .flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING, @@ -371,7 +366,6 @@ static struct cpufreq_driver nforce2_driver = { .target = nforce2_target, .get = nforce2_get, .init = nforce2_cpu_init, - .exit = nforce2_cpu_exit, }; #ifdef MODULE diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index a45aac17c20f..04fc786dd2c0 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -608,16 +608,15 @@ EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us); static ssize_t show_boost(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { - return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled); + return sysfs_emit(buf, "%d\n", cpufreq_driver->boost_enabled); } static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { - int ret, enable; + bool enable; - ret = sscanf(buf, "%d", &enable); - if (ret != 1 || enable < 0 || enable > 1) + if (kstrtobool(buf, &enable)) return -EINVAL; if (cpufreq_boost_trigger_state(enable)) { @@ -641,10 +640,10 @@ static ssize_t show_local_boost(struct cpufreq_policy *policy, char *buf) static ssize_t store_local_boost(struct cpufreq_policy *policy, const char *buf, size_t count) { - int ret, enable; + int ret; + bool enable; - ret = kstrtoint(buf, 10, &enable); - if (ret || enable < 0 || enable > 1) + if (kstrtobool(buf, &enable)) return -EINVAL; if (!cpufreq_driver->boost_enabled) @@ -739,7 +738,7 @@ static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor) static ssize_t show_##file_name \ (struct cpufreq_policy *policy, char *buf) \ { \ - return sprintf(buf, "%u\n", policy->object); \ + return sysfs_emit(buf, "%u\n", policy->object); \ } show_one(cpuinfo_min_freq, cpuinfo.min_freq); @@ -760,11 +759,11 @@ static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf) freq = arch_freq_get_on_cpu(policy->cpu); if (freq) - ret = sprintf(buf, "%u\n", freq); + ret = sysfs_emit(buf, "%u\n", freq); else if (cpufreq_driver->setpolicy && cpufreq_driver->get) - ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu)); + ret = sysfs_emit(buf, "%u\n", cpufreq_driver->get(policy->cpu)); else - ret = sprintf(buf, "%u\n", policy->cur); + ret = sysfs_emit(buf, "%u\n", policy->cur); return ret; } @@ -798,9 +797,9 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy, unsigned int cur_freq = __cpufreq_get(policy); if (cur_freq) - return sprintf(buf, "%u\n", cur_freq); + return sysfs_emit(buf, "%u\n", cur_freq); - return sprintf(buf, "<unknown>\n"); + return sysfs_emit(buf, "<unknown>\n"); } /* @@ -809,12 +808,11 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy, static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf) { if (policy->policy == CPUFREQ_POLICY_POWERSAVE) - return sprintf(buf, "powersave\n"); + return sysfs_emit(buf, "powersave\n"); else if (policy->policy == 
CPUFREQ_POLICY_PERFORMANCE) - return sprintf(buf, "performance\n"); + return sysfs_emit(buf, "performance\n"); else if (policy->governor) - return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", - policy->governor->name); + return sysfs_emit(buf, "%s\n", policy->governor->name); return -EINVAL; } @@ -873,7 +871,7 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy, struct cpufreq_governor *t; if (!has_target()) { - i += sprintf(buf, "performance powersave"); + i += sysfs_emit(buf, "performance powersave"); goto out; } @@ -882,11 +880,11 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy, if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char)) - (CPUFREQ_NAME_LEN + 2))) break; - i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name); + i += sysfs_emit_at(buf, i, "%s ", t->name); } mutex_unlock(&cpufreq_governor_mutex); out: - i += sprintf(&buf[i], "\n"); + i += sysfs_emit_at(buf, i, "\n"); return i; } @@ -896,7 +894,7 @@ ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf) unsigned int cpu; for_each_cpu(cpu, mask) { - i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u ", cpu); + i += sysfs_emit_at(buf, i, "%u ", cpu); if (i >= (PAGE_SIZE - 5)) break; } @@ -904,7 +902,7 @@ ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf) /* Remove the extra space at the end */ i--; - i += sprintf(&buf[i], "\n"); + i += sysfs_emit_at(buf, i, "\n"); return i; } EXPORT_SYMBOL_GPL(cpufreq_show_cpus); @@ -947,7 +945,7 @@ static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy, static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf) { if (!policy->governor || !policy->governor->show_setspeed) - return sprintf(buf, "<unsupported>\n"); + return sysfs_emit(buf, "<unsupported>\n"); return policy->governor->show_setspeed(policy, buf); } @@ -961,8 +959,8 @@ static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf) int ret; ret = cpufreq_driver->bios_limit(policy->cpu, &limit); if (!ret) - return sprintf(buf, "%u\n", limit); - return sprintf(buf, "%u\n", policy->cpuinfo.max_freq); + return sysfs_emit(buf, "%u\n", limit); + return sysfs_emit(buf, "%u\n", policy->cpuinfo.max_freq); } cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400); @@ -1431,7 +1429,8 @@ static int cpufreq_online(unsigned int cpu) } /* Let the per-policy boost flag mirror the cpufreq_driver boost during init */ - policy->boost_enabled = cpufreq_boost_enabled() && policy_has_boost_freq(policy); + if (cpufreq_boost_enabled() && policy_has_boost_freq(policy)) + policy->boost_enabled = true; /* * The initialization has succeeded and the policy is online. 
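The store_boost() and store_local_boost() rewrites above drop hand-rolled sscanf()/kstrtoint() parsing in favour of kstrtobool(), which also understands y/n and on/off spellings in addition to 0/1. A rough userspace approximation of that parser, modelled on (but not copied from) the kernel's lib/kstrtox.c:

#include <stdbool.h>
#include <stdio.h>

/* Approximation of kstrtobool(): the first character decides, with
 * "on"/"off" disambiguated by the second one. 0 on success, -1 on error. */
static int parse_bool(const char *s, bool *res)
{
	if (!s)
		return -1;

	switch (s[0]) {
	case 'y': case 'Y': case '1':
		*res = true;
		return 0;
	case 'n': case 'N': case '0':
		*res = false;
		return 0;
	case 'o': case 'O':
		switch (s[1]) {
		case 'n': case 'N':
			*res = true;
			return 0;
		case 'f': case 'F':
			*res = false;
			return 0;
		}
		break;
	}
	return -1;
}

int main(void)
{
	const char *inputs[] = { "1", "0", "y", "off", "2", "maybe" };
	unsigned int i;

	for (i = 0; i < sizeof(inputs) / sizeof(inputs[0]); i++) {
		bool v;

		if (parse_bool(inputs[i], &v))
			printf("%-6s -> error\n", inputs[i]);
		else
			printf("%-6s -> %d\n", inputs[i], v);
	}
	return 0;
}

Because only the leading characters are examined, input such as "1\n" from echo still parses cleanly, while strings like "2" keep failing with -EINVAL exactly as they did under the old open-coded checks.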
@@ -2875,7 +2874,7 @@ int cpufreq_enable_boost_support(void) } EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support); -int cpufreq_boost_enabled(void) +bool cpufreq_boost_enabled(void) { return cpufreq_driver->boost_enabled; } diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c index ab93bce8ae77..6e958b09e1b5 100644 --- a/drivers/cpufreq/e_powersaver.c +++ b/drivers/cpufreq/e_powersaver.c @@ -360,14 +360,13 @@ static int eps_cpu_init(struct cpufreq_policy *policy) return 0; } -static int eps_cpu_exit(struct cpufreq_policy *policy) +static void eps_cpu_exit(struct cpufreq_policy *policy) { unsigned int cpu = policy->cpu; /* Bye */ kfree(eps_cpu[cpu]); eps_cpu[cpu] = NULL; - return 0; } static struct cpufreq_driver eps_driver = { diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index c31914a9876f..392a8000b238 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -300,6 +300,7 @@ static struct cpufreq_driver *intel_pstate_driver __read_mostly; #define HYBRID_SCALING_FACTOR 78741 #define HYBRID_SCALING_FACTOR_MTL 80000 +#define HYBRID_SCALING_FACTOR_LNL 86957 static int hybrid_scaling_factor = HYBRID_SCALING_FACTOR; @@ -1625,17 +1626,24 @@ static void intel_pstate_notify_work(struct work_struct *work) static DEFINE_SPINLOCK(hwp_notify_lock); static cpumask_t hwp_intr_enable_mask; +#define HWP_GUARANTEED_PERF_CHANGE_STATUS BIT(0) +#define HWP_HIGHEST_PERF_CHANGE_STATUS BIT(3) + void notify_hwp_interrupt(void) { unsigned int this_cpu = smp_processor_id(); + u64 value, status_mask; unsigned long flags; - u64 value; - if (!hwp_active || !boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) + if (!hwp_active || !cpu_feature_enabled(X86_FEATURE_HWP_NOTIFY)) return; + status_mask = HWP_GUARANTEED_PERF_CHANGE_STATUS; + if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE)) + status_mask |= HWP_HIGHEST_PERF_CHANGE_STATUS; + rdmsrl_safe(MSR_HWP_STATUS, &value); - if (!(value & 0x01)) + if (!(value & status_mask)) return; spin_lock_irqsave(&hwp_notify_lock, flags); @@ -1659,7 +1667,7 @@ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata) { bool cancel_work; - if (!boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) + if (!cpu_feature_enabled(X86_FEATURE_HWP_NOTIFY)) return; /* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */ @@ -1673,17 +1681,25 @@ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata) cancel_delayed_work_sync(&cpudata->hwp_notify_work); } +#define HWP_GUARANTEED_PERF_CHANGE_REQ BIT(0) +#define HWP_HIGHEST_PERF_CHANGE_REQ BIT(2) + static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata) { - /* Enable HWP notification interrupt for guaranteed performance change */ + /* Enable HWP notification interrupt for performance change */ if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) { + u64 interrupt_mask = HWP_GUARANTEED_PERF_CHANGE_REQ; + spin_lock_irq(&hwp_notify_lock); INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work); cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask); spin_unlock_irq(&hwp_notify_lock); + if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE)) + interrupt_mask |= HWP_HIGHEST_PERF_CHANGE_REQ; + /* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */ - wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x01); + wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, interrupt_mask); wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0); } } @@ -2367,54 +2383,54 @@ static const struct pstate_funcs knl_funcs = { .get_val = 
core_get_val, }; -#define X86_MATCH(model, policy) \ - X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \ - X86_FEATURE_APERFMPERF, &policy) +#define X86_MATCH(vfm, policy) \ + X86_MATCH_VFM_FEATURE(vfm, X86_FEATURE_APERFMPERF, &policy) static const struct x86_cpu_id intel_pstate_cpu_ids[] = { - X86_MATCH(SANDYBRIDGE, core_funcs), - X86_MATCH(SANDYBRIDGE_X, core_funcs), - X86_MATCH(ATOM_SILVERMONT, silvermont_funcs), - X86_MATCH(IVYBRIDGE, core_funcs), - X86_MATCH(HASWELL, core_funcs), - X86_MATCH(BROADWELL, core_funcs), - X86_MATCH(IVYBRIDGE_X, core_funcs), - X86_MATCH(HASWELL_X, core_funcs), - X86_MATCH(HASWELL_L, core_funcs), - X86_MATCH(HASWELL_G, core_funcs), - X86_MATCH(BROADWELL_G, core_funcs), - X86_MATCH(ATOM_AIRMONT, airmont_funcs), - X86_MATCH(SKYLAKE_L, core_funcs), - X86_MATCH(BROADWELL_X, core_funcs), - X86_MATCH(SKYLAKE, core_funcs), - X86_MATCH(BROADWELL_D, core_funcs), - X86_MATCH(XEON_PHI_KNL, knl_funcs), - X86_MATCH(XEON_PHI_KNM, knl_funcs), - X86_MATCH(ATOM_GOLDMONT, core_funcs), - X86_MATCH(ATOM_GOLDMONT_PLUS, core_funcs), - X86_MATCH(SKYLAKE_X, core_funcs), - X86_MATCH(COMETLAKE, core_funcs), - X86_MATCH(ICELAKE_X, core_funcs), - X86_MATCH(TIGERLAKE, core_funcs), - X86_MATCH(SAPPHIRERAPIDS_X, core_funcs), - X86_MATCH(EMERALDRAPIDS_X, core_funcs), + X86_MATCH(INTEL_SANDYBRIDGE, core_funcs), + X86_MATCH(INTEL_SANDYBRIDGE_X, core_funcs), + X86_MATCH(INTEL_ATOM_SILVERMONT, silvermont_funcs), + X86_MATCH(INTEL_IVYBRIDGE, core_funcs), + X86_MATCH(INTEL_HASWELL, core_funcs), + X86_MATCH(INTEL_BROADWELL, core_funcs), + X86_MATCH(INTEL_IVYBRIDGE_X, core_funcs), + X86_MATCH(INTEL_HASWELL_X, core_funcs), + X86_MATCH(INTEL_HASWELL_L, core_funcs), + X86_MATCH(INTEL_HASWELL_G, core_funcs), + X86_MATCH(INTEL_BROADWELL_G, core_funcs), + X86_MATCH(INTEL_ATOM_AIRMONT, airmont_funcs), + X86_MATCH(INTEL_SKYLAKE_L, core_funcs), + X86_MATCH(INTEL_BROADWELL_X, core_funcs), + X86_MATCH(INTEL_SKYLAKE, core_funcs), + X86_MATCH(INTEL_BROADWELL_D, core_funcs), + X86_MATCH(INTEL_XEON_PHI_KNL, knl_funcs), + X86_MATCH(INTEL_XEON_PHI_KNM, knl_funcs), + X86_MATCH(INTEL_ATOM_GOLDMONT, core_funcs), + X86_MATCH(INTEL_ATOM_GOLDMONT_PLUS, core_funcs), + X86_MATCH(INTEL_SKYLAKE_X, core_funcs), + X86_MATCH(INTEL_COMETLAKE, core_funcs), + X86_MATCH(INTEL_ICELAKE_X, core_funcs), + X86_MATCH(INTEL_TIGERLAKE, core_funcs), + X86_MATCH(INTEL_SAPPHIRERAPIDS_X, core_funcs), + X86_MATCH(INTEL_EMERALDRAPIDS_X, core_funcs), {} }; MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); #ifdef CONFIG_ACPI static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = { - X86_MATCH(BROADWELL_D, core_funcs), - X86_MATCH(BROADWELL_X, core_funcs), - X86_MATCH(SKYLAKE_X, core_funcs), - X86_MATCH(ICELAKE_X, core_funcs), - X86_MATCH(SAPPHIRERAPIDS_X, core_funcs), + X86_MATCH(INTEL_BROADWELL_D, core_funcs), + X86_MATCH(INTEL_BROADWELL_X, core_funcs), + X86_MATCH(INTEL_SKYLAKE_X, core_funcs), + X86_MATCH(INTEL_ICELAKE_X, core_funcs), + X86_MATCH(INTEL_SAPPHIRERAPIDS_X, core_funcs), + X86_MATCH(INTEL_EMERALDRAPIDS_X, core_funcs), {} }; #endif static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = { - X86_MATCH(KABYLAKE, core_funcs), + X86_MATCH(INTEL_KABYLAKE, core_funcs), {} }; @@ -2699,13 +2715,11 @@ static int intel_pstate_cpu_offline(struct cpufreq_policy *policy) return intel_cpufreq_cpu_offline(policy); } -static int intel_pstate_cpu_exit(struct cpufreq_policy *policy) +static void intel_pstate_cpu_exit(struct cpufreq_policy *policy) { pr_debug("CPU %d exiting\n", policy->cpu); 
policy->fast_switch_possible = false; - - return 0; } static int __intel_pstate_cpu_init(struct cpufreq_policy *policy) @@ -3036,7 +3050,7 @@ pstate_exit: return ret; } -static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy) +static void intel_cpufreq_cpu_exit(struct cpufreq_policy *policy) { struct freq_qos_request *req; @@ -3046,7 +3060,7 @@ static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy) freq_qos_remove_request(req); kfree(req); - return intel_pstate_cpu_exit(policy); + intel_pstate_cpu_exit(policy); } static int intel_cpufreq_suspend(struct cpufreq_policy *policy) @@ -3350,14 +3364,13 @@ static inline void intel_pstate_request_control_from_smm(void) {} #define INTEL_PSTATE_HWP_BROADWELL 0x01 -#define X86_MATCH_HWP(model, hwp_mode) \ - X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \ - X86_FEATURE_HWP, hwp_mode) +#define X86_MATCH_HWP(vfm, hwp_mode) \ + X86_MATCH_VFM_FEATURE(vfm, X86_FEATURE_HWP, hwp_mode) static const struct x86_cpu_id hwp_support_ids[] __initconst = { - X86_MATCH_HWP(BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL), - X86_MATCH_HWP(BROADWELL_D, INTEL_PSTATE_HWP_BROADWELL), - X86_MATCH_HWP(ANY, 0), + X86_MATCH_HWP(INTEL_BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL), + X86_MATCH_HWP(INTEL_BROADWELL_D, INTEL_PSTATE_HWP_BROADWELL), + X86_MATCH_HWP(INTEL_ANY, 0), {} }; @@ -3390,15 +3403,19 @@ static const struct x86_cpu_id intel_epp_default[] = { * which can result in one core turbo frequency for * AlderLake Mobile CPUs. */ - X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, HWP_SET_DEF_BALANCE_PERF_EPP(102)), - X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)), - X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE, - HWP_EPP_BALANCE_POWERSAVE, 115, 16)), + X86_MATCH_VFM(INTEL_ALDERLAKE_L, HWP_SET_DEF_BALANCE_PERF_EPP(102)), + X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)), + X86_MATCH_VFM(INTEL_METEORLAKE_L, HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE, + 179, 64, 16)), + X86_MATCH_VFM(INTEL_ARROWLAKE, HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE, + 179, 64, 16)), {} }; static const struct x86_cpu_id intel_hybrid_scaling_factor[] = { - X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, HYBRID_SCALING_FACTOR_MTL), + X86_MATCH_VFM(INTEL_METEORLAKE_L, HYBRID_SCALING_FACTOR_MTL), + X86_MATCH_VFM(INTEL_ARROWLAKE, HYBRID_SCALING_FACTOR_MTL), + X86_MATCH_VFM(INTEL_LUNARLAKE_M, HYBRID_SCALING_FACTOR_LNL), {} }; diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c index 4c57c6725c13..bd6fe8638d39 100644 --- a/drivers/cpufreq/longhaul.c +++ b/drivers/cpufreq/longhaul.c @@ -236,8 +236,9 @@ static void do_powersaver(int cx_address, unsigned int mults_index, } /** - * longhaul_set_cpu_frequency() - * @mults_index : bitpattern of the new multiplier. + * longhaul_setstate() + * @policy: cpufreq_policy structure containing the current policy. + * @table_index: index of the frequency within the cpufreq_frequency_table. * * Sets a new clock ratio. 
*/ diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c index afc59b292153..6a8e97896d38 100644 --- a/drivers/cpufreq/loongson2_cpufreq.c +++ b/drivers/cpufreq/loongson2_cpufreq.c @@ -85,18 +85,12 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy) return 0; } -static int loongson2_cpufreq_exit(struct cpufreq_policy *policy) -{ - return 0; -} - static struct cpufreq_driver loongson2_cpufreq_driver = { .name = "loongson2", .init = loongson2_cpufreq_cpu_init, .verify = cpufreq_generic_frequency_table_verify, .target_index = loongson2_cpufreq_target, .get = cpufreq_generic_get, - .exit = loongson2_cpufreq_exit, .attr = cpufreq_generic_attr, }; diff --git a/drivers/cpufreq/loongson3_cpufreq.c b/drivers/cpufreq/loongson3_cpufreq.c new file mode 100644 index 000000000000..5f79b6de127c --- /dev/null +++ b/drivers/cpufreq/loongson3_cpufreq.c @@ -0,0 +1,395 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * CPUFreq driver for the Loongson-3 processors. + * + * All revisions of Loongson-3 processor support cpu_has_scalefreq feature. + * + * Author: Huacai Chen <chenhuacai@loongson.cn> + * Copyright (C) 2024 Loongson Technology Corporation Limited + */ +#include <linux/cpufreq.h> +#include <linux/delay.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/units.h> + +#include <asm/idle.h> +#include <asm/loongarch.h> +#include <asm/loongson.h> + +/* Message */ +union smc_message { + u32 value; + struct { + u32 id : 4; + u32 info : 4; + u32 val : 16; + u32 cmd : 6; + u32 extra : 1; + u32 complete : 1; + }; +}; + +/* Command return values */ +#define CMD_OK 0 /* No error */ +#define CMD_ERROR 1 /* Regular error */ +#define CMD_NOCMD 2 /* Command does not support */ +#define CMD_INVAL 3 /* Invalid Parameter */ + +/* Version commands */ +/* + * CMD_GET_VERSION - Get interface version + * Input: none + * Output: version + */ +#define CMD_GET_VERSION 0x1 + +/* Feature commands */ +/* + * CMD_GET_FEATURE - Get feature state + * Input: feature ID + * Output: feature flag + */ +#define CMD_GET_FEATURE 0x2 + +/* + * CMD_SET_FEATURE - Set feature state + * Input: feature ID, feature flag + * output: none + */ +#define CMD_SET_FEATURE 0x3 + +/* Feature IDs */ +#define FEATURE_SENSOR 0 +#define FEATURE_FAN 1 +#define FEATURE_DVFS 2 + +/* Sensor feature flags */ +#define FEATURE_SENSOR_ENABLE BIT(0) +#define FEATURE_SENSOR_SAMPLE BIT(1) + +/* Fan feature flags */ +#define FEATURE_FAN_ENABLE BIT(0) +#define FEATURE_FAN_AUTO BIT(1) + +/* DVFS feature flags */ +#define FEATURE_DVFS_ENABLE BIT(0) +#define FEATURE_DVFS_BOOST BIT(1) +#define FEATURE_DVFS_AUTO BIT(2) +#define FEATURE_DVFS_SINGLE_BOOST BIT(3) + +/* Sensor commands */ +/* + * CMD_GET_SENSOR_NUM - Get number of sensors + * Input: none + * Output: number + */ +#define CMD_GET_SENSOR_NUM 0x4 + +/* + * CMD_GET_SENSOR_STATUS - Get sensor status + * Input: sensor ID, type + * Output: sensor status + */ +#define CMD_GET_SENSOR_STATUS 0x5 + +/* Sensor types */ +#define SENSOR_INFO_TYPE 0 +#define SENSOR_INFO_TYPE_TEMP 1 + +/* Fan commands */ +/* + * CMD_GET_FAN_NUM - Get number of fans + * Input: none + * Output: number + */ +#define CMD_GET_FAN_NUM 0x6 + +/* + * CMD_GET_FAN_INFO - Get fan status + * Input: fan ID, type + * Output: fan info + */ +#define CMD_GET_FAN_INFO 0x7 + +/* + * CMD_SET_FAN_INFO - Set fan status + * Input: fan ID, type, value + * Output: none + */ +#define CMD_SET_FAN_INFO 0x8 + +/* Fan types */ +#define FAN_INFO_TYPE_LEVEL 0 + +/* DVFS commands */ +/* + * 
CMD_GET_FREQ_LEVEL_NUM - Get number of freq levels + * Input: CPU ID + * Output: number + */ +#define CMD_GET_FREQ_LEVEL_NUM 0x9 + +/* + * CMD_GET_FREQ_BOOST_LEVEL - Get the first boost level + * Input: CPU ID + * Output: number + */ +#define CMD_GET_FREQ_BOOST_LEVEL 0x10 + +/* + * CMD_GET_FREQ_LEVEL_INFO - Get freq level info + * Input: CPU ID, level ID + * Output: level info + */ +#define CMD_GET_FREQ_LEVEL_INFO 0x11 + +/* + * CMD_GET_FREQ_INFO - Get freq info + * Input: CPU ID, type + * Output: freq info + */ +#define CMD_GET_FREQ_INFO 0x12 + +/* + * CMD_SET_FREQ_INFO - Set freq info + * Input: CPU ID, type, value + * Output: none + */ +#define CMD_SET_FREQ_INFO 0x13 + +/* Freq types */ +#define FREQ_INFO_TYPE_FREQ 0 +#define FREQ_INFO_TYPE_LEVEL 1 + +#define FREQ_MAX_LEVEL 16 + +struct loongson3_freq_data { + unsigned int def_freq_level; + struct cpufreq_frequency_table table[]; +}; + +static struct mutex cpufreq_mutex[MAX_PACKAGES]; +static struct cpufreq_driver loongson3_cpufreq_driver; +static DEFINE_PER_CPU(struct loongson3_freq_data *, freq_data); + +static inline int do_service_request(u32 id, u32 info, u32 cmd, u32 val, u32 extra) +{ + int retries; + unsigned int cpu = smp_processor_id(); + unsigned int package = cpu_data[cpu].package; + union smc_message msg, last; + + mutex_lock(&cpufreq_mutex[package]); + + last.value = iocsr_read32(LOONGARCH_IOCSR_SMCMBX); + if (!last.complete) { + mutex_unlock(&cpufreq_mutex[package]); + return -EPERM; + } + + msg.id = id; + msg.info = info; + msg.cmd = cmd; + msg.val = val; + msg.extra = extra; + msg.complete = 0; + + iocsr_write32(msg.value, LOONGARCH_IOCSR_SMCMBX); + iocsr_write32(iocsr_read32(LOONGARCH_IOCSR_MISC_FUNC) | IOCSR_MISC_FUNC_SOFT_INT, + LOONGARCH_IOCSR_MISC_FUNC); + + for (retries = 0; retries < 10000; retries++) { + msg.value = iocsr_read32(LOONGARCH_IOCSR_SMCMBX); + if (msg.complete) + break; + + usleep_range(8, 12); + } + + if (!msg.complete || msg.cmd != CMD_OK) { + mutex_unlock(&cpufreq_mutex[package]); + return -EPERM; + } + + mutex_unlock(&cpufreq_mutex[package]); + + return msg.val; +} + +static unsigned int loongson3_cpufreq_get(unsigned int cpu) +{ + int ret; + + ret = do_service_request(cpu, FREQ_INFO_TYPE_FREQ, CMD_GET_FREQ_INFO, 0, 0); + + return ret * KILO; +} + +static int loongson3_cpufreq_target(struct cpufreq_policy *policy, unsigned int index) +{ + int ret; + + ret = do_service_request(cpu_data[policy->cpu].core, + FREQ_INFO_TYPE_LEVEL, CMD_SET_FREQ_INFO, index, 0); + + return (ret >= 0) ? 0 : ret; +} + +static int configure_freq_table(int cpu) +{ + int i, ret, boost_level, max_level, freq_level; + struct platform_device *pdev = cpufreq_get_driver_data(); + struct loongson3_freq_data *data; + + if (per_cpu(freq_data, cpu)) + return 0; + + ret = do_service_request(cpu, 0, CMD_GET_FREQ_LEVEL_NUM, 0, 0); + if (ret < 0) + return ret; + max_level = ret; + + ret = do_service_request(cpu, 0, CMD_GET_FREQ_BOOST_LEVEL, 0, 0); + if (ret < 0) + return ret; + boost_level = ret; + + freq_level = min(max_level, FREQ_MAX_LEVEL); + data = devm_kzalloc(&pdev->dev, struct_size(data, table, freq_level + 1), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->def_freq_level = boost_level - 1; + + for (i = 0; i < freq_level; i++) { + ret = do_service_request(cpu, FREQ_INFO_TYPE_FREQ, CMD_GET_FREQ_LEVEL_INFO, i, 0); + if (ret < 0) { + devm_kfree(&pdev->dev, data); + return ret; + } + + data->table[i].frequency = ret * KILO; + data->table[i].flags = (i >= boost_level) ? 
CPUFREQ_BOOST_FREQ : 0; + } + + data->table[freq_level].flags = 0; + data->table[freq_level].frequency = CPUFREQ_TABLE_END; + + per_cpu(freq_data, cpu) = data; + + return 0; +} + +static int loongson3_cpufreq_cpu_init(struct cpufreq_policy *policy) +{ + int i, ret, cpu = policy->cpu; + + ret = configure_freq_table(cpu); + if (ret < 0) + return ret; + + policy->cpuinfo.transition_latency = 10000; + policy->freq_table = per_cpu(freq_data, cpu)->table; + policy->suspend_freq = policy->freq_table[per_cpu(freq_data, cpu)->def_freq_level].frequency; + cpumask_copy(policy->cpus, topology_sibling_cpumask(cpu)); + + for_each_cpu(i, policy->cpus) { + if (i != cpu) + per_cpu(freq_data, i) = per_cpu(freq_data, cpu); + } + + if (policy_has_boost_freq(policy)) { + ret = cpufreq_enable_boost_support(); + if (ret < 0) { + pr_warn("cpufreq: Failed to enable boost: %d\n", ret); + return ret; + } + loongson3_cpufreq_driver.boost_enabled = true; + } + + return 0; +} + +static void loongson3_cpufreq_cpu_exit(struct cpufreq_policy *policy) +{ + int cpu = policy->cpu; + + loongson3_cpufreq_target(policy, per_cpu(freq_data, cpu)->def_freq_level); +} + +static int loongson3_cpufreq_cpu_online(struct cpufreq_policy *policy) +{ + return 0; +} + +static int loongson3_cpufreq_cpu_offline(struct cpufreq_policy *policy) +{ + return 0; +} + +static struct cpufreq_driver loongson3_cpufreq_driver = { + .name = "loongson3", + .flags = CPUFREQ_CONST_LOOPS, + .init = loongson3_cpufreq_cpu_init, + .exit = loongson3_cpufreq_cpu_exit, + .online = loongson3_cpufreq_cpu_online, + .offline = loongson3_cpufreq_cpu_offline, + .get = loongson3_cpufreq_get, + .target_index = loongson3_cpufreq_target, + .attr = cpufreq_generic_attr, + .verify = cpufreq_generic_frequency_table_verify, + .suspend = cpufreq_generic_suspend, +}; + +static int loongson3_cpufreq_probe(struct platform_device *pdev) +{ + int i, ret; + + for (i = 0; i < MAX_PACKAGES; i++) + devm_mutex_init(&pdev->dev, &cpufreq_mutex[i]); + + ret = do_service_request(0, 0, CMD_GET_VERSION, 0, 0); + if (ret <= 0) + return -EPERM; + + ret = do_service_request(FEATURE_DVFS, 0, CMD_SET_FEATURE, + FEATURE_DVFS_ENABLE | FEATURE_DVFS_BOOST, 0); + if (ret < 0) + return -EPERM; + + loongson3_cpufreq_driver.driver_data = pdev; + + ret = cpufreq_register_driver(&loongson3_cpufreq_driver); + if (ret) + return ret; + + pr_info("cpufreq: Loongson-3 CPU frequency driver.\n"); + + return 0; +} + +static void loongson3_cpufreq_remove(struct platform_device *pdev) +{ + cpufreq_unregister_driver(&loongson3_cpufreq_driver); +} + +static struct platform_device_id cpufreq_id_table[] = { + { "loongson3_cpufreq", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(platform, cpufreq_id_table); + +static struct platform_driver loongson3_platform_driver = { + .driver = { + .name = "loongson3_cpufreq", + }, + .id_table = cpufreq_id_table, + .probe = loongson3_cpufreq_probe, + .remove_new = loongson3_cpufreq_remove, +}; +module_platform_driver(loongson3_platform_driver); + +MODULE_AUTHOR("Huacai Chen <chenhuacai@loongson.cn>"); +MODULE_DESCRIPTION("CPUFreq driver for Loongson-3 processors"); +MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c index 8d097dcddda4..8925e096d5b9 100644 --- a/drivers/cpufreq/mediatek-cpufreq-hw.c +++ b/drivers/cpufreq/mediatek-cpufreq-hw.c @@ -260,7 +260,7 @@ static int mtk_cpufreq_hw_cpu_init(struct cpufreq_policy *policy) return 0; } -static int mtk_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy) +static void 
mtk_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy) { struct mtk_cpufreq_data *data = policy->driver_data; struct resource *res = data->res; @@ -270,8 +270,6 @@ static int mtk_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy) writel_relaxed(0x0, data->reg_bases[REG_FREQ_ENABLE]); iounmap(base); release_mem_region(res->start, resource_size(res)); - - return 0; } static void mtk_cpufreq_register_em(struct cpufreq_policy *policy) diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c index 518606adf14e..3a1aadaa723c 100644 --- a/drivers/cpufreq/mediatek-cpufreq.c +++ b/drivers/cpufreq/mediatek-cpufreq.c @@ -390,28 +390,23 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu) int ret; cpu_dev = get_cpu_device(cpu); - if (!cpu_dev) { - dev_err(cpu_dev, "failed to get cpu%d device\n", cpu); - return -ENODEV; - } + if (!cpu_dev) + return dev_err_probe(cpu_dev, -ENODEV, "failed to get cpu%d device\n", cpu); info->cpu_dev = cpu_dev; info->ccifreq_bound = false; if (info->soc_data->ccifreq_supported) { info->cci_dev = of_get_cci(info->cpu_dev); - if (IS_ERR(info->cci_dev)) { - ret = PTR_ERR(info->cci_dev); - dev_err(cpu_dev, "cpu%d: failed to get cci device\n", cpu); - return -ENODEV; - } + if (IS_ERR(info->cci_dev)) + return dev_err_probe(cpu_dev, PTR_ERR(info->cci_dev), + "cpu%d: failed to get cci device\n", + cpu); } info->cpu_clk = clk_get(cpu_dev, "cpu"); - if (IS_ERR(info->cpu_clk)) { - ret = PTR_ERR(info->cpu_clk); - return dev_err_probe(cpu_dev, ret, + if (IS_ERR(info->cpu_clk)) + return dev_err_probe(cpu_dev, PTR_ERR(info->cpu_clk), "cpu%d: failed to get cpu clk\n", cpu); - } info->inter_clk = clk_get(cpu_dev, "intermediate"); if (IS_ERR(info->inter_clk)) { @@ -431,7 +426,7 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu) ret = regulator_enable(info->proc_reg); if (ret) { - dev_warn(cpu_dev, "cpu%d: failed to enable vproc\n", cpu); + dev_err_probe(cpu_dev, ret, "cpu%d: failed to enable vproc\n", cpu); goto out_free_proc_reg; } @@ -439,14 +434,17 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu) info->sram_reg = regulator_get_optional(cpu_dev, "sram"); if (IS_ERR(info->sram_reg)) { ret = PTR_ERR(info->sram_reg); - if (ret == -EPROBE_DEFER) + if (ret == -EPROBE_DEFER) { + dev_err_probe(cpu_dev, ret, + "cpu%d: Failed to get sram regulator\n", cpu); goto out_disable_proc_reg; + } info->sram_reg = NULL; } else { ret = regulator_enable(info->sram_reg); if (ret) { - dev_warn(cpu_dev, "cpu%d: failed to enable vsram\n", cpu); + dev_err_probe(cpu_dev, ret, "cpu%d: failed to enable vsram\n", cpu); goto out_free_sram_reg; } } @@ -454,31 +452,34 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu) /* Get OPP-sharing information from "operating-points-v2" bindings */ ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, &info->cpus); if (ret) { - dev_err(cpu_dev, + dev_err_probe(cpu_dev, ret, "cpu%d: failed to get OPP-sharing information\n", cpu); goto out_disable_sram_reg; } ret = dev_pm_opp_of_cpumask_add_table(&info->cpus); if (ret) { - dev_warn(cpu_dev, "cpu%d: no OPP table\n", cpu); + dev_err_probe(cpu_dev, ret, "cpu%d: no OPP table\n", cpu); goto out_disable_sram_reg; } ret = clk_prepare_enable(info->cpu_clk); - if (ret) + if (ret) { + dev_err_probe(cpu_dev, ret, "cpu%d: failed to enable cpu clk\n", cpu); goto out_free_opp_table; + } ret = clk_prepare_enable(info->inter_clk); - if (ret) + if (ret) { + dev_err_probe(cpu_dev, ret, "cpu%d: failed to enable inter clk\n", 
cpu); goto out_disable_mux_clock; + } if (info->soc_data->ccifreq_supported) { info->vproc_on_boot = regulator_get_voltage(info->proc_reg); if (info->vproc_on_boot < 0) { - ret = info->vproc_on_boot; - dev_err(info->cpu_dev, - "invalid Vproc value: %d\n", info->vproc_on_boot); + ret = dev_err_probe(info->cpu_dev, info->vproc_on_boot, + "invalid Vproc value\n"); goto out_disable_inter_clock; } } @@ -487,8 +488,8 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu) rate = clk_get_rate(info->inter_clk); opp = dev_pm_opp_find_freq_ceil(cpu_dev, &rate); if (IS_ERR(opp)) { - dev_err(cpu_dev, "cpu%d: failed to get intermediate opp\n", cpu); - ret = PTR_ERR(opp); + ret = dev_err_probe(cpu_dev, PTR_ERR(opp), + "cpu%d: failed to get intermediate opp\n", cpu); goto out_disable_inter_clock; } info->intermediate_voltage = dev_pm_opp_get_voltage(opp); @@ -501,7 +502,7 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu) info->opp_nb.notifier_call = mtk_cpufreq_opp_notifier; ret = dev_pm_opp_register_notifier(cpu_dev, &info->opp_nb); if (ret) { - dev_err(cpu_dev, "cpu%d: failed to register opp notifier\n", cpu); + dev_err_probe(cpu_dev, ret, "cpu%d: failed to register opp notifier\n", cpu); goto out_disable_inter_clock; } @@ -599,13 +600,11 @@ static int mtk_cpufreq_init(struct cpufreq_policy *policy) return 0; } -static int mtk_cpufreq_exit(struct cpufreq_policy *policy) +static void mtk_cpufreq_exit(struct cpufreq_policy *policy) { struct mtk_cpu_dvfs_info *info = policy->driver_data; dev_pm_opp_free_cpufreq_table(info->cpu_dev, &policy->freq_table); - - return 0; } static struct cpufreq_driver mtk_cpufreq_driver = { @@ -629,11 +628,9 @@ static int mtk_cpufreq_probe(struct platform_device *pdev) int cpu, ret; data = dev_get_platdata(&pdev->dev); - if (!data) { - dev_err(&pdev->dev, - "failed to get mtk cpufreq platform data\n"); - return -ENODEV; - } + if (!data) + return dev_err_probe(&pdev->dev, -ENODEV, + "failed to get mtk cpufreq platform data\n"); for_each_possible_cpu(cpu) { info = mtk_cpu_dvfs_info_lookup(cpu); @@ -642,25 +639,22 @@ static int mtk_cpufreq_probe(struct platform_device *pdev) info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); if (!info) { - ret = -ENOMEM; + ret = dev_err_probe(&pdev->dev, -ENOMEM, + "Failed to allocate dvfs_info\n"); goto release_dvfs_info_list; } info->soc_data = data; ret = mtk_cpu_dvfs_info_init(info, cpu); - if (ret) { - dev_err(&pdev->dev, - "failed to initialize dvfs info for cpu%d\n", - cpu); + if (ret) goto release_dvfs_info_list; - } list_add(&info->list_head, &dvfs_info_list); } ret = cpufreq_register_driver(&mtk_cpufreq_driver); if (ret) { - dev_err(&pdev->dev, "failed to register mtk cpufreq driver\n"); + dev_err_probe(&pdev->dev, ret, "failed to register mtk cpufreq driver\n"); goto release_dvfs_info_list; } diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c index 895690856665..3458d5cc9b7f 100644 --- a/drivers/cpufreq/omap-cpufreq.c +++ b/drivers/cpufreq/omap-cpufreq.c @@ -135,11 +135,10 @@ static int omap_cpu_init(struct cpufreq_policy *policy) return 0; } -static int omap_cpu_exit(struct cpufreq_policy *policy) +static void omap_cpu_exit(struct cpufreq_policy *policy) { freq_table_free(); clk_put(policy->clk); - return 0; } static struct cpufreq_driver omap_driver = { diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c index 039a66bbe1be..ee925b53b6b9 100644 --- a/drivers/cpufreq/pasemi-cpufreq.c +++ b/drivers/cpufreq/pasemi-cpufreq.c 
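The mediatek-cpufreq hunks above convert dev_err()/dev_warn()-plus-return pairs into single dev_err_probe() calls. The point of dev_err_probe() is that it returns the error code it logs, so a probe-path failure collapses into one statement, and it stays quiet at the error level for -EPROBE_DEFER (the kernel records the deferral reason instead of spamming the log). Below is a minimal user-space model of that pattern, not the kernel implementation; the helper name, the EPROBE_DEFER constant, and the stub clock getter are illustrative stand-ins only.

/*
 * Toy model of the dev_err_probe() idiom: log according to the error
 * code and hand the code back so the caller can log and return in one
 * statement. Plain C, nothing here is kernel code.
 */
#include <stdio.h>

#define EPROBE_DEFER 517	/* mirrors the kernel's errno value for probe deferral */

static int dev_err_probe_model(const char *dev, int err, const char *msg)
{
	if (err == -EPROBE_DEFER)
		printf("dbg: %s: %s (probe deferred)\n", dev, msg);	/* quiet path */
	else
		fprintf(stderr, "err: %s: %s (error %d)\n", dev, msg, err);
	return err;
}

/* Stand-in for a clock provider that is not ready yet. */
static int get_clk_stub(void)
{
	return -EPROBE_DEFER;
}

/* Shape of the converted probe paths: log and propagate in one statement. */
static int probe_model(void)
{
	int ret = get_clk_stub();

	if (ret)
		return dev_err_probe_model("cpu0", ret, "failed to get cpu clk");
	return 0;
}

int main(void)
{
	return probe_model() ? 1 : 0;
}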
@@ -204,21 +204,19 @@ out: return err; } -static int pas_cpufreq_cpu_exit(struct cpufreq_policy *policy) +static void pas_cpufreq_cpu_exit(struct cpufreq_policy *policy) { /* * We don't support CPU hotplug. Don't unmap after the system * has already made it to a running state. */ if (system_state >= SYSTEM_RUNNING) - return 0; + return; if (sdcasr_mapbase) iounmap(sdcasr_mapbase); if (sdcpwr_mapbase) iounmap(sdcpwr_mapbase); - - return 0; } static int pas_cpufreq_target(struct cpufreq_policy *policy, diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c index 6f8b5ea7aeae..771efbf51a48 100644 --- a/drivers/cpufreq/pcc-cpufreq.c +++ b/drivers/cpufreq/pcc-cpufreq.c @@ -562,18 +562,12 @@ out: return result; } -static int pcc_cpufreq_cpu_exit(struct cpufreq_policy *policy) -{ - return 0; -} - static struct cpufreq_driver pcc_cpufreq_driver = { .flags = CPUFREQ_CONST_LOOPS, .get = pcc_get_freq, .verify = pcc_cpufreq_verify, .target = pcc_cpufreq_target, .init = pcc_cpufreq_cpu_init, - .exit = pcc_cpufreq_cpu_exit, .name = "pcc-cpufreq", }; diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c index 41eefef95d87..f0a4a6c31204 100644 --- a/drivers/cpufreq/powernow-k6.c +++ b/drivers/cpufreq/powernow-k6.c @@ -219,7 +219,7 @@ have_busfreq: } -static int powernow_k6_cpu_exit(struct cpufreq_policy *policy) +static void powernow_k6_cpu_exit(struct cpufreq_policy *policy) { unsigned int i; @@ -234,10 +234,9 @@ static int powernow_k6_cpu_exit(struct cpufreq_policy *policy) cpufreq_freq_transition_begin(policy, &freqs); powernow_k6_target(policy, i); cpufreq_freq_transition_end(policy, &freqs, 0); - break; + return; } } - return 0; } static unsigned int powernow_k6_get(unsigned int cpu) diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c index 5d515fc34836..4271446c8725 100644 --- a/drivers/cpufreq/powernow-k7.c +++ b/drivers/cpufreq/powernow-k7.c @@ -644,7 +644,7 @@ static int powernow_cpu_init(struct cpufreq_policy *policy) return 0; } -static int powernow_cpu_exit(struct cpufreq_policy *policy) +static void powernow_cpu_exit(struct cpufreq_policy *policy) { #ifdef CONFIG_X86_POWERNOW_K7_ACPI if (acpi_processor_perf) { @@ -655,7 +655,6 @@ static int powernow_cpu_exit(struct cpufreq_policy *policy) #endif kfree(powernow_table); - return 0; } static struct cpufreq_driver powernow_driver = { diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c index b10f7a1b77f1..a01170f7d01c 100644 --- a/drivers/cpufreq/powernow-k8.c +++ b/drivers/cpufreq/powernow-k8.c @@ -1089,13 +1089,13 @@ err_out: return -ENODEV; } -static int powernowk8_cpu_exit(struct cpufreq_policy *pol) +static void powernowk8_cpu_exit(struct cpufreq_policy *pol) { struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); int cpu; if (!data) - return -EINVAL; + return; powernow_k8_cpu_exit_acpi(data); @@ -1104,8 +1104,6 @@ static int powernowk8_cpu_exit(struct cpufreq_policy *pol) /* pol->cpus will be empty here, use related_cpus instead. 
*/ for_each_cpu(cpu, pol->related_cpus) per_cpu(powernow_data, cpu) = NULL; - - return 0; } static void query_values_on_cpu(void *_err) diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c index fddbd1ea1635..50c62929f7ca 100644 --- a/drivers/cpufreq/powernv-cpufreq.c +++ b/drivers/cpufreq/powernv-cpufreq.c @@ -874,7 +874,7 @@ static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy) return 0; } -static int powernv_cpufreq_cpu_exit(struct cpufreq_policy *policy) +static void powernv_cpufreq_cpu_exit(struct cpufreq_policy *policy) { struct powernv_smp_call_data freq_data; struct global_pstate_info *gpstates = policy->driver_data; @@ -886,8 +886,6 @@ static int powernv_cpufreq_cpu_exit(struct cpufreq_policy *policy) del_timer_sync(&gpstates->timer); kfree(policy->driver_data); - - return 0; } static int powernv_cpufreq_reboot_notifier(struct notifier_block *nb, diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c index 88afc49941b7..5ee4c7bfdcc5 100644 --- a/drivers/cpufreq/ppc_cbe_cpufreq.c +++ b/drivers/cpufreq/ppc_cbe_cpufreq.c @@ -113,10 +113,9 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy) return 0; } -static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy) +static void cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy) { cbe_cpufreq_pmi_policy_exit(policy); - return 0; } static int cbe_cpufreq_target(struct cpufreq_policy *policy, diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c index ec8df5496a0c..370fe6a0104b 100644 --- a/drivers/cpufreq/qcom-cpufreq-hw.c +++ b/drivers/cpufreq/qcom-cpufreq-hw.c @@ -573,7 +573,7 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy) return qcom_cpufreq_hw_lmh_init(policy, index); } -static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy) +static void qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy) { struct device *cpu_dev = get_cpu_device(policy->cpu); struct qcom_cpufreq_data *data = policy->driver_data; @@ -583,8 +583,6 @@ static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy) qcom_cpufreq_hw_lmh_exit(data); kfree(policy->freq_table); kfree(data); - - return 0; } static void qcom_cpufreq_ready(struct cpufreq_policy *policy) diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c index ea05d9d67490..939702dfa73f 100644 --- a/drivers/cpufreq/qcom-cpufreq-nvmem.c +++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c @@ -191,6 +191,7 @@ static int qcom_cpufreq_kryo_name_version(struct device *cpu_dev, case QCOM_ID_IPQ5312: case QCOM_ID_IPQ5302: case QCOM_ID_IPQ5300: + case QCOM_ID_IPQ5321: case QCOM_ID_IPQ9514: case QCOM_ID_IPQ9550: case QCOM_ID_IPQ9554: @@ -455,7 +456,6 @@ static int qcom_cpufreq_probe(struct platform_device *pdev) { struct qcom_cpufreq_drv *drv; struct nvmem_cell *speedbin_nvmem; - struct device_node *np; struct device *cpu_dev; char pvs_name_buffer[] = "speedXX-pvsXX-vXX"; char *pvs_name = pvs_name_buffer; @@ -467,16 +467,15 @@ static int qcom_cpufreq_probe(struct platform_device *pdev) if (!cpu_dev) return -ENODEV; - np = dev_pm_opp_of_get_opp_desc_node(cpu_dev); + struct device_node *np __free(device_node) = + dev_pm_opp_of_get_opp_desc_node(cpu_dev); if (!np) return -ENOENT; ret = of_device_is_compatible(np, "operating-points-v2-kryo-cpu") || of_device_is_compatible(np, "operating-points-v2-krait-cpu"); - if (!ret) { - of_node_put(np); + if (!ret) return -ENOENT; - } drv = devm_kzalloc(&pdev->dev, struct_size(drv, cpus, 
num_possible_cpus()), GFP_KERNEL); @@ -502,7 +501,6 @@ static int qcom_cpufreq_probe(struct platform_device *pdev) } nvmem_cell_put(speedbin_nvmem); } - of_node_put(np); for_each_possible_cpu(cpu) { struct device **virt_devs = NULL; @@ -638,7 +636,7 @@ MODULE_DEVICE_TABLE(of, qcom_cpufreq_match_list); */ static int __init qcom_cpufreq_init(void) { - struct device_node *np = of_find_node_by_path("/"); + struct device_node *np __free(device_node) = of_find_node_by_path("/"); const struct of_device_id *match; int ret; @@ -646,7 +644,6 @@ static int __init qcom_cpufreq_init(void) return -ENODEV; match = of_match_node(qcom_cpufreq_match_list, np); - of_node_put(np); if (!match) return -ENODEV; diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c index 0aecaecbb0e6..3519bf34d397 100644 --- a/drivers/cpufreq/qoriq-cpufreq.c +++ b/drivers/cpufreq/qoriq-cpufreq.c @@ -225,7 +225,7 @@ err_np: return -ENODEV; } -static int qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy) +static void qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy) { struct cpu_data *data = policy->driver_data; @@ -233,8 +233,6 @@ static int qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy) kfree(data->table); kfree(data); policy->driver_data = NULL; - - return 0; } static int qoriq_cpufreq_target(struct cpufreq_policy *policy, diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c index 3b4f6bfb2f4c..5892c73e129d 100644 --- a/drivers/cpufreq/scmi-cpufreq.c +++ b/drivers/cpufreq/scmi-cpufreq.c @@ -63,9 +63,9 @@ static unsigned int scmi_cpufreq_fast_switch(struct cpufreq_policy *policy, unsigned int target_freq) { struct scmi_data *priv = policy->driver_data; + unsigned long freq = target_freq; - if (!perf_ops->freq_set(ph, priv->domain_id, - target_freq * 1000, true)) + if (!perf_ops->freq_set(ph, priv->domain_id, freq * 1000, true)) return target_freq; return 0; @@ -308,7 +308,7 @@ out_free_priv: return ret; } -static int scmi_cpufreq_exit(struct cpufreq_policy *policy) +static void scmi_cpufreq_exit(struct cpufreq_policy *policy) { struct scmi_data *priv = policy->driver_data; @@ -316,8 +316,6 @@ static int scmi_cpufreq_exit(struct cpufreq_policy *policy) dev_pm_opp_remove_all_dynamic(priv->cpu_dev); free_cpumask_var(priv->opp_shared_cpus); kfree(priv); - - return 0; } static void scmi_cpufreq_register_em(struct cpufreq_policy *policy) diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c index d33be56983ed..8d73e6e8be2a 100644 --- a/drivers/cpufreq/scpi-cpufreq.c +++ b/drivers/cpufreq/scpi-cpufreq.c @@ -167,7 +167,7 @@ out_free_opp: return ret; } -static int scpi_cpufreq_exit(struct cpufreq_policy *policy) +static void scpi_cpufreq_exit(struct cpufreq_policy *policy) { struct scpi_data *priv = policy->driver_data; @@ -175,8 +175,6 @@ static int scpi_cpufreq_exit(struct cpufreq_policy *policy) dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); dev_pm_opp_remove_all_dynamic(priv->cpu_dev); kfree(priv); - - return 0; } static struct cpufreq_driver scpi_cpufreq_driver = { diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c index b8704232c27b..aa74036d0420 100644 --- a/drivers/cpufreq/sh-cpufreq.c +++ b/drivers/cpufreq/sh-cpufreq.c @@ -135,14 +135,12 @@ static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy) return 0; } -static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy) +static void sh_cpufreq_cpu_exit(struct cpufreq_policy *policy) { unsigned int cpu = policy->cpu; struct clk *cpuclk = 
&per_cpu(sh_cpuclk, cpu); clk_put(cpuclk); - - return 0; } static struct cpufreq_driver sh_cpufreq_driver = { diff --git a/drivers/cpufreq/sparc-us2e-cpufreq.c b/drivers/cpufreq/sparc-us2e-cpufreq.c index 2783d3d55fce..8a0cd5312a59 100644 --- a/drivers/cpufreq/sparc-us2e-cpufreq.c +++ b/drivers/cpufreq/sparc-us2e-cpufreq.c @@ -296,10 +296,9 @@ static int us2e_freq_cpu_init(struct cpufreq_policy *policy) return 0; } -static int us2e_freq_cpu_exit(struct cpufreq_policy *policy) +static void us2e_freq_cpu_exit(struct cpufreq_policy *policy) { us2e_freq_target(policy, 0); - return 0; } static struct cpufreq_driver cpufreq_us2e_driver = { diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c index 6c3657679a88..b50f9d13e6d2 100644 --- a/drivers/cpufreq/sparc-us3-cpufreq.c +++ b/drivers/cpufreq/sparc-us3-cpufreq.c @@ -140,10 +140,9 @@ static int us3_freq_cpu_init(struct cpufreq_policy *policy) return 0; } -static int us3_freq_cpu_exit(struct cpufreq_policy *policy) +static void us3_freq_cpu_exit(struct cpufreq_policy *policy) { us3_freq_target(policy, 0); - return 0; } static struct cpufreq_driver cpufreq_us3_driver = { diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c index 75b10ecdb60f..3fafedb983b5 100644 --- a/drivers/cpufreq/speedstep-centrino.c +++ b/drivers/cpufreq/speedstep-centrino.c @@ -400,16 +400,12 @@ static int centrino_cpu_init(struct cpufreq_policy *policy) return 0; } -static int centrino_cpu_exit(struct cpufreq_policy *policy) +static void centrino_cpu_exit(struct cpufreq_policy *policy) { unsigned int cpu = policy->cpu; - if (!per_cpu(centrino_model, cpu)) - return -ENODEV; - - per_cpu(centrino_model, cpu) = NULL; - - return 0; + if (per_cpu(centrino_model, cpu)) + per_cpu(centrino_model, cpu) = NULL; } /** @@ -520,10 +516,10 @@ static struct cpufreq_driver centrino_driver = { * or ASCII model IDs. 
*/ static const struct x86_cpu_id centrino_ids[] = { - X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, 9, X86_FEATURE_EST, NULL), - X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, 13, X86_FEATURE_EST, NULL), - X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 15, 3, X86_FEATURE_EST, NULL), - X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 15, 4, X86_FEATURE_EST, NULL), + X86_MATCH_VFM_FEATURE(IFM( 6, 9), X86_FEATURE_EST, NULL), + X86_MATCH_VFM_FEATURE(IFM( 6, 13), X86_FEATURE_EST, NULL), + X86_MATCH_VFM_FEATURE(IFM(15, 3), X86_FEATURE_EST, NULL), + X86_MATCH_VFM_FEATURE(IFM(15, 4), X86_FEATURE_EST, NULL), {} }; diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c index 9c542e723a15..8e2e703c3865 100644 --- a/drivers/cpufreq/sti-cpufreq.c +++ b/drivers/cpufreq/sti-cpufreq.c @@ -18,7 +18,7 @@ #include <linux/regmap.h> #define VERSION_ELEMENTS 3 -#define MAX_PCODE_NAME_LEN 7 +#define MAX_PCODE_NAME_LEN 16 #define VERSION_SHIFT 28 #define HW_INFO_INDEX 1 @@ -293,6 +293,7 @@ module_init(sti_cpufreq_init); static const struct of_device_id __maybe_unused sti_cpufreq_of_match[] = { { .compatible = "st,stih407" }, { .compatible = "st,stih410" }, + { .compatible = "st,stih418" }, { }, }; MODULE_DEVICE_TABLE(of, sti_cpufreq_of_match); diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c index 0b882765cd66..95ac8d46c156 100644 --- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c +++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c @@ -91,6 +91,9 @@ static u32 sun50i_h616_efuse_xlate(u32 speedbin) case 0x5d00: value = 0; break; + case 0x6c00: + value = 5; + break; default: pr_warn("sun50i-cpufreq-nvmem: unknown speed bin 0x%x, using default bin 0\n", speedbin & 0xffff); @@ -131,26 +134,24 @@ static const struct of_device_id cpu_opp_match_list[] = { static bool dt_has_supported_hw(void) { bool has_opp_supported_hw = false; - struct device_node *np, *opp; struct device *cpu_dev; cpu_dev = get_cpu_device(0); if (!cpu_dev) return false; - np = dev_pm_opp_of_get_opp_desc_node(cpu_dev); + struct device_node *np __free(device_node) = + dev_pm_opp_of_get_opp_desc_node(cpu_dev); if (!np) return false; - for_each_child_of_node(np, opp) { + for_each_child_of_node_scoped(np, opp) { if (of_find_property(opp, "opp-supported-hw", NULL)) { has_opp_supported_hw = true; break; } } - of_node_put(np); - return has_opp_supported_hw; } @@ -165,7 +166,6 @@ static int sun50i_cpufreq_get_efuse(void) const struct sunxi_cpufreq_data *opp_data; struct nvmem_cell *speedbin_nvmem; const struct of_device_id *match; - struct device_node *np; struct device *cpu_dev; u32 *speedbin; int ret; @@ -174,19 +174,18 @@ static int sun50i_cpufreq_get_efuse(void) if (!cpu_dev) return -ENODEV; - np = dev_pm_opp_of_get_opp_desc_node(cpu_dev); + struct device_node *np __free(device_node) = + dev_pm_opp_of_get_opp_desc_node(cpu_dev); if (!np) return -ENOENT; match = of_match_node(cpu_opp_match_list, np); - if (!match) { - of_node_put(np); + if (!match) return -ENOENT; - } + opp_data = match->data; speedbin_nvmem = of_nvmem_cell_get(np, NULL); - of_node_put(np); if (IS_ERR(speedbin_nvmem)) return dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem), "Could not get nvmem cell\n"); @@ -301,14 +300,9 @@ MODULE_DEVICE_TABLE(of, sun50i_cpufreq_match_list); static const struct of_device_id *sun50i_cpufreq_match_node(void) { - const struct of_device_id *match; - struct device_node *np; - - np = of_find_node_by_path("/"); - match = of_match_node(sun50i_cpufreq_match_list, np); - of_node_put(np); + struct device_node *np 
__free(device_node) = of_find_node_by_path("/"); - return match; + return of_match_node(sun50i_cpufreq_match_list, np); } /* diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c index 59865ea455a8..07ea7ed61b68 100644 --- a/drivers/cpufreq/tegra194-cpufreq.c +++ b/drivers/cpufreq/tegra194-cpufreq.c @@ -551,14 +551,12 @@ static int tegra194_cpufreq_offline(struct cpufreq_policy *policy) return 0; } -static int tegra194_cpufreq_exit(struct cpufreq_policy *policy) +static void tegra194_cpufreq_exit(struct cpufreq_policy *policy) { struct device *cpu_dev = get_cpu_device(policy->cpu); dev_pm_opp_remove_all_dynamic(cpu_dev); dev_pm_opp_of_cpumask_remove_table(policy->related_cpus); - - return 0; } static int tegra194_cpufreq_set_target(struct cpufreq_policy *policy, diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c index 714ed53753fa..4d3f27958fbd 100644 --- a/drivers/cpufreq/ti-cpufreq.c +++ b/drivers/cpufreq/ti-cpufreq.c @@ -47,6 +47,35 @@ #define AM625_SUPPORT_S_MPU_OPP BIT(1) #define AM625_SUPPORT_T_MPU_OPP BIT(2) +enum { + AM62A7_EFUSE_M_MPU_OPP = 13, + AM62A7_EFUSE_N_MPU_OPP, + AM62A7_EFUSE_O_MPU_OPP, + AM62A7_EFUSE_P_MPU_OPP, + AM62A7_EFUSE_Q_MPU_OPP, + AM62A7_EFUSE_R_MPU_OPP, + AM62A7_EFUSE_S_MPU_OPP, + /* + * The V, U, and T speed grade numbering is out of order + * to align with the AM625 more uniformly. I promise I know + * my ABCs ;) + */ + AM62A7_EFUSE_V_MPU_OPP, + AM62A7_EFUSE_U_MPU_OPP, + AM62A7_EFUSE_T_MPU_OPP, +}; + +#define AM62A7_SUPPORT_N_MPU_OPP BIT(0) +#define AM62A7_SUPPORT_R_MPU_OPP BIT(1) +#define AM62A7_SUPPORT_V_MPU_OPP BIT(2) + +#define AM62P5_EFUSE_O_MPU_OPP 15 +#define AM62P5_EFUSE_S_MPU_OPP 19 +#define AM62P5_EFUSE_U_MPU_OPP 21 + +#define AM62P5_SUPPORT_O_MPU_OPP BIT(0) +#define AM62P5_SUPPORT_U_MPU_OPP BIT(2) + #define VERSION_COUNT 2 struct ti_cpufreq_data; @@ -112,6 +141,49 @@ static unsigned long omap3_efuse_xlate(struct ti_cpufreq_data *opp_data, return BIT(efuse); } +static unsigned long am62p5_efuse_xlate(struct ti_cpufreq_data *opp_data, + unsigned long efuse) +{ + unsigned long calculated_efuse = AM62P5_SUPPORT_O_MPU_OPP; + + switch (efuse) { + case AM62P5_EFUSE_U_MPU_OPP: + case AM62P5_EFUSE_S_MPU_OPP: + calculated_efuse |= AM62P5_SUPPORT_U_MPU_OPP; + fallthrough; + case AM62P5_EFUSE_O_MPU_OPP: + calculated_efuse |= AM62P5_SUPPORT_O_MPU_OPP; + } + + return calculated_efuse; +} + +static unsigned long am62a7_efuse_xlate(struct ti_cpufreq_data *opp_data, + unsigned long efuse) +{ + unsigned long calculated_efuse = AM62A7_SUPPORT_N_MPU_OPP; + + switch (efuse) { + case AM62A7_EFUSE_V_MPU_OPP: + case AM62A7_EFUSE_U_MPU_OPP: + case AM62A7_EFUSE_T_MPU_OPP: + case AM62A7_EFUSE_S_MPU_OPP: + calculated_efuse |= AM62A7_SUPPORT_V_MPU_OPP; + fallthrough; + case AM62A7_EFUSE_R_MPU_OPP: + case AM62A7_EFUSE_Q_MPU_OPP: + case AM62A7_EFUSE_P_MPU_OPP: + case AM62A7_EFUSE_O_MPU_OPP: + calculated_efuse |= AM62A7_SUPPORT_R_MPU_OPP; + fallthrough; + case AM62A7_EFUSE_N_MPU_OPP: + case AM62A7_EFUSE_M_MPU_OPP: + calculated_efuse |= AM62A7_SUPPORT_N_MPU_OPP; + } + + return calculated_efuse; +} + static unsigned long am625_efuse_xlate(struct ti_cpufreq_data *opp_data, unsigned long efuse) { @@ -234,6 +306,24 @@ static struct ti_cpufreq_soc_data am625_soc_data = { .multi_regulator = false, }; +static struct ti_cpufreq_soc_data am62a7_soc_data = { + .efuse_xlate = am62a7_efuse_xlate, + .efuse_offset = 0x0, + .efuse_mask = 0x07c0, + .efuse_shift = 0x6, + .rev_offset = 0x0014, + .multi_regulator = false, +}; + +static struct 
ti_cpufreq_soc_data am62p5_soc_data = { + .efuse_xlate = am62p5_efuse_xlate, + .efuse_offset = 0x0, + .efuse_mask = 0x07c0, + .efuse_shift = 0x6, + .rev_offset = 0x0014, + .multi_regulator = false, +}; + /** * ti_cpufreq_get_efuse() - Parse and return efuse value present on SoC * @opp_data: pointer to ti_cpufreq_data context @@ -337,8 +427,8 @@ static const struct of_device_id ti_cpufreq_of_match[] = { { .compatible = "ti,omap34xx", .data = &omap34xx_soc_data, }, { .compatible = "ti,omap36xx", .data = &omap36xx_soc_data, }, { .compatible = "ti,am625", .data = &am625_soc_data, }, - { .compatible = "ti,am62a7", .data = &am625_soc_data, }, - { .compatible = "ti,am62p5", .data = &am625_soc_data, }, + { .compatible = "ti,am62a7", .data = &am62a7_soc_data, }, + { .compatible = "ti,am62p5", .data = &am62p5_soc_data, }, /* legacy */ { .compatible = "ti,omap3430", .data = &omap34xx_soc_data, }, { .compatible = "ti,omap3630", .data = &omap36xx_soc_data, }, @@ -417,7 +507,7 @@ static int ti_cpufreq_probe(struct platform_device *pdev) ret = dev_pm_opp_set_config(opp_data->cpu_dev, &config); if (ret < 0) { - dev_err(opp_data->cpu_dev, "Failed to set OPP config\n"); + dev_err_probe(opp_data->cpu_dev, ret, "Failed to set OPP config\n"); goto fail_put_node; } diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c index 9ac4ea50b874..3fadf536c429 100644 --- a/drivers/cpufreq/vexpress-spc-cpufreq.c +++ b/drivers/cpufreq/vexpress-spc-cpufreq.c @@ -447,7 +447,7 @@ static int ve_spc_cpufreq_init(struct cpufreq_policy *policy) return 0; } -static int ve_spc_cpufreq_exit(struct cpufreq_policy *policy) +static void ve_spc_cpufreq_exit(struct cpufreq_policy *policy) { struct device *cpu_dev; @@ -455,11 +455,10 @@ static int ve_spc_cpufreq_exit(struct cpufreq_policy *policy) if (!cpu_dev) { pr_err("%s: failed to get cpu%d device\n", __func__, policy->cpu); - return -ENODEV; + return; } put_cluster_clk_and_freq_table(cpu_dev, policy->related_cpus); - return 0; } static struct cpufreq_driver ve_spc_cpufreq_driver = { diff --git a/drivers/cpuidle/cpuidle-haltpoll.c b/drivers/cpuidle/cpuidle-haltpoll.c index d8515d5c0853..bcd03e893a0a 100644 --- a/drivers/cpuidle/cpuidle-haltpoll.c +++ b/drivers/cpuidle/cpuidle-haltpoll.c @@ -141,5 +141,6 @@ static void __exit haltpoll_exit(void) module_init(haltpoll_init); module_exit(haltpoll_exit); +MODULE_DESCRIPTION("cpuidle driver for haltpoll governor"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Marcelo Tosatti <mtosatti@redhat.com>"); diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index b96e3da0fedd..f3c9d49f0f2a 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -14,8 +14,6 @@ #include <linux/ktime.h> #include <linux/hrtimer.h> #include <linux/tick.h> -#include <linux/sched.h> -#include <linux/sched/loadavg.h> #include <linux/sched/stat.h> #include <linux/math64.h> @@ -95,16 +93,11 @@ * state, and thus the less likely a busy CPU will hit such a deep * C state. * - * Two factors are used in determing this multiplier: - * a value of 10 is added for each point of "per cpu load average" we have. - * a value of 5 points is added for each process that is waiting for - * IO on this CPU. - * (these values are experimentally determined) - * - * The load average factor gives a longer term (few seconds) input to the - * decision, while the iowait value gives a cpu local instantanious input. 
- * The iowait factor may look low, but realize that this is also already - * represented in the system load average. + * Currently there is only one value determining the factor: + * 10 points are added for each process that is waiting for IO on this CPU. + * (This value was experimentally determined.) + * Utilization is no longer a factor, as it was shown that it never contributed + * significantly to the performance multiplier in the first place. * */ diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c index 7244f71c59c5..f2992f92d8db 100644 --- a/drivers/cpuidle/governors/teo.c +++ b/drivers/cpuidle/governors/teo.c @@ -2,13 +2,8 @@ /* * Timer events oriented CPU idle governor * - * TEO governor: * Copyright (C) 2018 - 2021 Intel Corporation * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com> - * - * Util-awareness mechanism: - * Copyright (C) 2022 Arm Ltd. - * Author: Kajetan Puchalski <kajetan.puchalski@arm.com> */ /** @@ -59,16 +54,12 @@ * shallower than the one whose bin is fallen into by the sleep length (these * situations are referred to as "intercepts" below). * - * In addition to the metrics described above, the governor counts recent - * intercepts (that is, intercepts that have occurred during the last - * %NR_RECENT invocations of it for the given CPU) for each bin. - * * In order to select an idle state for a CPU, the governor takes the following * steps (modulo the possible latency constraint that must be taken into account * too): * * 1. Find the deepest CPU idle state whose target residency does not exceed - * the current sleep length (the candidate idle state) and compute 3 sums as + * the current sleep length (the candidate idle state) and compute 2 sums as * follows: * * - The sum of the "hits" and "intercepts" metrics for the candidate state @@ -81,20 +72,15 @@ * idle long enough to avoid being intercepted if the sleep length had been * equal to the current one). * - * - The sum of the numbers of recent intercepts for all of the idle states - * shallower than the candidate one. - * - * 2. If the second sum is greater than the first one or the third sum is - * greater than %NR_RECENT / 2, the CPU is likely to wake up early, so look - * for an alternative idle state to select. + * 2. If the second sum is greater than the first one, the CPU is likely to wake + * up early, so look for an alternative idle state to select. * * - Traverse the idle states shallower than the candidate one in the * descending order. * - * - For each of them compute the sum of the "intercepts" metrics and the sum - * of the numbers of recent intercepts over all of the idle states between - * it and the candidate one (including the former and excluding the - * latter). + * - For each of them compute the sum of the "intercepts" metrics over all + * of the idle states between it and the candidate one (including the + * former and excluding the latter). * * - If each of these sums that needs to be taken into account (because the * check related to it has indicated that the CPU is likely to wake up @@ -104,79 +90,31 @@ * select the given idle state instead of the candidate one. * * 3. By default, select the candidate state. - * - * Util-awareness mechanism: - * - * The idea behind the util-awareness extension is that there are two distinct - * scenarios for the CPU which should result in two different approaches to idle - * state selection - utilized and not utilized. 
- * - * In this case, 'utilized' means that the average runqueue util of the CPU is - * above a certain threshold. - * - * When the CPU is utilized while going into idle, more likely than not it will - * be woken up to do more work soon and so a shallower idle state should be - * selected to minimise latency and maximise performance. When the CPU is not - * being utilized, the usual metrics-based approach to selecting the deepest - * available idle state should be preferred to take advantage of the power - * saving. - * - * In order to achieve this, the governor uses a utilization threshold. - * The threshold is computed per-CPU as a percentage of the CPU's capacity - * by bit shifting the capacity value. Based on testing, the shift of 6 (~1.56%) - * seems to be getting the best results. - * - * Before selecting the next idle state, the governor compares the current CPU - * util to the precomputed util threshold. If it's below, it defaults to the - * TEO metrics mechanism. If it's above, the closest shallower idle state will - * be selected instead, as long as is not a polling state. */ #include <linux/cpuidle.h> #include <linux/jiffies.h> #include <linux/kernel.h> -#include <linux/sched.h> #include <linux/sched/clock.h> -#include <linux/sched/topology.h> #include <linux/tick.h> #include "gov.h" /* - * The number of bits to shift the CPU's capacity by in order to determine - * the utilized threshold. - * - * 6 was chosen based on testing as the number that achieved the best balance - * of power and performance on average. - * - * The resulting threshold is high enough to not be triggered by background - * noise and low enough to react quickly when activity starts to ramp up. - */ -#define UTIL_THRESHOLD_SHIFT 6 - -/* * The PULSE value is added to metrics when they grow and the DECAY_SHIFT value * is used for decreasing metrics on a regular basis. */ #define PULSE 1024 #define DECAY_SHIFT 3 -/* - * Number of the most recent idle duration values to take into consideration for - * the detection of recent early wakeup patterns. - */ -#define NR_RECENT 9 - /** * struct teo_bin - Metrics used by the TEO cpuidle governor. * @intercepts: The "intercepts" metric. * @hits: The "hits" metric. - * @recent: The number of recent "intercepts". */ struct teo_bin { unsigned int intercepts; unsigned int hits; - unsigned int recent; }; /** @@ -185,42 +123,19 @@ struct teo_bin { * @sleep_length_ns: Time till the closest timer event (at the selection time). * @state_bins: Idle state data bins for this CPU. * @total: Grand total of the "intercepts" and "hits" metrics for all bins. - * @next_recent_idx: Index of the next @recent_idx entry to update. - * @recent_idx: Indices of bins corresponding to recent "intercepts". * @tick_hits: Number of "hits" after TICK_NSEC. 
- * @util_threshold: Threshold above which the CPU is considered utilized */ struct teo_cpu { s64 time_span_ns; s64 sleep_length_ns; struct teo_bin state_bins[CPUIDLE_STATE_MAX]; unsigned int total; - int next_recent_idx; - int recent_idx[NR_RECENT]; unsigned int tick_hits; - unsigned long util_threshold; }; static DEFINE_PER_CPU(struct teo_cpu, teo_cpus); /** - * teo_cpu_is_utilized - Check if the CPU's util is above the threshold - * @cpu: Target CPU - * @cpu_data: Governor CPU data for the target CPU - */ -#ifdef CONFIG_SMP -static bool teo_cpu_is_utilized(int cpu, struct teo_cpu *cpu_data) -{ - return sched_cpu_util(cpu) > cpu_data->util_threshold; -} -#else -static bool teo_cpu_is_utilized(int cpu, struct teo_cpu *cpu_data) -{ - return false; -} -#endif - -/** * teo_update - Update CPU metrics after wakeup. * @drv: cpuidle driver containing state data. * @dev: Target CPU. @@ -286,13 +201,6 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev) } } - i = cpu_data->next_recent_idx++; - if (cpu_data->next_recent_idx >= NR_RECENT) - cpu_data->next_recent_idx = 0; - - if (cpu_data->recent_idx[i] >= 0) - cpu_data->state_bins[cpu_data->recent_idx[i]].recent--; - /* * If the deepest state's target residency is below the tick length, * make a record of it to help teo_select() decide whether or not @@ -319,14 +227,10 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev) * Otherwise, update the "intercepts" metric for the bin fallen into by * the measured idle duration. */ - if (idx_timer == idx_duration) { + if (idx_timer == idx_duration) cpu_data->state_bins[idx_timer].hits += PULSE; - cpu_data->recent_idx[i] = -1; - } else { + else cpu_data->state_bins[idx_duration].intercepts += PULSE; - cpu_data->state_bins[idx_duration].recent++; - cpu_data->recent_idx[i] = idx_duration; - } end: cpu_data->total += PULSE; @@ -379,14 +283,11 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, unsigned int tick_intercept_sum = 0; unsigned int idx_intercept_sum = 0; unsigned int intercept_sum = 0; - unsigned int idx_recent_sum = 0; - unsigned int recent_sum = 0; unsigned int idx_hit_sum = 0; unsigned int hit_sum = 0; int constraint_idx = 0; int idx0 = 0, idx = -1; - bool alt_intercepts, alt_recent; - bool cpu_utilized; + int prev_intercept_idx; s64 duration_ns; int i; @@ -411,32 +312,6 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, if (!dev->states_usage[0].disable) idx = 0; - cpu_utilized = teo_cpu_is_utilized(dev->cpu, cpu_data); - /* - * If the CPU is being utilized over the threshold and there are only 2 - * states to choose from, the metrics need not be considered, so choose - * the shallowest non-polling state and exit. - */ - if (drv->state_count < 3 && cpu_utilized) { - /* - * If state 0 is enabled and it is not a polling one, select it - * right away unless the scheduler tick has been stopped, in - * which case care needs to be taken to leave the CPU in a deep - * enough state in case it is not woken up any time soon after - * all. If state 1 is disabled, though, state 0 must be used - * anyway. - */ - if ((!idx && !(drv->states[0].flags & CPUIDLE_FLAG_POLLING) && - teo_state_ok(0, drv)) || dev->states_usage[1].disable) { - idx = 0; - goto out_tick; - } - /* Assume that state 1 is not a polling one and use it. */ - idx = 1; - duration_ns = drv->states[1].target_residency_ns; - goto end; - } - /* Compute the sums of metrics for early wakeup pattern detection. 
*/ for (i = 1; i < drv->state_count; i++) { struct teo_bin *prev_bin = &cpu_data->state_bins[i-1]; @@ -448,7 +323,6 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, */ intercept_sum += prev_bin->intercepts; hit_sum += prev_bin->hits; - recent_sum += prev_bin->recent; if (dev->states_usage[i].disable) continue; @@ -464,7 +338,6 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, /* Save the sums for the current state. */ idx_intercept_sum = intercept_sum; idx_hit_sum = hit_sum; - idx_recent_sum = recent_sum; } /* Avoid unnecessary overhead. */ @@ -489,37 +362,29 @@ * If the sum of the intercepts metric for all of the idle states * shallower than the current candidate one (idx) is greater than the * sum of the intercepts and hits metrics for the candidate state and - * all of the deeper states, or the sum of the numbers of recent - * intercepts over all of the states shallower than the candidate one - * is greater than a half of the number of recent events taken into - * account, a shallower idle state is likely to be a better choice. + * all of the deeper states, a shallower idle state is likely to be a + * better choice. */ - alt_intercepts = 2 * idx_intercept_sum > cpu_data->total - idx_hit_sum; - alt_recent = idx_recent_sum > NR_RECENT / 2; - if (alt_recent || alt_intercepts) { + prev_intercept_idx = idx; + if (2 * idx_intercept_sum > cpu_data->total - idx_hit_sum) { int first_suitable_idx = idx; /* * Look for the deepest idle state whose target residency had * not exceeded the idle duration in over a half of the relevant - * cases (both with respect to intercepts overall and with - * respect to the recent intercepts only) in the past. + * cases in the past. * * Take the possible duration limitation present if the tick * has been stopped already into account. */ intercept_sum = 0; - recent_sum = 0; for (i = idx - 1; i >= 0; i--) { struct teo_bin *bin = &cpu_data->state_bins[i]; intercept_sum += bin->intercepts; - recent_sum += bin->recent; - if ((!alt_recent || 2 * recent_sum > idx_recent_sum) && - (!alt_intercepts || - 2 * intercept_sum > idx_intercept_sum)) { + if (2 * intercept_sum > idx_intercept_sum) { /* * Use the current state unless it is too * shallow or disabled, in which case take the @@ -552,6 +417,15 @@ first_suitable_idx = i; } } + if (!idx && prev_intercept_idx) { + /* + * We have to query the sleep length here; otherwise we don't + * know after wakeup if our guess was correct. + */ + duration_ns = tick_nohz_get_sleep_length(&delta_tick); + cpu_data->sleep_length_ns = duration_ns; + goto out_tick; + } /* * If there is a latency constraint, it may be necessary to select an @@ -561,18 +435,6 @@ idx = constraint_idx; /* - * If the CPU is being utilized over the threshold, choose a shallower - * non-polling state to improve latency, unless the scheduler tick has - * been stopped already and the shallower state's target residency is - * not sufficiently large. - */ - if (cpu_utilized) { - i = teo_find_shallower_state(drv, dev, idx, KTIME_MAX, true); - if (teo_state_ok(i, drv)) - idx = i; - } - - /* * Skip the timers check if state 0 is the current candidate one, * because an immediate non-timer wakeup is expected in that case. 
*/ @@ -592,7 +454,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, cpu_data->sleep_length_ns = duration_ns; /* - * If the closest expected timer is before the terget residency of the + * If the closest expected timer is before the target residency of the * candidate state, a shallower one needs to be found. */ if (drv->states[idx].target_residency_ns > duration_ns) { @@ -667,14 +529,8 @@ static int teo_enable_device(struct cpuidle_driver *drv, struct cpuidle_device *dev) { struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu); - unsigned long max_capacity = arch_scale_cpu_capacity(dev->cpu); - int i; memset(cpu_data, 0, sizeof(*cpu_data)); - cpu_data->util_threshold = max_capacity >> UTIL_THRESHOLD_SHIFT; - - for (i = 0; i < NR_RECENT; i++) - cpu_data->recent_idx[i] = -1; return 0; } diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c index 784843fa2a22..3df10517a327 100644 --- a/drivers/cxl/core/hdm.c +++ b/drivers/cxl/core/hdm.c @@ -52,6 +52,14 @@ int devm_cxl_add_passthrough_decoder(struct cxl_port *port) struct cxl_dport *dport = NULL; int single_port_map[1]; unsigned long index; + struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev); + + /* + * Capability checks are moot for passthrough decoders; support + * any and all possibilities. + */ + cxlhdm->interleave_mask = ~0U; + cxlhdm->iw_cap_mask = ~0UL; cxlsd = cxl_switch_decoder_alloc(port, 1); if (IS_ERR(cxlsd)) @@ -79,6 +87,11 @@ static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm) cxlhdm->interleave_mask |= GENMASK(11, 8); if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_14_12, hdm_cap)) cxlhdm->interleave_mask |= GENMASK(14, 12); + cxlhdm->iw_cap_mask = BIT(1) | BIT(2) | BIT(4) | BIT(8); + if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_3_6_12_WAY, hdm_cap)) + cxlhdm->iw_cap_mask |= BIT(3) | BIT(6) | BIT(12); + if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_16_WAY, hdm_cap)) + cxlhdm->iw_cap_mask |= BIT(16); } static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info) diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c index e69625a8d6a1..c00f3a933164 100644 --- a/drivers/cxl/core/pmem.c +++ b/drivers/cxl/core/pmem.c @@ -62,10 +62,14 @@ static int match_nvdimm_bridge(struct device *dev, void *data) return is_cxl_nvdimm_bridge(dev); } -struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_memdev *cxlmd) +/** + * cxl_find_nvdimm_bridge() - find a bridge device relative to a port + * @port: any descendant port of an nvdimm-bridge associated + * root-cxl-port + */ +struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_port *port) { - struct cxl_root *cxl_root __free(put_cxl_root) = - find_cxl_root(cxlmd->endpoint); + struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port); struct device *dev; if (!cxl_root) @@ -242,18 +246,20 @@ static void cxlmd_release_nvdimm(void *_cxlmd) /** * devm_cxl_add_nvdimm() - add a bridge between a cxl_memdev and an nvdimm + * @parent_port: parent port for the (to be added) @cxlmd endpoint port * @cxlmd: cxl_memdev instance that will perform LIBNVDIMM operations * * Return: 0 on success, negative error code on failure. 
*/ -int devm_cxl_add_nvdimm(struct cxl_memdev *cxlmd) +int devm_cxl_add_nvdimm(struct cxl_port *parent_port, + struct cxl_memdev *cxlmd) { struct cxl_nvdimm_bridge *cxl_nvb; struct cxl_nvdimm *cxl_nvd; struct device *dev; int rc; - cxl_nvb = cxl_find_nvdimm_bridge(cxlmd); + cxl_nvb = cxl_find_nvdimm_bridge(parent_port); if (!cxl_nvb) return -ENODEV; diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c index 3c2b6144be23..538ebd5a64fd 100644 --- a/drivers/cxl/core/region.c +++ b/drivers/cxl/core/region.c @@ -1101,6 +1101,26 @@ static int cxl_port_attach_region(struct cxl_port *port, } cxld = cxl_rr->decoder; + /* + * the number of targets should not exceed the target_count + * of the decoder + */ + if (is_switch_decoder(&cxld->dev)) { + struct cxl_switch_decoder *cxlsd; + + cxlsd = to_cxl_switch_decoder(&cxld->dev); + if (cxl_rr->nr_targets > cxlsd->nr_targets) { + dev_dbg(&cxlr->dev, + "%s:%s %s add: %s:%s @ %d overflows targets: %d\n", + dev_name(port->uport_dev), dev_name(&port->dev), + dev_name(&cxld->dev), dev_name(&cxlmd->dev), + dev_name(&cxled->cxld.dev), pos, + cxlsd->nr_targets); + rc = -ENXIO; + goto out_erase; + } + } + rc = cxl_rr_ep_add(cxl_rr, cxled); if (rc) { dev_dbg(&cxlr->dev, @@ -1210,6 +1230,50 @@ static int check_last_peer(struct cxl_endpoint_decoder *cxled, return 0; } +static int check_interleave_cap(struct cxl_decoder *cxld, int iw, int ig) +{ + struct cxl_port *port = to_cxl_port(cxld->dev.parent); + struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev); + unsigned int interleave_mask; + u8 eiw; + u16 eig; + int high_pos, low_pos; + + if (!test_bit(iw, &cxlhdm->iw_cap_mask)) + return -ENXIO; + /* + * Per CXL specification r3.1 (8.2.4.20.13 Decoder Protection), + * if eiw < 8: + * DPAOFFSET[51: eig + 8] = HPAOFFSET[51: eig + 8 + eiw] + * DPAOFFSET[eig + 7: 0] = HPAOFFSET[eig + 7: 0] + * + * when the eiw is 0, all the bits of HPAOFFSET[51: 0] are used and + * there are no interleave bits. + * + * if eiw >= 8: + * DPAOFFSET[51: eig + 8] = HPAOFFSET[51: eig + eiw] / 3 + * DPAOFFSET[eig + 7: 0] = HPAOFFSET[eig + 7: 0] + * + * when the eiw is 8, all the bits of HPAOFFSET[51: 0] are used and + * there are no interleave bits. 
+ */ + ways_to_eiw(iw, &eiw); + if (eiw == 0 || eiw == 8) + return 0; + + granularity_to_eig(ig, &eig); + if (eiw > 8) + high_pos = eiw + eig - 1; + else + high_pos = eiw + eig + 7; + low_pos = eig + 8; + interleave_mask = GENMASK(high_pos, low_pos); + if (interleave_mask & ~cxlhdm->interleave_mask) + return -ENXIO; + + return 0; +} + static int cxl_port_setup_targets(struct cxl_port *port, struct cxl_region *cxlr, struct cxl_endpoint_decoder *cxled) @@ -1360,6 +1424,15 @@ static int cxl_port_setup_targets(struct cxl_port *port, return -ENXIO; } } else { + rc = check_interleave_cap(cxld, iw, ig); + if (rc) { + dev_dbg(&cxlr->dev, + "%s:%s iw: %d ig: %d is not supported\n", + dev_name(port->uport_dev), + dev_name(&port->dev), iw, ig); + return rc; + } + cxld->interleave_ways = iw; cxld->interleave_granularity = ig; cxld->hpa_range = (struct range) { @@ -1796,6 +1869,15 @@ static int cxl_region_attach(struct cxl_region *cxlr, struct cxl_dport *dport; int rc = -ENXIO; + rc = check_interleave_cap(&cxled->cxld, p->interleave_ways, + p->interleave_granularity); + if (rc) { + dev_dbg(&cxlr->dev, "%s iw: %d ig: %d is not supported\n", + dev_name(&cxled->cxld.dev), p->interleave_ways, + p->interleave_granularity); + return rc; + } + if (cxled->mode != cxlr->mode) { dev_dbg(&cxlr->dev, "%s region mode: %d mismatch: %d\n", dev_name(&cxled->cxld.dev), cxlr->mode, cxled->mode); @@ -2688,22 +2770,33 @@ static int __cxl_dpa_to_region(struct device *dev, void *arg) { struct cxl_dpa_to_region_context *ctx = arg; struct cxl_endpoint_decoder *cxled; + struct cxl_region *cxlr; u64 dpa = ctx->dpa; if (!is_endpoint_decoder(dev)) return 0; cxled = to_cxl_endpoint_decoder(dev); - if (!cxled->dpa_res || !resource_size(cxled->dpa_res)) + if (!cxled || !cxled->dpa_res || !resource_size(cxled->dpa_res)) return 0; if (dpa > cxled->dpa_res->end || dpa < cxled->dpa_res->start) return 0; - dev_dbg(dev, "dpa:0x%llx mapped in region:%s\n", dpa, - dev_name(&cxled->cxld.region->dev)); + /* + * Stop the region search (return 1) when an endpoint mapping is + * found. The region may not be fully constructed, so offering + * the cxlr in the context structure is not guaranteed. + */ + cxlr = cxled->cxld.region; + if (cxlr) + dev_dbg(dev, "dpa:0x%llx mapped in region:%s\n", dpa, + dev_name(&cxlr->dev)); + else + dev_dbg(dev, "dpa:0x%llx mapped in endpoint:%s\n", dpa, + dev_name(dev)); - ctx->cxlr = cxled->cxld.region; + ctx->cxlr = cxlr; return 1; } @@ -2847,7 +2940,7 @@ static int cxl_pmem_region_alloc(struct cxl_region *cxlr) * bridge for one device is the same for all. 
*/ if (i == 0) { - cxl_nvb = cxl_find_nvdimm_bridge(cxlmd); + cxl_nvb = cxl_find_nvdimm_bridge(cxlmd->endpoint); if (!cxl_nvb) return -ENODEV; cxlr->cxl_nvb = cxl_nvb; diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index 603c0120cff8..a6613a6f8923 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -47,6 +47,8 @@ extern const struct nvdimm_security_ops *cxl_security_ops; #define CXL_HDM_DECODER_TARGET_COUNT_MASK GENMASK(7, 4) #define CXL_HDM_DECODER_INTERLEAVE_11_8 BIT(8) #define CXL_HDM_DECODER_INTERLEAVE_14_12 BIT(9) +#define CXL_HDM_DECODER_INTERLEAVE_3_6_12_WAY BIT(11) +#define CXL_HDM_DECODER_INTERLEAVE_16_WAY BIT(12) #define CXL_HDM_DECODER_CTRL_OFFSET 0x4 #define CXL_HDM_DECODER_ENABLE BIT(1) #define CXL_HDM_DECODER0_BASE_LOW_OFFSET(i) (0x20 * (i) + 0x10) @@ -855,8 +857,8 @@ struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host, struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev); bool is_cxl_nvdimm(struct device *dev); bool is_cxl_nvdimm_bridge(struct device *dev); -int devm_cxl_add_nvdimm(struct cxl_memdev *cxlmd); -struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_memdev *cxlmd); +int devm_cxl_add_nvdimm(struct cxl_port *parent_port, struct cxl_memdev *cxlmd); +struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_port *port); #ifdef CONFIG_CXL_REGION bool is_cxl_pmem_region(struct device *dev); diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index 19aba81cdf13..af8169ccdbc0 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -395,9 +395,9 @@ enum cxl_devtype { /** * struct cxl_dpa_perf - DPA performance property entry - * @dpa_range - range for DPA address - * @coord - QoS performance data (i.e. latency, bandwidth) - * @qos_class - QoS Class cookies + * @dpa_range: range for DPA address + * @coord: QoS performance data (i.e. latency, bandwidth) + * @qos_class: QoS Class cookies */ struct cxl_dpa_perf { struct range dpa_range; @@ -464,13 +464,14 @@ struct cxl_dev_state { * @active_persistent_bytes: sum of hard + soft persistent * @next_volatile_bytes: volatile capacity change pending device reset * @next_persistent_bytes: persistent capacity change pending device reset + * @ram_perf: performance data entry matched to RAM partition + * @pmem_perf: performance data entry matched to PMEM partition * @event: event log driver state * @poison: poison driver state info * @security: security driver state info * @fw: firmware upload / activation state + * @mbox_wait: RCU wait for mbox send to complete * @mbox_send: @dev specific transport for transmitting mailbox commands - * @ram_perf: performance data entry matched to RAM partition - * @pmem_perf: performance data entry matched to PMEM partition * * See CXL 3.0 8.2.9.8.2 Capacity Configuration and Label Storage for * details on capacity parameters. 
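The check_interleave_cap() hunk earlier in this diff encodes which host-physical-address bits a decoder consumes for interleave selection: for eiw < 8 the bits are HPA[eiw + eig + 7 : eig + 8], for the mod-3 ways (eiw > 8) they are HPA[eiw + eig - 1 : eig + 8], and eiw 0 or 8 consumes no dedicated bits. The sketch below is a self-contained user-space re-derivation of that mask for a hypothetical 4-way, 256-byte-granularity decoder; ways_to_eiw() and granularity_to_eig() here are simplified local stand-ins for the kernel helpers of the same names, and GENMASK() is redefined for 32-bit user space.

/*
 * Re-derive the HPA interleave-bit mask that check_interleave_cap()
 * tests against cxlhdm->interleave_mask. Illustrative only.
 */
#include <stdio.h>

#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

static int ways_to_eiw(int iw)
{
	int eiw = 0;

	if (iw % 3 == 0)		/* 3-, 6-, 12-way map to eiw 8, 9, 10 */
		return 8 + ways_to_eiw(iw / 3);
	while (iw > 1) {		/* power-of-2 ways: eiw = log2(iw) */
		iw >>= 1;
		eiw++;
	}
	return eiw;
}

static int granularity_to_eig(int ig)
{
	int eig = 0;

	while (ig > 256) {		/* ig = 256 << eig */
		ig >>= 1;
		eig++;
	}
	return eig;
}

int main(void)
{
	int iw = 4, ig = 256;	/* hypothetical decoder: 4-way, 256 B granularity */
	int eiw = ways_to_eiw(iw);
	int eig = granularity_to_eig(ig);
	int high_pos, low_pos;

	if (eiw == 0 || eiw == 8) {	/* 1-way and 3-way use no dedicated HPA bits */
		printf("no interleave bits to check\n");
		return 0;
	}
	high_pos = eiw > 8 ? eiw + eig - 1 : eiw + eig + 7;
	low_pos = eig + 8;
	printf("iw=%d ig=%d -> eiw=%d eig=%d -> HPA[%d:%d], mask 0x%x\n",
	       iw, ig, eiw, eig, high_pos, low_pos, GENMASK(high_pos, low_pos));
	return 0;
}

With these inputs the program prints mask 0x300, i.e. HPA bits 9:8, which lies inside the GENMASK(11, 8) range that parse_hdm_decoder_caps() enables when the INTERLEAVE_11_8 capability bit is set, so such a decoder would pass the new check.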
@@ -851,11 +852,21 @@ static inline void cxl_mem_active_dec(void) int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd); +/** + * struct cxl_hdm - HDM Decoder registers and cached / decoded capabilities + * @regs: mapped registers, see devm_cxl_setup_hdm() + * @decoder_count: number of decoders for this port + * @target_count: for switch decoders, max downstream port targets + * @interleave_mask: interleave granularity capability, see check_interleave_cap() + * @iw_cap_mask: bitmask of supported interleave ways, see check_interleave_cap() + * @port: mapped cxl_port, see devm_cxl_setup_hdm() + */ struct cxl_hdm { struct cxl_component_regs regs; unsigned int decoder_count; unsigned int target_count; unsigned int interleave_mask; + unsigned long iw_cap_mask; struct cxl_port *port; }; diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c index 0c79d9ce877c..2f1b49bfe162 100644 --- a/drivers/cxl/mem.c +++ b/drivers/cxl/mem.c @@ -152,6 +152,15 @@ static int cxl_mem_probe(struct device *dev) return -ENXIO; } + if (resource_size(&cxlds->pmem_res) && IS_ENABLED(CONFIG_CXL_PMEM)) { + rc = devm_cxl_add_nvdimm(parent_port, cxlmd); + if (rc) { + if (rc == -ENODEV) + dev_info(dev, "PMEM disabled by platform\n"); + return rc; + } + } + if (dport->rch) endpoint_parent = parent_port->uport_dev; else @@ -174,14 +183,6 @@ unlock: if (rc) return rc; - if (resource_size(&cxlds->pmem_res) && IS_ENABLED(CONFIG_CXL_PMEM)) { - rc = devm_cxl_add_nvdimm(cxlmd); - if (rc == -ENODEV) - dev_info(dev, "PMEM disabled by platform\n"); - else - return rc; - } - /* * The kernel may be operating out of CXL memory on this device, * there is no spec defined way to determine whether this device diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile index 9c09893695b7..4edfb83ffbee 100644 --- a/drivers/edac/Makefile +++ b/drivers/edac/Makefile @@ -54,11 +54,13 @@ obj-$(CONFIG_EDAC_MPC85XX) += mpc85xx_edac_mod.o layerscape_edac_mod-y := fsl_ddr_edac.o layerscape_edac.o obj-$(CONFIG_EDAC_LAYERSCAPE) += layerscape_edac_mod.o -skx_edac-y := skx_common.o skx_base.o -obj-$(CONFIG_EDAC_SKX) += skx_edac.o +skx_edac_common-y := skx_common.o -i10nm_edac-y := skx_common.o i10nm_base.o -obj-$(CONFIG_EDAC_I10NM) += i10nm_edac.o +skx_edac-y := skx_base.o +obj-$(CONFIG_EDAC_SKX) += skx_edac.o skx_edac_common.o + +i10nm_edac-y := i10nm_base.o +obj-$(CONFIG_EDAC_I10NM) += i10nm_edac.o skx_edac_common.o obj-$(CONFIG_EDAC_CELL) += cell_edac.o obj-$(CONFIG_EDAC_PPC4XX) += ppc4xx_edac.o diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index a17f3c0cdfa6..ddfbdb66b794 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -20,7 +20,6 @@ static inline u32 get_umc_reg(struct amd64_pvt *pvt, u32 reg) return reg; switch (reg) { - case UMCCH_ADDR_CFG: return UMCCH_ADDR_CFG_DDR5; case UMCCH_ADDR_MASK_SEC: return UMCCH_ADDR_MASK_SEC_DDR5; case UMCCH_DIMM_CFG: return UMCCH_DIMM_CFG_DDR5; } @@ -1341,22 +1340,15 @@ static void umc_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl) static void umc_dump_misc_regs(struct amd64_pvt *pvt) { struct amd64_umc *umc; - u32 i, tmp, umc_base; + u32 i; for_each_umc(i) { - umc_base = get_umc_base(i); umc = &pvt->umc[i]; edac_dbg(1, "UMC%d DIMM cfg: 0x%x\n", i, umc->dimm_cfg); edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg); edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl); edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl); - - amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ECC_BAD_SYMBOL, &tmp); - edac_dbg(1, "UMC%d ECC bad symbol: 0x%x\n", i, tmp); - - 
amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_UMC_CAP, &tmp); - edac_dbg(1, "UMC%d UMC cap: 0x%x\n", i, tmp); edac_dbg(1, "UMC%d UMC cap high: 0x%x\n", i, umc->umc_cap_hi); edac_dbg(1, "UMC%d ECC capable: %s, ChipKill ECC capable: %s\n", @@ -1369,14 +1361,6 @@ static void umc_dump_misc_regs(struct amd64_pvt *pvt) edac_dbg(1, "UMC%d x16 DIMMs present: %s\n", i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no"); - if (umc->dram_type == MEM_LRDDR4 || umc->dram_type == MEM_LRDDR5) { - amd_smn_read(pvt->mc_node_id, - umc_base + get_umc_reg(pvt, UMCCH_ADDR_CFG), - &tmp); - edac_dbg(1, "UMC%d LRDIMM %dx rank multiply\n", - i, 1 << ((tmp >> 4) & 0x3)); - } - umc_debug_display_dimm_sizes(pvt, i); } } @@ -1454,6 +1438,7 @@ static void umc_read_base_mask(struct amd64_pvt *pvt) u32 *base, *base_sec; u32 *mask, *mask_sec; int cs, umc; + u32 tmp; for_each_umc(umc) { umc_base_reg = get_umc_base(umc) + UMCCH_BASE_ADDR; @@ -1466,13 +1451,17 @@ static void umc_read_base_mask(struct amd64_pvt *pvt) base_reg = umc_base_reg + (cs * 4); base_reg_sec = umc_base_reg_sec + (cs * 4); - if (!amd_smn_read(pvt->mc_node_id, base_reg, base)) + if (!amd_smn_read(pvt->mc_node_id, base_reg, &tmp)) { + *base = tmp; edac_dbg(0, " DCSB%d[%d]=0x%08x reg: 0x%x\n", umc, cs, *base, base_reg); + } - if (!amd_smn_read(pvt->mc_node_id, base_reg_sec, base_sec)) + if (!amd_smn_read(pvt->mc_node_id, base_reg_sec, &tmp)) { + *base_sec = tmp; edac_dbg(0, " DCSB_SEC%d[%d]=0x%08x reg: 0x%x\n", umc, cs, *base_sec, base_reg_sec); + } } umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK; @@ -1485,13 +1474,17 @@ static void umc_read_base_mask(struct amd64_pvt *pvt) mask_reg = umc_mask_reg + (cs * 4); mask_reg_sec = umc_mask_reg_sec + (cs * 4); - if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask)) + if (!amd_smn_read(pvt->mc_node_id, mask_reg, &tmp)) { + *mask = tmp; edac_dbg(0, " DCSM%d[%d]=0x%08x reg: 0x%x\n", umc, cs, *mask, mask_reg); + } - if (!amd_smn_read(pvt->mc_node_id, mask_reg_sec, mask_sec)) + if (!amd_smn_read(pvt->mc_node_id, mask_reg_sec, &tmp)) { + *mask_sec = tmp; edac_dbg(0, " DCSM_SEC%d[%d]=0x%08x reg: 0x%x\n", umc, cs, *mask_sec, mask_reg_sec); + } } } } @@ -2910,7 +2903,7 @@ static void umc_read_mc_regs(struct amd64_pvt *pvt) { u8 nid = pvt->mc_node_id; struct amd64_umc *umc; - u32 i, umc_base; + u32 i, tmp, umc_base; /* Read registers from each UMC */ for_each_umc(i) { @@ -2918,11 +2911,20 @@ static void umc_read_mc_regs(struct amd64_pvt *pvt) umc_base = get_umc_base(i); umc = &pvt->umc[i]; - amd_smn_read(nid, umc_base + get_umc_reg(pvt, UMCCH_DIMM_CFG), &umc->dimm_cfg); - amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg); - amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl); - amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl); - amd_smn_read(nid, umc_base + UMCCH_UMC_CAP_HI, &umc->umc_cap_hi); + if (!amd_smn_read(nid, umc_base + get_umc_reg(pvt, UMCCH_DIMM_CFG), &tmp)) + umc->dimm_cfg = tmp; + + if (!amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &tmp)) + umc->umc_cfg = tmp; + + if (!amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &tmp)) + umc->sdp_ctrl = tmp; + + if (!amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &tmp)) + umc->ecc_ctrl = tmp; + + if (!amd_smn_read(nid, umc_base + UMCCH_UMC_CAP_HI, &tmp)) + umc->umc_cap_hi = tmp; } } @@ -3651,16 +3653,21 @@ static void gpu_read_mc_regs(struct amd64_pvt *pvt) { u8 nid = pvt->mc_node_id; struct amd64_umc *umc; - u32 i, umc_base; + u32 i, tmp, umc_base; /* Read registers from each UMC */ for_each_umc(i) { umc_base = gpu_get_umc_base(pvt, i, 0); umc = 
&pvt->umc[i]; - amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg); - amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl); - amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl); + if (!amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &tmp)) + umc->umc_cfg = tmp; + + if (!amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &tmp)) + umc->sdp_ctrl = tmp; + + if (!amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &tmp)) + umc->ecc_ctrl = tmp; } } diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h index b879b12971e7..17228d07de4c 100644 --- a/drivers/edac/amd64_edac.h +++ b/drivers/edac/amd64_edac.h @@ -256,15 +256,11 @@ #define UMCCH_ADDR_MASK 0x20 #define UMCCH_ADDR_MASK_SEC 0x28 #define UMCCH_ADDR_MASK_SEC_DDR5 0x30 -#define UMCCH_ADDR_CFG 0x30 -#define UMCCH_ADDR_CFG_DDR5 0x40 #define UMCCH_DIMM_CFG 0x80 #define UMCCH_DIMM_CFG_DDR5 0x90 #define UMCCH_UMC_CFG 0x100 #define UMCCH_SDP_CTRL 0x104 #define UMCCH_ECC_CTRL 0x14C -#define UMCCH_ECC_BAD_SYMBOL 0xD90 -#define UMCCH_UMC_CAP 0xDF0 #define UMCCH_UMC_CAP_HI 0xDF4 /* UMC CH bitfields */ diff --git a/drivers/edac/dmc520_edac.c b/drivers/edac/dmc520_edac.c index 4e30b989a1a4..5e52d31db3b8 100644 --- a/drivers/edac/dmc520_edac.c +++ b/drivers/edac/dmc520_edac.c @@ -480,7 +480,6 @@ static int dmc520_edac_probe(struct platform_device *pdev) struct mem_ctl_info *mci; void __iomem *reg_base; u32 irq_mask_all = 0; - struct resource *res; struct device *dev; int ret, idx, irq; u32 reg_val; @@ -505,8 +504,7 @@ static int dmc520_edac_probe(struct platform_device *pdev) } /* Initialize dmc520 edac */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - reg_base = devm_ioremap_resource(dev, res); + reg_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(reg_base)) return PTR_ERR(reg_base); diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c index cf2b618c1ada..1eb0136c6fbd 100644 --- a/drivers/edac/ghes_edac.c +++ b/drivers/edac/ghes_edac.c @@ -547,7 +547,7 @@ static int __init ghes_edac_init(void) return -ENODEV; if (list_empty(ghes_devs)) { - pr_info("GHES probing device list is empty"); + pr_info("GHES probing device list is empty\n"); return -ENODEV; } diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c index 3fd22a1eb1a9..24dd896d9a9d 100644 --- a/drivers/edac/i10nm_base.c +++ b/drivers/edac/i10nm_base.c @@ -942,16 +942,16 @@ static struct res_config gnr_cfg = { }; static const struct x86_cpu_id i10nm_cpuids[] = { - X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPINGS(0x0, 0x3), &i10nm_cfg0), - X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1), - X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ICELAKE_X, X86_STEPPINGS(0x0, 0x3), &i10nm_cfg0), - X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ICELAKE_X, X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1), - X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ICELAKE_D, X86_STEPPINGS(0x0, 0xf), &i10nm_cfg1), - X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SAPPHIRERAPIDS_X, X86_STEPPINGS(0x0, 0xf), &spr_cfg), - X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(EMERALDRAPIDS_X, X86_STEPPINGS(0x0, 0xf), &spr_cfg), - X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(GRANITERAPIDS_X, X86_STEPPINGS(0x0, 0xf), &gnr_cfg), - X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ATOM_CRESTMONT_X, X86_STEPPINGS(0x0, 0xf), &gnr_cfg), - X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ATOM_CRESTMONT, X86_STEPPINGS(0x0, 0xf), &gnr_cfg), + X86_MATCH_VFM_STEPPINGS(INTEL_ATOM_TREMONT_D, X86_STEPPINGS(0x0, 0x3), &i10nm_cfg0), + X86_MATCH_VFM_STEPPINGS(INTEL_ATOM_TREMONT_D, X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1), 
+ X86_MATCH_VFM_STEPPINGS(INTEL_ICELAKE_X, X86_STEPPINGS(0x0, 0x3), &i10nm_cfg0), + X86_MATCH_VFM_STEPPINGS(INTEL_ICELAKE_X, X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1), + X86_MATCH_VFM_STEPPINGS(INTEL_ICELAKE_D, X86_STEPPINGS(0x0, 0xf), &i10nm_cfg1), + X86_MATCH_VFM_STEPPINGS(INTEL_SAPPHIRERAPIDS_X, X86_STEPPINGS(0x0, 0xf), &spr_cfg), + X86_MATCH_VFM_STEPPINGS(INTEL_EMERALDRAPIDS_X, X86_STEPPINGS(0x0, 0xf), &spr_cfg), + X86_MATCH_VFM_STEPPINGS(INTEL_GRANITERAPIDS_X, X86_STEPPINGS(0x0, 0xf), &gnr_cfg), + X86_MATCH_VFM_STEPPINGS(INTEL_ATOM_CRESTMONT_X, X86_STEPPINGS(0x0, 0xf), &gnr_cfg), + X86_MATCH_VFM_STEPPINGS(INTEL_ATOM_CRESTMONT, X86_STEPPINGS(0x0, 0xf), &gnr_cfg), {} }; MODULE_DEVICE_TABLE(x86cpu, i10nm_cpuids); diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c index dbe9fe5f2ca6..0fe75eed8973 100644 --- a/drivers/edac/igen6_edac.c +++ b/drivers/edac/igen6_edac.c @@ -258,6 +258,11 @@ static struct work_struct ecclog_work; #define DID_MTL_P_SKU2 0x7d02 #define DID_MTL_P_SKU3 0x7d14 +/* Compute die IDs for Arrow Lake-UH with IBECC */ +#define DID_ARL_UH_SKU1 0x7d06 +#define DID_ARL_UH_SKU2 0x7d20 +#define DID_ARL_UH_SKU3 0x7d30 + static int get_mchbar(struct pci_dev *pdev, u64 *mchbar) { union { @@ -597,6 +602,9 @@ static const struct pci_device_id igen6_pci_tbl[] = { { PCI_VDEVICE(INTEL, DID_MTL_P_SKU1), (kernel_ulong_t)&mtl_p_cfg }, { PCI_VDEVICE(INTEL, DID_MTL_P_SKU2), (kernel_ulong_t)&mtl_p_cfg }, { PCI_VDEVICE(INTEL, DID_MTL_P_SKU3), (kernel_ulong_t)&mtl_p_cfg }, + { PCI_VDEVICE(INTEL, DID_ARL_UH_SKU1), (kernel_ulong_t)&mtl_p_cfg }, + { PCI_VDEVICE(INTEL, DID_ARL_UH_SKU2), (kernel_ulong_t)&mtl_p_cfg }, + { PCI_VDEVICE(INTEL, DID_ARL_UH_SKU3), (kernel_ulong_t)&mtl_p_cfg }, { }, }; MODULE_DEVICE_TABLE(pci, igen6_pci_tbl); diff --git a/drivers/edac/layerscape_edac.c b/drivers/edac/layerscape_edac.c index d2f895033280..0d42c1238908 100644 --- a/drivers/edac/layerscape_edac.c +++ b/drivers/edac/layerscape_edac.c @@ -69,6 +69,7 @@ static void __exit fsl_ddr_mc_exit(void) module_exit(fsl_ddr_mc_exit); +MODULE_DESCRIPTION("Freescale Layerscape EDAC driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("NXP Semiconductor"); module_param(edac_op_state, int, 0444); diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c index e8945d4adbad..d0266cbcbeda 100644 --- a/drivers/edac/mpc85xx_edac.c +++ b/drivers/edac/mpc85xx_edac.c @@ -704,6 +704,7 @@ static void __exit mpc85xx_mc_exit(void) module_exit(mpc85xx_mc_exit); +MODULE_DESCRIPTION("Freescale MPC85xx Memory Controller EDAC driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Montavista Software, Inc."); module_param(edac_op_state, int, 0444); diff --git a/drivers/edac/octeon_edac-l2c.c b/drivers/edac/octeon_edac-l2c.c index 919095d10528..2adb9c8093f8 100644 --- a/drivers/edac/octeon_edac-l2c.c +++ b/drivers/edac/octeon_edac-l2c.c @@ -201,5 +201,6 @@ static struct platform_driver octeon_l2c_driver = { }; module_platform_driver(octeon_l2c_driver); +MODULE_DESCRIPTION("Cavium Octeon Secondary Caches (L2C) EDAC driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>"); diff --git a/drivers/edac/octeon_edac-lmc.c b/drivers/edac/octeon_edac-lmc.c index 18615cbcd9ea..4112c2ee34b8 100644 --- a/drivers/edac/octeon_edac-lmc.c +++ b/drivers/edac/octeon_edac-lmc.c @@ -319,5 +319,6 @@ static struct platform_driver octeon_lmc_edac_driver = { }; module_platform_driver(octeon_lmc_edac_driver); +MODULE_DESCRIPTION("Cavium Octeon DRAM Memory Controller (LMC) EDAC driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Ralf 
Baechle <ralf@linux-mips.org>"); diff --git a/drivers/edac/octeon_edac-pc.c b/drivers/edac/octeon_edac-pc.c index b8404cc7b65f..d9eeb40d2784 100644 --- a/drivers/edac/octeon_edac-pc.c +++ b/drivers/edac/octeon_edac-pc.c @@ -137,5 +137,6 @@ static struct platform_driver co_cache_error_driver = { }; module_platform_driver(co_cache_error_driver); +MODULE_DESCRIPTION("Cavium Octeon Primary Caches EDAC driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>"); diff --git a/drivers/edac/octeon_edac-pci.c b/drivers/edac/octeon_edac-pci.c index 108ad9493cfb..4d368af2c5f0 100644 --- a/drivers/edac/octeon_edac-pci.c +++ b/drivers/edac/octeon_edac-pci.c @@ -104,5 +104,6 @@ static struct platform_driver octeon_pci_driver = { }; module_platform_driver(octeon_pci_driver); +MODULE_DESCRIPTION("Cavium Octeon PCI Controller EDAC driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>"); diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c index 2afcd148fcf8..f93f2f2b1cf2 100644 --- a/drivers/edac/pnd2_edac.c +++ b/drivers/edac/pnd2_edac.c @@ -1511,8 +1511,8 @@ static struct dunit_ops dnv_ops = { }; static const struct x86_cpu_id pnd2_cpuids[] = { - X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &apl_ops), - X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &dnv_ops), + X86_MATCH_VFM(INTEL_ATOM_GOLDMONT, &apl_ops), + X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_D, &dnv_ops), { } }; MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids); diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c index 26cca5a9322d..cbc92d3683e6 100644 --- a/drivers/edac/sb_edac.c +++ b/drivers/edac/sb_edac.c @@ -3546,13 +3546,13 @@ fail0: } static const struct x86_cpu_id sbridge_cpuids[] = { - X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X, &pci_dev_descr_sbridge_table), - X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X, &pci_dev_descr_ibridge_table), - X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, &pci_dev_descr_haswell_table), - X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, &pci_dev_descr_broadwell_table), - X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, &pci_dev_descr_broadwell_table), - X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &pci_dev_descr_knl_table), - X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &pci_dev_descr_knl_table), + X86_MATCH_VFM(INTEL_SANDYBRIDGE_X, &pci_dev_descr_sbridge_table), + X86_MATCH_VFM(INTEL_IVYBRIDGE_X, &pci_dev_descr_ibridge_table), + X86_MATCH_VFM(INTEL_HASWELL_X, &pci_dev_descr_haswell_table), + X86_MATCH_VFM(INTEL_BROADWELL_X, &pci_dev_descr_broadwell_table), + X86_MATCH_VFM(INTEL_BROADWELL_D, &pci_dev_descr_broadwell_table), + X86_MATCH_VFM(INTEL_XEON_PHI_KNL, &pci_dev_descr_knl_table), + X86_MATCH_VFM(INTEL_XEON_PHI_KNM, &pci_dev_descr_knl_table), { } }; MODULE_DEVICE_TABLE(x86cpu, sbridge_cpuids); diff --git a/drivers/edac/skx_base.c b/drivers/edac/skx_base.c index 0a862336a7ce..af3fa807acdb 100644 --- a/drivers/edac/skx_base.c +++ b/drivers/edac/skx_base.c @@ -164,7 +164,7 @@ static struct res_config skx_cfg = { }; static const struct x86_cpu_id skx_cpuids[] = { - X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x0, 0xf), &skx_cfg), + X86_MATCH_VFM_STEPPINGS(INTEL_SKYLAKE_X, X86_STEPPINGS(0x0, 0xf), &skx_cfg), { } }; MODULE_DEVICE_TABLE(x86cpu, skx_cpuids); diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c index 27996b7924c8..8d18099fd528 100644 --- a/drivers/edac/skx_common.c +++ b/drivers/edac/skx_common.c @@ -48,7 +48,7 @@ static u64 skx_tolm, skx_tohm; static LIST_HEAD(dev_edac_list); static bool skx_mem_cfg_2lm; -int __init skx_adxl_get(void) +int 
skx_adxl_get(void) { const char * const *names; int i, j; @@ -110,12 +110,14 @@ err: return -ENODEV; } +EXPORT_SYMBOL_GPL(skx_adxl_get); -void __exit skx_adxl_put(void) +void skx_adxl_put(void) { kfree(adxl_values); kfree(adxl_msg); } +EXPORT_SYMBOL_GPL(skx_adxl_put); static bool skx_adxl_decode(struct decoded_addr *res, bool error_in_1st_level_mem) { @@ -187,12 +189,14 @@ void skx_set_mem_cfg(bool mem_cfg_2lm) { skx_mem_cfg_2lm = mem_cfg_2lm; } +EXPORT_SYMBOL_GPL(skx_set_mem_cfg); void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log) { driver_decode = decode; skx_show_retry_rd_err_log = show_retry_log; } +EXPORT_SYMBOL_GPL(skx_set_decode); int skx_get_src_id(struct skx_dev *d, int off, u8 *id) { @@ -206,6 +210,7 @@ int skx_get_src_id(struct skx_dev *d, int off, u8 *id) *id = GET_BITFIELD(reg, 12, 14); return 0; } +EXPORT_SYMBOL_GPL(skx_get_src_id); int skx_get_node_id(struct skx_dev *d, u8 *id) { @@ -219,6 +224,7 @@ int skx_get_node_id(struct skx_dev *d, u8 *id) *id = GET_BITFIELD(reg, 0, 2); return 0; } +EXPORT_SYMBOL_GPL(skx_get_node_id); static int get_width(u32 mtr) { @@ -284,6 +290,7 @@ int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list) *list = &dev_edac_list; return ndev; } +EXPORT_SYMBOL_GPL(skx_get_all_bus_mappings); int skx_get_hi_lo(unsigned int did, int off[], u64 *tolm, u64 *tohm) { @@ -323,6 +330,7 @@ fail: pci_dev_put(pdev); return -ENODEV; } +EXPORT_SYMBOL_GPL(skx_get_hi_lo); static int skx_get_dimm_attr(u32 reg, int lobit, int hibit, int add, int minval, int maxval, const char *name) @@ -394,6 +402,7 @@ int skx_get_dimm_info(u32 mtr, u32 mcmtr, u32 amap, struct dimm_info *dimm, return 1; } +EXPORT_SYMBOL_GPL(skx_get_dimm_info); int skx_get_nvdimm_info(struct dimm_info *dimm, struct skx_imc *imc, int chan, int dimmno, const char *mod_str) @@ -442,6 +451,7 @@ unknown_size: return (size == 0 || size == ~0ull) ? 
0 : 1; } +EXPORT_SYMBOL_GPL(skx_get_nvdimm_info); int skx_register_mci(struct skx_imc *imc, struct pci_dev *pdev, const char *ctl_name, const char *mod_str, @@ -512,6 +522,7 @@ fail0: imc->mci = NULL; return rc; } +EXPORT_SYMBOL_GPL(skx_register_mci); static void skx_unregister_mci(struct skx_imc *imc) { @@ -688,6 +699,7 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val, mce->kflags |= MCE_HANDLED_EDAC; return NOTIFY_DONE; } +EXPORT_SYMBOL_GPL(skx_mce_check_error); void skx_remove(void) { @@ -725,3 +737,8 @@ void skx_remove(void) kfree(d); } } +EXPORT_SYMBOL_GPL(skx_remove); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Tony Luck"); +MODULE_DESCRIPTION("MC Driver for Intel server processors"); diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h index b6d3607dffe2..11faf1db4fa4 100644 --- a/drivers/edac/skx_common.h +++ b/drivers/edac/skx_common.h @@ -231,8 +231,8 @@ typedef int (*get_dimm_config_f)(struct mem_ctl_info *mci, typedef bool (*skx_decode_f)(struct decoded_addr *res); typedef void (*skx_show_retry_log_f)(struct decoded_addr *res, char *msg, int len, bool scrub_err); -int __init skx_adxl_get(void); -void __exit skx_adxl_put(void); +int skx_adxl_get(void); +void skx_adxl_put(void); void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log); void skx_set_mem_cfg(bool mem_cfg_2lm); diff --git a/drivers/edac/thunderx_edac.c b/drivers/edac/thunderx_edac.c index fab9891e569a..75c04dfc3962 100644 --- a/drivers/edac/thunderx_edac.c +++ b/drivers/edac/thunderx_edac.c @@ -35,12 +35,6 @@ enum { ERR_UNKNOWN = 3, }; -#define MAX_SYNDROME_REGS 4 - -struct error_syndrome { - u64 reg[MAX_SYNDROME_REGS]; -}; - struct error_descr { int type; u64 mask; diff --git a/drivers/firmware/arm_ffa/Makefile b/drivers/firmware/arm_ffa/Makefile index 9d9f37523200..168990a7e792 100644 --- a/drivers/firmware/arm_ffa/Makefile +++ b/drivers/firmware/arm_ffa/Makefile @@ -2,5 +2,7 @@ ffa-bus-y = bus.o ffa-driver-y = driver.o ffa-transport-$(CONFIG_ARM_FFA_SMCCC) += smccc.o -ffa-module-objs := $(ffa-bus-y) $(ffa-driver-y) $(ffa-transport-y) -obj-$(CONFIG_ARM_FFA_TRANSPORT) = ffa-module.o +ffa-core-objs := $(ffa-bus-y) +ffa-module-objs := $(ffa-driver-y) $(ffa-transport-y) +obj-$(CONFIG_ARM_FFA_TRANSPORT) = ffa-core.o +obj-$(CONFIG_ARM_FFA_TRANSPORT) += ffa-module.o diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c index 2f557e90f2eb..0c83931485f6 100644 --- a/drivers/firmware/arm_ffa/bus.c +++ b/drivers/firmware/arm_ffa/bus.c @@ -30,12 +30,11 @@ static int ffa_device_match(struct device *dev, struct device_driver *drv) while (!uuid_is_null(&id_table->uuid)) { /* * FF-A v1.0 doesn't provide discovery of UUIDs, just the - * partition IDs, so fetch the partitions IDs for this - * id_table UUID and assign the UUID to the device if the - * partition ID matches + * partition IDs, so match it unconditionally here and handle + * it via the installed bus notifier during driver binding. 
*/ if (uuid_is_null(&ffa_dev->uuid)) - ffa_device_match_uuid(ffa_dev, &id_table->uuid); + return 1; if (uuid_equal(&ffa_dev->uuid, &id_table->uuid)) return 1; @@ -50,6 +49,10 @@ static int ffa_device_probe(struct device *dev) struct ffa_driver *ffa_drv = to_ffa_driver(dev->driver); struct ffa_device *ffa_dev = to_ffa_dev(dev); + /* UUID can still be NULL with FF-A v1.0, so just skip probing such devices */ + if (uuid_is_null(&ffa_dev->uuid)) + return -ENODEV; + return ffa_drv->probe(ffa_dev); } @@ -232,14 +235,21 @@ void ffa_device_unregister(struct ffa_device *ffa_dev) } EXPORT_SYMBOL_GPL(ffa_device_unregister); -int arm_ffa_bus_init(void) +static int __init arm_ffa_bus_init(void) { return bus_register(&ffa_bus_type); } +subsys_initcall(arm_ffa_bus_init); -void arm_ffa_bus_exit(void) +static void __exit arm_ffa_bus_exit(void) { ffa_devices_unregister(); bus_unregister(&ffa_bus_type); ida_destroy(&ffa_bus_id); } +module_exit(arm_ffa_bus_exit); + +MODULE_ALIAS("ffa-core"); +MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>"); +MODULE_DESCRIPTION("ARM FF-A bus"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/arm_ffa/common.h b/drivers/firmware/arm_ffa/common.h index d6eccf1fd3f6..9c6425a81d0d 100644 --- a/drivers/firmware/arm_ffa/common.h +++ b/drivers/firmware/arm_ffa/common.h @@ -14,8 +14,6 @@ typedef struct arm_smccc_1_2_regs ffa_value_t; typedef void (ffa_fn)(ffa_value_t, ffa_value_t *); -int arm_ffa_bus_init(void); -void arm_ffa_bus_exit(void); bool ffa_device_is_valid(struct ffa_device *ffa_dev); void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid); diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c index 1609247cfafc..7ba98c7af2e9 100644 --- a/drivers/firmware/arm_ffa/driver.c +++ b/drivers/firmware/arm_ffa/driver.c @@ -1224,14 +1224,6 @@ void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid) int count, idx; struct ffa_partition_info *pbuf, *tpbuf; - /* - * FF-A v1.1 provides UUID for each partition as part of the discovery - * API, the discovered UUID must be populated in the device's UUID and - * there is no need to copy the same from the driver table. - */ - if (drv_info->version > FFA_VERSION_1_0) - return; - count = ffa_partition_probe(uuid, &pbuf); if (count <= 0) return; @@ -1242,6 +1234,35 @@ void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid) kfree(pbuf); } +static int +ffa_bus_notifier(struct notifier_block *nb, unsigned long action, void *data) +{ + struct device *dev = data; + struct ffa_device *fdev = to_ffa_dev(dev); + + if (action == BUS_NOTIFY_BIND_DRIVER) { + struct ffa_driver *ffa_drv = to_ffa_driver(dev->driver); + const struct ffa_device_id *id_table = ffa_drv->id_table; + + /* + * FF-A v1.1 provides UUID for each partition as part of the + * discovery API, the discovered UUID must be populated in the + * device's UUID and there is no need to work around that by + * copying the UUID from the driver table.
+ */ + if (uuid_is_null(&fdev->uuid)) + ffa_device_match_uuid(fdev, &id_table->uuid); + + return NOTIFY_OK; + } + + return NOTIFY_DONE; +} + +static struct notifier_block ffa_bus_nb = { + .notifier_call = ffa_bus_notifier, +}; + static int ffa_setup_partitions(void) { int count, idx, ret; @@ -1250,6 +1271,12 @@ static int ffa_setup_partitions(void) struct ffa_dev_part_info *info; struct ffa_partition_info *pbuf, *tpbuf; + if (drv_info->version == FFA_VERSION_1_0) { + ret = bus_register_notifier(&ffa_bus_type, &ffa_bus_nb); + if (ret) + pr_err("Failed to register FF-A bus notifiers\n"); + } + count = ffa_partition_probe(&uuid_null, &pbuf); if (count <= 0) { pr_info("%s: No partitions found, error %d\n", __func__, count); @@ -1261,7 +1288,7 @@ static int ffa_setup_partitions(void) import_uuid(&uuid, (u8 *)tpbuf->uuid); /* Note that if the UUID is uuid_null, that will require - * ffa_device_match() to find the UUID of this partition id + * ffa_bus_notifier() to find the UUID of this partition id * with the help of ffa_device_match_uuid(). FF-A v1.1 and above * provides UUID here for each partition as part of the * discovery API and the same is passed. @@ -1581,14 +1608,9 @@ static int __init ffa_init(void) if (ret) return ret; - ret = arm_ffa_bus_init(); - if (ret) - return ret; - drv_info = kzalloc(sizeof(*drv_info), GFP_KERNEL); if (!drv_info) { - ret = -ENOMEM; - goto ffa_bus_exit; + return -ENOMEM; } ret = ffa_version_check(&drv_info->version); @@ -1649,11 +1671,9 @@ free_pages: free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE); free_drv_info: kfree(drv_info); -ffa_bus_exit: - arm_ffa_bus_exit(); return ret; } -subsys_initcall(ffa_init); +module_init(ffa_init); static void __exit ffa_exit(void) { @@ -1663,7 +1683,6 @@ static void __exit ffa_exit(void) free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE); free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE); kfree(drv_info); - arm_ffa_bus_exit(); } module_exit(ffa_exit); diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h index b5ac25dbc1ca..4b8c5250cdb5 100644 --- a/drivers/firmware/arm_scmi/common.h +++ b/drivers/firmware/arm_scmi/common.h @@ -326,6 +326,7 @@ void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem); bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem, struct scmi_xfer *xfer); bool shmem_channel_free(struct scmi_shared_mem __iomem *shmem); +bool shmem_channel_intr_enabled(struct scmi_shared_mem __iomem *shmem); /* declarations for message passing transports */ struct scmi_msg_payld; diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c index 615a3b2ad83d..0219a12e3209 100644 --- a/drivers/firmware/arm_scmi/mailbox.c +++ b/drivers/firmware/arm_scmi/mailbox.c @@ -21,6 +21,7 @@ * @cl: Mailbox Client * @chan: Transmit/Receive mailbox uni/bi-directional channel * @chan_receiver: Optional Receiver mailbox unidirectional channel + * @chan_platform_receiver: Optional Platform Receiver mailbox unidirectional channel * @cinfo: SCMI channel info * @shmem: Transmit/Receive shared memory area */ @@ -28,6 +29,7 @@ struct scmi_mailbox { struct mbox_client cl; struct mbox_chan *chan; struct mbox_chan *chan_receiver; + struct mbox_chan *chan_platform_receiver; struct scmi_chan_info *cinfo; struct scmi_shared_mem __iomem *shmem; }; @@ -91,6 +93,8 @@ static bool mailbox_chan_available(struct device_node *of_node, int idx) * for replies on the a2p channel. Set as zero if not present. * @p2a_chan: A reference to the optional p2a channel.
* Set as zero if not present. + * @p2a_rx_chan: A reference to the optional p2a completion channel. + * Set as zero if not present. * * At first, validate the transport configuration as described in terms of * 'mboxes' and 'shmem', then determine which mailbox channel indexes are @@ -98,8 +102,8 @@ static bool mailbox_chan_available(struct device_node *of_node, int idx) * * Return: 0 on Success or error */ -static int mailbox_chan_validate(struct device *cdev, - int *a2p_rx_chan, int *p2a_chan) +static int mailbox_chan_validate(struct device *cdev, int *a2p_rx_chan, + int *p2a_chan, int *p2a_rx_chan) { int num_mb, num_sh, ret = 0; struct device_node *np = cdev->of_node; @@ -109,8 +113,9 @@ static int mailbox_chan_validate(struct device *cdev, dev_dbg(cdev, "Found %d mboxes and %d shmems !\n", num_mb, num_sh); /* Bail out if mboxes and shmem descriptors are inconsistent */ - if (num_mb <= 0 || num_sh <= 0 || num_sh > 2 || num_mb > 3 || - (num_mb == 1 && num_sh != 1) || (num_mb == 3 && num_sh != 2)) { + if (num_mb <= 0 || num_sh <= 0 || num_sh > 2 || num_mb > 4 || + (num_mb == 1 && num_sh != 1) || (num_mb == 3 && num_sh != 2) || + (num_mb == 4 && num_sh != 2)) { dev_warn(cdev, "Invalid channel descriptor for '%s' - mbs:%d shm:%d\n", of_node_full_name(np), num_mb, num_sh); @@ -139,6 +144,7 @@ static int mailbox_chan_validate(struct device *cdev, case 1: *a2p_rx_chan = 0; *p2a_chan = 0; + *p2a_rx_chan = 0; break; case 2: if (num_sh == 2) { @@ -148,10 +154,17 @@ *a2p_rx_chan = 1; *p2a_chan = 0; } + *p2a_rx_chan = 0; break; case 3: *a2p_rx_chan = 1; *p2a_chan = 2; + *p2a_rx_chan = 0; + break; + case 4: + *a2p_rx_chan = 1; + *p2a_chan = 2; + *p2a_rx_chan = 3; break; } } @@ -166,12 +179,12 @@ static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, struct device *cdev = cinfo->dev; struct scmi_mailbox *smbox; struct device_node *shmem; - int ret, a2p_rx_chan, p2a_chan, idx = tx ? 0 : 1; + int ret, a2p_rx_chan, p2a_chan, p2a_rx_chan, idx = tx ?
0 : 1; struct mbox_client *cl; resource_size_t size; struct resource res; - ret = mailbox_chan_validate(cdev, &a2p_rx_chan, &p2a_chan); + ret = mailbox_chan_validate(cdev, &a2p_rx_chan, &p2a_chan, &p2a_rx_chan); if (ret) return ret; @@ -229,6 +242,17 @@ static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, } } + if (!tx && p2a_rx_chan) { + smbox->chan_platform_receiver = mbox_request_channel(cl, p2a_rx_chan); + if (IS_ERR(smbox->chan_platform_receiver)) { + ret = PTR_ERR(smbox->chan_platform_receiver); + if (ret != -EPROBE_DEFER) + dev_err(cdev, "failed to request SCMI P2A Receiver mailbox\n"); + return ret; + } + } + + cinfo->transport_info = smbox; smbox->cinfo = cinfo; @@ -243,9 +267,11 @@ static int mailbox_chan_free(int id, void *p, void *data) if (smbox && !IS_ERR(smbox->chan)) { mbox_free_channel(smbox->chan); mbox_free_channel(smbox->chan_receiver); + mbox_free_channel(smbox->chan_platform_receiver); cinfo->transport_info = NULL; smbox->chan = NULL; smbox->chan_receiver = NULL; + smbox->chan_platform_receiver = NULL; smbox->cinfo = NULL; } @@ -300,8 +326,27 @@ static void mailbox_fetch_notification(struct scmi_chan_info *cinfo, static void mailbox_clear_channel(struct scmi_chan_info *cinfo) { struct scmi_mailbox *smbox = cinfo->transport_info; + struct mbox_chan *intr_chan; + int ret; shmem_clear_channel(smbox->shmem); + + if (!shmem_channel_intr_enabled(smbox->shmem)) + return; + + if (smbox->chan_platform_receiver) + intr_chan = smbox->chan_platform_receiver; + else if (smbox->chan) + intr_chan = smbox->chan; + else + return; + + ret = mbox_send_message(intr_chan, NULL); + /* mbox_send_message returns non-negative value on success, so reset */ + if (ret > 0) + ret = 0; + + mbox_client_txdone(intr_chan, ret); } static bool diff --git a/drivers/firmware/arm_scmi/scmi_power_control.c b/drivers/firmware/arm_scmi/scmi_power_control.c index 6eb7d2a4b6b1..21f467a92942 100644 --- a/drivers/firmware/arm_scmi/scmi_power_control.c +++ b/drivers/firmware/arm_scmi/scmi_power_control.c @@ -50,6 +50,7 @@ #include <linux/reboot.h> #include <linux/scmi_protocol.h> #include <linux/slab.h> +#include <linux/suspend.h> #include <linux/time64.h> #include <linux/timer.h> #include <linux/types.h> @@ -78,6 +79,7 @@ enum scmi_syspower_state { * @reboot_nb: A notifier_block optionally used to track reboot progress * @forceful_work: A worker used to trigger a forceful transition once a * graceful has timed out. 
+ * @suspend_work: A worker used to trigger system suspend */ struct scmi_syspower_conf { struct device *dev; @@ -90,6 +92,7 @@ struct scmi_syspower_conf { struct notifier_block reboot_nb; struct delayed_work forceful_work; + struct work_struct suspend_work; }; #define userspace_nb_to_sconf(x) \ @@ -249,6 +252,9 @@ static void scmi_request_graceful_transition(struct scmi_syspower_conf *sc, case SCMI_SYSTEM_WARMRESET: orderly_reboot(); break; + case SCMI_SYSTEM_SUSPEND: + schedule_work(&sc->suspend_work); + break; default: break; } @@ -277,7 +283,8 @@ static int scmi_userspace_notifier(struct notifier_block *nb, struct scmi_system_power_state_notifier_report *er = data; struct scmi_syspower_conf *sc = userspace_nb_to_sconf(nb); - if (er->system_state >= SCMI_SYSTEM_POWERUP) { + if (er->system_state >= SCMI_SYSTEM_MAX || + er->system_state == SCMI_SYSTEM_POWERUP) { dev_err(sc->dev, "Ignoring unsupported system_state: 0x%X\n", er->system_state); return NOTIFY_DONE; @@ -315,6 +322,16 @@ static int scmi_userspace_notifier(struct notifier_block *nb, return NOTIFY_OK; } +static void scmi_suspend_work_func(struct work_struct *work) +{ + struct scmi_syspower_conf *sc = + container_of(work, struct scmi_syspower_conf, suspend_work); + + pm_suspend(PM_SUSPEND_MEM); + + sc->state = SCMI_SYSPOWER_IDLE; +} + static int scmi_syspower_probe(struct scmi_device *sdev) { int ret; @@ -338,6 +355,8 @@ static int scmi_syspower_probe(struct scmi_device *sdev) sc->userspace_nb.notifier_call = &scmi_userspace_notifier; sc->dev = &sdev->dev; + INIT_WORK(&sc->suspend_work, scmi_suspend_work_func); + return handle->notify_ops->devm_event_notifier_register(sdev, SCMI_PROTOCOL_SYSTEM, SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER, diff --git a/drivers/firmware/arm_scmi/shmem.c b/drivers/firmware/arm_scmi/shmem.c index 8bf495bcad09..b74e5a740f2c 100644 --- a/drivers/firmware/arm_scmi/shmem.c +++ b/drivers/firmware/arm_scmi/shmem.c @@ -128,3 +128,8 @@ bool shmem_channel_free(struct scmi_shared_mem __iomem *shmem) return (ioread32(&shmem->channel_status) & SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE); } + +bool shmem_channel_intr_enabled(struct scmi_shared_mem __iomem *shmem) +{ + return ioread32(&shmem->flags) & SCMI_SHMEM_FLAG_INTR_ENABLED; +} diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c index 0d139e4de37c..8a347b938406 100644 --- a/drivers/firmware/cirrus/cs_dsp.c +++ b/drivers/firmware/cirrus/cs_dsp.c @@ -1107,9 +1107,16 @@ struct cs_dsp_coeff_parsed_coeff { int len; }; -static int cs_dsp_coeff_parse_string(int bytes, const u8 **pos, const u8 **str) +static int cs_dsp_coeff_parse_string(int bytes, const u8 **pos, unsigned int avail, + const u8 **str) { - int length; + int length, total_field_len; + + /* String fields are at least one __le32 */ + if (sizeof(__le32) > avail) { + *pos = NULL; + return 0; + } switch (bytes) { case 1: @@ -1122,10 +1129,16 @@ static int cs_dsp_coeff_parse_string(int bytes, const u8 **pos, const u8 **str) return 0; } + total_field_len = ((length + bytes) + 3) & ~0x03; + if ((unsigned int)total_field_len > avail) { + *pos = NULL; + return 0; + } + if (str) *str = *pos + bytes; - *pos += ((length + bytes) + 3) & ~0x03; + *pos += total_field_len; return length; } @@ -1150,71 +1163,134 @@ static int cs_dsp_coeff_parse_int(int bytes, const u8 **pos) return val; } -static inline void cs_dsp_coeff_parse_alg(struct cs_dsp *dsp, const u8 **data, - struct cs_dsp_coeff_parsed_alg *blk) +static int cs_dsp_coeff_parse_alg(struct cs_dsp *dsp, + const struct wmfw_region *region, + struct 
cs_dsp_coeff_parsed_alg *blk) { const struct wmfw_adsp_alg_data *raw; + unsigned int data_len = le32_to_cpu(region->len); + unsigned int pos; + const u8 *tmp; + + raw = (const struct wmfw_adsp_alg_data *)region->data; switch (dsp->fw_ver) { case 0: case 1: - raw = (const struct wmfw_adsp_alg_data *)*data; - *data = raw->data; + if (sizeof(*raw) > data_len) + return -EOVERFLOW; blk->id = le32_to_cpu(raw->id); blk->name = raw->name; - blk->name_len = strlen(raw->name); + blk->name_len = strnlen(raw->name, ARRAY_SIZE(raw->name)); blk->ncoeff = le32_to_cpu(raw->ncoeff); + + pos = sizeof(*raw); break; default: - blk->id = cs_dsp_coeff_parse_int(sizeof(raw->id), data); - blk->name_len = cs_dsp_coeff_parse_string(sizeof(u8), data, + if (sizeof(raw->id) > data_len) + return -EOVERFLOW; + + tmp = region->data; + blk->id = cs_dsp_coeff_parse_int(sizeof(raw->id), &tmp); + pos = tmp - region->data; + + tmp = ®ion->data[pos]; + blk->name_len = cs_dsp_coeff_parse_string(sizeof(u8), &tmp, data_len - pos, &blk->name); - cs_dsp_coeff_parse_string(sizeof(u16), data, NULL); - blk->ncoeff = cs_dsp_coeff_parse_int(sizeof(raw->ncoeff), data); + if (!tmp) + return -EOVERFLOW; + + pos = tmp - region->data; + cs_dsp_coeff_parse_string(sizeof(u16), &tmp, data_len - pos, NULL); + if (!tmp) + return -EOVERFLOW; + + pos = tmp - region->data; + if (sizeof(raw->ncoeff) > (data_len - pos)) + return -EOVERFLOW; + + blk->ncoeff = cs_dsp_coeff_parse_int(sizeof(raw->ncoeff), &tmp); + pos += sizeof(raw->ncoeff); break; } + if ((int)blk->ncoeff < 0) + return -EOVERFLOW; + cs_dsp_dbg(dsp, "Algorithm ID: %#x\n", blk->id); cs_dsp_dbg(dsp, "Algorithm name: %.*s\n", blk->name_len, blk->name); cs_dsp_dbg(dsp, "# of coefficient descriptors: %#x\n", blk->ncoeff); + + return pos; } -static inline void cs_dsp_coeff_parse_coeff(struct cs_dsp *dsp, const u8 **data, - struct cs_dsp_coeff_parsed_coeff *blk) +static int cs_dsp_coeff_parse_coeff(struct cs_dsp *dsp, + const struct wmfw_region *region, + unsigned int pos, + struct cs_dsp_coeff_parsed_coeff *blk) { const struct wmfw_adsp_coeff_data *raw; + unsigned int data_len = le32_to_cpu(region->len); + unsigned int blk_len, blk_end_pos; const u8 *tmp; - int length; + + raw = (const struct wmfw_adsp_coeff_data *)®ion->data[pos]; + if (sizeof(raw->hdr) > (data_len - pos)) + return -EOVERFLOW; + + blk_len = le32_to_cpu(raw->hdr.size); + if (blk_len > S32_MAX) + return -EOVERFLOW; + + if (blk_len > (data_len - pos - sizeof(raw->hdr))) + return -EOVERFLOW; + + blk_end_pos = pos + sizeof(raw->hdr) + blk_len; + + blk->offset = le16_to_cpu(raw->hdr.offset); + blk->mem_type = le16_to_cpu(raw->hdr.type); switch (dsp->fw_ver) { case 0: case 1: - raw = (const struct wmfw_adsp_coeff_data *)*data; - *data = *data + sizeof(raw->hdr) + le32_to_cpu(raw->hdr.size); + if (sizeof(*raw) > (data_len - pos)) + return -EOVERFLOW; - blk->offset = le16_to_cpu(raw->hdr.offset); - blk->mem_type = le16_to_cpu(raw->hdr.type); blk->name = raw->name; - blk->name_len = strlen(raw->name); + blk->name_len = strnlen(raw->name, ARRAY_SIZE(raw->name)); blk->ctl_type = le16_to_cpu(raw->ctl_type); blk->flags = le16_to_cpu(raw->flags); blk->len = le32_to_cpu(raw->len); break; default: - tmp = *data; - blk->offset = cs_dsp_coeff_parse_int(sizeof(raw->hdr.offset), &tmp); - blk->mem_type = cs_dsp_coeff_parse_int(sizeof(raw->hdr.type), &tmp); - length = cs_dsp_coeff_parse_int(sizeof(raw->hdr.size), &tmp); - blk->name_len = cs_dsp_coeff_parse_string(sizeof(u8), &tmp, + pos += sizeof(raw->hdr); + tmp = ®ion->data[pos]; + blk->name_len = 
cs_dsp_coeff_parse_string(sizeof(u8), &tmp, data_len - pos, &blk->name); - cs_dsp_coeff_parse_string(sizeof(u8), &tmp, NULL); - cs_dsp_coeff_parse_string(sizeof(u16), &tmp, NULL); + if (!tmp) + return -EOVERFLOW; + + pos = tmp - region->data; + cs_dsp_coeff_parse_string(sizeof(u8), &tmp, data_len - pos, NULL); + if (!tmp) + return -EOVERFLOW; + + pos = tmp - region->data; + cs_dsp_coeff_parse_string(sizeof(u16), &tmp, data_len - pos, NULL); + if (!tmp) + return -EOVERFLOW; + + pos = tmp - region->data; + if (sizeof(raw->ctl_type) + sizeof(raw->flags) + sizeof(raw->len) > + (data_len - pos)) + return -EOVERFLOW; + blk->ctl_type = cs_dsp_coeff_parse_int(sizeof(raw->ctl_type), &tmp); + pos += sizeof(raw->ctl_type); blk->flags = cs_dsp_coeff_parse_int(sizeof(raw->flags), &tmp); + pos += sizeof(raw->flags); blk->len = cs_dsp_coeff_parse_int(sizeof(raw->len), &tmp); - - *data = *data + sizeof(raw->hdr) + length; break; } @@ -1224,6 +1300,8 @@ static inline void cs_dsp_coeff_parse_coeff(struct cs_dsp *dsp, const u8 **data, cs_dsp_dbg(dsp, "\tCoefficient flags: %#x\n", blk->flags); cs_dsp_dbg(dsp, "\tALSA control type: %#x\n", blk->ctl_type); cs_dsp_dbg(dsp, "\tALSA control len: %#x\n", blk->len); + + return blk_end_pos; } static int cs_dsp_check_coeff_flags(struct cs_dsp *dsp, @@ -1247,12 +1325,16 @@ static int cs_dsp_parse_coeff(struct cs_dsp *dsp, struct cs_dsp_alg_region alg_region = {}; struct cs_dsp_coeff_parsed_alg alg_blk; struct cs_dsp_coeff_parsed_coeff coeff_blk; - const u8 *data = region->data; - int i, ret; + int i, pos, ret; + + pos = cs_dsp_coeff_parse_alg(dsp, region, &alg_blk); + if (pos < 0) + return pos; - cs_dsp_coeff_parse_alg(dsp, &data, &alg_blk); for (i = 0; i < alg_blk.ncoeff; i++) { - cs_dsp_coeff_parse_coeff(dsp, &data, &coeff_blk); + pos = cs_dsp_coeff_parse_coeff(dsp, region, pos, &coeff_blk); + if (pos < 0) + return pos; switch (coeff_blk.ctl_type) { case WMFW_CTL_TYPE_BYTES: @@ -1321,6 +1403,10 @@ static unsigned int cs_dsp_adsp1_parse_sizes(struct cs_dsp *dsp, const struct wmfw_adsp1_sizes *adsp1_sizes; adsp1_sizes = (void *)&firmware->data[pos]; + if (sizeof(*adsp1_sizes) > firmware->size - pos) { + cs_dsp_err(dsp, "%s: file truncated\n", file); + return 0; + } cs_dsp_dbg(dsp, "%s: %d DM, %d PM, %d ZM\n", file, le32_to_cpu(adsp1_sizes->dm), le32_to_cpu(adsp1_sizes->pm), @@ -1337,6 +1423,10 @@ static unsigned int cs_dsp_adsp2_parse_sizes(struct cs_dsp *dsp, const struct wmfw_adsp2_sizes *adsp2_sizes; adsp2_sizes = (void *)&firmware->data[pos]; + if (sizeof(*adsp2_sizes) > firmware->size - pos) { + cs_dsp_err(dsp, "%s: file truncated\n", file); + return 0; + } cs_dsp_dbg(dsp, "%s: %d XM, %d YM %d PM, %d ZM\n", file, le32_to_cpu(adsp2_sizes->xm), le32_to_cpu(adsp2_sizes->ym), @@ -1376,7 +1466,6 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware, struct regmap *regmap = dsp->regmap; unsigned int pos = 0; const struct wmfw_header *header; - const struct wmfw_adsp1_sizes *adsp1_sizes; const struct wmfw_footer *footer; const struct wmfw_region *region; const struct cs_dsp_region *mem; @@ -1392,10 +1481,8 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware, ret = -EINVAL; - pos = sizeof(*header) + sizeof(*adsp1_sizes) + sizeof(*footer); - if (pos >= firmware->size) { - cs_dsp_err(dsp, "%s: file too short, %zu bytes\n", - file, firmware->size); + if (sizeof(*header) >= firmware->size) { + ret = -EOVERFLOW; goto out_fw; } @@ -1423,22 +1510,36 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware, 
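
The cs_dsp hunks above and below all enforce one rule: no length field read from the wmfw file is trusted until it has been checked against the bytes that actually remain, and every violation is reported as -EOVERFLOW. A minimal sketch of that pattern, using hypothetical names (struct blk_hdr, walk_blocks) rather than the driver's own:

    /* Sketch of the bounds-check pattern; illustrative names only. */
    #include <linux/errno.h>
    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct blk_hdr {
        __le32 type;
        __le32 len;    /* payload bytes that follow this header */
    };

    static int walk_blocks(const u8 *data, size_t size)
    {
        size_t pos = 0;

        while (pos < size) {
            const struct blk_hdr *hdr;
            u32 len;

            /* Is there enough data left for a complete header? */
            if (sizeof(*hdr) > size - pos)
                return -EOVERFLOW;

            hdr = (const void *)(data + pos);
            len = le32_to_cpu(hdr->len);

            /*
             * And for the payload the header claims? Subtracting from
             * the remaining size, instead of adding to the offset,
             * means the comparison itself cannot wrap around.
             */
            if (len > size - pos - sizeof(*hdr))
                return -EOVERFLOW;

            pos += sizeof(*hdr) + len;
        }

        return 0;
    }

Deferring the error message to a single "file content overflows file data" print at the exit path, as the hunks below do, keeps each check down to two lines.
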
pos = sizeof(*header); pos = dsp->ops->parse_sizes(dsp, file, pos, firmware); + if ((pos == 0) || (sizeof(*footer) > firmware->size - pos)) { + ret = -EOVERFLOW; + goto out_fw; + } footer = (void *)&firmware->data[pos]; pos += sizeof(*footer); if (le32_to_cpu(header->len) != pos) { - cs_dsp_err(dsp, "%s: unexpected header length %d\n", - file, le32_to_cpu(header->len)); + ret = -EOVERFLOW; goto out_fw; } cs_dsp_dbg(dsp, "%s: timestamp %llu\n", file, le64_to_cpu(footer->timestamp)); - while (pos < firmware->size && - sizeof(*region) < firmware->size - pos) { + while (pos < firmware->size) { + /* Is there enough data for a complete block header? */ + if (sizeof(*region) > firmware->size - pos) { + ret = -EOVERFLOW; + goto out_fw; + } + region = (void *)&(firmware->data[pos]); + + if (le32_to_cpu(region->len) > firmware->size - pos - sizeof(*region)) { + ret = -EOVERFLOW; + goto out_fw; + } + region_name = "Unknown"; reg = 0; text = NULL; @@ -1495,16 +1596,6 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware, regions, le32_to_cpu(region->len), offset, region_name); - if (le32_to_cpu(region->len) > - firmware->size - pos - sizeof(*region)) { - cs_dsp_err(dsp, - "%s.%d: %s region len %d bytes exceeds file length %zu\n", - file, regions, region_name, - le32_to_cpu(region->len), firmware->size); - ret = -EINVAL; - goto out_fw; - } - if (text) { memcpy(text, region->data, le32_to_cpu(region->len)); cs_dsp_info(dsp, "%s: %s\n", file, text); @@ -1555,6 +1646,9 @@ out_fw: cs_dsp_buf_free(&buf_list); kfree(text); + if (ret == -EOVERFLOW) + cs_dsp_err(dsp, "%s: file content overflows file data\n", file); + return ret; } @@ -2122,10 +2216,20 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware pos = le32_to_cpu(hdr->len); blocks = 0; - while (pos < firmware->size && - sizeof(*blk) < firmware->size - pos) { + while (pos < firmware->size) { + /* Is there enough data for a complete block header? 
*/ + if (sizeof(*blk) > firmware->size - pos) { + ret = -EOVERFLOW; + goto out_fw; + } + blk = (void *)(&firmware->data[pos]); + if (le32_to_cpu(blk->len) > firmware->size - pos - sizeof(*blk)) { + ret = -EOVERFLOW; + goto out_fw; + } + type = le16_to_cpu(blk->type); offset = le16_to_cpu(blk->offset); version = le32_to_cpu(blk->ver) >> 8; @@ -2222,17 +2326,6 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware } if (reg) { - if (le32_to_cpu(blk->len) > - firmware->size - pos - sizeof(*blk)) { - cs_dsp_err(dsp, - "%s.%d: %s region len %d bytes exceeds file length %zu\n", - file, blocks, region_name, - le32_to_cpu(blk->len), - firmware->size); - ret = -EINVAL; - goto out_fw; - } - buf = cs_dsp_buf_alloc(blk->data, le32_to_cpu(blk->len), &buf_list); @@ -2272,6 +2365,10 @@ out_fw: regmap_async_complete(regmap); cs_dsp_buf_free(&buf_list); kfree(text); + + if (ret == -EOVERFLOW) + cs_dsp_err(dsp, "%s: file content overflows file data\n", file); + return ret; } diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index 06f0428a723c..1f32d6cf98d6 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -76,7 +76,7 @@ lib-$(CONFIG_EFI_GENERIC_STUB) += efi-stub.o string.o intrinsics.o systable.o \ lib-$(CONFIG_ARM) += arm32-stub.o lib-$(CONFIG_ARM64) += kaslr.o arm64.o arm64-stub.o smbios.o -lib-$(CONFIG_X86) += x86-stub.o +lib-$(CONFIG_X86) += x86-stub.o smbios.o lib-$(CONFIG_X86_64) += x86-5lvl.o lib-$(CONFIG_RISCV) += kaslr.o riscv.o riscv-stub.o lib-$(CONFIG_LOONGARCH) += loongarch.o loongarch-stub.o diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c index 452b7ccd330e..2c3869356147 100644 --- a/drivers/firmware/efi/libstub/arm64-stub.c +++ b/drivers/firmware/efi/libstub/arm64-stub.c @@ -21,7 +21,6 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, efi_loaded_image_t *image, efi_handle_t image_handle) { - efi_status_t status; unsigned long kernel_size, kernel_codesize, kernel_memsize; if (image->image_base != _text) { @@ -39,15 +38,9 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, *reserve_size = kernel_memsize; *image_addr = (unsigned long)_text; - status = efi_kaslr_relocate_kernel(image_addr, - reserve_addr, reserve_size, - kernel_size, kernel_codesize, - kernel_memsize, - efi_kaslr_get_phys_seed(image_handle)); - if (status != EFI_SUCCESS) - return status; - - return EFI_SUCCESS; + return efi_kaslr_relocate_kernel(image_addr, reserve_addr, reserve_size, + kernel_size, kernel_codesize, kernel_memsize, + efi_kaslr_get_phys_seed(image_handle)); } asmlinkage void primary_entry(void); diff --git a/drivers/firmware/efi/libstub/arm64.c b/drivers/firmware/efi/libstub/arm64.c index 446e35eaf3d9..e57cd3de0a00 100644 --- a/drivers/firmware/efi/libstub/arm64.c +++ b/drivers/firmware/efi/libstub/arm64.c @@ -39,8 +39,7 @@ static bool system_needs_vamap(void) static char const emag[] = "eMAG"; default: - version = efi_get_smbios_string(&record->header, 4, - processor_version); + version = efi_get_smbios_string(record, processor_version); if (!version || (strncmp(version, altra, sizeof(altra) - 1) && strncmp(version, emag, sizeof(emag) - 1))) break; diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h index 27abb4ce0291..d33ccbc4a2c6 100644 --- a/drivers/firmware/efi/libstub/efistub.h +++ b/drivers/firmware/efi/libstub/efistub.h @@ -1204,14 +1204,13 @@ struct efi_smbios_type4_record { 
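
The efistub.h change that follows replaces the offsetof()-based efi_get_smbios_string() with a __typeof__-based form: the macro now takes the record pointer itself and hands __efi_get_smbios_string() the address of the named member, so naming a field that does not exist in that record type becomes a compile error instead of a silently wrong byte offset. A reduced model of the trick, with generic names standing in for the stub's types:

    /* Sketch only; generic names, not the stub's own types. */
    struct rec_header { unsigned char type, length; };

    struct type1_rec {
        struct rec_header header;
        unsigned char product_name;    /* index into the string table */
    };

    const char *__lookup_string(const struct rec_header *hdr,
                                const unsigned char *idx);

    /*
     * The statement expression evaluates __record once; __typeof__
     * preserves its static type, so &__rec->__field only compiles when
     * that record type really has the field.
     */
    #define lookup_string(__record, __field) ({                 \
        __typeof__(__record) __rec = (__record);                \
        __lookup_string(&__rec->header, &__rec->__field);       \
    })

    /* usage: const char *name = lookup_string(rec, product_name); */
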
u16 thread_enabled; }; -#define efi_get_smbios_string(__record, __type, __name) ({ \ - int off = offsetof(struct efi_smbios_type ## __type ## _record, \ - __name); \ - __efi_get_smbios_string((__record), __type, off); \ +#define efi_get_smbios_string(__record, __field) ({ \ + __typeof__(__record) __rec = __record; \ + __efi_get_smbios_string(&__rec->header, &__rec->__field); \ }) const u8 *__efi_get_smbios_string(const struct efi_smbios_record *record, - u8 type, int offset); + const u8 *offset); void efi_remap_image(unsigned long image_base, unsigned alloc_size, unsigned long code_size); diff --git a/drivers/firmware/efi/libstub/kaslr.c b/drivers/firmware/efi/libstub/kaslr.c index 1a9808012abd..6318c40bda38 100644 --- a/drivers/firmware/efi/libstub/kaslr.c +++ b/drivers/firmware/efi/libstub/kaslr.c @@ -18,8 +18,6 @@ */ u32 efi_kaslr_get_phys_seed(efi_handle_t image_handle) { - efi_status_t status; - u32 phys_seed; efi_guid_t li_fixed_proto = LINUX_EFI_LOADED_IMAGE_FIXED_GUID; void *p; @@ -32,18 +30,20 @@ u32 efi_kaslr_get_phys_seed(efi_handle_t image_handle) &li_fixed_proto, &p) == EFI_SUCCESS) { efi_info("Image placement fixed by loader\n"); } else { + efi_status_t status; + u32 phys_seed; + status = efi_get_random_bytes(sizeof(phys_seed), (u8 *)&phys_seed); - if (status == EFI_SUCCESS) { + if (status == EFI_SUCCESS) return phys_seed; - } else if (status == EFI_NOT_FOUND) { + + if (status == EFI_NOT_FOUND) efi_info("EFI_RNG_PROTOCOL unavailable\n"); - efi_nokaslr = true; - } else if (status != EFI_SUCCESS) { - efi_err("efi_get_random_bytes() failed (0x%lx)\n", - status); - efi_nokaslr = true; - } + else + efi_err("efi_get_random_bytes() failed (0x%lx)\n", status); + + efi_nokaslr = true; } return 0; diff --git a/drivers/firmware/efi/libstub/relocate.c b/drivers/firmware/efi/libstub/relocate.c index bf6fbd5d22a1..d694bcfa1074 100644 --- a/drivers/firmware/efi/libstub/relocate.c +++ b/drivers/firmware/efi/libstub/relocate.c @@ -48,7 +48,7 @@ efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align, unsigned long m = (unsigned long)map->map; u64 start, end; - desc = efi_early_memdesc_ptr(m, map->desc_size, i); + desc = efi_memdesc_ptr(m, map->desc_size, i); if (desc->type != EFI_CONVENTIONAL_MEMORY) continue; diff --git a/drivers/firmware/efi/libstub/smbios.c b/drivers/firmware/efi/libstub/smbios.c index c217de2cc8d5..f31410d7e7e1 100644 --- a/drivers/firmware/efi/libstub/smbios.c +++ b/drivers/firmware/efi/libstub/smbios.c @@ -6,20 +6,31 @@ #include "efistub.h" -typedef struct efi_smbios_protocol efi_smbios_protocol_t; - -struct efi_smbios_protocol { - efi_status_t (__efiapi *add)(efi_smbios_protocol_t *, efi_handle_t, - u16 *, struct efi_smbios_record *); - efi_status_t (__efiapi *update_string)(efi_smbios_protocol_t *, u16 *, - unsigned long *, u8 *); - efi_status_t (__efiapi *remove)(efi_smbios_protocol_t *, u16); - efi_status_t (__efiapi *get_next)(efi_smbios_protocol_t *, u16 *, u8 *, - struct efi_smbios_record **, - efi_handle_t *); - - u8 major_version; - u8 minor_version; +typedef union efi_smbios_protocol efi_smbios_protocol_t; + +union efi_smbios_protocol { + struct { + efi_status_t (__efiapi *add)(efi_smbios_protocol_t *, efi_handle_t, + u16 *, struct efi_smbios_record *); + efi_status_t (__efiapi *update_string)(efi_smbios_protocol_t *, u16 *, + unsigned long *, u8 *); + efi_status_t (__efiapi *remove)(efi_smbios_protocol_t *, u16); + efi_status_t (__efiapi *get_next)(efi_smbios_protocol_t *, u16 *, u8 *, + struct efi_smbios_record **, + efi_handle_t *); + + u8 
major_version; + u8 minor_version; + }; + struct { + u32 add; + u32 update_string; + u32 remove; + u32 get_next; + + u8 major_version; + u8 minor_version; + } mixed_mode; }; const struct efi_smbios_record *efi_get_smbios_record(u8 type) @@ -38,7 +49,7 @@ const struct efi_smbios_record *efi_get_smbios_record(u8 type) } const u8 *__efi_get_smbios_string(const struct efi_smbios_record *record, - u8 type, int offset) + const u8 *offset) { const u8 *strtable; @@ -46,7 +57,7 @@ const u8 *__efi_get_smbios_string(const struct efi_smbios_record *record, return NULL; strtable = (u8 *)record + record->length; - for (int i = 1; i < ((u8 *)record)[offset]; i++) { + for (int i = 1; i < *offset; i++) { int len = strlen(strtable); if (!len) diff --git a/drivers/firmware/efi/libstub/unaccepted_memory.c b/drivers/firmware/efi/libstub/unaccepted_memory.c index 9a655f30ba47..c295ea3a6efc 100644 --- a/drivers/firmware/efi/libstub/unaccepted_memory.c +++ b/drivers/firmware/efi/libstub/unaccepted_memory.c @@ -29,7 +29,7 @@ efi_status_t allocate_unaccepted_bitmap(__u32 nr_desc, efi_memory_desc_t *d; unsigned long m = (unsigned long)map->map; - d = efi_early_memdesc_ptr(m, map->desc_size, i); + d = efi_memdesc_ptr(m, map->desc_size, i); if (d->type != EFI_UNACCEPTED_MEMORY) continue; diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c index 1983fd3bf392..078055b054e3 100644 --- a/drivers/firmware/efi/libstub/x86-stub.c +++ b/drivers/firmware/efi/libstub/x86-stub.c @@ -225,6 +225,68 @@ static void retrieve_apple_device_properties(struct boot_params *boot_params) } } +static bool apple_match_product_name(void) +{ + static const char type1_product_matches[][15] = { + "MacBookPro11,3", + "MacBookPro11,5", + "MacBookPro13,3", + "MacBookPro14,3", + "MacBookPro15,1", + "MacBookPro15,3", + "MacBookPro16,1", + "MacBookPro16,4", + }; + const struct efi_smbios_type1_record *record; + const u8 *product; + + record = (struct efi_smbios_type1_record *)efi_get_smbios_record(1); + if (!record) + return false; + + product = efi_get_smbios_string(record, product_name); + if (!product) + return false; + + for (int i = 0; i < ARRAY_SIZE(type1_product_matches); i++) { + if (!strcmp(product, type1_product_matches[i])) + return true; + } + + return false; +} + +static void apple_set_os(void) +{ + struct { + unsigned long version; + efi_status_t (__efiapi *set_os_version)(const char *); + efi_status_t (__efiapi *set_os_vendor)(const char *); + } *set_os; + efi_status_t status; + + if (!efi_is_64bit() || !apple_match_product_name()) + return; + + status = efi_bs_call(locate_protocol, &APPLE_SET_OS_PROTOCOL_GUID, NULL, + (void **)&set_os); + if (status != EFI_SUCCESS) + return; + + if (set_os->version >= 2) { + status = set_os->set_os_vendor("Apple Inc."); + if (status != EFI_SUCCESS) + efi_err("Failed to set OS vendor via apple_set_os\n"); + } + + if (set_os->version > 0) { + /* The version being set doesn't seem to matter */ + status = set_os->set_os_version("Mac OS X 10.9"); + if (status != EFI_SUCCESS) + efi_err("Failed to set OS version via apple_set_os\n"); + } +} + efi_status_t efi_adjust_memory_range_protection(unsigned long start, unsigned long size) { @@ -335,9 +397,12 @@ static const efi_char16_t apple[] = L"Apple"; static void setup_quirks(struct boot_params *boot_params) { - if (IS_ENABLED(CONFIG_APPLE_PROPERTIES) && - !memcmp(efistub_fw_vendor(), apple, sizeof(apple))) - retrieve_apple_device_properties(boot_params); + if (!memcmp(efistub_fw_vendor(), apple, sizeof(apple))) { + if 
(IS_ENABLED(CONFIG_APPLE_PROPERTIES)) + retrieve_apple_device_properties(boot_params); + + apple_set_os(); + } } /* @@ -476,9 +541,6 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle, efi_status_t status; char *cmdline_ptr; - if (efi_is_native()) - memset(_bss, 0, _ebss - _bss); - efi_system_table = sys_table_arg; /* Check if we were booted by the EFI firmware */ @@ -501,16 +563,13 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle, /* Convert unicode cmdline to ascii */ cmdline_ptr = efi_convert_cmdline(image, &options_size); if (!cmdline_ptr) - goto fail; + efi_exit(handle, EFI_OUT_OF_RESOURCES); efi_set_u64_split((unsigned long)cmdline_ptr, &hdr->cmd_line_ptr, &boot_params.ext_cmd_line_ptr); efi_stub_entry(handle, sys_table_arg, &boot_params); /* not reached */ - -fail: - efi_exit(handle, status); } static void add_e820ext(struct boot_params *params, @@ -555,7 +614,7 @@ setup_e820(struct boot_params *params, struct setup_data *e820ext, u32 e820ext_s m |= (u64)efi->efi_memmap_hi << 32; #endif - d = efi_early_memdesc_ptr(m, efi->efi_memdesc_size, i); + d = efi_memdesc_ptr(m, efi->efi_memdesc_size, i); switch (d->type) { case EFI_RESERVED_TYPE: case EFI_RUNTIME_SERVICES_CODE: @@ -781,7 +840,7 @@ static const char *cmdline_memmap_override; static efi_status_t parse_options(const char *cmdline) { static const char opts[][14] = { - "mem=", "memmap=", "efi_fake_mem=", "hugepages=" + "mem=", "memmap=", "hugepages=" }; for (int i = 0; i < ARRAY_SIZE(opts); i++) { diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c index ab85bf8e165a..164203429fa7 100644 --- a/drivers/firmware/efi/memattr.c +++ b/drivers/firmware/efi/memattr.c @@ -164,7 +164,7 @@ int __init efi_memattr_apply_permissions(struct mm_struct *mm, bool valid; char buf[64]; - valid = entry_is_valid((void *)tbl->entry + i * tbl->desc_size, + valid = entry_is_valid(efi_memdesc_ptr(tbl->entry, tbl->desc_size, i), &md); size = md.num_pages << EFI_PAGE_SHIFT; if (efi_enabled(EFI_DBG) || !valid) diff --git a/drivers/firmware/google/cbmem.c b/drivers/firmware/google/cbmem.c index 6f810d720f4d..66042160b361 100644 --- a/drivers/firmware/google/cbmem.c +++ b/drivers/firmware/google/cbmem.c @@ -131,4 +131,5 @@ static struct coreboot_driver cbmem_entry_driver = { module_coreboot_driver(cbmem_entry_driver); MODULE_AUTHOR("Jack Rosenthal <jrosenth@chromium.org>"); +MODULE_DESCRIPTION("Driver for exporting CBMEM entries in sysfs"); MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c index fa7752f6e89b..a4e3bbd556a3 100644 --- a/drivers/firmware/google/coreboot_table.c +++ b/drivers/firmware/google/coreboot_table.c @@ -255,4 +255,5 @@ module_init(coreboot_table_driver_init); module_exit(coreboot_table_driver_exit); MODULE_AUTHOR("Google, Inc."); +MODULE_DESCRIPTION("Module providing coreboot table access"); MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/google/framebuffer-coreboot.c b/drivers/firmware/google/framebuffer-coreboot.c index 07c458bf64ec..daadd71d8ddd 100644 --- a/drivers/firmware/google/framebuffer-coreboot.c +++ b/drivers/firmware/google/framebuffer-coreboot.c @@ -97,4 +97,5 @@ static struct coreboot_driver framebuffer_driver = { module_coreboot_driver(framebuffer_driver); MODULE_AUTHOR("Samuel Holland <samuel@sholland.org>"); +MODULE_DESCRIPTION("Memory based framebuffer accessed through coreboot table"); MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c index 
96ea1fa76d35..d304913314e4 100644 --- a/drivers/firmware/google/gsmi.c +++ b/drivers/firmware/google/gsmi.c @@ -1090,4 +1090,5 @@ module_init(gsmi_init); module_exit(gsmi_exit); MODULE_AUTHOR("Google, Inc."); +MODULE_DESCRIPTION("EFI SMI interface for Google platforms"); MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/google/memconsole-coreboot.c b/drivers/firmware/google/memconsole-coreboot.c index 24c97a70aa80..c5f08617aa8d 100644 --- a/drivers/firmware/google/memconsole-coreboot.c +++ b/drivers/firmware/google/memconsole-coreboot.c @@ -113,4 +113,5 @@ static struct coreboot_driver memconsole_driver = { module_coreboot_driver(memconsole_driver); MODULE_AUTHOR("Google, Inc."); +MODULE_DESCRIPTION("Memory based BIOS console accessed through coreboot table"); MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/google/memconsole-x86-legacy.c b/drivers/firmware/google/memconsole-x86-legacy.c index 3d3c4f6b8194..a0974c376985 100644 --- a/drivers/firmware/google/memconsole-x86-legacy.c +++ b/drivers/firmware/google/memconsole-x86-legacy.c @@ -154,4 +154,5 @@ module_init(memconsole_x86_init); module_exit(memconsole_x86_exit); MODULE_AUTHOR("Google, Inc."); +MODULE_DESCRIPTION("EBDA specific parts of the memory based BIOS console."); MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c index 44d314ad69e4..b9d99fe1ff0f 100644 --- a/drivers/firmware/google/memconsole.c +++ b/drivers/firmware/google/memconsole.c @@ -50,4 +50,5 @@ void memconsole_exit(void) EXPORT_SYMBOL(memconsole_exit); MODULE_AUTHOR("Google, Inc."); +MODULE_DESCRIPTION("Architecture-independent parts of the memory based BIOS console"); MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c index 8e4216714b29..1749529f63d4 100644 --- a/drivers/firmware/google/vpd.c +++ b/drivers/firmware/google/vpd.c @@ -323,4 +323,5 @@ static struct coreboot_driver vpd_driver = { module_coreboot_driver(vpd_driver); MODULE_AUTHOR("Google, Inc."); +MODULE_DESCRIPTION("Driver for exporting Vital Product Data content to sysfs"); MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c index 5d7f62fe1d5f..f25a9746249b 100644 --- a/drivers/firmware/meson/meson_sm.c +++ b/drivers/firmware/meson/meson_sm.c @@ -340,4 +340,5 @@ static struct platform_driver meson_sm_driver = { }, }; module_platform_driver_probe(meson_sm_driver, meson_sm_probe); +MODULE_DESCRIPTION("Amlogic Secure Monitor driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/firmware/microchip/mpfs-auto-update.c b/drivers/firmware/microchip/mpfs-auto-update.c index 835a19a7a3a0..30de47895b1c 100644 --- a/drivers/firmware/microchip/mpfs-auto-update.c +++ b/drivers/firmware/microchip/mpfs-auto-update.c @@ -9,6 +9,7 @@ * * Author: Conor Dooley <conor.dooley@microchip.com> */ +#include <linux/cleanup.h> #include <linux/debugfs.h> #include <linux/firmware.h> #include <linux/math.h> @@ -71,8 +72,9 @@ #define AUTO_UPDATE_UPGRADE_DIRECTORY (AUTO_UPDATE_DIRECTORY_WIDTH * AUTO_UPDATE_UPGRADE_INDEX) #define AUTO_UPDATE_BLANK_DIRECTORY (AUTO_UPDATE_DIRECTORY_WIDTH * AUTO_UPDATE_BLANK_INDEX) #define AUTO_UPDATE_DIRECTORY_SIZE SZ_1K -#define AUTO_UPDATE_RESERVED_SIZE SZ_1M -#define AUTO_UPDATE_BITSTREAM_BASE (AUTO_UPDATE_DIRECTORY_SIZE + AUTO_UPDATE_RESERVED_SIZE) +#define AUTO_UPDATE_INFO_BASE AUTO_UPDATE_DIRECTORY_SIZE +#define AUTO_UPDATE_INFO_SIZE SZ_1M +#define AUTO_UPDATE_BITSTREAM_BASE (AUTO_UPDATE_DIRECTORY_SIZE + AUTO_UPDATE_INFO_SIZE) 
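
The <linux/cleanup.h> include added above is what enables the __free(kfree) conversions in the hunks that follow: a pointer declared with __free(kfree) is handed to kfree() automatically when it goes out of scope, on every return path, which is why the goto-based unwind labels and the paired devm_kfree() calls can simply be dropped. A minimal sketch of the pattern, with a hypothetical do_transaction() standing in for the driver's functions:

    /* Sketch of scope-based cleanup; not the driver's code. */
    #include <linux/cleanup.h>
    #include <linux/errno.h>
    #include <linux/slab.h>

    static int do_transaction(void)
    {
        u8 *req __free(kfree) = kzalloc(64, GFP_KERNEL);
        u8 *resp __free(kfree) = kzalloc(64, GFP_KERNEL);

        /* kfree(NULL) is a no-op, so a partial failure needs no unwind */
        if (!req || !resp)
            return -ENOMEM;

        /* ... build req, exchange it for resp ... */

        return 0;    /* both buffers freed automatically here too */
    }
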
#define AUTO_UPDATE_TIMEOUT_MS 60000 @@ -86,6 +88,17 @@ struct mpfs_auto_update_priv { bool cancel_request; }; +static bool mpfs_auto_update_is_bitstream_info(const u8 *data, u32 size) +{ + if (size < 4) + return false; + + if (data[0] == 0x4d && data[1] == 0x43 && data[2] == 0x48 && data[3] == 0x50) + return true; + + return false; +} + static enum fw_upload_err mpfs_auto_update_prepare(struct fw_upload *fw_uploader, const u8 *data, u32 size) { @@ -162,28 +175,17 @@ static enum fw_upload_err mpfs_auto_update_poll_complete(struct fw_upload *fw_up static int mpfs_auto_update_verify_image(struct fw_upload *fw_uploader) { struct mpfs_auto_update_priv *priv = fw_uploader->dd_handle; - struct mpfs_mss_response *response; - struct mpfs_mss_msg *message; - u32 *response_msg; + u32 *response_msg __free(kfree) = + kzalloc(AUTO_UPDATE_FEATURE_RESP_SIZE * sizeof(*response_msg), GFP_KERNEL); + struct mpfs_mss_response *response __free(kfree) = + kzalloc(sizeof(struct mpfs_mss_response), GFP_KERNEL); + struct mpfs_mss_msg *message __free(kfree) = + kzalloc(sizeof(struct mpfs_mss_msg), GFP_KERNEL); int ret; - response_msg = devm_kzalloc(priv->dev, AUTO_UPDATE_FEATURE_RESP_SIZE * sizeof(*response_msg), - GFP_KERNEL); - if (!response_msg) + if (!response_msg || !response || !message) return -ENOMEM; - response = devm_kzalloc(priv->dev, sizeof(struct mpfs_mss_response), GFP_KERNEL); - if (!response) { - ret = -ENOMEM; - goto free_response_msg; - } - - message = devm_kzalloc(priv->dev, sizeof(struct mpfs_mss_msg), GFP_KERNEL); - if (!message) { - ret = -ENOMEM; - goto free_response; - } - /* * The system controller can verify that an image in the flash is valid. * Rather than duplicate the check in this driver, call the relevant @@ -205,31 +207,25 @@ static int mpfs_auto_update_verify_image(struct fw_upload *fw_uploader) ret = mpfs_blocking_transaction(priv->sys_controller, message); if (ret | response->resp_status) { dev_warn(priv->dev, "Verification of Upgrade Image failed!\n"); - ret = ret ? ret : -EBADMSG; - goto free_message; + return ret ? 
ret : -EBADMSG; } dev_info(priv->dev, "Verification of Upgrade Image passed!\n"); -free_message: - devm_kfree(priv->dev, message); -free_response: - devm_kfree(priv->dev, response); -free_response_msg: - devm_kfree(priv->dev, response_msg); - - return ret; + return 0; } -static int mpfs_auto_update_set_image_address(struct mpfs_auto_update_priv *priv, char *buffer, +static int mpfs_auto_update_set_image_address(struct mpfs_auto_update_priv *priv, u32 image_address, loff_t directory_address) { struct erase_info erase; - size_t erase_size = AUTO_UPDATE_DIRECTORY_SIZE; + size_t erase_size = round_up(AUTO_UPDATE_DIRECTORY_SIZE, (u64)priv->flash->erasesize); size_t bytes_written = 0, bytes_read = 0; + char *buffer __free(kfree) = kzalloc(erase_size, GFP_KERNEL); int ret; - erase_size = round_up(erase_size, (u64)priv->flash->erasesize); + if (!buffer) + return -ENOMEM; erase.addr = AUTO_UPDATE_DIRECTORY_BASE; erase.len = erase_size; @@ -275,7 +271,7 @@ static int mpfs_auto_update_set_image_address(struct mpfs_auto_update_priv *priv return ret; if (bytes_written != erase_size) - return ret; + return -EIO; return 0; } @@ -285,26 +281,36 @@ static int mpfs_auto_update_write_bitstream(struct fw_upload *fw_uploader, const { struct mpfs_auto_update_priv *priv = fw_uploader->dd_handle; struct erase_info erase; - char *buffer; loff_t directory_address = AUTO_UPDATE_UPGRADE_DIRECTORY; size_t erase_size = AUTO_UPDATE_DIRECTORY_SIZE; size_t bytes_written = 0; + bool is_info = mpfs_auto_update_is_bitstream_info(data, size); u32 image_address; int ret; erase_size = round_up(erase_size, (u64)priv->flash->erasesize); - image_address = AUTO_UPDATE_BITSTREAM_BASE + - AUTO_UPDATE_UPGRADE_INDEX * priv->size_per_bitstream; - - buffer = devm_kzalloc(priv->dev, erase_size, GFP_KERNEL); - if (!buffer) - return -ENOMEM; + if (is_info) + image_address = AUTO_UPDATE_INFO_BASE; + else + image_address = AUTO_UPDATE_BITSTREAM_BASE + + AUTO_UPDATE_UPGRADE_INDEX * priv->size_per_bitstream; - ret = mpfs_auto_update_set_image_address(priv, buffer, image_address, directory_address); - if (ret) { - dev_err(priv->dev, "failed to set image address in the SPI directory: %d\n", ret); - goto out; + /* + * For bitstream info, the descriptor is written to a fixed offset, + * so there is no need to set the image address. + */ + if (!is_info) { + ret = mpfs_auto_update_set_image_address(priv, image_address, directory_address); + if (ret) { + dev_err(priv->dev, "failed to set image address in the SPI directory: %d\n", ret); + return ret; + } + } else { + if (size > AUTO_UPDATE_INFO_SIZE) { + dev_err(priv->dev, "bitstream info exceeds permitted size\n"); + return -ENOSPC; + } } /* @@ -318,7 +324,7 @@ static int mpfs_auto_update_write_bitstream(struct fw_upload *fw_uploader, const dev_info(priv->dev, "Erasing the flash at address (0x%x)\n", image_address); ret = mtd_erase(priv->flash, &erase); if (ret) - goto out; + return ret; /* * No parsing etc of the bitstream is required. 
The system controller @@ -328,18 +334,15 @@ static int mpfs_auto_update_write_bitstream(struct fw_upload *fw_uploader, const dev_info(priv->dev, "Writing the image to the flash at address (0x%x)\n", image_address); ret = mtd_write(priv->flash, (loff_t)image_address, size, &bytes_written, data); if (ret) - goto out; + return ret; - if (bytes_written != size) { - ret = -EIO; - goto out; - } + if (bytes_written != size) + return -EIO; *written = bytes_written; + dev_info(priv->dev, "Wrote 0x%zx bytes to the flash\n", bytes_written); -out: - devm_kfree(priv->dev, buffer); - return ret; + return 0; } static enum fw_upload_err mpfs_auto_update_write(struct fw_upload *fw_uploader, const u8 *data, @@ -362,6 +365,9 @@ static enum fw_upload_err mpfs_auto_update_write(struct fw_upload *fw_uploader, goto out; } + if (mpfs_auto_update_is_bitstream_info(data, size)) + goto out; + ret = mpfs_auto_update_verify_image(fw_uploader); if (ret) err = FW_UPLOAD_ERR_FW_INVALID; @@ -381,23 +387,15 @@ static const struct fw_upload_ops mpfs_auto_update_ops = { static int mpfs_auto_update_available(struct mpfs_auto_update_priv *priv) { - struct mpfs_mss_response *response; - struct mpfs_mss_msg *message; - u32 *response_msg; + u32 *response_msg __free(kfree) = + kzalloc(AUTO_UPDATE_FEATURE_RESP_SIZE * sizeof(*response_msg), GFP_KERNEL); + struct mpfs_mss_response *response __free(kfree) = + kzalloc(sizeof(struct mpfs_mss_response), GFP_KERNEL); + struct mpfs_mss_msg *message __free(kfree) = + kzalloc(sizeof(struct mpfs_mss_msg), GFP_KERNEL); int ret; - response_msg = devm_kzalloc(priv->dev, - AUTO_UPDATE_FEATURE_RESP_SIZE * sizeof(*response_msg), - GFP_KERNEL); - if (!response_msg) - return -ENOMEM; - - response = devm_kzalloc(priv->dev, sizeof(struct mpfs_mss_response), GFP_KERNEL); - if (!response) - return -ENOMEM; - - message = devm_kzalloc(priv->dev, sizeof(struct mpfs_mss_msg), GFP_KERNEL); - if (!message) + if (!response_msg || !response || !message) return -ENOMEM; /* diff --git a/drivers/firmware/qcom/Kconfig b/drivers/firmware/qcom/Kconfig index 3f05d9854ddf..7f6eb4174734 100644 --- a/drivers/firmware/qcom/Kconfig +++ b/drivers/firmware/qcom/Kconfig @@ -7,8 +7,39 @@ menu "Qualcomm firmware drivers" config QCOM_SCM + select QCOM_TZMEM tristate +config QCOM_TZMEM + tristate + select GENERIC_ALLOCATOR + +choice + prompt "TrustZone interface memory allocator mode" + default QCOM_TZMEM_MODE_GENERIC + help + Selects the mode of the memory allocator providing memory buffers of + suitable format for sharing with the TrustZone. If in doubt, select + 'Generic'. + +config QCOM_TZMEM_MODE_GENERIC + bool "Generic" + help + Use the generic allocator mode. The memory is page-aligned, non-cachable + and physically contiguous. + +config QCOM_TZMEM_MODE_SHMBRIDGE + bool "SHM Bridge" + help + Use Qualcomm Shared Memory Bridge. The memory has the same alignment as + in the 'Generic' allocator but is also explicitly marked as an SHM Bridge + buffer. + + With this selected, all buffers passed to the TrustZone must be allocated + using the TZMem allocator or else the TrustZone will refuse to use them. 
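Whichever mode is chosen here, the selection is transparent to client drivers: pools are created and consumed through the same qcom_tzmem API this series introduces. A minimal creation sketch, with sizing values borrowed from the uefisecapp probe hunk below ('dev' is an illustrative struct device pointer):

	#include <linux/err.h>
	#include <linux/firmware/qcom/qcom_tzmem.h>
	#include <linux/sizes.h>

	struct qcom_tzmem_pool_config pool_config;
	struct qcom_tzmem_pool *pool;

	memset(&pool_config, 0, sizeof(pool_config));
	pool_config.initial_size = SZ_4K;
	/* Grow by a multiple of the current size when exhausted. */
	pool_config.policy = QCOM_TZMEM_POLICY_MULTIPLIER;
	pool_config.increment = 2;
	pool_config.max_size = SZ_256K;

	/* Managed variant: the pool is freed when 'dev' is released. */
	pool = devm_qcom_tzmem_pool_new(dev, &pool_config);
	if (IS_ERR(pool))
		return PTR_ERR(pool);
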
+ +endchoice + config QCOM_SCM_DOWNLOAD_MODE_DEFAULT bool "Qualcomm download mode enabled by default" depends on QCOM_SCM diff --git a/drivers/firmware/qcom/Makefile b/drivers/firmware/qcom/Makefile index c9f12ee8224a..0be40a1abc13 100644 --- a/drivers/firmware/qcom/Makefile +++ b/drivers/firmware/qcom/Makefile @@ -5,5 +5,6 @@ obj-$(CONFIG_QCOM_SCM) += qcom-scm.o qcom-scm-objs += qcom_scm.o qcom_scm-smc.o qcom_scm-legacy.o +obj-$(CONFIG_QCOM_TZMEM) += qcom_tzmem.o obj-$(CONFIG_QCOM_QSEECOM) += qcom_qseecom.o obj-$(CONFIG_QCOM_QSEECOM_UEFISECAPP) += qcom_qseecom_uefisecapp.o diff --git a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c index bc550ad0dbe0..6fefa4fe80e8 100644 --- a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c +++ b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c @@ -13,11 +13,14 @@ #include <linux/mutex.h> #include <linux/of.h> #include <linux/platform_device.h> +#include <linux/sizes.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/ucs2_string.h> #include <linux/firmware/qcom/qcom_qseecom.h> +#include <linux/firmware/qcom/qcom_scm.h> +#include <linux/firmware/qcom/qcom_tzmem.h> /* -- Qualcomm "uefisecapp" interface definitions. -------------------------- */ @@ -272,6 +275,7 @@ struct qsee_rsp_uefi_query_variable_info { struct qcuefi_client { struct qseecom_client *client; struct efivars efivars; + struct qcom_tzmem_pool *mempool; }; static struct device *qcuefi_dev(struct qcuefi_client *qcuefi) @@ -293,12 +297,11 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e { struct qsee_req_uefi_get_variable *req_data; struct qsee_rsp_uefi_get_variable *rsp_data; + void *cmd_buf __free(qcom_tzmem) = NULL; unsigned long buffer_size = *data_size; - efi_status_t efi_status = EFI_SUCCESS; unsigned long name_length; - dma_addr_t cmd_buf_dma; + efi_status_t efi_status; size_t cmd_buf_size; - void *cmd_buf; size_t guid_offs; size_t name_offs; size_t req_size; @@ -333,11 +336,9 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e __reqdata_offs(rsp_size, &rsp_offs) ); - cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL); - if (!cmd_buf) { - efi_status = EFI_OUT_OF_RESOURCES; - goto out; - } + cmd_buf = qcom_tzmem_alloc(qcuefi->mempool, cmd_buf_size, GFP_KERNEL); + if (!cmd_buf) + return EFI_OUT_OF_RESOURCES; req_data = cmd_buf + req_offs; rsp_data = cmd_buf + rsp_offs; @@ -351,30 +352,22 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e req_data->length = req_size; status = ucs2_strscpy(((void *)req_data) + req_data->name_offset, name, name_length); - if (status < 0) { - efi_status = EFI_INVALID_PARAMETER; - goto out_free; - } + if (status < 0) + return EFI_INVALID_PARAMETER; memcpy(((void *)req_data) + req_data->guid_offset, guid, req_data->guid_size); status = qcom_qseecom_app_send(qcuefi->client, - cmd_buf_dma + req_offs, req_size, - cmd_buf_dma + rsp_offs, rsp_size); - if (status) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + cmd_buf + req_offs, req_size, + cmd_buf + rsp_offs, rsp_size); + if (status) + return EFI_DEVICE_ERROR; - if (rsp_data->command_id != QSEE_CMD_UEFI_GET_VARIABLE) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + if (rsp_data->command_id != QSEE_CMD_UEFI_GET_VARIABLE) + return EFI_DEVICE_ERROR; - if (rsp_data->length < sizeof(*rsp_data)) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + if (rsp_data->length < sizeof(*rsp_data)) + return 
EFI_DEVICE_ERROR; if (rsp_data->status) { dev_dbg(qcuefi_dev(qcuefi), "%s: uefisecapp error: 0x%x\n", @@ -388,18 +381,14 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e *attributes = rsp_data->attributes; } - goto out_free; + return qsee_uefi_status_to_efi(rsp_data->status); } - if (rsp_data->length > rsp_size) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + if (rsp_data->length > rsp_size) + return EFI_DEVICE_ERROR; - if (rsp_data->data_offset + rsp_data->data_size > rsp_data->length) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + if (rsp_data->data_offset + rsp_data->data_size > rsp_data->length) + return EFI_DEVICE_ERROR; /* * Note: We need to set attributes and data size even if the buffer is @@ -422,22 +411,15 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e if (attributes) *attributes = rsp_data->attributes; - if (buffer_size == 0 && !data) { - efi_status = EFI_SUCCESS; - goto out_free; - } + if (buffer_size == 0 && !data) + return EFI_SUCCESS; - if (buffer_size < rsp_data->data_size) { - efi_status = EFI_BUFFER_TOO_SMALL; - goto out_free; - } + if (buffer_size < rsp_data->data_size) + return EFI_BUFFER_TOO_SMALL; memcpy(data, ((void *)rsp_data) + rsp_data->data_offset, rsp_data->data_size); -out_free: - qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma); -out: - return efi_status; + return EFI_SUCCESS; } static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const efi_char16_t *name, @@ -446,11 +428,9 @@ static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const e { struct qsee_req_uefi_set_variable *req_data; struct qsee_rsp_uefi_set_variable *rsp_data; - efi_status_t efi_status = EFI_SUCCESS; + void *cmd_buf __free(qcom_tzmem) = NULL; unsigned long name_length; - dma_addr_t cmd_buf_dma; size_t cmd_buf_size; - void *cmd_buf; size_t name_offs; size_t guid_offs; size_t data_offs; @@ -486,11 +466,9 @@ static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const e __reqdata_offs(sizeof(*rsp_data), &rsp_offs) ); - cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL); - if (!cmd_buf) { - efi_status = EFI_OUT_OF_RESOURCES; - goto out; - } + cmd_buf = qcom_tzmem_alloc(qcuefi->mempool, cmd_buf_size, GFP_KERNEL); + if (!cmd_buf) + return EFI_OUT_OF_RESOURCES; req_data = cmd_buf + req_offs; rsp_data = cmd_buf + rsp_offs; @@ -506,10 +484,8 @@ static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const e req_data->length = req_size; status = ucs2_strscpy(((void *)req_data) + req_data->name_offset, name, name_length); - if (status < 0) { - efi_status = EFI_INVALID_PARAMETER; - goto out_free; - } + if (status < 0) + return EFI_INVALID_PARAMETER; memcpy(((void *)req_data) + req_data->guid_offset, guid, req_data->guid_size); @@ -517,33 +493,24 @@ static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const e memcpy(((void *)req_data) + req_data->data_offset, data, req_data->data_size); status = qcom_qseecom_app_send(qcuefi->client, - cmd_buf_dma + req_offs, req_size, - cmd_buf_dma + rsp_offs, sizeof(*rsp_data)); - if (status) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + cmd_buf + req_offs, req_size, + cmd_buf + rsp_offs, sizeof(*rsp_data)); + if (status) + return EFI_DEVICE_ERROR; - if (rsp_data->command_id != QSEE_CMD_UEFI_SET_VARIABLE) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + if (rsp_data->command_id != QSEE_CMD_UEFI_SET_VARIABLE) + 
return EFI_DEVICE_ERROR; - if (rsp_data->length != sizeof(*rsp_data)) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + if (rsp_data->length != sizeof(*rsp_data)) + return EFI_DEVICE_ERROR; if (rsp_data->status) { dev_dbg(qcuefi_dev(qcuefi), "%s: uefisecapp error: 0x%x\n", __func__, rsp_data->status); - efi_status = qsee_uefi_status_to_efi(rsp_data->status); + return qsee_uefi_status_to_efi(rsp_data->status); } -out_free: - qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma); -out: - return efi_status; + return EFI_SUCCESS; } static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi, @@ -552,10 +519,9 @@ static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi, { struct qsee_req_uefi_get_next_variable *req_data; struct qsee_rsp_uefi_get_next_variable *rsp_data; - efi_status_t efi_status = EFI_SUCCESS; - dma_addr_t cmd_buf_dma; + void *cmd_buf __free(qcom_tzmem) = NULL; + efi_status_t efi_status; size_t cmd_buf_size; - void *cmd_buf; size_t guid_offs; size_t name_offs; size_t req_size; @@ -587,11 +553,9 @@ static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi, __reqdata_offs(rsp_size, &rsp_offs) ); - cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL); - if (!cmd_buf) { - efi_status = EFI_OUT_OF_RESOURCES; - goto out; - } + cmd_buf = qcom_tzmem_alloc(qcuefi->mempool, cmd_buf_size, GFP_KERNEL); + if (!cmd_buf) + return EFI_OUT_OF_RESOURCES; req_data = cmd_buf + req_offs; rsp_data = cmd_buf + rsp_offs; @@ -606,28 +570,20 @@ static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi, memcpy(((void *)req_data) + req_data->guid_offset, guid, req_data->guid_size); status = ucs2_strscpy(((void *)req_data) + req_data->name_offset, name, *name_size / sizeof(*name)); - if (status < 0) { - efi_status = EFI_INVALID_PARAMETER; - goto out_free; - } + if (status < 0) + return EFI_INVALID_PARAMETER; status = qcom_qseecom_app_send(qcuefi->client, - cmd_buf_dma + req_offs, req_size, - cmd_buf_dma + rsp_offs, rsp_size); - if (status) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + cmd_buf + req_offs, req_size, + cmd_buf + rsp_offs, rsp_size); + if (status) + return EFI_DEVICE_ERROR; - if (rsp_data->command_id != QSEE_CMD_UEFI_GET_NEXT_VARIABLE) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + if (rsp_data->command_id != QSEE_CMD_UEFI_GET_NEXT_VARIABLE) + return EFI_DEVICE_ERROR; - if (rsp_data->length < sizeof(*rsp_data)) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + if (rsp_data->length < sizeof(*rsp_data)) + return EFI_DEVICE_ERROR; if (rsp_data->status) { dev_dbg(qcuefi_dev(qcuefi), "%s: uefisecapp error: 0x%x\n", @@ -642,53 +598,40 @@ static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi, if (efi_status == EFI_BUFFER_TOO_SMALL) *name_size = rsp_data->name_size; - goto out_free; + return efi_status; } - if (rsp_data->length > rsp_size) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + if (rsp_data->length > rsp_size) + return EFI_DEVICE_ERROR; - if (rsp_data->name_offset + rsp_data->name_size > rsp_data->length) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + if (rsp_data->name_offset + rsp_data->name_size > rsp_data->length) + return EFI_DEVICE_ERROR; - if (rsp_data->guid_offset + rsp_data->guid_size > rsp_data->length) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + if (rsp_data->guid_offset + rsp_data->guid_size > rsp_data->length) + return EFI_DEVICE_ERROR; if (rsp_data->name_size > 
*name_size) { *name_size = rsp_data->name_size; - efi_status = EFI_BUFFER_TOO_SMALL; - goto out_free; + return EFI_BUFFER_TOO_SMALL; } - if (rsp_data->guid_size != sizeof(*guid)) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + if (rsp_data->guid_size != sizeof(*guid)) + return EFI_DEVICE_ERROR; memcpy(guid, ((void *)rsp_data) + rsp_data->guid_offset, rsp_data->guid_size); status = ucs2_strscpy(name, ((void *)rsp_data) + rsp_data->name_offset, rsp_data->name_size / sizeof(*name)); *name_size = rsp_data->name_size; - if (status < 0) { + if (status < 0) /* * Return EFI_DEVICE_ERROR here because the buffer size should * have already been validated above, causing this function to * bail with EFI_BUFFER_TOO_SMALL. */ - efi_status = EFI_DEVICE_ERROR; - } + return EFI_DEVICE_ERROR; -out_free: - qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma); -out: - return efi_status; + return EFI_SUCCESS; } static efi_status_t qsee_uefi_query_variable_info(struct qcuefi_client *qcuefi, u32 attr, @@ -697,10 +640,8 @@ static efi_status_t qsee_uefi_query_variable_info(struct qcuefi_client *qcuefi, { struct qsee_req_uefi_query_variable_info *req_data; struct qsee_rsp_uefi_query_variable_info *rsp_data; - efi_status_t efi_status = EFI_SUCCESS; - dma_addr_t cmd_buf_dma; + void *cmd_buf __free(qcom_tzmem) = NULL; size_t cmd_buf_size; - void *cmd_buf; size_t req_offs; size_t rsp_offs; int status; @@ -710,11 +651,9 @@ static efi_status_t qsee_uefi_query_variable_info(struct qcuefi_client *qcuefi, __reqdata_offs(sizeof(*rsp_data), &rsp_offs) ); - cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL); - if (!cmd_buf) { - efi_status = EFI_OUT_OF_RESOURCES; - goto out; - } + cmd_buf = qcom_tzmem_alloc(qcuefi->mempool, cmd_buf_size, GFP_KERNEL); + if (!cmd_buf) + return EFI_OUT_OF_RESOURCES; req_data = cmd_buf + req_offs; rsp_data = cmd_buf + rsp_offs; @@ -724,28 +663,21 @@ static efi_status_t qsee_uefi_query_variable_info(struct qcuefi_client *qcuefi, req_data->length = sizeof(*req_data); status = qcom_qseecom_app_send(qcuefi->client, - cmd_buf_dma + req_offs, sizeof(*req_data), - cmd_buf_dma + rsp_offs, sizeof(*rsp_data)); - if (status) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + cmd_buf + req_offs, sizeof(*req_data), + cmd_buf + rsp_offs, sizeof(*rsp_data)); + if (status) + return EFI_DEVICE_ERROR; - if (rsp_data->command_id != QSEE_CMD_UEFI_QUERY_VARIABLE_INFO) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + if (rsp_data->command_id != QSEE_CMD_UEFI_QUERY_VARIABLE_INFO) + return EFI_DEVICE_ERROR; - if (rsp_data->length != sizeof(*rsp_data)) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + if (rsp_data->length != sizeof(*rsp_data)) + return EFI_DEVICE_ERROR; if (rsp_data->status) { dev_dbg(qcuefi_dev(qcuefi), "%s: uefisecapp error: 0x%x\n", __func__, rsp_data->status); - efi_status = qsee_uefi_status_to_efi(rsp_data->status); - goto out_free; + return qsee_uefi_status_to_efi(rsp_data->status); } if (storage_space) @@ -757,10 +689,7 @@ static efi_status_t qsee_uefi_query_variable_info(struct qcuefi_client *qcuefi, if (max_variable_size) *max_variable_size = rsp_data->max_variable_size; -out_free: - qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma); -out: - return efi_status; + return EFI_SUCCESS; } /* -- Global efivar interface. 
---------------------------------------------- */ @@ -871,6 +800,7 @@ static const struct efivar_operations qcom_efivar_ops = { static int qcom_uefisecapp_probe(struct auxiliary_device *aux_dev, const struct auxiliary_device_id *aux_dev_id) { + struct qcom_tzmem_pool_config pool_config; struct qcuefi_client *qcuefi; int status; @@ -889,6 +819,16 @@ static int qcom_uefisecapp_probe(struct auxiliary_device *aux_dev, if (status) qcuefi_set_reference(NULL); + memset(&pool_config, 0, sizeof(pool_config)); + pool_config.initial_size = SZ_4K; + pool_config.policy = QCOM_TZMEM_POLICY_MULTIPLIER; + pool_config.increment = 2; + pool_config.max_size = SZ_256K; + + qcuefi->mempool = devm_qcom_tzmem_pool_new(&aux_dev->dev, &pool_config); + if (IS_ERR(qcuefi->mempool)) + return PTR_ERR(qcuefi->mempool); + return status; } diff --git a/drivers/firmware/qcom/qcom_scm-smc.c b/drivers/firmware/qcom/qcom_scm-smc.c index 16cf88acfa8e..dca5f3f1883b 100644 --- a/drivers/firmware/qcom/qcom_scm-smc.c +++ b/drivers/firmware/qcom/qcom_scm-smc.c @@ -2,6 +2,7 @@ /* Copyright (c) 2015,2019 The Linux Foundation. All rights reserved. */ +#include <linux/cleanup.h> #include <linux/io.h> #include <linux/errno.h> #include <linux/delay.h> @@ -9,6 +10,7 @@ #include <linux/slab.h> #include <linux/types.h> #include <linux/firmware/qcom/qcom_scm.h> +#include <linux/firmware/qcom/qcom_tzmem.h> #include <linux/arm-smccc.h> #include <linux/dma-mapping.h> @@ -150,11 +152,10 @@ int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc, enum qcom_scm_convention qcom_convention, struct qcom_scm_res *res, bool atomic) { + struct qcom_tzmem_pool *mempool = qcom_scm_get_tzmem_pool(); int arglen = desc->arginfo & 0xf; int i, ret; - dma_addr_t args_phys = 0; - void *args_virt = NULL; - size_t alloc_len; + void *args_virt __free(qcom_tzmem) = NULL; gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL; u32 smccc_call_type = atomic ? ARM_SMCCC_FAST_CALL : ARM_SMCCC_STD_CALL; u32 qcom_smccc_convention = (qcom_convention == SMC_CONVENTION_ARM_32) ? 
@@ -172,9 +173,9 @@ int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc, smc.args[i + SCM_SMC_FIRST_REG_IDX] = desc->args[i]; if (unlikely(arglen > SCM_SMC_N_REG_ARGS)) { - alloc_len = SCM_SMC_N_EXT_ARGS * sizeof(u64); - args_virt = kzalloc(PAGE_ALIGN(alloc_len), flag); - + args_virt = qcom_tzmem_alloc(mempool, + SCM_SMC_N_EXT_ARGS * sizeof(u64), + flag); if (!args_virt) return -ENOMEM; @@ -192,25 +193,10 @@ int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc, SCM_SMC_FIRST_EXT_IDX]); } - args_phys = dma_map_single(dev, args_virt, alloc_len, - DMA_TO_DEVICE); - - if (dma_mapping_error(dev, args_phys)) { - kfree(args_virt); - return -ENOMEM; - } - - smc.args[SCM_SMC_LAST_REG_IDX] = args_phys; + smc.args[SCM_SMC_LAST_REG_IDX] = qcom_tzmem_to_phys(args_virt); } - /* ret error check follows after args_virt cleanup*/ ret = __scm_smc_do(dev, &smc, &smc_res, atomic); - - if (args_virt) { - dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE); - kfree(args_virt); - } - if (ret) return ret; diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c index 68f4df7e6c3c..00c379a3cceb 100644 --- a/drivers/firmware/qcom/qcom_scm.c +++ b/drivers/firmware/qcom/qcom_scm.c @@ -6,12 +6,15 @@ #include <linux/arm-smccc.h> #include <linux/bitfield.h> #include <linux/bits.h> +#include <linux/cleanup.h> #include <linux/clk.h> #include <linux/completion.h> #include <linux/cpumask.h> #include <linux/dma-mapping.h> +#include <linux/err.h> #include <linux/export.h> #include <linux/firmware/qcom/qcom_scm.h> +#include <linux/firmware/qcom/qcom_tzmem.h> #include <linux/init.h> #include <linux/interconnect.h> #include <linux/interrupt.h> @@ -20,11 +23,14 @@ #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_platform.h> +#include <linux/of_reserved_mem.h> #include <linux/platform_device.h> #include <linux/reset-controller.h> +#include <linux/sizes.h> #include <linux/types.h> #include "qcom_scm.h" +#include "qcom_tzmem.h" static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT); module_param(download_mode, bool, 0); @@ -43,6 +49,8 @@ struct qcom_scm { int scm_vote_count; u64 dload_mode_addr; + + struct qcom_tzmem_pool *mempool; }; struct qcom_scm_current_perm_info { @@ -114,7 +122,6 @@ static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = { }; #define QCOM_SMC_WAITQ_FLAG_WAKE_ONE BIT(0) -#define QCOM_SMC_WAITQ_FLAG_WAKE_ALL BIT(1) #define QCOM_DLOAD_MASK GENMASK(5, 4) #define QCOM_DLOAD_NODUMP 0 @@ -198,6 +205,11 @@ static void qcom_scm_bw_disable(void) enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN; static DEFINE_SPINLOCK(scm_query_lock); +struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void) +{ + return __scm->mempool; +} + static enum qcom_scm_convention __get_convention(void) { unsigned long flags; @@ -570,6 +582,13 @@ int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size, * During the scm call memory protection will be enabled for the meta * data blob, so make sure it's physically contiguous, 4K aligned and * non-cachable to avoid XPU violations. + * + * For PIL calls the hypervisor creates SHM Bridges for the blob + * buffers on behalf of Linux so we must not do it ourselves hence + * not using the TZMem allocator here. + * + * If we pass a buffer that is already part of an SHM Bridge to this + * call, it will fail. 
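For contrast, this is the SHM Bridge registration the TZMem allocator performs for its own areas, sketched from qcom_tzmem_init_area() later in this diff ('dev', 'paddr' and 'size' are illustrative):

	u64 pfn_and_ns_perm = (u64)paddr | QCOM_SCM_PERM_RW;
	u64 ipfn_and_s_perm = (u64)paddr | QCOM_SCM_PERM_RW;
	/* Number of destination VMs (one) stored at bit 9. */
	u64 size_and_flags = size | (1 << QCOM_SHM_BRIDGE_NUM_VM_SHIFT);
	u64 handle;
	int ret;

	ret = qcom_scm_shm_bridge_create(dev, pfn_and_ns_perm,
					 ipfn_and_s_perm, size_and_flags,
					 QCOM_SCM_VMID_HLOS, &handle);
	if (ret)
		return ret;
	/* Torn down later with qcom_scm_shm_bridge_delete(dev, handle). */
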
*/ mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys, GFP_KERNEL); @@ -1008,14 +1027,13 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, struct qcom_scm_mem_map_info *mem_to_map; phys_addr_t mem_to_map_phys; phys_addr_t dest_phys; - dma_addr_t ptr_phys; + phys_addr_t ptr_phys; size_t mem_to_map_sz; size_t dest_sz; size_t src_sz; size_t ptr_sz; int next_vm; __le32 *src; - void *ptr; int ret, i, b; u64 srcvm_bits = *srcvm; @@ -1025,10 +1043,13 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(dest_sz, SZ_64); - ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL); + void *ptr __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, + ptr_sz, GFP_KERNEL); if (!ptr) return -ENOMEM; + ptr_phys = qcom_tzmem_to_phys(ptr); + /* Fill source vmid detail */ src = ptr; i = 0; @@ -1057,7 +1078,6 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz, ptr_phys, src_sz, dest_phys, dest_sz); - dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys); if (ret) { dev_err(__scm->dev, "Assign memory protection call failed %d\n", ret); @@ -1205,32 +1225,21 @@ int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size, .args[4] = data_unit_size, .owner = ARM_SMCCC_OWNER_SIP, }; - void *keybuf; - dma_addr_t key_phys; - int ret; - /* - * 'key' may point to vmalloc()'ed memory, but we need to pass a - * physical address that's been properly flushed. The sanctioned way to - * do this is by using the DMA API. But as is best practice for crypto - * keys, we also must wipe the key after use. This makes kmemdup() + - * dma_map_single() not clearly correct, since the DMA API can use - * bounce buffers. Instead, just use dma_alloc_coherent(). Programming - * keys is normally rare and thus not performance-critical. 
- */ + int ret; - keybuf = dma_alloc_coherent(__scm->dev, key_size, &key_phys, - GFP_KERNEL); + void *keybuf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, + key_size, + GFP_KERNEL); if (!keybuf) return -ENOMEM; memcpy(keybuf, key, key_size); - desc.args[1] = key_phys; + desc.args[1] = qcom_tzmem_to_phys(keybuf); ret = qcom_scm_call(__scm->dev, &desc, NULL); memzero_explicit(keybuf, key_size); - dma_free_coherent(__scm->dev, key_size, keybuf, key_phys); return ret; } EXPORT_SYMBOL_GPL(qcom_scm_ice_set_key); @@ -1342,6 +1351,66 @@ bool qcom_scm_lmh_dcvsh_available(void) } EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh_available); +int qcom_scm_shm_bridge_enable(void) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_MP, + .cmd = QCOM_SCM_MP_SHM_BRIDGE_ENABLE, + .owner = ARM_SMCCC_OWNER_SIP + }; + + struct qcom_scm_res res; + + if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP, + QCOM_SCM_MP_SHM_BRIDGE_ENABLE)) + return -EOPNOTSUPP; + + return qcom_scm_call(__scm->dev, &desc, &res) ?: res.result[0]; +} +EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_enable); + +int qcom_scm_shm_bridge_create(struct device *dev, u64 pfn_and_ns_perm_flags, + u64 ipfn_and_s_perm_flags, u64 size_and_flags, + u64 ns_vmids, u64 *handle) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_MP, + .cmd = QCOM_SCM_MP_SHM_BRIDGE_CREATE, + .owner = ARM_SMCCC_OWNER_SIP, + .args[0] = pfn_and_ns_perm_flags, + .args[1] = ipfn_and_s_perm_flags, + .args[2] = size_and_flags, + .args[3] = ns_vmids, + .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL, + QCOM_SCM_VAL, QCOM_SCM_VAL), + }; + + struct qcom_scm_res res; + int ret; + + ret = qcom_scm_call(__scm->dev, &desc, &res); + + if (handle && !ret) + *handle = res.result[1]; + + return ret ?: res.result[0]; +} +EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_create); + +int qcom_scm_shm_bridge_delete(struct device *dev, u64 handle) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_MP, + .cmd = QCOM_SCM_MP_SHM_BRIDGE_DELETE, + .owner = ARM_SMCCC_OWNER_SIP, + .args[0] = handle, + .arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL), + }; + + return qcom_scm_call(__scm->dev, &desc, NULL); +} +EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_delete); + int qcom_scm_lmh_profile_change(u32 profile_id) { struct qcom_scm_desc desc = { @@ -1359,8 +1428,6 @@ EXPORT_SYMBOL_GPL(qcom_scm_lmh_profile_change); int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val, u64 limit_node, u32 node_id, u64 version) { - dma_addr_t payload_phys; - u32 *payload_buf; int ret, payload_size = 5 * sizeof(u32); struct qcom_scm_desc desc = { @@ -1375,7 +1442,9 @@ int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val, .owner = ARM_SMCCC_OWNER_SIP, }; - payload_buf = dma_alloc_coherent(__scm->dev, payload_size, &payload_phys, GFP_KERNEL); + u32 *payload_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, + payload_size, + GFP_KERNEL); if (!payload_buf) return -ENOMEM; @@ -1385,15 +1454,28 @@ int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val, payload_buf[3] = 1; payload_buf[4] = payload_val; - desc.args[0] = payload_phys; + desc.args[0] = qcom_tzmem_to_phys(payload_buf); ret = qcom_scm_call(__scm->dev, &desc, NULL); - dma_free_coherent(__scm->dev, payload_size, payload_buf, payload_phys); return ret; } EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh); +int qcom_scm_gpu_init_regs(u32 gpu_req) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_GPU, + .cmd = QCOM_SCM_SVC_GPU_INIT_REGS, + .arginfo = QCOM_SCM_ARGS(1), + .args[0] = gpu_req, + .owner = ARM_SMCCC_OWNER_SIP, 
+ }; + + return qcom_scm_call(__scm->dev, &desc, NULL); +} +EXPORT_SYMBOL_GPL(qcom_scm_gpu_init_regs); + static int qcom_scm_find_dload_address(struct device *dev, u64 *addr) { struct device_node *tcsr; @@ -1545,37 +1627,27 @@ int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id) unsigned long app_name_len = strlen(app_name); struct qcom_scm_desc desc = {}; struct qcom_scm_qseecom_resp res = {}; - dma_addr_t name_buf_phys; - char *name_buf; int status; if (app_name_len >= name_buf_size) return -EINVAL; - name_buf = kzalloc(name_buf_size, GFP_KERNEL); + char *name_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, + name_buf_size, + GFP_KERNEL); if (!name_buf) return -ENOMEM; memcpy(name_buf, app_name, app_name_len); - name_buf_phys = dma_map_single(__scm->dev, name_buf, name_buf_size, DMA_TO_DEVICE); - status = dma_mapping_error(__scm->dev, name_buf_phys); - if (status) { - kfree(name_buf); - dev_err(__scm->dev, "qseecom: failed to map dma address\n"); - return status; - } - desc.owner = QSEECOM_TZ_OWNER_QSEE_OS; desc.svc = QSEECOM_TZ_SVC_APP_MGR; desc.cmd = QSEECOM_TZ_CMD_APP_LOOKUP; desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL); - desc.args[0] = name_buf_phys; + desc.args[0] = qcom_tzmem_to_phys(name_buf); desc.args[1] = app_name_len; status = qcom_scm_qseecom_call(&desc, &res); - dma_unmap_single(__scm->dev, name_buf_phys, name_buf_size, DMA_TO_DEVICE); - kfree(name_buf); if (status) return status; @@ -1597,9 +1669,9 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id); /** * qcom_scm_qseecom_app_send() - Send to and receive data from a given QSEE app. * @app_id: The ID of the target app. - * @req: DMA address of the request buffer sent to the app. + * @req: Request buffer sent to the app (must be TZ memory) * @req_size: Size of the request buffer. - * @rsp: DMA address of the response buffer, written to by the app. + * @rsp: Response buffer, written to by the app (must be TZ memory) * @rsp_size: Size of the response buffer. * * Sends a request to the QSEE app associated with the given ID and read back @@ -1610,13 +1682,18 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id); * * Return: Zero on success, nonzero on failure. 
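A caller-side sketch of the reworked interface documented here: request and response buffers now come from a TZMem pool and are passed by virtual address, with the SCM layer resolving physical addresses via qcom_tzmem_to_phys() internally ('pool', 'app_id' and the sizes are illustrative):

	void *buf __free(qcom_tzmem) = qcom_tzmem_alloc(pool,
							req_size + rsp_size,
							GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* ... marshal the request into the start of 'buf' ... */

	ret = qcom_scm_qseecom_app_send(app_id, buf, req_size,
					buf + req_size, rsp_size);
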
*/ -int qcom_scm_qseecom_app_send(u32 app_id, dma_addr_t req, size_t req_size, - dma_addr_t rsp, size_t rsp_size) +int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size, + void *rsp, size_t rsp_size) { struct qcom_scm_qseecom_resp res = {}; struct qcom_scm_desc desc = {}; + phys_addr_t req_phys; + phys_addr_t rsp_phys; int status; + req_phys = qcom_tzmem_to_phys(req); + rsp_phys = qcom_tzmem_to_phys(rsp); + desc.owner = QSEECOM_TZ_OWNER_TZ_APPS; desc.svc = QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER; desc.cmd = QSEECOM_TZ_CMD_APP_SEND; @@ -1624,9 +1701,9 @@ int qcom_scm_qseecom_app_send(u32 app_id, dma_addr_t req, size_t req_size, QCOM_SCM_RW, QCOM_SCM_VAL, QCOM_SCM_RW, QCOM_SCM_VAL); desc.args[0] = app_id; - desc.args[1] = req; + desc.args[1] = req_phys; desc.args[2] = req_size; - desc.args[3] = rsp; + desc.args[3] = rsp_phys; desc.args[4] = rsp_size; status = qcom_scm_qseecom_call(&desc, &res); @@ -1649,6 +1726,8 @@ static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = { { .compatible = "lenovo,flex-5g" }, { .compatible = "lenovo,thinkpad-x13s", }, { .compatible = "qcom,sc8180x-primus" }, + { .compatible = "qcom,x1e80100-crd" }, + { .compatible = "qcom,x1e80100-qcp" }, { } }; @@ -1793,9 +1872,8 @@ static irqreturn_t qcom_scm_irq_handler(int irq, void *data) goto out; } - if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE && - flags != QCOM_SMC_WAITQ_FLAG_WAKE_ALL) { - dev_err(scm->dev, "Invalid flags found for wq_ctx: %u\n", flags); + if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE) { + dev_err(scm->dev, "Invalid flags received for wq_ctx: %u\n", flags); goto out; } @@ -1810,6 +1888,7 @@ out: static int qcom_scm_probe(struct platform_device *pdev) { + struct qcom_tzmem_pool_config pool_config; struct qcom_scm *scm; int irq, ret; @@ -1885,6 +1964,26 @@ static int qcom_scm_probe(struct platform_device *pdev) if (of_property_read_bool(pdev->dev.of_node, "qcom,sdi-enabled")) qcom_scm_disable_sdi(); + ret = of_reserved_mem_device_init(__scm->dev); + if (ret && ret != -ENODEV) + return dev_err_probe(__scm->dev, ret, + "Failed to setup the reserved memory region for TZ mem\n"); + + ret = qcom_tzmem_enable(__scm->dev); + if (ret) + return dev_err_probe(__scm->dev, ret, + "Failed to enable the TrustZone memory allocator\n"); + + memset(&pool_config, 0, sizeof(pool_config)); + pool_config.initial_size = 0; + pool_config.policy = QCOM_TZMEM_POLICY_ON_DEMAND; + pool_config.max_size = SZ_256K; + + __scm->mempool = devm_qcom_tzmem_pool_new(__scm->dev, &pool_config); + if (IS_ERR(__scm->mempool)) + return dev_err_probe(__scm->dev, PTR_ERR(__scm->mempool), + "Failed to create the SCM memory pool\n"); + /* * Initialize the QSEECOM interface. 
* diff --git a/drivers/firmware/qcom/qcom_scm.h b/drivers/firmware/qcom/qcom_scm.h index 4532907e8489..685b8f59e7a6 100644 --- a/drivers/firmware/qcom/qcom_scm.h +++ b/drivers/firmware/qcom/qcom_scm.h @@ -5,6 +5,7 @@ #define __QCOM_SCM_INT_H struct device; +struct qcom_tzmem_pool; enum qcom_scm_convention { SMC_CONVENTION_UNKNOWN, @@ -78,6 +79,8 @@ int scm_legacy_call_atomic(struct device *dev, const struct qcom_scm_desc *desc, int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc, struct qcom_scm_res *res); +struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void); + #define QCOM_SCM_SVC_BOOT 0x01 #define QCOM_SCM_BOOT_SET_ADDR 0x01 #define QCOM_SCM_BOOT_TERMINATE_PC 0x02 @@ -113,6 +116,9 @@ int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc, #define QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE 0x05 #define QCOM_SCM_MP_VIDEO_VAR 0x08 #define QCOM_SCM_MP_ASSIGN 0x16 +#define QCOM_SCM_MP_SHM_BRIDGE_ENABLE 0x1c +#define QCOM_SCM_MP_SHM_BRIDGE_DELETE 0x1d +#define QCOM_SCM_MP_SHM_BRIDGE_CREATE 0x1e #define QCOM_SCM_SVC_OCMEM 0x0f #define QCOM_SCM_OCMEM_LOCK_CMD 0x01 @@ -138,6 +144,9 @@ int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc, #define QCOM_SCM_WAITQ_RESUME 0x02 #define QCOM_SCM_WAITQ_GET_WQ_CTX 0x03 +#define QCOM_SCM_SVC_GPU 0x28 +#define QCOM_SCM_SVC_GPU_INIT_REGS 0x01 + /* common error codes */ #define QCOM_SCM_V2_EBUSY -12 #define QCOM_SCM_ENOMEM -5 diff --git a/drivers/firmware/qcom/qcom_tzmem.c b/drivers/firmware/qcom/qcom_tzmem.c new file mode 100644 index 000000000000..17948cfc82e7 --- /dev/null +++ b/drivers/firmware/qcom/qcom_tzmem.c @@ -0,0 +1,469 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Memory allocator for buffers shared with the TrustZone. + * + * Copyright (C) 2023-2024 Linaro Ltd. + */ + +#include <linux/bug.h> +#include <linux/cleanup.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/firmware/qcom/qcom_tzmem.h> +#include <linux/genalloc.h> +#include <linux/gfp.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/mm.h> +#include <linux/radix-tree.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/types.h> + +#include "qcom_tzmem.h" + +struct qcom_tzmem_area { + struct list_head list; + void *vaddr; + dma_addr_t paddr; + size_t size; + void *priv; +}; + +struct qcom_tzmem_pool { + struct gen_pool *genpool; + struct list_head areas; + enum qcom_tzmem_policy policy; + size_t increment; + size_t max_size; + spinlock_t lock; +}; + +struct qcom_tzmem_chunk { + phys_addr_t paddr; + size_t size; + struct qcom_tzmem_pool *owner; +}; + +static struct device *qcom_tzmem_dev; +static RADIX_TREE(qcom_tzmem_chunks, GFP_ATOMIC); +static DEFINE_SPINLOCK(qcom_tzmem_chunks_lock); + +#if IS_ENABLED(CONFIG_QCOM_TZMEM_MODE_GENERIC) + +static int qcom_tzmem_init(void) +{ + return 0; +} + +static int qcom_tzmem_init_area(struct qcom_tzmem_area *area) +{ + return 0; +} + +static void qcom_tzmem_cleanup_area(struct qcom_tzmem_area *area) +{ + +} + +#elif IS_ENABLED(CONFIG_QCOM_TZMEM_MODE_SHMBRIDGE) + +#include <linux/firmware/qcom/qcom_scm.h> +#include <linux/of.h> + +#define QCOM_SHM_BRIDGE_NUM_VM_SHIFT 9 + +static bool qcom_tzmem_using_shm_bridge; + +/* List of machines that are known to not support SHM bridge correctly. 
*/ +static const char *const qcom_tzmem_blacklist[] = { + "qcom,sc8180x", + "qcom,sdm845", /* reset in rmtfs memory assignment */ + "qcom,sm8150", /* reset in rmtfs memory assignment */ + NULL +}; + +static int qcom_tzmem_init(void) +{ + const char *const *platform; + int ret; + + for (platform = qcom_tzmem_blacklist; *platform; platform++) { + if (of_machine_is_compatible(*platform)) + goto notsupp; + } + + ret = qcom_scm_shm_bridge_enable(); + if (ret == -EOPNOTSUPP) + goto notsupp; + + if (!ret) + qcom_tzmem_using_shm_bridge = true; + + return ret; + +notsupp: + dev_info(qcom_tzmem_dev, "SHM Bridge not supported\n"); + return 0; +} + +static int qcom_tzmem_init_area(struct qcom_tzmem_area *area) +{ + u64 pfn_and_ns_perm, ipfn_and_s_perm, size_and_flags; + int ret; + + if (!qcom_tzmem_using_shm_bridge) + return 0; + + pfn_and_ns_perm = (u64)area->paddr | QCOM_SCM_PERM_RW; + ipfn_and_s_perm = (u64)area->paddr | QCOM_SCM_PERM_RW; + size_and_flags = area->size | (1 << QCOM_SHM_BRIDGE_NUM_VM_SHIFT); + + u64 *handle __free(kfree) = kzalloc(sizeof(*handle), GFP_KERNEL); + if (!handle) + return -ENOMEM; + + ret = qcom_scm_shm_bridge_create(qcom_tzmem_dev, pfn_and_ns_perm, + ipfn_and_s_perm, size_and_flags, + QCOM_SCM_VMID_HLOS, handle); + if (ret) + return ret; + + area->priv = no_free_ptr(handle); + + return 0; +} + +static void qcom_tzmem_cleanup_area(struct qcom_tzmem_area *area) +{ + u64 *handle = area->priv; + + if (!qcom_tzmem_using_shm_bridge) + return; + + qcom_scm_shm_bridge_delete(qcom_tzmem_dev, *handle); + kfree(handle); +} + +#endif /* CONFIG_QCOM_TZMEM_MODE_SHMBRIDGE */ + +static int qcom_tzmem_pool_add_memory(struct qcom_tzmem_pool *pool, + size_t size, gfp_t gfp) +{ + int ret; + + struct qcom_tzmem_area *area __free(kfree) = kzalloc(sizeof(*area), + gfp); + if (!area) + return -ENOMEM; + + area->size = PAGE_ALIGN(size); + + area->vaddr = dma_alloc_coherent(qcom_tzmem_dev, area->size, + &area->paddr, gfp); + if (!area->vaddr) + return -ENOMEM; + + ret = qcom_tzmem_init_area(area); + if (ret) { + dma_free_coherent(qcom_tzmem_dev, area->size, + area->vaddr, area->paddr); + return ret; + } + + ret = gen_pool_add_virt(pool->genpool, (unsigned long)area->vaddr, + (phys_addr_t)area->paddr, size, -1); + if (ret) { + dma_free_coherent(qcom_tzmem_dev, area->size, + area->vaddr, area->paddr); + return ret; + } + + scoped_guard(spinlock_irqsave, &pool->lock) + list_add_tail(&area->list, &pool->areas); + + area = NULL; + return 0; +} + +/** + * qcom_tzmem_pool_new() - Create a new TZ memory pool. + * @config: Pool configuration. + * + * Create a new pool of memory suitable for sharing with the TrustZone. + * + * Must not be used in atomic context. + * + * Return: New memory pool address or ERR_PTR() on error. 
+ */ +struct qcom_tzmem_pool * +qcom_tzmem_pool_new(const struct qcom_tzmem_pool_config *config) +{ + int ret = -ENOMEM; + + might_sleep(); + + switch (config->policy) { + case QCOM_TZMEM_POLICY_STATIC: + if (!config->initial_size) + return ERR_PTR(-EINVAL); + break; + case QCOM_TZMEM_POLICY_MULTIPLIER: + if (!config->increment) + return ERR_PTR(-EINVAL); + break; + case QCOM_TZMEM_POLICY_ON_DEMAND: + break; + default: + return ERR_PTR(-EINVAL); + } + + struct qcom_tzmem_pool *pool __free(kfree) = kzalloc(sizeof(*pool), + GFP_KERNEL); + if (!pool) + return ERR_PTR(-ENOMEM); + + pool->genpool = gen_pool_create(PAGE_SHIFT, -1); + if (!pool->genpool) + return ERR_PTR(-ENOMEM); + + gen_pool_set_algo(pool->genpool, gen_pool_best_fit, NULL); + + pool->policy = config->policy; + pool->increment = config->increment; + pool->max_size = config->max_size; + INIT_LIST_HEAD(&pool->areas); + spin_lock_init(&pool->lock); + + if (config->initial_size) { + ret = qcom_tzmem_pool_add_memory(pool, config->initial_size, + GFP_KERNEL); + if (ret) { + gen_pool_destroy(pool->genpool); + return ERR_PTR(ret); + } + } + + return_ptr(pool); +} +EXPORT_SYMBOL_GPL(qcom_tzmem_pool_new); + +/** + * qcom_tzmem_pool_free() - Destroy a TZ memory pool and free all resources. + * @pool: Memory pool to free. + * + * Must not be called if any of the allocated chunks has not been freed. + * Must not be used in atomic context. + */ +void qcom_tzmem_pool_free(struct qcom_tzmem_pool *pool) +{ + struct qcom_tzmem_area *area, *next; + struct qcom_tzmem_chunk *chunk; + struct radix_tree_iter iter; + bool non_empty = false; + void __rcu **slot; + + might_sleep(); + + if (!pool) + return; + + scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock) { + radix_tree_for_each_slot(slot, &qcom_tzmem_chunks, &iter, 0) { + chunk = radix_tree_deref_slot_protected(slot, + &qcom_tzmem_chunks_lock); + + if (chunk->owner == pool) + non_empty = true; + } + } + + WARN(non_empty, "Freeing TZ memory pool with memory still allocated"); + + list_for_each_entry_safe(area, next, &pool->areas, list) { + list_del(&area->list); + qcom_tzmem_cleanup_area(area); + dma_free_coherent(qcom_tzmem_dev, area->size, + area->vaddr, area->paddr); + kfree(area); + } + + gen_pool_destroy(pool->genpool); + kfree(pool); +} +EXPORT_SYMBOL_GPL(qcom_tzmem_pool_free); + +static void devm_qcom_tzmem_pool_free(void *data) +{ + struct qcom_tzmem_pool *pool = data; + + qcom_tzmem_pool_free(pool); +} + +/** + * devm_qcom_tzmem_pool_new() - Managed variant of qcom_tzmem_pool_new(). + * @dev: Device managing this resource. + * @config: Pool configuration. + * + * Must not be used in atomic context. + * + * Return: Address of the managed pool or ERR_PTR() on failure. 
+ */ +struct qcom_tzmem_pool * +devm_qcom_tzmem_pool_new(struct device *dev, + const struct qcom_tzmem_pool_config *config) +{ + struct qcom_tzmem_pool *pool; + int ret; + + pool = qcom_tzmem_pool_new(config); + if (IS_ERR(pool)) + return pool; + + ret = devm_add_action_or_reset(dev, devm_qcom_tzmem_pool_free, pool); + if (ret) + return ERR_PTR(ret); + + return pool; +} +EXPORT_SYMBOL_GPL(devm_qcom_tzmem_pool_new); + +static bool qcom_tzmem_try_grow_pool(struct qcom_tzmem_pool *pool, + size_t requested, gfp_t gfp) +{ + size_t current_size = gen_pool_size(pool->genpool); + + if (pool->max_size && (current_size + requested) > pool->max_size) + return false; + + switch (pool->policy) { + case QCOM_TZMEM_POLICY_STATIC: + return false; + case QCOM_TZMEM_POLICY_MULTIPLIER: + requested = current_size * pool->increment; + break; + case QCOM_TZMEM_POLICY_ON_DEMAND: + break; + } + + return !qcom_tzmem_pool_add_memory(pool, requested, gfp); +} + +/** + * qcom_tzmem_alloc() - Allocate a memory chunk suitable for sharing with TZ. + * @pool: TZ memory pool from which to allocate memory. + * @size: Number of bytes to allocate. + * @gfp: GFP flags. + * + * Can be used in any context. + * + * Return: + * Address of the allocated buffer or NULL if no more memory can be allocated. + * The buffer must be released using qcom_tzmem_free(). + */ +void *qcom_tzmem_alloc(struct qcom_tzmem_pool *pool, size_t size, gfp_t gfp) +{ + unsigned long vaddr; + int ret; + + if (!size) + return NULL; + + size = PAGE_ALIGN(size); + + struct qcom_tzmem_chunk *chunk __free(kfree) = kzalloc(sizeof(*chunk), + gfp); + if (!chunk) + return NULL; + +again: + vaddr = gen_pool_alloc(pool->genpool, size); + if (!vaddr) { + if (qcom_tzmem_try_grow_pool(pool, size, gfp)) + goto again; + + return NULL; + } + + chunk->paddr = gen_pool_virt_to_phys(pool->genpool, vaddr); + chunk->size = size; + chunk->owner = pool; + + scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock) { + ret = radix_tree_insert(&qcom_tzmem_chunks, vaddr, chunk); + if (ret) { + gen_pool_free(pool->genpool, vaddr, size); + return NULL; + } + + chunk = NULL; + } + + return (void *)vaddr; +} +EXPORT_SYMBOL_GPL(qcom_tzmem_alloc); + +/** + * qcom_tzmem_free() - Release a buffer allocated from a TZ memory pool. + * @vaddr: Virtual address of the buffer. + * + * Can be used in any context. + */ +void qcom_tzmem_free(void *vaddr) +{ + struct qcom_tzmem_chunk *chunk; + + scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock) + chunk = radix_tree_delete_item(&qcom_tzmem_chunks, + (unsigned long)vaddr, NULL); + + if (!chunk) { + WARN(1, "Virtual address %p not owned by TZ memory allocator", + vaddr); + return; + } + + scoped_guard(spinlock_irqsave, &chunk->owner->lock) + gen_pool_free(chunk->owner->genpool, (unsigned long)vaddr, + chunk->size); + kfree(chunk); +} +EXPORT_SYMBOL_GPL(qcom_tzmem_free); + +/** + * qcom_tzmem_to_phys() - Map the virtual address of a TZ buffer to physical. + * @vaddr: Virtual address of the buffer allocated from a TZ memory pool. + * + * Can be used in any context. The address must have been returned by a call + * to qcom_tzmem_alloc(). + * + * Returns: Physical address of the buffer. 
+ */ +phys_addr_t qcom_tzmem_to_phys(void *vaddr) +{ + struct qcom_tzmem_chunk *chunk; + + guard(spinlock_irqsave)(&qcom_tzmem_chunks_lock); + + chunk = radix_tree_lookup(&qcom_tzmem_chunks, (unsigned long)vaddr); + if (!chunk) + return 0; + + return chunk->paddr; +} +EXPORT_SYMBOL_GPL(qcom_tzmem_to_phys); + +int qcom_tzmem_enable(struct device *dev) +{ + if (qcom_tzmem_dev) + return -EBUSY; + + qcom_tzmem_dev = dev; + + return qcom_tzmem_init(); +} +EXPORT_SYMBOL_GPL(qcom_tzmem_enable); + +MODULE_DESCRIPTION("TrustZone memory allocator for Qualcomm firmware drivers"); +MODULE_AUTHOR("Bartosz Golaszewski <bartosz.golaszewski@linaro.org>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/qcom/qcom_tzmem.h b/drivers/firmware/qcom/qcom_tzmem.h new file mode 100644 index 000000000000..8fa8a3eb940e --- /dev/null +++ b/drivers/firmware/qcom/qcom_tzmem.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2023-2024 Linaro Ltd. + */ + +#ifndef __QCOM_TZMEM_PRIV_H +#define __QCOM_TZMEM_PRIV_H + +struct device; + +int qcom_tzmem_enable(struct device *dev); + +#endif /* __QCOM_TZMEM_PRIV_H */ diff --git a/drivers/firmware/sysfb.c b/drivers/firmware/sysfb.c index 880ffcb50088..921f61507ae8 100644 --- a/drivers/firmware/sysfb.c +++ b/drivers/firmware/sysfb.c @@ -101,8 +101,10 @@ static __init struct device *sysfb_parent_dev(const struct screen_info *si) if (IS_ERR(pdev)) { return ERR_CAST(pdev); } else if (pdev) { - if (!sysfb_pci_dev_is_enabled(pdev)) + if (!sysfb_pci_dev_is_enabled(pdev)) { + pci_dev_put(pdev); return ERR_PTR(-ENODEV); + } return &pdev->dev; } @@ -137,7 +139,7 @@ static __init int sysfb_init(void) if (compatible) { pd = sysfb_create_simplefb(si, &mode, parent); if (!IS_ERR(pd)) - goto unlock_mutex; + goto put_device; } /* if the FB is incompatible, create a legacy framebuffer device */ @@ -155,7 +157,7 @@ static __init int sysfb_init(void) pd = platform_device_alloc(name, 0); if (!pd) { ret = -ENOMEM; - goto unlock_mutex; + goto put_device; } pd->dev.parent = parent; @@ -170,9 +172,11 @@ static __init int sysfb_init(void) if (ret) goto err; - goto unlock_mutex; + goto put_device; err: platform_device_put(pd); +put_device: + put_device(parent); unlock_mutex: mutex_unlock(&disable_lock); return ret; diff --git a/drivers/firmware/ti_sci.h b/drivers/firmware/ti_sci.h index ef3a8214d002..5846c60220f5 100644 --- a/drivers/firmware/ti_sci.h +++ b/drivers/firmware/ti_sci.h @@ -4,7 +4,7 @@ * * Communication protocol with TI SCI hardware * The system works in a message response protocol - * See: http://processors.wiki.ti.com/index.php/TISCI for details + * See: https://software-dl.ti.com/tisci/esd/latest/index.html for details * * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/ */ diff --git a/drivers/firmware/turris-mox-rwtm.c b/drivers/firmware/turris-mox-rwtm.c index 31d962cdd6eb..3e7f186d239a 100644 --- a/drivers/firmware/turris-mox-rwtm.c +++ b/drivers/firmware/turris-mox-rwtm.c @@ -2,7 +2,7 @@ /* * Turris Mox rWTM firmware driver * - * Copyright (C) 2019 Marek Behún <kabel@kernel.org> + * Copyright (C) 2019, 2024 Marek Behún <kabel@kernel.org> */ #include <linux/armada-37xx-rwtm-mailbox.h> @@ -174,6 +174,9 @@ static void mox_rwtm_rx_callback(struct mbox_client *cl, void *data) struct mox_rwtm *rwtm = dev_get_drvdata(cl->dev); struct armada_37xx_rwtm_rx_msg *msg = data; + if (completion_done(&rwtm->cmd_done)) + return; + rwtm->reply = *msg; complete(&rwtm->cmd_done); } @@ -199,9 +202,8 @@ static int 
mox_get_board_info(struct mox_rwtm *rwtm) if (ret < 0) return ret; - ret = wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2); - if (ret < 0) - return ret; + if (!wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2)) + return -ETIMEDOUT; ret = mox_get_status(MBOX_CMD_BOARD_INFO, reply->retval); if (ret == -ENODATA) { @@ -235,9 +237,8 @@ static int mox_get_board_info(struct mox_rwtm *rwtm) if (ret < 0) return ret; - ret = wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2); - if (ret < 0) - return ret; + if (!wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2)) + return -ETIMEDOUT; ret = mox_get_status(MBOX_CMD_ECDSA_PUB_KEY, reply->retval); if (ret == -ENODATA) { @@ -274,9 +275,8 @@ static int check_get_random_support(struct mox_rwtm *rwtm) if (ret < 0) return ret; - ret = wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2); - if (ret < 0) - return ret; + if (!wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2)) + return -ETIMEDOUT; return mox_get_status(MBOX_CMD_GET_RANDOM, rwtm->reply.retval); } @@ -499,6 +499,7 @@ static int turris_mox_rwtm_probe(struct platform_device *pdev) platform_set_drvdata(pdev, rwtm); mutex_init(&rwtm->busy); + init_completion(&rwtm->cmd_done); rwtm->mbox_client.dev = dev; rwtm->mbox_client.rx_callback = mox_rwtm_rx_callback; @@ -512,8 +513,6 @@ static int turris_mox_rwtm_probe(struct platform_device *pdev) goto remove_files; } - init_completion(&rwtm->cmd_done); - ret = mox_get_board_info(rwtm); if (ret < 0) dev_warn(dev, "Cannot read board information: %i\n", ret); diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c index 9bc45357e1a8..add8acf66a9c 100644 --- a/drivers/firmware/xilinx/zynqmp.c +++ b/drivers/firmware/xilinx/zynqmp.c @@ -41,9 +41,6 @@ /* IOCTL/QUERY feature payload size */ #define FEATURE_PAYLOAD_SIZE 2 -/* Firmware feature check version mask */ -#define FIRMWARE_VERSION_MASK GENMASK(15, 0) - static bool feature_check_enabled; static DEFINE_HASHTABLE(pm_api_features_map, PM_API_FEATURE_CHECK_MAX_ORDER); static u32 ioctl_features[FEATURE_PAYLOAD_SIZE]; diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 1c28a48915bb..58f43bcced7c 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -219,7 +219,7 @@ config GPIO_BCM_XGS_IPROC config GPIO_BRCMSTB tristate "BRCMSTB GPIO support" default y if (ARCH_BRCMSTB || BMIPS_GENERIC) - depends on OF_GPIO && (ARCH_BRCMSTB || BMIPS_GENERIC || COMPILE_TEST) + depends on OF_GPIO && (ARCH_BRCMSTB || ARCH_BCM2835 || BMIPS_GENERIC || COMPILE_TEST) select GPIO_GENERIC select IRQ_DOMAIN help @@ -1891,4 +1891,35 @@ config GPIO_SIM endmenu +menu "GPIO Debugging utilities" + +config GPIO_SLOPPY_LOGIC_ANALYZER + tristate "Sloppy GPIO logic analyzer" + depends on (GPIOLIB || COMPILE_TEST) && CPUSETS && DEBUG_FS && EXPERT + help + This option enables support for a sloppy logic analyzer using polled + GPIOs. Use the 'tools/gpio/gpio-sloppy-logic-analyzer' script with + this driver. The script will make it easier to use and will also + isolate a CPU for the polling task. Note that this is a last resort + analyzer which can be affected by latencies, non-deterministic code + paths, or NMIs. However, for e.g. remote development, it may be useful + to get a first view and aid further debugging. + + If this driver is built as a module it will be called + 'gpio-sloppy-logic-analyzer'. 
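A recurring fix in the GPIO hunks that follow (gpio-amd8111, gpio-rdc321x): pci_read_config_dword() returns PCIBIOS_* codes rather than negative errnos, so its result must be converted before being propagated up the error path. The idiom, sketched with an illustrative device and register offset:

	#include <linux/pci.h>

	err = pci_read_config_dword(pdev, 0x58, &val);
	if (err)
		return pcibios_err_to_errno(err);
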
+ +config GPIO_VIRTUSER + tristate "GPIO Virtual User Testing Module" + select DEBUG_FS + select CONFIGFS_FS + select IRQ_WORK + help + Say Y here to enable the configurable, configfs-based virtual GPIO + consumer testing driver. + + This driver is aimed as a helper in spotting any regressions in + hot-unplug handling in GPIOLIB. + +endmenu + endif diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index e2a53013780e..64dd6d9d730d 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -150,6 +150,7 @@ obj-$(CONFIG_GPIO_SIFIVE) += gpio-sifive.o obj-$(CONFIG_GPIO_SIM) += gpio-sim.o obj-$(CONFIG_GPIO_SIOX) += gpio-siox.o obj-$(CONFIG_GPIO_SL28CPLD) += gpio-sl28cpld.o +obj-$(CONFIG_GPIO_SLOPPY_LOGIC_ANALYZER) += gpio-sloppy-logic-analyzer.o obj-$(CONFIG_GPIO_SODAVILLE) += gpio-sodaville.o obj-$(CONFIG_GPIO_SPEAR_SPICS) += gpio-spear-spics.o obj-$(CONFIG_GPIO_SPRD) += gpio-sprd.o @@ -181,6 +182,7 @@ obj-$(CONFIG_GPIO_TWL6040) += gpio-twl6040.o obj-$(CONFIG_GPIO_UNIPHIER) += gpio-uniphier.o obj-$(CONFIG_GPIO_VF610) += gpio-vf610.o obj-$(CONFIG_GPIO_VIPERBOARD) += gpio-viperboard.o +obj-$(CONFIG_GPIO_VIRTUSER) += gpio-virtuser.o obj-$(CONFIG_GPIO_VIRTIO) += gpio-virtio.o obj-$(CONFIG_GPIO_VISCONTI) += gpio-visconti.o obj-$(CONFIG_GPIO_VX855) += gpio-vx855.o diff --git a/drivers/gpio/gpio-amd8111.c b/drivers/gpio/gpio-amd8111.c index 6f3ded619c8b..3377667a28de 100644 --- a/drivers/gpio/gpio-amd8111.c +++ b/drivers/gpio/gpio-amd8111.c @@ -195,8 +195,10 @@ static int __init amd_gpio_init(void) found: err = pci_read_config_dword(pdev, 0x58, &gp.pmbase); - if (err) + if (err) { + err = pcibios_err_to_errno(err); goto out; + } err = -EIO; gp.pmbase &= 0x0000FF00; if (gp.pmbase == 0) diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c index f0c0c0f77eb0..6211d99a5770 100644 --- a/drivers/gpio/gpio-ath79.c +++ b/drivers/gpio/gpio-ath79.c @@ -273,8 +273,6 @@ static int ath79_gpio_probe(struct platform_device *pdev) dev_err(dev, "bgpio_init failed\n"); return err; } - /* Use base 0 to stay compatible with legacy platforms */ - ctrl->gc.base = 0; /* Optional interrupt setup */ if (!np || of_property_read_bool(np, "interrupt-controller")) { diff --git a/drivers/gpio/gpio-mc33880.c b/drivers/gpio/gpio-mc33880.c index 94f6fefc011b..5fb357d7b78a 100644 --- a/drivers/gpio/gpio-mc33880.c +++ b/drivers/gpio/gpio-mc33880.c @@ -99,7 +99,7 @@ static int mc33880_probe(struct spi_device *spi) mc->spi = spi; - mc->chip.label = DRIVER_NAME, + mc->chip.label = DRIVER_NAME; mc->chip.set = mc33880_set; mc->chip.base = pdata->base; mc->chip.ngpio = PIN_NUMBER; diff --git a/drivers/gpio/gpio-mmio.c b/drivers/gpio/gpio-mmio.c index 71e1af7c2184..d89e78f0ead3 100644 --- a/drivers/gpio/gpio-mmio.c +++ b/drivers/gpio/gpio-mmio.c @@ -619,8 +619,6 @@ int bgpio_init(struct gpio_chip *gc, struct device *dev, ret = gpiochip_get_ngpios(gc, dev); if (ret) gc->ngpio = gc->bgpio_bits; - else - gc->bgpio_bits = roundup_pow_of_two(round_up(gc->ngpio, 8)); ret = bgpio_setup_io(gc, dat, set, clr, flags); if (ret) diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index 732a6964748c..8baf3edd5274 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -1315,6 +1315,7 @@ static const struct of_device_id pca953x_dt_ids[] = { { .compatible = "ti,tca6408", .data = OF_953X( 8, PCA_INT), }, { .compatible = "ti,tca6416", .data = OF_953X(16, PCA_INT), }, { .compatible = "ti,tca6424", .data = OF_953X(24, PCA_INT), }, + { .compatible = "ti,tca9535", .data = OF_953X(16, PCA_INT), 
}, { .compatible = "ti,tca9538", .data = OF_953X( 8, PCA_INT), }, { .compatible = "ti,tca9539", .data = OF_953X(16, PCA_INT), }, diff --git a/drivers/gpio/gpio-rdc321x.c b/drivers/gpio/gpio-rdc321x.c index 01ed2517e9fd..ec7fb9220a47 100644 --- a/drivers/gpio/gpio-rdc321x.c +++ b/drivers/gpio/gpio-rdc321x.c @@ -102,7 +102,7 @@ static int rdc_gpio_config(struct gpio_chip *chip, unlock: spin_unlock(&gpch->lock); - return err; + return pcibios_err_to_errno(err); } /* configure GPIO pin as input */ @@ -170,13 +170,13 @@ static int rdc321x_gpio_probe(struct platform_device *pdev) rdc321x_gpio_dev->reg1_data_base, &rdc321x_gpio_dev->data_reg[0]); if (err) - return err; + return pcibios_err_to_errno(err); err = pci_read_config_dword(rdc321x_gpio_dev->sb_pdev, rdc321x_gpio_dev->reg2_data_base, &rdc321x_gpio_dev->data_reg[1]); if (err) - return err; + return pcibios_err_to_errno(err); dev_info(&pdev->dev, "registering %d GPIOs\n", rdc321x_gpio_dev->chip.ngpio); diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c index 2ed5cbe7c8a8..dcca1d7f173e 100644 --- a/drivers/gpio/gpio-sim.c +++ b/drivers/gpio/gpio-sim.c @@ -7,6 +7,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/array_size.h> #include <linux/bitmap.h> #include <linux/cleanup.h> #include <linux/completion.h> @@ -20,7 +21,6 @@ #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/irq_sim.h> -#include <linux/kernel.h> #include <linux/list.h> #include <linux/lockdep.h> #include <linux/minmax.h> @@ -227,6 +227,27 @@ static void gpio_sim_free(struct gpio_chip *gc, unsigned int offset) } } +static int gpio_sim_irq_requested(struct irq_domain *domain, + irq_hw_number_t hwirq, void *data) +{ + struct gpio_sim_chip *chip = data; + + return gpiochip_lock_as_irq(&chip->gc, hwirq); +} + +static void gpio_sim_irq_released(struct irq_domain *domain, + irq_hw_number_t hwirq, void *data) +{ + struct gpio_sim_chip *chip = data; + + gpiochip_unlock_as_irq(&chip->gc, hwirq); +} + +static const struct irq_sim_ops gpio_sim_irq_sim_ops = { + .irq_sim_irq_requested = gpio_sim_irq_requested, + .irq_sim_irq_released = gpio_sim_irq_released, +}; + static void gpio_sim_dbg_show(struct seq_file *seq, struct gpio_chip *gc) { struct gpio_sim_chip *chip = gpiochip_get_data(gc); @@ -308,13 +329,6 @@ static ssize_t gpio_sim_sysfs_pull_store(struct device *dev, return len; } -static void gpio_sim_mutex_destroy(void *data) -{ - struct mutex *lock = data; - - mutex_destroy(lock); -} - static void gpio_sim_put_device(void *data) { struct device *dev = data; @@ -450,7 +464,9 @@ static int gpio_sim_add_bank(struct fwnode_handle *swnode, struct device *dev) if (!chip->pull_map) return -ENOMEM; - chip->irq_sim = devm_irq_domain_create_sim(dev, swnode, num_lines); + chip->irq_sim = devm_irq_domain_create_sim_full(dev, swnode, num_lines, + &gpio_sim_irq_sim_ops, + chip); if (IS_ERR(chip->irq_sim)) return PTR_ERR(chip->irq_sim); @@ -458,9 +474,7 @@ static int gpio_sim_add_bank(struct fwnode_handle *swnode, struct device *dev) if (ret) return ret; - mutex_init(&chip->lock); - ret = devm_add_action_or_reset(dev, gpio_sim_mutex_destroy, - &chip->lock); + ret = devm_mutex_init(dev, &chip->lock); if (ret) return ret; @@ -581,19 +595,19 @@ static int gpio_sim_bus_notifier_call(struct notifier_block *nb, snprintf(devname, sizeof(devname), "gpio-sim.%u", simdev->id); - if (strcmp(dev_name(dev), devname) == 0) { - if (action == BUS_NOTIFY_BOUND_DRIVER) - simdev->driver_bound = true; - else if (action == BUS_NOTIFY_DRIVER_NOT_BOUND) - simdev->driver_bound 
= false; - else - return NOTIFY_DONE; + if (!device_match_name(dev, devname)) + return NOTIFY_DONE; - complete(&simdev->probe_completion); - return NOTIFY_OK; - } + if (action == BUS_NOTIFY_BOUND_DRIVER) + simdev->driver_bound = true; + else if (action == BUS_NOTIFY_DRIVER_NOT_BOUND) + simdev->driver_bound = false; + else + return NOTIFY_DONE; + + complete(&simdev->probe_completion); - return NOTIFY_DONE; + return NOTIFY_OK; } static struct gpio_sim_device *to_gpio_sim_device(struct config_item *item) diff --git a/drivers/gpio/gpio-sloppy-logic-analyzer.c b/drivers/gpio/gpio-sloppy-logic-analyzer.c new file mode 100644 index 000000000000..aed6d1f6cfc3 --- /dev/null +++ b/drivers/gpio/gpio-sloppy-logic-analyzer.c @@ -0,0 +1,344 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Sloppy logic analyzer using GPIOs (to be run on an isolated CPU) + * + * Use the 'gpio-sloppy-logic-analyzer' script in the 'tools/gpio' folder for + * easier usage and further documentation. Note that this is a last resort + * analyzer which can be affected by latencies and non-deterministic code + * paths. However, for e.g. remote development, it may be useful to get a first + * view and aid further debugging. + * + * Copyright (C) Wolfram Sang <wsa@sang-engineering.com> + * Copyright (C) Renesas Electronics Corporation + */ + +#include <linux/ctype.h> +#include <linux/debugfs.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/gpio/consumer.h> +#include <linux/init.h> +#include <linux/ktime.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/property.h> +#include <linux/slab.h> +#include <linux/sizes.h> +#include <linux/timekeeping.h> +#include <linux/types.h> +#include <linux/vmalloc.h> + +#define GPIO_LA_NAME "gpio-sloppy-logic-analyzer" +#define GPIO_LA_DEFAULT_BUF_SIZE SZ_256K +/* can be increased but then we need to extend the u8 buffers */ +#define GPIO_LA_MAX_PROBES 8 +#define GPIO_LA_NUM_TESTS 1024 + +struct gpio_la_poll_priv { + struct mutex blob_lock; /* serialize access to the blob (data) */ + u32 buf_idx; + struct gpio_descs *descs; + unsigned long delay_ns; + unsigned long acq_delay; + struct debugfs_blob_wrapper blob; + struct dentry *debug_dir; + struct dentry *blob_dent; + struct debugfs_blob_wrapper meta; + struct device *dev; + unsigned int trig_len; + u8 *trig_data; +}; + +static struct dentry *gpio_la_poll_debug_dir; + +static __always_inline int gpio_la_get_array(struct gpio_descs *d, unsigned long *sptr) +{ + int ret; + + ret = gpiod_get_array_value(d->ndescs, d->desc, d->info, sptr); + if (ret == 0 && fatal_signal_pending(current)) + ret = -EINTR; + + return ret; +} + +static int fops_capture_set(void *data, u64 val) +{ + struct gpio_la_poll_priv *priv = data; + u8 *la_buf = priv->blob.data; + unsigned long state = 0; /* zeroed because GPIO arrays are bitfields */ + unsigned long delay; + ktime_t start_time; + unsigned int i; + int ret; + + if (!val) + return 0; + + if (!la_buf) + return -ENOMEM; + + if (!priv->delay_ns) + return -EINVAL; + + mutex_lock(&priv->blob_lock); + if (priv->blob_dent) { + debugfs_remove(priv->blob_dent); + priv->blob_dent = NULL; + } + + priv->buf_idx = 0; + + local_irq_disable(); + preempt_disable_notrace(); + + /* Measure delay of reading GPIOs */ + start_time = ktime_get(); + for (i = 0; i < GPIO_LA_NUM_TESTS; i++) { + ret = gpio_la_get_array(priv->descs, &state); + if (ret) + goto out; + } + + priv->acq_delay = 
ktime_sub(ktime_get(), start_time) / GPIO_LA_NUM_TESTS;
+ if (priv->delay_ns < priv->acq_delay) {
+ ret = -ERANGE;
+ goto out;
+ }
+
+ delay = priv->delay_ns - priv->acq_delay;
+
+ /* Wait for triggers */
+ for (i = 0; i < priv->trig_len; i += 2) {
+ do {
+ ret = gpio_la_get_array(priv->descs, &state);
+ if (ret)
+ goto out;
+
+ ndelay(delay);
+ } while ((state & priv->trig_data[i]) != priv->trig_data[i + 1]);
+ }
+
+ /* With triggers, final state is also the first sample */
+ if (priv->trig_len)
+ la_buf[priv->buf_idx++] = state;
+
+ /* Sample */
+ while (priv->buf_idx < priv->blob.size) {
+ ret = gpio_la_get_array(priv->descs, &state);
+ if (ret)
+ goto out;
+
+ la_buf[priv->buf_idx++] = state;
+ ndelay(delay);
+ }
+out:
+ preempt_enable_notrace();
+ local_irq_enable();
+ if (ret)
+ dev_err(priv->dev, "couldn't read GPIOs: %d\n", ret);
+
+ kfree(priv->trig_data);
+ priv->trig_data = NULL;
+ priv->trig_len = 0;
+
+ priv->blob_dent = debugfs_create_blob("sample_data", 0400, priv->debug_dir, &priv->blob);
+ mutex_unlock(&priv->blob_lock);
+
+ return ret;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(fops_capture, NULL, fops_capture_set, "%llu\n");
+
+static int fops_buf_size_get(void *data, u64 *val)
+{
+ struct gpio_la_poll_priv *priv = data;
+
+ *val = priv->blob.size;
+
+ return 0;
+}
+
+static int fops_buf_size_set(void *data, u64 val)
+{
+ struct gpio_la_poll_priv *priv = data;
+ int ret = 0;
+ void *p;
+
+ if (!val)
+ return -EINVAL;
+
+ mutex_lock(&priv->blob_lock);
+
+ vfree(priv->blob.data);
+ p = vzalloc(val);
+ if (!p) {
+ val = 0;
+ ret = -ENOMEM;
+ }
+
+ priv->blob.data = p;
+ priv->blob.size = val;
+
+ mutex_unlock(&priv->blob_lock);
+ return ret;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(fops_buf_size, fops_buf_size_get, fops_buf_size_set, "%llu\n");
+
+static int trigger_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, NULL, inode->i_private);
+}
+
+static ssize_t trigger_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *offset)
+{
+ struct seq_file *m = file->private_data;
+ struct gpio_la_poll_priv *priv = m->private;
+ char *buf;
+
+ /* upper limit is arbitrary but should be less than PAGE_SIZE */
+ if (count > 2048 || count & 1)
+ return -EINVAL;
+
+ buf = memdup_user(ubuf, count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ /* free any previous trigger that was never consumed by a capture */
+ kfree(priv->trig_data);
+ priv->trig_data = buf;
+ priv->trig_len = count;
+
+ return count;
+}
+
+static const struct file_operations fops_trigger = {
+ .owner = THIS_MODULE,
+ .open = trigger_open,
+ .write = trigger_write,
+ .llseek = no_llseek,
+ .release = single_release,
+};
+
+static int gpio_la_poll_probe(struct platform_device *pdev)
+{
+ struct gpio_la_poll_priv *priv;
+ struct device *dev = &pdev->dev;
+ const char *devname = dev_name(dev);
+ const char *gpio_names[GPIO_LA_MAX_PROBES];
+ char *meta = NULL;
+ unsigned int i, meta_len = 0;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ ret = devm_mutex_init(dev, &priv->blob_lock);
+ if (ret)
+ return ret;
+
+ ret = fops_buf_size_set(priv, GPIO_LA_DEFAULT_BUF_SIZE);
+ if (ret)
+ return ret;
+
+ priv->descs = devm_gpiod_get_array(dev, "probe", GPIOD_IN);
+ if (IS_ERR(priv->descs))
+ return PTR_ERR(priv->descs);
+
+ /* artificial limit to keep 1 byte per sample for now */
+ if (priv->descs->ndescs > GPIO_LA_MAX_PROBES)
+ return -EFBIG;
+
+ ret = device_property_read_string_array(dev, "probe-names", gpio_names,
+ priv->descs->ndescs);
+ if (ret >= 0 && ret != priv->descs->ndescs)
+ ret = -EBADR;
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "error naming the GPIOs");
+
+ for (i = 0; i < 
priv->descs->ndescs; i++) { + unsigned int add_len; + char *new_meta, *consumer_name; + + if (gpiod_cansleep(priv->descs->desc[i])) + return -EREMOTE; + + consumer_name = kasprintf(GFP_KERNEL, "%s: %s", devname, gpio_names[i]); + if (!consumer_name) + return -ENOMEM; + gpiod_set_consumer_name(priv->descs->desc[i], consumer_name); + kfree(consumer_name); + + /* '10' is length of 'probe00=\n\0' */ + add_len = strlen(gpio_names[i]) + 10; + + new_meta = devm_krealloc(dev, meta, meta_len + add_len, GFP_KERNEL); + if (!new_meta) + return -ENOMEM; + + meta = new_meta; + meta_len += snprintf(meta + meta_len, add_len, "probe%02u=%s\n", + i + 1, gpio_names[i]); + } + + platform_set_drvdata(pdev, priv); + priv->dev = dev; + + priv->meta.data = meta; + priv->meta.size = meta_len; + priv->debug_dir = debugfs_create_dir(devname, gpio_la_poll_debug_dir); + debugfs_create_blob("meta_data", 0400, priv->debug_dir, &priv->meta); + debugfs_create_ulong("delay_ns", 0600, priv->debug_dir, &priv->delay_ns); + debugfs_create_ulong("delay_ns_acquisition", 0400, priv->debug_dir, &priv->acq_delay); + debugfs_create_file_unsafe("buf_size", 0600, priv->debug_dir, priv, &fops_buf_size); + debugfs_create_file_unsafe("capture", 0200, priv->debug_dir, priv, &fops_capture); + debugfs_create_file_unsafe("trigger", 0200, priv->debug_dir, priv, &fops_trigger); + + return 0; +} + +static void gpio_la_poll_remove(struct platform_device *pdev) +{ + struct gpio_la_poll_priv *priv = platform_get_drvdata(pdev); + + mutex_lock(&priv->blob_lock); + debugfs_remove_recursive(priv->debug_dir); + mutex_unlock(&priv->blob_lock); +} + +static const struct of_device_id gpio_la_poll_of_match[] = { + { .compatible = GPIO_LA_NAME }, + { } +}; +MODULE_DEVICE_TABLE(of, gpio_la_poll_of_match); + +static struct platform_driver gpio_la_poll_device_driver = { + .probe = gpio_la_poll_probe, + .remove_new = gpio_la_poll_remove, + .driver = { + .name = GPIO_LA_NAME, + .of_match_table = gpio_la_poll_of_match, + } +}; + +static int __init gpio_la_poll_init(void) +{ + gpio_la_poll_debug_dir = debugfs_create_dir(GPIO_LA_NAME, NULL); + + return platform_driver_register(&gpio_la_poll_device_driver); +} +/* + * Non-strict pin controllers can read GPIOs while being muxed to something else. + * To support that, we need to claim GPIOs before further pinmuxing happens. 
So, + * we probe early using 'late_initcall' + */ +late_initcall(gpio_la_poll_init); + +static void __exit gpio_la_poll_exit(void) +{ + platform_driver_unregister(&gpio_la_poll_device_driver); + debugfs_remove_recursive(gpio_la_poll_debug_dir); +} +module_exit(gpio_la_poll_exit); + +MODULE_AUTHOR("Wolfram Sang <wsa@sang-engineering.com>"); +MODULE_DESCRIPTION("Sloppy logic analyzer using GPIOs"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpio/gpio-syscon.c b/drivers/gpio/gpio-syscon.c index 6e1a2581e6ae..3a90a3a1caea 100644 --- a/drivers/gpio/gpio-syscon.c +++ b/drivers/gpio/gpio-syscon.c @@ -208,6 +208,7 @@ static int syscon_gpio_probe(struct platform_device *pdev) struct syscon_gpio_priv *priv; struct device_node *np = dev->of_node; int ret; + bool use_parent_regmap = false; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) @@ -216,24 +217,28 @@ static int syscon_gpio_probe(struct platform_device *pdev) priv->data = of_device_get_match_data(dev); priv->syscon = syscon_regmap_lookup_by_phandle(np, "gpio,syscon-dev"); - if (IS_ERR(priv->syscon) && np->parent) + if (IS_ERR(priv->syscon) && np->parent) { priv->syscon = syscon_node_to_regmap(np->parent); + use_parent_regmap = true; + } if (IS_ERR(priv->syscon)) return PTR_ERR(priv->syscon); - ret = of_property_read_u32_index(np, "gpio,syscon-dev", 1, - &priv->dreg_offset); - if (ret) - dev_err(dev, "can't read the data register offset!\n"); + if (!use_parent_regmap) { + ret = of_property_read_u32_index(np, "gpio,syscon-dev", 1, + &priv->dreg_offset); + if (ret) + dev_err(dev, "can't read the data register offset!\n"); - priv->dreg_offset <<= 3; + priv->dreg_offset <<= 3; - ret = of_property_read_u32_index(np, "gpio,syscon-dev", 2, - &priv->dir_reg_offset); - if (ret) - dev_dbg(dev, "can't read the dir register offset!\n"); + ret = of_property_read_u32_index(np, "gpio,syscon-dev", 2, + &priv->dir_reg_offset); + if (ret) + dev_dbg(dev, "can't read the dir register offset!\n"); - priv->dir_reg_offset <<= 3; + priv->dir_reg_offset <<= 3; + } priv->chip.parent = dev; priv->chip.owner = THIS_MODULE; diff --git a/drivers/gpio/gpio-virtuser.c b/drivers/gpio/gpio-virtuser.c new file mode 100644 index 000000000000..0e0d55da4f01 --- /dev/null +++ b/drivers/gpio/gpio-virtuser.c @@ -0,0 +1,1807 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Configurable virtual GPIO consumer module. 
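+ *
+ * The consumer is configured via configfs and, for every requested GPIO
+ * array, exposes debugfs attributes for exercising the consumer API -
+ * values, direction, debounce, consumer name and interrupts - in both
+ * sleeping and atomic (irq_work) context.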
+ * + * Copyright (C) 2023-2024 Bartosz Golaszewski <bartosz.golaszewski@linaro.org> + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/array_size.h> +#include <linux/atomic.h> +#include <linux/bitmap.h> +#include <linux/cleanup.h> +#include <linux/completion.h> +#include <linux/configfs.h> +#include <linux/debugfs.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/gpio/consumer.h> +#include <linux/gpio/machine.h> +#include <linux/idr.h> +#include <linux/interrupt.h> +#include <linux/irq_work.h> +#include <linux/limits.h> +#include <linux/list.h> +#include <linux/lockdep.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/notifier.h> +#include <linux/of.h> +#include <linux/overflow.h> +#include <linux/platform_device.h> +#include <linux/printk.h> +#include <linux/property.h> +#include <linux/slab.h> +#include <linux/string_helpers.h> +#include <linux/types.h> + +#define GPIO_VIRTUSER_NAME_BUF_LEN 32 + +static DEFINE_IDA(gpio_virtuser_ida); +static struct dentry *gpio_virtuser_dbg_root; + +struct gpio_virtuser_attr_data { + union { + struct gpio_desc *desc; + struct gpio_descs *descs; + }; + struct dentry *dbgfs_dir; +}; + +struct gpio_virtuser_line_array_data { + struct gpio_virtuser_attr_data ad; +}; + +struct gpio_virtuser_line_data { + struct gpio_virtuser_attr_data ad; + char consumer[GPIO_VIRTUSER_NAME_BUF_LEN]; + struct mutex consumer_lock; + unsigned int debounce; + atomic_t irq; + atomic_t irq_count; +}; + +struct gpio_virtuser_dbgfs_attr_descr { + const char *name; + const struct file_operations *fops; +}; + +struct gpio_virtuser_irq_work_context { + struct irq_work work; + struct completion work_completion; + union { + struct { + struct gpio_desc *desc; + int dir; + int val; + int ret; + }; + struct { + struct gpio_descs *descs; + unsigned long *values; + }; + }; +}; + +static struct gpio_virtuser_irq_work_context * +to_gpio_virtuser_irq_work_context(struct irq_work *work) +{ + return container_of(work, struct gpio_virtuser_irq_work_context, work); +} + +static void +gpio_virtuser_init_irq_work_context(struct gpio_virtuser_irq_work_context *ctx) +{ + memset(ctx, 0, sizeof(*ctx)); + init_completion(&ctx->work_completion); +} + +static void +gpio_virtuser_irq_work_queue_sync(struct gpio_virtuser_irq_work_context *ctx) +{ + irq_work_queue(&ctx->work); + wait_for_completion(&ctx->work_completion); +} + +static void gpio_virtuser_dbgfs_emit_value_array(char *buf, + unsigned long *values, + size_t num_values) +{ + size_t i; + + for (i = 0; i < num_values; i++) + buf[i] = test_bit(i, values) ? 
'1' : '0'; + + buf[i++] = '\n'; +} + +static void gpio_virtuser_get_value_array_atomic(struct irq_work *work) +{ + struct gpio_virtuser_irq_work_context *ctx = + to_gpio_virtuser_irq_work_context(work); + struct gpio_descs *descs = ctx->descs; + + ctx->ret = gpiod_get_array_value(descs->ndescs, descs->desc, + descs->info, ctx->values); + complete(&ctx->work_completion); +} + +static int gpio_virtuser_get_array_value(struct gpio_descs *descs, + unsigned long *values, bool atomic) +{ + struct gpio_virtuser_irq_work_context ctx; + + if (!atomic) + return gpiod_get_array_value_cansleep(descs->ndescs, + descs->desc, + descs->info, values); + + gpio_virtuser_init_irq_work_context(&ctx); + ctx.work = IRQ_WORK_INIT_HARD(gpio_virtuser_get_value_array_atomic); + ctx.descs = descs; + ctx.values = values; + + gpio_virtuser_irq_work_queue_sync(&ctx); + + return ctx.ret; +} + +static ssize_t gpio_virtuser_value_array_do_read(struct file *file, + char __user *user_buf, + size_t size, loff_t *ppos, + bool atomic) +{ + struct gpio_virtuser_line_data *data = file->private_data; + struct gpio_descs *descs = data->ad.descs; + size_t bufsize; + int ret; + + unsigned long *values __free(bitmap) = bitmap_zalloc(descs->ndescs, + GFP_KERNEL); + if (!values) + return -ENOMEM; + + ret = gpio_virtuser_get_array_value(descs, values, atomic); + if (ret) + return ret; + + bufsize = descs->ndescs + 2; + + char *buf __free(kfree) = kzalloc(bufsize, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + gpio_virtuser_dbgfs_emit_value_array(buf, values, descs->ndescs); + + return simple_read_from_buffer(user_buf, size, ppos, buf, + descs->ndescs + 1); +} + +static int gpio_virtuser_dbgfs_parse_value_array(const char *buf, + size_t len, + unsigned long *values) +{ + size_t i; + + for (i = 0; i < len; i++) { + if (buf[i] == '0') + clear_bit(i, values); + else if (buf[i] == '1') + set_bit(i, values); + else + return -EINVAL; + } + + return 0; +} + +static void gpio_virtuser_set_value_array_atomic(struct irq_work *work) +{ + struct gpio_virtuser_irq_work_context *ctx = + to_gpio_virtuser_irq_work_context(work); + struct gpio_descs *descs = ctx->descs; + + ctx->ret = gpiod_set_array_value(descs->ndescs, descs->desc, + descs->info, ctx->values); + complete(&ctx->work_completion); +} + +static int gpio_virtuser_set_array_value(struct gpio_descs *descs, + unsigned long *values, bool atomic) +{ + struct gpio_virtuser_irq_work_context ctx; + + if (!atomic) + return gpiod_set_array_value_cansleep(descs->ndescs, + descs->desc, + descs->info, values); + + gpio_virtuser_init_irq_work_context(&ctx); + ctx.work = IRQ_WORK_INIT_HARD(gpio_virtuser_set_value_array_atomic); + ctx.descs = descs; + ctx.values = values; + + gpio_virtuser_irq_work_queue_sync(&ctx); + + return ctx.ret; +} + +static ssize_t gpio_virtuser_value_array_do_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos, + bool atomic) +{ + struct gpio_virtuser_line_data *data = file->private_data; + struct gpio_descs *descs = data->ad.descs; + int ret; + + if (count - 1 != descs->ndescs) + return -EINVAL; + + char *buf __free(kfree) = kzalloc(count, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ret = simple_write_to_buffer(buf, count, ppos, user_buf, count); + if (ret < 0) + return ret; + + unsigned long *values __free(bitmap) = bitmap_zalloc(descs->ndescs, + GFP_KERNEL); + if (!values) + return -ENOMEM; + + ret = gpio_virtuser_dbgfs_parse_value_array(buf, count - 1, values); + if (ret) + return ret; + + ret = gpio_virtuser_set_array_value(descs, values, 
atomic); + if (ret) + return ret; + + return count; +} + +static ssize_t gpio_virtuser_value_array_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + return gpio_virtuser_value_array_do_read(file, user_buf, count, ppos, + false); +} + +static ssize_t gpio_virtuser_value_array_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + return gpio_virtuser_value_array_do_write(file, user_buf, count, ppos, + false); +} + +static const struct file_operations gpio_virtuser_value_array_fops = { + .read = gpio_virtuser_value_array_read, + .write = gpio_virtuser_value_array_write, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t +gpio_virtuser_value_array_atomic_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + return gpio_virtuser_value_array_do_read(file, user_buf, count, ppos, + true); +} + +static ssize_t +gpio_virtuser_value_array_atomic_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + return gpio_virtuser_value_array_do_write(file, user_buf, count, ppos, + true); +} + +static const struct file_operations gpio_virtuser_value_array_atomic_fops = { + .read = gpio_virtuser_value_array_atomic_read, + .write = gpio_virtuser_value_array_atomic_write, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static void gpio_virtuser_do_get_direction_atomic(struct irq_work *work) +{ + struct gpio_virtuser_irq_work_context *ctx = + to_gpio_virtuser_irq_work_context(work); + + ctx->ret = gpiod_get_direction(ctx->desc); + complete(&ctx->work_completion); +} + +static int gpio_virtuser_get_direction_atomic(struct gpio_desc *desc) +{ + struct gpio_virtuser_irq_work_context ctx; + + gpio_virtuser_init_irq_work_context(&ctx); + ctx.work = IRQ_WORK_INIT_HARD(gpio_virtuser_do_get_direction_atomic); + ctx.desc = desc; + + gpio_virtuser_irq_work_queue_sync(&ctx); + + return ctx.ret; +} + +static ssize_t gpio_virtuser_direction_do_read(struct file *file, + char __user *user_buf, + size_t size, loff_t *ppos, + bool atomic) +{ + struct gpio_virtuser_line_data *data = file->private_data; + struct gpio_desc *desc = data->ad.desc; + char buf[32]; + int dir; + + if (!atomic) + dir = gpiod_get_direction(desc); + else + dir = gpio_virtuser_get_direction_atomic(desc); + if (dir < 0) + return dir; + + snprintf(buf, sizeof(buf), "%s\n", dir ? 
"input" : "output"); + + return simple_read_from_buffer(user_buf, size, ppos, buf, strlen(buf)); +} + +static int gpio_virtuser_set_direction(struct gpio_desc *desc, int dir, int val) +{ + if (dir) + return gpiod_direction_input(desc); + + return gpiod_direction_output(desc, val); +} + +static void gpio_virtuser_do_set_direction_atomic(struct irq_work *work) +{ + struct gpio_virtuser_irq_work_context *ctx = + to_gpio_virtuser_irq_work_context(work); + + ctx->ret = gpio_virtuser_set_direction(ctx->desc, ctx->dir, ctx->val); + complete(&ctx->work_completion); +} + +static int gpio_virtuser_set_direction_atomic(struct gpio_desc *desc, + int dir, int val) +{ + struct gpio_virtuser_irq_work_context ctx; + + gpio_virtuser_init_irq_work_context(&ctx); + ctx.work = IRQ_WORK_INIT_HARD(gpio_virtuser_do_set_direction_atomic); + ctx.desc = desc; + ctx.dir = dir; + ctx.val = val; + + gpio_virtuser_irq_work_queue_sync(&ctx); + + return ctx.ret; +} + +static ssize_t gpio_virtuser_direction_do_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos, + bool atomic) +{ + struct gpio_virtuser_line_data *data = file->private_data; + struct gpio_desc *desc = data->ad.desc; + char buf[32], *trimmed; + int ret, dir, val = 0; + + ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count); + if (ret < 0) + return ret; + + trimmed = strim(buf); + + if (strcmp(trimmed, "input") == 0) { + dir = 1; + } else if (strcmp(trimmed, "output-high") == 0) { + dir = 0; + val = 1; + } else if (strcmp(trimmed, "output-low") == 0) { + dir = val = 0; + } else { + return -EINVAL; + } + + if (!atomic) + ret = gpio_virtuser_set_direction(desc, dir, val); + else + ret = gpio_virtuser_set_direction_atomic(desc, dir, val); + if (ret) + return ret; + + return count; +} + +static ssize_t gpio_virtuser_direction_read(struct file *file, + char __user *user_buf, + size_t size, loff_t *ppos) +{ + return gpio_virtuser_direction_do_read(file, user_buf, size, ppos, + false); +} + +static ssize_t gpio_virtuser_direction_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + return gpio_virtuser_direction_do_write(file, user_buf, count, ppos, + false); +} + +static const struct file_operations gpio_virtuser_direction_fops = { + .read = gpio_virtuser_direction_read, + .write = gpio_virtuser_direction_write, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t gpio_virtuser_direction_atomic_read(struct file *file, + char __user *user_buf, + size_t size, loff_t *ppos) +{ + return gpio_virtuser_direction_do_read(file, user_buf, size, ppos, + true); +} + +static ssize_t gpio_virtuser_direction_atomic_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + return gpio_virtuser_direction_do_write(file, user_buf, count, ppos, + true); +} + +static const struct file_operations gpio_virtuser_direction_atomic_fops = { + .read = gpio_virtuser_direction_atomic_read, + .write = gpio_virtuser_direction_atomic_write, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static int gpio_virtuser_value_get(void *data, u64 *val) +{ + struct gpio_virtuser_line_data *ld = data; + int ret; + + ret = gpiod_get_value_cansleep(ld->ad.desc); + if (ret < 0) + return ret; + + *val = ret; + + return 0; +} + +static int gpio_virtuser_value_set(void *data, u64 val) +{ + struct gpio_virtuser_line_data *ld = data; + + if (val > 1) + return -EINVAL; + + gpiod_set_value_cansleep(ld->ad.desc, (int)val); 
+ + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(gpio_virtuser_value_fops, + gpio_virtuser_value_get, + gpio_virtuser_value_set, + "%llu\n"); + +static void gpio_virtuser_get_value_atomic(struct irq_work *work) +{ + struct gpio_virtuser_irq_work_context *ctx = + to_gpio_virtuser_irq_work_context(work); + + ctx->val = gpiod_get_value(ctx->desc); + complete(&ctx->work_completion); +} + +static int gpio_virtuser_value_atomic_get(void *data, u64 *val) +{ + struct gpio_virtuser_line_data *ld = data; + struct gpio_virtuser_irq_work_context ctx; + + gpio_virtuser_init_irq_work_context(&ctx); + ctx.work = IRQ_WORK_INIT_HARD(gpio_virtuser_get_value_atomic); + ctx.desc = ld->ad.desc; + + gpio_virtuser_irq_work_queue_sync(&ctx); + + if (ctx.val < 0) + return ctx.val; + + *val = ctx.val; + + return 0; +} + +static void gpio_virtuser_set_value_atomic(struct irq_work *work) +{ + struct gpio_virtuser_irq_work_context *ctx = + to_gpio_virtuser_irq_work_context(work); + + gpiod_set_value(ctx->desc, ctx->val); + complete(&ctx->work_completion); +} + +static int gpio_virtuser_value_atomic_set(void *data, u64 val) +{ + struct gpio_virtuser_line_data *ld = data; + struct gpio_virtuser_irq_work_context ctx; + + if (val > 1) + return -EINVAL; + + gpio_virtuser_init_irq_work_context(&ctx); + ctx.work = IRQ_WORK_INIT_HARD(gpio_virtuser_set_value_atomic); + ctx.desc = ld->ad.desc; + ctx.val = (int)val; + + gpio_virtuser_irq_work_queue_sync(&ctx); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(gpio_virtuser_value_atomic_fops, + gpio_virtuser_value_atomic_get, + gpio_virtuser_value_atomic_set, + "%llu\n"); + +static int gpio_virtuser_debounce_get(void *data, u64 *val) +{ + struct gpio_virtuser_line_data *ld = data; + + *val = READ_ONCE(ld->debounce); + + return 0; +} + +static int gpio_virtuser_debounce_set(void *data, u64 val) +{ + struct gpio_virtuser_line_data *ld = data; + int ret; + + if (val > UINT_MAX) + return -E2BIG; + + ret = gpiod_set_debounce(ld->ad.desc, (unsigned int)val); + if (ret) + /* Don't propagate errno unknown to user-space. */ + return ret == -ENOTSUPP ? 
-EOPNOTSUPP : ret;
+
+ WRITE_ONCE(ld->debounce, (unsigned int)val);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(gpio_virtuser_debounce_fops,
+ gpio_virtuser_debounce_get,
+ gpio_virtuser_debounce_set,
+ "%llu\n");
+
+static ssize_t gpio_virtuser_consumer_read(struct file *file,
+ char __user *user_buf,
+ size_t size, loff_t *ppos)
+{
+ struct gpio_virtuser_line_data *data = file->private_data;
+ char buf[GPIO_VIRTUSER_NAME_BUF_LEN + 1];
+ ssize_t ret;
+
+ memset(buf, 0x0, sizeof(buf));
+
+ scoped_guard(mutex, &data->consumer_lock)
+ ret = snprintf(buf, sizeof(buf), "%s\n", data->consumer);
+
+ return simple_read_from_buffer(user_buf, size, ppos, buf, ret);
+}
+
+static ssize_t gpio_virtuser_consumer_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct gpio_virtuser_line_data *data = file->private_data;
+ char buf[GPIO_VIRTUSER_NAME_BUF_LEN + 2];
+ int ret;
+
+ /* zero the buffer so it's always NUL-terminated */
+ memset(buf, 0x0, sizeof(buf));
+
+ ret = simple_write_to_buffer(buf, GPIO_VIRTUSER_NAME_BUF_LEN, ppos,
+ user_buf, count);
+ if (ret < 0)
+ return ret;
+
+ /* strip the trailing newline, if any */
+ if (ret > 0 && buf[ret - 1] == '\n')
+ buf[ret - 1] = '\0';
+
+ ret = gpiod_set_consumer_name(data->ad.desc, buf);
+ if (ret)
+ return ret;
+
+ scoped_guard(mutex, &data->consumer_lock)
+ strscpy(data->consumer, buf, sizeof(data->consumer));
+
+ return count;
+}
+
+static const struct file_operations gpio_virtuser_consumer_fops = {
+ .read = gpio_virtuser_consumer_read,
+ .write = gpio_virtuser_consumer_write,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int gpio_virtuser_interrupts_get(void *data, u64 *val)
+{
+ struct gpio_virtuser_line_data *ld = data;
+
+ *val = atomic_read(&ld->irq_count);
+
+ return 0;
+}
+
+static irqreturn_t gpio_virtuser_irq_handler(int irq, void *data)
+{
+ struct gpio_virtuser_line_data *ld = data;
+
+ atomic_inc(&ld->irq_count);
+
+ return IRQ_HANDLED;
+}
+
+static int gpio_virtuser_interrupts_set(void *data, u64 val)
+{
+ struct gpio_virtuser_line_data *ld = data;
+ int irq, ret;
+
+ if (val > 1)
+ return -EINVAL;
+
+ if (val) {
+ irq = gpiod_to_irq(ld->ad.desc);
+ if (irq < 0)
+ return irq;
+
+ ret = request_threaded_irq(irq, NULL,
+ gpio_virtuser_irq_handler,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ ld->consumer, data);
+ if (ret)
+ return ret;
+
+ atomic_set(&ld->irq, irq);
+ } else {
+ irq = atomic_xchg(&ld->irq, 0);
+ /* don't call free_irq() if no interrupt was ever requested */
+ if (irq)
+ free_irq(irq, ld);
+ }
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(gpio_virtuser_interrupts_fops,
+ gpio_virtuser_interrupts_get,
+ gpio_virtuser_interrupts_set,
+ "%llu\n");
+
+static const struct gpio_virtuser_dbgfs_attr_descr
+gpio_virtuser_line_array_dbgfs_attrs[] = {
+ {
+ .name = "values",
+ .fops = &gpio_virtuser_value_array_fops,
+ },
+ {
+ .name = "values_atomic",
+ .fops = &gpio_virtuser_value_array_atomic_fops,
+ },
+};
+
+static const struct gpio_virtuser_dbgfs_attr_descr
+gpio_virtuser_line_dbgfs_attrs[] = {
+ {
+ .name = "direction",
+ .fops = &gpio_virtuser_direction_fops,
+ },
+ {
+ .name = "direction_atomic",
+ .fops = &gpio_virtuser_direction_atomic_fops,
+ },
+ {
+ .name = "value",
+ .fops = &gpio_virtuser_value_fops,
+ },
+ {
+ .name = "value_atomic",
+ .fops = &gpio_virtuser_value_atomic_fops,
+ },
+ {
+ .name = "debounce",
+ .fops = &gpio_virtuser_debounce_fops,
+ },
+ {
+ .name = "consumer",
+ .fops = &gpio_virtuser_consumer_fops,
+ },
+ {
+ .name = "interrupts",
+ .fops = &gpio_virtuser_interrupts_fops,
+ },
+};
+
+static int gpio_virtuser_create_debugfs_attrs(
+ const struct gpio_virtuser_dbgfs_attr_descr *attr,
+ size_t num_attrs, struct dentry *parent, 
void *data)
+{
+ struct dentry *ret;
+ size_t i;
+
+ for (i = 0; i < num_attrs; i++, attr++) {
+ ret = debugfs_create_file(attr->name, 0644, parent, data,
+ attr->fops);
+ if (IS_ERR(ret))
+ return PTR_ERR(ret);
+ }
+
+ return 0;
+}
+
+static int gpio_virtuser_dbgfs_init_line_array_attrs(struct device *dev,
+ struct gpio_descs *descs,
+ const char *id,
+ struct dentry *dbgfs_entry)
+{
+ struct gpio_virtuser_line_array_data *data;
+ char *name;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->ad.descs = descs;
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "gpiod:%s", id);
+ if (!name)
+ return -ENOMEM;
+
+ data->ad.dbgfs_dir = debugfs_create_dir(name, dbgfs_entry);
+ if (IS_ERR(data->ad.dbgfs_dir))
+ return PTR_ERR(data->ad.dbgfs_dir);
+
+ return gpio_virtuser_create_debugfs_attrs(
+ gpio_virtuser_line_array_dbgfs_attrs,
+ ARRAY_SIZE(gpio_virtuser_line_array_dbgfs_attrs),
+ data->ad.dbgfs_dir, data);
+}
+
+static int gpio_virtuser_dbgfs_init_line_attrs(struct device *dev,
+ struct gpio_desc *desc,
+ const char *id,
+ unsigned int index,
+ struct dentry *dbgfs_entry)
+{
+ struct gpio_virtuser_line_data *data;
+ char *name;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->ad.desc = desc;
+ /* never use a non-constant string as the format argument */
+ strscpy(data->consumer, id, sizeof(data->consumer));
+ atomic_set(&data->irq, 0);
+ atomic_set(&data->irq_count, 0);
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "gpiod:%s:%u", id, index);
+ if (!name)
+ return -ENOMEM;
+
+ ret = devm_mutex_init(dev, &data->consumer_lock);
+ if (ret)
+ return ret;
+
+ data->ad.dbgfs_dir = debugfs_create_dir(name, dbgfs_entry);
+ if (IS_ERR(data->ad.dbgfs_dir))
+ return PTR_ERR(data->ad.dbgfs_dir);
+
+ return gpio_virtuser_create_debugfs_attrs(
+ gpio_virtuser_line_dbgfs_attrs,
+ ARRAY_SIZE(gpio_virtuser_line_dbgfs_attrs),
+ data->ad.dbgfs_dir, data);
+}
+
+static void gpio_virtuser_debugfs_remove(void *data)
+{
+ struct dentry *dbgfs_entry = data;
+
+ debugfs_remove_recursive(dbgfs_entry);
+}
+
+static int gpio_virtuser_prop_is_gpio(struct property *prop)
+{
+ char *dash = strrchr(prop->name, '-');
+
+ return dash && strcmp(dash, "-gpios") == 0;
+}
+
+/*
+ * If this is an OF-based system, then we iterate over properties and consider
+ * all whose names end in "-gpios". For configfs we expect an additional string
+ * array property - "gpio-virtuser,ids" - containing the list of all GPIO IDs
+ * to request. 
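+ * For example, device properties named "reset-gpios" and "enable-gpios"
+ * result in the "reset" and "enable" GPIO arrays being requested.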
+ */ +static int gpio_virtuser_count_ids(struct device *dev) +{ + struct device_node *of_node = dev_of_node(dev); + struct property *prop; + int ret = 0; + + if (!of_node) + return device_property_string_array_count(dev, + "gpio-virtuser,ids"); + + for_each_property_of_node(of_node, prop) { + if (gpio_virtuser_prop_is_gpio(prop)) + ++ret; + } + + return ret; +} + +static int gpio_virtuser_get_ids(struct device *dev, const char **ids, + int num_ids) +{ + struct device_node *of_node = dev_of_node(dev); + struct property *prop; + size_t pos = 0, diff; + char *dash, *tmp; + + if (!of_node) + return device_property_read_string_array(dev, + "gpio-virtuser,ids", + ids, num_ids); + + for_each_property_of_node(of_node, prop) { + if (!gpio_virtuser_prop_is_gpio(prop)) + continue; + + dash = strrchr(prop->name, '-'); + diff = dash - prop->name; + + tmp = devm_kmemdup(dev, prop->name, diff + 1, + GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + tmp[diff] = '\0'; + ids[pos++] = tmp; + } + + return 0; +} + +static int gpio_virtuser_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct dentry *dbgfs_entry; + struct gpio_descs *descs; + int ret, num_ids = 0, i; + const char **ids; + unsigned int j; + + num_ids = gpio_virtuser_count_ids(dev); + if (num_ids < 0) + return dev_err_probe(dev, num_ids, + "Failed to get the number of GPIOs to request\n"); + + if (num_ids == 0) + return dev_err_probe(dev, -EINVAL, "No GPIO IDs specified\n"); + + ids = devm_kcalloc(dev, num_ids, sizeof(*ids), GFP_KERNEL); + if (!ids) + return -ENOMEM; + + ret = gpio_virtuser_get_ids(dev, ids, num_ids); + if (ret < 0) + return dev_err_probe(dev, ret, + "Failed to get the IDs of GPIOs to request\n"); + + dbgfs_entry = debugfs_create_dir(dev_name(dev), gpio_virtuser_dbg_root); + ret = devm_add_action_or_reset(dev, gpio_virtuser_debugfs_remove, + dbgfs_entry); + if (ret) + return ret; + + for (i = 0; i < num_ids; i++) { + descs = devm_gpiod_get_array(dev, ids[i], GPIOD_ASIS); + if (IS_ERR(descs)) + return dev_err_probe(dev, PTR_ERR(descs), + "Failed to request the '%s' GPIOs\n", + ids[i]); + + ret = gpio_virtuser_dbgfs_init_line_array_attrs(dev, descs, + ids[i], + dbgfs_entry); + if (ret) + return dev_err_probe(dev, ret, + "Failed to setup the debugfs array interface for the '%s' GPIOs\n", + ids[i]); + + for (j = 0; j < descs->ndescs; j++) { + ret = gpio_virtuser_dbgfs_init_line_attrs(dev, + descs->desc[j], ids[i], + j, dbgfs_entry); + if (ret) + return dev_err_probe(dev, ret, + "Failed to setup the debugfs line interface for the '%s' GPIOs\n", + ids[i]); + } + } + + return 0; +} + +static const struct of_device_id gpio_virtuser_of_match[] = { + { .compatible = "gpio-virtuser" }, + { } +}; +MODULE_DEVICE_TABLE(of, gpio_virtuser_of_match); + +static struct platform_driver gpio_virtuser_driver = { + .driver = { + .name = "gpio-virtuser", + .of_match_table = gpio_virtuser_of_match, + }, + .probe = gpio_virtuser_probe, +}; + +struct gpio_virtuser_device { + struct config_group group; + + struct platform_device *pdev; + int id; + struct mutex lock; + + struct notifier_block bus_notifier; + struct completion probe_completion; + bool driver_bound; + + struct gpiod_lookup_table *lookup_table; + + struct list_head lookup_list; +}; + +static int gpio_virtuser_bus_notifier_call(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct gpio_virtuser_device *vdev; + struct device *dev = data; + char devname[32]; + + vdev = container_of(nb, struct gpio_virtuser_device, bus_notifier); + snprintf(devname, 
sizeof(devname), "gpio-virtuser.%d", vdev->id); + + if (!device_match_name(dev, devname)) + return NOTIFY_DONE; + + switch (action) { + case BUS_NOTIFY_BOUND_DRIVER: + vdev->driver_bound = true; + break; + case BUS_NOTIFY_DRIVER_NOT_BOUND: + vdev->driver_bound = false; + break; + default: + return NOTIFY_DONE; + } + + complete(&vdev->probe_completion); + return NOTIFY_OK; +} + +static struct gpio_virtuser_device * +to_gpio_virtuser_device(struct config_item *item) +{ + struct config_group *group = to_config_group(item); + + return container_of(group, struct gpio_virtuser_device, group); +} + +static bool +gpio_virtuser_device_is_live(struct gpio_virtuser_device *dev) +{ + lockdep_assert_held(&dev->lock); + + return !!dev->pdev; +} + +struct gpio_virtuser_lookup { + struct config_group group; + + struct gpio_virtuser_device *parent; + struct list_head siblings; + + char *con_id; + + struct list_head entry_list; +}; + +static struct gpio_virtuser_lookup * +to_gpio_virtuser_lookup(struct config_item *item) +{ + struct config_group *group = to_config_group(item); + + return container_of(group, struct gpio_virtuser_lookup, group); +} + +struct gpio_virtuser_lookup_entry { + struct config_group group; + + struct gpio_virtuser_lookup *parent; + struct list_head siblings; + + char *key; + /* Can be negative to indicate lookup by name. */ + int offset; + enum gpio_lookup_flags flags; +}; + +static struct gpio_virtuser_lookup_entry * +to_gpio_virtuser_lookup_entry(struct config_item *item) +{ + struct config_group *group = to_config_group(item); + + return container_of(group, struct gpio_virtuser_lookup_entry, group); +} + +static ssize_t +gpio_virtuser_lookup_entry_config_key_show(struct config_item *item, char *page) +{ + struct gpio_virtuser_lookup_entry *entry = + to_gpio_virtuser_lookup_entry(item); + struct gpio_virtuser_device *dev = entry->parent->parent; + + guard(mutex)(&dev->lock); + + return sprintf(page, "%s\n", entry->key ?: ""); +} + +static ssize_t +gpio_virtuser_lookup_entry_config_key_store(struct config_item *item, + const char *page, size_t count) +{ + struct gpio_virtuser_lookup_entry *entry = + to_gpio_virtuser_lookup_entry(item); + struct gpio_virtuser_device *dev = entry->parent->parent; + + char *key __free(kfree) = kstrndup(skip_spaces(page), count, + GFP_KERNEL); + if (!key) + return -ENOMEM; + + strim(key); + + guard(mutex)(&dev->lock); + + if (gpio_virtuser_device_is_live(dev)) + return -EBUSY; + + kfree(entry->key); + entry->key = no_free_ptr(key); + + return count; +} + +CONFIGFS_ATTR(gpio_virtuser_lookup_entry_config_, key); + +static ssize_t +gpio_virtuser_lookup_entry_config_offset_show(struct config_item *item, + char *page) +{ + struct gpio_virtuser_lookup_entry *entry = + to_gpio_virtuser_lookup_entry(item); + struct gpio_virtuser_device *dev = entry->parent->parent; + unsigned int offset; + + scoped_guard(mutex, &dev->lock) + offset = entry->offset; + + return sprintf(page, "%d\n", offset); +} + +static ssize_t +gpio_virtuser_lookup_entry_config_offset_store(struct config_item *item, + const char *page, size_t count) +{ + struct gpio_virtuser_lookup_entry *entry = + to_gpio_virtuser_lookup_entry(item); + struct gpio_virtuser_device *dev = entry->parent->parent; + int offset, ret; + + ret = kstrtoint(page, 0, &offset); + if (ret) + return ret; + + /* + * Negative number here means: 'key' represents a line name to lookup. + * Non-negative means: 'key' represents the label of the chip with + * the 'offset' value representing the line within that chip. 
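+ * For example: key = "gpio-bank0" with offset = 4 addresses line 4 of the
+ * chip labelled "gpio-bank0", while key = "LED 1" with offset = -1 makes
+ * GPIOLIB look the line up by the name "LED 1".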
+ * + * GPIOLIB uses the U16_MAX value to indicate lookup by line name so + * the greatest offset we can accept is (U16_MAX - 1). + */ + if (offset > (U16_MAX - 1)) + return -EINVAL; + + guard(mutex)(&dev->lock); + + if (gpio_virtuser_device_is_live(dev)) + return -EBUSY; + + entry->offset = offset; + + return count; +} + +CONFIGFS_ATTR(gpio_virtuser_lookup_entry_config_, offset); + +static enum gpio_lookup_flags +gpio_virtuser_lookup_get_flags(struct config_item *item) +{ + struct gpio_virtuser_lookup_entry *entry = + to_gpio_virtuser_lookup_entry(item); + struct gpio_virtuser_device *dev = entry->parent->parent; + + guard(mutex)(&dev->lock); + + return entry->flags; +} + +static ssize_t +gpio_virtuser_lookup_entry_config_drive_show(struct config_item *item, char *page) +{ + enum gpio_lookup_flags flags = gpio_virtuser_lookup_get_flags(item); + const char *repr; + + if (flags & GPIO_OPEN_DRAIN) + repr = "open-drain"; + else if (flags & GPIO_OPEN_SOURCE) + repr = "open-source"; + else + repr = "push-pull"; + + return sprintf(page, "%s\n", repr); +} + +static ssize_t +gpio_virtuser_lookup_entry_config_drive_store(struct config_item *item, + const char *page, size_t count) +{ + struct gpio_virtuser_lookup_entry *entry = + to_gpio_virtuser_lookup_entry(item); + struct gpio_virtuser_device *dev = entry->parent->parent; + + guard(mutex)(&dev->lock); + + if (gpio_virtuser_device_is_live(dev)) + return -EBUSY; + + if (sysfs_streq(page, "push-pull")) { + entry->flags &= ~(GPIO_OPEN_DRAIN | GPIO_OPEN_SOURCE); + } else if (sysfs_streq(page, "open-drain")) { + entry->flags &= ~GPIO_OPEN_SOURCE; + entry->flags |= GPIO_OPEN_DRAIN; + } else if (sysfs_streq(page, "open-source")) { + entry->flags &= ~GPIO_OPEN_DRAIN; + entry->flags |= GPIO_OPEN_SOURCE; + } else { + count = -EINVAL; + } + + return count; +} + +CONFIGFS_ATTR(gpio_virtuser_lookup_entry_config_, drive); + +static ssize_t +gpio_virtuser_lookup_entry_config_pull_show(struct config_item *item, char *page) +{ + enum gpio_lookup_flags flags = gpio_virtuser_lookup_get_flags(item); + const char *repr; + + if (flags & GPIO_PULL_UP) + repr = "pull-up"; + else if (flags & GPIO_PULL_DOWN) + repr = "pull-down"; + else if (flags & GPIO_PULL_DISABLE) + repr = "pull-disabled"; + else + repr = "as-is"; + + return sprintf(page, "%s\n", repr); +} + +static ssize_t +gpio_virtuser_lookup_entry_config_pull_store(struct config_item *item, + const char *page, size_t count) +{ + struct gpio_virtuser_lookup_entry *entry = + to_gpio_virtuser_lookup_entry(item); + struct gpio_virtuser_device *dev = entry->parent->parent; + + guard(mutex)(&dev->lock); + + if (gpio_virtuser_device_is_live(dev)) + return -EBUSY; + + if (sysfs_streq(page, "pull-up")) { + entry->flags &= ~(GPIO_PULL_DOWN | GPIO_PULL_DISABLE); + entry->flags |= GPIO_PULL_UP; + } else if (sysfs_streq(page, "pull-down")) { + entry->flags &= ~(GPIO_PULL_UP | GPIO_PULL_DISABLE); + entry->flags |= GPIO_PULL_DOWN; + } else if (sysfs_streq(page, "pull-disabled")) { + entry->flags &= ~(GPIO_PULL_UP | GPIO_PULL_DOWN); + entry->flags |= GPIO_PULL_DISABLE; + } else if (sysfs_streq(page, "as-is")) { + entry->flags &= ~(GPIO_PULL_UP | GPIO_PULL_DOWN | + GPIO_PULL_DISABLE); + } else { + count = -EINVAL; + } + + return count; +} + +CONFIGFS_ATTR(gpio_virtuser_lookup_entry_config_, pull); + +static ssize_t +gpio_virtuser_lookup_entry_config_active_low_show(struct config_item *item, + char *page) +{ + enum gpio_lookup_flags flags = gpio_virtuser_lookup_get_flags(item); + + return sprintf(page, "%c\n", flags & GPIO_ACTIVE_LOW 
? '1' : '0');
+}
+
+static ssize_t
+gpio_virtuser_lookup_entry_config_active_low_store(struct config_item *item,
+ const char *page,
+ size_t count)
+{
+ struct gpio_virtuser_lookup_entry *entry =
+ to_gpio_virtuser_lookup_entry(item);
+ struct gpio_virtuser_device *dev = entry->parent->parent;
+ bool active_low;
+ int ret;
+
+ ret = kstrtobool(page, &active_low);
+ if (ret)
+ return ret;
+
+ guard(mutex)(&dev->lock);
+
+ if (gpio_virtuser_device_is_live(dev))
+ return -EBUSY;
+
+ if (active_low)
+ entry->flags |= GPIO_ACTIVE_LOW;
+ else
+ entry->flags &= ~GPIO_ACTIVE_LOW;
+
+ return count;
+}
+
+CONFIGFS_ATTR(gpio_virtuser_lookup_entry_config_, active_low);
+
+static ssize_t
+gpio_virtuser_lookup_entry_config_transitory_show(struct config_item *item,
+ char *page)
+{
+ enum gpio_lookup_flags flags = gpio_virtuser_lookup_get_flags(item);
+
+ return sprintf(page, "%c\n", flags & GPIO_TRANSITORY ? '1' : '0');
+}
+
+static ssize_t
+gpio_virtuser_lookup_entry_config_transitory_store(struct config_item *item,
+ const char *page,
+ size_t count)
+{
+ struct gpio_virtuser_lookup_entry *entry =
+ to_gpio_virtuser_lookup_entry(item);
+ struct gpio_virtuser_device *dev = entry->parent->parent;
+ bool transitory;
+ int ret;
+
+ ret = kstrtobool(page, &transitory);
+ if (ret)
+ return ret;
+
+ guard(mutex)(&dev->lock);
+
+ if (gpio_virtuser_device_is_live(dev))
+ return -EBUSY;
+
+ if (transitory)
+ entry->flags |= GPIO_TRANSITORY;
+ else
+ entry->flags &= ~GPIO_TRANSITORY;
+
+ return count;
+}
+
+CONFIGFS_ATTR(gpio_virtuser_lookup_entry_config_, transitory);
+
+static struct configfs_attribute *gpio_virtuser_lookup_entry_config_attrs[] = {
+ &gpio_virtuser_lookup_entry_config_attr_key,
+ &gpio_virtuser_lookup_entry_config_attr_offset,
+ &gpio_virtuser_lookup_entry_config_attr_drive,
+ &gpio_virtuser_lookup_entry_config_attr_pull,
+ &gpio_virtuser_lookup_entry_config_attr_active_low,
+ &gpio_virtuser_lookup_entry_config_attr_transitory,
+ NULL
+};
+
+static ssize_t
+gpio_virtuser_device_config_dev_name_show(struct config_item *item,
+ char *page)
+{
+ struct gpio_virtuser_device *dev = to_gpio_virtuser_device(item);
+ struct platform_device *pdev;
+
+ guard(mutex)(&dev->lock);
+
+ pdev = dev->pdev;
+ if (pdev)
+ return sprintf(page, "%s\n", dev_name(&pdev->dev));
+
+ return sprintf(page, "gpio-virtuser.%d\n", dev->id);
+}
+
+CONFIGFS_ATTR_RO(gpio_virtuser_device_config_, dev_name);
+
+static ssize_t gpio_virtuser_device_config_live_show(struct config_item *item,
+ char *page)
+{
+ struct gpio_virtuser_device *dev = to_gpio_virtuser_device(item);
+ bool live;
+
+ scoped_guard(mutex, &dev->lock)
+ live = gpio_virtuser_device_is_live(dev);
+
+ return sprintf(page, "%c\n", live ? 
'1' : '0'); +} + +static size_t +gpio_virtuser_get_lookup_count(struct gpio_virtuser_device *dev) +{ + struct gpio_virtuser_lookup *lookup; + size_t count = 0; + + lockdep_assert_held(&dev->lock); + + list_for_each_entry(lookup, &dev->lookup_list, siblings) + count += list_count_nodes(&lookup->entry_list); + + return count; +} + +static int +gpio_virtuser_make_lookup_table(struct gpio_virtuser_device *dev) +{ + size_t num_entries = gpio_virtuser_get_lookup_count(dev); + struct gpio_virtuser_lookup_entry *entry; + struct gpio_virtuser_lookup *lookup; + struct gpiod_lookup *curr; + unsigned int i = 0; + + lockdep_assert_held(&dev->lock); + + struct gpiod_lookup_table *table __free(kfree) = + kzalloc(struct_size(table, table, num_entries + 1), GFP_KERNEL); + if (!table) + return -ENOMEM; + + table->dev_id = kasprintf(GFP_KERNEL, "gpio-virtuser.%d", dev->id); + if (!table->dev_id) + return -ENOMEM; + + list_for_each_entry(lookup, &dev->lookup_list, siblings) { + list_for_each_entry(entry, &lookup->entry_list, siblings) { + curr = &table->table[i]; + + curr->con_id = lookup->con_id; + curr->idx = i; + curr->key = entry->key; + curr->chip_hwnum = entry->offset < 0 ? + U16_MAX : entry->offset; + curr->flags = entry->flags; + i++; + } + } + + gpiod_add_lookup_table(table); + dev->lookup_table = no_free_ptr(table); + + return 0; +} + +static struct fwnode_handle * +gpio_virtuser_make_device_swnode(struct gpio_virtuser_device *dev) +{ + struct property_entry properties[2]; + struct gpio_virtuser_lookup *lookup; + unsigned int i = 0; + size_t num_ids; + + memset(properties, 0, sizeof(properties)); + + num_ids = list_count_nodes(&dev->lookup_list); + char **ids __free(kfree) = kcalloc(num_ids + 1, sizeof(*ids), + GFP_KERNEL); + if (!ids) + return ERR_PTR(-ENOMEM); + + list_for_each_entry(lookup, &dev->lookup_list, siblings) + ids[i++] = lookup->con_id; + + properties[0] = PROPERTY_ENTRY_STRING_ARRAY_LEN("gpio-virtuser,ids", + ids, num_ids); + + return fwnode_create_software_node(properties, NULL); +} + +static int +gpio_virtuser_device_activate(struct gpio_virtuser_device *dev) +{ + struct platform_device_info pdevinfo; + struct fwnode_handle *swnode; + struct platform_device *pdev; + int ret; + + lockdep_assert_held(&dev->lock); + + if (list_empty(&dev->lookup_list)) + return -ENODATA; + + swnode = gpio_virtuser_make_device_swnode(dev); + if (IS_ERR(swnode)) + return PTR_ERR(swnode); + + memset(&pdevinfo, 0, sizeof(pdevinfo)); + pdevinfo.name = "gpio-virtuser"; + pdevinfo.id = dev->id; + pdevinfo.fwnode = swnode; + + ret = gpio_virtuser_make_lookup_table(dev); + if (ret) { + fwnode_remove_software_node(swnode); + return ret; + } + + reinit_completion(&dev->probe_completion); + dev->driver_bound = false; + bus_register_notifier(&platform_bus_type, &dev->bus_notifier); + + pdev = platform_device_register_full(&pdevinfo); + if (IS_ERR(pdev)) { + bus_unregister_notifier(&platform_bus_type, &dev->bus_notifier); + fwnode_remove_software_node(swnode); + return PTR_ERR(pdev); + } + + wait_for_completion(&dev->probe_completion); + bus_unregister_notifier(&platform_bus_type, &dev->bus_notifier); + + if (!dev->driver_bound) { + platform_device_unregister(pdev); + fwnode_remove_software_node(swnode); + return -ENXIO; + } + + dev->pdev = pdev; + + return 0; +} + +static void +gpio_virtuser_device_deactivate(struct gpio_virtuser_device *dev) +{ + struct fwnode_handle *swnode; + + lockdep_assert_held(&dev->lock); + + swnode = dev_fwnode(&dev->pdev->dev); + platform_device_unregister(dev->pdev); + 
fwnode_remove_software_node(swnode); + dev->pdev = NULL; + gpiod_remove_lookup_table(dev->lookup_table); + kfree(dev->lookup_table); +} + +static ssize_t +gpio_virtuser_device_config_live_store(struct config_item *item, + const char *page, size_t count) +{ + struct gpio_virtuser_device *dev = to_gpio_virtuser_device(item); + int ret = 0; + bool live; + + ret = kstrtobool(page, &live); + if (ret) + return ret; + + guard(mutex)(&dev->lock); + + if (live == gpio_virtuser_device_is_live(dev)) + return -EPERM; + + if (live) + ret = gpio_virtuser_device_activate(dev); + else + gpio_virtuser_device_deactivate(dev); + + return ret ?: count; +} + +CONFIGFS_ATTR(gpio_virtuser_device_config_, live); + +static struct configfs_attribute *gpio_virtuser_device_config_attrs[] = { + &gpio_virtuser_device_config_attr_dev_name, + &gpio_virtuser_device_config_attr_live, + NULL +}; + +static void +gpio_virtuser_lookup_entry_config_group_release(struct config_item *item) +{ + struct gpio_virtuser_lookup_entry *entry = + to_gpio_virtuser_lookup_entry(item); + struct gpio_virtuser_device *dev = entry->parent->parent; + + guard(mutex)(&dev->lock); + + list_del(&entry->siblings); + + kfree(entry->key); + kfree(entry); +} + +static struct +configfs_item_operations gpio_virtuser_lookup_entry_config_item_ops = { + .release = gpio_virtuser_lookup_entry_config_group_release, +}; + +static const struct +config_item_type gpio_virtuser_lookup_entry_config_group_type = { + .ct_item_ops = &gpio_virtuser_lookup_entry_config_item_ops, + .ct_attrs = gpio_virtuser_lookup_entry_config_attrs, + .ct_owner = THIS_MODULE, +}; + +static struct config_group * +gpio_virtuser_make_lookup_entry_group(struct config_group *group, + const char *name) +{ + struct gpio_virtuser_lookup *lookup = + to_gpio_virtuser_lookup(&group->cg_item); + struct gpio_virtuser_device *dev = lookup->parent; + + guard(mutex)(&dev->lock); + + if (gpio_virtuser_device_is_live(dev)) + return ERR_PTR(-EBUSY); + + struct gpio_virtuser_lookup_entry *entry __free(kfree) = + kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return ERR_PTR(-ENOMEM); + + config_group_init_type_name(&entry->group, name, + &gpio_virtuser_lookup_entry_config_group_type); + entry->flags = GPIO_LOOKUP_FLAGS_DEFAULT; + entry->parent = lookup; + list_add_tail(&entry->siblings, &lookup->entry_list); + + return &no_free_ptr(entry)->group; +} + +static void gpio_virtuser_lookup_config_group_release(struct config_item *item) +{ + struct gpio_virtuser_lookup *lookup = to_gpio_virtuser_lookup(item); + struct gpio_virtuser_device *dev = lookup->parent; + + guard(mutex)(&dev->lock); + + list_del(&lookup->siblings); + + kfree(lookup->con_id); + kfree(lookup); +} + +static struct configfs_item_operations gpio_virtuser_lookup_config_item_ops = { + .release = gpio_virtuser_lookup_config_group_release, +}; + +static struct +configfs_group_operations gpio_virtuser_lookup_config_group_ops = { + .make_group = gpio_virtuser_make_lookup_entry_group, +}; + +static const struct config_item_type gpio_virtuser_lookup_config_group_type = { + .ct_group_ops = &gpio_virtuser_lookup_config_group_ops, + .ct_item_ops = &gpio_virtuser_lookup_config_item_ops, + .ct_owner = THIS_MODULE, +}; + +static struct config_group * +gpio_virtuser_make_lookup_group(struct config_group *group, const char *name) +{ + struct gpio_virtuser_device *dev = + to_gpio_virtuser_device(&group->cg_item); + + if (strlen(name) > (GPIO_VIRTUSER_NAME_BUF_LEN - 1)) + return ERR_PTR(-E2BIG); + + guard(mutex)(&dev->lock); + + if 
(gpio_virtuser_device_is_live(dev)) + return ERR_PTR(-EBUSY); + + struct gpio_virtuser_lookup *lookup __free(kfree) = + kzalloc(sizeof(*lookup), GFP_KERNEL); + if (!lookup) + return ERR_PTR(-ENOMEM); + + lookup->con_id = kstrdup(name, GFP_KERNEL); + if (!lookup->con_id) + return ERR_PTR(-ENOMEM); + + config_group_init_type_name(&lookup->group, name, + &gpio_virtuser_lookup_config_group_type); + INIT_LIST_HEAD(&lookup->entry_list); + lookup->parent = dev; + list_add_tail(&lookup->siblings, &dev->lookup_list); + + return &no_free_ptr(lookup)->group; +} + +static void gpio_virtuser_device_config_group_release(struct config_item *item) +{ + struct gpio_virtuser_device *dev = to_gpio_virtuser_device(item); + + guard(mutex)(&dev->lock); + + if (gpio_virtuser_device_is_live(dev)) + gpio_virtuser_device_deactivate(dev); + + mutex_destroy(&dev->lock); + ida_free(&gpio_virtuser_ida, dev->id); + kfree(dev); +} + +static struct configfs_item_operations gpio_virtuser_device_config_item_ops = { + .release = gpio_virtuser_device_config_group_release, +}; + +static struct configfs_group_operations gpio_virtuser_device_config_group_ops = { + .make_group = gpio_virtuser_make_lookup_group, +}; + +static const struct config_item_type gpio_virtuser_device_config_group_type = { + .ct_group_ops = &gpio_virtuser_device_config_group_ops, + .ct_item_ops = &gpio_virtuser_device_config_item_ops, + .ct_attrs = gpio_virtuser_device_config_attrs, + .ct_owner = THIS_MODULE, +}; + +static struct config_group * +gpio_virtuser_config_make_device_group(struct config_group *group, + const char *name) +{ + struct gpio_virtuser_device *dev __free(kfree) = kzalloc(sizeof(*dev), + GFP_KERNEL); + if (!dev) + return ERR_PTR(-ENOMEM); + + dev->id = ida_alloc(&gpio_virtuser_ida, GFP_KERNEL); + if (dev->id < 0) + return ERR_PTR(dev->id); + + config_group_init_type_name(&dev->group, name, + &gpio_virtuser_device_config_group_type); + mutex_init(&dev->lock); + INIT_LIST_HEAD(&dev->lookup_list); + dev->bus_notifier.notifier_call = gpio_virtuser_bus_notifier_call; + init_completion(&dev->probe_completion); + + return &no_free_ptr(dev)->group; +} + +static struct configfs_group_operations gpio_virtuser_config_group_ops = { + .make_group = gpio_virtuser_config_make_device_group, +}; + +static const struct config_item_type gpio_virtuser_config_type = { + .ct_group_ops = &gpio_virtuser_config_group_ops, + .ct_owner = THIS_MODULE, +}; + +static struct configfs_subsystem gpio_virtuser_config_subsys = { + .su_group = { + .cg_item = { + .ci_namebuf = "gpio-virtuser", + .ci_type = &gpio_virtuser_config_type, + }, + }, +}; + +static int __init gpio_virtuser_init(void) +{ + int ret; + + ret = platform_driver_register(&gpio_virtuser_driver); + if (ret) { + pr_err("Failed to register the platform driver: %d\n", ret); + return ret; + } + + config_group_init(&gpio_virtuser_config_subsys.su_group); + mutex_init(&gpio_virtuser_config_subsys.su_mutex); + ret = configfs_register_subsystem(&gpio_virtuser_config_subsys); + if (ret) { + pr_err("Failed to register the '%s' configfs subsystem: %d\n", + gpio_virtuser_config_subsys.su_group.cg_item.ci_namebuf, + ret); + goto err_plat_drv_unreg; + } + + gpio_virtuser_dbg_root = debugfs_create_dir("gpio-virtuser", NULL); + if (IS_ERR(gpio_virtuser_dbg_root)) { + ret = PTR_ERR(gpio_virtuser_dbg_root); + pr_err("Failed to create the debugfs tree: %d\n", ret); + goto err_configfs_unreg; + } + + return 0; + +err_configfs_unreg: + configfs_unregister_subsystem(&gpio_virtuser_config_subsys); +err_plat_drv_unreg: + 
mutex_destroy(&gpio_virtuser_config_subsys.su_mutex); + platform_driver_unregister(&gpio_virtuser_driver); + + return ret; +} +module_init(gpio_virtuser_init); + +static void __exit gpio_virtuser_exit(void) +{ + configfs_unregister_subsystem(&gpio_virtuser_config_subsys); + mutex_destroy(&gpio_virtuser_config_subsys.su_mutex); + platform_driver_unregister(&gpio_virtuser_driver); + debugfs_remove_recursive(gpio_virtuser_dbg_root); +} +module_exit(gpio_virtuser_exit); + +MODULE_AUTHOR("Bartosz Golaszewski <bartosz.golaszewski@linaro.org>"); +MODULE_DESCRIPTION("Virtual GPIO consumer module"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index bb063b81cee6..69cd2be9c7f3 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -976,7 +976,7 @@ __acpi_find_gpio(struct fwnode_handle *fwnode, const char *con_id, unsigned int int i; /* Try first from _DSD */ - for (i = 0; i < ARRAY_SIZE(gpio_suffixes); i++) { + for (i = 0; i < gpio_suffix_count; i++) { if (con_id) { snprintf(propname, sizeof(propname), "%s-%s", con_id, gpio_suffixes[i]); @@ -1453,7 +1453,7 @@ int acpi_gpio_count(const struct fwnode_handle *fwnode, const char *con_id) unsigned int i; /* Try first from _DSD */ - for (i = 0; i < ARRAY_SIZE(gpio_suffixes); i++) { + for (i = 0; i < gpio_suffix_count; i++) { if (con_id) snprintf(propname, sizeof(propname), "%s-%s", con_id, gpio_suffixes[i]); diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c index 5639abce6ec5..ef08b23a56e2 100644 --- a/drivers/gpio/gpiolib-cdev.c +++ b/drivers/gpio/gpiolib-cdev.c @@ -1132,6 +1132,14 @@ static void edge_detector_stop(struct line *line) /* do not change line->level - see comment in debounced_value() */ } +static int edge_detector_fifo_init(struct linereq *req) +{ + if (kfifo_initialized(&req->events)) + return 0; + + return kfifo_alloc(&req->events, req->event_buffer_size, GFP_KERNEL); +} + static int edge_detector_setup(struct line *line, struct gpio_v2_line_config *lc, unsigned int line_idx, u64 edflags) @@ -1143,9 +1151,8 @@ static int edge_detector_setup(struct line *line, char *label; eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS; - if (eflags && !kfifo_initialized(&line->req->events)) { - ret = kfifo_alloc(&line->req->events, - line->req->event_buffer_size, GFP_KERNEL); + if (eflags) { + ret = edge_detector_fifo_init(line->req); if (ret) return ret; } @@ -1197,8 +1204,6 @@ static int edge_detector_update(struct line *line, struct gpio_v2_line_config *lc, unsigned int line_idx, u64 edflags) { - u64 eflags; - int ret; u64 active_edflags = READ_ONCE(line->edflags); unsigned int debounce_period_us = gpio_v2_line_config_debounce_period(lc, line_idx); @@ -1214,14 +1219,9 @@ static int edge_detector_update(struct line *line, * ensure event fifo is initialised if edge detection * is now enabled. */ - eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS; - if (eflags && !kfifo_initialized(&line->req->events)) { - ret = kfifo_alloc(&line->req->events, - line->req->event_buffer_size, - GFP_KERNEL); - if (ret) - return ret; - } + if (edflags & GPIO_V2_LINE_EDGE_FLAGS) + return edge_detector_fifo_init(line->req); + return 0; } @@ -1648,16 +1648,15 @@ static ssize_t linereq_read(struct file *file, char __user *buf, return ret; } - ret = kfifo_out(&lr->events, &le, 1); - } - if (ret != 1) { - /* - * This should never happen - we were holding the - * lock from the moment we learned the fifo is no - * longer empty until now. 
- */ - ret = -EIO; - break; + if (kfifo_out(&lr->events, &le, 1) != 1) { + /* + * This should never happen - we hold the + * lock from the moment we learned the fifo + * is no longer empty until now. + */ + WARN(1, "failed to read from non-empty kfifo"); + return -EIO; + } } if (copy_to_user(buf + bytes_read, &le, sizeof(le))) @@ -1780,6 +1779,7 @@ static int linereq_create(struct gpio_device *gdev, void __user *ip) mutex_init(&lr->config_mutex); init_waitqueue_head(&lr->wait); + INIT_KFIFO(lr->events); lr->event_buffer_size = ulr.event_buffer_size; if (lr->event_buffer_size == 0) lr->event_buffer_size = ulr.num_lines * 16; @@ -2000,16 +2000,15 @@ static ssize_t lineevent_read(struct file *file, char __user *buf, return ret; } - ret = kfifo_out(&le->events, &ge, 1); - } - if (ret != 1) { - /* - * This should never happen - we were holding the lock - * from the moment we learned the fifo is no longer - * empty until now. - */ - ret = -EIO; - break; + if (kfifo_out(&le->events, &ge, 1) != 1) { + /* + * This should never happen - we hold the + * lock from the moment we learned the fifo + * is no longer empty until now. + */ + WARN(1, "failed to read from non-empty kfifo"); + return -EIO; + } } if (copy_to_user(buf + bytes_read, &ge, ge_size)) @@ -2712,12 +2711,15 @@ static ssize_t lineinfo_watch_read(struct file *file, char __user *buf, if (count < event_size) return -EINVAL; #endif - ret = kfifo_out(&cdev->events, &event, 1); - } - if (ret != 1) { - ret = -EIO; - break; - /* We should never get here. See lineevent_read(). */ + if (kfifo_out(&cdev->events, &event, 1) != 1) { + /* + * This should never happen - we hold the + * lock from the moment we learned the fifo + * is no longer empty until now. + */ + WARN(1, "failed to read from non-empty kfifo"); + return -EIO; + } } #ifdef CONFIG_GPIO_CDEV_V1 diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index d75f6ee37028..f6af5e7be4d1 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -103,7 +103,7 @@ int of_gpio_count(const struct fwnode_handle *fwnode, const char *con_id) if (ret > 0) return ret; - for (i = 0; i < ARRAY_SIZE(gpio_suffixes); i++) { + for (i = 0; i < gpio_suffix_count; i++) { if (con_id) snprintf(propname, sizeof(propname), "%s-%s", con_id, gpio_suffixes[i]); @@ -203,6 +203,24 @@ static void of_gpio_try_fixup_polarity(const struct device_node *np, */ { "qi,lb60", "rb-gpios", true }, #endif +#if IS_ENABLED(CONFIG_PCI_LANTIQ) + /* + * According to the PCI specification, the RST# pin is an + * active-low signal. However, most of the device trees that + * have been widely used for a long time incorrectly describe + * reset GPIO as active-high, and were also using wrong name + * for the property. + */ + { "lantiq,pci-xway", "gpio-reset", false }, +#endif +#if IS_ENABLED(CONFIG_TOUCHSCREEN_TSC2005) + /* + * DTS for Nokia N900 incorrectly specified "active high" + * polarity for the reset line, while the chip actually + * treats it as "active low". 
+ */ + { "ti,tsc2005", "reset-gpios", false }, +#endif }; unsigned int i; @@ -504,9 +522,9 @@ static struct gpio_desc *of_find_gpio_rename(struct device_node *np, { "reset", "reset-n-io", "marvell,nfc-uart" }, { "reset", "reset-n-io", "mrvl,nfc-uart" }, #endif -#if !IS_ENABLED(CONFIG_PCI_LANTIQ) +#if IS_ENABLED(CONFIG_PCI_LANTIQ) /* MIPS Lantiq PCI */ - { "reset", "gpios-reset", "lantiq,pci-xway" }, + { "reset", "gpio-reset", "lantiq,pci-xway" }, #endif /* @@ -676,7 +694,7 @@ struct gpio_desc *of_find_gpio(struct device_node *np, const char *con_id, unsigned int i; /* Try GPIO property "foo-gpios" and "foo-gpio" */ - for (i = 0; i < ARRAY_SIZE(gpio_suffixes); i++) { + for (i = 0; i < gpio_suffix_count; i++) { if (con_id) snprintf(prop_name, sizeof(prop_name), "%s-%s", con_id, gpio_suffixes[i]); diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index fa62367ee929..edaeee53db75 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/acpi.h> +#include <linux/array_size.h> #include <linux/bitmap.h> #include <linux/cleanup.h> #include <linux/compat.h> @@ -17,6 +18,7 @@ #include <linux/list.h> #include <linux/lockdep.h> #include <linux/module.h> +#include <linux/nospec.h> #include <linux/of.h> #include <linux/pinctrl/consumer.h> #include <linux/seq_file.h> @@ -88,6 +90,9 @@ DEFINE_STATIC_SRCU(gpio_devices_srcu); static DEFINE_MUTEX(gpio_machine_hogs_mutex); static LIST_HEAD(gpio_machine_hogs); +const char *const gpio_suffixes[] = { "gpios", "gpio" }; +const size_t gpio_suffix_count = ARRAY_SIZE(gpio_suffixes); + static void gpiochip_free_hogs(struct gpio_chip *gc); static int gpiochip_add_irqchip(struct gpio_chip *gc, struct lock_class_key *lock_key, @@ -105,16 +110,16 @@ const char *gpiod_get_label(struct gpio_desc *desc) unsigned long flags; flags = READ_ONCE(desc->flags); - if (test_bit(FLAG_USED_AS_IRQ, &flags) && - !test_bit(FLAG_REQUESTED, &flags)) - return "interrupt"; - - if (!test_bit(FLAG_REQUESTED, &flags)) - return NULL; label = srcu_dereference_check(desc->label, &desc->gdev->desc_srcu, srcu_read_lock_held(&desc->gdev->desc_srcu)); + if (test_bit(FLAG_USED_AS_IRQ, &flags)) + return label->str ?: "interrupt"; + + if (!test_bit(FLAG_REQUESTED, &flags)) + return NULL; + return label->str; } @@ -174,7 +179,6 @@ struct gpio_desc *gpiochip_get_desc(struct gpio_chip *gc, { return gpio_device_get_desc(gc->gpiodev, hwnum); } -EXPORT_SYMBOL_GPL(gpiochip_get_desc); /** * gpio_device_get_desc() - get the GPIO descriptor corresponding to the given @@ -198,7 +202,7 @@ gpio_device_get_desc(struct gpio_device *gdev, unsigned int hwnum) if (hwnum >= gdev->ngpio) return ERR_PTR(-EINVAL); - return &gdev->descs[hwnum]; + return &gdev->descs[array_index_nospec(hwnum, gdev->ngpio)]; } EXPORT_SYMBOL_GPL(gpio_device_get_desc); @@ -485,7 +489,7 @@ static struct gpio_desc *gpio_name_to_desc(const char * const name) * 1. Non-unique names are still accepted, * 2. Name collisions within the same GPIO chip are not reported. 
*/ -static int gpiochip_set_desc_names(struct gpio_chip *gc) +static void gpiochip_set_desc_names(struct gpio_chip *gc) { struct gpio_device *gdev = gc->gpiodev; int i; @@ -504,8 +508,6 @@ static int gpiochip_set_desc_names(struct gpio_chip *gc) /* Then add all names to the GPIO descriptors */ for (i = 0; i != gc->ngpio; ++i) gdev->descs[i].name = gc->names[i]; - - return 0; } /* @@ -999,11 +1001,9 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, INIT_LIST_HEAD(&gdev->pin_ranges); #endif - if (gc->names) { - ret = gpiochip_set_desc_names(gc); - if (ret) - goto err_cleanup_desc_srcu; - } + if (gc->names) + gpiochip_set_desc_names(gc); + ret = gpiochip_set_names(gc); if (ret) goto err_cleanup_desc_srcu; @@ -4798,11 +4798,11 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_device *gdev) for_each_gpio_desc(gc, desc) { guard(srcu)(&desc->gdev->desc_srcu); - if (test_bit(FLAG_REQUESTED, &desc->flags)) { + is_irq = test_bit(FLAG_USED_AS_IRQ, &desc->flags); + if (is_irq || test_bit(FLAG_REQUESTED, &desc->flags)) { gpiod_get_direction(desc); is_out = test_bit(FLAG_IS_OUT, &desc->flags); value = gpio_chip_get_value(gc, desc); - is_irq = test_bit(FLAG_USED_AS_IRQ, &desc->flags); active_low = test_bit(FLAG_ACTIVE_LOW, &desc->flags); seq_printf(s, " gpio-%-3u (%-20.20s|%-20.20s) %s %s %s%s\n", gpio, desc->name ?: "", gpiod_get_label(desc), diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h index 48e086c2f416..4de0bf1a62d3 100644 --- a/drivers/gpio/gpiolib.h +++ b/drivers/gpio/gpiolib.h @@ -90,7 +90,8 @@ static inline struct gpio_device *to_gpio_device(struct device *dev) } /* gpio suffixes used for ACPI and device tree lookup */ -static __maybe_unused const char * const gpio_suffixes[] = { "gpios", "gpio" }; +extern const char *const gpio_suffixes[]; +extern const size_t gpio_suffix_count; /** * struct gpio_array - Opaque descriptor for a structure of GPIO array attributes @@ -242,6 +243,7 @@ int gpio_set_debounce_timeout(struct gpio_desc *desc, unsigned int debounce); int gpiod_hog(struct gpio_desc *desc, const char *name, unsigned long lflags, enum gpiod_flags dflags); int gpiochip_get_ngpios(struct gpio_chip *gc, struct device *dev); +struct gpio_desc *gpiochip_get_desc(struct gpio_chip *gc, unsigned int hwnum); const char *gpiod_get_label(struct gpio_desc *desc); /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index ec888fc6ead8..916b6b8cf7d9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1093,6 +1093,21 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) unsigned int i; int r; + /* + * We can't use gang submit together with reserved VMIDs when the VM changes + * can't be invalidated by more than one engine at the same time.
+ */ + if (p->gang_size > 1 && !p->adev->vm_manager.concurrent_flush) { + for (i = 0; i < p->gang_size; ++i) { + struct drm_sched_entity *entity = p->entities[i]; + struct drm_gpu_scheduler *sched = entity->rq->sched; + struct amdgpu_ring *ring = to_amdgpu_ring(sched); + + if (amdgpu_vmid_uses_reserved(vm, ring->vm_hub)) + return -EINVAL; + } + } + r = amdgpu_vm_clear_freed(adev, vm, NULL); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index 3d7fcdeaf8cf..e8f6e4dbc5a4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -406,7 +406,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, if (r || !idle) goto error; - if (vm->reserved_vmid[vmhub] || (enforce_isolation && (vmhub == AMDGPU_GFXHUB(0)))) { + if (amdgpu_vmid_uses_reserved(vm, vmhub)) { r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence); if (r || !id) goto error; @@ -456,6 +456,19 @@ error: return r; } +/* + * amdgpu_vmid_uses_reserved - check if a VM will use a reserved VMID + * @vm: the VM to check + * @vmhub: the VMHUB which will be used + * + * Returns: True if the VM will use a reserved VMID. + */ +bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub) +{ + return vm->reserved_vmid[vmhub] || + (enforce_isolation && (vmhub == AMDGPU_GFXHUB(0))); +} + int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, unsigned vmhub) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h index fa8c42c83d5d..240fa6751260 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h @@ -78,6 +78,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv, bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev, struct amdgpu_vmid *id); +bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub); int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, unsigned vmhub); void amdgpu_vmid_free_reserved(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index e9ac20bed0f2..a622aca8c649 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -11181,6 +11181,49 @@ static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector, return ret; } +static void parse_edid_displayid_vrr(struct drm_connector *connector, + struct edid *edid) +{ + u8 *edid_ext = NULL; + int i; + int j = 0; + u16 min_vfreq; + u16 max_vfreq; + + if (edid == NULL || edid->extensions == 0) + return; + + /* Find DisplayID extension */ + for (i = 0; i < edid->extensions; i++) { + edid_ext = (void *)(edid + (i + 1)); + if (edid_ext[0] == DISPLAYID_EXT) + break; + } + + if (edid_ext == NULL) + return; + + while (j < EDID_LENGTH) { + /* Get dynamic video timing range from DisplayID if available */ + if (EDID_LENGTH - j > 13 && edid_ext[j] == 0x25 && + (edid_ext[j+1] & 0xFE) == 0 && (edid_ext[j+2] == 9)) { + min_vfreq = edid_ext[j+9]; + if (edid_ext[j+1] & 7) + max_vfreq = edid_ext[j+10] + ((edid_ext[j+11] & 3) << 8); + else + max_vfreq = edid_ext[j+10]; + + if (max_vfreq && min_vfreq) { + connector->display_info.monitor_range.max_vfreq = max_vfreq; + connector->display_info.monitor_range.min_vfreq = min_vfreq; + + return; + } + } + j++; + } +} + static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector, struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info) { @@ 
-11302,6 +11345,11 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, if (!adev->dm.freesync_module) goto update; + /* Some eDP panels only have the refresh rate range info in DisplayID */ + if ((connector->display_info.monitor_range.min_vfreq == 0 || + connector->display_info.monitor_range.max_vfreq == 0)) + parse_edid_displayid_vrr(connector, edid); + if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT || sink->sink_signal == SIGNAL_TYPE_EDP)) { bool edid_check_required = false; @@ -11309,9 +11357,11 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, if (is_dp_capable_without_timing_msa(adev->dm.dc, amdgpu_dm_connector)) { if (edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ) { - freesync_capable = true; amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq; amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq; + if (amdgpu_dm_connector->max_vfreq - + amdgpu_dm_connector->min_vfreq > 10) + freesync_capable = true; } else { edid_check_required = edid->version > 1 || (edid->version == 1 && diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c index 6c84b0fa40f4..0782a34689a0 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c @@ -3364,6 +3364,9 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l &mode_lib->vba.UrgentBurstFactorLumaPre[k], &mode_lib->vba.UrgentBurstFactorChromaPre[k], &mode_lib->vba.NotUrgentLatencyHidingPre[k]); + + v->cursor_bw_pre[k] = mode_lib->vba.NumberOfCursors[k] * mode_lib->vba.CursorWidth[k][0] * mode_lib->vba.CursorBPP[k][0] / + 8.0 / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * v->VRatioPreY[i][j][k]; } { diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c index a41812598ce8..8ecc972dbffd 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c @@ -234,6 +234,7 @@ void dml2_init_socbb_params(struct dml2_context *dml2, const struct dc *in_dc, s out->round_trip_ping_latency_dcfclk_cycles = 106; out->smn_latency_us = 2; out->dispclk_dppclk_vco_speed_mhz = 3600; + out->pct_ideal_dram_bw_after_urgent_pixel_only = 65.0; break; } diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c index 0f8b3336e26d..cbd1c1f26b7a 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c @@ -294,7 +294,7 @@ void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *cont context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = (unsigned int)in_ctx->v20.dml_core_ctx.mp.DCFCLKDeepSleep * 1000; context->bw_ctx.bw.dcn.clk.dppclk_khz = 0; - if (in_ctx->v20.dml_core_ctx.ms.support.FCLKChangeSupport[in_ctx->v20.scratch.mode_support_params.out_lowest_state_idx] == dml_fclock_change_unsupported) + if (in_ctx->v20.dml_core_ctx.ms.support.FCLKChangeSupport[0] == dml_fclock_change_unsupported) context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = false; else context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = true; diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h index 571691837200..09cbc3afd6d8 100644 --- 
a/drivers/gpu/drm/amd/include/atomfirmware.h +++ b/drivers/gpu/drm/amd/include/atomfirmware.h @@ -734,7 +734,7 @@ struct atom_gpio_pin_lut_v2_1 { struct atom_common_table_header table_header; /*the real number of this included in the structure is calcualted by using the (whole structure size - the header size)/size of atom_gpio_pin_lut */ - struct atom_gpio_pin_assignment gpio_pin[8]; + struct atom_gpio_pin_assignment gpio_pin[]; }; diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h index ea271f62b214..ec0b7f3d889c 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511.h +++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h @@ -401,7 +401,7 @@ struct adv7511 { #ifdef CONFIG_DRM_I2C_ADV7511_CEC int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511); -void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1); +int adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1); #else static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511) { diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c index 44451a9658a3..2e9c88a2b5ed 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c @@ -119,7 +119,7 @@ static void adv7511_cec_rx(struct adv7511 *adv7511, int rx_buf) cec_received_msg(adv7511->cec_adap, &msg); } -void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1) +int adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1) { unsigned int offset = adv7511->info->reg_cec_offset; const u32 irq_tx_mask = ADV7511_INT1_CEC_TX_READY | @@ -131,16 +131,19 @@ void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1) unsigned int rx_status; int rx_order[3] = { -1, -1, -1 }; int i; + int irq_status = IRQ_NONE; - if (irq1 & irq_tx_mask) + if (irq1 & irq_tx_mask) { adv_cec_tx_raw_status(adv7511, irq1); + irq_status = IRQ_HANDLED; + } if (!(irq1 & irq_rx_mask)) - return; + return irq_status; if (regmap_read(adv7511->regmap_cec, ADV7511_REG_CEC_RX_STATUS + offset, &rx_status)) - return; + return irq_status; /* * ADV7511_REG_CEC_RX_STATUS[5:0] contains the reception order of RX @@ -172,6 +175,8 @@ void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1) adv7511_cec_rx(adv7511, rx_buf); } + + return IRQ_HANDLED; } static int adv7511_cec_adap_enable(struct cec_adapter *adap, bool enable) diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index 66ccb61e2a66..c8d2c4a157b2 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -469,6 +469,8 @@ static int adv7511_irq_process(struct adv7511 *adv7511, bool process_hpd) { unsigned int irq0, irq1; int ret; + int cec_status = IRQ_NONE; + int irq_status = IRQ_NONE; ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(0), &irq0); if (ret < 0) @@ -478,29 +480,31 @@ static int adv7511_irq_process(struct adv7511 *adv7511, bool process_hpd) if (ret < 0) return ret; - /* If there is no IRQ to handle, exit indicating no IRQ data */ - if (!(irq0 & (ADV7511_INT0_HPD | ADV7511_INT0_EDID_READY)) && - !(irq1 & ADV7511_INT1_DDC_ERROR)) - return -ENODATA; - regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0); regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1); - if (process_hpd && irq0 & ADV7511_INT0_HPD && adv7511->bridge.encoder) + if (process_hpd && irq0 & ADV7511_INT0_HPD && adv7511->bridge.encoder) { 
schedule_work(&adv7511->hpd_work); + irq_status = IRQ_HANDLED; + } if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) { adv7511->edid_read = true; if (adv7511->i2c_main->irq) wake_up_all(&adv7511->wq); + irq_status = IRQ_HANDLED; } #ifdef CONFIG_DRM_I2C_ADV7511_CEC - adv7511_cec_irq_process(adv7511, irq1); + cec_status = adv7511_cec_irq_process(adv7511, irq1); #endif - return 0; + /* If there is no IRQ to handle, exit indicating no IRQ data */ + if (irq_status == IRQ_HANDLED || cec_status == IRQ_HANDLED) + return IRQ_HANDLED; + + return IRQ_NONE; } static irqreturn_t adv7511_irq_handler(int irq, void *devid) @@ -509,7 +513,7 @@ static irqreturn_t adv7511_irq_handler(int irq, void *devid) int ret; ret = adv7511_irq_process(adv7511, true); - return ret < 0 ? IRQ_NONE : IRQ_HANDLED; + return ret < 0 ? IRQ_NONE : ret; } /* ----------------------------------------------------------------------------- diff --git a/drivers/gpu/drm/drm_fbdev_generic.c b/drivers/gpu/drm/drm_fbdev_generic.c index 97e579c33d84..1e200d815e1a 100644 --- a/drivers/gpu/drm/drm_fbdev_generic.c +++ b/drivers/gpu/drm/drm_fbdev_generic.c @@ -84,7 +84,8 @@ static int drm_fbdev_generic_helper_fb_probe(struct drm_fb_helper *fb_helper, sizes->surface_width, sizes->surface_height, sizes->surface_bpp); - format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth); + format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp, + sizes->surface_depth); buffer = drm_client_framebuffer_create(client, sizes->surface_width, sizes->surface_height, format); if (IS_ERR(buffer)) diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index 2166208a961d..3860a8ce1e2d 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -420,13 +420,20 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Galaxy Book 10.6"), }, .driver_data = (void *)&lcd1280x1920_rightside_up, - }, { /* Valve Steam Deck */ + }, { /* Valve Steam Deck (Jupiter) */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Valve"), DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Jupiter"), DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "1"), }, .driver_data = (void *)&lcd800x1280_rightside_up, + }, { /* Valve Steam Deck (Galileo) */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Valve"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Galileo"), + DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "1"), + }, + .driver_data = (void *)&lcd800x1280_rightside_up, }, { /* VIOS LTH17 */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"), diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c index f08a6803dc18..3adc2c9ab72d 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c +++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c @@ -311,6 +311,9 @@ static int cdv_intel_lvds_get_modes(struct drm_connector *connector) if (mode_dev->panel_fixed_mode != NULL) { struct drm_display_mode *mode = drm_mode_duplicate(dev, mode_dev->panel_fixed_mode); + if (!mode) + return 0; + drm_mode_probed_add(connector, mode); return 1; } diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c index 8486de230ec9..8d1be94a443b 100644 --- a/drivers/gpu/drm/gma500/psb_intel_lvds.c +++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c @@ -504,6 +504,9 @@ static int psb_intel_lvds_get_modes(struct drm_connector *connector) if (mode_dev->panel_fixed_mode != NULL) { struct drm_display_mode *mode = 
drm_mode_duplicate(dev, mode_dev->panel_fixed_mode); + if (!mode) + return 0; + drm_mode_probed_add(connector, mode); return 1; } diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 3c3fc53376ce..6bff169fa8d4 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -2088,6 +2088,9 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port, u32 ln0, ln1, pin_assignment; u8 width; + if (DISPLAY_VER(dev_priv) >= 14) + return; + if (!intel_encoder_is_tc(&dig_port->base) || intel_tc_port_in_tbt_alt_mode(dig_port)) return; diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index 17a5cca007e2..4bd0baa2a4f5 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -250,29 +250,20 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) if (ret) goto free_drm; ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_0); - if (ret) { - meson_canvas_free(priv->canvas, priv->canvas_id_osd1); - goto free_drm; - } + if (ret) + goto free_canvas_osd1; ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_1); - if (ret) { - meson_canvas_free(priv->canvas, priv->canvas_id_osd1); - meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0); - goto free_drm; - } + if (ret) + goto free_canvas_vd1_0; ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_2); - if (ret) { - meson_canvas_free(priv->canvas, priv->canvas_id_osd1); - meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0); - meson_canvas_free(priv->canvas, priv->canvas_id_vd1_1); - goto free_drm; - } + if (ret) + goto free_canvas_vd1_1; priv->vsync_irq = platform_get_irq(pdev, 0); ret = drm_vblank_init(drm, 1); if (ret) - goto free_drm; + goto free_canvas_vd1_2; /* Assign limits per soc revision/package */ for (i = 0 ; i < ARRAY_SIZE(meson_drm_soc_attrs) ; ++i) { @@ -288,11 +279,11 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) */ ret = drm_aperture_remove_framebuffers(&meson_driver); if (ret) - goto free_drm; + goto free_canvas_vd1_2; ret = drmm_mode_config_init(drm); if (ret) - goto free_drm; + goto free_canvas_vd1_2; drm->mode_config.max_width = 3840; drm->mode_config.max_height = 2160; drm->mode_config.funcs = &meson_mode_config_funcs; @@ -307,7 +298,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) if (priv->afbcd.ops) { ret = priv->afbcd.ops->init(priv); if (ret) - goto free_drm; + goto free_canvas_vd1_2; } /* Encoder Initialization */ @@ -371,6 +362,14 @@ uninstall_irq: exit_afbcd: if (priv->afbcd.ops) priv->afbcd.ops->exit(priv); +free_canvas_vd1_2: + meson_canvas_free(priv->canvas, priv->canvas_id_vd1_2); +free_canvas_vd1_1: + meson_canvas_free(priv->canvas, priv->canvas_id_vd1_1); +free_canvas_vd1_0: + meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0); +free_canvas_osd1: + meson_canvas_free(priv->canvas, priv->canvas_id_osd1); free_drm: drm_dev_put(drm); diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 856b3ef5edb8..0c71d761d378 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -1001,6 +1001,9 @@ nouveau_connector_get_modes(struct drm_connector *connector) struct drm_display_mode *mode; mode = drm_mode_duplicate(dev, nv_connector->native_mode); + if (!mode) + return 0; + drm_mode_probed_add(connector, mode); ret = 1; } diff --git a/drivers/gpu/drm/panthor/panthor_drv.c 
b/drivers/gpu/drm/panthor/panthor_drv.c index b8a84f26b3ef..b5e7b919f241 100644 --- a/drivers/gpu/drm/panthor/panthor_drv.c +++ b/drivers/gpu/drm/panthor/panthor_drv.c @@ -86,15 +86,15 @@ panthor_get_uobj_array(const struct drm_panthor_obj_array *in, u32 min_stride, int ret = 0; void *out_alloc; + if (!in->count) + return NULL; + /* User stride must be at least the minimum object size, otherwise it might * lack useful information. */ if (in->stride < min_stride) return ERR_PTR(-EINVAL); - if (!in->count) - return NULL; - out_alloc = kvmalloc_array(in->count, obj_size, GFP_KERNEL); if (!out_alloc) return ERR_PTR(-ENOMEM); diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c index 79ffcbc41d78..9a0ff48f7061 100644 --- a/drivers/gpu/drm/panthor/panthor_sched.c +++ b/drivers/gpu/drm/panthor/panthor_sched.c @@ -459,6 +459,16 @@ struct panthor_queue { atomic64_t seqno; /** + * @last_fence: Fence of the last submitted job. + * + * We return this fence when we get an empty command stream. + * This way, we are guaranteed that all earlier jobs have completed + * when drm_sched_job::s_fence::finished is signaled, without having to feed + * the CS ring buffer with a dummy job that only signals the fence. + */ + struct dma_fence *last_fence; + + /** * @in_flight_jobs: List containing all in-flight jobs. * * Used to keep track and signal panthor_job::done_fence when the @@ -829,6 +839,9 @@ static void group_free_queue(struct panthor_group *group, struct panthor_queue * panthor_kernel_bo_destroy(queue->ringbuf); panthor_kernel_bo_destroy(queue->iface.mem); + /* Release the last_fence we were holding, if any. */ + dma_fence_put(queue->fence_ctx.last_fence); + kfree(queue); } @@ -2784,9 +2797,6 @@ static void group_sync_upd_work(struct work_struct *work) spin_lock(&queue->fence_ctx.lock); list_for_each_entry_safe(job, job_tmp, &queue->fence_ctx.in_flight_jobs, node) { - if (!job->call_info.size) - continue; - if (syncobj->seqno < job->done_fence->seqno) break; @@ -2865,11 +2875,14 @@ queue_run_job(struct drm_sched_job *sched_job) static_assert(sizeof(call_instrs) % 64 == 0, "call_instrs is not aligned on a cacheline"); - /* Stream size is zero, nothing to do => return a NULL fence and let - * drm_sched signal the parent. + /* Stream size is zero, nothing to do except making sure all previously + * submitted jobs are done before we signal the + * drm_sched_job::s_fence::finished fence. */ - if (!job->call_info.size) - return NULL; + if (!job->call_info.size) { + job->done_fence = dma_fence_get(queue->fence_ctx.last_fence); + return dma_fence_get(job->done_fence); + } ret = pm_runtime_resume_and_get(ptdev->base.dev); if (drm_WARN_ON(&ptdev->base, ret)) @@ -2928,6 +2941,10 @@ queue_run_job(struct drm_sched_job *sched_job) } } + /* Update the last fence. */ + dma_fence_put(queue->fence_ctx.last_fence); + queue->fence_ctx.last_fence = dma_fence_get(job->done_fence); + done_fence = dma_fence_get(job->done_fence); out_unlock: @@ -3378,10 +3395,15 @@ panthor_job_create(struct panthor_file *pfile, goto err_put_job; } - job->done_fence = kzalloc(sizeof(*job->done_fence), GFP_KERNEL); - if (!job->done_fence) { - ret = -ENOMEM; - goto err_put_job; + /* Empty command streams don't need a fence; they'll pick the one from + * the previously submitted job.
+ */ + if (job->call_info.size) { + job->done_fence = kzalloc(sizeof(*job->done_fence), GFP_KERNEL); + if (!job->done_fence) { + ret = -ENOMEM; + goto err_put_job; + } } ret = drm_sched_job_init(&job->base, diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 2ef201a072f1..e66a230331ee 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -642,7 +642,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev, if (r) goto error_unlock; - if (bo_va->it.start) + if (bo_va->it.start && bo_va->bo) r = radeon_vm_bo_update(rdev, bo_va, bo_va->bo->tbo.resource); error_unlock: diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 6396dece0db1..2427be8bc97f 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -346,6 +346,7 @@ static void ttm_bo_release(struct kref *kref) if (!dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP) || (want_init_on_free() && (bo->ttm != NULL)) || + bo->type == ttm_bo_type_sg || !dma_resv_trylock(bo->base.resv)) { /* The BO is not idle, resurrect it for delayed destroy */ ttm_bo_flush_all_fences(bo); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c index 2651fe0ef518..1f15990d3934 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c @@ -48,8 +48,6 @@ #define RETRIES 3 -#define VMW_HYPERVISOR_MAGIC 0x564D5868 - #define VMW_PORT_CMD_MSG 30 #define VMW_PORT_CMD_HB_MSG 0 #define VMW_PORT_CMD_OPEN_CHANNEL (MSG_TYPE_OPEN << 16 | VMW_PORT_CMD_MSG) @@ -104,20 +102,18 @@ static const char* const mksstat_kern_name_desc[MKSSTAT_KERN_COUNT][2] = */ static int vmw_open_channel(struct rpc_channel *channel, unsigned int protocol) { - unsigned long eax, ebx, ecx, edx, si = 0, di = 0; + u32 ecx, edx, esi, edi; - VMW_PORT(VMW_PORT_CMD_OPEN_CHANNEL, - (protocol | GUESTMSG_FLAG_COOKIE), si, di, - 0, - VMW_HYPERVISOR_MAGIC, - eax, ebx, ecx, edx, si, di); + vmware_hypercall6(VMW_PORT_CMD_OPEN_CHANNEL, + (protocol | GUESTMSG_FLAG_COOKIE), 0, + &ecx, &edx, &esi, &edi); if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) return -EINVAL; channel->channel_id = HIGH_WORD(edx); - channel->cookie_high = si; - channel->cookie_low = di; + channel->cookie_high = esi; + channel->cookie_low = edi; return 0; } @@ -133,17 +129,13 @@ static int vmw_open_channel(struct rpc_channel *channel, unsigned int protocol) */ static int vmw_close_channel(struct rpc_channel *channel) { - unsigned long eax, ebx, ecx, edx, si, di; - - /* Set up additional parameters */ - si = channel->cookie_high; - di = channel->cookie_low; + u32 ecx; - VMW_PORT(VMW_PORT_CMD_CLOSE_CHANNEL, - 0, si, di, - channel->channel_id << 16, - VMW_HYPERVISOR_MAGIC, - eax, ebx, ecx, edx, si, di); + vmware_hypercall5(VMW_PORT_CMD_CLOSE_CHANNEL, + 0, channel->channel_id << 16, + channel->cookie_high, + channel->cookie_low, + &ecx); if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) return -EINVAL; @@ -163,24 +155,18 @@ static int vmw_close_channel(struct rpc_channel *channel) static unsigned long vmw_port_hb_out(struct rpc_channel *channel, const char *msg, bool hb) { - unsigned long si, di, eax, ebx, ecx, edx; + u32 ebx, ecx; unsigned long msg_len = strlen(msg); /* HB port can't access encrypted memory. 
*/ if (hb && !cc_platform_has(CC_ATTR_MEM_ENCRYPT)) { - unsigned long bp = channel->cookie_high; - u32 channel_id = (channel->channel_id << 16); - - si = (uintptr_t) msg; - di = channel->cookie_low; - - VMW_PORT_HB_OUT( + vmware_hypercall_hb_out( (MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG, - msg_len, si, di, - VMWARE_HYPERVISOR_HB | channel_id | - VMWARE_HYPERVISOR_OUT, - VMW_HYPERVISOR_MAGIC, bp, - eax, ebx, ecx, edx, si, di); + msg_len, + channel->channel_id << 16, + (uintptr_t) msg, channel->cookie_low, + channel->cookie_high, + &ebx); return ebx; } @@ -194,14 +180,13 @@ static unsigned long vmw_port_hb_out(struct rpc_channel *channel, memcpy(&word, msg, bytes); msg_len -= bytes; msg += bytes; - si = channel->cookie_high; - di = channel->cookie_low; - - VMW_PORT(VMW_PORT_CMD_MSG | (MSG_TYPE_SENDPAYLOAD << 16), - word, si, di, - channel->channel_id << 16, - VMW_HYPERVISOR_MAGIC, - eax, ebx, ecx, edx, si, di); + + vmware_hypercall5(VMW_PORT_CMD_MSG | + (MSG_TYPE_SENDPAYLOAD << 16), + word, channel->channel_id << 16, + channel->cookie_high, + channel->cookie_low, + &ecx); } return ecx; @@ -220,22 +205,17 @@ static unsigned long vmw_port_hb_out(struct rpc_channel *channel, static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply, unsigned long reply_len, bool hb) { - unsigned long si, di, eax, ebx, ecx, edx; + u32 ebx, ecx, edx; /* HB port can't access encrypted memory */ if (hb && !cc_platform_has(CC_ATTR_MEM_ENCRYPT)) { - unsigned long bp = channel->cookie_low; - u32 channel_id = (channel->channel_id << 16); - - si = channel->cookie_high; - di = (uintptr_t) reply; - - VMW_PORT_HB_IN( + vmware_hypercall_hb_in( (MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG, - reply_len, si, di, - VMWARE_HYPERVISOR_HB | channel_id, - VMW_HYPERVISOR_MAGIC, bp, - eax, ebx, ecx, edx, si, di); + reply_len, + channel->channel_id << 16, + channel->cookie_high, + (uintptr_t) reply, channel->cookie_low, + &ebx); return ebx; } @@ -245,14 +225,13 @@ static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply, while (reply_len) { unsigned int bytes = min_t(unsigned long, reply_len, 4); - si = channel->cookie_high; - di = channel->cookie_low; - - VMW_PORT(VMW_PORT_CMD_MSG | (MSG_TYPE_RECVPAYLOAD << 16), - MESSAGE_STATUS_SUCCESS, si, di, - channel->channel_id << 16, - VMW_HYPERVISOR_MAGIC, - eax, ebx, ecx, edx, si, di); + vmware_hypercall7(VMW_PORT_CMD_MSG | + (MSG_TYPE_RECVPAYLOAD << 16), + MESSAGE_STATUS_SUCCESS, + channel->channel_id << 16, + channel->cookie_high, + channel->cookie_low, + &ebx, &ecx, &edx); if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) break; @@ -276,22 +255,18 @@ static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply, */ static int vmw_send_msg(struct rpc_channel *channel, const char *msg) { - unsigned long eax, ebx, ecx, edx, si, di; + u32 ebx, ecx; size_t msg_len = strlen(msg); int retries = 0; while (retries < RETRIES) { retries++; - /* Set up additional parameters */ - si = channel->cookie_high; - di = channel->cookie_low; - - VMW_PORT(VMW_PORT_CMD_SENDSIZE, - msg_len, si, di, - channel->channel_id << 16, - VMW_HYPERVISOR_MAGIC, - eax, ebx, ecx, edx, si, di); + vmware_hypercall5(VMW_PORT_CMD_SENDSIZE, + msg_len, channel->channel_id << 16, + channel->cookie_high, + channel->cookie_low, + &ecx); if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) { /* Expected success. Give up. 
*/ @@ -329,7 +304,7 @@ STACK_FRAME_NON_STANDARD(vmw_send_msg); static int vmw_recv_msg(struct rpc_channel *channel, void **msg, size_t *msg_len) { - unsigned long eax, ebx, ecx, edx, si, di; + u32 ebx, ecx, edx; char *reply; size_t reply_len; int retries = 0; @@ -341,15 +316,11 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg, while (retries < RETRIES) { retries++; - /* Set up additional parameters */ - si = channel->cookie_high; - di = channel->cookie_low; - - VMW_PORT(VMW_PORT_CMD_RECVSIZE, - 0, si, di, - channel->channel_id << 16, - VMW_HYPERVISOR_MAGIC, - eax, ebx, ecx, edx, si, di); + vmware_hypercall7(VMW_PORT_CMD_RECVSIZE, + 0, channel->channel_id << 16, + channel->cookie_high, + channel->cookie_low, + &ebx, &ecx, &edx); if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) { DRM_ERROR("Failed to get reply size for host message.\n"); @@ -384,16 +355,12 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg, reply[reply_len] = '\0'; - - /* Ack buffer */ - si = channel->cookie_high; - di = channel->cookie_low; - - VMW_PORT(VMW_PORT_CMD_RECVSTATUS, - MESSAGE_STATUS_SUCCESS, si, di, - channel->channel_id << 16, - VMW_HYPERVISOR_MAGIC, - eax, ebx, ecx, edx, si, di); + vmware_hypercall5(VMW_PORT_CMD_RECVSTATUS, + MESSAGE_STATUS_SUCCESS, + channel->channel_id << 16, + channel->cookie_high, + channel->cookie_low, + &ecx); if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) { kfree(reply); @@ -652,13 +619,7 @@ static inline void reset_ppn_array(PPN64 *arr, size_t size) */ static inline void hypervisor_ppn_reset_all(void) { - unsigned long eax, ebx, ecx, edx, si = 0, di = 0; - - VMW_PORT(VMW_PORT_CMD_MKSGS_RESET, - 0, si, di, - 0, - VMW_HYPERVISOR_MAGIC, - eax, ebx, ecx, edx, si, di); + vmware_hypercall1(VMW_PORT_CMD_MKSGS_RESET, 0); } /** @@ -669,13 +630,7 @@ static inline void hypervisor_ppn_reset_all(void) */ static inline void hypervisor_ppn_add(PPN64 pfn) { - unsigned long eax, ebx, ecx, edx, si = 0, di = 0; - - VMW_PORT(VMW_PORT_CMD_MKSGS_ADD_PPN, - (unsigned long)pfn, si, di, - 0, - VMW_HYPERVISOR_MAGIC, - eax, ebx, ecx, edx, si, di); + vmware_hypercall1(VMW_PORT_CMD_MKSGS_ADD_PPN, (unsigned long)pfn); } /** @@ -686,13 +641,7 @@ static inline void hypervisor_ppn_add(PPN64 pfn) */ static inline void hypervisor_ppn_remove(PPN64 pfn) { - unsigned long eax, ebx, ecx, edx, si = 0, di = 0; - - VMW_PORT(VMW_PORT_CMD_MKSGS_REMOVE_PPN, - (unsigned long)pfn, si, di, - 0, - VMW_HYPERVISOR_MAGIC, - eax, ebx, ecx, edx, si, di); + vmware_hypercall1(VMW_PORT_CMD_MKSGS_REMOVE_PPN, (unsigned long)pfn); } #if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h index 4f40167ad61f..3c78e9338b54 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h @@ -34,6 +34,8 @@ #define VMWARE_HYPERVISOR_HB BIT(0) #define VMWARE_HYPERVISOR_OUT BIT(1) +#define VMWARE_HYPERVISOR_MAGIC 0x564D5868 + #define X86_IO_MAGIC 0x86 #define X86_IO_W7_SIZE_SHIFT 0 @@ -45,86 +47,158 @@ #define X86_IO_W7_IMM_SHIFT 5 #define X86_IO_W7_IMM_MASK (0xff << X86_IO_W7_IMM_SHIFT) -static inline void vmw_port(unsigned long cmd, unsigned long in_ebx, - unsigned long in_si, unsigned long in_di, - unsigned long flags, unsigned long magic, - unsigned long *eax, unsigned long *ebx, - unsigned long *ecx, unsigned long *edx, - unsigned long *si, unsigned long *di) +static inline +unsigned long vmware_hypercall1(unsigned long cmd, unsigned long in1) { - register u64 x0 asm("x0") = magic; - 
register u64 x1 asm("x1") = in_ebx; + register u64 x0 asm("x0") = VMWARE_HYPERVISOR_MAGIC; + register u64 x1 asm("x1") = in1; register u64 x2 asm("x2") = cmd; - register u64 x3 asm("x3") = flags | VMWARE_HYPERVISOR_PORT; - register u64 x4 asm("x4") = in_si; - register u64 x5 asm("x5") = in_di; + register u64 x3 asm("x3") = VMWARE_HYPERVISOR_PORT; + register u64 x7 asm("x7") = ((u64)X86_IO_MAGIC << 32) | + X86_IO_W7_WITH | + X86_IO_W7_DIR | + (2 << X86_IO_W7_SIZE_SHIFT); + asm_inline volatile ( + "mrs xzr, mdccsr_el0; " + : "+r" (x0) + : "r" (x1), "r" (x2), "r" (x3), "r" (x7) + : "memory"); + + return x0; +} + +static inline +unsigned long vmware_hypercall5(unsigned long cmd, unsigned long in1, + unsigned long in3, unsigned long in4, + unsigned long in5, u32 *out2) +{ + register u64 x0 asm("x0") = VMWARE_HYPERVISOR_MAGIC; + register u64 x1 asm("x1") = in1; + register u64 x2 asm("x2") = cmd; + register u64 x3 asm("x3") = in3 | VMWARE_HYPERVISOR_PORT; + register u64 x4 asm("x4") = in4; + register u64 x5 asm("x5") = in5; register u64 x7 asm("x7") = ((u64)X86_IO_MAGIC << 32) | X86_IO_W7_WITH | X86_IO_W7_DIR | (2 << X86_IO_W7_SIZE_SHIFT); - asm volatile("mrs xzr, mdccsr_el0 \n\t" - : "+r"(x0), "+r"(x1), "+r"(x2), - "+r"(x3), "+r"(x4), "+r"(x5) - : "r"(x7) - :); - *eax = x0; - *ebx = x1; - *ecx = x2; - *edx = x3; - *si = x4; - *di = x5; + asm_inline volatile ( + "mrs xzr, mdccsr_el0; " + : "+r" (x0), "+r" (x2) + : "r" (x1), "r" (x3), "r" (x4), "r" (x5), "r" (x7) + : "memory"); + + *out2 = x2; + return x0; } -static inline void vmw_port_hb(unsigned long cmd, unsigned long in_ecx, - unsigned long in_si, unsigned long in_di, - unsigned long flags, unsigned long magic, - unsigned long bp, u32 w7dir, - unsigned long *eax, unsigned long *ebx, - unsigned long *ecx, unsigned long *edx, - unsigned long *si, unsigned long *di) +static inline +unsigned long vmware_hypercall6(unsigned long cmd, unsigned long in1, + unsigned long in3, u32 *out2, + u32 *out3, u32 *out4, u32 *out5) { - register u64 x0 asm("x0") = magic; + register u64 x0 asm("x0") = VMWARE_HYPERVISOR_MAGIC; + register u64 x1 asm("x1") = in1; + register u64 x2 asm("x2") = cmd; + register u64 x3 asm("x3") = in3 | VMWARE_HYPERVISOR_PORT; + register u64 x4 asm("x4"); + register u64 x5 asm("x5"); + register u64 x7 asm("x7") = ((u64)X86_IO_MAGIC << 32) | + X86_IO_W7_WITH | + X86_IO_W7_DIR | + (2 << X86_IO_W7_SIZE_SHIFT); + + asm_inline volatile ( + "mrs xzr, mdccsr_el0; " + : "+r" (x0), "+r" (x2), "+r" (x3), "=r" (x4), "=r" (x5) + : "r" (x1), "r" (x7) + : "memory"); + + *out2 = x2; + *out3 = x3; + *out4 = x4; + *out5 = x5; + return x0; +} + +static inline +unsigned long vmware_hypercall7(unsigned long cmd, unsigned long in1, + unsigned long in3, unsigned long in4, + unsigned long in5, u32 *out1, + u32 *out2, u32 *out3) +{ + register u64 x0 asm("x0") = VMWARE_HYPERVISOR_MAGIC; + register u64 x1 asm("x1") = in1; + register u64 x2 asm("x2") = cmd; + register u64 x3 asm("x3") = in3 | VMWARE_HYPERVISOR_PORT; + register u64 x4 asm("x4") = in4; + register u64 x5 asm("x5") = in5; + register u64 x7 asm("x7") = ((u64)X86_IO_MAGIC << 32) | + X86_IO_W7_WITH | + X86_IO_W7_DIR | + (2 << X86_IO_W7_SIZE_SHIFT); + + asm_inline volatile ( + "mrs xzr, mdccsr_el0; " + : "+r" (x0), "+r" (x1), "+r" (x2), "+r" (x3) + : "r" (x4), "r" (x5), "r" (x7) + : "memory"); + + *out1 = x1; + *out2 = x2; + *out3 = x3; + return x0; +} + +static inline +unsigned long vmware_hypercall_hb(unsigned long cmd, unsigned long in2, + unsigned long in3, unsigned long in4, + unsigned long in5, 
unsigned long in6, + u32 *out1, int dir) +{ + register u64 x0 asm("x0") = VMWARE_HYPERVISOR_MAGIC; register u64 x1 asm("x1") = cmd; - register u64 x2 asm("x2") = in_ecx; - register u64 x3 asm("x3") = flags | VMWARE_HYPERVISOR_PORT_HB; - register u64 x4 asm("x4") = in_si; - register u64 x5 asm("x5") = in_di; - register u64 x6 asm("x6") = bp; + register u64 x2 asm("x2") = in2; + register u64 x3 asm("x3") = in3 | VMWARE_HYPERVISOR_PORT_HB; + register u64 x4 asm("x4") = in4; + register u64 x5 asm("x5") = in5; + register u64 x6 asm("x6") = in6; register u64 x7 asm("x7") = ((u64)X86_IO_MAGIC << 32) | X86_IO_W7_STR | X86_IO_W7_WITH | - w7dir; - - asm volatile("mrs xzr, mdccsr_el0 \n\t" - : "+r"(x0), "+r"(x1), "+r"(x2), - "+r"(x3), "+r"(x4), "+r"(x5) - : "r"(x6), "r"(x7) - :); - *eax = x0; - *ebx = x1; - *ecx = x2; - *edx = x3; - *si = x4; - *di = x5; -} + dir; -#define VMW_PORT(cmd, in_ebx, in_si, in_di, flags, magic, eax, ebx, ecx, edx, \ - si, di) \ - vmw_port(cmd, in_ebx, in_si, in_di, flags, magic, &eax, &ebx, &ecx, \ - &edx, &si, &di) + asm_inline volatile ( + "mrs xzr, mdccsr_el0; " + : "+r" (x0), "+r" (x1) + : "r" (x2), "r" (x3), "r" (x4), "r" (x5), + "r" (x6), "r" (x7) + : "memory"); -#define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, flags, magic, bp, eax, ebx, \ - ecx, edx, si, di) \ - vmw_port_hb(cmd, in_ecx, in_si, in_di, flags, magic, bp, \ - 0, &eax, &ebx, &ecx, &edx, &si, &di) + *out1 = x1; + return x0; +} -#define VMW_PORT_HB_IN(cmd, in_ecx, in_si, in_di, flags, magic, bp, eax, ebx, \ - ecx, edx, si, di) \ - vmw_port_hb(cmd, in_ecx, in_si, in_di, flags, magic, bp, \ - X86_IO_W7_DIR, &eax, &ebx, &ecx, &edx, &si, &di) +static inline +unsigned long vmware_hypercall_hb_out(unsigned long cmd, unsigned long in2, + unsigned long in3, unsigned long in4, + unsigned long in5, unsigned long in6, + u32 *out1) +{ + return vmware_hypercall_hb(cmd, in2, in3, in4, in5, in6, out1, 0); +} +static inline +unsigned long vmware_hypercall_hb_in(unsigned long cmd, unsigned long in2, + unsigned long in3, unsigned long in4, + unsigned long in5, unsigned long in6, + u32 *out1) +{ + return vmware_hypercall_hb(cmd, in2, in3, in4, in5, in6, out1, + X86_IO_W7_DIR); +} #endif #endif /* _VMWGFX_MSG_ARM64_H */ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg_x86.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg_x86.h index 23899d743a90..13304d34cc6e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg_x86.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg_x86.h @@ -37,191 +37,6 @@ #include <asm/vmware.h> -/** - * Hypervisor-specific bi-directional communication channel. Should never - * execute on bare metal hardware. The caller must make sure to check for - * supported hypervisor before using these macros. - * - * The last two parameters are both input and output and must be initialized. - * - * @cmd: [IN] Message Cmd - * @in_ebx: [IN] Message Len, through EBX - * @in_si: [IN] Input argument through SI, set to 0 if not used - * @in_di: [IN] Input argument through DI, set ot 0 if not used - * @flags: [IN] hypercall flags + [channel id] - * @magic: [IN] hypervisor magic value - * @eax: [OUT] value of EAX register - * @ebx: [OUT] e.g. status from an HB message status command - * @ecx: [OUT] e.g. status from a non-HB message status command - * @edx: [OUT] e.g. 
channel id - * @si: [OUT] - * @di: [OUT] - */ -#define VMW_PORT(cmd, in_ebx, in_si, in_di, \ - flags, magic, \ - eax, ebx, ecx, edx, si, di) \ -({ \ - asm volatile (VMWARE_HYPERCALL : \ - "=a"(eax), \ - "=b"(ebx), \ - "=c"(ecx), \ - "=d"(edx), \ - "=S"(si), \ - "=D"(di) : \ - "a"(magic), \ - "b"(in_ebx), \ - "c"(cmd), \ - "d"(flags), \ - "S"(in_si), \ - "D"(in_di) : \ - "memory"); \ -}) - - -/** - * Hypervisor-specific bi-directional communication channel. Should never - * execute on bare metal hardware. The caller must make sure to check for - * supported hypervisor before using these macros. - * - * The last 3 parameters are both input and output and must be initialized. - * - * @cmd: [IN] Message Cmd - * @in_ecx: [IN] Message Len, through ECX - * @in_si: [IN] Input argument through SI, set to 0 if not used - * @in_di: [IN] Input argument through DI, set to 0 if not used - * @flags: [IN] hypercall flags + [channel id] - * @magic: [IN] hypervisor magic value - * @bp: [IN] - * @eax: [OUT] value of EAX register - * @ebx: [OUT] e.g. status from an HB message status command - * @ecx: [OUT] e.g. status from a non-HB message status command - * @edx: [OUT] e.g. channel id - * @si: [OUT] - * @di: [OUT] - */ -#ifdef __x86_64__ - -#define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, \ - flags, magic, bp, \ - eax, ebx, ecx, edx, si, di) \ -({ \ - asm volatile ( \ - UNWIND_HINT_SAVE \ - "push %%rbp;" \ - UNWIND_HINT_UNDEFINED \ - "mov %12, %%rbp;" \ - VMWARE_HYPERCALL_HB_OUT \ - "pop %%rbp;" \ - UNWIND_HINT_RESTORE : \ - "=a"(eax), \ - "=b"(ebx), \ - "=c"(ecx), \ - "=d"(edx), \ - "=S"(si), \ - "=D"(di) : \ - "a"(magic), \ - "b"(cmd), \ - "c"(in_ecx), \ - "d"(flags), \ - "S"(in_si), \ - "D"(in_di), \ - "r"(bp) : \ - "memory", "cc"); \ -}) - - -#define VMW_PORT_HB_IN(cmd, in_ecx, in_si, in_di, \ - flags, magic, bp, \ - eax, ebx, ecx, edx, si, di) \ -({ \ - asm volatile ( \ - UNWIND_HINT_SAVE \ - "push %%rbp;" \ - UNWIND_HINT_UNDEFINED \ - "mov %12, %%rbp;" \ - VMWARE_HYPERCALL_HB_IN \ - "pop %%rbp;" \ - UNWIND_HINT_RESTORE : \ - "=a"(eax), \ - "=b"(ebx), \ - "=c"(ecx), \ - "=d"(edx), \ - "=S"(si), \ - "=D"(di) : \ - "a"(magic), \ - "b"(cmd), \ - "c"(in_ecx), \ - "d"(flags), \ - "S"(in_si), \ - "D"(in_di), \ - "r"(bp) : \ - "memory", "cc"); \ -}) - -#elif defined(__i386__) - -/* - * In the 32-bit version of this macro, we store bp in a memory location - * because we've ran out of registers. - * Now we can't reference that memory location while we've modified - * %esp or %ebp, so we first push it on the stack, just before we push - * %ebp, and then when we need it we read it from the stack where we - * just pushed it. 
- */ -#define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, \ - flags, magic, bp, \ - eax, ebx, ecx, edx, si, di) \ -({ \ - asm volatile ("push %12;" \ - "push %%ebp;" \ - "mov 0x04(%%esp), %%ebp;" \ - VMWARE_HYPERCALL_HB_OUT \ - "pop %%ebp;" \ - "add $0x04, %%esp;" : \ - "=a"(eax), \ - "=b"(ebx), \ - "=c"(ecx), \ - "=d"(edx), \ - "=S"(si), \ - "=D"(di) : \ - "a"(magic), \ - "b"(cmd), \ - "c"(in_ecx), \ - "d"(flags), \ - "S"(in_si), \ - "D"(in_di), \ - "m"(bp) : \ - "memory", "cc"); \ -}) - - -#define VMW_PORT_HB_IN(cmd, in_ecx, in_si, in_di, \ - flags, magic, bp, \ - eax, ebx, ecx, edx, si, di) \ -({ \ - asm volatile ("push %12;" \ - "push %%ebp;" \ - "mov 0x04(%%esp), %%ebp;" \ - VMWARE_HYPERCALL_HB_IN \ - "pop %%ebp;" \ - "add $0x04, %%esp;" : \ - "=a"(eax), \ - "=b"(ebx), \ - "=c"(ecx), \ - "=d"(edx), \ - "=S"(si), \ - "=D"(di) : \ - "a"(magic), \ - "b"(cmd), \ - "c"(in_ecx), \ - "d"(flags), \ - "S"(in_si), \ - "D"(in_di), \ - "m"(bp) : \ - "memory", "cc"); \ -}) -#endif /* defined(__i386__) */ - #endif /* defined(__i386__) || defined(__x86_64__) */ #endif /* _VMWGFX_MSG_X86_H */ diff --git a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c index d46f87a039f2..b3d3c065dd9d 100644 --- a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c +++ b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c @@ -159,12 +159,16 @@ void intel_hdcp_gsc_fini(struct xe_device *xe) { struct intel_hdcp_gsc_message *hdcp_message = xe->display.hdcp.hdcp_message; + struct i915_hdcp_arbiter *arb = xe->display.hdcp.arbiter; - if (!hdcp_message) - return; + if (hdcp_message) { + xe_bo_unpin_map_no_vm(hdcp_message->hdcp_bo); + kfree(hdcp_message); + xe->display.hdcp.hdcp_message = NULL; + } - xe_bo_unpin_map_no_vm(hdcp_message->hdcp_bo); - kfree(hdcp_message); + kfree(arb); + xe->display.hdcp.arbiter = NULL; } static int xe_gsc_send_sync(struct xe_device *xe, diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c index bc1f794e3e61..b6f3a43d637f 100644 --- a/drivers/gpu/drm/xe/xe_bo.c +++ b/drivers/gpu/drm/xe/xe_bo.c @@ -317,7 +317,7 @@ static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo, struct xe_device *xe = xe_bo_device(bo); struct xe_ttm_tt *tt; unsigned long extra_pages; - enum ttm_caching caching; + enum ttm_caching caching = ttm_cached; int err; tt = kzalloc(sizeof(*tt), GFP_KERNEL); @@ -331,26 +331,35 @@ static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo, extra_pages = DIV_ROUND_UP(xe_device_ccs_bytes(xe, bo->size), PAGE_SIZE); - switch (bo->cpu_caching) { - case DRM_XE_GEM_CPU_CACHING_WC: - caching = ttm_write_combined; - break; - default: - caching = ttm_cached; - break; - } - - WARN_ON((bo->flags & XE_BO_FLAG_USER) && !bo->cpu_caching); - /* - * Display scanout is always non-coherent with the CPU cache. - * - * For Xe_LPG and beyond, PPGTT PTE lookups are also non-coherent and - * require a CPU:WC mapping. + * DGFX system memory is always WB / ttm_cached, since + * other caching modes are only supported on x86. DGFX + * GPU system memory accesses are always coherent with the + * CPU. 
*/ - if ((!bo->cpu_caching && bo->flags & XE_BO_FLAG_SCANOUT) || - (xe->info.graphics_verx100 >= 1270 && bo->flags & XE_BO_FLAG_PAGETABLE)) - caching = ttm_write_combined; + if (!IS_DGFX(xe)) { + switch (bo->cpu_caching) { + case DRM_XE_GEM_CPU_CACHING_WC: + caching = ttm_write_combined; + break; + default: + caching = ttm_cached; + break; + } + + WARN_ON((bo->flags & XE_BO_FLAG_USER) && !bo->cpu_caching); + + /* + * Display scanout is always non-coherent with the CPU cache. + * + * For Xe_LPG and beyond, PPGTT PTE lookups are also + * non-coherent and require a CPU:WC mapping. + */ + if ((!bo->cpu_caching && bo->flags & XE_BO_FLAG_SCANOUT) || + (xe->info.graphics_verx100 >= 1270 && + bo->flags & XE_BO_FLAG_PAGETABLE)) + caching = ttm_write_combined; + } err = ttm_tt_init(&tt->ttm, &bo->ttm, page_flags, caching, extra_pages); if (err) { diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h index 86422e113d39..10450f1fbbde 100644 --- a/drivers/gpu/drm/xe/xe_bo_types.h +++ b/drivers/gpu/drm/xe/xe_bo_types.h @@ -66,7 +66,8 @@ struct xe_bo { /** * @cpu_caching: CPU caching mode. Currently only used for userspace - * objects. + * objects. Exceptions are system memory on DGFX, which is always + * WB. */ u16 cpu_caching; diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c index 577bd7043740..0443e07880a0 100644 --- a/drivers/gpu/drm/xe/xe_gt_mcr.c +++ b/drivers/gpu/drm/xe/xe_gt_mcr.c @@ -342,7 +342,7 @@ static void init_steering_oaddrm(struct xe_gt *gt) else gt->steering[OADDRM].group_target = 1; - gt->steering[DSS].instance_target = 0; /* unused */ + gt->steering[OADDRM].instance_target = 0; /* unused */ } static void init_steering_sqidi_psmi(struct xe_gt *gt) @@ -357,8 +357,8 @@ static void init_steering_sqidi_psmi(struct xe_gt *gt) static void init_steering_inst0(struct xe_gt *gt) { - gt->steering[DSS].group_target = 0; /* unused */ - gt->steering[DSS].instance_target = 0; /* unused */ + gt->steering[INSTANCE0].group_target = 0; /* unused */ + gt->steering[INSTANCE0].instance_target = 0; /* unused */ } static const struct { diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c index 65e5a3f4c340..198f5c2189cb 100644 --- a/drivers/gpu/drm/xe/xe_migrate.c +++ b/drivers/gpu/drm/xe/xe_migrate.c @@ -1334,7 +1334,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m, GFP_KERNEL, true, 0); if (IS_ERR(sa_bo)) { err = PTR_ERR(sa_bo); - goto err; + goto err_bb; } ppgtt_ofs = NUM_KERNEL_PDE + @@ -1385,7 +1385,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m, update_idx); if (IS_ERR(job)) { err = PTR_ERR(job); - goto err_bb; + goto err_sa; } /* Wait on BO move */ @@ -1434,12 +1434,12 @@ xe_migrate_update_pgtables(struct xe_migrate *m, err_job: xe_sched_job_put(job); +err_sa: + drm_suballoc_free(sa_bo, NULL); err_bb: if (!q) mutex_unlock(&m->job_mutex); xe_bb_free(bb, NULL); -err: - drm_suballoc_free(sa_bo, NULL); return ERR_PTR(err); } diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index e14ae18a973b..b60fe2e58ad6 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -105,18 +105,6 @@ config SENSORS_AD7418 This driver can also be built as a module. If so, the module will be called ad7418. 
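The xe_migrate hunk above is an instance of the kernel's reverse-order unwind idiom: each failure jumps to the label that releases exactly what was acquired before it, so a failed suballocation now unwinds through err_bb instead of passing an ERR_PTR to drm_suballoc_free(), and a failed job creation releases the suballocation through the new err_sa label. A minimal standalone sketch of the idiom, with hypothetical acquire_bb()/acquire_sa()/submit_job() helpers standing in for the real buffer, suballocation and job-creation calls:

/* Hypothetical helpers; each acquire pairs with a release below. */
int acquire_bb(void);
int acquire_sa(void);
int submit_job(void);
void release_sa(void);
void release_bb(void);

int setup_migration_job(void)
{
	int err;

	err = acquire_bb();
	if (err)
		return err;	/* nothing held yet, plain return */

	err = acquire_sa();
	if (err)
		goto err_bb;	/* undo only the bb */

	err = submit_job();
	if (err)
		goto err_sa;	/* undo both, most recent first */

	return 0;

err_sa:
	release_sa();
err_bb:
	release_bb();
	return err;
}

Because the labels fall through in reverse order of acquisition, adding a resource means adding exactly one label and one goto target, which is the property the hunk restores.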
-config SENSORS_ADM1021 - tristate "Analog Devices ADM1021 and compatibles" - depends on I2C - depends on SENSORS_LM90=n - help - If you say yes here you get support for Analog Devices ADM1021 - and ADM1023 sensor chips and clones: Maxim MAX1617 and MAX1617A, - Genesys Logic GL523SM, National Semiconductor LM84 and TI THMC10. - - This driver can also be built as a module. If so, the module - will be called adm1021. - config SENSORS_ADM1025 tristate "Analog Devices ADM1025 and compatibles" depends on I2C @@ -506,6 +494,17 @@ config SENSORS_CORSAIR_PSU This driver can also be built as a module. If so, the module will be called corsair-psu. +config SENSORS_CROS_EC + tristate "ChromeOS Embedded Controller sensors" + depends on MFD_CROS_EC_DEV + default MFD_CROS_EC_DEV + help + If you say yes here you get support for ChromeOS Embedded Controller + sensors. + + This driver can also be built as a module. If so, the module + will be called cros_ec_hwmon. + config SENSORS_DRIVETEMP tristate "Hard disk drives with temperature sensors" depends on SCSI && ATA @@ -1241,18 +1240,6 @@ config SENSORS_MAX6639 This driver can also be built as a module. If so, the module will be called max6639. -config SENSORS_MAX6642 - tristate "Maxim MAX6642 sensor chip" - depends on I2C - depends on SENSORS_LM90=n - help - If you say yes here you get support for MAX6642 sensor chip. - MAX6642 is a SMBus-Compatible Remote/Local Temperature Sensor - with Overtemperature Alarm from Maxim. - - This driver can also be built as a module. If so, the module - will be called max6642. - config SENSORS_MAX6650 tristate "Maxim MAX6650 sensor chip" depends on I2C @@ -2127,6 +2114,7 @@ config SENSORS_ADS7871 config SENSORS_AMC6821 tristate "Texas Instruments AMC6821" depends on I2C + select REGMAP_I2C help If you say yes here you get support for the Texas Instruments AMC6821 hardware monitoring chips. @@ -2181,6 +2169,37 @@ config SENSORS_INA3221 This driver can also be built as a module. If so, the module will be called ina3221. +config SENSORS_SPD5118 + tristate "SPD5118 Compliant Temperature Sensors" + depends on I2C + select REGMAP_I2C + help + If you say yes here you get support for SPD5118 (JEDEC JESD300) + compliant temperature sensors. Such sensors are found on DDR5 memory + modules. + + This driver can also be built as a module. If so, the module + will be called spd5118. + +config SENSORS_SPD5118_DETECT + bool "Enable detect function" + depends on SENSORS_SPD5118 + default (!DMI || !X86) + help + If enabled, the driver auto-detects if a chip in the SPD address + range is compliant with the SPD5118 standard and auto-instantiates + if that is the case. If disabled, SPD5118 compliant devices have + to be instantiated by other means. On X86 systems with DMI support + this will typically be done from DMI DDR detection code in the + I2C SMBus subsystem. Devicetree-based systems will instantiate + attached devices if the DIMMs are listed in the devicetree file. + + Disabling the detect function will speed up boot time and reduce + the risk of mis-detecting SPD5118 compliant devices. However, it + may result in missed DIMMs under some circumstances. + + If unsure, say Y.
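For systems that disable SENSORS_SPD5118_DETECT, an SPD5118 sensor can still be instantiated explicitly from platform code through the generic I2C board-info interface. A minimal sketch, assuming a hypothetical adapter number and SPD address (both would come from the actual board):

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/i2c.h>

static const struct i2c_board_info spd5118_board_info = {
	I2C_BOARD_INFO("spd5118", 0x51),	/* hypothetical SPD address */
};

static int spd5118_instantiate(void)
{
	struct i2c_adapter *adap = i2c_get_adapter(0);	/* hypothetical bus */
	struct i2c_client *client;

	if (!adap)
		return -ENODEV;
	client = i2c_new_client_device(adap, &spd5118_board_info);
	i2c_put_adapter(adap);
	return PTR_ERR_OR_ZERO(client);
}

Devicetree-based systems instead list the DIMM under the SMBus controller node, so no such code is needed there.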
+ config SENSORS_TC74 tristate "Microchip TC74" depends on I2C diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index e3f25475d1f0..b1c7056c37db 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile @@ -31,7 +31,6 @@ obj-$(CONFIG_SENSORS_AD7414) += ad7414.o obj-$(CONFIG_SENSORS_AD7418) += ad7418.o obj-$(CONFIG_SENSORS_ADC128D818) += adc128d818.o obj-$(CONFIG_SENSORS_ADCXX) += adcxx.o -obj-$(CONFIG_SENSORS_ADM1021) += adm1021.o obj-$(CONFIG_SENSORS_ADM1025) += adm1025.o obj-$(CONFIG_SENSORS_ADM1026) += adm1026.o obj-$(CONFIG_SENSORS_ADM1029) += adm1029.o @@ -64,6 +63,7 @@ obj-$(CONFIG_SENSORS_CHIPCAP2) += chipcap2.o obj-$(CONFIG_SENSORS_CORETEMP) += coretemp.o obj-$(CONFIG_SENSORS_CORSAIR_CPRO) += corsair-cpro.o obj-$(CONFIG_SENSORS_CORSAIR_PSU) += corsair-psu.o +obj-$(CONFIG_SENSORS_CROS_EC) += cros_ec_hwmon.o obj-$(CONFIG_SENSORS_DA9052_ADC)+= da9052-hwmon.o obj-$(CONFIG_SENSORS_DA9055)+= da9055-hwmon.o obj-$(CONFIG_SENSORS_DELL_SMM) += dell-smm-hwmon.o @@ -154,7 +154,6 @@ obj-$(CONFIG_SENSORS_MAX31760) += max31760.o obj-$(CONFIG_SENSORS_MAX6620) += max6620.o obj-$(CONFIG_SENSORS_MAX6621) += max6621.o obj-$(CONFIG_SENSORS_MAX6639) += max6639.o -obj-$(CONFIG_SENSORS_MAX6642) += max6642.o obj-$(CONFIG_SENSORS_MAX6650) += max6650.o obj-$(CONFIG_SENSORS_MAX6697) += max6697.o obj-$(CONFIG_SENSORS_MAX31790) += max31790.o @@ -207,6 +206,7 @@ obj-$(CONFIG_SENSORS_SMSC47B397)+= smsc47b397.o obj-$(CONFIG_SENSORS_SMSC47M1) += smsc47m1.o obj-$(CONFIG_SENSORS_SMSC47M192)+= smsc47m192.o obj-$(CONFIG_SENSORS_SPARX5) += sparx5-temp.o +obj-$(CONFIG_SENSORS_SPD5118) += spd5118.o obj-$(CONFIG_SENSORS_STTS751) += stts751.o obj-$(CONFIG_SENSORS_SURFACE_FAN)+= surface_fan.o obj-$(CONFIG_SENSORS_SY7636A) += sy7636a-hwmon.o diff --git a/drivers/hwmon/ad7418.c b/drivers/hwmon/ad7418.c index 4829f83ff52e..7a132accdf8a 100644 --- a/drivers/hwmon/ad7418.c +++ b/drivers/hwmon/ad7418.c @@ -230,8 +230,6 @@ static void ad7418_init_client(struct i2c_client *client) } } -static const struct i2c_device_id ad7418_id[]; - static int ad7418_probe(struct i2c_client *client) { struct device *dev = &client->dev; @@ -252,10 +250,7 @@ static int ad7418_probe(struct i2c_client *client) mutex_init(&data->lock); data->client = client; - if (dev->of_node) - data->type = (uintptr_t)of_device_get_match_data(dev); - else - data->type = i2c_match_id(ad7418_id, client)->driver_data; + data->type = (uintptr_t)i2c_get_match_data(client); switch (data->type) { case ad7416: diff --git a/drivers/hwmon/adc128d818.c b/drivers/hwmon/adc128d818.c index 8ac6e735ec5c..5e805d4ee76a 100644 --- a/drivers/hwmon/adc128d818.c +++ b/drivers/hwmon/adc128d818.c @@ -175,7 +175,7 @@ static ssize_t adc128_in_store(struct device *dev, mutex_lock(&data->update_lock); /* 10 mV LSB on limit registers */ - regval = clamp_val(DIV_ROUND_CLOSEST(val, 10), 0, 255); + regval = DIV_ROUND_CLOSEST(clamp_val(val, 0, 2550), 10); data->in[index][nr] = regval << 4; reg = index == 1 ? ADC128_REG_IN_MIN(nr) : ADC128_REG_IN_MAX(nr); i2c_smbus_write_byte_data(data->client, reg, regval); @@ -213,7 +213,7 @@ static ssize_t adc128_temp_store(struct device *dev, return err; mutex_lock(&data->update_lock); - regval = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127); + regval = DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), 1000); data->temp[index] = regval << 1; i2c_smbus_write_byte_data(data->client, index == 1 ? 
ADC128_REG_TEMP_MAX diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c deleted file mode 100644 index 7c15398ebb37..000000000000 --- a/drivers/hwmon/adm1021.c +++ /dev/null @@ -1,505 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * adm1021.c - Part of lm_sensors, Linux kernel modules for hardware - * monitoring - * Copyright (c) 1998, 1999 Frodo Looijaard <frodol@dds.nl> and - * Philip Edelbrock <phil@netroedge.com> - */ - -#include <linux/module.h> -#include <linux/init.h> -#include <linux/slab.h> -#include <linux/jiffies.h> -#include <linux/i2c.h> -#include <linux/hwmon.h> -#include <linux/hwmon-sysfs.h> -#include <linux/err.h> -#include <linux/mutex.h> - - -/* Addresses to scan */ -static const unsigned short normal_i2c[] = { - 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END }; - -enum chips { - adm1021, adm1023, max1617, max1617a, thmc10, lm84, gl523sm, mc1066 }; - -/* adm1021 constants specified below */ - -/* The adm1021 registers */ -/* Read-only */ -/* For nr in 0-1 */ -#define ADM1021_REG_TEMP(nr) (nr) -#define ADM1021_REG_STATUS 0x02 -/* 0x41 = AD, 0x49 = TI, 0x4D = Maxim, 0x23 = Genesys , 0x54 = Onsemi */ -#define ADM1021_REG_MAN_ID 0xFE -/* ADM1021 = 0x0X, ADM1023 = 0x3X */ -#define ADM1021_REG_DEV_ID 0xFF -/* These use different addresses for reading/writing */ -#define ADM1021_REG_CONFIG_R 0x03 -#define ADM1021_REG_CONFIG_W 0x09 -#define ADM1021_REG_CONV_RATE_R 0x04 -#define ADM1021_REG_CONV_RATE_W 0x0A -/* These are for the ADM1023's additional precision on the remote temp sensor */ -#define ADM1023_REG_REM_TEMP_PREC 0x10 -#define ADM1023_REG_REM_OFFSET 0x11 -#define ADM1023_REG_REM_OFFSET_PREC 0x12 -#define ADM1023_REG_REM_TOS_PREC 0x13 -#define ADM1023_REG_REM_THYST_PREC 0x14 -/* limits */ -/* For nr in 0-1 */ -#define ADM1021_REG_TOS_R(nr) (0x05 + 2 * (nr)) -#define ADM1021_REG_TOS_W(nr) (0x0B + 2 * (nr)) -#define ADM1021_REG_THYST_R(nr) (0x06 + 2 * (nr)) -#define ADM1021_REG_THYST_W(nr) (0x0C + 2 * (nr)) -/* write-only */ -#define ADM1021_REG_ONESHOT 0x0F - -/* Initial values */ - -/* - * Note: Even though I left the low and high limits named os and hyst, - * they don't quite work like a thermostat the way the LM75 does. I.e., - * a lower temp than THYST actually triggers an alarm instead of - * clearing it. Weird, ey? 
--Phil - */ - -/* Each client has this additional data */ -struct adm1021_data { - struct i2c_client *client; - enum chips type; - - const struct attribute_group *groups[3]; - - struct mutex update_lock; - bool valid; /* true if following fields are valid */ - char low_power; /* !=0 if device in low power mode */ - unsigned long last_updated; /* In jiffies */ - - int temp_max[2]; /* Register values */ - int temp_min[2]; - int temp[2]; - u8 alarms; - /* Special values for ADM1023 only */ - u8 remote_temp_offset; - u8 remote_temp_offset_prec; -}; - -/* (amalysh) read only mode, otherwise any limit's writing confuse BIOS */ -static bool read_only; - -static struct adm1021_data *adm1021_update_device(struct device *dev) -{ - struct adm1021_data *data = dev_get_drvdata(dev); - struct i2c_client *client = data->client; - - mutex_lock(&data->update_lock); - - if (time_after(jiffies, data->last_updated + HZ + HZ / 2) - || !data->valid) { - int i; - - dev_dbg(dev, "Starting adm1021 update\n"); - - for (i = 0; i < 2; i++) { - data->temp[i] = 1000 * - (s8) i2c_smbus_read_byte_data( - client, ADM1021_REG_TEMP(i)); - data->temp_max[i] = 1000 * - (s8) i2c_smbus_read_byte_data( - client, ADM1021_REG_TOS_R(i)); - if (data->type != lm84) { - data->temp_min[i] = 1000 * - (s8) i2c_smbus_read_byte_data(client, - ADM1021_REG_THYST_R(i)); - } - } - data->alarms = i2c_smbus_read_byte_data(client, - ADM1021_REG_STATUS) & 0x7c; - if (data->type == adm1023) { - /* - * The ADM1023 provides 3 extra bits of precision for - * the remote sensor in extra registers. - */ - data->temp[1] += 125 * (i2c_smbus_read_byte_data( - client, ADM1023_REG_REM_TEMP_PREC) >> 5); - data->temp_max[1] += 125 * (i2c_smbus_read_byte_data( - client, ADM1023_REG_REM_TOS_PREC) >> 5); - data->temp_min[1] += 125 * (i2c_smbus_read_byte_data( - client, ADM1023_REG_REM_THYST_PREC) >> 5); - data->remote_temp_offset = - i2c_smbus_read_byte_data(client, - ADM1023_REG_REM_OFFSET); - data->remote_temp_offset_prec = - i2c_smbus_read_byte_data(client, - ADM1023_REG_REM_OFFSET_PREC); - } - data->last_updated = jiffies; - data->valid = true; - } - - mutex_unlock(&data->update_lock); - - return data; -} - -static ssize_t temp_show(struct device *dev, struct device_attribute *devattr, - char *buf) -{ - int index = to_sensor_dev_attr(devattr)->index; - struct adm1021_data *data = adm1021_update_device(dev); - - return sprintf(buf, "%d\n", data->temp[index]); -} - -static ssize_t temp_max_show(struct device *dev, - struct device_attribute *devattr, char *buf) -{ - int index = to_sensor_dev_attr(devattr)->index; - struct adm1021_data *data = adm1021_update_device(dev); - - return sprintf(buf, "%d\n", data->temp_max[index]); -} - -static ssize_t temp_min_show(struct device *dev, - struct device_attribute *devattr, char *buf) -{ - int index = to_sensor_dev_attr(devattr)->index; - struct adm1021_data *data = adm1021_update_device(dev); - - return sprintf(buf, "%d\n", data->temp_min[index]); -} - -static ssize_t alarm_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - int index = to_sensor_dev_attr(attr)->index; - struct adm1021_data *data = adm1021_update_device(dev); - return sprintf(buf, "%u\n", (data->alarms >> index) & 1); -} - -static ssize_t alarms_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct adm1021_data *data = adm1021_update_device(dev); - return sprintf(buf, "%u\n", data->alarms); -} - -static ssize_t temp_max_store(struct device *dev, - struct device_attribute *devattr, - const char *buf, 
size_t count) -{ - int index = to_sensor_dev_attr(devattr)->index; - struct adm1021_data *data = dev_get_drvdata(dev); - struct i2c_client *client = data->client; - long temp; - int reg_val, err; - - err = kstrtol(buf, 10, &temp); - if (err) - return err; - temp /= 1000; - - mutex_lock(&data->update_lock); - reg_val = clamp_val(temp, -128, 127); - data->temp_max[index] = reg_val * 1000; - if (!read_only) - i2c_smbus_write_byte_data(client, ADM1021_REG_TOS_W(index), - reg_val); - mutex_unlock(&data->update_lock); - - return count; -} - -static ssize_t temp_min_store(struct device *dev, - struct device_attribute *devattr, - const char *buf, size_t count) -{ - int index = to_sensor_dev_attr(devattr)->index; - struct adm1021_data *data = dev_get_drvdata(dev); - struct i2c_client *client = data->client; - long temp; - int reg_val, err; - - err = kstrtol(buf, 10, &temp); - if (err) - return err; - temp /= 1000; - - mutex_lock(&data->update_lock); - reg_val = clamp_val(temp, -128, 127); - data->temp_min[index] = reg_val * 1000; - if (!read_only) - i2c_smbus_write_byte_data(client, ADM1021_REG_THYST_W(index), - reg_val); - mutex_unlock(&data->update_lock); - - return count; -} - -static ssize_t low_power_show(struct device *dev, - struct device_attribute *devattr, char *buf) -{ - struct adm1021_data *data = adm1021_update_device(dev); - return sprintf(buf, "%d\n", data->low_power); -} - -static ssize_t low_power_store(struct device *dev, - struct device_attribute *devattr, - const char *buf, size_t count) -{ - struct adm1021_data *data = dev_get_drvdata(dev); - struct i2c_client *client = data->client; - char low_power; - unsigned long val; - int err; - - err = kstrtoul(buf, 10, &val); - if (err) - return err; - low_power = val != 0; - - mutex_lock(&data->update_lock); - if (low_power != data->low_power) { - int config = i2c_smbus_read_byte_data( - client, ADM1021_REG_CONFIG_R); - data->low_power = low_power; - i2c_smbus_write_byte_data(client, ADM1021_REG_CONFIG_W, - (config & 0xBF) | (low_power << 6)); - } - mutex_unlock(&data->update_lock); - - return count; -} - - -static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0); -static SENSOR_DEVICE_ATTR_RW(temp1_max, temp_max, 0); -static SENSOR_DEVICE_ATTR_RW(temp1_min, temp_min, 0); -static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1); -static SENSOR_DEVICE_ATTR_RW(temp2_max, temp_max, 1); -static SENSOR_DEVICE_ATTR_RW(temp2_min, temp_min, 1); -static SENSOR_DEVICE_ATTR_RO(temp1_max_alarm, alarm, 6); -static SENSOR_DEVICE_ATTR_RO(temp1_min_alarm, alarm, 5); -static SENSOR_DEVICE_ATTR_RO(temp2_max_alarm, alarm, 4); -static SENSOR_DEVICE_ATTR_RO(temp2_min_alarm, alarm, 3); -static SENSOR_DEVICE_ATTR_RO(temp2_fault, alarm, 2); - -static DEVICE_ATTR_RO(alarms); -static DEVICE_ATTR_RW(low_power); - -static struct attribute *adm1021_attributes[] = { - &sensor_dev_attr_temp1_max.dev_attr.attr, - &sensor_dev_attr_temp1_input.dev_attr.attr, - &sensor_dev_attr_temp2_max.dev_attr.attr, - &sensor_dev_attr_temp2_input.dev_attr.attr, - &sensor_dev_attr_temp1_max_alarm.dev_attr.attr, - &sensor_dev_attr_temp2_max_alarm.dev_attr.attr, - &sensor_dev_attr_temp2_fault.dev_attr.attr, - &dev_attr_alarms.attr, - &dev_attr_low_power.attr, - NULL -}; - -static const struct attribute_group adm1021_group = { - .attrs = adm1021_attributes, -}; - -static struct attribute *adm1021_min_attributes[] = { - &sensor_dev_attr_temp1_min.dev_attr.attr, - &sensor_dev_attr_temp2_min.dev_attr.attr, - &sensor_dev_attr_temp1_min_alarm.dev_attr.attr, - 
&sensor_dev_attr_temp2_min_alarm.dev_attr.attr, - NULL -}; - -static const struct attribute_group adm1021_min_group = { - .attrs = adm1021_min_attributes, -}; - -/* Return 0 if detection is successful, -ENODEV otherwise */ -static int adm1021_detect(struct i2c_client *client, - struct i2c_board_info *info) -{ - struct i2c_adapter *adapter = client->adapter; - const char *type_name; - int reg, conv_rate, status, config, man_id, dev_id; - - if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { - pr_debug("detect failed, smbus byte data not supported!\n"); - return -ENODEV; - } - - status = i2c_smbus_read_byte_data(client, ADM1021_REG_STATUS); - conv_rate = i2c_smbus_read_byte_data(client, - ADM1021_REG_CONV_RATE_R); - config = i2c_smbus_read_byte_data(client, ADM1021_REG_CONFIG_R); - - /* Check unused bits */ - if ((status & 0x03) || (config & 0x3F) || (conv_rate & 0xF8)) { - pr_debug("detect failed, chip not detected!\n"); - return -ENODEV; - } - - /* Determine the chip type. */ - man_id = i2c_smbus_read_byte_data(client, ADM1021_REG_MAN_ID); - dev_id = i2c_smbus_read_byte_data(client, ADM1021_REG_DEV_ID); - - if (man_id < 0 || dev_id < 0) - return -ENODEV; - - if (man_id == 0x4d && dev_id == 0x01) { - /* - * dev_id 0x01 matches MAX6680, MAX6695, MAX6696, and possibly - * others. Read register which is unsupported on MAX1617 but - * exists on all those chips and compare with the dev_id - * register. If it matches, it may be a MAX1617A. - */ - reg = i2c_smbus_read_byte_data(client, - ADM1023_REG_REM_TEMP_PREC); - if (reg != dev_id) - return -ENODEV; - type_name = "max1617a"; - } else if (man_id == 0x41) { - if ((dev_id & 0xF0) == 0x30) - type_name = "adm1023"; - else if ((dev_id & 0xF0) == 0x00) - type_name = "adm1021"; - else - return -ENODEV; - } else if (man_id == 0x49) - type_name = "thmc10"; - else if (man_id == 0x23) - type_name = "gl523sm"; - else if (man_id == 0x54) - type_name = "mc1066"; - else { - int lte, rte, lhi, rhi, llo, rlo; - - /* extra checks for LM84 and MAX1617 to avoid misdetections */ - - llo = i2c_smbus_read_byte_data(client, ADM1021_REG_THYST_R(0)); - rlo = i2c_smbus_read_byte_data(client, ADM1021_REG_THYST_R(1)); - - /* fail if any of the additional register reads failed */ - if (llo < 0 || rlo < 0) - return -ENODEV; - - lte = i2c_smbus_read_byte_data(client, ADM1021_REG_TEMP(0)); - rte = i2c_smbus_read_byte_data(client, ADM1021_REG_TEMP(1)); - lhi = i2c_smbus_read_byte_data(client, ADM1021_REG_TOS_R(0)); - rhi = i2c_smbus_read_byte_data(client, ADM1021_REG_TOS_R(1)); - - /* - * Fail for negative temperatures and negative high limits. - * This check also catches read errors on the tested registers. - */ - if ((s8)lte < 0 || (s8)rte < 0 || (s8)lhi < 0 || (s8)rhi < 0) - return -ENODEV; - - /* fail if all registers hold the same value */ - if (lte == rte && lte == lhi && lte == rhi && lte == llo - && lte == rlo) - return -ENODEV; - - /* - * LM84 Mfr ID is in a different place, - * and it has more unused bits. Registers at 0xfe and 0xff - * are undefined and return the most recently read value, - * here the value of the configuration register. 
- */ - if (conv_rate == 0x00 - && man_id == config && dev_id == config - && (config & 0x7F) == 0x00 - && (status & 0xAB) == 0x00) { - type_name = "lm84"; - } else { - if ((config & 0x3f) || (status & 0x03)) - return -ENODEV; - /* fail if low limits are larger than high limits */ - if ((s8)llo > lhi || (s8)rlo > rhi) - return -ENODEV; - type_name = "max1617"; - } - } - - pr_debug("Detected chip %s at adapter %d, address 0x%02x.\n", - type_name, i2c_adapter_id(adapter), client->addr); - strscpy(info->type, type_name, I2C_NAME_SIZE); - - return 0; -} - -static void adm1021_init_client(struct i2c_client *client) -{ - /* Enable ADC and disable suspend mode */ - i2c_smbus_write_byte_data(client, ADM1021_REG_CONFIG_W, - i2c_smbus_read_byte_data(client, ADM1021_REG_CONFIG_R) & 0xBF); - /* Set Conversion rate to 1/sec (this can be tinkered with) */ - i2c_smbus_write_byte_data(client, ADM1021_REG_CONV_RATE_W, 0x04); -} - -static const struct i2c_device_id adm1021_id[]; - -static int adm1021_probe(struct i2c_client *client) -{ - struct device *dev = &client->dev; - struct adm1021_data *data; - struct device *hwmon_dev; - - data = devm_kzalloc(dev, sizeof(struct adm1021_data), GFP_KERNEL); - if (!data) - return -ENOMEM; - - data->client = client; - data->type = i2c_match_id(adm1021_id, client)->driver_data; - mutex_init(&data->update_lock); - - /* Initialize the ADM1021 chip */ - if (data->type != lm84 && !read_only) - adm1021_init_client(client); - - data->groups[0] = &adm1021_group; - if (data->type != lm84) - data->groups[1] = &adm1021_min_group; - - hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name, - data, data->groups); - - return PTR_ERR_OR_ZERO(hwmon_dev); -} - -static const struct i2c_device_id adm1021_id[] = { - { "adm1021", adm1021 }, - { "adm1023", adm1023 }, - { "max1617", max1617 }, - { "max1617a", max1617a }, - { "thmc10", thmc10 }, - { "lm84", lm84 }, - { "gl523sm", gl523sm }, - { "mc1066", mc1066 }, - { } -}; -MODULE_DEVICE_TABLE(i2c, adm1021_id); - -static struct i2c_driver adm1021_driver = { - .class = I2C_CLASS_HWMON, - .driver = { - .name = "adm1021", - }, - .probe = adm1021_probe, - .id_table = adm1021_id, - .detect = adm1021_detect, - .address_list = normal_i2c, -}; - -module_i2c_driver(adm1021_driver); - -MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl> and " - "Philip Edelbrock <phil@netroedge.com>"); -MODULE_DESCRIPTION("adm1021 driver"); -MODULE_LICENSE("GPL"); - -module_param(read_only, bool, 0); -MODULE_PARM_DESC(read_only, "Don't set any values, read only mode"); diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c index 88c7e0d62d08..343118532cdb 100644 --- a/drivers/hwmon/adm1031.c +++ b/drivers/hwmon/adm1031.c @@ -1021,8 +1021,6 @@ static void adm1031_init_client(struct i2c_client *client) data->update_interval = update_intervals[i]; } -static const struct i2c_device_id adm1031_id[]; - static int adm1031_probe(struct i2c_client *client) { struct device *dev = &client->dev; @@ -1035,7 +1033,7 @@ static int adm1031_probe(struct i2c_client *client) i2c_set_clientdata(client, data); data->client = client; - data->chip_type = i2c_match_id(adm1031_id, client)->driver_data; + data->chip_type = (uintptr_t)i2c_get_match_data(client); mutex_init(&data->update_lock); if (data->chip_type == adm1030) diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c index 809e830f52a6..436637264056 100644 --- a/drivers/hwmon/ads7828.c +++ b/drivers/hwmon/ads7828.c @@ -99,8 +99,6 @@ static const struct regmap_config ads2830_regmap_config = { .val_bits = 8, }; 
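The ads7828 hunk below, like the ad7418, adm1031, adt7475 and aht10 changes around it, applies one recurring conversion: drop the forward-declared i2c_device_id table and the of_node branch in favor of i2c_get_match_data(), which consults the OF, ACPI and legacy I2C match tables in a single call. A minimal sketch of the resulting probe pattern (the driver name, enum and table contents are illustrative, not taken from these patches):

#include <linux/i2c.h>
#include <linux/module.h>

enum example_chip { example_a, example_b };	/* illustrative variants */

static int example_probe(struct i2c_client *client)
{
	/* one lookup replaces of_device_get_match_data() + i2c_match_id() */
	enum example_chip chip = (uintptr_t)i2c_get_match_data(client);

	dev_info(&client->dev, "probing variant %d\n", chip);
	return 0;
}

static const struct i2c_device_id example_id[] = {
	{ "example_a", example_a },
	{ "example_b", example_b },
	{ }
};
MODULE_DEVICE_TABLE(i2c, example_id);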
-static const struct i2c_device_id ads7828_device_ids[]; - static int ads7828_probe(struct i2c_client *client) { struct device *dev = &client->dev; @@ -138,10 +136,7 @@ static int ads7828_probe(struct i2c_client *client) } } - if (client->dev.of_node) - chip = (uintptr_t)of_device_get_match_data(&client->dev); - else - chip = i2c_match_id(ads7828_device_ids, client)->driver_data; + chip = (uintptr_t)i2c_get_match_data(client); /* Bound Vref with min/max values */ vref_mv = clamp_val(vref_mv, ADS7828_EXT_VREF_MV_MIN, diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c index 4224ffb30483..bc186c61a2c0 100644 --- a/drivers/hwmon/adt7475.c +++ b/drivers/hwmon/adt7475.c @@ -1676,7 +1676,6 @@ static int adt7475_probe(struct i2c_client *client) struct device *hwmon_dev; int i, ret = 0, revision, group_num = 0; u8 config3; - const struct i2c_device_id *id = i2c_match_id(adt7475_id, client); data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL); if (data == NULL) @@ -1686,10 +1685,7 @@ static int adt7475_probe(struct i2c_client *client) data->client = client; i2c_set_clientdata(client, data); - if (client->dev.of_node) - chip = (uintptr_t)of_device_get_match_data(&client->dev); - else - chip = id->driver_data; + chip = (uintptr_t)i2c_get_match_data(client); /* Initialize device-specific values */ switch (chip) { @@ -1717,7 +1713,7 @@ static int adt7475_probe(struct i2c_client *client) if (!(config3 & CONFIG3_SMBALERT)) data->has_pwm2 = 1; /* Meaning of this bit is inverted for the ADT7473-1 */ - if (id->driver_data == adt7473 && revision >= 1) + if (chip == adt7473 && revision >= 1) data->has_pwm2 = !data->has_pwm2; data->config4 = adt7475_read(REG_CONFIG4); @@ -1730,12 +1726,12 @@ static int adt7475_probe(struct i2c_client *client) * because 2 different pins (TACH4 and +2.5 Vin) can be used for * this function */ - if (id->driver_data == adt7490) { + if (chip == adt7490) { if ((data->config4 & CONFIG4_PINFUNC) == 0x1 && !(config3 & CONFIG3_THERM)) data->has_fan4 = 1; } - if (id->driver_data == adt7476 || id->driver_data == adt7490) { + if (chip == adt7476 || chip == adt7490) { if (!(config3 & CONFIG3_THERM) || (data->config4 & CONFIG4_PINFUNC) == 0x1) data->has_voltage |= (1 << 0); /* in0 */ @@ -1745,7 +1741,7 @@ static int adt7475_probe(struct i2c_client *client) * On the ADT7476, the +12V input pin may instead be used as VID5, * and VID pins may alternatively be used as GPIO */ - if (id->driver_data == adt7476) { + if (chip == adt7476) { u8 vid = adt7475_read(REG_VID); if (!(vid & VID_VIDSEL)) data->has_voltage |= (1 << 4); /* in4 */ @@ -1829,7 +1825,7 @@ static int adt7475_probe(struct i2c_client *client) } dev_info(&client->dev, "%s device, revision %d\n", - names[id->driver_data], revision); + names[chip], revision); if ((data->has_voltage & 0x11) || data->has_fan4 || data->has_pwm2) dev_info(&client->dev, "Optional features:%s%s%s%s%s\n", (data->has_voltage & (1 << 0)) ? 
" in0" : "", @@ -1900,7 +1896,7 @@ static void adt7475_read_pwm(struct i2c_client *client, int index) data->pwm[CONTROL][index] &= ~0xE0; data->pwm[CONTROL][index] |= (7 << 5); - i2c_smbus_write_byte_data(client, PWM_CONFIG_REG(index), + i2c_smbus_write_byte_data(client, PWM_REG(index), data->pwm[INPUT][index]); i2c_smbus_write_byte_data(client, PWM_CONFIG_REG(index), diff --git a/drivers/hwmon/aht10.c b/drivers/hwmon/aht10.c index f136bf3ff40a..312ef3e98754 100644 --- a/drivers/hwmon/aht10.c +++ b/drivers/hwmon/aht10.c @@ -331,8 +331,7 @@ static const struct hwmon_chip_info aht10_chip_info = { static int aht10_probe(struct i2c_client *client) { - const struct i2c_device_id *id = i2c_match_id(aht10_id, client); - enum aht10_variant variant = id->driver_data; + enum aht10_variant variant = (uintptr_t)i2c_get_match_data(client); struct device *device = &client->dev; struct device *hwmon_dev; struct aht10_data *data; diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c index 9b02b304c2f5..ec94392fcb65 100644 --- a/drivers/hwmon/amc6821.c +++ b/drivers/hwmon/amc6821.c @@ -6,18 +6,24 @@ * * Based on max6650.c: * Copyright (C) 2007 Hans J. Koch <hjk@hansjkoch.de> + * + * Conversion to regmap and with_info API: + * Copyright (C) 2024 Guenter Roeck <linux@roeck-us.net> */ -#include <linux/kernel.h> /* Needed for KERN_INFO */ -#include <linux/module.h> -#include <linux/init.h> -#include <linux/slab.h> -#include <linux/jiffies.h> -#include <linux/i2c.h> +#include <linux/bitfield.h> +#include <linux/bitops.h> +#include <linux/bits.h> +#include <linux/err.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> -#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/init.h> +#include <linux/minmax.h> +#include <linux/module.h> #include <linux/mutex.h> +#include <linux/regmap.h> +#include <linux/slab.h> /* * Addresses to scan. 
@@ -36,519 +42,594 @@ module_param(pwminv, int, 0444); static int init = 1; /*Power-on initialization.*/ module_param(init, int, 0444); -enum chips { amc6821 }; - -#define AMC6821_REG_DEV_ID 0x3D -#define AMC6821_REG_COMP_ID 0x3E -#define AMC6821_REG_CONF1 0x00 -#define AMC6821_REG_CONF2 0x01 -#define AMC6821_REG_CONF3 0x3F -#define AMC6821_REG_CONF4 0x04 -#define AMC6821_REG_STAT1 0x02 -#define AMC6821_REG_STAT2 0x03 -#define AMC6821_REG_TDATA_LOW 0x08 -#define AMC6821_REG_TDATA_HI 0x09 -#define AMC6821_REG_LTEMP_HI 0x0A -#define AMC6821_REG_RTEMP_HI 0x0B -#define AMC6821_REG_LTEMP_LIMIT_MIN 0x15 -#define AMC6821_REG_LTEMP_LIMIT_MAX 0x14 -#define AMC6821_REG_RTEMP_LIMIT_MIN 0x19 -#define AMC6821_REG_RTEMP_LIMIT_MAX 0x18 -#define AMC6821_REG_LTEMP_CRIT 0x1B -#define AMC6821_REG_RTEMP_CRIT 0x1D -#define AMC6821_REG_PSV_TEMP 0x1C -#define AMC6821_REG_DCY 0x22 -#define AMC6821_REG_LTEMP_FAN_CTRL 0x24 -#define AMC6821_REG_RTEMP_FAN_CTRL 0x25 -#define AMC6821_REG_DCY_LOW_TEMP 0x21 - -#define AMC6821_REG_TACH_LLIMITL 0x10 -#define AMC6821_REG_TACH_LLIMITH 0x11 -#define AMC6821_REG_TACH_HLIMITL 0x12 -#define AMC6821_REG_TACH_HLIMITH 0x13 - -#define AMC6821_CONF1_START 0x01 -#define AMC6821_CONF1_FAN_INT_EN 0x02 -#define AMC6821_CONF1_FANIE 0x04 -#define AMC6821_CONF1_PWMINV 0x08 -#define AMC6821_CONF1_FAN_FAULT_EN 0x10 -#define AMC6821_CONF1_FDRC0 0x20 -#define AMC6821_CONF1_FDRC1 0x40 -#define AMC6821_CONF1_THERMOVIE 0x80 - -#define AMC6821_CONF2_PWM_EN 0x01 -#define AMC6821_CONF2_TACH_MODE 0x02 -#define AMC6821_CONF2_TACH_EN 0x04 -#define AMC6821_CONF2_RTFIE 0x08 -#define AMC6821_CONF2_LTOIE 0x10 -#define AMC6821_CONF2_RTOIE 0x20 -#define AMC6821_CONF2_PSVIE 0x40 -#define AMC6821_CONF2_RST 0x80 - -#define AMC6821_CONF3_THERM_FAN_EN 0x80 -#define AMC6821_CONF3_REV_MASK 0x0F - -#define AMC6821_CONF4_OVREN 0x10 -#define AMC6821_CONF4_TACH_FAST 0x20 -#define AMC6821_CONF4_PSPR 0x40 -#define AMC6821_CONF4_MODE 0x80 - -#define AMC6821_STAT1_RPM_ALARM 0x01 -#define AMC6821_STAT1_FANS 0x02 -#define AMC6821_STAT1_RTH 0x04 -#define AMC6821_STAT1_RTL 0x08 -#define AMC6821_STAT1_R_THERM 0x10 -#define AMC6821_STAT1_RTF 0x20 -#define AMC6821_STAT1_LTH 0x40 -#define AMC6821_STAT1_LTL 0x80 - -#define AMC6821_STAT2_RTC 0x08 -#define AMC6821_STAT2_LTC 0x10 -#define AMC6821_STAT2_LPSV 0x20 -#define AMC6821_STAT2_L_THERM 0x40 -#define AMC6821_STAT2_THERM_IN 0x80 - -enum {IDX_TEMP1_INPUT = 0, IDX_TEMP1_MIN, IDX_TEMP1_MAX, - IDX_TEMP1_CRIT, IDX_TEMP2_INPUT, IDX_TEMP2_MIN, - IDX_TEMP2_MAX, IDX_TEMP2_CRIT, - TEMP_IDX_LEN, }; - -static const u8 temp_reg[] = {AMC6821_REG_LTEMP_HI, - AMC6821_REG_LTEMP_LIMIT_MIN, - AMC6821_REG_LTEMP_LIMIT_MAX, - AMC6821_REG_LTEMP_CRIT, - AMC6821_REG_RTEMP_HI, - AMC6821_REG_RTEMP_LIMIT_MIN, - AMC6821_REG_RTEMP_LIMIT_MAX, - AMC6821_REG_RTEMP_CRIT, }; - -enum {IDX_FAN1_INPUT = 0, IDX_FAN1_MIN, IDX_FAN1_MAX, - FAN1_IDX_LEN, }; - -static const u8 fan_reg_low[] = {AMC6821_REG_TDATA_LOW, - AMC6821_REG_TACH_LLIMITL, - AMC6821_REG_TACH_HLIMITL, }; - - -static const u8 fan_reg_hi[] = {AMC6821_REG_TDATA_HI, - AMC6821_REG_TACH_LLIMITH, - AMC6821_REG_TACH_HLIMITH, }; +#define AMC6821_REG_DEV_ID 0x3D +#define AMC6821_REG_COMP_ID 0x3E +#define AMC6821_REG_CONF1 0x00 +#define AMC6821_REG_CONF2 0x01 +#define AMC6821_REG_CONF3 0x3F +#define AMC6821_REG_CONF4 0x04 +#define AMC6821_REG_STAT1 0x02 +#define AMC6821_REG_STAT2 0x03 +#define AMC6821_REG_TEMP_LO 0x06 +#define AMC6821_REG_TDATA_LOW 0x08 +#define AMC6821_REG_TDATA_HI 0x09 +#define AMC6821_REG_LTEMP_HI 0x0A +#define AMC6821_REG_RTEMP_HI 0x0B +#define 
AMC6821_REG_LTEMP_LIMIT_MIN 0x15 +#define AMC6821_REG_LTEMP_LIMIT_MAX 0x14 +#define AMC6821_REG_RTEMP_LIMIT_MIN 0x19 +#define AMC6821_REG_RTEMP_LIMIT_MAX 0x18 +#define AMC6821_REG_LTEMP_CRIT 0x1B +#define AMC6821_REG_RTEMP_CRIT 0x1D +#define AMC6821_REG_PSV_TEMP 0x1C +#define AMC6821_REG_DCY 0x22 +#define AMC6821_REG_LTEMP_FAN_CTRL 0x24 +#define AMC6821_REG_RTEMP_FAN_CTRL 0x25 +#define AMC6821_REG_DCY_LOW_TEMP 0x21 + +#define AMC6821_REG_TACH_LLIMITL 0x10 +#define AMC6821_REG_TACH_HLIMITL 0x12 +#define AMC6821_REG_TACH_SETTINGL 0x1e + +#define AMC6821_CONF1_START BIT(0) +#define AMC6821_CONF1_FAN_INT_EN BIT(1) +#define AMC6821_CONF1_FANIE BIT(2) +#define AMC6821_CONF1_PWMINV BIT(3) +#define AMC6821_CONF1_FAN_FAULT_EN BIT(4) +#define AMC6821_CONF1_FDRC0 BIT(5) +#define AMC6821_CONF1_FDRC1 BIT(6) +#define AMC6821_CONF1_THERMOVIE BIT(7) + +#define AMC6821_CONF2_PWM_EN BIT(0) +#define AMC6821_CONF2_TACH_MODE BIT(1) +#define AMC6821_CONF2_TACH_EN BIT(2) +#define AMC6821_CONF2_RTFIE BIT(3) +#define AMC6821_CONF2_LTOIE BIT(4) +#define AMC6821_CONF2_RTOIE BIT(5) +#define AMC6821_CONF2_PSVIE BIT(6) +#define AMC6821_CONF2_RST BIT(7) + +#define AMC6821_CONF3_THERM_FAN_EN BIT(7) +#define AMC6821_CONF3_REV_MASK GENMASK(3, 0) + +#define AMC6821_CONF4_OVREN BIT(4) +#define AMC6821_CONF4_TACH_FAST BIT(5) +#define AMC6821_CONF4_PSPR BIT(6) +#define AMC6821_CONF4_MODE BIT(7) + +#define AMC6821_STAT1_RPM_ALARM BIT(0) +#define AMC6821_STAT1_FANS BIT(1) +#define AMC6821_STAT1_RTH BIT(2) +#define AMC6821_STAT1_RTL BIT(3) +#define AMC6821_STAT1_R_THERM BIT(4) +#define AMC6821_STAT1_RTF BIT(5) +#define AMC6821_STAT1_LTH BIT(6) +#define AMC6821_STAT1_LTL BIT(7) + +#define AMC6821_STAT2_RTC BIT(3) +#define AMC6821_STAT2_LTC BIT(4) +#define AMC6821_STAT2_LPSV BIT(5) +#define AMC6821_STAT2_L_THERM BIT(6) +#define AMC6821_STAT2_THERM_IN BIT(7) + +#define AMC6821_TEMP_SLOPE_MASK GENMASK(2, 0) +#define AMC6821_TEMP_LIMIT_MASK GENMASK(7, 3) /* * Client data (each client gets its own) */ struct amc6821_data { - struct i2c_client *client; + struct regmap *regmap; struct mutex update_lock; - bool valid; /* false until following fields are valid */ - unsigned long last_updated; /* in jiffies */ +}; - /* register values */ - int temp[TEMP_IDX_LEN]; +/* + * Return 0 on success or negative error code. + * + * temps returns set of three temperatures, in °C: + * temps[0]: Passive cooling temperature, applies to both channels + * temps[1]: Low temperature, start slope calculations + * temps[2]: High temperature + * + * Channel 0: local, channel 1: remote. + */ +static int amc6821_get_auto_point_temps(struct regmap *regmap, int channel, u8 *temps) +{ + u32 pwm, regval; + int err; - u16 fan[FAN1_IDX_LEN]; - u8 fan1_div; + err = regmap_read(regmap, AMC6821_REG_DCY_LOW_TEMP, &pwm); + if (err) + return err; - u8 pwm1; - u8 temp1_auto_point_temp[3]; - u8 temp2_auto_point_temp[3]; - u8 pwm1_auto_point_pwm[3]; - u8 pwm1_enable; - u8 pwm1_auto_channels_temp; + err = regmap_read(regmap, AMC6821_REG_PSV_TEMP, ®val); + if (err) + return err; + temps[0] = regval; - u8 stat1; - u8 stat2; -}; + err = regmap_read(regmap, + channel ? 
AMC6821_REG_RTEMP_FAN_CTRL : AMC6821_REG_LTEMP_FAN_CTRL, + ®val); + if (err) + return err; + temps[1] = FIELD_GET(AMC6821_TEMP_LIMIT_MASK, regval) * 4; + + /* slope is 32 >> <slope bits> in °C */ + regval = 32 >> FIELD_GET(AMC6821_TEMP_SLOPE_MASK, regval); + if (regval) + temps[2] = temps[1] + DIV_ROUND_CLOSEST(255 - pwm, regval); + else + temps[2] = 255; + + return 0; +} -static struct amc6821_data *amc6821_update_device(struct device *dev) +static int amc6821_temp_read_values(struct regmap *regmap, u32 attr, int channel, long *val) { - struct amc6821_data *data = dev_get_drvdata(dev); - struct i2c_client *client = data->client; - int timeout = HZ; - u8 reg; - int i; + int reg, err; + u32 regval; - mutex_lock(&data->update_lock); + switch (attr) { + case hwmon_temp_input: + reg = channel ? AMC6821_REG_RTEMP_HI : AMC6821_REG_LTEMP_HI; + break; + case hwmon_temp_min: + reg = channel ? AMC6821_REG_RTEMP_LIMIT_MIN : AMC6821_REG_LTEMP_LIMIT_MIN; + break; + case hwmon_temp_max: + reg = channel ? AMC6821_REG_RTEMP_LIMIT_MAX : AMC6821_REG_LTEMP_LIMIT_MAX; + break; + case hwmon_temp_crit: + reg = channel ? AMC6821_REG_RTEMP_CRIT : AMC6821_REG_LTEMP_CRIT; + break; + default: + return -EOPNOTSUPP; + } + err = regmap_read(regmap, reg, ®val); + if (err) + return err; + *val = sign_extend32(regval, 7) * 1000; + return 0; +} - if (time_after(jiffies, data->last_updated + timeout) || - !data->valid) { - - for (i = 0; i < TEMP_IDX_LEN; i++) - data->temp[i] = (int8_t)i2c_smbus_read_byte_data( - client, temp_reg[i]); - - data->stat1 = i2c_smbus_read_byte_data(client, - AMC6821_REG_STAT1); - data->stat2 = i2c_smbus_read_byte_data(client, - AMC6821_REG_STAT2); - - data->pwm1 = i2c_smbus_read_byte_data(client, - AMC6821_REG_DCY); - for (i = 0; i < FAN1_IDX_LEN; i++) { - data->fan[i] = i2c_smbus_read_byte_data( - client, - fan_reg_low[i]); - data->fan[i] += i2c_smbus_read_byte_data( - client, - fan_reg_hi[i]) << 8; - } - data->fan1_div = i2c_smbus_read_byte_data(client, - AMC6821_REG_CONF4); - data->fan1_div = data->fan1_div & AMC6821_CONF4_PSPR ? 
4 : 2; - - data->pwm1_auto_point_pwm[0] = 0; - data->pwm1_auto_point_pwm[2] = 255; - data->pwm1_auto_point_pwm[1] = i2c_smbus_read_byte_data(client, - AMC6821_REG_DCY_LOW_TEMP); - - data->temp1_auto_point_temp[0] = - i2c_smbus_read_byte_data(client, - AMC6821_REG_PSV_TEMP); - data->temp2_auto_point_temp[0] = - data->temp1_auto_point_temp[0]; - reg = i2c_smbus_read_byte_data(client, - AMC6821_REG_LTEMP_FAN_CTRL); - data->temp1_auto_point_temp[1] = (reg & 0xF8) >> 1; - reg &= 0x07; - reg = 0x20 >> reg; - if (reg > 0) - data->temp1_auto_point_temp[2] = - data->temp1_auto_point_temp[1] + - (data->pwm1_auto_point_pwm[2] - - data->pwm1_auto_point_pwm[1]) / reg; - else - data->temp1_auto_point_temp[2] = 255; - - reg = i2c_smbus_read_byte_data(client, - AMC6821_REG_RTEMP_FAN_CTRL); - data->temp2_auto_point_temp[1] = (reg & 0xF8) >> 1; - reg &= 0x07; - reg = 0x20 >> reg; - if (reg > 0) - data->temp2_auto_point_temp[2] = - data->temp2_auto_point_temp[1] + - (data->pwm1_auto_point_pwm[2] - - data->pwm1_auto_point_pwm[1]) / reg; - else - data->temp2_auto_point_temp[2] = 255; - - reg = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF1); - reg = (reg >> 5) & 0x3; - switch (reg) { - case 0: /*open loop: software sets pwm1*/ - data->pwm1_auto_channels_temp = 0; - data->pwm1_enable = 1; +static int amc6821_read_alarms(struct regmap *regmap, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + int reg, mask, err; + u32 regval; + + switch (type) { + case hwmon_temp: + switch (attr) { + case hwmon_temp_min_alarm: + reg = AMC6821_REG_STAT1; + mask = channel ? AMC6821_STAT1_RTL : AMC6821_STAT1_LTL; break; - case 2: /*closed loop: remote T (temp2)*/ - data->pwm1_auto_channels_temp = 2; - data->pwm1_enable = 2; + case hwmon_temp_max_alarm: + reg = AMC6821_REG_STAT1; + mask = channel ? AMC6821_STAT1_RTH : AMC6821_STAT1_LTH; break; - case 3: /*closed loop: local and remote T (temp2)*/ - data->pwm1_auto_channels_temp = 3; - data->pwm1_enable = 3; + case hwmon_temp_crit_alarm: + reg = AMC6821_REG_STAT2; + mask = channel ? 
AMC6821_STAT2_RTC : AMC6821_STAT2_LTC; break; - case 1: /* - * semi-open loop: software sets rpm, chip controls - * pwm1, currently not implemented - */ - data->pwm1_auto_channels_temp = 0; - data->pwm1_enable = 0; + case hwmon_temp_fault: + reg = AMC6821_REG_STAT1; + mask = AMC6821_STAT1_RTF; break; + default: + return -EOPNOTSUPP; } - - data->last_updated = jiffies; - data->valid = true; + break; + case hwmon_fan: + switch (attr) { + case hwmon_fan_fault: + reg = AMC6821_REG_STAT1; + mask = AMC6821_STAT1_FANS; + break; + default: + return -EOPNOTSUPP; + } + break; + default: + return -EOPNOTSUPP; } - mutex_unlock(&data->update_lock); - return data; + err = regmap_read(regmap, reg, ®val); + if (err) + return err; + *val = !!(regval & mask); + return 0; } -static ssize_t temp_show(struct device *dev, struct device_attribute *devattr, - char *buf) +static int amc6821_temp_read(struct device *dev, u32 attr, int channel, long *val) { - struct amc6821_data *data = amc6821_update_device(dev); - int ix = to_sensor_dev_attr(devattr)->index; + struct amc6821_data *data = dev_get_drvdata(dev); - return sprintf(buf, "%d\n", data->temp[ix] * 1000); + switch (attr) { + case hwmon_temp_input: + case hwmon_temp_min: + case hwmon_temp_max: + case hwmon_temp_crit: + return amc6821_temp_read_values(data->regmap, attr, channel, val); + case hwmon_temp_min_alarm: + case hwmon_temp_max_alarm: + case hwmon_temp_crit_alarm: + case hwmon_temp_fault: + return amc6821_read_alarms(data->regmap, hwmon_temp, attr, channel, val); + default: + return -EOPNOTSUPP; + } } -static ssize_t temp_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +static int amc6821_temp_write(struct device *dev, u32 attr, int channel, long val) { struct amc6821_data *data = dev_get_drvdata(dev); - struct i2c_client *client = data->client; - int ix = to_sensor_dev_attr(attr)->index; - long val; + int reg; - int ret = kstrtol(buf, 10, &val); - if (ret) - return ret; - val = clamp_val(val / 1000, -128, 127); + val = DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), 1000); - mutex_lock(&data->update_lock); - data->temp[ix] = val; - if (i2c_smbus_write_byte_data(client, temp_reg[ix], data->temp[ix])) { - dev_err(&client->dev, "Register write error, aborting.\n"); - count = -EIO; + switch (attr) { + case hwmon_temp_min: + reg = channel ? AMC6821_REG_RTEMP_LIMIT_MIN : AMC6821_REG_LTEMP_LIMIT_MIN; + break; + case hwmon_temp_max: + reg = channel ? AMC6821_REG_RTEMP_LIMIT_MAX : AMC6821_REG_LTEMP_LIMIT_MAX; + break; + case hwmon_temp_crit: + reg = channel ? 
AMC6821_REG_RTEMP_CRIT : AMC6821_REG_LTEMP_CRIT; + break; + default: + return -EOPNOTSUPP; } - mutex_unlock(&data->update_lock); - return count; + return regmap_write(data->regmap, reg, val); } -static ssize_t temp_alarm_show(struct device *dev, - struct device_attribute *devattr, char *buf) +static int amc6821_pwm_read(struct device *dev, u32 attr, long *val) { - struct amc6821_data *data = amc6821_update_device(dev); - int ix = to_sensor_dev_attr(devattr)->index; - u8 flag; + struct amc6821_data *data = dev_get_drvdata(dev); + struct regmap *regmap = data->regmap; + u32 regval; + int err; - switch (ix) { - case IDX_TEMP1_MIN: - flag = data->stat1 & AMC6821_STAT1_LTL; - break; - case IDX_TEMP1_MAX: - flag = data->stat1 & AMC6821_STAT1_LTH; + switch (attr) { + case hwmon_pwm_enable: + err = regmap_read(regmap, AMC6821_REG_CONF1, ®val); + if (err) + return err; + switch (regval & (AMC6821_CONF1_FDRC0 | AMC6821_CONF1_FDRC1)) { + case 0: + *val = 1; /* manual */ + break; + case AMC6821_CONF1_FDRC0: + *val = 4; /* target rpm (fan1_target) controlled */ + break; + case AMC6821_CONF1_FDRC1: + *val = 2; /* remote temp controlled */ + break; + default: + *val = 3; /* max(local, remote) temp controlled */ + break; + } + return 0; + case hwmon_pwm_mode: + err = regmap_read(regmap, AMC6821_REG_CONF2, ®val); + if (err) + return err; + *val = !!(regval & AMC6821_CONF2_TACH_MODE); + return 0; + case hwmon_pwm_auto_channels_temp: + err = regmap_read(regmap, AMC6821_REG_CONF1, ®val); + if (err) + return err; + switch (regval & (AMC6821_CONF1_FDRC0 | AMC6821_CONF1_FDRC1)) { + case 0: + case AMC6821_CONF1_FDRC0: + *val = 0; /* manual or target rpm controlled */ + break; + case AMC6821_CONF1_FDRC1: + *val = 2; /* remote temp controlled */ + break; + default: + *val = 3; /* max(local, remote) temp controlled */ + break; + } + return 0; + case hwmon_pwm_input: + err = regmap_read(regmap, AMC6821_REG_DCY, ®val); + if (err) + return err; + *val = regval; + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int amc6821_pwm_write(struct device *dev, u32 attr, long val) +{ + struct amc6821_data *data = dev_get_drvdata(dev); + struct regmap *regmap = data->regmap; + u32 mode; + + switch (attr) { + case hwmon_pwm_enable: + switch (val) { + case 1: + mode = 0; + break; + case 2: + mode = AMC6821_CONF1_FDRC1; + break; + case 3: + mode = AMC6821_CONF1_FDRC0 | AMC6821_CONF1_FDRC1; + break; + case 4: + mode = AMC6821_CONF1_FDRC0; + break; + default: + return -EINVAL; + } + return regmap_update_bits(regmap, AMC6821_REG_CONF1, + AMC6821_CONF1_FDRC0 | AMC6821_CONF1_FDRC1, + mode); + case hwmon_pwm_mode: + if (val < 0 || val > 1) + return -EINVAL; + return regmap_update_bits(regmap, AMC6821_REG_CONF2, + AMC6821_CONF2_TACH_MODE, + val ? 
AMC6821_CONF2_TACH_MODE : 0); break; - case IDX_TEMP1_CRIT: - flag = data->stat2 & AMC6821_STAT2_LTC; + case hwmon_pwm_input: + if (val < 0 || val > 255) + return -EINVAL; + return regmap_write(regmap, AMC6821_REG_DCY, val); + default: + return -EOPNOTSUPP; + } +} + +static int amc6821_fan_read_rpm(struct regmap *regmap, u32 attr, long *val) +{ + int reg, err; + u8 regs[2]; + u32 regval; + + switch (attr) { + case hwmon_fan_input: + reg = AMC6821_REG_TDATA_LOW; break; - case IDX_TEMP2_MIN: - flag = data->stat1 & AMC6821_STAT1_RTL; + case hwmon_fan_min: + reg = AMC6821_REG_TACH_LLIMITL; break; - case IDX_TEMP2_MAX: - flag = data->stat1 & AMC6821_STAT1_RTH; + case hwmon_fan_max: + reg = AMC6821_REG_TACH_HLIMITL; break; - case IDX_TEMP2_CRIT: - flag = data->stat2 & AMC6821_STAT2_RTC; + case hwmon_fan_target: + reg = AMC6821_REG_TACH_SETTINGL; break; default: - dev_dbg(dev, "Unknown attr->index (%d).\n", ix); - return -EINVAL; + return -EOPNOTSUPP; } - if (flag) - return sprintf(buf, "1"); - else - return sprintf(buf, "0"); -} -static ssize_t temp2_fault_show(struct device *dev, - struct device_attribute *devattr, char *buf) -{ - struct amc6821_data *data = amc6821_update_device(dev); - if (data->stat1 & AMC6821_STAT1_RTF) - return sprintf(buf, "1"); - else - return sprintf(buf, "0"); -} + err = regmap_bulk_read(regmap, reg, regs, 2); + if (err) + return err; -static ssize_t pwm1_show(struct device *dev, struct device_attribute *devattr, - char *buf) -{ - struct amc6821_data *data = amc6821_update_device(dev); - return sprintf(buf, "%d\n", data->pwm1); + regval = (regs[1] << 8) | regs[0]; + *val = regval ? 6000000 / regval : 0; + + return 0; } -static ssize_t pwm1_store(struct device *dev, - struct device_attribute *devattr, const char *buf, - size_t count) +static int amc6821_fan_read(struct device *dev, u32 attr, long *val) { struct amc6821_data *data = dev_get_drvdata(dev); - struct i2c_client *client = data->client; - long val; - int ret = kstrtol(buf, 10, &val); - if (ret) - return ret; - - mutex_lock(&data->update_lock); - data->pwm1 = clamp_val(val , 0, 255); - i2c_smbus_write_byte_data(client, AMC6821_REG_DCY, data->pwm1); - mutex_unlock(&data->update_lock); - return count; -} + struct regmap *regmap = data->regmap; + u32 regval; + int err; -static ssize_t pwm1_enable_show(struct device *dev, - struct device_attribute *devattr, char *buf) -{ - struct amc6821_data *data = amc6821_update_device(dev); - return sprintf(buf, "%d\n", data->pwm1_enable); + switch (attr) { + case hwmon_fan_input: + case hwmon_fan_min: + case hwmon_fan_max: + case hwmon_fan_target: + return amc6821_fan_read_rpm(regmap, attr, val); + case hwmon_fan_fault: + return amc6821_read_alarms(regmap, hwmon_fan, attr, 0, val); + case hwmon_fan_pulses: + err = regmap_read(regmap, AMC6821_REG_CONF4, ®val); + if (err) + return err; + *val = (regval & AMC6821_CONF4_PSPR) ? 
4 : 2; + return 0; + default: + return -EOPNOTSUPP; + } } -static ssize_t pwm1_enable_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +static int amc6821_fan_write(struct device *dev, u32 attr, long val) { struct amc6821_data *data = dev_get_drvdata(dev); - struct i2c_client *client = data->client; - long val; - int config = kstrtol(buf, 10, &val); - if (config) - return config; - - mutex_lock(&data->update_lock); - config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF1); - if (config < 0) { - dev_err(&client->dev, - "Error reading configuration register, aborting.\n"); - count = config; - goto unlock; + struct regmap *regmap = data->regmap; + u8 regs[2]; + int reg; + + if (attr == hwmon_fan_pulses) { + if (val != 2 && val != 4) + return -EINVAL; + return regmap_update_bits(regmap, AMC6821_REG_CONF4, + AMC6821_CONF4_PSPR, + val == 4 ? AMC6821_CONF4_PSPR : 0); } - switch (val) { - case 1: - config &= ~AMC6821_CONF1_FDRC0; - config &= ~AMC6821_CONF1_FDRC1; + if (val < 0) + return -EINVAL; + + switch (attr) { + case hwmon_fan_min: + if (!val) /* no unlimited minimum speed */ + return -EINVAL; + reg = AMC6821_REG_TACH_LLIMITL; break; - case 2: - config &= ~AMC6821_CONF1_FDRC0; - config |= AMC6821_CONF1_FDRC1; + case hwmon_fan_max: + reg = AMC6821_REG_TACH_HLIMITL; break; - case 3: - config |= AMC6821_CONF1_FDRC0; - config |= AMC6821_CONF1_FDRC1; + case hwmon_fan_target: + if (!val) /* no unlimited target speed */ + return -EINVAL; + reg = AMC6821_REG_TACH_SETTINGL; break; default: - count = -EINVAL; - goto unlock; + return -EOPNOTSUPP; } - if (i2c_smbus_write_byte_data(client, AMC6821_REG_CONF1, config)) { - dev_err(&client->dev, - "Configuration register write error, aborting.\n"); - count = -EIO; - } -unlock: - mutex_unlock(&data->update_lock); - return count; -} -static ssize_t pwm1_auto_channels_temp_show(struct device *dev, - struct device_attribute *devattr, - char *buf) -{ - struct amc6821_data *data = amc6821_update_device(dev); - return sprintf(buf, "%d\n", data->pwm1_auto_channels_temp); + val = val ? 
6000000 / clamp_val(val, 1, 6000000) : 0; + val = clamp_val(val, 0, 0xffff); + + regs[0] = val & 0xff; + regs[1] = val >> 8; + + return regmap_bulk_write(data->regmap, reg, regs, 2); } static ssize_t temp_auto_point_temp_show(struct device *dev, struct device_attribute *devattr, char *buf) { + struct amc6821_data *data = dev_get_drvdata(dev); int ix = to_sensor_dev_attr_2(devattr)->index; int nr = to_sensor_dev_attr_2(devattr)->nr; - struct amc6821_data *data = amc6821_update_device(dev); - switch (nr) { - case 1: - return sprintf(buf, "%d\n", - data->temp1_auto_point_temp[ix] * 1000); - case 2: - return sprintf(buf, "%d\n", - data->temp2_auto_point_temp[ix] * 1000); - default: - dev_dbg(dev, "Unknown attr->nr (%d).\n", nr); - return -EINVAL; - } + u8 temps[3]; + int err; + + mutex_lock(&data->update_lock); + err = amc6821_get_auto_point_temps(data->regmap, nr, temps); + mutex_unlock(&data->update_lock); + if (err) + return err; + + return sysfs_emit(buf, "%d\n", temps[ix] * 1000); } static ssize_t pwm1_auto_point_pwm_show(struct device *dev, struct device_attribute *devattr, char *buf) { + struct amc6821_data *data = dev_get_drvdata(dev); int ix = to_sensor_dev_attr(devattr)->index; - struct amc6821_data *data = amc6821_update_device(dev); - return sprintf(buf, "%d\n", data->pwm1_auto_point_pwm[ix]); + u32 val; + int err; + + switch (ix) { + case 0: + val = 0; + break; + case 1: + err = regmap_read(data->regmap, AMC6821_REG_DCY_LOW_TEMP, &val); + if (err) + return err; + break; + default: + val = 255; + break; + } + return sysfs_emit(buf, "%d\n", val); } -static inline ssize_t set_slope_register(struct i2c_client *client, - u8 reg, - u8 dpwm, - u8 *ptemp) +/* + * Set TEMP[0-4] (low temperature) and SLP[0-2] (slope) of local or remote + * TEMP-FAN control register. + * + * Return 0 on success or negative error code. + * + * Channel 0: local, channel 1: remote + */ +static inline int set_slope_register(struct regmap *regmap, int channel, u8 *temps) { - int dt; - u8 tmp; + u8 regval = FIELD_PREP(AMC6821_TEMP_LIMIT_MASK, temps[1] / 4); + u8 tmp, dpwm; + int err, dt; + u32 pwm; + + err = regmap_read(regmap, AMC6821_REG_DCY_LOW_TEMP, &pwm); + if (err) + return err; - dt = ptemp[2]-ptemp[1]; + dpwm = 255 - pwm; + + dt = temps[2] - temps[1]; for (tmp = 4; tmp > 0; tmp--) { - if (dt * (0x20 >> tmp) >= dpwm) + if (dt * (32 >> tmp) >= dpwm) break; } - tmp |= (ptemp[1] & 0x7C) << 1; - if (i2c_smbus_write_byte_data(client, - reg, tmp)) { - dev_err(&client->dev, "Register write error, aborting.\n"); - return -EIO; - } - return 0; + regval |= FIELD_PREP(AMC6821_TEMP_SLOPE_MASK, tmp); + + return regmap_write(regmap, + channel ? 
AMC6821_REG_RTEMP_FAN_CTRL : AMC6821_REG_LTEMP_FAN_CTRL, + regval); } static ssize_t temp_auto_point_temp_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct amc6821_data *data = amc6821_update_device(dev); - struct i2c_client *client = data->client; + struct amc6821_data *data = dev_get_drvdata(dev); int ix = to_sensor_dev_attr_2(attr)->index; int nr = to_sensor_dev_attr_2(attr)->nr; - u8 *ptemp; - u8 reg; - int dpwm; + struct regmap *regmap = data->regmap; + u8 temps[3], otemps[3]; long val; - int ret = kstrtol(buf, 10, &val); + int ret; + + ret = kstrtol(buf, 10, &val); if (ret) return ret; - switch (nr) { - case 1: - ptemp = data->temp1_auto_point_temp; - reg = AMC6821_REG_LTEMP_FAN_CTRL; - break; - case 2: - ptemp = data->temp2_auto_point_temp; - reg = AMC6821_REG_RTEMP_FAN_CTRL; - break; - default: - dev_dbg(dev, "Unknown attr->nr (%d).\n", nr); - return -EINVAL; - } - mutex_lock(&data->update_lock); - data->valid = false; + + ret = amc6821_get_auto_point_temps(data->regmap, nr, temps); + if (ret) + goto unlock; switch (ix) { case 0: - ptemp[0] = clamp_val(val / 1000, 0, - data->temp1_auto_point_temp[1]); - ptemp[0] = clamp_val(ptemp[0], 0, - data->temp2_auto_point_temp[1]); - ptemp[0] = clamp_val(ptemp[0], 0, 63); - if (i2c_smbus_write_byte_data( - client, - AMC6821_REG_PSV_TEMP, - ptemp[0])) { - dev_err(&client->dev, - "Register write error, aborting.\n"); - count = -EIO; - } - goto EXIT; + /* + * Passive cooling temperature. Range limit against low limit + * of both channels. + */ + ret = amc6821_get_auto_point_temps(data->regmap, 1 - nr, otemps); + if (ret) + goto unlock; + val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 63000), 1000); + val = clamp_val(val, 0, min(temps[1], otemps[1])); + ret = regmap_write(regmap, AMC6821_REG_PSV_TEMP, val); + break; case 1: - ptemp[1] = clamp_val(val / 1000, (ptemp[0] & 0x7C) + 4, 124); - ptemp[1] &= 0x7C; - ptemp[2] = clamp_val(ptemp[2], ptemp[1] + 1, 255); + /* + * Low limit; must be between passive and high limit, + * and not exceed 124. Step size is 4 degrees C. + */ + val = clamp_val(val, DIV_ROUND_UP(temps[0], 4) * 4000, 124000); + temps[1] = DIV_ROUND_CLOSEST(val, 4000) * 4; + val = temps[1] / 4; + /* Auto-adjust high limit if necessary */ + temps[2] = clamp_val(temps[2], temps[1] + 1, 255); + ret = set_slope_register(regmap, nr, temps); break; case 2: - ptemp[2] = clamp_val(val / 1000, ptemp[1]+1, 255); + /* high limit, must be higher than low limit */ + val = clamp_val(val, (temps[1] + 1) * 1000, 255000); + temps[2] = DIV_ROUND_CLOSEST(val, 1000); + ret = set_slope_register(regmap, nr, temps); break; default: - dev_dbg(dev, "Unknown attr->index (%d).\n", ix); - count = -EINVAL; - goto EXIT; + ret = -EINVAL; + break; } - dpwm = data->pwm1_auto_point_pwm[2] - data->pwm1_auto_point_pwm[1]; - if (set_slope_register(client, reg, dpwm, ptemp)) - count = -EIO; - -EXIT: +unlock: mutex_unlock(&data->update_lock); - return count; + return ret ? 
: count; } static ssize_t pwm1_auto_point_pwm_store(struct device *dev, @@ -556,204 +637,52 @@ static ssize_t pwm1_auto_point_pwm_store(struct device *dev, const char *buf, size_t count) { struct amc6821_data *data = dev_get_drvdata(dev); - struct i2c_client *client = data->client; - int dpwm; - long val; - int ret = kstrtol(buf, 10, &val); - if (ret) - return ret; - - mutex_lock(&data->update_lock); - data->pwm1_auto_point_pwm[1] = clamp_val(val, 0, 254); - if (i2c_smbus_write_byte_data(client, AMC6821_REG_DCY_LOW_TEMP, - data->pwm1_auto_point_pwm[1])) { - dev_err(&client->dev, "Register write error, aborting.\n"); - count = -EIO; - goto EXIT; - } - dpwm = data->pwm1_auto_point_pwm[2] - data->pwm1_auto_point_pwm[1]; - if (set_slope_register(client, AMC6821_REG_LTEMP_FAN_CTRL, dpwm, - data->temp1_auto_point_temp)) { - count = -EIO; - goto EXIT; - } - if (set_slope_register(client, AMC6821_REG_RTEMP_FAN_CTRL, dpwm, - data->temp2_auto_point_temp)) { - count = -EIO; - goto EXIT; - } - -EXIT: - data->valid = false; - mutex_unlock(&data->update_lock); - return count; -} - -static ssize_t fan_show(struct device *dev, struct device_attribute *devattr, - char *buf) -{ - struct amc6821_data *data = amc6821_update_device(dev); - int ix = to_sensor_dev_attr(devattr)->index; - if (0 == data->fan[ix]) - return sprintf(buf, "0"); - return sprintf(buf, "%d\n", (int)(6000000 / data->fan[ix])); -} + struct regmap *regmap = data->regmap; + int i, ret; + u8 val; -static ssize_t fan1_fault_show(struct device *dev, - struct device_attribute *devattr, char *buf) -{ - struct amc6821_data *data = amc6821_update_device(dev); - if (data->stat1 & AMC6821_STAT1_FANS) - return sprintf(buf, "1"); - else - return sprintf(buf, "0"); -} - -static ssize_t fan_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct amc6821_data *data = dev_get_drvdata(dev); - struct i2c_client *client = data->client; - long val; - int ix = to_sensor_dev_attr(attr)->index; - int ret = kstrtol(buf, 10, &val); + ret = kstrtou8(buf, 10, &val); if (ret) return ret; - val = 1 > val ? 
0xFFFF : 6000000/val; mutex_lock(&data->update_lock); - data->fan[ix] = (u16) clamp_val(val, 1, 0xFFFF); - if (i2c_smbus_write_byte_data(client, fan_reg_low[ix], - data->fan[ix] & 0xFF)) { - dev_err(&client->dev, "Register write error, aborting.\n"); - count = -EIO; - goto EXIT; - } - if (i2c_smbus_write_byte_data(client, - fan_reg_hi[ix], data->fan[ix] >> 8)) { - dev_err(&client->dev, "Register write error, aborting.\n"); - count = -EIO; - } -EXIT: - mutex_unlock(&data->update_lock); - return count; -} - -static ssize_t fan1_div_show(struct device *dev, - struct device_attribute *devattr, char *buf) -{ - struct amc6821_data *data = amc6821_update_device(dev); - return sprintf(buf, "%d\n", data->fan1_div); -} + ret = regmap_write(regmap, AMC6821_REG_DCY_LOW_TEMP, val); + if (ret) + goto unlock; -static ssize_t fan1_div_store(struct device *dev, - struct device_attribute *attr, const char *buf, - size_t count) -{ - struct amc6821_data *data = dev_get_drvdata(dev); - struct i2c_client *client = data->client; - long val; - int config = kstrtol(buf, 10, &val); - if (config) - return config; + for (i = 0; i < 2; i++) { + u8 temps[3]; - mutex_lock(&data->update_lock); - config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF4); - if (config < 0) { - dev_err(&client->dev, - "Error reading configuration register, aborting.\n"); - count = config; - goto EXIT; - } - switch (val) { - case 2: - config &= ~AMC6821_CONF4_PSPR; - data->fan1_div = 2; - break; - case 4: - config |= AMC6821_CONF4_PSPR; - data->fan1_div = 4; - break; - default: - count = -EINVAL; - goto EXIT; - } - if (i2c_smbus_write_byte_data(client, AMC6821_REG_CONF4, config)) { - dev_err(&client->dev, - "Configuration register write error, aborting.\n"); - count = -EIO; + ret = amc6821_get_auto_point_temps(regmap, i, temps); + if (ret) + break; + ret = set_slope_register(regmap, i, temps); + if (ret) + break; } -EXIT: +unlock: mutex_unlock(&data->update_lock); - return count; + return ret ? 
: count; } -static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, IDX_TEMP1_INPUT); -static SENSOR_DEVICE_ATTR_RW(temp1_min, temp, IDX_TEMP1_MIN); -static SENSOR_DEVICE_ATTR_RW(temp1_max, temp, IDX_TEMP1_MAX); -static SENSOR_DEVICE_ATTR_RW(temp1_crit, temp, IDX_TEMP1_CRIT); -static SENSOR_DEVICE_ATTR_RO(temp1_min_alarm, temp_alarm, IDX_TEMP1_MIN); -static SENSOR_DEVICE_ATTR_RO(temp1_max_alarm, temp_alarm, IDX_TEMP1_MAX); -static SENSOR_DEVICE_ATTR_RO(temp1_crit_alarm, temp_alarm, IDX_TEMP1_CRIT); -static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, IDX_TEMP2_INPUT); -static SENSOR_DEVICE_ATTR_RW(temp2_min, temp, IDX_TEMP2_MIN); -static SENSOR_DEVICE_ATTR_RW(temp2_max, temp, IDX_TEMP2_MAX); -static SENSOR_DEVICE_ATTR_RW(temp2_crit, temp, IDX_TEMP2_CRIT); -static SENSOR_DEVICE_ATTR_RO(temp2_fault, temp2_fault, 0); -static SENSOR_DEVICE_ATTR_RO(temp2_min_alarm, temp_alarm, IDX_TEMP2_MIN); -static SENSOR_DEVICE_ATTR_RO(temp2_max_alarm, temp_alarm, IDX_TEMP2_MAX); -static SENSOR_DEVICE_ATTR_RO(temp2_crit_alarm, temp_alarm, IDX_TEMP2_CRIT); -static SENSOR_DEVICE_ATTR_RO(fan1_input, fan, IDX_FAN1_INPUT); -static SENSOR_DEVICE_ATTR_RW(fan1_min, fan, IDX_FAN1_MIN); -static SENSOR_DEVICE_ATTR_RW(fan1_max, fan, IDX_FAN1_MAX); -static SENSOR_DEVICE_ATTR_RO(fan1_fault, fan1_fault, 0); -static SENSOR_DEVICE_ATTR_RW(fan1_div, fan1_div, 0); - -static SENSOR_DEVICE_ATTR_RW(pwm1, pwm1, 0); -static SENSOR_DEVICE_ATTR_RW(pwm1_enable, pwm1_enable, 0); static SENSOR_DEVICE_ATTR_RO(pwm1_auto_point1_pwm, pwm1_auto_point_pwm, 0); static SENSOR_DEVICE_ATTR_RW(pwm1_auto_point2_pwm, pwm1_auto_point_pwm, 1); static SENSOR_DEVICE_ATTR_RO(pwm1_auto_point3_pwm, pwm1_auto_point_pwm, 2); -static SENSOR_DEVICE_ATTR_RO(pwm1_auto_channels_temp, pwm1_auto_channels_temp, - 0); static SENSOR_DEVICE_ATTR_2_RO(temp1_auto_point1_temp, temp_auto_point_temp, - 1, 0); + 0, 0); static SENSOR_DEVICE_ATTR_2_RW(temp1_auto_point2_temp, temp_auto_point_temp, - 1, 1); + 0, 1); static SENSOR_DEVICE_ATTR_2_RW(temp1_auto_point3_temp, temp_auto_point_temp, - 1, 2); + 0, 2); static SENSOR_DEVICE_ATTR_2_RW(temp2_auto_point1_temp, temp_auto_point_temp, - 2, 0); + 1, 0); static SENSOR_DEVICE_ATTR_2_RW(temp2_auto_point2_temp, temp_auto_point_temp, - 2, 1); + 1, 1); static SENSOR_DEVICE_ATTR_2_RW(temp2_auto_point3_temp, temp_auto_point_temp, - 2, 2); + 1, 2); static struct attribute *amc6821_attrs[] = { - &sensor_dev_attr_temp1_input.dev_attr.attr, - &sensor_dev_attr_temp1_min.dev_attr.attr, - &sensor_dev_attr_temp1_max.dev_attr.attr, - &sensor_dev_attr_temp1_crit.dev_attr.attr, - &sensor_dev_attr_temp1_min_alarm.dev_attr.attr, - &sensor_dev_attr_temp1_max_alarm.dev_attr.attr, - &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr, - &sensor_dev_attr_temp2_input.dev_attr.attr, - &sensor_dev_attr_temp2_min.dev_attr.attr, - &sensor_dev_attr_temp2_max.dev_attr.attr, - &sensor_dev_attr_temp2_crit.dev_attr.attr, - &sensor_dev_attr_temp2_min_alarm.dev_attr.attr, - &sensor_dev_attr_temp2_max_alarm.dev_attr.attr, - &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr, - &sensor_dev_attr_temp2_fault.dev_attr.attr, - &sensor_dev_attr_fan1_input.dev_attr.attr, - &sensor_dev_attr_fan1_min.dev_attr.attr, - &sensor_dev_attr_fan1_max.dev_attr.attr, - &sensor_dev_attr_fan1_fault.dev_attr.attr, - &sensor_dev_attr_fan1_div.dev_attr.attr, - &sensor_dev_attr_pwm1.dev_attr.attr, - &sensor_dev_attr_pwm1_enable.dev_attr.attr, - &sensor_dev_attr_pwm1_auto_channels_temp.dev_attr.attr, &sensor_dev_attr_pwm1_auto_point1_pwm.dev_attr.attr, &sensor_dev_attr_pwm1_auto_point2_pwm.dev_attr.attr, 
&sensor_dev_attr_pwm1_auto_point3_pwm.dev_attr.attr, @@ -765,13 +694,118 @@ static struct attribute *amc6821_attrs[] = { &sensor_dev_attr_temp2_auto_point3_temp.dev_attr.attr, NULL }; - ATTRIBUTE_GROUPS(amc6821); +static int amc6821_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + switch (type) { + case hwmon_temp: + return amc6821_temp_read(dev, attr, channel, val); + case hwmon_fan: + return amc6821_fan_read(dev, attr, val); + case hwmon_pwm: + return amc6821_pwm_read(dev, attr, val); + default: + return -EOPNOTSUPP; + } +} + +static int amc6821_write(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long val) +{ + switch (type) { + case hwmon_temp: + return amc6821_temp_write(dev, attr, channel, val); + case hwmon_fan: + return amc6821_fan_write(dev, attr, val); + case hwmon_pwm: + return amc6821_pwm_write(dev, attr, val); + default: + return -EOPNOTSUPP; + } +} + +static umode_t amc6821_is_visible(const void *data, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + switch (type) { + case hwmon_temp: + switch (attr) { + case hwmon_temp_input: + case hwmon_temp_min_alarm: + case hwmon_temp_max_alarm: + case hwmon_temp_crit_alarm: + case hwmon_temp_fault: + return 0444; + case hwmon_temp_min: + case hwmon_temp_max: + case hwmon_temp_crit: + return 0644; + default: + return 0; + } + case hwmon_fan: + switch (attr) { + case hwmon_fan_input: + case hwmon_fan_fault: + return 0444; + case hwmon_fan_pulses: + case hwmon_fan_min: + case hwmon_fan_max: + case hwmon_fan_target: + return 0644; + default: + return 0; + } + case hwmon_pwm: + switch (attr) { + case hwmon_pwm_mode: + case hwmon_pwm_enable: + case hwmon_pwm_input: + return 0644; + case hwmon_pwm_auto_channels_temp: + return 0444; + default: + return 0; + } + default: + return 0; + } +} + +static const struct hwmon_channel_info * const amc6821_info[] = { + HWMON_CHANNEL_INFO(temp, + HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX | + HWMON_T_CRIT | HWMON_T_MIN_ALARM | + HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM, + HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX | + HWMON_T_CRIT | HWMON_T_MIN_ALARM | + HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM | + HWMON_T_FAULT), + HWMON_CHANNEL_INFO(fan, + HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_MAX | + HWMON_F_TARGET | HWMON_F_PULSES | HWMON_F_FAULT), + HWMON_CHANNEL_INFO(pwm, + HWMON_PWM_INPUT | HWMON_PWM_ENABLE | HWMON_PWM_MODE | + HWMON_PWM_AUTO_CHANNELS_TEMP), + NULL +}; + +static const struct hwmon_ops amc6821_hwmon_ops = { + .is_visible = amc6821_is_visible, + .read = amc6821_read, + .write = amc6821_write, +}; + +static const struct hwmon_chip_info amc6821_chip_info = { + .ops = &amc6821_hwmon_ops, + .info = amc6821_info, +}; + /* Return 0 if detection is successful, -ENODEV otherwise */ -static int amc6821_detect( - struct i2c_client *client, - struct i2c_board_info *info) +static int amc6821_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; int address = client->addr; @@ -814,121 +848,90 @@ static int amc6821_detect( return 0; } -static int amc6821_init_client(struct i2c_client *client) +static int amc6821_init_client(struct amc6821_data *data) { - int config; - int err = -EIO; + struct regmap *regmap = data->regmap; + int err; if (init) { - config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF4); - - if (config < 0) { - dev_err(&client->dev, - "Error reading configuration register, aborting.\n"); - return err; - } - - config |= AMC6821_CONF4_MODE; - - if 
(i2c_smbus_write_byte_data(client, AMC6821_REG_CONF4, - config)) { - dev_err(&client->dev, - "Configuration register write error, aborting.\n"); - return err; - } - - config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF3); - - if (config < 0) { - dev_err(&client->dev, - "Error reading configuration register, aborting.\n"); - return err; - } - - dev_info(&client->dev, "Revision %d\n", config & 0x0f); - - config &= ~AMC6821_CONF3_THERM_FAN_EN; - - if (i2c_smbus_write_byte_data(client, AMC6821_REG_CONF3, - config)) { - dev_err(&client->dev, - "Configuration register write error, aborting.\n"); - return err; - } - - config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF2); - - if (config < 0) { - dev_err(&client->dev, - "Error reading configuration register, aborting.\n"); + err = regmap_set_bits(regmap, AMC6821_REG_CONF4, AMC6821_CONF4_MODE); + if (err) return err; - } - - config &= ~AMC6821_CONF2_RTFIE; - config &= ~AMC6821_CONF2_LTOIE; - config &= ~AMC6821_CONF2_RTOIE; - if (i2c_smbus_write_byte_data(client, - AMC6821_REG_CONF2, config)) { - dev_err(&client->dev, - "Configuration register write error, aborting.\n"); + err = regmap_clear_bits(regmap, AMC6821_REG_CONF3, AMC6821_CONF3_THERM_FAN_EN); + if (err) return err; - } - - config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF1); - - if (config < 0) { - dev_err(&client->dev, - "Error reading configuration register, aborting.\n"); + err = regmap_clear_bits(regmap, AMC6821_REG_CONF2, + AMC6821_CONF2_RTFIE | + AMC6821_CONF2_LTOIE | + AMC6821_CONF2_RTOIE); + if (err) return err; - } - config &= ~AMC6821_CONF1_THERMOVIE; - config &= ~AMC6821_CONF1_FANIE; - config |= AMC6821_CONF1_START; - if (pwminv) - config |= AMC6821_CONF1_PWMINV; - else - config &= ~AMC6821_CONF1_PWMINV; - - if (i2c_smbus_write_byte_data( - client, AMC6821_REG_CONF1, config)) { - dev_err(&client->dev, - "Configuration register write error, aborting.\n"); + err = regmap_update_bits(regmap, AMC6821_REG_CONF1, + AMC6821_CONF1_THERMOVIE | AMC6821_CONF1_FANIE | + AMC6821_CONF1_START | AMC6821_CONF1_PWMINV, + AMC6821_CONF1_START | + (pwminv ? 
AMC6821_CONF1_PWMINV : 0)); + if (err) return err; - } } return 0; } +static bool amc6821_volatile_reg(struct device *dev, unsigned int reg) +{ + switch (reg) { + case AMC6821_REG_STAT1: + case AMC6821_REG_STAT2: + case AMC6821_REG_TEMP_LO: + case AMC6821_REG_TDATA_LOW: + case AMC6821_REG_LTEMP_HI: + case AMC6821_REG_RTEMP_HI: + case AMC6821_REG_TDATA_HI: + return true; + default: + return false; + } +} + +static const struct regmap_config amc6821_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = AMC6821_REG_CONF3, + .volatile_reg = amc6821_volatile_reg, + .cache_type = REGCACHE_MAPLE, +}; + static int amc6821_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct amc6821_data *data; struct device *hwmon_dev; + struct regmap *regmap; int err; data = devm_kzalloc(dev, sizeof(struct amc6821_data), GFP_KERNEL); if (!data) return -ENOMEM; - data->client = client; - mutex_init(&data->update_lock); + regmap = devm_regmap_init_i2c(client, &amc6821_regmap_config); + if (IS_ERR(regmap)) + return dev_err_probe(dev, PTR_ERR(regmap), + "Failed to initialize regmap\n"); + data->regmap = regmap; - /* - * Initialize the amc6821 chip - */ - err = amc6821_init_client(client); + err = amc6821_init_client(data); if (err) return err; - hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name, - data, - amc6821_groups); + hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, + data, &amc6821_chip_info, + amc6821_groups); return PTR_ERR_OR_ZERO(hwmon_dev); } static const struct i2c_device_id amc6821_id[] = { - { "amc6821", amc6821 }, + { "amc6821", 0 }, { } }; @@ -937,7 +940,6 @@ MODULE_DEVICE_TABLE(i2c, amc6821_id); static const struct of_device_id __maybe_unused amc6821_of_match[] = { { .compatible = "ti,amc6821", - .data = (void *)amc6821, }, { } }; diff --git a/drivers/hwmon/asus-ec-sensors.c b/drivers/hwmon/asus-ec-sensors.c index 36f9e38000d5..6bb8d7b1d219 100644 --- a/drivers/hwmon/asus-ec-sensors.c +++ b/drivers/hwmon/asus-ec-sensors.c @@ -322,6 +322,14 @@ static const struct ec_board_info board_info_pro_art_x570_creator_wifi = { .family = family_amd_500_series, }; +static const struct ec_board_info board_info_pro_art_x670E_creator_wifi = { + .sensors = SENSOR_TEMP_CPU | SENSOR_TEMP_CPU_PACKAGE | + SENSOR_TEMP_MB | SENSOR_TEMP_VRM | + SENSOR_TEMP_T_SENSOR, + .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH, + .family = family_amd_600_series, +}; + static const struct ec_board_info board_info_pro_art_b550_creator = { .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_T_SENSOR | @@ -486,6 +494,8 @@ static const struct dmi_system_id dmi_table[] = { &board_info_prime_x570_pro), DMI_EXACT_MATCH_ASUS_BOARD_NAME("ProArt X570-CREATOR WIFI", &board_info_pro_art_x570_creator_wifi), + DMI_EXACT_MATCH_ASUS_BOARD_NAME("ProArt X670E-CREATOR WIFI", + &board_info_pro_art_x670E_creator_wifi), DMI_EXACT_MATCH_ASUS_BOARD_NAME("ProArt B550-CREATOR", &board_info_pro_art_b550_creator), DMI_EXACT_MATCH_ASUS_BOARD_NAME("Pro WS X570-ACE", diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c index d778a2aaefec..3751c1e3eddd 100644 --- a/drivers/hwmon/asus_atk0110.c +++ b/drivers/hwmon/asus_atk0110.c @@ -1389,4 +1389,5 @@ static void __exit atk0110_exit(void) module_init(atk0110_init); module_exit(atk0110_exit); +MODULE_DESCRIPTION("ASUS ATK0110 driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/corsair-cpro.c b/drivers/hwmon/corsair-cpro.c index 3e63666a61bd..e1a7f7aa7f80 100644 --- a/drivers/hwmon/corsair-cpro.c +++ 
b/drivers/hwmon/corsair-cpro.c @@ -10,11 +10,13 @@ #include <linux/bitops.h> #include <linux/completion.h> +#include <linux/debugfs.h> #include <linux/hid.h> #include <linux/hwmon.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/mutex.h> +#include <linux/seq_file.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/types.h> @@ -28,6 +30,8 @@ #define LABEL_LENGTH 11 #define REQ_TIMEOUT 300 +#define CTL_GET_FW_VER 0x02 /* returns the firmware version in bytes 1-3 */ +#define CTL_GET_BL_VER 0x06 /* returns the bootloader version in bytes 1-2 */ #define CTL_GET_TMP_CNCT 0x10 /* * returns in bytes 1-4 for each temp sensor: * 0 not connected @@ -78,6 +82,7 @@ struct ccp_device { struct hid_device *hdev; struct device *hwmon_dev; + struct dentry *debugfs; /* For reinitializing the completion below */ spinlock_t wait_input_report_lock; struct completion wait_input_report; @@ -88,6 +93,8 @@ struct ccp_device { DECLARE_BITMAP(temp_cnct, NUM_TEMP_SENSORS); DECLARE_BITMAP(fan_cnct, NUM_FANS); char fan_label[6][LABEL_LENGTH]; + u8 firmware_ver[3]; + u8 bootloader_ver[2]; }; /* converts response error in buffer to errno */ @@ -496,6 +503,83 @@ static int get_temp_cnct(struct ccp_device *ccp) return 0; } +/* read firmware version */ +static int get_fw_version(struct ccp_device *ccp) +{ + int ret; + + ret = send_usb_cmd(ccp, CTL_GET_FW_VER, 0, 0, 0); + if (ret) { + hid_notice(ccp->hdev, "Failed to read firmware version.\n"); + return ret; + } + ccp->firmware_ver[0] = ccp->buffer[1]; + ccp->firmware_ver[1] = ccp->buffer[2]; + ccp->firmware_ver[2] = ccp->buffer[3]; + + return 0; +} + +/* read bootloader version */ +static int get_bl_version(struct ccp_device *ccp) +{ + int ret; + + ret = send_usb_cmd(ccp, CTL_GET_BL_VER, 0, 0, 0); + if (ret) { + hid_notice(ccp->hdev, "Failed to read bootloader version.\n"); + return ret; + } + ccp->bootloader_ver[0] = ccp->buffer[1]; + ccp->bootloader_ver[1] = ccp->buffer[2]; + + return 0; +} + +static int firmware_show(struct seq_file *seqf, void *unused) +{ + struct ccp_device *ccp = seqf->private; + + seq_printf(seqf, "%d.%d.%d\n", + ccp->firmware_ver[0], + ccp->firmware_ver[1], + ccp->firmware_ver[2]); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(firmware); + +static int bootloader_show(struct seq_file *seqf, void *unused) +{ + struct ccp_device *ccp = seqf->private; + + seq_printf(seqf, "%d.%d\n", + ccp->bootloader_ver[0], + ccp->bootloader_ver[1]); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(bootloader); + +static void ccp_debugfs_init(struct ccp_device *ccp) +{ + char name[32]; + int ret; + + scnprintf(name, sizeof(name), "corsaircpro-%s", dev_name(&ccp->hdev->dev)); + ccp->debugfs = debugfs_create_dir(name, NULL); + + ret = get_fw_version(ccp); + if (!ret) + debugfs_create_file("firmware_version", 0444, + ccp->debugfs, ccp, &firmware_fops); + + ret = get_bl_version(ccp); + if (!ret) + debugfs_create_file("bootloader_version", 0444, + ccp->debugfs, ccp, &bootloader_fops); +} + static int ccp_probe(struct hid_device *hdev, const struct hid_device_id *id) { struct ccp_device *ccp; @@ -542,6 +626,9 @@ static int ccp_probe(struct hid_device *hdev, const struct hid_device_id *id) ret = get_fan_cnct(ccp); if (ret) goto out_hw_close; + + ccp_debugfs_init(ccp); + ccp->hwmon_dev = hwmon_device_register_with_info(&hdev->dev, "corsaircpro", ccp, &ccp_chip_info, NULL); if (IS_ERR(ccp->hwmon_dev)) { @@ -562,6 +649,7 @@ static void ccp_remove(struct hid_device *hdev) { struct ccp_device *ccp = hid_get_drvdata(hdev); + 
debugfs_remove_recursive(ccp->debugfs); hwmon_device_unregister(ccp->hwmon_dev); hid_hw_close(hdev); hid_hw_stop(hdev); @@ -582,6 +670,7 @@ static struct hid_driver ccp_driver = { }; MODULE_DEVICE_TABLE(hid, ccp_devices); +MODULE_DESCRIPTION("Corsair Commander Pro controller driver"); MODULE_LICENSE("GPL"); static int __init ccp_init(void) diff --git a/drivers/hwmon/corsair-psu.c b/drivers/hwmon/corsair-psu.c index 2c7c92272fe3..f8f22b8a67cd 100644 --- a/drivers/hwmon/corsair-psu.c +++ b/drivers/hwmon/corsair-psu.c @@ -875,15 +875,16 @@ static const struct hid_device_id corsairpsu_idtable[] = { { HID_USB_DEVICE(0x1b1c, 0x1c04) }, /* Corsair HX650i */ { HID_USB_DEVICE(0x1b1c, 0x1c05) }, /* Corsair HX750i */ { HID_USB_DEVICE(0x1b1c, 0x1c06) }, /* Corsair HX850i */ - { HID_USB_DEVICE(0x1b1c, 0x1c07) }, /* Corsair HX1000i Series 2022 */ - { HID_USB_DEVICE(0x1b1c, 0x1c08) }, /* Corsair HX1200i */ + { HID_USB_DEVICE(0x1b1c, 0x1c07) }, /* Corsair HX1000i Legacy */ + { HID_USB_DEVICE(0x1b1c, 0x1c08) }, /* Corsair HX1200i Legacy */ { HID_USB_DEVICE(0x1b1c, 0x1c09) }, /* Corsair RM550i */ { HID_USB_DEVICE(0x1b1c, 0x1c0a) }, /* Corsair RM650i */ { HID_USB_DEVICE(0x1b1c, 0x1c0b) }, /* Corsair RM750i */ { HID_USB_DEVICE(0x1b1c, 0x1c0c) }, /* Corsair RM850i */ { HID_USB_DEVICE(0x1b1c, 0x1c0d) }, /* Corsair RM1000i */ { HID_USB_DEVICE(0x1b1c, 0x1c1e) }, /* Corsair HX1000i Series 2023 */ - { HID_USB_DEVICE(0x1b1c, 0x1c1f) }, /* Corsair HX1500i Series 2022 and 2023 */ + { HID_USB_DEVICE(0x1b1c, 0x1c1f) }, /* Corsair HX1500i Legacy and Series 2023 */ + { HID_USB_DEVICE(0x1b1c, 0x1c23) }, /* Corsair HX1200i Series 2023 */ { }, }; MODULE_DEVICE_TABLE(hid, corsairpsu_idtable); diff --git a/drivers/hwmon/cros_ec_hwmon.c b/drivers/hwmon/cros_ec_hwmon.c new file mode 100644 index 000000000000..5514cf780b8b --- /dev/null +++ b/drivers/hwmon/cros_ec_hwmon.c @@ -0,0 +1,283 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * ChromeOS EC driver for hwmon + * + * Copyright (C) 2024 Thomas Weißschuh <linux@weissschuh.net> + */ + +#include <linux/device.h> +#include <linux/hwmon.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/platform_data/cros_ec_commands.h> +#include <linux/platform_data/cros_ec_proto.h> +#include <linux/types.h> +#include <linux/units.h> + +#define DRV_NAME "cros-ec-hwmon" + +struct cros_ec_hwmon_priv { + struct cros_ec_device *cros_ec; + const char *temp_sensor_names[EC_TEMP_SENSOR_ENTRIES + EC_TEMP_SENSOR_B_ENTRIES]; + u8 usable_fans; +}; + +static int cros_ec_hwmon_read_fan_speed(struct cros_ec_device *cros_ec, u8 index, u16 *speed) +{ + int ret; + __le16 __speed; + + ret = cros_ec_cmd_readmem(cros_ec, EC_MEMMAP_FAN + index * 2, 2, &__speed); + if (ret < 0) + return ret; + + *speed = le16_to_cpu(__speed); + return 0; +} + +static int cros_ec_hwmon_read_temp(struct cros_ec_device *cros_ec, u8 index, u8 *temp) +{ + unsigned int offset; + int ret; + + if (index < EC_TEMP_SENSOR_ENTRIES) + offset = EC_MEMMAP_TEMP_SENSOR + index; + else + offset = EC_MEMMAP_TEMP_SENSOR_B + index - EC_TEMP_SENSOR_ENTRIES; + + ret = cros_ec_cmd_readmem(cros_ec, offset, 1, temp); + if (ret < 0) + return ret; + return 0; +} + +static bool cros_ec_hwmon_is_error_fan(u16 speed) +{ + return speed == EC_FAN_SPEED_NOT_PRESENT || speed == EC_FAN_SPEED_STALLED; +} + +static bool cros_ec_hwmon_is_error_temp(u8 temp) +{ + return temp == EC_TEMP_SENSOR_NOT_PRESENT || + temp == EC_TEMP_SENSOR_ERROR || + temp == EC_TEMP_SENSOR_NOT_POWERED || + temp == 
EC_TEMP_SENSOR_NOT_CALIBRATED; +} + +static long cros_ec_hwmon_temp_to_millicelsius(u8 temp) +{ + return kelvin_to_millicelsius((((long)temp) + EC_TEMP_SENSOR_OFFSET)); +} + +static int cros_ec_hwmon_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + struct cros_ec_hwmon_priv *priv = dev_get_drvdata(dev); + int ret = -EOPNOTSUPP; + u16 speed; + u8 temp; + + if (type == hwmon_fan) { + if (attr == hwmon_fan_input) { + ret = cros_ec_hwmon_read_fan_speed(priv->cros_ec, channel, &speed); + if (ret == 0) { + if (cros_ec_hwmon_is_error_fan(speed)) + ret = -ENODATA; + else + *val = speed; + } + } else if (attr == hwmon_fan_fault) { + ret = cros_ec_hwmon_read_fan_speed(priv->cros_ec, channel, &speed); + if (ret == 0) + *val = cros_ec_hwmon_is_error_fan(speed); + } + } else if (type == hwmon_temp) { + if (attr == hwmon_temp_input) { + ret = cros_ec_hwmon_read_temp(priv->cros_ec, channel, &temp); + if (ret == 0) { + if (cros_ec_hwmon_is_error_temp(temp)) + ret = -ENODATA; + else + *val = cros_ec_hwmon_temp_to_millicelsius(temp); + } + } else if (attr == hwmon_temp_fault) { + ret = cros_ec_hwmon_read_temp(priv->cros_ec, channel, &temp); + if (ret == 0) + *val = cros_ec_hwmon_is_error_temp(temp); + } + } + + return ret; +} + +static int cros_ec_hwmon_read_string(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, const char **str) +{ + struct cros_ec_hwmon_priv *priv = dev_get_drvdata(dev); + + if (type == hwmon_temp && attr == hwmon_temp_label) { + *str = priv->temp_sensor_names[channel]; + return 0; + } + + return -EOPNOTSUPP; +} + +static umode_t cros_ec_hwmon_is_visible(const void *data, enum hwmon_sensor_types type, + u32 attr, int channel) +{ + const struct cros_ec_hwmon_priv *priv = data; + + if (type == hwmon_fan) { + if (priv->usable_fans & BIT(channel)) + return 0444; + } else if (type == hwmon_temp) { + if (priv->temp_sensor_names[channel]) + return 0444; + } + + return 0; +} + +static const struct hwmon_channel_info * const cros_ec_hwmon_info[] = { + HWMON_CHANNEL_INFO(fan, + HWMON_F_INPUT | HWMON_F_FAULT, + HWMON_F_INPUT | HWMON_F_FAULT, + HWMON_F_INPUT | HWMON_F_FAULT, + HWMON_F_INPUT | HWMON_F_FAULT), + HWMON_CHANNEL_INFO(temp, + HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL), + NULL +}; + +static const struct hwmon_ops cros_ec_hwmon_ops = { + .read = cros_ec_hwmon_read, + .read_string = cros_ec_hwmon_read_string, + 
.is_visible = cros_ec_hwmon_is_visible, +}; + +static const struct hwmon_chip_info cros_ec_hwmon_chip_info = { + .ops = &cros_ec_hwmon_ops, + .info = cros_ec_hwmon_info, +}; + +static void cros_ec_hwmon_probe_temp_sensors(struct device *dev, struct cros_ec_hwmon_priv *priv, + u8 thermal_version) +{ + struct ec_params_temp_sensor_get_info req = {}; + struct ec_response_temp_sensor_get_info resp; + size_t candidates, i, sensor_name_size; + int ret; + u8 temp; + + if (thermal_version < 2) + candidates = EC_TEMP_SENSOR_ENTRIES; + else + candidates = ARRAY_SIZE(priv->temp_sensor_names); + + for (i = 0; i < candidates; i++) { + if (cros_ec_hwmon_read_temp(priv->cros_ec, i, &temp) < 0) + continue; + + if (temp == EC_TEMP_SENSOR_NOT_PRESENT) + continue; + + req.id = i; + ret = cros_ec_cmd(priv->cros_ec, 0, EC_CMD_TEMP_SENSOR_GET_INFO, + &req, sizeof(req), &resp, sizeof(resp)); + if (ret < 0) + continue; + + sensor_name_size = strnlen(resp.sensor_name, sizeof(resp.sensor_name)); + priv->temp_sensor_names[i] = devm_kasprintf(dev, GFP_KERNEL, "%.*s", + (int)sensor_name_size, + resp.sensor_name); + } +} + +static void cros_ec_hwmon_probe_fans(struct cros_ec_hwmon_priv *priv) +{ + u16 speed; + size_t i; + int ret; + + for (i = 0; i < EC_FAN_SPEED_ENTRIES; i++) { + ret = cros_ec_hwmon_read_fan_speed(priv->cros_ec, i, &speed); + if (ret == 0 && speed != EC_FAN_SPEED_NOT_PRESENT) + priv->usable_fans |= BIT(i); + } +} + +static int cros_ec_hwmon_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct cros_ec_dev *ec_dev = dev_get_drvdata(dev->parent); + struct cros_ec_device *cros_ec = ec_dev->ec_dev; + struct cros_ec_hwmon_priv *priv; + struct device *hwmon_dev; + u8 thermal_version; + int ret; + + ret = cros_ec_cmd_readmem(cros_ec, EC_MEMMAP_THERMAL_VERSION, 1, &thermal_version); + if (ret < 0) + return ret; + + /* Covers both fan and temp sensors */ + if (thermal_version == 0) + return -ENODEV; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->cros_ec = cros_ec; + + cros_ec_hwmon_probe_temp_sensors(dev, priv, thermal_version); + cros_ec_hwmon_probe_fans(priv); + + hwmon_dev = devm_hwmon_device_register_with_info(dev, "cros_ec", priv, + &cros_ec_hwmon_chip_info, NULL); + + return PTR_ERR_OR_ZERO(hwmon_dev); +} + +static const struct platform_device_id cros_ec_hwmon_id[] = { + { DRV_NAME, 0 }, + {} +}; + +static struct platform_driver cros_ec_hwmon_driver = { + .driver.name = DRV_NAME, + .probe = cros_ec_hwmon_probe, + .id_table = cros_ec_hwmon_id, +}; +module_platform_driver(cros_ec_hwmon_driver); + +MODULE_DEVICE_TABLE(platform, cros_ec_hwmon_id); +MODULE_DESCRIPTION("ChromeOS EC Hardware Monitoring Driver"); +MODULE_AUTHOR("Thomas Weißschuh <linux@weissschuh.net>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c index 942526bd4775..0362a13f6525 100644 --- a/drivers/hwmon/dell-smm-hwmon.c +++ b/drivers/hwmon/dell-smm-hwmon.c @@ -1264,6 +1264,13 @@ static const struct dmi_system_id i8k_dmi_table[] __initconst = { }, }, { + .ident = "Dell OptiPlex 7060", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "OptiPlex 7060"), + }, + }, + { .ident = "Dell Precision", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c index 3dcef221041d..1a9b28dc91e6 100644 --- a/drivers/hwmon/dme1737.c +++ b/drivers/hwmon/dme1737.c @@ -2461,8 +2461,6 @@ static int dme1737_i2c_detect(struct
i2c_client *client, return 0; } -static const struct i2c_device_id dme1737_id[]; - static int dme1737_i2c_probe(struct i2c_client *client) { struct dme1737_data *data; @@ -2474,7 +2472,7 @@ static int dme1737_i2c_probe(struct i2c_client *client) return -ENOMEM; i2c_set_clientdata(client, data); - data->type = i2c_match_id(dme1737_id, client)->driver_data; + data->type = (uintptr_t)i2c_get_match_data(client); data->client = client; data->name = client->name; mutex_init(&data->update_lock); diff --git a/drivers/hwmon/ds1621.c b/drivers/hwmon/ds1621.c index bffbc8040171..42ec34cb8a5f 100644 --- a/drivers/hwmon/ds1621.c +++ b/drivers/hwmon/ds1621.c @@ -342,8 +342,6 @@ static const struct attribute_group ds1621_group = { }; __ATTRIBUTE_GROUPS(ds1621); -static const struct i2c_device_id ds1621_id[]; - static int ds1621_probe(struct i2c_client *client) { struct ds1621_data *data; @@ -356,7 +354,7 @@ static int ds1621_probe(struct i2c_client *client) mutex_init(&data->update_lock); - data->kind = i2c_match_id(ds1621_id, client)->driver_data; + data->kind = (uintptr_t)i2c_get_match_data(client); data->client = client; /* Initialize the DS1621 chip */ diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c index 8c572bb64f5d..7e867f132420 100644 --- a/drivers/hwmon/f75375s.c +++ b/drivers/hwmon/f75375s.c @@ -111,31 +111,6 @@ struct f75375_data { s8 temp_max_hyst[2]; }; -static int f75375_detect(struct i2c_client *client, - struct i2c_board_info *info); -static int f75375_probe(struct i2c_client *client); -static void f75375_remove(struct i2c_client *client); - -static const struct i2c_device_id f75375_id[] = { - { "f75373", f75373 }, - { "f75375", f75375 }, - { "f75387", f75387 }, - { } -}; -MODULE_DEVICE_TABLE(i2c, f75375_id); - -static struct i2c_driver f75375_driver = { - .class = I2C_CLASS_HWMON, - .driver = { - .name = "f75375", - }, - .probe = f75375_probe, - .remove = f75375_remove, - .id_table = f75375_id, - .detect = f75375_detect, - .address_list = normal_i2c, -}; - static inline int f75375_read8(struct i2c_client *client, u8 reg) { return i2c_smbus_read_byte_data(client, reg); @@ -830,7 +805,7 @@ static int f75375_probe(struct i2c_client *client) i2c_set_clientdata(client, data); mutex_init(&data->update_lock); - data->kind = i2c_match_id(f75375_id, client)->driver_data; + data->kind = (uintptr_t)i2c_get_match_data(client); err = sysfs_create_group(&client->dev.kobj, &f75375_group); if (err) @@ -901,6 +876,25 @@ static int f75375_detect(struct i2c_client *client, return 0; } +static const struct i2c_device_id f75375_id[] = { + { "f75373", f75373 }, + { "f75375", f75375 }, + { "f75387", f75387 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, f75375_id); + +static struct i2c_driver f75375_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "f75375", + }, + .probe = f75375_probe, + .remove = f75375_remove, + .id_table = f75375_id, + .detect = f75375_detect, + .address_list = normal_i2c, +}; module_i2c_driver(f75375_driver); MODULE_AUTHOR("Riku Voipio"); diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c index b30512a705a7..1811f84d835e 100644 --- a/drivers/hwmon/fschmd.c +++ b/drivers/hwmon/fschmd.c @@ -1087,7 +1087,7 @@ static int fschmd_probe(struct i2c_client *client) "Heracles", "Heimdall", "Hades", "Syleus" }; static const int watchdog_minors[] = { WATCHDOG_MINOR, 212, 213, 214, 215 }; int i, err; - enum chips kind = i2c_match_id(fschmd_id, client)->driver_data; + enum chips kind = (uintptr_t)i2c_get_match_data(client); data = kzalloc(sizeof(struct fschmd_data), 
GFP_KERNEL); if (!data) diff --git a/drivers/hwmon/g762.c b/drivers/hwmon/g762.c index af1228708e25..4fa3aa1271da 100644 --- a/drivers/hwmon/g762.c +++ b/drivers/hwmon/g762.c @@ -44,6 +44,7 @@ #define DRVNAME "g762" static const struct i2c_device_id g762_id[] = { + { "g761" }, { "g762" }, { "g763" }, { } @@ -69,6 +70,7 @@ enum g762_regs { #define G762_REG_FAN_CMD1_PWM_POLARITY 0x02 /* PWM polarity */ #define G762_REG_FAN_CMD1_PULSE_PER_REV 0x01 /* pulse per fan revolution */ +#define G761_REG_FAN_CMD2_FAN_CLOCK 0x20 /* choose internal clock*/ #define G762_REG_FAN_CMD2_GEAR_MODE_1 0x08 /* fan gear mode */ #define G762_REG_FAN_CMD2_GEAR_MODE_0 0x04 #define G762_REG_FAN_CMD2_FAN_STARTV_1 0x02 /* fan startup voltage */ @@ -115,6 +117,7 @@ enum g762_regs { struct g762_data { struct i2c_client *client; + bool internal_clock; struct clk *clk; /* update mutex */ @@ -566,6 +569,7 @@ static int do_set_fan_startv(struct device *dev, unsigned long val) #ifdef CONFIG_OF static const struct of_device_id g762_dt_match[] = { + { .compatible = "gmt,g761" }, { .compatible = "gmt,g762" }, { .compatible = "gmt,g763" }, { }, @@ -597,6 +601,21 @@ static int g762_of_clock_enable(struct i2c_client *client) if (!client->dev.of_node) return 0; + data = i2c_get_clientdata(client); + + /* + * Skip CLK detection and handling if we use internal clock. + * This is only valid for g761. + */ + data->internal_clock = of_device_is_compatible(client->dev.of_node, + "gmt,g761") && + !of_property_present(client->dev.of_node, + "clocks"); + if (data->internal_clock) { + do_set_clk_freq(&client->dev, 32768); + return 0; + } + clk = of_clk_get(client->dev.of_node, 0); if (IS_ERR(clk)) { dev_err(&client->dev, "failed to get clock\n"); @@ -616,7 +635,6 @@ static int g762_of_clock_enable(struct i2c_client *client) goto clk_unprep; } - data = i2c_get_clientdata(client); data->clk = clk; ret = devm_add_action(&client->dev, g762_of_clock_disable, data); @@ -1025,16 +1043,26 @@ ATTRIBUTE_GROUPS(g762); static inline int g762_fan_init(struct device *dev) { struct g762_data *data = g762_update_client(dev); + int ret; if (IS_ERR(data)) return PTR_ERR(data); + /* internal_clock can only be set with compatible g761 */ + if (data->internal_clock) + data->fan_cmd2 |= G761_REG_FAN_CMD2_FAN_CLOCK; + data->fan_cmd1 |= G762_REG_FAN_CMD1_DET_FAN_FAIL; data->fan_cmd1 |= G762_REG_FAN_CMD1_DET_FAN_OOC; data->valid = false; - return i2c_smbus_write_byte_data(data->client, G762_REG_FAN_CMD1, - data->fan_cmd1); + ret = i2c_smbus_write_byte_data(data->client, G762_REG_FAN_CMD1, + data->fan_cmd1); + if (ret) + return ret; + + return i2c_smbus_write_byte_data(data->client, G762_REG_FAN_CMD2, + data->fan_cmd2); } static int g762_probe(struct i2c_client *client) @@ -1056,15 +1084,16 @@ static int g762_probe(struct i2c_client *client) data->client = client; mutex_init(&data->update_lock); - /* Enable fan failure detection and fan out of control protection */ - ret = g762_fan_init(dev); + /* Get configuration via DT ... */ + ret = g762_of_clock_enable(client); if (ret) return ret; - /* Get configuration via DT ... 
*/ - ret = g762_of_clock_enable(client); + /* Enable fan failure detection and fan out of control protection */ + ret = g762_fan_init(dev); if (ret) return ret; + ret = g762_of_prop_import(client); if (ret) return ret; diff --git a/drivers/hwmon/gsc-hwmon.c b/drivers/hwmon/gsc-hwmon.c index 1501ceb551e7..cb2f01dc4326 100644 --- a/drivers/hwmon/gsc-hwmon.c +++ b/drivers/hwmon/gsc-hwmon.c @@ -39,7 +39,7 @@ struct gsc_hwmon_data { struct hwmon_chip_info chip; }; -static struct regmap_bus gsc_hwmon_regmap_bus = { +static const struct regmap_bus gsc_hwmon_regmap_bus = { .reg_read = gsc_read, .reg_write = gsc_write, }; @@ -249,7 +249,6 @@ gsc_hwmon_get_devtree_pdata(struct device *dev) { struct gsc_hwmon_platform_data *pdata; struct gsc_hwmon_channel *ch; - struct fwnode_handle *child; struct device_node *fan; int nchannels; @@ -276,25 +275,21 @@ gsc_hwmon_get_devtree_pdata(struct device *dev) ch = pdata->channels; /* allocate structures for channels and count instances of each type */ - device_for_each_child_node(dev, child) { + device_for_each_child_node_scoped(dev, child) { if (fwnode_property_read_string(child, "label", &ch->name)) { dev_err(dev, "channel without label\n"); - fwnode_handle_put(child); return ERR_PTR(-EINVAL); } if (fwnode_property_read_u32(child, "reg", &ch->reg)) { dev_err(dev, "channel without reg\n"); - fwnode_handle_put(child); return ERR_PTR(-EINVAL); } if (fwnode_property_read_u32(child, "gw,mode", &ch->mode)) { dev_err(dev, "channel without mode\n"); - fwnode_handle_put(child); return ERR_PTR(-EINVAL); } if (ch->mode > mode_max) { dev_err(dev, "invalid channel mode\n"); - fwnode_handle_put(child); return ERR_PTR(-EINVAL); } diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c index 3b259c425ab7..a362080d41fa 100644 --- a/drivers/hwmon/hwmon.c +++ b/drivers/hwmon/hwmon.c @@ -14,6 +14,7 @@ #include <linux/err.h> #include <linux/gfp.h> #include <linux/hwmon.h> +#include <linux/i2c.h> #include <linux/idr.h> #include <linux/kstrtox.h> #include <linux/list.h> @@ -136,7 +137,7 @@ static void hwmon_dev_release(struct device *dev) kfree(hwdev); } -static struct class hwmon_class = { +static const struct class hwmon_class = { .name = "hwmon", .dev_groups = hwmon_dev_attr_groups, .dev_release = hwmon_dev_release, @@ -309,6 +310,114 @@ static int hwmon_attr_base(enum hwmon_sensor_types type) return 1; } +#if IS_REACHABLE(CONFIG_I2C) + +/* + * PEC support + * + * The 'pec' attribute is attached to I2C client devices. It is only provided + * if the i2c controller supports PEC. + * + * The mutex ensures that PEC configuration between i2c device and the hardware + * is consistent. Use a single mutex because attribute writes are supposed to be + * rare, and maintaining a separate mutex for each hardware monitoring device + * would add substantial complexity to the driver for little if any gain. + * + * The hardware monitoring device is identified as child of the i2c client + * device. This assumes that only a single hardware monitoring device is + * attached to an i2c client device. 
+ */ + +static DEFINE_MUTEX(hwmon_pec_mutex); + +static int hwmon_match_device(struct device *dev, void *data) +{ + return dev->class == &hwmon_class; +} + +static ssize_t pec_show(struct device *dev, struct device_attribute *dummy, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + + return sysfs_emit(buf, "%d\n", !!(client->flags & I2C_CLIENT_PEC)); +} + +static ssize_t pec_store(struct device *dev, struct device_attribute *devattr, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + struct hwmon_device *hwdev; + struct device *hdev; + bool val; + int err; + + err = kstrtobool(buf, &val); + if (err < 0) + return err; + + hdev = device_find_child(dev, NULL, hwmon_match_device); + if (!hdev) + return -ENODEV; + + mutex_lock(&hwmon_pec_mutex); + + /* + * If there is no write function, we assume that chip specific + * handling is not required. + */ + hwdev = to_hwmon_device(hdev); + if (hwdev->chip->ops->write) { + err = hwdev->chip->ops->write(hdev, hwmon_chip, hwmon_chip_pec, 0, val); + if (err && err != -EOPNOTSUPP) + goto unlock; + } + + if (!val) + client->flags &= ~I2C_CLIENT_PEC; + else + client->flags |= I2C_CLIENT_PEC; + + err = count; +unlock: + mutex_unlock(&hwmon_pec_mutex); + put_device(hdev); + + return err; +} + +static DEVICE_ATTR_RW(pec); + +static void hwmon_remove_pec(void *dev) +{ + device_remove_file(dev, &dev_attr_pec); +} + +static int hwmon_pec_register(struct device *hdev) +{ + struct i2c_client *client = i2c_verify_client(hdev->parent); + int err; + + if (!client) + return -EINVAL; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_PEC)) + return 0; + + err = device_create_file(&client->dev, &dev_attr_pec); + if (err) + return err; + + return devm_add_action_or_reset(hdev, hwmon_remove_pec, &client->dev); +} + +#else /* CONFIG_I2C */ +static int hwmon_pec_register(struct device *hdev) +{ + return -EINVAL; +} +#endif /* CONFIG_I2C */ + /* sysfs attribute management */ static ssize_t hwmon_attr_show(struct device *dev, @@ -397,10 +506,6 @@ static struct attribute *hwmon_genattr(const void *drvdata, const char *name; bool is_string = is_string_attr(type, attr); - /* The attribute is invisible if there is no template string */ - if (!template) - return ERR_PTR(-ENOENT); - mode = ops->is_visible(drvdata, type, attr, index); if (!mode) return ERR_PTR(-ENOENT); @@ -712,8 +817,8 @@ static int hwmon_genattrs(const void *drvdata, attr = __ffs(attr_mask); attr_mask &= ~BIT(attr); - if (attr >= template_size) - return -EINVAL; + if (attr >= template_size || !templates[attr]) + continue; /* attribute is invisible */ a = hwmon_genattr(drvdata, info->type, attr, i, templates[attr], ops); if (IS_ERR(a)) { @@ -849,16 +954,26 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata, INIT_LIST_HEAD(&hwdev->tzdata); if (hdev->of_node && chip && chip->ops->read && - chip->info[0]->type == hwmon_chip && - (chip->info[0]->config[0] & HWMON_C_REGISTER_TZ)) { - err = hwmon_thermal_register_sensors(hdev); - if (err) { - device_unregister(hdev); - /* - * Don't worry about hwdev; hwmon_dev_release(), called - * from device_unregister(), will free it. - */ - goto ida_remove; + chip->info[0]->type == hwmon_chip) { + u32 config = chip->info[0]->config[0]; + + if (config & HWMON_C_REGISTER_TZ) { + err = hwmon_thermal_register_sensors(hdev); + if (err) { + device_unregister(hdev); + /* + * Don't worry about hwdev; hwmon_dev_release(), + * called from device_unregister(), will free it. 
+ */ + goto ida_remove; + } + } + if (config & HWMON_C_PEC) { + err = hwmon_pec_register(hdev); + if (err) { + device_unregister(hdev); + goto ida_remove; + } } } diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c index 4c8a80847891..fab32e1e15f2 100644 --- a/drivers/hwmon/iio_hwmon.c +++ b/drivers/hwmon/iio_hwmon.c @@ -49,16 +49,17 @@ static ssize_t iio_hwmon_read_val(struct device *dev, struct iio_channel *chan = &state->channels[sattr->index]; enum iio_chan_type type; - ret = iio_read_channel_processed(chan, &result); - if (ret < 0) - return ret; - ret = iio_get_channel_type(chan, &type); if (ret < 0) return ret; if (type == IIO_POWER) - result *= 1000; /* mili-Watts to micro-Watts conversion */ + /* mili-Watts to micro-Watts conversion */ + ret = iio_read_channel_processed_scale(chan, &result, 1000); + else + ret = iio_read_channel_processed(chan, &result); + if (ret < 0) + return ret; return sprintf(buf, "%d\n", result); } diff --git a/drivers/hwmon/ina238.c b/drivers/hwmon/ina238.c index 855626f1bc01..2d9f12f68d50 100644 --- a/drivers/hwmon/ina238.c +++ b/drivers/hwmon/ina238.c @@ -96,7 +96,7 @@ #define INA238_BUS_VOLTAGE_LSB 3125 /* 3.125 mV/lsb */ #define INA238_DIE_TEMP_LSB 125 /* 125 mC/lsb */ -static struct regmap_config ina238_regmap_config = { +static const struct regmap_config ina238_regmap_config = { .max_register = INA238_REGISTERS, .reg_bits = 8, .val_bits = 16, diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c index d8415d1f21fc..9ab4205622e2 100644 --- a/drivers/hwmon/ina2xx.c +++ b/drivers/hwmon/ina2xx.c @@ -73,6 +73,11 @@ #define INA226_READ_AVG(reg) (((reg) & INA226_AVG_RD_MASK) >> 9) #define INA226_SHIFT_AVG(val) ((val) << 9) +#define INA226_ALERT_POLARITY_MASK 0x0002 +#define INA226_SHIFT_ALERT_POLARITY(val) ((val) << 1) +#define INA226_ALERT_POL_LOW 0 +#define INA226_ALERT_POL_HIGH 1 + /* bit number of alert functions in Mask/Enable Register */ #define INA226_SHUNT_OVER_VOLTAGE_BIT 15 #define INA226_SHUNT_UNDER_VOLTAGE_BIT 14 @@ -178,6 +183,14 @@ static u16 ina226_interval_to_reg(int interval) return INA226_SHIFT_AVG(avg_bits); } +static int ina2xx_set_alert_polarity(struct ina2xx_data *data, + unsigned long val) +{ + return regmap_update_bits(data->regmap, INA226_MASK_ENABLE, + INA226_ALERT_POLARITY_MASK, + INA226_SHIFT_ALERT_POLARITY(val)); +} + /* * Calibration register is set to the best value, which eliminates * truncation errors on calculating current register in hardware. 
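A worked example of the alert-polarity bits introduced in the ina2xx hunk above may be helpful. This is an illustrative sketch, not part of the patch; the wrapper function name is invented, while INA226_MASK_ENABLE, the polarity mask/shift macros, and the regmap handle in struct ina2xx_data are all taken from this diff:

/*
 * Illustrative sketch (not part of the patch): INA226_ALERT_POLARITY_MASK
 * is 0x0002, i.e. bit 1 of the Mask/Enable register, and
 * INA226_SHIFT_ALERT_POLARITY() moves the polarity selection into it:
 *
 *   INA226_SHIFT_ALERT_POLARITY(INA226_ALERT_POL_LOW)  == 0 << 1 == 0x0000
 *   INA226_SHIFT_ALERT_POLARITY(INA226_ALERT_POL_HIGH) == 1 << 1 == 0x0002
 *
 * Because regmap_update_bits() rewrites only the masked bit, the call can
 * be made unconditionally at probe time without disturbing the other
 * alert-function bits in the same register.
 */
static int ina226_example_alert_active_high(struct ina2xx_data *data)
{
	return regmap_update_bits(data->regmap, INA226_MASK_ENABLE,
				  INA226_ALERT_POLARITY_MASK,
				  INA226_SHIFT_ALERT_POLARITY(INA226_ALERT_POL_HIGH));
}
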
@@ -612,8 +625,6 @@ static const struct attribute_group ina226_group = { .attrs = ina226_attrs, }; -static const struct i2c_device_id ina2xx_id[]; - static int ina2xx_probe(struct i2c_client *client) { struct device *dev = &client->dev; @@ -623,10 +634,7 @@ static int ina2xx_probe(struct i2c_client *client) int ret, group = 0; enum ina2xx_ids chip; - if (client->dev.of_node) - chip = (uintptr_t)of_device_get_match_data(&client->dev); - else - chip = i2c_match_id(ina2xx_id, client)->driver_data; + chip = (uintptr_t)i2c_get_match_data(client); data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); if (!data) @@ -659,6 +667,25 @@ static int ina2xx_probe(struct i2c_client *client) if (ret) return dev_err_probe(dev, ret, "failed to enable vs regulator\n"); + if (chip == ina226) { + if (of_property_read_bool(dev->of_node, "ti,alert-polarity-active-high")) { + ret = ina2xx_set_alert_polarity(data, + INA226_ALERT_POL_HIGH); + if (ret < 0) { + return dev_err_probe(dev, ret, + "failed to set alert polarity active high\n"); + } + } else { + /* Set default value i.e active low */ + ret = ina2xx_set_alert_polarity(data, + INA226_ALERT_POL_LOW); + if (ret < 0) { + return dev_err_probe(dev, ret, + "failed to set alert polarity active low\n"); + } + } + } + ret = ina2xx_init(data); if (ret < 0) { dev_err(dev, "error configuring the device: %d\n", ret); diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c index 7092f8f025b8..a260cff750a5 100644 --- a/drivers/hwmon/jc42.c +++ b/drivers/hwmon/jc42.c @@ -79,20 +79,9 @@ static const unsigned short normal_i2c[] = { #define AT30TS00_DEVID 0x8201 #define AT30TS00_DEVID_MASK 0xffff -#define AT30TSE004_DEVID 0x2200 -#define AT30TSE004_DEVID_MASK 0xffff - -/* Giantec */ -#define GT30TS00_DEVID 0x2200 -#define GT30TS00_DEVID_MASK 0xff00 - #define GT34TS02_DEVID 0x3300 #define GT34TS02_DEVID_MASK 0xff00 -/* IDT */ -#define TSE2004_DEVID 0x2200 -#define TSE2004_DEVID_MASK 0xff00 - #define TS3000_DEVID 0x2900 /* Also matches TSE2002 */ #define TS3000_DEVID_MASK 0xff00 @@ -116,9 +105,6 @@ static const unsigned short normal_i2c[] = { #define MCP98243_DEVID 0x2100 #define MCP98243_DEVID_MASK 0xfffc -#define MCP98244_DEVID 0x2200 -#define MCP98244_DEVID_MASK 0xfffc - #define MCP9843_DEVID 0x0000 /* Also matches mcp9805 */ #define MCP9843_DEVID_MASK 0xfffe @@ -136,12 +122,6 @@ static const unsigned short normal_i2c[] = { #define CAT34TS02C_DEVID 0x0a00 #define CAT34TS02C_DEVID_MASK 0xfff0 -#define CAT34TS04_DEVID 0x2200 -#define CAT34TS04_DEVID_MASK 0xfff0 - -#define N34TS04_DEVID 0x2230 -#define N34TS04_DEVID_MASK 0xfff0 - /* ST Microelectronics */ #define STTS424_DEVID 0x0101 #define STTS424_DEVID_MASK 0xffff @@ -152,15 +132,12 @@ static const unsigned short normal_i2c[] = { #define STTS2002_DEVID 0x0300 #define STTS2002_DEVID_MASK 0xffff -#define STTS2004_DEVID 0x2201 -#define STTS2004_DEVID_MASK 0xffff - #define STTS3000_DEVID 0x0200 #define STTS3000_DEVID_MASK 0xffff -/* Seiko Instruments */ -#define S34TS04A_DEVID 0x2221 -#define S34TS04A_DEVID_MASK 0xffff +/* TSE2004 compliant sensors */ +#define TSE2004_DEVID 0x2200 +#define TSE2004_DEVID_MASK 0xff00 static u16 jc42_hysteresis[] = { 0, 1500, 3000, 6000 }; @@ -173,8 +150,8 @@ struct jc42_chips { static struct jc42_chips jc42_chips[] = { { ADT_MANID, ADT7408_DEVID, ADT7408_DEVID_MASK }, { ATMEL_MANID, AT30TS00_DEVID, AT30TS00_DEVID_MASK }, - { ATMEL_MANID2, AT30TSE004_DEVID, AT30TSE004_DEVID_MASK }, - { GT_MANID, GT30TS00_DEVID, GT30TS00_DEVID_MASK }, + { ATMEL_MANID2, TSE2004_DEVID, TSE2004_DEVID_MASK }, + { 
GT_MANID, TSE2004_DEVID, TSE2004_DEVID_MASK }, { GT_MANID2, GT34TS02_DEVID, GT34TS02_DEVID_MASK }, { IDT_MANID, TSE2004_DEVID, TSE2004_DEVID_MASK }, { IDT_MANID, TS3000_DEVID, TS3000_DEVID_MASK }, @@ -184,19 +161,19 @@ static struct jc42_chips jc42_chips[] = { { MCP_MANID, MCP9808_DEVID, MCP9808_DEVID_MASK }, { MCP_MANID, MCP98242_DEVID, MCP98242_DEVID_MASK }, { MCP_MANID, MCP98243_DEVID, MCP98243_DEVID_MASK }, - { MCP_MANID, MCP98244_DEVID, MCP98244_DEVID_MASK }, + { MCP_MANID, TSE2004_DEVID, TSE2004_DEVID_MASK }, { MCP_MANID, MCP9843_DEVID, MCP9843_DEVID_MASK }, { NXP_MANID, SE97_DEVID, SE97_DEVID_MASK }, { ONS_MANID, CAT6095_DEVID, CAT6095_DEVID_MASK }, { ONS_MANID, CAT34TS02C_DEVID, CAT34TS02C_DEVID_MASK }, - { ONS_MANID, CAT34TS04_DEVID, CAT34TS04_DEVID_MASK }, - { ONS_MANID, N34TS04_DEVID, N34TS04_DEVID_MASK }, + { ONS_MANID, TSE2004_DEVID, TSE2004_DEVID_MASK }, + { ONS_MANID, TSE2004_DEVID, TSE2004_DEVID_MASK }, { NXP_MANID, SE98_DEVID, SE98_DEVID_MASK }, - { SI_MANID, S34TS04A_DEVID, S34TS04A_DEVID_MASK }, + { SI_MANID, TSE2004_DEVID, TSE2004_DEVID_MASK }, { STM_MANID, STTS424_DEVID, STTS424_DEVID_MASK }, { STM_MANID, STTS424E_DEVID, STTS424E_DEVID_MASK }, { STM_MANID, STTS2002_DEVID, STTS2002_DEVID_MASK }, - { STM_MANID, STTS2004_DEVID, STTS2004_DEVID_MASK }, + { STM_MANID, TSE2004_DEVID, TSE2004_DEVID_MASK }, { STM_MANID, STTS3000_DEVID, STTS3000_DEVID_MASK }, }; @@ -436,7 +413,11 @@ static int jc42_detect(struct i2c_client *client, struct i2c_board_info *info) if (cap < 0 || config < 0 || manid < 0 || devid < 0) return -ENODEV; - if ((cap & 0xff00) || (config & 0xf800)) + if ((cap & 0xff00) || (config & 0xf820)) + return -ENODEV; + + if ((devid & TSE2004_DEVID_MASK) == TSE2004_DEVID && + (cap & 0x00e7) != 0x00e7) return -ENODEV; for (i = 0; i < ARRAY_SIZE(jc42_chips); i++) { diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index 8092312c0a87..543526bac042 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c @@ -101,7 +101,6 @@ struct k10temp_data { #define TCCD_BIT(x) ((x) + 2) #define HAVE_TEMP(d, channel) ((d)->show_temp & BIT(channel)) -#define HAVE_TDIE(d) HAVE_TEMP(d, TDIE_BIT) struct tctl_offset { u8 model; @@ -153,8 +152,16 @@ static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval) static void read_tempreg_nb_zen(struct pci_dev *pdev, u32 *regval) { - amd_smn_read(amd_pci_dev_to_node_id(pdev), - ZEN_REPORTED_TEMP_CTRL_BASE, regval); + if (amd_smn_read(amd_pci_dev_to_node_id(pdev), + ZEN_REPORTED_TEMP_CTRL_BASE, regval)) + *regval = 0; +} + +static int read_ccd_temp_reg(struct k10temp_data *data, int ccd, u32 *regval) +{ + u16 node_id = amd_pci_dev_to_node_id(data->pdev); + + return amd_smn_read(node_id, ZEN_CCD_TEMP(data->ccd_offset, ccd), regval); } static long get_raw_temp(struct k10temp_data *data) @@ -205,6 +212,7 @@ static int k10temp_read_temp(struct device *dev, u32 attr, int channel, long *val) { struct k10temp_data *data = dev_get_drvdata(dev); + int ret = -EOPNOTSUPP; u32 regval; switch (attr) { @@ -221,13 +229,15 @@ static int k10temp_read_temp(struct device *dev, u32 attr, int channel, *val = 0; break; case 2 ... 
13: /* Tccd{1-12} */ - amd_smn_read(amd_pci_dev_to_node_id(data->pdev), - ZEN_CCD_TEMP(data->ccd_offset, channel - 2), - &regval); + ret = read_ccd_temp_reg(data, channel - 2, &regval); + + if (ret) + return ret; + *val = (regval & ZEN_CCD_TEMP_MASK) * 125 - 49000; break; default: - return -EOPNOTSUPP; + return ret; } break; case hwmon_temp_max: @@ -243,7 +253,7 @@ static int k10temp_read_temp(struct device *dev, u32 attr, int channel, - ((regval >> 24) & 0xf)) * 500 + 52000; break; default: - return -EOPNOTSUPP; + return ret; } return 0; } @@ -259,11 +269,11 @@ static int k10temp_read(struct device *dev, enum hwmon_sensor_types type, } } -static umode_t k10temp_is_visible(const void *_data, +static umode_t k10temp_is_visible(const void *drvdata, enum hwmon_sensor_types type, u32 attr, int channel) { - const struct k10temp_data *data = _data; + const struct k10temp_data *data = drvdata; struct pci_dev *pdev = data->pdev; u32 reg; @@ -374,15 +384,25 @@ static const struct hwmon_chip_info k10temp_chip_info = { .info = k10temp_info, }; -static void k10temp_get_ccd_support(struct pci_dev *pdev, - struct k10temp_data *data, int limit) +static void k10temp_get_ccd_support(struct k10temp_data *data, int limit) { u32 regval; int i; for (i = 0; i < limit; i++) { - amd_smn_read(amd_pci_dev_to_node_id(pdev), - ZEN_CCD_TEMP(data->ccd_offset, i), &regval); + /* + * Ignore inaccessible CCDs. + * + * Some systems will return a register value of 0, and the TEMP_VALID + * bit check below will naturally fail. + * + * Other systems will return a PCI_ERROR_RESPONSE (0xFFFFFFFF) for + * the register value. And this will incorrectly pass the TEMP_VALID + * bit check. + */ + if (read_ccd_temp_reg(data, i, &regval)) + continue; + if (regval & ZEN_CCD_TEMP_VALID) data->show_temp |= BIT(TCCD_BIT(i)); } @@ -434,18 +454,18 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id) case 0x11: /* Zen APU */ case 0x18: /* Zen+ APU */ data->ccd_offset = 0x154; - k10temp_get_ccd_support(pdev, data, 4); + k10temp_get_ccd_support(data, 4); break; case 0x31: /* Zen2 Threadripper */ case 0x60: /* Renoir */ case 0x68: /* Lucienne */ case 0x71: /* Zen2 */ data->ccd_offset = 0x154; - k10temp_get_ccd_support(pdev, data, 8); + k10temp_get_ccd_support(data, 8); break; case 0xa0 ... 0xaf: data->ccd_offset = 0x300; - k10temp_get_ccd_support(pdev, data, 8); + k10temp_get_ccd_support(data, 8); break; } } else if (boot_cpu_data.x86 == 0x19) { @@ -459,21 +479,21 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id) case 0x21: /* Zen3 Ryzen Desktop */ case 0x50 ... 0x5f: /* Green Sardine */ data->ccd_offset = 0x154; - k10temp_get_ccd_support(pdev, data, 8); + k10temp_get_ccd_support(data, 8); break; case 0x40 ... 0x4f: /* Yellow Carp */ data->ccd_offset = 0x300; - k10temp_get_ccd_support(pdev, data, 8); + k10temp_get_ccd_support(data, 8); break; case 0x60 ... 0x6f: case 0x70 ... 0x7f: data->ccd_offset = 0x308; - k10temp_get_ccd_support(pdev, data, 8); + k10temp_get_ccd_support(data, 8); break; case 0x10 ... 0x1f: case 0xa0 ...
0xaf: data->ccd_offset = 0x300; - k10temp_get_ccd_support(pdev, data, 12); + k10temp_get_ccd_support(data, 12); break; } } else if (boot_cpu_data.x86 == 0x1a) { diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c index 0878a044dd8e..035176a98ce9 100644 --- a/drivers/hwmon/lm63.c +++ b/drivers/hwmon/lm63.c @@ -1104,10 +1104,7 @@ static int lm63_probe(struct i2c_client *client) mutex_init(&data->update_lock); /* Set the device type */ - if (client->dev.of_node) - data->kind = (uintptr_t)of_device_get_match_data(&client->dev); - else - data->kind = i2c_match_id(lm63_id, client)->driver_data; + data->kind = (uintptr_t)i2c_get_match_data(client); if (data->kind == lm64) data->temp2_offset = 16000; diff --git a/drivers/hwmon/lm70.c b/drivers/hwmon/lm70.c index 481e4e1f8f4f..0d5a250cb672 100644 --- a/drivers/hwmon/lm70.c +++ b/drivers/hwmon/lm70.c @@ -169,11 +169,7 @@ static int lm70_probe(struct spi_device *spi) struct lm70 *p_lm70; int chip; - if (dev_fwnode(&spi->dev)) - chip = (int)(uintptr_t)device_get_match_data(&spi->dev); - else - chip = spi_get_device_id(spi)->driver_data; - + chip = (kernel_ulong_t)spi_get_device_match_data(spi); /* signaling is SPI_MODE_0 */ if ((spi->mode & SPI_MODE_X_MASK) != SPI_MODE_0) diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c index e00750718536..2c2205aec7d4 100644 --- a/drivers/hwmon/lm75.c +++ b/drivers/hwmon/lm75.c @@ -625,20 +625,12 @@ static void lm75_remove(void *data) i2c_smbus_write_byte_data(client, LM75_REG_CONF, lm75->orig_conf); } -static const struct i2c_device_id lm75_ids[]; - static int lm75_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct device *hwmon_dev; struct lm75_data *data; int status, err; - enum lm75_type kind; - - if (client->dev.of_node) - kind = (uintptr_t)of_device_get_match_data(&client->dev); - else - kind = i2c_match_id(lm75_ids, client)->driver_data; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA)) @@ -649,7 +641,7 @@ static int lm75_probe(struct i2c_client *client) return -ENOMEM; data->client = client; - data->kind = kind; + data->kind = (uintptr_t)i2c_get_match_data(client); data->vs = devm_regulator_get(dev, "vs"); if (IS_ERR(data->vs)) diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c index b739c354311b..8b53bb312069 100644 --- a/drivers/hwmon/lm78.c +++ b/drivers/hwmon/lm78.c @@ -627,8 +627,6 @@ static int lm78_i2c_detect(struct i2c_client *client, return -ENODEV; } -static const struct i2c_device_id lm78_i2c_id[]; - static int lm78_i2c_probe(struct i2c_client *client) { struct device *dev = &client->dev; @@ -640,7 +638,7 @@ static int lm78_i2c_probe(struct i2c_client *client) return -ENOMEM; data->client = client; - data->type = i2c_match_id(lm78_i2c_id, client)->driver_data; + data->type = (uintptr_t)i2c_get_match_data(client); /* Initialize the LM78 chip */ lm78_init_device(data); diff --git a/drivers/hwmon/lm83.c b/drivers/hwmon/lm83.c index b333c9bde4e6..f800fe2ef18b 100644 --- a/drivers/hwmon/lm83.c +++ b/drivers/hwmon/lm83.c @@ -417,13 +417,6 @@ static int lm83_detect(struct i2c_client *client, return 0; } -static const struct i2c_device_id lm83_id[] = { - { "lm83", lm83 }, - { "lm82", lm82 }, - { } -}; -MODULE_DEVICE_TABLE(i2c, lm83_id); - static int lm83_probe(struct i2c_client *client) { struct device *dev = &client->dev; @@ -438,7 +431,7 @@ static int lm83_probe(struct i2c_client *client) if (IS_ERR(data->regmap)) return PTR_ERR(data->regmap); - data->type = i2c_match_id(lm83_id, client)->driver_data; + 
data->type = (uintptr_t)i2c_get_match_data(client); hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, data, &lm83_chip_info, NULL); @@ -449,6 +442,13 @@ static int lm83_probe(struct i2c_client *client) * Driver data (common to all clients) */ +static const struct i2c_device_id lm83_id[] = { + { "lm83", lm83 }, + { "lm82", lm82 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, lm83_id); + static struct i2c_driver lm83_driver = { .class = I2C_CLASS_HWMON, .driver = { diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c index 68c210002357..1c244ed75122 100644 --- a/drivers/hwmon/lm85.c +++ b/drivers/hwmon/lm85.c @@ -1544,8 +1544,6 @@ static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info) return 0; } -static const struct i2c_device_id lm85_id[]; - static int lm85_probe(struct i2c_client *client) { struct device *dev = &client->dev; @@ -1558,10 +1556,7 @@ static int lm85_probe(struct i2c_client *client) return -ENOMEM; data->client = client; - if (client->dev.of_node) - data->type = (uintptr_t)of_device_get_match_data(&client->dev); - else - data->type = i2c_match_id(lm85_id, client)->driver_data; + data->type = (uintptr_t)i2c_get_match_data(client); mutex_init(&data->update_lock); /* Fill in the chip specific driver values */ diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c index e0d7454a301c..ca5c52b38c0f 100644 --- a/drivers/hwmon/lm90.c +++ b/drivers/hwmon/lm90.c @@ -1270,42 +1270,6 @@ static int lm90_update_device(struct device *dev) return 0; } -/* pec used for devices with PEC support */ -static ssize_t pec_show(struct device *dev, struct device_attribute *dummy, - char *buf) -{ - struct i2c_client *client = to_i2c_client(dev); - - return sprintf(buf, "%d\n", !!(client->flags & I2C_CLIENT_PEC)); -} - -static ssize_t pec_store(struct device *dev, struct device_attribute *dummy, - const char *buf, size_t count) -{ - struct i2c_client *client = to_i2c_client(dev); - long val; - int err; - - err = kstrtol(buf, 10, &val); - if (err < 0) - return err; - - switch (val) { - case 0: - client->flags &= ~I2C_CLIENT_PEC; - break; - case 1: - client->flags |= I2C_CLIENT_PEC; - break; - default: - return -EINVAL; - } - - return count; -} - -static DEVICE_ATTR_RW(pec); - static int lm90_temp_get_resolution(struct lm90_data *data, int index) { switch (index) { @@ -2659,11 +2623,6 @@ static irqreturn_t lm90_irq_thread(int irq, void *dev_id) return IRQ_NONE; } -static void lm90_remove_pec(void *dev) -{ - device_remove_file(dev, &dev_attr_pec); -} - static int lm90_probe_channel_from_dt(struct i2c_client *client, struct device_node *child, struct lm90_data *data) @@ -2764,10 +2723,7 @@ static int lm90_probe(struct i2c_client *client) INIT_WORK(&data->report_work, lm90_report_alarms); /* Set the device type */ - if (client->dev.of_node) - data->kind = (uintptr_t)of_device_get_match_data(&client->dev); - else - data->kind = i2c_match_id(lm90_id, client)->driver_data; + data->kind = (uintptr_t)i2c_get_match_data(client); /* * Different devices have different alarm bits triggering the @@ -2802,6 +2758,8 @@ static int lm90_probe(struct i2c_client *client) data->chip_config[0] |= HWMON_C_UPDATE_INTERVAL; if (data->flags & LM90_HAVE_FAULTQUEUE) data->chip_config[0] |= HWMON_C_TEMP_SAMPLES; + if (data->flags & (LM90_HAVE_PEC | LM90_HAVE_PARTIAL_PEC)) + data->chip_config[0] |= HWMON_C_PEC; data->info[1] = &data->temp_info; info = &data->temp_info; @@ -2878,19 +2836,6 @@ static int lm90_probe(struct i2c_client *client) return err; } - /* - * The 'pec' attribute is attached 
to the i2c device and thus created - * separately. - */ - if (data->flags & (LM90_HAVE_PEC | LM90_HAVE_PARTIAL_PEC)) { - err = device_create_file(dev, &dev_attr_pec); - if (err) - return err; - err = devm_add_action_or_reset(dev, lm90_remove_pec, dev); - if (err) - return err; - } - hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, data, &data->chip, NULL); diff --git a/drivers/hwmon/lm95234.c b/drivers/hwmon/lm95234.c index 67b9d7636ee4..9a7afdb49895 100644 --- a/drivers/hwmon/lm95234.c +++ b/drivers/hwmon/lm95234.c @@ -301,7 +301,8 @@ static ssize_t tcrit2_store(struct device *dev, struct device_attribute *attr, if (ret < 0) return ret; - val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, index ? 255 : 127); + val = DIV_ROUND_CLOSEST(clamp_val(val, 0, (index ? 255 : 127) * 1000), + 1000); mutex_lock(&data->update_lock); data->tcrit2[index] = val; @@ -350,7 +351,7 @@ static ssize_t tcrit1_store(struct device *dev, struct device_attribute *attr, if (ret < 0) return ret; - val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 255); + val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 255000), 1000); mutex_lock(&data->update_lock); data->tcrit1[index] = val; @@ -391,7 +392,7 @@ static ssize_t tcrit1_hyst_store(struct device *dev, if (ret < 0) return ret; - val = DIV_ROUND_CLOSEST(val, 1000); + val = DIV_ROUND_CLOSEST(clamp_val(val, -255000, 255000), 1000); val = clamp_val((int)data->tcrit1[index] - val, 0, 31); mutex_lock(&data->update_lock); @@ -431,7 +432,7 @@ static ssize_t offset_store(struct device *dev, struct device_attribute *attr, return ret; /* Accuracy is 1/2 degrees C */ - val = clamp_val(DIV_ROUND_CLOSEST(val, 500), -128, 127); + val = DIV_ROUND_CLOSEST(clamp_val(val, -64000, 63500), 500); mutex_lock(&data->update_lock); data->toffset[index] = val; @@ -677,10 +678,9 @@ static int lm95234_init_client(struct i2c_client *client) return 0; } -static const struct i2c_device_id lm95234_id[]; - static int lm95234_probe(struct i2c_client *client) { + enum chips type = (uintptr_t)i2c_get_match_data(client); struct device *dev = &client->dev; struct lm95234_data *data; struct device *hwmon_dev; @@ -699,7 +699,7 @@ static int lm95234_probe(struct i2c_client *client) return err; data->groups[0] = &lm95234_common_group; - if (i2c_match_id(lm95234_id, client)->driver_data == lm95234) + if (type == lm95234) data->groups[1] = &lm95234_group; hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name, diff --git a/drivers/hwmon/ltc2991.c b/drivers/hwmon/ltc2991.c index 06750bb93c23..573cd8f5721b 100644 --- a/drivers/hwmon/ltc2991.c +++ b/drivers/hwmon/ltc2991.c @@ -225,8 +225,8 @@ static umode_t ltc2991_is_visible(const void *data, case hwmon_temp: switch (attr) { case hwmon_temp_input: - if (st->temp_en[channel] || - channel == LTC2991_T_INT_CH_NR) + if (channel == LTC2991_T_INT_CH_NR || + st->temp_en[channel]) return 0444; break; } @@ -284,7 +284,6 @@ static const struct regmap_config ltc2991_regmap_config = { static int ltc2991_init(struct ltc2991_state *st, struct device *dev) { - struct fwnode_handle *child; int ret; u32 val, addr; u8 v5_v8_reg_data = 0, v1_v4_reg_data = 0; @@ -294,17 +293,13 @@ static int ltc2991_init(struct ltc2991_state *st, struct device *dev) return dev_err_probe(dev, ret, "failed to enable regulator\n"); - device_for_each_child_node(dev, child) { + device_for_each_child_node_scoped(dev, child) { ret = fwnode_property_read_u32(child, "reg", &addr); - if (ret < 0) { - fwnode_handle_put(child); + if (ret < 0) return ret; - } - if (addr > 3) { - 
fwnode_handle_put(child); + if (addr > 3) return -EINVAL; - } ret = fwnode_property_read_u32(child, "shunt-resistor-micro-ohms", diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c index aa38c45adc09..7ce9a89f93a0 100644 --- a/drivers/hwmon/max16065.c +++ b/drivers/hwmon/max16065.c @@ -493,8 +493,6 @@ static const struct attribute_group max16065_max_group = { .is_visible = max16065_secondary_is_visible, }; -static const struct i2c_device_id max16065_id[]; - static int max16065_probe(struct i2c_client *client) { struct i2c_adapter *adapter = client->adapter; @@ -505,7 +503,7 @@ static int max16065_probe(struct i2c_client *client) bool have_secondary; /* true if chip has secondary limits */ bool secondary_is_max = false; /* secondary limits reflect max */ int groups = 0; - const struct i2c_device_id *id = i2c_match_id(max16065_id, client); + enum chips chip = (uintptr_t)i2c_get_match_data(client); if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_READ_WORD_DATA)) @@ -518,9 +516,9 @@ static int max16065_probe(struct i2c_client *client) data->client = client; mutex_init(&data->update_lock); - data->num_adc = max16065_num_adc[id->driver_data]; - data->have_current = max16065_have_current[id->driver_data]; - have_secondary = max16065_have_secondary[id->driver_data]; + data->num_adc = max16065_num_adc[chip]; + data->have_current = max16065_have_current[chip]; + have_secondary = max16065_have_secondary[chip]; if (have_secondary) { val = i2c_smbus_read_byte_data(client, MAX16065_SW_ENABLE); diff --git a/drivers/hwmon/max1668.c b/drivers/hwmon/max1668.c index c4a02edefbee..9fc583ebb11b 100644 --- a/drivers/hwmon/max1668.c +++ b/drivers/hwmon/max1668.c @@ -391,8 +391,6 @@ static int max1668_detect(struct i2c_client *client, return 0; } -static const struct i2c_device_id max1668_id[]; - static int max1668_probe(struct i2c_client *client) { struct i2c_adapter *adapter = client->adapter; @@ -408,7 +406,7 @@ static int max1668_probe(struct i2c_client *client) return -ENOMEM; data->client = client; - data->type = i2c_match_id(max1668_id, client)->driver_data; + data->type = (uintptr_t)i2c_get_match_data(client); mutex_init(&data->update_lock); /* sysfs hooks */ diff --git a/drivers/hwmon/max31827.c b/drivers/hwmon/max31827.c index f8a13b30f100..48e8f8ba4d05 100644 --- a/drivers/hwmon/max31827.c +++ b/drivers/hwmon/max31827.c @@ -24,6 +24,7 @@ #define MAX31827_CONFIGURATION_1SHOT_MASK BIT(0) #define MAX31827_CONFIGURATION_CNV_RATE_MASK GENMASK(3, 1) +#define MAX31827_CONFIGURATION_PEC_EN_MASK BIT(4) #define MAX31827_CONFIGURATION_TIMEOUT_MASK BIT(5) #define MAX31827_CONFIGURATION_RESOLUTION_MASK GENMASK(7, 6) #define MAX31827_CONFIGURATION_ALRM_POL_MASK BIT(8) @@ -46,6 +47,11 @@ #define MAX31827_M_DGR_TO_16_BIT(x) (((x) << 4) / 1000) #define MAX31827_DEVICE_ENABLE(x) ((x) ? 0xA : 0x0) +/* + * The enum passed in the .data pointer of struct of_device_id must + * start with a value != 0 since that is a requirement for using + * device_get_match_data(). 
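+ * For illustration (a hypothetical match entry, not taken from this
+ * patch): { .compatible = "adi,max31827", .data = (void *)max31827 }
+ * keeps the cookie returned by device_get_match_data() non-NULL, so a
+ * failed lookup (NULL) stays distinguishable from a valid chip type.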
+ */ enum chips { max31827 = 1, max31828, max31829 }; enum max31827_cnv { @@ -382,7 +388,8 @@ static int max31827_write(struct device *dev, enum hwmon_sensor_types type, } case hwmon_chip: - if (attr == hwmon_chip_update_interval) { + switch (attr) { + case hwmon_chip_update_interval: if (!st->enable) return -EINVAL; @@ -410,14 +417,18 @@ static int max31827_write(struct device *dev, enum hwmon_sensor_types type, return ret; st->update_interval = val; - } - break; + return 0; + case hwmon_chip_pec: + return regmap_update_bits(st->regmap, MAX31827_CONFIGURATION_REG, + MAX31827_CONFIGURATION_PEC_EN_MASK, + val ? MAX31827_CONFIGURATION_PEC_EN_MASK : 0); + default: + return -EOPNOTSUPP; + } default: return -EOPNOTSUPP; } - - return 0; } static ssize_t temp1_resolution_show(struct device *dev, @@ -583,7 +594,7 @@ static const struct hwmon_channel_info *max31827_info[] = { HWMON_T_MIN_HYST | HWMON_T_MIN_ALARM | HWMON_T_MAX | HWMON_T_MAX_HYST | HWMON_T_MAX_ALARM), - HWMON_CHANNEL_INFO(chip, HWMON_C_UPDATE_INTERVAL), + HWMON_CHANNEL_INFO(chip, HWMON_C_UPDATE_INTERVAL | HWMON_C_PEC), NULL, }; diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c index cbb595fe47aa..f54720d3d2ce 100644 --- a/drivers/hwmon/max6639.c +++ b/drivers/hwmon/max6639.c @@ -21,6 +21,7 @@ #include <linux/mutex.h> #include <linux/platform_data/max6639.h> #include <linux/regmap.h> +#include <linux/util_macros.h> /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x2c, 0x2e, 0x2f, I2C_CLIENT_END }; @@ -55,13 +56,17 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2e, 0x2f, I2C_CLIENT_END }; #define MAX6639_GCONFIG_PWM_FREQ_HI 0x08 #define MAX6639_FAN_CONFIG1_PWM 0x80 - +#define MAX6639_FAN_CONFIG3_FREQ_MASK 0x03 #define MAX6639_FAN_CONFIG3_THERM_FULL_SPEED 0x40 #define MAX6639_NUM_CHANNELS 2 static const int rpm_ranges[] = { 2000, 4000, 8000, 16000 }; +/* Supported PWM frequency */ +static const unsigned int freq_table[] = { 20, 33, 50, 100, 5000, 8333, 12500, + 25000 }; + #define FAN_FROM_REG(val, rpm_range) ((val) == 0 || (val) == 255 ? 
\ 0 : (rpm_ranges[rpm_range] * 30) / (val)) #define TEMP_LIMIT_TO_REG(val) clamp_val((val) / 1000, 0, 255) @@ -71,21 +76,19 @@ static const int rpm_ranges[] = { 2000, 4000, 8000, 16000 }; */ struct max6639_data { struct regmap *regmap; + struct mutex update_lock; /* Register values initialized only once */ - u8 ppr; /* Pulses per rotation 0..3 for 1..4 ppr */ - u8 rpm_range; /* Index in above rpm_ranges table */ + u8 ppr[MAX6639_NUM_CHANNELS]; /* Pulses per rotation 1..4 */ + u8 rpm_range[MAX6639_NUM_CHANNELS]; /* Index in above rpm_ranges table */ /* Optional regulator for FAN supply */ struct regulator *reg; }; -static ssize_t temp_input_show(struct device *dev, - struct device_attribute *dev_attr, char *buf) +static int max6639_temp_read_input(struct device *dev, int channel, long *temp) { - long temp; struct max6639_data *data = dev_get_drvdata(dev); - struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr); unsigned int val; int res; @@ -93,251 +96,444 @@ static ssize_t temp_input_show(struct device *dev, * Lock isn't needed as MAX6639_REG_TEMP won't change for at least 250ms after reading * MAX6639_REG_TEMP_EXT */ - res = regmap_read(data->regmap, MAX6639_REG_TEMP_EXT(attr->index), &val); + res = regmap_read(data->regmap, MAX6639_REG_TEMP_EXT(channel), &val); if (res < 0) return res; - temp = val >> 5; - res = regmap_read(data->regmap, MAX6639_REG_TEMP(attr->index), &val); + *temp = val >> 5; + res = regmap_read(data->regmap, MAX6639_REG_TEMP(channel), &val); if (res < 0) return res; - temp |= val << 3; - temp *= 125; + *temp |= val << 3; + *temp *= 125; - return sprintf(buf, "%ld\n", temp); + return 0; } -static ssize_t temp_fault_show(struct device *dev, - struct device_attribute *dev_attr, char *buf) +static int max6639_temp_read_fault(struct device *dev, int channel, long *fault) { struct max6639_data *data = dev_get_drvdata(dev); - struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr); unsigned int val; int res; - res = regmap_read(data->regmap, MAX6639_REG_TEMP_EXT(attr->index), &val); + res = regmap_read(data->regmap, MAX6639_REG_TEMP_EXT(channel), &val); if (res < 0) return res; - return sprintf(buf, "%d\n", val & 1); + *fault = val & 1; + + return 0; } -static ssize_t temp_max_show(struct device *dev, - struct device_attribute *dev_attr, char *buf) +static int max6639_temp_read_max(struct device *dev, int channel, long *max) { - struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr); struct max6639_data *data = dev_get_drvdata(dev); unsigned int val; int res; - res = regmap_read(data->regmap, MAX6639_REG_THERM_LIMIT(attr->index), &val); + res = regmap_read(data->regmap, MAX6639_REG_THERM_LIMIT(channel), &val); if (res < 0) return res; - return sprintf(buf, "%d\n", (val * 1000)); + *max = (long)val * 1000; + + return 0; } -static ssize_t temp_max_store(struct device *dev, - struct device_attribute *dev_attr, - const char *buf, size_t count) +static int max6639_temp_read_crit(struct device *dev, int channel, long *crit) { - struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr); struct max6639_data *data = dev_get_drvdata(dev); - unsigned long val; + unsigned int val; int res; - res = kstrtoul(buf, 10, &val); - if (res) + res = regmap_read(data->regmap, MAX6639_REG_ALERT_LIMIT(channel), &val); + if (res < 0) return res; - regmap_write(data->regmap, MAX6639_REG_THERM_LIMIT(attr->index), - TEMP_LIMIT_TO_REG(val)); - return count; + *crit = (long)val * 1000; + + return 0; } -static ssize_t temp_crit_show(struct
device *dev, - struct device_attribute *dev_attr, char *buf) +static int max6639_temp_read_emergency(struct device *dev, int channel, long *emerg) { - struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr); struct max6639_data *data = dev_get_drvdata(dev); unsigned int val; int res; - res = regmap_read(data->regmap, MAX6639_REG_ALERT_LIMIT(attr->index), &val); + res = regmap_read(data->regmap, MAX6639_REG_OT_LIMIT(channel), &val); if (res < 0) return res; - return sprintf(buf, "%d\n", (val * 1000)); + *emerg = (long)val * 1000; + + return 0; } -static ssize_t temp_crit_store(struct device *dev, - struct device_attribute *dev_attr, - const char *buf, size_t count) +static int max6639_get_status(struct device *dev, unsigned int *status) { - struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr); struct max6639_data *data = dev_get_drvdata(dev); - unsigned long val; + unsigned int val; int res; - res = kstrtoul(buf, 10, &val); - if (res) + res = regmap_read(data->regmap, MAX6639_REG_STATUS, &val); + if (res < 0) return res; - regmap_write(data->regmap, MAX6639_REG_ALERT_LIMIT(attr->index), - TEMP_LIMIT_TO_REG(val)); - return count; + *status = val; + + return 0; } -static ssize_t temp_emergency_show(struct device *dev, - struct device_attribute *dev_attr, - char *buf) +static int max6639_temp_set_max(struct max6639_data *data, int channel, long val) { - struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr); - struct max6639_data *data = dev_get_drvdata(dev); - unsigned int val; int res; - res = regmap_read(data->regmap, MAX6639_REG_OT_LIMIT(attr->index), &val); - if (res < 0) - return res; - - return sprintf(buf, "%d\n", (val * 1000)); + res = regmap_write(data->regmap, MAX6639_REG_THERM_LIMIT(channel), + TEMP_LIMIT_TO_REG(val)); + return res; } -static ssize_t temp_emergency_store(struct device *dev, - struct device_attribute *dev_attr, - const char *buf, size_t count) +static int max6639_temp_set_crit(struct max6639_data *data, int channel, long val) { - struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr); - struct max6639_data *data = dev_get_drvdata(dev); - unsigned long val; int res; - res = kstrtoul(buf, 10, &val); - if (res) - return res; + res = regmap_write(data->regmap, MAX6639_REG_ALERT_LIMIT(channel), TEMP_LIMIT_TO_REG(val)); - regmap_write(data->regmap, MAX6639_REG_OT_LIMIT(attr->index), TEMP_LIMIT_TO_REG(val)); + return res; +} - return count; +static int max6639_temp_set_emergency(struct max6639_data *data, int channel, long val) +{ + int res; + + res = regmap_write(data->regmap, MAX6639_REG_OT_LIMIT(channel), TEMP_LIMIT_TO_REG(val)); + + return res; } -static ssize_t pwm_show(struct device *dev, struct device_attribute *dev_attr, - char *buf) +static int max6639_read_fan(struct device *dev, u32 attr, int channel, + long *fan_val) { - struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr); struct max6639_data *data = dev_get_drvdata(dev); unsigned int val; int res; - res = regmap_read(data->regmap, MAX6639_REG_TARGTDUTY(attr->index), &val); - if (res < 0) - return res; + switch (attr) { + case hwmon_fan_input: + res = regmap_read(data->regmap, MAX6639_REG_FAN_CNT(channel), &val); + if (res < 0) + return res; + *fan_val = FAN_FROM_REG(val, data->rpm_range[channel]); + return 0; + case hwmon_fan_fault: + res = max6639_get_status(dev, &val); + if (res < 0) + return res; + *fan_val = !!(val & BIT(1 - channel)); + return 0; + case hwmon_fan_pulses: + *fan_val = data->ppr[channel]; + return 0; + default: + return 
-EOPNOTSUPP; + } } - return sprintf(buf, "%d\n", val * 255 / 120); +static int max6639_set_ppr(struct max6639_data *data, int channel, u8 ppr) +{ + /* Decrement the PPR value and shift left by 6 to match the register format */ + return regmap_write(data->regmap, MAX6639_REG_FAN_PPR(channel), (ppr - 1) << 6); +} -static ssize_t pwm_store(struct device *dev, - struct device_attribute *dev_attr, const char *buf, - size_t count) +static int max6639_write_fan(struct device *dev, u32 attr, int channel, + long val) { - struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr); struct max6639_data *data = dev_get_drvdata(dev); - unsigned long val; - int res; + int err; - res = kstrtoul(buf, 10, &val); - if (res) - return res; + switch (attr) { + case hwmon_fan_pulses: + if (val <= 0 || val > 4) + return -EINVAL; - val = clamp_val(val, 0, 255); + mutex_lock(&data->update_lock); + /* Set Fan pulse per revolution */ + err = max6639_set_ppr(data, channel, val); + if (err < 0) { + mutex_unlock(&data->update_lock); + return err; + } + data->ppr[channel] = val; - regmap_write(data->regmap, MAX6639_REG_TARGTDUTY(attr->index), val * 120 / 255); + mutex_unlock(&data->update_lock); + return 0; + default: + return -EOPNOTSUPP; + } } - return count; +static umode_t max6639_fan_is_visible(const void *_data, u32 attr, int channel) +{ + switch (attr) { + case hwmon_fan_input: + case hwmon_fan_fault: + return 0444; + case hwmon_fan_pulses: + return 0644; + default: + return 0; + } } -static ssize_t fan_input_show(struct device *dev, - struct device_attribute *dev_attr, char *buf) +static int max6639_read_pwm(struct device *dev, u32 attr, int channel, + long *pwm_val) { struct max6639_data *data = dev_get_drvdata(dev); - struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr); unsigned int val; int res; + u8 i; + + switch (attr) { + case hwmon_pwm_input: + res = regmap_read(data->regmap, MAX6639_REG_TARGTDUTY(channel), &val); + if (res < 0) + return res; + *pwm_val = val * 255 / 120; + return 0; + case hwmon_pwm_freq: + mutex_lock(&data->update_lock); + res = regmap_read(data->regmap, MAX6639_REG_FAN_CONFIG3(channel), &val); + if (res < 0) { + mutex_unlock(&data->update_lock); + return res; + } + i = val & MAX6639_FAN_CONFIG3_FREQ_MASK; - res = regmap_read(data->regmap, MAX6639_REG_FAN_CNT(attr->index), &val); - if (res < 0) - return res; + res = regmap_read(data->regmap, MAX6639_REG_GCONFIG, &val); + if (res < 0) { + mutex_unlock(&data->update_lock); + return res; + } + + if (val & MAX6639_GCONFIG_PWM_FREQ_HI) + i |= 0x4; + i &= 0x7; + *pwm_val = freq_table[i]; - return sprintf(buf, "%d\n", FAN_FROM_REG(val, data->rpm_range)); + mutex_unlock(&data->update_lock); + return 0; + default: + return -EOPNOTSUPP; + } } -static ssize_t alarm_show(struct device *dev, - struct device_attribute *dev_attr, char *buf) +static int max6639_write_pwm(struct device *dev, u32 attr, int channel, + long val) { struct max6639_data *data = dev_get_drvdata(dev); - struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr); - unsigned int val; + int err; + u8 i; + + switch (attr) { + case hwmon_pwm_input: + if (val < 0 || val > 255) + return -EINVAL; + err = regmap_write(data->regmap, MAX6639_REG_TARGTDUTY(channel), + val * 120 / 255); + return err; + case hwmon_pwm_freq: + val = clamp_val(val, 0, 25000); + + i = find_closest(val, freq_table, ARRAY_SIZE(freq_table)); + + mutex_lock(&data->update_lock); + err = regmap_update_bits(data->regmap, MAX6639_REG_FAN_CONFIG3(channel), + MAX6639_FAN_CONFIG3_FREQ_MASK,
i); + if (err < 0) { + mutex_unlock(&data->update_lock); + return err; + } + + if (i >> 2) + err = regmap_set_bits(data->regmap, MAX6639_REG_GCONFIG, + MAX6639_GCONFIG_PWM_FREQ_HI); + else + err = regmap_clear_bits(data->regmap, MAX6639_REG_GCONFIG, + MAX6639_GCONFIG_PWM_FREQ_HI); + + mutex_unlock(&data->update_lock); + return err; + default: + return -EOPNOTSUPP; + } +} + +static umode_t max6639_pwm_is_visible(const void *_data, u32 attr, int channel) +{ + switch (attr) { + case hwmon_pwm_input: + case hwmon_pwm_freq: + return 0644; + default: + return 0; + } +} + +static int max6639_read_temp(struct device *dev, u32 attr, int channel, + long *val) +{ + unsigned int status; int res; - res = regmap_read(data->regmap, MAX6639_REG_STATUS, &val); - if (res < 0) + switch (attr) { + case hwmon_temp_input: + res = max6639_temp_read_input(dev, channel, val); return res; + case hwmon_temp_fault: + res = max6639_temp_read_fault(dev, channel, val); + return res; + case hwmon_temp_max: + res = max6639_temp_read_max(dev, channel, val); + return res; + case hwmon_temp_crit: + res = max6639_temp_read_crit(dev, channel, val); + return res; + case hwmon_temp_emergency: + res = max6639_temp_read_emergency(dev, channel, val); + return res; + case hwmon_temp_max_alarm: + res = max6639_get_status(dev, &status); + if (res < 0) + return res; + *val = !!(status & BIT(3 - channel)); + return 0; + case hwmon_temp_crit_alarm: + res = max6639_get_status(dev, &status); + if (res < 0) + return res; + *val = !!(status & BIT(7 - channel)); + return 0; + case hwmon_temp_emergency_alarm: + res = max6639_get_status(dev, &status); + if (res < 0) + return res; + *val = !!(status & BIT(5 - channel)); + return 0; + default: + return -EOPNOTSUPP; + } +} - return sprintf(buf, "%d\n", !!(val & (1 << attr->index))); +static int max6639_write_temp(struct device *dev, u32 attr, int channel, + long val) +{ + struct max6639_data *data = dev_get_drvdata(dev); + + switch (attr) { + case hwmon_temp_max: + return max6639_temp_set_max(data, channel, val); + case hwmon_temp_crit: + return max6639_temp_set_crit(data, channel, val); + case hwmon_temp_emergency: + return max6639_temp_set_emergency(data, channel, val); + default: + return -EOPNOTSUPP; + } } -static SENSOR_DEVICE_ATTR_RO(temp1_input, temp_input, 0); -static SENSOR_DEVICE_ATTR_RO(temp2_input, temp_input, 1); -static SENSOR_DEVICE_ATTR_RO(temp1_fault, temp_fault, 0); -static SENSOR_DEVICE_ATTR_RO(temp2_fault, temp_fault, 1); -static SENSOR_DEVICE_ATTR_RW(temp1_max, temp_max, 0); -static SENSOR_DEVICE_ATTR_RW(temp2_max, temp_max, 1); -static SENSOR_DEVICE_ATTR_RW(temp1_crit, temp_crit, 0); -static SENSOR_DEVICE_ATTR_RW(temp2_crit, temp_crit, 1); -static SENSOR_DEVICE_ATTR_RW(temp1_emergency, temp_emergency, 0); -static SENSOR_DEVICE_ATTR_RW(temp2_emergency, temp_emergency, 1); -static SENSOR_DEVICE_ATTR_RW(pwm1, pwm, 0); -static SENSOR_DEVICE_ATTR_RW(pwm2, pwm, 1); -static SENSOR_DEVICE_ATTR_RO(fan1_input, fan_input, 0); -static SENSOR_DEVICE_ATTR_RO(fan2_input, fan_input, 1); -static SENSOR_DEVICE_ATTR_RO(fan1_fault, alarm, 1); -static SENSOR_DEVICE_ATTR_RO(fan2_fault, alarm, 0); -static SENSOR_DEVICE_ATTR_RO(temp1_max_alarm, alarm, 3); -static SENSOR_DEVICE_ATTR_RO(temp2_max_alarm, alarm, 2); -static SENSOR_DEVICE_ATTR_RO(temp1_crit_alarm, alarm, 7); -static SENSOR_DEVICE_ATTR_RO(temp2_crit_alarm, alarm, 6); -static SENSOR_DEVICE_ATTR_RO(temp1_emergency_alarm, alarm, 5); -static SENSOR_DEVICE_ATTR_RO(temp2_emergency_alarm, alarm, 4); - - -static struct attribute *max6639_attrs[] 
= { - &sensor_dev_attr_temp1_input.dev_attr.attr, - &sensor_dev_attr_temp2_input.dev_attr.attr, - &sensor_dev_attr_temp1_fault.dev_attr.attr, - &sensor_dev_attr_temp2_fault.dev_attr.attr, - &sensor_dev_attr_temp1_max.dev_attr.attr, - &sensor_dev_attr_temp2_max.dev_attr.attr, - &sensor_dev_attr_temp1_crit.dev_attr.attr, - &sensor_dev_attr_temp2_crit.dev_attr.attr, - &sensor_dev_attr_temp1_emergency.dev_attr.attr, - &sensor_dev_attr_temp2_emergency.dev_attr.attr, - &sensor_dev_attr_pwm1.dev_attr.attr, - &sensor_dev_attr_pwm2.dev_attr.attr, - &sensor_dev_attr_fan1_input.dev_attr.attr, - &sensor_dev_attr_fan2_input.dev_attr.attr, - &sensor_dev_attr_fan1_fault.dev_attr.attr, - &sensor_dev_attr_fan2_fault.dev_attr.attr, - &sensor_dev_attr_temp1_max_alarm.dev_attr.attr, - &sensor_dev_attr_temp2_max_alarm.dev_attr.attr, - &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr, - &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr, - &sensor_dev_attr_temp1_emergency_alarm.dev_attr.attr, - &sensor_dev_attr_temp2_emergency_alarm.dev_attr.attr, +static umode_t max6639_temp_is_visible(const void *_data, u32 attr, int channel) +{ + switch (attr) { + case hwmon_temp_input: + case hwmon_temp_fault: + case hwmon_temp_max_alarm: + case hwmon_temp_crit_alarm: + case hwmon_temp_emergency_alarm: + return 0444; + case hwmon_temp_max: + case hwmon_temp_crit: + case hwmon_temp_emergency: + return 0644; + default: + return 0; + } +} + +static int max6639_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + switch (type) { + case hwmon_fan: + return max6639_read_fan(dev, attr, channel, val); + case hwmon_pwm: + return max6639_read_pwm(dev, attr, channel, val); + case hwmon_temp: + return max6639_read_temp(dev, attr, channel, val); + default: + return -EOPNOTSUPP; + } +} + +static int max6639_write(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long val) +{ + switch (type) { + case hwmon_fan: + return max6639_write_fan(dev, attr, channel, val); + case hwmon_pwm: + return max6639_write_pwm(dev, attr, channel, val); + case hwmon_temp: + return max6639_write_temp(dev, attr, channel, val); + default: + return -EOPNOTSUPP; + } +} + +static umode_t max6639_is_visible(const void *data, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + switch (type) { + case hwmon_fan: + return max6639_fan_is_visible(data, attr, channel); + case hwmon_pwm: + return max6639_pwm_is_visible(data, attr, channel); + case hwmon_temp: + return max6639_temp_is_visible(data, attr, channel); + default: + return 0; + } +} + +static const struct hwmon_channel_info * const max6639_info[] = { + HWMON_CHANNEL_INFO(fan, + HWMON_F_INPUT | HWMON_F_FAULT | HWMON_F_PULSES, + HWMON_F_INPUT | HWMON_F_FAULT | HWMON_F_PULSES), + HWMON_CHANNEL_INFO(pwm, + HWMON_PWM_INPUT | HWMON_PWM_FREQ, + HWMON_PWM_INPUT | HWMON_PWM_FREQ), + HWMON_CHANNEL_INFO(temp, + HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_MAX | HWMON_T_MAX_ALARM | + HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_EMERGENCY | + HWMON_T_EMERGENCY_ALARM, + HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_MAX | HWMON_T_MAX_ALARM | + HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_EMERGENCY | + HWMON_T_EMERGENCY_ALARM), NULL }; -ATTRIBUTE_GROUPS(max6639); + +static const struct hwmon_ops max6639_hwmon_ops = { + .is_visible = max6639_is_visible, + .read = max6639_read, + .write = max6639_write, +}; + +static const struct hwmon_chip_info max6639_chip_info = { + .ops = &max6639_hwmon_ops, + .info = max6639_info, +}; /* * returns respective index in rpm_ranges table 
@@ -355,11 +551,6 @@ static int rpm_range_to_reg(int range) return 1; /* default: 4000 RPM */ } -static int max6639_set_ppr(struct max6639_data *data, u8 channel, u8 ppr) -{ - return regmap_write(data->regmap, MAX6639_REG_FAN_PPR(channel), ppr << 6); -} - static int max6639_init_client(struct i2c_client *client, struct max6639_data *data) { @@ -380,30 +571,34 @@ static int max6639_init_client(struct i2c_client *client, ppr = max6639_info->ppr; else ppr = 2; - ppr -= 1; + + data->ppr[0] = ppr; + data->ppr[1] = ppr; if (max6639_info) rpm_range = rpm_range_to_reg(max6639_info->rpm_range); - data->rpm_range = rpm_range; + data->rpm_range[0] = rpm_range; + data->rpm_range[1] = rpm_range; for (i = 0; i < MAX6639_NUM_CHANNELS; i++) { - /* Set Fan pulse per revolution */ - err = max6639_set_ppr(data, i, ppr); + err = max6639_set_ppr(data, i, data->ppr[i]); if (err) return err; /* Fans config PWM, RPM */ err = regmap_write(data->regmap, MAX6639_REG_FAN_CONFIG1(i), - MAX6639_FAN_CONFIG1_PWM | rpm_range); + MAX6639_FAN_CONFIG1_PWM | data->rpm_range[i]); if (err) return err; /* Fans PWM polarity high by default */ - if (max6639_info && max6639_info->pwm_polarity == 0) - err = regmap_write(data->regmap, MAX6639_REG_FAN_CONFIG2a(i), 0x00); - else - err = regmap_write(data->regmap, MAX6639_REG_FAN_CONFIG2a(i), 0x02); + if (max6639_info) { + if (max6639_info->pwm_polarity == 0) + err = regmap_write(data->regmap, MAX6639_REG_FAN_CONFIG2a(i), 0x00); + else + err = regmap_write(data->regmap, MAX6639_REG_FAN_CONFIG2a(i), 0x02); + } if (err) return err; @@ -529,14 +724,17 @@ static int max6639_probe(struct i2c_client *client) } } + mutex_init(&data->update_lock); + /* Initialize the max6639 chip */ err = max6639_init_client(client, data); if (err < 0) return err; - hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name, - data, - max6639_groups); + hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, + data, &max6639_chip_info, + NULL); + return PTR_ERR_OR_ZERO(hwmon_dev); } diff --git a/drivers/hwmon/max6642.c b/drivers/hwmon/max6642.c deleted file mode 100644 index 9302ab233910..000000000000 --- a/drivers/hwmon/max6642.c +++ /dev/null @@ -1,314 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Driver for +/-1 degree C, SMBus-Compatible Remote/Local Temperature Sensor - * with Overtemperature Alarm - * - * Copyright (C) 2011 AppearTV AS - * - * Derived from: - * - * Based on the max1619 driver. - * Copyright (C) 2003-2004 Oleksij Rempel <bug-track@fisher-privat.net> - * Jean Delvare <jdelvare@suse.de> - * - * The MAX6642 is a sensor chip made by Maxim. - * It reports up to two temperatures (its own plus up to - * one external one). 
Complete datasheet can be - * obtained from Maxim's website at: - * http://datasheets.maxim-ic.com/en/ds/MAX6642.pdf - */ - - -#include <linux/module.h> -#include <linux/init.h> -#include <linux/slab.h> -#include <linux/jiffies.h> -#include <linux/i2c.h> -#include <linux/hwmon.h> -#include <linux/hwmon-sysfs.h> -#include <linux/err.h> -#include <linux/mutex.h> -#include <linux/sysfs.h> - -static const unsigned short normal_i2c[] = { - 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, I2C_CLIENT_END }; - -/* - * The MAX6642 registers - */ - -#define MAX6642_REG_R_MAN_ID 0xFE -#define MAX6642_REG_R_CONFIG 0x03 -#define MAX6642_REG_W_CONFIG 0x09 -#define MAX6642_REG_R_STATUS 0x02 -#define MAX6642_REG_R_LOCAL_TEMP 0x00 -#define MAX6642_REG_R_LOCAL_TEMPL 0x11 -#define MAX6642_REG_R_LOCAL_HIGH 0x05 -#define MAX6642_REG_W_LOCAL_HIGH 0x0B -#define MAX6642_REG_R_REMOTE_TEMP 0x01 -#define MAX6642_REG_R_REMOTE_TEMPL 0x10 -#define MAX6642_REG_R_REMOTE_HIGH 0x07 -#define MAX6642_REG_W_REMOTE_HIGH 0x0D - -/* - * Conversions - */ - -static int temp_from_reg10(int val) -{ - return val * 250; -} - -static int temp_from_reg(int val) -{ - return val * 1000; -} - -static int temp_to_reg(int val) -{ - return val / 1000; -} - -/* - * Client data (each client gets its own) - */ - -struct max6642_data { - struct i2c_client *client; - struct mutex update_lock; - bool valid; /* zero until following fields are valid */ - unsigned long last_updated; /* in jiffies */ - - /* registers values */ - u16 temp_input[2]; /* local/remote */ - u16 temp_high[2]; /* local/remote */ - u8 alarms; -}; - -/* - * Real code - */ - -static void max6642_init_client(struct max6642_data *data, - struct i2c_client *client) -{ - u8 config; - - /* - * Start the conversions. - */ - config = i2c_smbus_read_byte_data(client, MAX6642_REG_R_CONFIG); - if (config & 0x40) - i2c_smbus_write_byte_data(client, MAX6642_REG_W_CONFIG, - config & 0xBF); /* run */ - - data->temp_high[0] = i2c_smbus_read_byte_data(client, - MAX6642_REG_R_LOCAL_HIGH); - data->temp_high[1] = i2c_smbus_read_byte_data(client, - MAX6642_REG_R_REMOTE_HIGH); -} - -/* Return 0 if detection is successful, -ENODEV otherwise */ -static int max6642_detect(struct i2c_client *client, - struct i2c_board_info *info) -{ - struct i2c_adapter *adapter = client->adapter; - u8 reg_config, reg_status, man_id; - - if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) - return -ENODEV; - - /* identification */ - man_id = i2c_smbus_read_byte_data(client, MAX6642_REG_R_MAN_ID); - if (man_id != 0x4D) - return -ENODEV; - - /* sanity check */ - if (i2c_smbus_read_byte_data(client, 0x04) != 0x4D - || i2c_smbus_read_byte_data(client, 0x06) != 0x4D - || i2c_smbus_read_byte_data(client, 0xff) != 0x4D) - return -ENODEV; - - /* - * We read the config and status register, the 4 lower bits in the - * config register should be zero and bit 5, 3, 1 and 0 should be - * zero in the status register. 
- */ - reg_config = i2c_smbus_read_byte_data(client, MAX6642_REG_R_CONFIG); - if ((reg_config & 0x0f) != 0x00) - return -ENODEV; - - /* in between, another round of sanity checks */ - if (i2c_smbus_read_byte_data(client, 0x04) != reg_config - || i2c_smbus_read_byte_data(client, 0x06) != reg_config - || i2c_smbus_read_byte_data(client, 0xff) != reg_config) - return -ENODEV; - - reg_status = i2c_smbus_read_byte_data(client, MAX6642_REG_R_STATUS); - if ((reg_status & 0x2b) != 0x00) - return -ENODEV; - - strscpy(info->type, "max6642", I2C_NAME_SIZE); - - return 0; -} - -static struct max6642_data *max6642_update_device(struct device *dev) -{ - struct max6642_data *data = dev_get_drvdata(dev); - struct i2c_client *client = data->client; - u16 val, tmp; - - mutex_lock(&data->update_lock); - - if (time_after(jiffies, data->last_updated + HZ) || !data->valid) { - dev_dbg(dev, "Updating max6642 data.\n"); - val = i2c_smbus_read_byte_data(client, - MAX6642_REG_R_LOCAL_TEMPL); - tmp = (val >> 6) & 3; - val = i2c_smbus_read_byte_data(client, - MAX6642_REG_R_LOCAL_TEMP); - val = (val << 2) | tmp; - data->temp_input[0] = val; - val = i2c_smbus_read_byte_data(client, - MAX6642_REG_R_REMOTE_TEMPL); - tmp = (val >> 6) & 3; - val = i2c_smbus_read_byte_data(client, - MAX6642_REG_R_REMOTE_TEMP); - val = (val << 2) | tmp; - data->temp_input[1] = val; - data->alarms = i2c_smbus_read_byte_data(client, - MAX6642_REG_R_STATUS); - - data->last_updated = jiffies; - data->valid = true; - } - - mutex_unlock(&data->update_lock); - - return data; -} - -/* - * Sysfs stuff - */ - -static ssize_t temp_max10_show(struct device *dev, - struct device_attribute *dev_attr, char *buf) -{ - struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr); - struct max6642_data *data = max6642_update_device(dev); - - return sprintf(buf, "%d\n", - temp_from_reg10(data->temp_input[attr->index])); -} - -static ssize_t temp_max_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(attr); - struct max6642_data *data = max6642_update_device(dev); - - return sprintf(buf, "%d\n", temp_from_reg(data->temp_high[attr2->nr])); -} - -static ssize_t temp_max_store(struct device *dev, - struct device_attribute *attr, const char *buf, - size_t count) -{ - struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(attr); - struct max6642_data *data = dev_get_drvdata(dev); - unsigned long val; - int err; - - err = kstrtoul(buf, 10, &val); - if (err < 0) - return err; - - mutex_lock(&data->update_lock); - data->temp_high[attr2->nr] = clamp_val(temp_to_reg(val), 0, 255); - i2c_smbus_write_byte_data(data->client, attr2->index, - data->temp_high[attr2->nr]); - mutex_unlock(&data->update_lock); - return count; -} - -static ssize_t alarm_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - int bitnr = to_sensor_dev_attr(attr)->index; - struct max6642_data *data = max6642_update_device(dev); - return sprintf(buf, "%d\n", (data->alarms >> bitnr) & 1); -} - -static SENSOR_DEVICE_ATTR_RO(temp1_input, temp_max10, 0); -static SENSOR_DEVICE_ATTR_RO(temp2_input, temp_max10, 1); -static SENSOR_DEVICE_ATTR_2_RW(temp1_max, temp_max, 0, - MAX6642_REG_W_LOCAL_HIGH); -static SENSOR_DEVICE_ATTR_2_RW(temp2_max, temp_max, 1, - MAX6642_REG_W_REMOTE_HIGH); -static SENSOR_DEVICE_ATTR_RO(temp2_fault, alarm, 2); -static SENSOR_DEVICE_ATTR_RO(temp1_max_alarm, alarm, 6); -static SENSOR_DEVICE_ATTR_RO(temp2_max_alarm, alarm, 4); - -static struct attribute 
*max6642_attrs[] = { - &sensor_dev_attr_temp1_input.dev_attr.attr, - &sensor_dev_attr_temp2_input.dev_attr.attr, - &sensor_dev_attr_temp1_max.dev_attr.attr, - &sensor_dev_attr_temp2_max.dev_attr.attr, - - &sensor_dev_attr_temp2_fault.dev_attr.attr, - &sensor_dev_attr_temp1_max_alarm.dev_attr.attr, - &sensor_dev_attr_temp2_max_alarm.dev_attr.attr, - NULL -}; -ATTRIBUTE_GROUPS(max6642); - -static int max6642_probe(struct i2c_client *client) -{ - struct device *dev = &client->dev; - struct max6642_data *data; - struct device *hwmon_dev; - - data = devm_kzalloc(dev, sizeof(struct max6642_data), GFP_KERNEL); - if (!data) - return -ENOMEM; - - data->client = client; - mutex_init(&data->update_lock); - - /* Initialize the MAX6642 chip */ - max6642_init_client(data, client); - - hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev, - client->name, data, - max6642_groups); - return PTR_ERR_OR_ZERO(hwmon_dev); -} - -/* - * Driver data (common to all clients) - */ - -static const struct i2c_device_id max6642_id[] = { - { "max6642" }, - { } -}; -MODULE_DEVICE_TABLE(i2c, max6642_id); - -static struct i2c_driver max6642_driver = { - .class = I2C_CLASS_HWMON, - .driver = { - .name = "max6642", - }, - .probe = max6642_probe, - .id_table = max6642_id, - .detect = max6642_detect, - .address_list = normal_i2c, -}; - -module_i2c_driver(max6642_driver); - -MODULE_AUTHOR("Per Dalen <per.dalen@appeartv.com>"); -MODULE_DESCRIPTION("MAX6642 sensor driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/max6697.c b/drivers/hwmon/max6697.c index d161ba0e7813..20981f9443dd 100644 --- a/drivers/hwmon/max6697.c +++ b/drivers/hwmon/max6697.c @@ -311,6 +311,7 @@ static ssize_t temp_store(struct device *dev, return ret; mutex_lock(&data->update_lock); + temp = clamp_val(temp, -1000000, 1000000); /* prevent underflow */ temp = DIV_ROUND_CLOSEST(temp, 1000) + data->temp_offset; temp = clamp_val(temp, 0, data->type == max6581 ? 
255 : 127); data->temp[nr][index] = temp; @@ -428,14 +429,14 @@ static SENSOR_DEVICE_ATTR_RO(temp6_max_alarm, alarm, 20); static SENSOR_DEVICE_ATTR_RO(temp7_max_alarm, alarm, 21); static SENSOR_DEVICE_ATTR_RO(temp8_max_alarm, alarm, 23); -static SENSOR_DEVICE_ATTR_RO(temp1_crit_alarm, alarm, 14); +static SENSOR_DEVICE_ATTR_RO(temp1_crit_alarm, alarm, 15); static SENSOR_DEVICE_ATTR_RO(temp2_crit_alarm, alarm, 8); static SENSOR_DEVICE_ATTR_RO(temp3_crit_alarm, alarm, 9); static SENSOR_DEVICE_ATTR_RO(temp4_crit_alarm, alarm, 10); static SENSOR_DEVICE_ATTR_RO(temp5_crit_alarm, alarm, 11); static SENSOR_DEVICE_ATTR_RO(temp6_crit_alarm, alarm, 12); static SENSOR_DEVICE_ATTR_RO(temp7_crit_alarm, alarm, 13); -static SENSOR_DEVICE_ATTR_RO(temp8_crit_alarm, alarm, 15); +static SENSOR_DEVICE_ATTR_RO(temp8_crit_alarm, alarm, 14); static SENSOR_DEVICE_ATTR_RO(temp2_fault, alarm, 1); static SENSOR_DEVICE_ATTR_RO(temp3_fault, alarm, 2); @@ -684,8 +685,6 @@ done: return 0; } -static const struct i2c_device_id max6697_id[]; - static int max6697_probe(struct i2c_client *client) { struct i2c_adapter *adapter = client->adapter; @@ -701,10 +700,7 @@ static int max6697_probe(struct i2c_client *client) if (!data) return -ENOMEM; - if (client->dev.of_node) - data->type = (uintptr_t)of_device_get_match_data(&client->dev); - else - data->type = i2c_match_id(max6697_id, client)->driver_data; + data->type = (uintptr_t)i2c_get_match_data(client); data->chip = &max6697_chip_data[data->type]; data->client = client; mutex_init(&data->update_lock); diff --git a/drivers/hwmon/mcp3021.c b/drivers/hwmon/mcp3021.c index 9814eaf24564..bcddf6804d3a 100644 --- a/drivers/hwmon/mcp3021.c +++ b/drivers/hwmon/mcp3021.c @@ -116,13 +116,12 @@ static const struct hwmon_chip_info mcp3021_chip_info = { .info = mcp3021_info, }; -static const struct i2c_device_id mcp3021_id[]; - static int mcp3021_probe(struct i2c_client *client) { struct mcp3021_data *data = NULL; struct device_node *np = client->dev.of_node; struct device *hwmon_dev; + enum chips type; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) return -ENODEV; @@ -149,7 +148,8 @@ static int mcp3021_probe(struct i2c_client *client) data->vdd = MCP3021_VDD_REF_DEFAULT; } - switch (i2c_match_id(mcp3021_id, client)->driver_data) { + type = (uintptr_t)i2c_get_match_data(client); + switch (type) { case mcp3021: data->sar_shift = MCP3021_SAR_SHIFT; data->sar_mask = MCP3021_SAR_MASK; diff --git a/drivers/hwmon/mr75203.c b/drivers/hwmon/mr75203.c index 50a8b9c3f94d..7848198f8996 100644 --- a/drivers/hwmon/mr75203.c +++ b/drivers/hwmon/mr75203.c @@ -925,4 +925,5 @@ static struct platform_driver moortec_pvt_driver = { }; module_platform_driver(moortec_pvt_driver); +MODULE_DESCRIPTION("Moortec Semiconductor MR75203 PVT Controller driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c index 0d016fedb9c2..f71615e06a8f 100644 --- a/drivers/hwmon/nct6683.c +++ b/drivers/hwmon/nct6683.c @@ -1236,6 +1236,8 @@ static int nct6683_probe(struct platform_device *pdev) default: if (!force) return -ENODEV; + dev_warn(dev, "Enabling support for unknown customer ID 0x%04x\n", data->customer_id); + break; } nct6683_init_device(data); diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c index 9fbab8f02334..934fed3dd586 100644 --- a/drivers/hwmon/nct6775-core.c +++ b/drivers/hwmon/nct6775-core.c @@ -2262,7 +2262,7 @@ store_temp_offset(struct device *dev, struct device_attribute *attr, if (err < 0) return err; - val = 
clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127); + val = DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), 1000); mutex_lock(&data->update_lock); data->temp_offset[nr] = val; diff --git a/drivers/hwmon/nct6775.h b/drivers/hwmon/nct6775.h index d31e7a030216..296eff99d003 100644 --- a/drivers/hwmon/nct6775.h +++ b/drivers/hwmon/nct6775.h @@ -4,7 +4,7 @@ #include <linux/types.h> -enum kinds { nct6106 = 1, nct6116, nct6775, nct6776, nct6779, nct6791, nct6792, +enum kinds { nct6106, nct6116, nct6775, nct6776, nct6779, nct6791, nct6792, nct6793, nct6795, nct6796, nct6797, nct6798, nct6799 }; enum pwm_enable { off, manual, thermal_cruise, speed_cruise, sf3, sf4 }; diff --git a/drivers/hwmon/nzxt-smart2.c b/drivers/hwmon/nzxt-smart2.c index 7aa586eb74be..df6fa72a6b59 100644 --- a/drivers/hwmon/nzxt-smart2.c +++ b/drivers/hwmon/nzxt-smart2.c @@ -799,6 +799,7 @@ static const struct hid_device_id nzxt_smart2_hid_id_table[] = { { HID_USB_DEVICE(0x1e71, 0x2010) }, /* NZXT RGB & Fan Controller */ { HID_USB_DEVICE(0x1e71, 0x2011) }, /* NZXT RGB & Fan Controller (6 RGB) */ { HID_USB_DEVICE(0x1e71, 0x2019) }, /* NZXT RGB & Fan Controller (6 RGB) */ + { HID_USB_DEVICE(0x1e71, 0x2020) }, /* NZXT RGB & Fan Controller (6 RGB) */ {}, }; diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig index 08e82c457356..a4f02cad92fd 100644 --- a/drivers/hwmon/pmbus/Kconfig +++ b/drivers/hwmon/pmbus/Kconfig @@ -337,6 +337,15 @@ config SENSORS_MP2888 This driver can also be built as a module. If so, the module will be called mp2888. +config SENSORS_MP2891 + tristate "MPS MP2891" + help + If you say yes here you get hardware monitoring support for MPS + MP2891 Dual Loop Digital Multi-Phase Controller. + + This driver can also be built as a module. If so, the module will + be called mp2891. + config SENSORS_MP2975 tristate "MPS MP2975" help @@ -346,6 +355,15 @@ config SENSORS_MP2975 This driver can also be built as a module. If so, the module will be called mp2975. +config SENSORS_MP2993 + tristate "MPS MP2993" + help + If you say yes here you get hardware monitoring support for MPS + MP2993 Dual Loop Digital Multi-Phase Controller. + + This driver can also be built as a module. If so, the module will + be called mp2993. + config SENSORS_MP2975_REGULATOR depends on SENSORS_MP2975 && REGULATOR bool "Regulator support for MPS MP2975" @@ -362,6 +380,15 @@ config SENSORS_MP5023 This driver can also be built as a module. If so, the module will be called mp5023. +config SENSORS_MP5920 + tristate "MPS MP5920" + help + If you say yes here you get hardware monitoring support for MPS + MP5920. + + This driver can also be built as a module. If so, the module will + be called mp5920. + config SENSORS_MP5990 tristate "MPS MP5990" help @@ -371,6 +398,15 @@ config SENSORS_MP5990 This driver can also be built as a module. If so, the module will be called mp5990. +config SENSORS_MP9941 + tristate "MPS MP9941" + help + If you say yes here you get hardware monitoring support for MPS + MP9941. + + This driver can also be built as a module. If so, the module will + be called mp9941.
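The clamp_val()/DIV_ROUND_CLOSEST() reorderings in the lm95234 and nct6775-core hunks above share one motivation: DIV_ROUND_CLOSEST() biases its argument by half the divisor before dividing, so an unbounded value coming straight from kstrtol() can overflow in that addition before a post-division clamp ever runs. Bounding the raw milli-unit input first keeps the rounding arithmetic in range. A minimal userspace sketch of the hazard, with simplified stand-ins for the kernel macros (illustrative only, not kernel code):

#include <limits.h>
#include <stdio.h>

/* Simplified, signed-only stand-ins for the kernel macros. */
#define DIV_ROUND_CLOSEST(x, d) ((x) >= 0 ? ((x) + (d) / 2) / (d) : ((x) - (d) / 2) / (d))
#define clamp_val(v, lo, hi)    ((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))

int main(void)
{
	long val = LONG_MIN;	/* hostile sysfs input */

	/* Old order: LONG_MIN - 500 underflows before the clamp runs. */
	/* long bad = clamp_val(DIV_ROUND_CLOSEST(val, 1000L), -128L, 127L); */

	/* New order: bound the raw millidegree value, then round. */
	long good = DIV_ROUND_CLOSEST(clamp_val(val, -128000L, 127000L), 1000L);

	printf("%ld\n", good);	/* prints -128 */
	return 0;
}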
+ config SENSORS_MPQ7932_REGULATOR bool "Regulator support for MPQ7932" depends on SENSORS_MPQ7932 && REGULATOR diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile index 2279b3327bbf..d00bcc758b97 100644 --- a/drivers/hwmon/pmbus/Makefile +++ b/drivers/hwmon/pmbus/Makefile @@ -36,9 +36,13 @@ obj-$(CONFIG_SENSORS_MAX34440) += max34440.o obj-$(CONFIG_SENSORS_MAX8688) += max8688.o obj-$(CONFIG_SENSORS_MP2856) += mp2856.o obj-$(CONFIG_SENSORS_MP2888) += mp2888.o +obj-$(CONFIG_SENSORS_MP2891) += mp2891.o obj-$(CONFIG_SENSORS_MP2975) += mp2975.o +obj-$(CONFIG_SENSORS_MP2993) += mp2993.o obj-$(CONFIG_SENSORS_MP5023) += mp5023.o +obj-$(CONFIG_SENSORS_MP5920) += mp5920.o obj-$(CONFIG_SENSORS_MP5990) += mp5990.o +obj-$(CONFIG_SENSORS_MP9941) += mp9941.o obj-$(CONFIG_SENSORS_MPQ7932) += mpq7932.o obj-$(CONFIG_SENSORS_MPQ8785) += mpq8785.o obj-$(CONFIG_SENSORS_PLI1209BC) += pli1209bc.o diff --git a/drivers/hwmon/pmbus/lm25066.c b/drivers/hwmon/pmbus/lm25066.c index cfffa4cdc0df..c36c124d1a2d 100644 --- a/drivers/hwmon/pmbus/lm25066.c +++ b/drivers/hwmon/pmbus/lm25066.c @@ -17,7 +17,7 @@ #include <linux/of.h> #include "pmbus.h" -enum chips { lm25056 = 1, lm25066, lm5064, lm5066, lm5066i }; +enum chips { lm25056, lm25066, lm5064, lm5066, lm5066i }; #define LM25066_READ_VAUX 0xd0 #define LM25066_MFR_READ_IIN 0xd1 diff --git a/drivers/hwmon/pmbus/ltc4286.c b/drivers/hwmon/pmbus/ltc4286.c index 9e7ceeb7e789..aabd0bcdfeee 100644 --- a/drivers/hwmon/pmbus/ltc4286.c +++ b/drivers/hwmon/pmbus/ltc4286.c @@ -58,8 +58,8 @@ static struct pmbus_driver_info ltc4286_info = { }; static const struct i2c_device_id ltc4286_id[] = { - { "ltc4286", 0 }, - { "ltc4287", 1 }, + { "ltc4286", }, + { "ltc4287", }, {} }; MODULE_DEVICE_TABLE(i2c, ltc4286_id); diff --git a/drivers/hwmon/pmbus/mp2856.c b/drivers/hwmon/pmbus/mp2856.c index 6969350f5d7d..41bb86667091 100644 --- a/drivers/hwmon/pmbus/mp2856.c +++ b/drivers/hwmon/pmbus/mp2856.c @@ -46,7 +46,7 @@ #define MP2856_PAGE_NUM 2 -enum chips { mp2856 = 1, mp2857 }; +enum chips { mp2856, mp2857 }; static const int mp2856_max_phases[][MP2856_PAGE_NUM] = { [mp2856] = { MP2856_MAX_PHASE_RAIL1, MP2856_MAX_PHASE_RAIL2 }, @@ -66,7 +66,6 @@ struct mp2856_data { int vout_format[MP2856_PAGE_NUM]; int curr_sense_gain[MP2856_PAGE_NUM]; int max_phases[MP2856_PAGE_NUM]; - enum chips chip_id; }; #define to_mp2856_data(x) container_of(x, struct mp2856_data, info) @@ -397,6 +396,7 @@ static int mp2856_probe(struct i2c_client *client) { struct pmbus_driver_info *info; struct mp2856_data *data; + enum chips chip_id; int ret; data = devm_kzalloc(&client->dev, sizeof(struct mp2856_data), @@ -404,9 +404,9 @@ static int mp2856_probe(struct i2c_client *client) if (!data) return -ENOMEM; - data->chip_id = (enum chips)(uintptr_t)i2c_get_match_data(client); + chip_id = (kernel_ulong_t)i2c_get_match_data(client); - memcpy(data->max_phases, mp2856_max_phases[data->chip_id], + memcpy(data->max_phases, mp2856_max_phases[chip_id], sizeof(data->max_phases)); memcpy(&data->info, &mp2856_info, sizeof(*info)); diff --git a/drivers/hwmon/pmbus/mp2891.c b/drivers/hwmon/pmbus/mp2891.c new file mode 100644 index 000000000000..bb28b15a9103 --- /dev/null +++ b/drivers/hwmon/pmbus/mp2891.c @@ -0,0 +1,600 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Hardware monitoring driver for MPS Multi-phase Digital VR Controllers (MP2891) + */ + +#include <linux/bitfield.h> +#include <linux/i2c.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include "pmbus.h" + +/* + * Vendor specific
registers. The registers MFR_SVI3_IOUT_PRT(0x65), + * MFR_VOUT_LOOP_CTRL(0xBD), READ_PIN_EST(0x94) and READ_IIN_EST(0x95) + * redefine standard PMBus registers. The MFR_SVI3_IOUT_PRT(0x65) + * is used to identify the iout scale and the MFR_VOUT_LOOP_CTRL(0xBD) + * is used to identify the vout scale. The READ_PIN_EST(0x94) is used + * to read input power per rail. The MP2891 does not have the standard + * READ_IIN register(0x89); the iin telemetry can be obtained through + * the vendor redefined register READ_IIN_EST(0x95). + */ +#define MFR_VOUT_LOOP_CTRL 0xBD +#define READ_PIN_EST 0x94 +#define READ_IIN_EST 0x95 +#define MFR_SVI3_IOUT_PRT 0x65 + +#define MP2891_TEMP_LIMIT_OFFSET 40 +#define MP2891_PIN_LIMIT_UINT 2 +#define MP2891_IOUT_LIMIT_UINT 8 +#define MP2891_IOUT_SCALE_DIV 32 +#define MP2891_VOUT_SCALE_DIV 100 +#define MP2891_OVUV_DELTA_SCALE 50 +#define MP2891_OV_LIMIT_SCALE 20 +#define MP2891_UV_LIMIT_SCALE 5 + +#define MP2891_PAGE_NUM 2 + +#define MP2891_RAIL1_FUNC (PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | \ + PMBUS_HAVE_IOUT | PMBUS_HAVE_TEMP | \ + PMBUS_HAVE_POUT | PMBUS_HAVE_PIN | \ + PMBUS_HAVE_IIN | PMBUS_HAVE_STATUS_VOUT | \ + PMBUS_HAVE_STATUS_IOUT | \ + PMBUS_HAVE_STATUS_INPUT | \ + PMBUS_HAVE_STATUS_TEMP) + +#define MP2891_RAIL2_FUNC (PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT | \ + PMBUS_HAVE_TEMP | PMBUS_HAVE_POUT | \ + PMBUS_HAVE_PIN | PMBUS_HAVE_IIN | \ + PMBUS_HAVE_STATUS_VOUT | \ + PMBUS_HAVE_STATUS_IOUT | \ + PMBUS_HAVE_STATUS_INPUT | \ + PMBUS_HAVE_STATUS_TEMP) + +struct mp2891_data { + struct pmbus_driver_info info; + int vout_scale[MP2891_PAGE_NUM]; + int iout_scale[MP2891_PAGE_NUM]; +}; + +#define to_mp2891_data(x) container_of(x, struct mp2891_data, info) + +/* Converts a LINEAR11 value to DIRECT format */ +static u16 mp2891_reg2data_linear11(u16 word) +{ + s16 exponent; + s32 mantissa; + s64 val; + + exponent = ((s16)word) >> 11; + mantissa = ((s16)((word & 0x7ff) << 5)) >> 5; + val = mantissa; + + if (exponent >= 0) + val <<= exponent; + else + val >>= -exponent; + + return val; +} + +static int +mp2891_identify_vout_scale(struct i2c_client *client, struct pmbus_driver_info *info, + int page) +{ + struct mp2891_data *data = to_mp2891_data(info); + int ret; + + ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page); + if (ret < 0) + return ret; + + ret = i2c_smbus_read_word_data(client, MFR_VOUT_LOOP_CTRL); + if (ret < 0) + return ret; + + /* + * The output voltage is equal to the READ_VOUT(0x8B) register value multiplied + * by vout_scale. + * Obtain vout scale from the register MFR_VOUT_LOOP_CTRL, bits 15-14 and bit 13.
+ * If MFR_VOUT_LOOP_CTRL[13] = 1, the vout scale is: + * 2.5mV/LSB + * If MFR_VOUT_LOOP_CTRL[13] = 0, the vout scale is decided by + * MFR_VOUT_LOOP_CTRL[15:14]: + * 00b - 6.25mV/LSB, 01b - 5mV/LSB, 10b - 2mV/LSB, 11b - 1mV/LSB + */ + if (ret & GENMASK(13, 13)) { + data->vout_scale[page] = 250; + } else { + ret = FIELD_GET(GENMASK(15, 14), ret); + if (ret == 0) + data->vout_scale[page] = 625; + else if (ret == 1) + data->vout_scale[page] = 500; + else if (ret == 2) + data->vout_scale[page] = 200; + else + data->vout_scale[page] = 100; + } + + return 0; +} + +static int +mp2891_identify_iout_scale(struct i2c_client *client, struct pmbus_driver_info *info, + int page) +{ + struct mp2891_data *data = to_mp2891_data(info); + int ret; + + ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page); + if (ret < 0) + return ret; + + ret = i2c_smbus_read_word_data(client, MFR_SVI3_IOUT_PRT); + if (ret < 0) + return ret; + + /* + * The output current is equal to the READ_IOUT(0x8C) register value + * multiplied by iout_scale. + * Obtain iout_scale from the register MFR_SVI3_IOUT_PRT[2:0]. + * The value is selected as below: + * 000b - 1A/LSB, 001b - (1/32)A/LSB, 010b - (1/16)A/LSB, + * 011b - (1/8)A/LSB, 100b - (1/4)A/LSB, 101b - (1/2)A/LSB + * 110b - 1A/LSB, 111b - 2A/LSB + */ + switch (ret & GENMASK(2, 0)) { + case 0: + case 6: + data->iout_scale[page] = 32; + break; + case 1: + data->iout_scale[page] = 1; + break; + case 2: + data->iout_scale[page] = 2; + break; + case 3: + data->iout_scale[page] = 4; + break; + case 4: + data->iout_scale[page] = 8; + break; + case 5: + data->iout_scale[page] = 16; + break; + default: + data->iout_scale[page] = 64; + break; + } + + return 0; +} + +static int mp2891_identify(struct i2c_client *client, struct pmbus_driver_info *info) +{ + int ret; + + /* Identify vout scale for rail 1. */ + ret = mp2891_identify_vout_scale(client, info, 0); + if (ret < 0) + return ret; + + /* Identify vout scale for rail 2. */ + ret = mp2891_identify_vout_scale(client, info, 1); + if (ret < 0) + return ret; + + /* Identify iout scale for rail 1. */ + ret = mp2891_identify_iout_scale(client, info, 0); + if (ret < 0) + return ret; + + /* Identify iout scale for rail 2. */ + return mp2891_identify_iout_scale(client, info, 1); +} + +static int mp2891_read_byte_data(struct i2c_client *client, int page, int reg) +{ + int ret; + + switch (reg) { + case PMBUS_VOUT_MODE: + /* + * The MP2891 does not follow the standard PMBus protocol completely: the + * PMBUS_VOUT_MODE(0x20) in MP2891 is reserved and 0x00 is always + * returned when the register is read. But the calculation of vout in + * this driver is based on direct format. As a result, the format of + * vout is enforced to direct. + */ + ret = PB_VOUT_MODE_DIRECT; + break; + default: + ret = -ENODATA; + break; + } + + return ret; +} + +static int mp2891_read_word_data(struct i2c_client *client, int page, + int phase, int reg) +{ + const struct pmbus_driver_info *info = pmbus_get_driver_info(client); + struct mp2891_data *data = to_mp2891_data(info); + int ret; + + switch (reg) { + case PMBUS_READ_VIN: + ret = pmbus_read_word_data(client, page, phase, reg); + if (ret < 0) + return ret; + + ret = ret & GENMASK(9, 0); + break; + case PMBUS_READ_IIN: + /* + * The MP2891 does not have the standard PMBUS_READ_IIN register(0x89); + * the iin telemetry can be obtained through the vendor redefined + * register READ_IIN_EST(0x95). The MP2891 PMBUS_READ_IIN register + * is in linear11 format, but the iin scale is set to 1A/Lsb (using + * r/m/b scale).
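+ * For example (an illustrative word, not from the datasheet): 0xD3E8
+ * decodes as exponent 11010b = -6 and mantissa 1000, i.e. 1000 / 64 =
+ * 15.625A, which mp2891_reg2data_linear11() truncates to 15.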
As a result, the iin read from MP2891 should be + * converted to A before the result is returned to the pmbus core. + */ + ret = pmbus_read_word_data(client, page, phase, READ_IIN_EST); + if (ret < 0) + return ret; + + ret = mp2891_reg2data_linear11(ret); + break; + case PMBUS_READ_PIN: + /* + * The MP2891 has a standard PMBUS_READ_PIN register(0x97), but this + * is not used to read the input power per rail. The input power + * per rail is read through the vendor redefined register + * READ_PIN_EST(0x94). The MP2891 PMBUS_READ_PIN register is in linear11 + * format, but the pin scale is set to 1W/Lsb (using r/m/b scale). + * As a result, the pin read from MP2891 should be converted to W + * before the result is returned to the pmbus core. + */ + ret = pmbus_read_word_data(client, page, phase, READ_PIN_EST); + if (ret < 0) + return ret; + + ret = mp2891_reg2data_linear11(ret); + break; + case PMBUS_READ_POUT: + /* + * The MP2891 PMBUS_READ_POUT register is in linear11 format, and the + * exponent is not a constant value. But the pout scale is set to + * 1W/Lsb (using r/m/b scale). As a result, the pout read from MP2891 + * should be converted to W before the result is returned to the pmbus core. + */ + ret = pmbus_read_word_data(client, page, phase, reg); + if (ret < 0) + return ret; + + ret = mp2891_reg2data_linear11(ret); + break; + case PMBUS_READ_VOUT: + case PMBUS_VOUT_UV_WARN_LIMIT: + ret = pmbus_read_word_data(client, page, phase, reg); + if (ret < 0) + return ret; + + ret = DIV_ROUND_CLOSEST(ret * data->vout_scale[page], MP2891_VOUT_SCALE_DIV); + break; + case PMBUS_READ_IOUT: + ret = pmbus_read_word_data(client, page, phase, reg); + if (ret < 0) + return ret; + + ret = DIV_ROUND_CLOSEST((ret & GENMASK(10, 0)) * data->iout_scale[page], + MP2891_IOUT_SCALE_DIV); + break; + case PMBUS_OT_FAULT_LIMIT: + case PMBUS_OT_WARN_LIMIT: + /* + * The scale of MP2891 PMBUS_OT_FAULT_LIMIT and PMBUS_OT_WARN_LIMIT + * is 1°C/LSB, and they have a 40°C offset. + */ + ret = pmbus_read_word_data(client, page, phase, reg); + if (ret < 0) + return ret; + + ret = (ret & GENMASK(7, 0)) - MP2891_TEMP_LIMIT_OFFSET; + break; + case PMBUS_VIN_OV_FAULT_LIMIT: + /* + * The MP2891 PMBUS_VIN_OV_FAULT_LIMIT scale is 125mV/Lsb, + * but the vin scale is set to 31.25mV/Lsb (using r/m/b scale). + * As a result, the limit value should be multiplied by 4. + */ + ret = pmbus_read_word_data(client, page, phase, reg); + if (ret < 0) + return ret; + + ret = (ret & GENMASK(7, 0)) * 4; + break; + case PMBUS_VOUT_UV_FAULT_LIMIT: + ret = pmbus_read_word_data(client, page, phase, reg); + if (ret < 0) + return ret; + + if (FIELD_GET(GENMASK(11, 8), ret)) + ret = FIELD_GET(GENMASK(7, 0), ret) * MP2891_UV_LIMIT_SCALE - + (FIELD_GET(GENMASK(11, 8), ret) + 1) * MP2891_OVUV_DELTA_SCALE; + else + ret = FIELD_GET(GENMASK(7, 0), ret) * MP2891_UV_LIMIT_SCALE; + + ret = ret < 0 ?
0 : ret; + break; + case PMBUS_VOUT_OV_FAULT_LIMIT: + ret = pmbus_read_word_data(client, page, phase, reg); + if (ret < 0) + return ret; + + if (FIELD_GET(GENMASK(11, 8), ret)) + ret = FIELD_GET(GENMASK(7, 0), ret) * MP2891_OV_LIMIT_SCALE + + (FIELD_GET(GENMASK(11, 8), ret) + 1) * MP2891_OVUV_DELTA_SCALE; + else + ret = FIELD_GET(GENMASK(7, 0), ret) * MP2891_OV_LIMIT_SCALE; + break; + case PMBUS_IOUT_OC_WARN_LIMIT: + case PMBUS_IOUT_OC_FAULT_LIMIT: + ret = pmbus_read_word_data(client, page, phase, reg); + if (ret < 0) + return ret; + + ret = DIV_ROUND_CLOSEST((ret & GENMASK(7, 0)) * data->iout_scale[page] * + MP2891_IOUT_LIMIT_UINT, MP2891_IOUT_SCALE_DIV); + break; + case PMBUS_IIN_OC_WARN_LIMIT: + /* + * The scale of PMBUS_IIN_OC_WARN_LIMIT is 0.5A/Lsb, but the iin scale + * is set to 1A/Lsb(using r/m/b scale), so the word data should be + * divided by 2. + */ + ret = pmbus_read_word_data(client, 0, phase, reg); + if (ret < 0) + return ret; + + ret = DIV_ROUND_CLOSEST((ret & GENMASK(9, 0)), 2); + break; + case PMBUS_PIN_OP_WARN_LIMIT: + /* + * The scale of PMBUS_PIN_OP_WARN_LIMIT is 2W/Lsb, but the pin scale + * is set to 1W/Lsb(using r/m/b scale), so the word data should be + * multiplied by 2. + */ + ret = pmbus_read_word_data(client, 0, phase, reg); + if (ret < 0) + return ret; + + ret = (ret & GENMASK(9, 0)) * MP2891_PIN_LIMIT_UINT; + break; + case PMBUS_READ_TEMPERATURE_1: + case PMBUS_VIN_UV_FAULT_LIMIT: + case PMBUS_VIN_UV_WARN_LIMIT: + ret = -ENODATA; + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int mp2891_write_word_data(struct i2c_client *client, int page, int reg, + u16 word) +{ + const struct pmbus_driver_info *info = pmbus_get_driver_info(client); + struct mp2891_data *data = to_mp2891_data(info); + int ret; + + switch (reg) { + case PMBUS_VOUT_UV_WARN_LIMIT: + ret = pmbus_write_word_data(client, page, reg, + DIV_ROUND_CLOSEST(word * MP2891_VOUT_SCALE_DIV, + data->vout_scale[page])); + break; + case PMBUS_VOUT_UV_FAULT_LIMIT: + /* + * The PMBUS_VOUT_UV_FAULT_LIMIT[7:0] is the limit value, and bit8-bit15 + * should not be changed. + */ + ret = pmbus_read_word_data(client, page, 0xff, reg); + if (ret < 0) + return ret; + + if (FIELD_GET(GENMASK(11, 8), ret)) + ret = pmbus_write_word_data(client, page, reg, + (ret & ~GENMASK(7, 0)) | + FIELD_PREP(GENMASK(7, 0), + DIV_ROUND_CLOSEST(word + + (FIELD_GET(GENMASK(11, 8), ret) + 1) * + MP2891_OVUV_DELTA_SCALE, + MP2891_UV_LIMIT_SCALE))); + else + ret = pmbus_write_word_data(client, page, reg, + (ret & ~GENMASK(7, 0)) | + FIELD_PREP(GENMASK(7, 0), + DIV_ROUND_CLOSEST(word, + MP2891_UV_LIMIT_SCALE))); + break; + case PMBUS_VOUT_OV_FAULT_LIMIT: + /* + * The PMBUS_VOUT_OV_FAULT_LIMIT[7:0] is the limit value, and bit8-bit15 + * should not be changed. + */ + ret = pmbus_read_word_data(client, page, 0xff, reg); + if (ret < 0) + return ret; + + if (FIELD_GET(GENMASK(11, 8), ret)) + ret = pmbus_write_word_data(client, page, reg, + (ret & ~GENMASK(7, 0)) | + FIELD_PREP(GENMASK(7, 0), + DIV_ROUND_CLOSEST(word - + (FIELD_GET(GENMASK(11, 8), ret) + 1) * + MP2891_OVUV_DELTA_SCALE, + MP2891_OV_LIMIT_SCALE))); + else + ret = pmbus_write_word_data(client, page, reg, + (ret & ~GENMASK(7, 0)) | + FIELD_PREP(GENMASK(7, 0), + DIV_ROUND_CLOSEST(word, + MP2891_OV_LIMIT_SCALE))); + break; + case PMBUS_VIN_OV_FAULT_LIMIT: + /* + * The PMBUS_VIN_OV_FAULT_LIMIT[7:0] is the limit value, and bit8-bit15 + * should not be changed. 
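+ * Hence the read-modify-write below: read the current word, replace
+ * only bits 7:0, and write the result back.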
+ * The scale of PMBUS_VIN_OV_FAULT_LIMIT is 125mV/Lsb,
+ * but the vin scale is set to 31.25mV/Lsb(using r/m/b scale), so the word data
+ * should be divided by 4.
+ */
+ ret = pmbus_read_word_data(client, page, 0xff, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = pmbus_write_word_data(client, page, reg,
+ (ret & ~GENMASK(7, 0)) |
+ FIELD_PREP(GENMASK(7, 0),
+ DIV_ROUND_CLOSEST(word, 4)));
+ break;
+ case PMBUS_OT_FAULT_LIMIT:
+ case PMBUS_OT_WARN_LIMIT:
+ /*
+ * The MP2891 PMBUS_OT_FAULT_LIMIT and PMBUS_OT_WARN_LIMIT scales
+ * have a 40°C offset. Bits 0-7 hold the limit value, and bits 8-15
+ * should not be changed.
+ */
+ ret = pmbus_read_word_data(client, page, 0xff, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = pmbus_write_word_data(client, page, reg,
+ (ret & ~GENMASK(7, 0)) |
+ FIELD_PREP(GENMASK(7, 0), word + MP2891_TEMP_LIMIT_OFFSET));
+ break;
+ case PMBUS_IOUT_OC_WARN_LIMIT:
+ case PMBUS_IOUT_OC_FAULT_LIMIT:
+ ret = pmbus_write_word_data(client, page, reg,
+ DIV_ROUND_CLOSEST(word * MP2891_IOUT_SCALE_DIV,
+ MP2891_IOUT_LIMIT_UINT *
+ data->iout_scale[page]));
+ break;
+ case PMBUS_IIN_OC_WARN_LIMIT:
+ /*
+ * The scale of PMBUS_IIN_OC_WARN_LIMIT is 0.5A/Lsb, but the iin scale
+ * is set to 1A/Lsb(using r/m/b scale), so the word data should be
+ * multiplied by 2.
+ */
+ ret = pmbus_write_word_data(client, page, reg, word * 2);
+ break;
+ case PMBUS_PIN_OP_WARN_LIMIT:
+ /*
+ * The scale of PMBUS_PIN_OP_WARN_LIMIT is 2W/Lsb, but the pin scale
+ * is set to 1W/Lsb(using r/m/b scale), so the word data should be
+ * divided by 2.
+ */
+ ret = pmbus_write_word_data(client, page, reg,
+ DIV_ROUND_CLOSEST(word, MP2891_PIN_LIMIT_UINT));
+ break;
+ case PMBUS_VIN_UV_FAULT_LIMIT:
+ case PMBUS_VIN_UV_WARN_LIMIT:
+ ret = -ENODATA;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static const struct pmbus_driver_info mp2891_info = {
+ .pages = MP2891_PAGE_NUM,
+ .format[PSC_VOLTAGE_IN] = direct,
+ .format[PSC_CURRENT_IN] = direct,
+ .format[PSC_CURRENT_OUT] = direct,
+ .format[PSC_TEMPERATURE] = direct,
+ .format[PSC_POWER] = direct,
+ .format[PSC_VOLTAGE_OUT] = direct,
+
+ /* set vin scale 31.25mV/Lsb */
+ .m[PSC_VOLTAGE_IN] = 32,
+ .R[PSC_VOLTAGE_IN] = 0,
+ .b[PSC_VOLTAGE_IN] = 0,
+
+ /* set temp scale 1000m°C/Lsb */
+ .m[PSC_TEMPERATURE] = 1,
+ .R[PSC_TEMPERATURE] = 0,
+ .b[PSC_TEMPERATURE] = 0,
+
+ .m[PSC_CURRENT_IN] = 1,
+ .R[PSC_CURRENT_IN] = 0,
+ .b[PSC_CURRENT_IN] = 0,
+
+ .m[PSC_CURRENT_OUT] = 1,
+ .R[PSC_CURRENT_OUT] = 0,
+ .b[PSC_CURRENT_OUT] = 0,
+
+ .m[PSC_POWER] = 1,
+ .R[PSC_POWER] = 0,
+ .b[PSC_POWER] = 0,
+
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .R[PSC_VOLTAGE_OUT] = 3,
+ .b[PSC_VOLTAGE_OUT] = 0,
+
+ .func[0] = MP2891_RAIL1_FUNC,
+ .func[1] = MP2891_RAIL2_FUNC,
+ .read_word_data = mp2891_read_word_data,
+ .write_word_data = mp2891_write_word_data,
+ .read_byte_data = mp2891_read_byte_data,
+ .identify = mp2891_identify,
+};
+
+static int mp2891_probe(struct i2c_client *client)
+{
+ struct mp2891_data *data;
+
+ data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ memcpy(&data->info, &mp2891_info, sizeof(mp2891_info));
+
+ return pmbus_do_probe(client, &data->info);
+}
+
+static const struct i2c_device_id mp2891_id[] = {
+ {"mp2891", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, mp2891_id);
+
+static const struct of_device_id __maybe_unused mp2891_of_match[] = {
+ {.compatible = "mps,mp2891"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, mp2891_of_match);
+
+static struct i2c_driver mp2891_driver = {
+ .driver = {
+ .name = "mp2891",
+ .of_match_table = mp2891_of_match,
+ },
+ .probe = mp2891_probe,
+ .id_table = mp2891_id,
+};
+
+module_i2c_driver(mp2891_driver);
+
+MODULE_AUTHOR("Noah Wang <noahwang.wang@outlook.com>");
+MODULE_DESCRIPTION("PMBus driver for MPS MP2891");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(PMBUS);
diff --git a/drivers/hwmon/pmbus/mp2993.c b/drivers/hwmon/pmbus/mp2993.c
new file mode 100644
index 000000000000..944593e13231
--- /dev/null
+++ b/drivers/hwmon/pmbus/mp2993.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Hardware monitoring driver for MPS Multi-phase Digital VR Controllers (MP2993)
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include "pmbus.h"
+
+#define MP2993_VOUT_OVUV_UINT 125
+#define MP2993_VOUT_OVUV_DIV 64
+#define MP2993_VIN_LIMIT_UINT 1
+#define MP2993_VIN_LIMIT_DIV 8
+#define MP2993_READ_VIN_UINT 1
+#define MP2993_READ_VIN_DIV 32
+
+#define MP2993_PAGE_NUM 2
+
+#define MP2993_RAIL1_FUNC (PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | \
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_POUT | \
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_PIN | \
+ PMBUS_HAVE_IIN | \
+ PMBUS_HAVE_STATUS_VOUT | \
+ PMBUS_HAVE_STATUS_IOUT | \
+ PMBUS_HAVE_STATUS_TEMP | \
+ PMBUS_HAVE_STATUS_INPUT)
+
+#define MP2993_RAIL2_FUNC (PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT | \
+ PMBUS_HAVE_POUT | PMBUS_HAVE_TEMP | \
+ PMBUS_HAVE_STATUS_VOUT | \
+ PMBUS_HAVE_STATUS_IOUT | \
+ PMBUS_HAVE_STATUS_TEMP | \
+ PMBUS_HAVE_STATUS_INPUT)
+
+/* Convert the exponent of linear11 data to a specified target value */
+static u16 mp2993_linear11_exponent_transfer(u16 word, u16 expect_exponent)
+{
+ s16 exponent, mantissa, target_exponent;
+
+ exponent = ((s16)word) >> 11;
+ mantissa = ((s16)((word & 0x7ff) << 5)) >> 5;
+ target_exponent = (s16)((expect_exponent & 0x1f) << 11) >> 11;
+
+ if (exponent > target_exponent)
+ mantissa = mantissa << (exponent - target_exponent);
+ else
+ mantissa = mantissa >> (target_exponent - exponent);
+
+ return (mantissa & 0x7ff) | ((expect_exponent << 11) & 0xf800);
+}
+
+static int
+mp2993_set_vout_format(struct i2c_client *client, int page, int format)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
+ if (ret < 0)
+ return ret;
+
+ return i2c_smbus_write_byte_data(client, PMBUS_VOUT_MODE, format);
+}
+
+static int mp2993_identify(struct i2c_client *client, struct pmbus_driver_info *info)
+{
+ int ret;
+
+ /* Set vout to direct format for rail1. */
+ ret = mp2993_set_vout_format(client, 0, PB_VOUT_MODE_DIRECT);
+ if (ret < 0)
+ return ret;
+
+ /* Set vout to direct format for rail2. */
+ return mp2993_set_vout_format(client, 1, PB_VOUT_MODE_DIRECT);
+}
+
+static int mp2993_read_word_data(struct i2c_client *client, int page, int phase,
+ int reg)
+{
+ int ret;
+
+ switch (reg) {
+ case PMBUS_VOUT_OV_FAULT_LIMIT:
+ case PMBUS_VOUT_UV_FAULT_LIMIT:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = DIV_ROUND_CLOSEST(ret * MP2993_VOUT_OVUV_UINT, MP2993_VOUT_OVUV_DIV);
+ break;
+ case PMBUS_OT_FAULT_LIMIT:
+ case PMBUS_OT_WARN_LIMIT:
+ /*
+ * The MP2993 ot fault limit value and ot warn limit value
+ * per rail are always the same, so only the PMBUS_OT_FAULT_LIMIT
+ * and PMBUS_OT_WARN_LIMIT registers in page 0 are defined to
+ * indicate the limit value.
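+ * (A read on either page is therefore redirected to page 0 below. For
+ * illustration: with the direct-format temperature scale in mp2993_info
+ * (m = 1, R = 0, b = 0), the raw register value is reported to the pmbus
+ * core as degrees Celsius unchanged.)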
+ */
+ ret = pmbus_read_word_data(client, 0, phase, reg);
+ break;
+ case PMBUS_READ_VIN:
+ /* The MP2993 vin scale is (1/32V)/Lsb */
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = DIV_ROUND_CLOSEST((ret & GENMASK(9, 0)) * MP2993_READ_VIN_UINT,
+ MP2993_READ_VIN_DIV);
+ break;
+ case PMBUS_VIN_OV_FAULT_LIMIT:
+ case PMBUS_VIN_OV_WARN_LIMIT:
+ case PMBUS_VIN_UV_WARN_LIMIT:
+ case PMBUS_VIN_UV_FAULT_LIMIT:
+ /* The MP2993 vin limit scale is (1/8V)/Lsb */
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = DIV_ROUND_CLOSEST((ret & GENMASK(7, 0)) * MP2993_VIN_LIMIT_UINT,
+ MP2993_VIN_LIMIT_DIV);
+ break;
+ case PMBUS_READ_IOUT:
+ case PMBUS_READ_IIN:
+ case PMBUS_IIN_OC_WARN_LIMIT:
+ case PMBUS_IOUT_OC_FAULT_LIMIT:
+ case PMBUS_IOUT_OC_WARN_LIMIT:
+ case PMBUS_READ_VOUT:
+ case PMBUS_READ_PIN:
+ case PMBUS_READ_POUT:
+ case PMBUS_READ_TEMPERATURE_1:
+ ret = -ENODATA;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int mp2993_write_word_data(struct i2c_client *client, int page, int reg,
+ u16 word)
+{
+ int ret;
+
+ switch (reg) {
+ case PMBUS_VOUT_OV_FAULT_LIMIT:
+ case PMBUS_VOUT_UV_FAULT_LIMIT:
+ ret = DIV_ROUND_CLOSEST(word * MP2993_VOUT_OVUV_DIV, MP2993_VOUT_OVUV_UINT);
+ ret = pmbus_write_word_data(client, 0, reg, ret);
+ break;
+ case PMBUS_OT_FAULT_LIMIT:
+ case PMBUS_OT_WARN_LIMIT:
+ /*
+ * The MP2993 ot fault limit value and ot warn limit value
+ * per rail are always the same, so only the PMBUS_OT_FAULT_LIMIT
+ * and PMBUS_OT_WARN_LIMIT registers in page 0 are defined to
+ * configure the ot limit value.
+ */
+ ret = pmbus_write_word_data(client, 0, reg, word);
+ break;
+ case PMBUS_VIN_OV_FAULT_LIMIT:
+ case PMBUS_VIN_OV_WARN_LIMIT:
+ case PMBUS_VIN_UV_WARN_LIMIT:
+ case PMBUS_VIN_UV_FAULT_LIMIT:
+ /* The MP2993 vin limit scale is (1/8V)/Lsb */
+ ret = pmbus_write_word_data(client, 0, reg,
+ DIV_ROUND_CLOSEST(word * MP2993_VIN_LIMIT_DIV,
+ MP2993_VIN_LIMIT_UINT));
+ break;
+ case PMBUS_IIN_OC_WARN_LIMIT:
+ /*
+ * The PMBUS_IIN_OC_WARN_LIMIT of MP2993 is linear11 format,
+ * and the exponent is a constant value (5'b00000), so the
+ * exponent of the word parameter should be converted to 5'b00000.
+ */
+ ret = pmbus_write_word_data(client, page, reg,
+ mp2993_linear11_exponent_transfer(word, 0x00));
+ break;
+ case PMBUS_IOUT_OC_FAULT_LIMIT:
+ case PMBUS_IOUT_OC_WARN_LIMIT:
+ /*
+ * The PMBUS_IOUT_OC_FAULT_LIMIT and PMBUS_IOUT_OC_WARN_LIMIT
+ * of MP2993 can be regarded as linear11 format, and the
+ * exponent is either 5'b00001 or 5'b00000. To ensure a larger
+ * limit value range, the exponent of the word parameter
+ * should be converted to 5'b00001.
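+ * For illustration (example words, not from the datasheet): a word of
+ * 0x0064 (exponent 5'b00000, mantissa 100, i.e. 100 A) is rewritten by
+ * mp2993_linear11_exponent_transfer() as 0x0832 (exponent 5'b00001,
+ * mantissa 50), which still encodes 100 A (50 * 2^1).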
+ */ + ret = pmbus_write_word_data(client, page, reg, + mp2993_linear11_exponent_transfer(word, 0x01)); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static struct pmbus_driver_info mp2993_info = { + .pages = MP2993_PAGE_NUM, + .format[PSC_VOLTAGE_IN] = direct, + .format[PSC_CURRENT_IN] = linear, + .format[PSC_CURRENT_OUT] = linear, + .format[PSC_TEMPERATURE] = direct, + .format[PSC_POWER] = linear, + .format[PSC_VOLTAGE_OUT] = direct, + + .m[PSC_VOLTAGE_OUT] = 1, + .R[PSC_VOLTAGE_OUT] = 3, + .b[PSC_VOLTAGE_OUT] = 0, + + .m[PSC_VOLTAGE_IN] = 1, + .R[PSC_VOLTAGE_IN] = 0, + .b[PSC_VOLTAGE_IN] = 0, + + .m[PSC_TEMPERATURE] = 1, + .R[PSC_TEMPERATURE] = 0, + .b[PSC_TEMPERATURE] = 0, + + .func[0] = MP2993_RAIL1_FUNC, + .func[1] = MP2993_RAIL2_FUNC, + .read_word_data = mp2993_read_word_data, + .write_word_data = mp2993_write_word_data, + .identify = mp2993_identify, +}; + +static int mp2993_probe(struct i2c_client *client) +{ + return pmbus_do_probe(client, &mp2993_info); +} + +static const struct i2c_device_id mp2993_id[] = { + {"mp2993", 0}, + {} +}; +MODULE_DEVICE_TABLE(i2c, mp2993_id); + +static const struct of_device_id __maybe_unused mp2993_of_match[] = { + {.compatible = "mps,mp2993"}, + {} +}; +MODULE_DEVICE_TABLE(of, mp2993_of_match); + +static struct i2c_driver mp2993_driver = { + .driver = { + .name = "mp2993", + .of_match_table = mp2993_of_match, + }, + .probe = mp2993_probe, + .id_table = mp2993_id, +}; + +module_i2c_driver(mp2993_driver); + +MODULE_AUTHOR("Noah Wang <noahwang.wang@outlook.com>"); +MODULE_DESCRIPTION("PMBus driver for MPS MP2993"); +MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(PMBUS); diff --git a/drivers/hwmon/pmbus/mp5920.c b/drivers/hwmon/pmbus/mp5920.c new file mode 100644 index 000000000000..f6d7527ade7d --- /dev/null +++ b/drivers/hwmon/pmbus/mp5920.c @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Hardware monitoring driver for MP5920 and compatible chips. 
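+ *
+ * All telemetry is reported in PMBus direct format, X = (Y * 10^-R - b) / m.
+ * For illustration (raw value assumed): with the voltage coefficients below
+ * (m = 2266, b = 0, R = -1), a READ_VIN word of 2719 maps to
+ * 2719 * 10 / 2266, which is about 12.0 V.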
+ */
+
+#include <linux/i2c.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include "pmbus.h"
+
+static struct pmbus_driver_info mp5920_info = {
+ .pages = 1,
+ .format[PSC_VOLTAGE_IN] = direct,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_CURRENT_OUT] = direct,
+ .format[PSC_POWER] = direct,
+ .format[PSC_TEMPERATURE] = direct,
+ .m[PSC_VOLTAGE_IN] = 2266,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = -1,
+ .m[PSC_VOLTAGE_OUT] = 2266,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = -1,
+ .m[PSC_CURRENT_OUT] = 546,
+ .b[PSC_CURRENT_OUT] = 0,
+ .R[PSC_CURRENT_OUT] = -2,
+ .m[PSC_POWER] = 5840,
+ .b[PSC_POWER] = 0,
+ .R[PSC_POWER] = -3,
+ .m[PSC_TEMPERATURE] = 1067,
+ .b[PSC_TEMPERATURE] = 20500,
+ .R[PSC_TEMPERATURE] = -2,
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT |
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_POUT |
+ PMBUS_HAVE_TEMP,
+};
+
+static int mp5920_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ int ret;
+ u8 buf[I2C_SMBUS_BLOCK_MAX];
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_WORD_DATA))
+ return -ENODEV;
+
+ ret = i2c_smbus_read_block_data(client, PMBUS_MFR_MODEL, buf);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to read PMBUS_MFR_MODEL\n");
+
+ if (ret != 6 || strncmp(buf, "MP5920", 6)) {
+ return dev_err_probe(dev, -ENODEV, "Model '%.*s' not supported\n",
+ min_t(int, ret, sizeof(buf)), buf);
+ }
+
+ return pmbus_do_probe(client, &mp5920_info);
+}
+
+static const struct of_device_id mp5920_of_match[] = {
+ { .compatible = "mps,mp5920" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, mp5920_of_match);
+
+static const struct i2c_device_id mp5920_id[] = {
+ { "mp5920" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, mp5920_id);
+
+static struct i2c_driver mp5920_driver = {
+ .driver = {
+ .name = "mp5920",
+ .of_match_table = mp5920_of_match,
+ },
+ .probe = mp5920_probe,
+ .id_table = mp5920_id,
+};
+
+module_i2c_driver(mp5920_driver);
+
+MODULE_AUTHOR("Tony Ao <tony_ao@wiwynn.com>");
+MODULE_AUTHOR("Alex Vdovydchenko <xzeol@yahoo.com>");
+MODULE_DESCRIPTION("PMBus driver for MP5920 HSC");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(PMBUS);
diff --git a/drivers/hwmon/pmbus/mp9941.c b/drivers/hwmon/pmbus/mp9941.c
new file mode 100644
index 000000000000..543955cfce67
--- /dev/null
+++ b/drivers/hwmon/pmbus/mp9941.c
@@ -0,0 +1,319 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Hardware monitoring driver for MPS Multi-phase Digital VR Controllers (MP9941)
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include "pmbus.h"
+
+/*
+ * Vendor-specific registers. The MFR_ICC_MAX(0x02) is used to
+ * configure the iin scale. The MFR_RESO_SET(0xC7) is used to
+ * configure the vout format. The MFR_VR_MULTI_CONFIG_R1(0x0D) is
+ * used to identify the vout vid step.
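+ * For example, mp9941_set_vout_format() below sets MFR_RESO_SET[7:6] to
+ * 2'b11 to select the direct vout format, and MFR_VR_MULTI_CONFIG_R1[4]
+ * encodes the vid step (1'b0 is 10mV, 1'b1 is 5mV) by which the vout
+ * limit registers are scaled.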
+ */ +#define MFR_ICC_MAX 0x02 +#define MFR_RESO_SET 0xC7 +#define MFR_VR_MULTI_CONFIG_R1 0x0D + +#define MP9941_VIN_LIMIT_UINT 1 +#define MP9941_VIN_LIMIT_DIV 8 +#define MP9941_READ_VIN_UINT 1 +#define MP9941_READ_VIN_DIV 32 + +#define MP9941_PAGE_NUM 1 + +#define MP9941_RAIL1_FUNC (PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | \ + PMBUS_HAVE_IOUT | PMBUS_HAVE_POUT | \ + PMBUS_HAVE_TEMP | PMBUS_HAVE_PIN | \ + PMBUS_HAVE_IIN | \ + PMBUS_HAVE_STATUS_VOUT | \ + PMBUS_HAVE_STATUS_IOUT | \ + PMBUS_HAVE_STATUS_TEMP | \ + PMBUS_HAVE_STATUS_INPUT) + +struct mp9941_data { + struct pmbus_driver_info info; + int vid_resolution; +}; + +#define to_mp9941_data(x) container_of(x, struct mp9941_data, info) + +static int mp9941_set_vout_format(struct i2c_client *client) +{ + int ret; + + ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, 0); + if (ret < 0) + return ret; + + ret = i2c_smbus_read_word_data(client, MFR_RESO_SET); + if (ret < 0) + return ret; + + /* + * page = 0, MFR_RESO_SET[7:6] defines the vout format + * 2'b11 set the vout format as direct + */ + ret = (ret & ~GENMASK(7, 6)) | FIELD_PREP(GENMASK(7, 6), 3); + + return i2c_smbus_write_word_data(client, MFR_RESO_SET, ret); +} + +static int +mp9941_identify_vid_resolution(struct i2c_client *client, struct pmbus_driver_info *info) +{ + struct mp9941_data *data = to_mp9941_data(info); + int ret; + + /* + * page = 2, MFR_VR_MULTI_CONFIG_R1[4:4] defines rail1 vid step value + * 1'b0 represents the vid step value is 10mV + * 1'b1 represents the vid step value is 5mV + */ + ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, 2); + if (ret < 0) + return ret; + + ret = i2c_smbus_read_word_data(client, MFR_VR_MULTI_CONFIG_R1); + if (ret < 0) + return ret; + + if (FIELD_GET(GENMASK(4, 4), ret)) + data->vid_resolution = 5; + else + data->vid_resolution = 10; + + return 0; +} + +static int mp9941_identify_iin_scale(struct i2c_client *client) +{ + int ret; + + ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, 0); + if (ret < 0) + return ret; + + ret = i2c_smbus_read_word_data(client, MFR_RESO_SET); + if (ret < 0) + return ret; + + ret = (ret & ~GENMASK(3, 2)) | FIELD_PREP(GENMASK(3, 2), 0); + + ret = i2c_smbus_write_word_data(client, MFR_RESO_SET, ret); + if (ret < 0) + return ret; + + /* + * page = 2, MFR_ICC_MAX[15:13] defines the iin scale + * 3'b000 set the iout scale as 0.5A/Lsb + */ + ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, 2); + if (ret < 0) + return ret; + + ret = i2c_smbus_read_word_data(client, MFR_ICC_MAX); + if (ret < 0) + return ret; + + ret = (ret & ~GENMASK(15, 13)) | FIELD_PREP(GENMASK(15, 13), 0); + + return i2c_smbus_write_word_data(client, MFR_ICC_MAX, ret); +} + +static int mp9941_identify(struct i2c_client *client, struct pmbus_driver_info *info) +{ + int ret; + + ret = mp9941_identify_iin_scale(client); + if (ret < 0) + return ret; + + ret = mp9941_identify_vid_resolution(client, info); + if (ret < 0) + return ret; + + return mp9941_set_vout_format(client); +} + +static int mp9941_read_word_data(struct i2c_client *client, int page, int phase, + int reg) +{ + const struct pmbus_driver_info *info = pmbus_get_driver_info(client); + struct mp9941_data *data = to_mp9941_data(info); + int ret; + + switch (reg) { + case PMBUS_READ_VIN: + /* The MP9941 vin scale is (1/32V)/Lsb */ + ret = pmbus_read_word_data(client, page, phase, reg); + if (ret < 0) + return ret; + + ret = DIV_ROUND_CLOSEST((ret & GENMASK(9, 0)) * MP9941_READ_VIN_UINT, + MP9941_READ_VIN_DIV); + break; + case PMBUS_READ_IIN: + ret = pmbus_read_word_data(client, page, 
phase, reg); + if (ret < 0) + return ret; + + ret = ret & GENMASK(10, 0); + break; + case PMBUS_VIN_OV_FAULT_LIMIT: + /* The MP9941 vin ov limit scale is (1/8V)/Lsb */ + ret = pmbus_read_word_data(client, page, phase, reg); + if (ret < 0) + return ret; + + ret = DIV_ROUND_CLOSEST((ret & GENMASK(7, 0)) * MP9941_VIN_LIMIT_UINT, + MP9941_VIN_LIMIT_DIV); + break; + case PMBUS_IIN_OC_WARN_LIMIT: + ret = pmbus_read_word_data(client, page, phase, reg); + if (ret < 0) + return ret; + + ret = ret & GENMASK(7, 0); + break; + case PMBUS_VOUT_UV_FAULT_LIMIT: + case PMBUS_MFR_VOUT_MIN: + case PMBUS_MFR_VOUT_MAX: + /* + * The vout scale is set to 1mV/Lsb(using r/m/b scale). + * But the vout uv limit and vout max/min scale is 1VID/Lsb, + * so the vout uv limit and vout max/min value should be + * multiplied by vid resolution. + */ + ret = pmbus_read_word_data(client, page, phase, reg); + if (ret < 0) + return ret; + + ret = ret * data->vid_resolution; + break; + case PMBUS_READ_IOUT: + case PMBUS_READ_POUT: + case PMBUS_READ_TEMPERATURE_1: + case PMBUS_READ_VOUT: + case PMBUS_READ_PIN: + case PMBUS_OT_FAULT_LIMIT: + case PMBUS_OT_WARN_LIMIT: + ret = -ENODATA; + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int mp9941_write_word_data(struct i2c_client *client, int page, int reg, + u16 word) +{ + const struct pmbus_driver_info *info = pmbus_get_driver_info(client); + struct mp9941_data *data = to_mp9941_data(info); + int ret; + + switch (reg) { + case PMBUS_VIN_OV_FAULT_LIMIT: + /* The MP9941 vin ov limit scale is (1/8V)/Lsb */ + ret = pmbus_write_word_data(client, page, reg, + DIV_ROUND_CLOSEST(word * MP9941_VIN_LIMIT_DIV, + MP9941_VIN_LIMIT_UINT)); + break; + case PMBUS_VOUT_UV_FAULT_LIMIT: + case PMBUS_MFR_VOUT_MIN: + case PMBUS_MFR_VOUT_MAX: + ret = pmbus_write_word_data(client, page, reg, + DIV_ROUND_CLOSEST(word, data->vid_resolution)); + break; + case PMBUS_IIN_OC_WARN_LIMIT: + case PMBUS_OT_FAULT_LIMIT: + case PMBUS_OT_WARN_LIMIT: + ret = -ENODATA; + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static const struct pmbus_driver_info mp9941_info = { + .pages = MP9941_PAGE_NUM, + .format[PSC_VOLTAGE_IN] = direct, + .format[PSC_CURRENT_IN] = direct, + .format[PSC_CURRENT_OUT] = linear, + .format[PSC_POWER] = linear, + .format[PSC_TEMPERATURE] = direct, + .format[PSC_VOLTAGE_OUT] = direct, + + .m[PSC_TEMPERATURE] = 1, + .R[PSC_TEMPERATURE] = 0, + .b[PSC_TEMPERATURE] = 0, + + .m[PSC_VOLTAGE_IN] = 1, + .R[PSC_VOLTAGE_IN] = 0, + .b[PSC_VOLTAGE_IN] = 0, + + .m[PSC_CURRENT_IN] = 2, + .R[PSC_CURRENT_IN] = 0, + .b[PSC_CURRENT_IN] = 0, + + .m[PSC_VOLTAGE_OUT] = 1, + .R[PSC_VOLTAGE_OUT] = 3, + .b[PSC_VOLTAGE_OUT] = 0, + + .func[0] = MP9941_RAIL1_FUNC, + .read_word_data = mp9941_read_word_data, + .write_word_data = mp9941_write_word_data, + .identify = mp9941_identify, +}; + +static int mp9941_probe(struct i2c_client *client) +{ + struct mp9941_data *data; + + data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + memcpy(&data->info, &mp9941_info, sizeof(mp9941_info)); + + return pmbus_do_probe(client, &data->info); +} + +static const struct i2c_device_id mp9941_id[] = { + {"mp9941", 0}, + {} +}; +MODULE_DEVICE_TABLE(i2c, mp9941_id); + +static const struct of_device_id __maybe_unused mp9941_of_match[] = { + {.compatible = "mps,mp9941"}, + {} +}; +MODULE_DEVICE_TABLE(of, mp9941_of_match); + +static struct i2c_driver mp9941_driver = { + .driver = { + .name = "mp9941", + .of_match_table = mp9941_of_match, + }, + .probe 
= mp9941_probe, + .id_table = mp9941_id, +}; + +module_i2c_driver(mp9941_driver); + +MODULE_AUTHOR("Noah Wang <noahwang.wang@outlook.com>"); +MODULE_DESCRIPTION("PMBus driver for MPS MP9941"); +MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(PMBUS); diff --git a/drivers/hwmon/powr1220.c b/drivers/hwmon/powr1220.c index 2388d0565e7e..5f9ca6543530 100644 --- a/drivers/hwmon/powr1220.c +++ b/drivers/hwmon/powr1220.c @@ -279,12 +279,11 @@ static const struct hwmon_chip_info powr1220_chip_info = { .info = powr1220_info, }; -static const struct i2c_device_id powr1220_ids[]; - static int powr1220_probe(struct i2c_client *client) { struct powr1220_data *data; struct device *hwmon_dev; + enum powr1xxx_chips chip; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -ENODEV; @@ -293,7 +292,8 @@ static int powr1220_probe(struct i2c_client *client) if (!data) return -ENOMEM; - switch (i2c_match_id(powr1220_ids, client)->driver_data) { + chip = (uintptr_t)i2c_get_match_data(client); + switch (chip) { case powr1014: data->max_channels = 10; break; diff --git a/drivers/hwmon/sht3x.c b/drivers/hwmon/sht3x.c index c0d02fbcdb76..650b0bcc2359 100644 --- a/drivers/hwmon/sht3x.c +++ b/drivers/hwmon/sht3x.c @@ -882,15 +882,6 @@ static const struct hwmon_chip_info sht3x_chip_info = { .info = sht3x_channel_info, }; -/* device ID table */ -static const struct i2c_device_id sht3x_ids[] = { - {"sht3x", sht3x}, - {"sts3x", sts3x}, - {} -}; - -MODULE_DEVICE_TABLE(i2c, sht3x_ids); - static int sht3x_probe(struct i2c_client *client) { int ret; @@ -920,7 +911,7 @@ static int sht3x_probe(struct i2c_client *client) data->mode = 0; data->last_update = jiffies - msecs_to_jiffies(3000); data->client = client; - data->chip_id = i2c_match_id(sht3x_ids, client)->driver_data; + data->chip_id = (uintptr_t)i2c_get_match_data(client); crc8_populate_msb(sht3x_crc8_table, SHT3X_CRC8_POLYNOMIAL); sht3x_select_command(data); @@ -963,6 +954,15 @@ static int sht3x_probe(struct i2c_client *client) return PTR_ERR_OR_ZERO(hwmon_dev); } +/* device ID table */ +static const struct i2c_device_id sht3x_ids[] = { + {"sht3x", sht3x}, + {"sts3x", sts3x}, + {} +}; + +MODULE_DEVICE_TABLE(i2c, sht3x_ids); + static struct i2c_driver sht3x_i2c_driver = { .driver.name = "sht3x", .probe = sht3x_probe, diff --git a/drivers/hwmon/shtc1.c b/drivers/hwmon/shtc1.c index 439dd3dba5fc..2ac906e8e173 100644 --- a/drivers/hwmon/shtc1.c +++ b/drivers/hwmon/shtc1.c @@ -186,8 +186,6 @@ static void shtc1_select_command(struct shtc1_data *data) } } -static const struct i2c_device_id shtc1_id[]; - static int shtc1_probe(struct i2c_client *client) { int ret; @@ -195,7 +193,7 @@ static int shtc1_probe(struct i2c_client *client) char id_reg_buf[2]; struct shtc1_data *data; struct device *hwmon_dev; - enum shtcx_chips chip = i2c_match_id(shtc1_id, client)->driver_data; + enum shtcx_chips chip = (uintptr_t)i2c_get_match_data(client); struct i2c_adapter *adap = client->adapter; struct device *dev = &client->dev; struct device_node *np = dev->of_node; diff --git a/drivers/hwmon/spd5118.c b/drivers/hwmon/spd5118.c new file mode 100644 index 000000000000..fcbce5a01e55 --- /dev/null +++ b/drivers/hwmon/spd5118.c @@ -0,0 +1,703 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Driver for Jedec 5118 compliant temperature sensors + * + * Derived from https://github.com/Steve-Tech/SPD5118-DKMS + * Originally from T/2 driver at https://t2sde.org/packages/linux + * Copyright (c) 2023 René Rebe, ExactCODE GmbH; Germany. 
+ * + * Copyright (c) 2024 Guenter Roeck + * + * Inspired by ee1004.c and jc42.c. + * + * SPD5118 compliant temperature sensors are typically used on DDR5 + * memory modules. + */ + +#include <linux/bitops.h> +#include <linux/bits.h> +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/hwmon.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/nvmem-provider.h> +#include <linux/pm.h> +#include <linux/regmap.h> +#include <linux/units.h> + +/* Addresses to scan */ +static const unsigned short normal_i2c[] = { + 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, I2C_CLIENT_END }; + +/* SPD5118 registers. */ +#define SPD5118_REG_TYPE 0x00 /* MR0:MR1 */ +#define SPD5118_REG_REVISION 0x02 /* MR2 */ +#define SPD5118_REG_VENDOR 0x03 /* MR3:MR4 */ +#define SPD5118_REG_CAPABILITY 0x05 /* MR5 */ +#define SPD5118_REG_I2C_LEGACY_MODE 0x0B /* MR11 */ +#define SPD5118_REG_TEMP_CLR 0x13 /* MR19 */ +#define SPD5118_REG_ERROR_CLR 0x14 /* MR20 */ +#define SPD5118_REG_TEMP_CONFIG 0x1A /* MR26 */ +#define SPD5118_REG_TEMP_MAX 0x1c /* MR28:MR29 */ +#define SPD5118_REG_TEMP_MIN 0x1e /* MR30:MR31 */ +#define SPD5118_REG_TEMP_CRIT 0x20 /* MR32:MR33 */ +#define SPD5118_REG_TEMP_LCRIT 0x22 /* MR34:MR35 */ +#define SPD5118_REG_TEMP 0x31 /* MR49:MR50 */ +#define SPD5118_REG_TEMP_STATUS 0x33 /* MR51 */ + +#define SPD5118_TEMP_STATUS_HIGH BIT(0) +#define SPD5118_TEMP_STATUS_LOW BIT(1) +#define SPD5118_TEMP_STATUS_CRIT BIT(2) +#define SPD5118_TEMP_STATUS_LCRIT BIT(3) + +#define SPD5118_CAP_TS_SUPPORT BIT(1) /* temperature sensor support */ + +#define SPD5118_TS_DISABLE BIT(0) /* temperature sensor disable */ + +#define SPD5118_LEGACY_MODE_ADDR BIT(3) +#define SPD5118_LEGACY_PAGE_MASK GENMASK(2, 0) +#define SPD5118_LEGACY_MODE_MASK (SPD5118_LEGACY_MODE_ADDR | SPD5118_LEGACY_PAGE_MASK) + +#define SPD5118_NUM_PAGES 8 +#define SPD5118_PAGE_SIZE 128 +#define SPD5118_PAGE_SHIFT 7 +#define SPD5118_PAGE_MASK GENMASK(6, 0) +#define SPD5118_EEPROM_BASE 0x80 +#define SPD5118_EEPROM_SIZE (SPD5118_PAGE_SIZE * SPD5118_NUM_PAGES) + +/* Temperature unit in millicelsius */ +#define SPD5118_TEMP_UNIT (MILLIDEGREE_PER_DEGREE / 4) +/* Representable temperature range in millicelsius */ +#define SPD5118_TEMP_RANGE_MIN -256000 +#define SPD5118_TEMP_RANGE_MAX 255750 + +struct spd5118_data { + struct regmap *regmap; + struct mutex nvmem_lock; +}; + +/* hwmon */ + +static int spd5118_temp_from_reg(u16 reg) +{ + int temp = sign_extend32(reg >> 2, 10); + + return temp * SPD5118_TEMP_UNIT; +} + +static u16 spd5118_temp_to_reg(long temp) +{ + temp = clamp_val(temp, SPD5118_TEMP_RANGE_MIN, SPD5118_TEMP_RANGE_MAX); + return (DIV_ROUND_CLOSEST(temp, SPD5118_TEMP_UNIT) & 0x7ff) << 2; +} + +static int spd5118_read_temp(struct regmap *regmap, u32 attr, long *val) +{ + int reg, err; + u8 regval[2]; + u16 temp; + + switch (attr) { + case hwmon_temp_input: + reg = SPD5118_REG_TEMP; + break; + case hwmon_temp_max: + reg = SPD5118_REG_TEMP_MAX; + break; + case hwmon_temp_min: + reg = SPD5118_REG_TEMP_MIN; + break; + case hwmon_temp_crit: + reg = SPD5118_REG_TEMP_CRIT; + break; + case hwmon_temp_lcrit: + reg = SPD5118_REG_TEMP_LCRIT; + break; + default: + return -EOPNOTSUPP; + } + + err = regmap_bulk_read(regmap, reg, regval, 2); + if (err) + return err; + + temp = (regval[1] << 8) | regval[0]; + + *val = spd5118_temp_from_reg(temp); + return 0; +} + +static int spd5118_read_alarm(struct regmap *regmap, u32 attr, long *val) +{ + unsigned int mask, regval; + int err; + + switch (attr) { + case hwmon_temp_max_alarm: + mask = 
SPD5118_TEMP_STATUS_HIGH; + break; + case hwmon_temp_min_alarm: + mask = SPD5118_TEMP_STATUS_LOW; + break; + case hwmon_temp_crit_alarm: + mask = SPD5118_TEMP_STATUS_CRIT; + break; + case hwmon_temp_lcrit_alarm: + mask = SPD5118_TEMP_STATUS_LCRIT; + break; + default: + return -EOPNOTSUPP; + } + + err = regmap_read(regmap, SPD5118_REG_TEMP_STATUS, ®val); + if (err < 0) + return err; + *val = !!(regval & mask); + if (*val) + return regmap_write(regmap, SPD5118_REG_TEMP_CLR, mask); + return 0; +} + +static int spd5118_read_enable(struct regmap *regmap, long *val) +{ + u32 regval; + int err; + + err = regmap_read(regmap, SPD5118_REG_TEMP_CONFIG, ®val); + if (err < 0) + return err; + *val = !(regval & SPD5118_TS_DISABLE); + return 0; +} + +static int spd5118_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + struct regmap *regmap = dev_get_drvdata(dev); + + if (type != hwmon_temp) + return -EOPNOTSUPP; + + switch (attr) { + case hwmon_temp_input: + case hwmon_temp_max: + case hwmon_temp_min: + case hwmon_temp_crit: + case hwmon_temp_lcrit: + return spd5118_read_temp(regmap, attr, val); + case hwmon_temp_max_alarm: + case hwmon_temp_min_alarm: + case hwmon_temp_crit_alarm: + case hwmon_temp_lcrit_alarm: + return spd5118_read_alarm(regmap, attr, val); + case hwmon_temp_enable: + return spd5118_read_enable(regmap, val); + default: + return -EOPNOTSUPP; + } +} + +static int spd5118_write_temp(struct regmap *regmap, u32 attr, long val) +{ + u8 regval[2]; + u16 temp; + int reg; + + switch (attr) { + case hwmon_temp_max: + reg = SPD5118_REG_TEMP_MAX; + break; + case hwmon_temp_min: + reg = SPD5118_REG_TEMP_MIN; + break; + case hwmon_temp_crit: + reg = SPD5118_REG_TEMP_CRIT; + break; + case hwmon_temp_lcrit: + reg = SPD5118_REG_TEMP_LCRIT; + break; + default: + return -EOPNOTSUPP; + } + + temp = spd5118_temp_to_reg(val); + regval[0] = temp & 0xff; + regval[1] = temp >> 8; + + return regmap_bulk_write(regmap, reg, regval, 2); +} + +static int spd5118_write_enable(struct regmap *regmap, long val) +{ + if (val && val != 1) + return -EINVAL; + + return regmap_update_bits(regmap, SPD5118_REG_TEMP_CONFIG, + SPD5118_TS_DISABLE, + val ? 
0 : SPD5118_TS_DISABLE); +} + +static int spd5118_temp_write(struct regmap *regmap, u32 attr, long val) +{ + switch (attr) { + case hwmon_temp_max: + case hwmon_temp_min: + case hwmon_temp_crit: + case hwmon_temp_lcrit: + return spd5118_write_temp(regmap, attr, val); + case hwmon_temp_enable: + return spd5118_write_enable(regmap, val); + default: + return -EOPNOTSUPP; + } +} + +static int spd5118_write(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long val) +{ + struct regmap *regmap = dev_get_drvdata(dev); + + switch (type) { + case hwmon_temp: + return spd5118_temp_write(regmap, attr, val); + default: + return -EOPNOTSUPP; + } +} + +static umode_t spd5118_is_visible(const void *_data, enum hwmon_sensor_types type, + u32 attr, int channel) +{ + if (type != hwmon_temp) + return 0; + + switch (attr) { + case hwmon_temp_input: + return 0444; + case hwmon_temp_min: + case hwmon_temp_max: + case hwmon_temp_lcrit: + case hwmon_temp_crit: + case hwmon_temp_enable: + return 0644; + case hwmon_temp_min_alarm: + case hwmon_temp_max_alarm: + case hwmon_temp_crit_alarm: + case hwmon_temp_lcrit_alarm: + return 0444; + default: + return 0; + } +} + +static inline bool spd5118_parity8(u8 w) +{ + w ^= w >> 4; + return (0x6996 >> (w & 0xf)) & 1; +} + +/* + * Bank and vendor id are 8-bit fields with seven data bits and odd parity. + * Vendor IDs 0 and 0x7f are invalid. + * See Jedec standard JEP106BJ for details and a list of assigned vendor IDs. + */ +static bool spd5118_vendor_valid(u8 bank, u8 id) +{ + if (!spd5118_parity8(bank) || !spd5118_parity8(id)) + return false; + + id &= 0x7f; + return id && id != 0x7f; +} + +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int spd5118_detect(struct i2c_client *client, struct i2c_board_info *info) +{ + struct i2c_adapter *adapter = client->adapter; + int regval; + + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | + I2C_FUNC_SMBUS_WORD_DATA)) + return -ENODEV; + + regval = i2c_smbus_read_word_swapped(client, SPD5118_REG_TYPE); + if (regval != 0x5118) + return -ENODEV; + + regval = i2c_smbus_read_word_data(client, SPD5118_REG_VENDOR); + if (regval < 0 || !spd5118_vendor_valid(regval & 0xff, regval >> 8)) + return -ENODEV; + + regval = i2c_smbus_read_byte_data(client, SPD5118_REG_CAPABILITY); + if (regval < 0) + return -ENODEV; + if (!(regval & SPD5118_CAP_TS_SUPPORT) || (regval & 0xfc)) + return -ENODEV; + + regval = i2c_smbus_read_byte_data(client, SPD5118_REG_TEMP_CLR); + if (regval) + return -ENODEV; + regval = i2c_smbus_read_byte_data(client, SPD5118_REG_ERROR_CLR); + if (regval) + return -ENODEV; + + regval = i2c_smbus_read_byte_data(client, SPD5118_REG_REVISION); + if (regval < 0 || (regval & 0xc1)) + return -ENODEV; + + regval = i2c_smbus_read_byte_data(client, SPD5118_REG_TEMP_CONFIG); + if (regval < 0) + return -ENODEV; + if (regval & ~SPD5118_TS_DISABLE) + return -ENODEV; + + strscpy(info->type, "spd5118", I2C_NAME_SIZE); + return 0; +} + +static const struct hwmon_channel_info *spd5118_info[] = { + HWMON_CHANNEL_INFO(chip, + HWMON_C_REGISTER_TZ), + HWMON_CHANNEL_INFO(temp, + HWMON_T_INPUT | + HWMON_T_LCRIT | HWMON_T_LCRIT_ALARM | + HWMON_T_MIN | HWMON_T_MIN_ALARM | + HWMON_T_MAX | HWMON_T_MAX_ALARM | + HWMON_T_CRIT | HWMON_T_CRIT_ALARM | + HWMON_T_ENABLE), + NULL +}; + +static const struct hwmon_ops spd5118_hwmon_ops = { + .is_visible = spd5118_is_visible, + .read = spd5118_read, + .write = spd5118_write, +}; + +static const struct hwmon_chip_info spd5118_chip_info = { + .ops = 
&spd5118_hwmon_ops, + .info = spd5118_info, +}; + +/* nvmem */ + +static ssize_t spd5118_nvmem_read_page(struct regmap *regmap, char *buf, + unsigned int offset, size_t count) +{ + int addr = (offset >> SPD5118_PAGE_SHIFT) * 0x100 + SPD5118_EEPROM_BASE; + int err; + + offset &= SPD5118_PAGE_MASK; + + /* Can't cross page boundaries */ + if (offset + count > SPD5118_PAGE_SIZE) + count = SPD5118_PAGE_SIZE - offset; + + err = regmap_bulk_read(regmap, addr + offset, buf, count); + if (err) + return err; + + return count; +} + +static int spd5118_nvmem_read(void *priv, unsigned int off, void *val, size_t count) +{ + struct spd5118_data *data = priv; + char *buf = val; + int ret; + + if (unlikely(!count)) + return count; + + if (off + count > SPD5118_EEPROM_SIZE) + return -EINVAL; + + mutex_lock(&data->nvmem_lock); + + while (count) { + ret = spd5118_nvmem_read_page(data->regmap, buf, off, count); + if (ret < 0) { + mutex_unlock(&data->nvmem_lock); + return ret; + } + buf += ret; + off += ret; + count -= ret; + } + mutex_unlock(&data->nvmem_lock); + return 0; +} + +static int spd5118_nvmem_init(struct device *dev, struct spd5118_data *data) +{ + struct nvmem_config nvmem_config = { + .type = NVMEM_TYPE_EEPROM, + .name = dev_name(dev), + .id = NVMEM_DEVID_NONE, + .dev = dev, + .base_dev = dev, + .read_only = true, + .root_only = false, + .owner = THIS_MODULE, + .compat = true, + .reg_read = spd5118_nvmem_read, + .priv = data, + .stride = 1, + .word_size = 1, + .size = SPD5118_EEPROM_SIZE, + }; + struct nvmem_device *nvmem; + + nvmem = devm_nvmem_register(dev, &nvmem_config); + return PTR_ERR_OR_ZERO(nvmem); +} + +/* regmap */ + +static bool spd5118_writeable_reg(struct device *dev, unsigned int reg) +{ + switch (reg) { + case SPD5118_REG_I2C_LEGACY_MODE: + case SPD5118_REG_TEMP_CLR: + case SPD5118_REG_TEMP_CONFIG: + case SPD5118_REG_TEMP_MAX: + case SPD5118_REG_TEMP_MAX + 1: + case SPD5118_REG_TEMP_MIN: + case SPD5118_REG_TEMP_MIN + 1: + case SPD5118_REG_TEMP_CRIT: + case SPD5118_REG_TEMP_CRIT + 1: + case SPD5118_REG_TEMP_LCRIT: + case SPD5118_REG_TEMP_LCRIT + 1: + return true; + default: + return false; + } +} + +static bool spd5118_volatile_reg(struct device *dev, unsigned int reg) +{ + switch (reg) { + case SPD5118_REG_TEMP_CLR: + case SPD5118_REG_ERROR_CLR: + case SPD5118_REG_TEMP: + case SPD5118_REG_TEMP + 1: + case SPD5118_REG_TEMP_STATUS: + return true; + default: + return false; + } +} + +static const struct regmap_range_cfg spd5118_regmap_range_cfg[] = { + { + .selector_reg = SPD5118_REG_I2C_LEGACY_MODE, + .selector_mask = SPD5118_LEGACY_PAGE_MASK, + .selector_shift = 0, + .window_start = 0, + .window_len = 0x100, + .range_min = 0, + .range_max = 0x7ff, + }, +}; + +static const struct regmap_config spd5118_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 0x7ff, + .writeable_reg = spd5118_writeable_reg, + .volatile_reg = spd5118_volatile_reg, + .cache_type = REGCACHE_MAPLE, + + .ranges = spd5118_regmap_range_cfg, + .num_ranges = ARRAY_SIZE(spd5118_regmap_range_cfg), +}; + +static int spd5118_init(struct i2c_client *client) +{ + struct i2c_adapter *adapter = client->adapter; + int err, regval, mode; + + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | + I2C_FUNC_SMBUS_WORD_DATA)) + return -ENODEV; + + regval = i2c_smbus_read_word_swapped(client, SPD5118_REG_TYPE); + if (regval < 0 || (regval && regval != 0x5118)) + return -ENODEV; + + /* + * If the device type registers return 0, it is possible that the chip + * has a non-zero page selected and takes the 
specification literally, + * i.e. disables access to volatile registers besides the page register + * if the page is not 0. Try to identify such chips. + */ + if (!regval) { + /* Vendor ID registers must also be 0 */ + regval = i2c_smbus_read_word_data(client, SPD5118_REG_VENDOR); + if (regval) + return -ENODEV; + + /* The selected page in MR11 must not be 0 */ + mode = i2c_smbus_read_byte_data(client, SPD5118_REG_I2C_LEGACY_MODE); + if (mode < 0 || (mode & ~SPD5118_LEGACY_MODE_MASK) || + !(mode & SPD5118_LEGACY_PAGE_MASK)) + return -ENODEV; + + err = i2c_smbus_write_byte_data(client, SPD5118_REG_I2C_LEGACY_MODE, + mode & SPD5118_LEGACY_MODE_ADDR); + if (err) + return -ENODEV; + + /* + * If the device type registers are still bad after selecting + * page 0, this is not a SPD5118 device. Restore original + * legacy mode register value and abort. + */ + regval = i2c_smbus_read_word_swapped(client, SPD5118_REG_TYPE); + if (regval != 0x5118) { + i2c_smbus_write_byte_data(client, SPD5118_REG_I2C_LEGACY_MODE, mode); + return -ENODEV; + } + } + + /* We are reasonably sure that this is really a SPD5118 hub controller */ + return 0; +} + +static int spd5118_probe(struct i2c_client *client) +{ + struct device *dev = &client->dev; + unsigned int regval, revision, vendor, bank; + struct spd5118_data *data; + struct device *hwmon_dev; + struct regmap *regmap; + int err; + + err = spd5118_init(client); + if (err) + return err; + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + regmap = devm_regmap_init_i2c(client, &spd5118_regmap_config); + if (IS_ERR(regmap)) + return dev_err_probe(dev, PTR_ERR(regmap), "regmap init failed\n"); + + err = regmap_read(regmap, SPD5118_REG_CAPABILITY, ®val); + if (err) + return err; + if (!(regval & SPD5118_CAP_TS_SUPPORT)) + return -ENODEV; + + err = regmap_read(regmap, SPD5118_REG_REVISION, &revision); + if (err) + return err; + + err = regmap_read(regmap, SPD5118_REG_VENDOR, &bank); + if (err) + return err; + err = regmap_read(regmap, SPD5118_REG_VENDOR + 1, &vendor); + if (err) + return err; + if (!spd5118_vendor_valid(bank, vendor)) + return -ENODEV; + + data->regmap = regmap; + mutex_init(&data->nvmem_lock); + dev_set_drvdata(dev, data); + + err = spd5118_nvmem_init(dev, data); + /* Ignore if NVMEM support is disabled */ + if (err && err != -EOPNOTSUPP) { + dev_err_probe(dev, err, "failed to register nvmem\n"); + return err; + } + + hwmon_dev = devm_hwmon_device_register_with_info(dev, "spd5118", + regmap, &spd5118_chip_info, + NULL); + if (IS_ERR(hwmon_dev)) + return PTR_ERR(hwmon_dev); + + /* + * From JESD300-5B + * MR2 bits [5:4]: Major revision, 1..4 + * MR2 bits [3:1]: Minor revision, 0..8? Probably a typo, assume 1..8 + */ + dev_info(dev, "DDR5 temperature sensor: vendor 0x%02x:0x%02x revision %d.%d\n", + bank & 0x7f, vendor, ((revision >> 4) & 0x03) + 1, ((revision >> 1) & 0x07) + 1); + + return 0; +} + +static int spd5118_suspend(struct device *dev) +{ + struct spd5118_data *data = dev_get_drvdata(dev); + struct regmap *regmap = data->regmap; + u32 regval; + int err; + + /* + * Make sure the configuration register in the regmap cache is current + * before bypassing it. 
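+ * The sequence below then sets TS_DISABLE with the cache bypassed, so the
+ * sensor is stopped without clobbering the cached enable state, and marks
+ * the cache dirty in cache-only mode so that regcache_sync() in
+ * spd5118_resume() restores the user's configuration on resume.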
+ */ + err = regmap_read(regmap, SPD5118_REG_TEMP_CONFIG, ®val); + if (err < 0) + return err; + + regcache_cache_bypass(regmap, true); + regmap_update_bits(regmap, SPD5118_REG_TEMP_CONFIG, SPD5118_TS_DISABLE, + SPD5118_TS_DISABLE); + regcache_cache_bypass(regmap, false); + + regcache_cache_only(regmap, true); + regcache_mark_dirty(regmap); + + return 0; +} + +static int spd5118_resume(struct device *dev) +{ + struct spd5118_data *data = dev_get_drvdata(dev); + struct regmap *regmap = data->regmap; + + regcache_cache_only(regmap, false); + return regcache_sync(regmap); +} + +static DEFINE_SIMPLE_DEV_PM_OPS(spd5118_pm_ops, spd5118_suspend, spd5118_resume); + +static const struct i2c_device_id spd5118_id[] = { + { "spd5118", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, spd5118_id); + +static const struct of_device_id spd5118_of_ids[] = { + { .compatible = "jedec,spd5118", }, + { } +}; +MODULE_DEVICE_TABLE(of, spd5118_of_ids); + +static struct i2c_driver spd5118_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "spd5118", + .of_match_table = spd5118_of_ids, + .pm = pm_sleep_ptr(&spd5118_pm_ops), + }, + .probe = spd5118_probe, + .id_table = spd5118_id, + .detect = IS_ENABLED(CONFIG_SENSORS_SPD5118_DETECT) ? spd5118_detect : NULL, + .address_list = IS_ENABLED(CONFIG_SENSORS_SPD5118_DETECT) ? normal_i2c : NULL, +}; + +module_i2c_driver(spd5118_driver); + +MODULE_AUTHOR("René Rebe <rene@exactcode.de>"); +MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>"); +MODULE_DESCRIPTION("SPD 5118 driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/thmc50.c b/drivers/hwmon/thmc50.c index 68ba26bc9014..0cbdb91698b1 100644 --- a/drivers/hwmon/thmc50.c +++ b/drivers/hwmon/thmc50.c @@ -377,8 +377,6 @@ static void thmc50_init_client(struct thmc50_data *data) i2c_smbus_write_byte_data(client, THMC50_REG_CONF, config); } -static const struct i2c_device_id thmc50_id[]; - static int thmc50_probe(struct i2c_client *client) { struct device *dev = &client->dev; @@ -391,7 +389,7 @@ static int thmc50_probe(struct i2c_client *client) return -ENOMEM; data->client = client; - data->type = i2c_match_id(thmc50_id, client)->driver_data; + data->type = (uintptr_t)i2c_get_match_data(client); mutex_init(&data->update_lock); thmc50_init_client(data); diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c index df1b45a62e80..853dbe708ff5 100644 --- a/drivers/hwmon/tmp401.c +++ b/drivers/hwmon/tmp401.c @@ -693,7 +693,7 @@ static int tmp401_probe(struct i2c_client *client) data->client = client; mutex_init(&data->update_lock); - data->kind = i2c_match_id(tmp401_id, client)->driver_data; + data->kind = (uintptr_t)i2c_get_match_data(client); data->regmap = devm_regmap_init(dev, NULL, data, &tmp401_regmap_config); if (IS_ERR(data->regmap)) diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c index 10b66c9ce045..7a6f9532e594 100644 --- a/drivers/hwmon/tmp421.c +++ b/drivers/hwmon/tmp421.c @@ -446,11 +446,7 @@ static int tmp421_probe(struct i2c_client *client) return -ENOMEM; mutex_init(&data->update_lock); - if (client->dev.of_node) - data->channels = (unsigned long) - of_device_get_match_data(&client->dev); - else - data->channels = i2c_match_id(tmp421_id, client)->driver_data; + data->channels = (unsigned long)i2c_get_match_data(client); data->client = client; for (i = 0; i < data->channels; i++) { diff --git a/drivers/hwmon/tmp464.c b/drivers/hwmon/tmp464.c index f58ca4c6acb6..3ee1137533d6 100644 --- a/drivers/hwmon/tmp464.c +++ b/drivers/hwmon/tmp464.c @@ -666,10 +666,7 @@ static int tmp464_probe(struct 
i2c_client *client) mutex_init(&data->update_lock); - if (dev->of_node) - data->channels = (int)(unsigned long)of_device_get_match_data(&client->dev); - else - data->channels = i2c_match_id(tmp464_id, client)->driver_data; + data->channels = (int)(unsigned long)i2c_get_match_data(client); data->regmap = devm_regmap_init_i2c(client, &tmp464_regmap_config); if (IS_ERR(data->regmap)) diff --git a/drivers/hwmon/tmp513.c b/drivers/hwmon/tmp513.c index ea6f4416c124..926d28cd3fab 100644 --- a/drivers/hwmon/tmp513.c +++ b/drivers/hwmon/tmp513.c @@ -159,7 +159,7 @@ static const u8 TMP51X_CURR_INPUT[2] = { TMP51X_BUS_CURRENT_RESULT }; -static struct regmap_config tmp51x_regmap_config = { +static const struct regmap_config tmp51x_regmap_config = { .reg_bits = 8, .val_bits = 16, .max_register = TMP51X_MAX_REGISTER_ADDR, diff --git a/drivers/hwmon/tps23861.c b/drivers/hwmon/tps23861.c index d33ecbac00d6..dfcfb09d9f3c 100644 --- a/drivers/hwmon/tps23861.c +++ b/drivers/hwmon/tps23861.c @@ -117,7 +117,7 @@ struct tps23861_data { struct dentry *debugfs_dir; }; -static struct regmap_config tps23861_regmap_config = { +static const struct regmap_config tps23861_regmap_config = { .reg_bits = 8, .val_bits = 8, .max_register = 0x6f, diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c index fe960c0a624f..7d7d70afde65 100644 --- a/drivers/hwmon/w83627ehf.c +++ b/drivers/hwmon/w83627ehf.c @@ -895,7 +895,7 @@ store_target_temp(struct device *dev, struct device_attribute *attr, if (err < 0) return err; - val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 127); + val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 127000), 1000); mutex_lock(&data->update_lock); data->target_temp[nr] = val; @@ -920,7 +920,7 @@ store_tolerance(struct device *dev, struct device_attribute *attr, return err; /* Limit the temp to 0C - 15C */ - val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 15); + val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 15000), 1000); mutex_lock(&data->update_lock); reg = w83627ehf_read_value(data, W83627EHF_REG_TOLERANCE[nr]); diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c index cba5ec432e6d..b7957c84d235 100644 --- a/drivers/hwmon/w83781d.c +++ b/drivers/hwmon/w83781d.c @@ -1192,8 +1192,6 @@ static void w83781d_remove_files(struct device *dev) sysfs_remove_group(&dev->kobj, &w83781d_group_other); } -static const struct i2c_device_id w83781d_ids[]; - static int w83781d_probe(struct i2c_client *client) { struct device *dev = &client->dev; @@ -1208,7 +1206,7 @@ static int w83781d_probe(struct i2c_client *client) mutex_init(&data->lock); mutex_init(&data->update_lock); - data->type = i2c_match_id(w83781d_ids, client)->driver_data; + data->type = (uintptr_t)i2c_get_match_data(client); data->client = client; /* attach secondary i2c lm75-like clients */ diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c index c446e00db658..5174db69db5e 100644 --- a/drivers/hwmon/w83795.c +++ b/drivers/hwmon/w83795.c @@ -2134,8 +2134,6 @@ static void w83795_apply_temp_config(struct w83795_data *data, u8 config, } } -static const struct i2c_device_id w83795_id[]; - static int w83795_probe(struct i2c_client *client) { int i; @@ -2149,7 +2147,7 @@ static int w83795_probe(struct i2c_client *client) return -ENOMEM; i2c_set_clientdata(client, data); - data->chip_type = i2c_match_id(w83795_id, client)->driver_data; + data->chip_type = (uintptr_t)i2c_get_match_data(client); data->bank = i2c_smbus_read_byte_data(client, W83795_REG_BANKSEL); mutex_init(&data->update_lock); diff --git a/drivers/i2c/Kconfig 
b/drivers/i2c/Kconfig index 9388823bb0bb..44710267d669 100644 --- a/drivers/i2c/Kconfig +++ b/drivers/i2c/Kconfig @@ -135,7 +135,7 @@ config I2C_SLAVE_EEPROM Documentation/i2c/slave-eeprom-backend.rst for further details. config I2C_SLAVE_TESTUNIT - tristate "I2C eeprom testunit driver" + tristate "I2C testunit driver" help This backend can be used to trigger test cases for I2C bus masters which require a remote device with certain capabilities, e.g. diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c index a12525b3186b..f448505d5468 100644 --- a/drivers/i2c/busses/i2c-pnx.c +++ b/drivers/i2c/busses/i2c-pnx.c @@ -15,7 +15,6 @@ #include <linux/ioport.h> #include <linux/delay.h> #include <linux/i2c.h> -#include <linux/timer.h> #include <linux/completion.h> #include <linux/platform_device.h> #include <linux/io.h> @@ -32,7 +31,6 @@ struct i2c_pnx_mif { int ret; /* Return value */ int mode; /* Interface mode */ struct completion complete; /* I/O completion */ - struct timer_list timer; /* Timeout */ u8 * buf; /* Data buffer */ int len; /* Length of data buffer */ int order; /* RX Bytes to order via TX */ @@ -117,24 +115,6 @@ static inline int wait_reset(struct i2c_pnx_algo_data *data) return (timeout <= 0); } -static inline void i2c_pnx_arm_timer(struct i2c_pnx_algo_data *alg_data) -{ - struct timer_list *timer = &alg_data->mif.timer; - unsigned long expires = msecs_to_jiffies(alg_data->timeout); - - if (expires <= 1) - expires = 2; - - del_timer_sync(timer); - - dev_dbg(&alg_data->adapter.dev, "Timer armed at %lu plus %lu jiffies.\n", - jiffies, expires); - - timer->expires = jiffies + expires; - - add_timer(timer); -} - /** * i2c_pnx_start - start a device * @slave_addr: slave address @@ -259,8 +239,6 @@ static int i2c_pnx_master_xmit(struct i2c_pnx_algo_data *alg_data) ~(mcntrl_afie | mcntrl_naie | mcntrl_drmie), I2C_REG_CTL(alg_data)); - del_timer_sync(&alg_data->mif.timer); - dev_dbg(&alg_data->adapter.dev, "%s(): Waking up xfer routine.\n", __func__); @@ -276,8 +254,6 @@ static int i2c_pnx_master_xmit(struct i2c_pnx_algo_data *alg_data) ~(mcntrl_afie | mcntrl_naie | mcntrl_drmie), I2C_REG_CTL(alg_data)); - /* Stop timer. */ - del_timer_sync(&alg_data->mif.timer); dev_dbg(&alg_data->adapter.dev, "%s(): Waking up xfer routine after zero-xfer.\n", __func__); @@ -364,8 +340,6 @@ static int i2c_pnx_master_rcv(struct i2c_pnx_algo_data *alg_data) mcntrl_drmie | mcntrl_daie); iowrite32(ctl, I2C_REG_CTL(alg_data)); - /* Kill timer. */ - del_timer_sync(&alg_data->mif.timer); complete(&alg_data->mif.complete); } } @@ -400,8 +374,6 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id) mcntrl_drmie); iowrite32(ctl, I2C_REG_CTL(alg_data)); - /* Stop timer, to prevent timeout. */ - del_timer_sync(&alg_data->mif.timer); complete(&alg_data->mif.complete); } else if (stat & mstatus_nai) { /* Slave did not acknowledge, generate a STOP */ @@ -419,8 +391,6 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id) /* Our return value. */ alg_data->mif.ret = -EIO; - /* Stop timer, to prevent timeout. 
*/ - del_timer_sync(&alg_data->mif.timer); complete(&alg_data->mif.complete); } else { /* @@ -453,9 +423,8 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static void i2c_pnx_timeout(struct timer_list *t) +static void i2c_pnx_timeout(struct i2c_pnx_algo_data *alg_data) { - struct i2c_pnx_algo_data *alg_data = from_timer(alg_data, t, mif.timer); u32 ctl; dev_err(&alg_data->adapter.dev, @@ -472,7 +441,6 @@ static void i2c_pnx_timeout(struct timer_list *t) iowrite32(ctl, I2C_REG_CTL(alg_data)); wait_reset(alg_data); alg_data->mif.ret = -EIO; - complete(&alg_data->mif.complete); } static inline void bus_reset_if_active(struct i2c_pnx_algo_data *alg_data) @@ -514,6 +482,7 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) struct i2c_msg *pmsg; int rc = 0, completed = 0, i; struct i2c_pnx_algo_data *alg_data = adap->algo_data; + unsigned long time_left; u32 stat; dev_dbg(&alg_data->adapter.dev, @@ -548,7 +517,6 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) dev_dbg(&alg_data->adapter.dev, "%s(): mode %d, %d bytes\n", __func__, alg_data->mif.mode, alg_data->mif.len); - i2c_pnx_arm_timer(alg_data); /* initialize the completion var */ init_completion(&alg_data->mif.complete); @@ -564,7 +532,10 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) break; /* Wait for completion */ - wait_for_completion(&alg_data->mif.complete); + time_left = wait_for_completion_timeout(&alg_data->mif.complete, + alg_data->timeout); + if (time_left == 0) + i2c_pnx_timeout(alg_data); if (!(rc = alg_data->mif.ret)) completed++; @@ -653,7 +624,10 @@ static int i2c_pnx_probe(struct platform_device *pdev) alg_data->adapter.algo_data = alg_data; alg_data->adapter.nr = pdev->id; - alg_data->timeout = I2C_PNX_TIMEOUT_DEFAULT; + alg_data->timeout = msecs_to_jiffies(I2C_PNX_TIMEOUT_DEFAULT); + if (alg_data->timeout <= 1) + alg_data->timeout = 2; + #ifdef CONFIG_OF alg_data->adapter.dev.of_node = of_node_get(pdev->dev.of_node); if (pdev->dev.of_node) { @@ -673,8 +647,6 @@ static int i2c_pnx_probe(struct platform_device *pdev) if (IS_ERR(alg_data->clk)) return PTR_ERR(alg_data->clk); - timer_setup(&alg_data->mif.timer, i2c_pnx_timeout, 0); - snprintf(alg_data->adapter.name, sizeof(alg_data->adapter.name), "%s", pdev->name); diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index 828aa2ea0fe4..185a5d60f101 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c @@ -257,6 +257,14 @@ static void rcar_i2c_init(struct rcar_i2c_priv *priv) } } +static void rcar_i2c_reset_slave(struct rcar_i2c_priv *priv) +{ + rcar_i2c_write(priv, ICSIER, 0); + rcar_i2c_write(priv, ICSSR, 0); + rcar_i2c_write(priv, ICSCR, SDBS); + rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */ +} + static int rcar_i2c_bus_barrier(struct rcar_i2c_priv *priv) { int ret; @@ -875,6 +883,10 @@ static int rcar_i2c_do_reset(struct rcar_i2c_priv *priv) { int ret; + /* Don't reset if a slave instance is currently running */ + if (priv->slave) + return -EISCONN; + ret = reset_control_reset(priv->rstc); if (ret) return ret; @@ -903,10 +915,10 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap, /* Gen3+ needs a reset. 
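(With the rcar_i2c_do_reset() change above, the reset is refused, returning -EISCONN, while a slave instance is registered.)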
That also allows RXDMA once */ if (priv->devtype >= I2C_RCAR_GEN3) { - priv->flags &= ~ID_P_NO_RXDMA; ret = rcar_i2c_do_reset(priv); if (ret) goto out; + priv->flags &= ~ID_P_NO_RXDMA; } rcar_i2c_init(priv); @@ -1033,11 +1045,8 @@ static int rcar_unreg_slave(struct i2c_client *slave) /* ensure no irq is running before clearing ptr */ disable_irq(priv->irq); - rcar_i2c_write(priv, ICSIER, 0); - rcar_i2c_write(priv, ICSSR, 0); + rcar_i2c_reset_slave(priv); enable_irq(priv->irq); - rcar_i2c_write(priv, ICSCR, SDBS); - rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */ priv->slave = NULL; @@ -1152,7 +1161,9 @@ static int rcar_i2c_probe(struct platform_device *pdev) goto out_pm_disable; } - rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */ + /* Bring hardware to known state */ + rcar_i2c_init(priv); + rcar_i2c_reset_slave(priv); if (priv->devtype < I2C_RCAR_GEN3) { irqflags |= IRQF_NO_THREAD; @@ -1168,6 +1179,7 @@ static int rcar_i2c_probe(struct platform_device *pdev) if (of_property_read_bool(dev->of_node, "smbus")) priv->flags |= ID_P_HOST_NOTIFY; + /* R-Car Gen3+ needs a reset before every transfer */ if (priv->devtype >= I2C_RCAR_GEN3) { priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL); if (IS_ERR(priv->rstc)) { @@ -1178,6 +1190,9 @@ static int rcar_i2c_probe(struct platform_device *pdev) ret = reset_control_status(priv->rstc); if (ret < 0) goto out_pm_put; + + /* hard reset disturbs HostNotify local target, so disable it */ + priv->flags &= ~ID_P_HOST_NOTIFY; } ret = platform_get_irq(pdev, 0); diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index db0d1ac82910..7e7b15440832 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -1067,6 +1067,7 @@ EXPORT_SYMBOL(i2c_find_device_by_fwnode); static const struct i2c_device_id dummy_id[] = { { "dummy", 0 }, + { "smbus_host_notify", 0 }, { }, }; diff --git a/drivers/i2c/i2c-slave-testunit.c b/drivers/i2c/i2c-slave-testunit.c index ca43e98cae1b..23a11e4e9256 100644 --- a/drivers/i2c/i2c-slave-testunit.c +++ b/drivers/i2c/i2c-slave-testunit.c @@ -118,6 +118,13 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client, queue_delayed_work(system_long_wq, &tu->worker, msecs_to_jiffies(10 * tu->regs[TU_REG_DELAY])); } + + /* + * Reset reg_idx to avoid that work gets queued again in case of + * STOP after a following read message. But do not clear TU regs + * here because we still need them in the workqueue! + */ + tu->reg_idx = 0; break; case I2C_SLAVE_WRITE_REQUESTED: diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c index 97f338b123b1..f809f0ef2004 100644 --- a/drivers/i2c/i2c-smbus.c +++ b/drivers/i2c/i2c-smbus.c @@ -308,7 +308,7 @@ EXPORT_SYMBOL_GPL(i2c_free_slave_host_notify_device); * target systems are the same. 
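 * (For example, with the DDR5/LPDDR5 cases added below, a DDR5 system now
 * gets "spd5118" devices instantiated, handled by the hwmon driver added
 * earlier in this patch.)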
* Restrictions to automatic SPD instantiation: * - Only works if all filled slots have the same memory type - * - Only works for DDR, DDR2, DDR3 and DDR4 for now + * - Only works for (LP)DDR memory types up to DDR5 * - Only works on systems with 1 to 8 memory slots */ #if IS_ENABLED(CONFIG_DMI) @@ -382,6 +382,10 @@ void i2c_register_spd(struct i2c_adapter *adap) case 0x1E: /* LPDDR4 */ name = "ee1004"; break; + case 0x22: /* DDR5 */ + case 0x23: /* LPDDR5 */ + name = "spd5118"; + break; default: dev_info(&adap->dev, "Memory type 0x%02x not supported yet, not instantiating SPD\n", diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index e486027f8b07..9aab7abc2ae9 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -1494,53 +1494,53 @@ static const struct idle_cpu idle_cpu_srf __initconst = { }; static const struct x86_cpu_id intel_idle_ids[] __initconst = { - X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP, &idle_cpu_nhx), - X86_MATCH_INTEL_FAM6_MODEL(NEHALEM, &idle_cpu_nehalem), - X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_G, &idle_cpu_nehalem), - X86_MATCH_INTEL_FAM6_MODEL(WESTMERE, &idle_cpu_nehalem), - X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EP, &idle_cpu_nhx), - X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EX, &idle_cpu_nhx), - X86_MATCH_INTEL_FAM6_MODEL(ATOM_BONNELL, &idle_cpu_atom), - X86_MATCH_INTEL_FAM6_MODEL(ATOM_BONNELL_MID, &idle_cpu_lincroft), - X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EX, &idle_cpu_nhx), - X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE, &idle_cpu_snb), - X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X, &idle_cpu_snx), - X86_MATCH_INTEL_FAM6_MODEL(ATOM_SALTWELL, &idle_cpu_atom), - X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, &idle_cpu_byt), - X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID, &idle_cpu_tangier), - X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, &idle_cpu_cht), - X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE, &idle_cpu_ivb), - X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X, &idle_cpu_ivt), - X86_MATCH_INTEL_FAM6_MODEL(HASWELL, &idle_cpu_hsw), - X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, &idle_cpu_hsx), - X86_MATCH_INTEL_FAM6_MODEL(HASWELL_L, &idle_cpu_hsw), - X86_MATCH_INTEL_FAM6_MODEL(HASWELL_G, &idle_cpu_hsw), - X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_D, &idle_cpu_avn), - X86_MATCH_INTEL_FAM6_MODEL(BROADWELL, &idle_cpu_bdw), - X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G, &idle_cpu_bdw), - X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, &idle_cpu_bdx), - X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, &idle_cpu_bdx), - X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, &idle_cpu_skl), - X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, &idle_cpu_skl), - X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, &idle_cpu_skl), - X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &idle_cpu_skl), - X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &idle_cpu_skx), - X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &idle_cpu_icx), - X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &idle_cpu_icx), - X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &idle_cpu_adl), - X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &idle_cpu_adl_l), - X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, &idle_cpu_mtl_l), - X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, &idle_cpu_gmt), - X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &idle_cpu_spr), - X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &idle_cpu_spr), - X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &idle_cpu_knl), - X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &idle_cpu_knl), - X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &idle_cpu_bxt), - X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &idle_cpu_bxt), - X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &idle_cpu_dnv), - X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, 
&idle_cpu_snr), - X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT, &idle_cpu_grr), - X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, &idle_cpu_srf), + X86_MATCH_VFM(INTEL_NEHALEM_EP, &idle_cpu_nhx), + X86_MATCH_VFM(INTEL_NEHALEM, &idle_cpu_nehalem), + X86_MATCH_VFM(INTEL_NEHALEM_G, &idle_cpu_nehalem), + X86_MATCH_VFM(INTEL_WESTMERE, &idle_cpu_nehalem), + X86_MATCH_VFM(INTEL_WESTMERE_EP, &idle_cpu_nhx), + X86_MATCH_VFM(INTEL_NEHALEM_EX, &idle_cpu_nhx), + X86_MATCH_VFM(INTEL_ATOM_BONNELL, &idle_cpu_atom), + X86_MATCH_VFM(INTEL_ATOM_BONNELL_MID, &idle_cpu_lincroft), + X86_MATCH_VFM(INTEL_WESTMERE_EX, &idle_cpu_nhx), + X86_MATCH_VFM(INTEL_SANDYBRIDGE, &idle_cpu_snb), + X86_MATCH_VFM(INTEL_SANDYBRIDGE_X, &idle_cpu_snx), + X86_MATCH_VFM(INTEL_ATOM_SALTWELL, &idle_cpu_atom), + X86_MATCH_VFM(INTEL_ATOM_SILVERMONT, &idle_cpu_byt), + X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_MID, &idle_cpu_tangier), + X86_MATCH_VFM(INTEL_ATOM_AIRMONT, &idle_cpu_cht), + X86_MATCH_VFM(INTEL_IVYBRIDGE, &idle_cpu_ivb), + X86_MATCH_VFM(INTEL_IVYBRIDGE_X, &idle_cpu_ivt), + X86_MATCH_VFM(INTEL_HASWELL, &idle_cpu_hsw), + X86_MATCH_VFM(INTEL_HASWELL_X, &idle_cpu_hsx), + X86_MATCH_VFM(INTEL_HASWELL_L, &idle_cpu_hsw), + X86_MATCH_VFM(INTEL_HASWELL_G, &idle_cpu_hsw), + X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_D, &idle_cpu_avn), + X86_MATCH_VFM(INTEL_BROADWELL, &idle_cpu_bdw), + X86_MATCH_VFM(INTEL_BROADWELL_G, &idle_cpu_bdw), + X86_MATCH_VFM(INTEL_BROADWELL_X, &idle_cpu_bdx), + X86_MATCH_VFM(INTEL_BROADWELL_D, &idle_cpu_bdx), + X86_MATCH_VFM(INTEL_SKYLAKE_L, &idle_cpu_skl), + X86_MATCH_VFM(INTEL_SKYLAKE, &idle_cpu_skl), + X86_MATCH_VFM(INTEL_KABYLAKE_L, &idle_cpu_skl), + X86_MATCH_VFM(INTEL_KABYLAKE, &idle_cpu_skl), + X86_MATCH_VFM(INTEL_SKYLAKE_X, &idle_cpu_skx), + X86_MATCH_VFM(INTEL_ICELAKE_X, &idle_cpu_icx), + X86_MATCH_VFM(INTEL_ICELAKE_D, &idle_cpu_icx), + X86_MATCH_VFM(INTEL_ALDERLAKE, &idle_cpu_adl), + X86_MATCH_VFM(INTEL_ALDERLAKE_L, &idle_cpu_adl_l), + X86_MATCH_VFM(INTEL_METEORLAKE_L, &idle_cpu_mtl_l), + X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, &idle_cpu_gmt), + X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &idle_cpu_spr), + X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &idle_cpu_spr), + X86_MATCH_VFM(INTEL_XEON_PHI_KNL, &idle_cpu_knl), + X86_MATCH_VFM(INTEL_XEON_PHI_KNM, &idle_cpu_knl), + X86_MATCH_VFM(INTEL_ATOM_GOLDMONT, &idle_cpu_bxt), + X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_PLUS, &idle_cpu_bxt), + X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_D, &idle_cpu_dnv), + X86_MATCH_VFM(INTEL_ATOM_TREMONT_D, &idle_cpu_snr), + X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, &idle_cpu_grr), + X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, &idle_cpu_srf), {} }; @@ -1990,27 +1990,27 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv) { int cstate; - switch (boot_cpu_data.x86_model) { - case INTEL_FAM6_IVYBRIDGE_X: + switch (boot_cpu_data.x86_vfm) { + case INTEL_IVYBRIDGE_X: ivt_idle_state_table_update(); break; - case INTEL_FAM6_ATOM_GOLDMONT: - case INTEL_FAM6_ATOM_GOLDMONT_PLUS: + case INTEL_ATOM_GOLDMONT: + case INTEL_ATOM_GOLDMONT_PLUS: bxt_idle_state_table_update(); break; - case INTEL_FAM6_SKYLAKE: + case INTEL_SKYLAKE: sklh_idle_state_table_update(); break; - case INTEL_FAM6_SKYLAKE_X: + case INTEL_SKYLAKE_X: skx_idle_state_table_update(); break; - case INTEL_FAM6_SAPPHIRERAPIDS_X: - case INTEL_FAM6_EMERALDRAPIDS_X: + case INTEL_SAPPHIRERAPIDS_X: + case INTEL_EMERALDRAPIDS_X: spr_idle_state_table_update(); break; - case INTEL_FAM6_ALDERLAKE: - case INTEL_FAM6_ALDERLAKE_L: - case INTEL_FAM6_ATOM_GRACEMONT: + case INTEL_ALDERLAKE: + case INTEL_ALDERLAKE_L: + case 
INTEL_ATOM_GRACEMONT: adl_idle_state_table_update(); break; } diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c index 16de57846bd9..2e84776f4fbd 100644 --- a/drivers/iio/industrialio-trigger.c +++ b/drivers/iio/industrialio-trigger.c @@ -315,7 +315,7 @@ int iio_trigger_attach_poll_func(struct iio_trigger *trig, * this is the case if the IIO device and the trigger device share the * same parent device. */ - if (iio_validate_own_trigger(pf->indio_dev, trig)) + if (!iio_validate_own_trigger(pf->indio_dev, trig)) trig->attached_own_device = true; return ret; diff --git a/drivers/iio/light/apds9306.c b/drivers/iio/light/apds9306.c index d6627b3e6000..66a063ea3db4 100644 --- a/drivers/iio/light/apds9306.c +++ b/drivers/iio/light/apds9306.c @@ -583,8 +583,8 @@ static int apds9306_intg_time_set(struct apds9306_data *data, int val2) return ret; intg_old = iio_gts_find_int_time_by_sel(&data->gts, intg_time_idx); - if (ret < 0) - return ret; + if (intg_old < 0) + return intg_old; if (intg_old == val2) return 0; diff --git a/drivers/input/mouse/vmmouse.c b/drivers/input/mouse/vmmouse.c index ea9eff7c8099..fb1d986a6895 100644 --- a/drivers/input/mouse/vmmouse.c +++ b/drivers/input/mouse/vmmouse.c @@ -21,19 +21,16 @@ #include "psmouse.h" #include "vmmouse.h" -#define VMMOUSE_PROTO_MAGIC 0x564D5868U - /* * Main commands supported by the vmmouse hypervisor port. */ -#define VMMOUSE_PROTO_CMD_GETVERSION 10 -#define VMMOUSE_PROTO_CMD_ABSPOINTER_DATA 39 -#define VMMOUSE_PROTO_CMD_ABSPOINTER_STATUS 40 -#define VMMOUSE_PROTO_CMD_ABSPOINTER_COMMAND 41 -#define VMMOUSE_PROTO_CMD_ABSPOINTER_RESTRICT 86 +#define VMWARE_CMD_ABSPOINTER_DATA 39 +#define VMWARE_CMD_ABSPOINTER_STATUS 40 +#define VMWARE_CMD_ABSPOINTER_COMMAND 41 +#define VMWARE_CMD_ABSPOINTER_RESTRICT 86 /* - * Subcommands for VMMOUSE_PROTO_CMD_ABSPOINTER_COMMAND + * Subcommands for VMWARE_CMD_ABSPOINTER_COMMAND */ #define VMMOUSE_CMD_ENABLE 0x45414552U #define VMMOUSE_CMD_DISABLE 0x000000f5U @@ -76,28 +73,6 @@ struct vmmouse_data { char dev_name[128]; }; -/* - * Hypervisor-specific bi-directional communication channel - * implementing the vmmouse protocol. Should never execute on - * bare metal hardware. - */ -#define VMMOUSE_CMD(cmd, in1, out1, out2, out3, out4) \ -({ \ - unsigned long __dummy1, __dummy2; \ - __asm__ __volatile__ (VMWARE_HYPERCALL : \ - "=a"(out1), \ - "=b"(out2), \ - "=c"(out3), \ - "=d"(out4), \ - "=S"(__dummy1), \ - "=D"(__dummy2) : \ - "a"(VMMOUSE_PROTO_MAGIC), \ - "b"(in1), \ - "c"(VMMOUSE_PROTO_CMD_##cmd), \ - "d"(0) : \ - "memory"); \ -}) - /** * vmmouse_report_button - report button state on the correct input device * @@ -145,14 +120,12 @@ static psmouse_ret_t vmmouse_report_events(struct psmouse *psmouse) struct input_dev *abs_dev = priv->abs_dev; struct input_dev *pref_dev; u32 status, x, y, z; - u32 dummy1, dummy2, dummy3; unsigned int queue_length; unsigned int count = 255; while (count--) { /* See if we have motion data. */ - VMMOUSE_CMD(ABSPOINTER_STATUS, 0, - status, dummy1, dummy2, dummy3); + status = vmware_hypercall1(VMWARE_CMD_ABSPOINTER_STATUS, 0); if ((status & VMMOUSE_ERROR) == VMMOUSE_ERROR) { psmouse_err(psmouse, "failed to fetch status data\n"); /* @@ -172,7 +145,8 @@ static psmouse_ret_t vmmouse_report_events(struct psmouse *psmouse) } /* Now get it */ - VMMOUSE_CMD(ABSPOINTER_DATA, 4, status, x, y, z); + status = vmware_hypercall4(VMWARE_CMD_ABSPOINTER_DATA, 4, + &x, &y, &z); /* * And report what we've got. 
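[Editorial sketch] The vmmouse changes swap the driver-local inline-asm VMMOUSE_CMD() macro for the shared vmware_hypercall<N>() helpers, where <N> counts the result registers the caller consumes. A reduced sketch of the polling step above, assuming the status-word layout used by the driver (error flags in the high half, number of queued words in the low half):

static int fetch_one_packet(u32 *x, u32 *y, u32 *z)
{
	u32 status;

	status = vmware_hypercall1(VMWARE_CMD_ABSPOINTER_STATUS, 0);
	if ((status & VMMOUSE_ERROR) == VMMOUSE_ERROR)
		return -EIO;		/* device wedged: disable, then re-enable */

	if ((status & 0xffff) < 4)
		return -EAGAIN;		/* a packet is 4 words: status, x, y, z */

	status = vmware_hypercall4(VMWARE_CMD_ABSPOINTER_DATA, 4, x, y, z);
	return (status & VMMOUSE_ERROR) == VMMOUSE_ERROR ? -EIO : 0;
}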
Prefer to report button @@ -247,14 +221,10 @@ static psmouse_ret_t vmmouse_process_byte(struct psmouse *psmouse) static void vmmouse_disable(struct psmouse *psmouse) { u32 status; - u32 dummy1, dummy2, dummy3, dummy4; - - VMMOUSE_CMD(ABSPOINTER_COMMAND, VMMOUSE_CMD_DISABLE, - dummy1, dummy2, dummy3, dummy4); - VMMOUSE_CMD(ABSPOINTER_STATUS, 0, - status, dummy1, dummy2, dummy3); + vmware_hypercall1(VMWARE_CMD_ABSPOINTER_COMMAND, VMMOUSE_CMD_DISABLE); + status = vmware_hypercall1(VMWARE_CMD_ABSPOINTER_STATUS, 0); if ((status & VMMOUSE_ERROR) != VMMOUSE_ERROR) psmouse_warn(psmouse, "failed to disable vmmouse device\n"); } @@ -271,26 +241,24 @@ static void vmmouse_disable(struct psmouse *psmouse) static int vmmouse_enable(struct psmouse *psmouse) { u32 status, version; - u32 dummy1, dummy2, dummy3, dummy4; /* * Try enabling the device. If successful, we should be able to * read valid version ID back from it. */ - VMMOUSE_CMD(ABSPOINTER_COMMAND, VMMOUSE_CMD_ENABLE, - dummy1, dummy2, dummy3, dummy4); + vmware_hypercall1(VMWARE_CMD_ABSPOINTER_COMMAND, VMMOUSE_CMD_ENABLE); /* * See if version ID can be retrieved. */ - VMMOUSE_CMD(ABSPOINTER_STATUS, 0, status, dummy1, dummy2, dummy3); + status = vmware_hypercall1(VMWARE_CMD_ABSPOINTER_STATUS, 0); if ((status & 0x0000ffff) == 0) { psmouse_dbg(psmouse, "empty flags - assuming no device\n"); return -ENXIO; } - VMMOUSE_CMD(ABSPOINTER_DATA, 1 /* single item */, - version, dummy1, dummy2, dummy3); + version = vmware_hypercall1(VMWARE_CMD_ABSPOINTER_DATA, + 1 /* single item */); if (version != VMMOUSE_VERSION_ID) { psmouse_dbg(psmouse, "Unexpected version value: %u vs %u\n", (unsigned) version, VMMOUSE_VERSION_ID); @@ -301,11 +269,11 @@ static int vmmouse_enable(struct psmouse *psmouse) /* * Restrict ioport access, if possible. 
*/ - VMMOUSE_CMD(ABSPOINTER_RESTRICT, VMMOUSE_RESTRICT_CPL0, - dummy1, dummy2, dummy3, dummy4); + vmware_hypercall1(VMWARE_CMD_ABSPOINTER_RESTRICT, + VMMOUSE_RESTRICT_CPL0); - VMMOUSE_CMD(ABSPOINTER_COMMAND, VMMOUSE_CMD_REQUEST_ABSOLUTE, - dummy1, dummy2, dummy3, dummy4); + vmware_hypercall1(VMWARE_CMD_ABSPOINTER_COMMAND, + VMMOUSE_CMD_REQUEST_ABSOLUTE); return 0; } @@ -342,7 +310,7 @@ static bool vmmouse_check_hypervisor(void) */ int vmmouse_detect(struct psmouse *psmouse, bool set_properties) { - u32 response, version, dummy1, dummy2; + u32 response, version, type; if (!vmmouse_check_hypervisor()) { psmouse_dbg(psmouse, @@ -351,9 +319,9 @@ int vmmouse_detect(struct psmouse *psmouse, bool set_properties) } /* Check if the device is present */ - response = ~VMMOUSE_PROTO_MAGIC; - VMMOUSE_CMD(GETVERSION, 0, version, response, dummy1, dummy2); - if (response != VMMOUSE_PROTO_MAGIC || version == 0xffffffffU) + response = ~VMWARE_HYPERVISOR_MAGIC; + version = vmware_hypercall3(VMWARE_CMD_GETVERSION, 0, &response, &type); + if (response != VMWARE_HYPERVISOR_MAGIC || version == 0xffffffffU) return -ENXIO; if (set_properties) { diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c index afd6a1841715..c776f9142610 100644 --- a/drivers/irqchip/irq-gic-common.c +++ b/drivers/irqchip/irq-gic-common.c @@ -7,6 +7,7 @@ #include <linux/io.h> #include <linux/irq.h> #include <linux/irqchip/arm-gic.h> +#include <linux/kernel.h> #include "irq-gic-common.h" @@ -45,7 +46,7 @@ void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks, } int gic_configure_irq(unsigned int irq, unsigned int type, - void __iomem *base, void (*sync_access)(void)) + void __iomem *base) { u32 confmask = 0x2 << ((irq % 16) * 2); u32 confoff = (irq / 16) * 4; @@ -84,14 +85,10 @@ int gic_configure_irq(unsigned int irq, unsigned int type, raw_spin_unlock_irqrestore(&irq_controller_lock, flags); - if (sync_access) - sync_access(); - return ret; } -void gic_dist_config(void __iomem *base, int gic_irqs, - void (*sync_access)(void)) +void gic_dist_config(void __iomem *base, int gic_irqs, u8 priority) { unsigned int i; @@ -106,7 +103,8 @@ void gic_dist_config(void __iomem *base, int gic_irqs, * Set priority on all global interrupts. */ for (i = 32; i < gic_irqs; i += 4) - writel_relaxed(GICD_INT_DEF_PRI_X4, base + GIC_DIST_PRI + i); + writel_relaxed(REPEAT_BYTE_U32(priority), + base + GIC_DIST_PRI + i); /* * Deactivate and disable all SPIs. 
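[Editorial sketch] gic_dist_config() and gic_cpu_config() now take the default priority as one byte and replicate it across each 32-bit GIC_DIST_PRI word, which packs four per-interrupt priority fields. REPEAT_BYTE_U32() (made available through the linux/kernel.h include added above) does the replication; an equivalent open-coded form for reference:

#include <linux/types.h>

/* 0xa0 -> 0xa0a0a0a0: the same priority byte for four interrupts */
static inline u32 repeat_byte_u32(u8 b)
{
	return (u32)b * 0x01010101U;
}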
Leave the PPI and SGIs @@ -118,12 +116,9 @@ void gic_dist_config(void __iomem *base, int gic_irqs, writel_relaxed(GICD_INT_EN_CLR_X32, base + GIC_DIST_ENABLE_CLEAR + i / 8); } - - if (sync_access) - sync_access(); } -void gic_cpu_config(void __iomem *base, int nr, void (*sync_access)(void)) +void gic_cpu_config(void __iomem *base, int nr, u8 priority) { int i; @@ -142,9 +137,6 @@ void gic_cpu_config(void __iomem *base, int nr, void (*sync_access)(void)) * Set priority on PPI and SGI interrupts */ for (i = 0; i < nr; i += 4) - writel_relaxed(GICD_INT_DEF_PRI_X4, + writel_relaxed(REPEAT_BYTE_U32(priority), base + GIC_DIST_PRI + i * 4 / 4); - - if (sync_access) - sync_access(); } diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h index f407cce9ecaa..e8eab72ef195 100644 --- a/drivers/irqchip/irq-gic-common.h +++ b/drivers/irqchip/irq-gic-common.h @@ -20,10 +20,9 @@ struct gic_quirk { }; int gic_configure_irq(unsigned int irq, unsigned int type, - void __iomem *base, void (*sync_access)(void)); -void gic_dist_config(void __iomem *base, int gic_irqs, - void (*sync_access)(void)); -void gic_cpu_config(void __iomem *base, int nr, void (*sync_access)(void)); + void __iomem *base); +void gic_dist_config(void __iomem *base, int gic_irqs, u8 priority); +void gic_cpu_config(void __iomem *base, int nr, u8 priority); void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks, void *data); void gic_enable_of_quirks(const struct device_node *np, diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 3c755d5dad6e..42e63272154e 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -59,7 +59,7 @@ static u32 lpi_id_bits; #define LPI_PROPBASE_SZ ALIGN(BIT(LPI_NRBITS), SZ_64K) #define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K) -#define LPI_PROP_DEFAULT_PRIO GICD_INT_DEF_PRI +static u8 __ro_after_init lpi_prop_prio; /* * Collection structure - just an ID, and a redistributor address to @@ -1926,7 +1926,7 @@ static int its_vlpi_unmap(struct irq_data *d) /* and restore the physical one */ irqd_clr_forwarded_to_vcpu(d); its_send_mapti(its_dev, d->hwirq, event); - lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO | + lpi_update_config(d, 0xff, (lpi_prop_prio | LPI_PROP_ENABLED | LPI_PROP_GROUP1)); @@ -2181,8 +2181,8 @@ static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids) static void gic_reset_prop_table(void *va) { - /* Priority 0xa0, Group-1, disabled */ - memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ); + /* Regular IRQ priority, Group-1, disabled */ + memset(va, lpi_prop_prio | LPI_PROP_GROUP1, LPI_PROPBASE_SZ); /* Make sure the GIC will observe the written configuration */ gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ); @@ -5650,7 +5650,7 @@ int __init its_lpi_memreserve_init(void) } int __init its_init(struct fwnode_handle *handle, struct rdists *rdists, - struct irq_domain *parent_domain) + struct irq_domain *parent_domain, u8 irq_prio) { struct device_node *of_node; struct its_node *its; @@ -5660,6 +5660,7 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists, gic_rdists = rdists; + lpi_prop_prio = irq_prio; its_parent = parent_domain; of_node = to_of_node(handle); if (of_node) diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 6fb276504bcc..6393f3d780e9 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -12,6 +12,7 @@ #include <linux/delay.h> #include <linux/interrupt.h> #include 
<linux/irqdomain.h> +#include <linux/kernel.h> #include <linux/kstrtox.h> #include <linux/of.h> #include <linux/of_address.h> @@ -24,6 +25,7 @@ #include <linux/irqchip.h> #include <linux/irqchip/arm-gic-common.h> #include <linux/irqchip/arm-gic-v3.h> +#include <linux/irqchip/arm-gic-v3-prio.h> #include <linux/irqchip/irq-partition-percpu.h> #include <linux/bitfield.h> #include <linux/bits.h> @@ -36,7 +38,8 @@ #include "irq-gic-common.h" -#define GICD_INT_NMI_PRI (GICD_INT_DEF_PRI & ~0x80) +static u8 dist_prio_irq __ro_after_init = GICV3_PRIO_IRQ; +static u8 dist_prio_nmi __ro_after_init = GICV3_PRIO_NMI; #define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0) #define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539 (1ULL << 1) @@ -44,6 +47,8 @@ #define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1) +static struct cpumask broken_rdists __read_mostly __maybe_unused; + struct redist_region { void __iomem *redist_base; phys_addr_t phys_base; @@ -108,29 +113,96 @@ static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key); */ static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis); -DEFINE_STATIC_KEY_FALSE(gic_nonsecure_priorities); -EXPORT_SYMBOL(gic_nonsecure_priorities); +static u32 gic_get_pribits(void) +{ + u32 pribits; -/* - * When the Non-secure world has access to group 0 interrupts (as a - * consequence of SCR_EL3.FIQ == 0), reading the ICC_RPR_EL1 register will - * return the Distributor's view of the interrupt priority. - * - * When GIC security is enabled (GICD_CTLR.DS == 0), the interrupt priority - * written by software is moved to the Non-secure range by the Distributor. - * - * If both are true (which is when gic_nonsecure_priorities gets enabled), - * we need to shift down the priority programmed by software to match it - * against the value returned by ICC_RPR_EL1. - */ -#define GICD_INT_RPR_PRI(priority) \ - ({ \ - u32 __priority = (priority); \ - if (static_branch_unlikely(&gic_nonsecure_priorities)) \ - __priority = 0x80 | (__priority >> 1); \ - \ - __priority; \ - }) + pribits = gic_read_ctlr(); + pribits &= ICC_CTLR_EL1_PRI_BITS_MASK; + pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT; + pribits++; + + return pribits; +} + +static bool gic_has_group0(void) +{ + u32 val; + u32 old_pmr; + + old_pmr = gic_read_pmr(); + + /* + * Let's find out if Group0 is under control of EL3 or not by + * setting the highest possible, non-zero priority in PMR. + * + * If SCR_EL3.FIQ is set, the priority gets shifted down in + * order for the CPU interface to set bit 7, and keep the + * actual priority in the non-secure range. In the process, it + * loses the least significant bit and the actual priority + * becomes 0x80. Reading it back returns 0, indicating that + * we don't have access to Group0. + */ + gic_write_pmr(BIT(8 - gic_get_pribits())); + val = gic_read_pmr(); + + gic_write_pmr(old_pmr); + + return val != 0; +} + +static inline bool gic_dist_security_disabled(void) +{ + return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS; +} + +static bool cpus_have_security_disabled __ro_after_init; +static bool cpus_have_group0 __ro_after_init; + +static void __init gic_prio_init(void) +{ + cpus_have_security_disabled = gic_dist_security_disabled(); + cpus_have_group0 = gic_has_group0(); + + /* + * How priority values are used by the GIC depends on two things: + * the security state of the GIC (controlled by the GICD_CTRL.DS bit) + * and if Group 0 interrupts can be delivered to Linux in the non-secure + * world as FIQs (controlled by the SCR_EL3.FIQ bit).
These affect the + * way priorities are presented in ICC_PMR_EL1 and in the distributor: + * + * GICD_CTRL.DS | SCR_EL3.FIQ | ICC_PMR_EL1 | Distributor + * ------------------------------------------------------- + * 1 | - | unchanged | unchanged + * ------------------------------------------------------- + * 0 | 1 | non-secure | non-secure + * ------------------------------------------------------- + * 0 | 0 | unchanged | non-secure + * + * In the non-secure view reads and writes are modified: + * + * - A value written is right-shifted by one and the MSB is set, + * forcing the priority into the non-secure range. + * + * - A value read is left-shifted by one. + * + * In the first two cases, where ICC_PMR_EL1 and the interrupt priority + * are both either modified or unchanged, we can use the same set of + * priorities. + * + * In the last case, where only the interrupt priorities are modified to + * be in the non-secure range, we program the non-secure values into + * the distributor to match the PMR values we want. + */ + if (cpus_have_group0 & !cpus_have_security_disabled) { + dist_prio_irq = __gicv3_prio_to_ns(dist_prio_irq); + dist_prio_nmi = __gicv3_prio_to_ns(dist_prio_nmi); + } + + pr_info("GICD_CTRL.DS=%d, SCR_EL3.FIQ=%d\n", + cpus_have_security_disabled, + !cpus_have_group0); +} /* rdist_nmi_refs[n] == number of cpus having the rdist interrupt n set as NMI */ static refcount_t *rdist_nmi_refs; @@ -556,7 +628,7 @@ static int gic_irq_nmi_setup(struct irq_data *d) desc->handle_irq = handle_fasteoi_nmi; } - gic_irq_set_prio(d, GICD_INT_NMI_PRI); + gic_irq_set_prio(d, dist_prio_nmi); return 0; } @@ -591,7 +663,7 @@ static void gic_irq_nmi_teardown(struct irq_data *d) desc->handle_irq = handle_fasteoi_irq; } - gic_irq_set_prio(d, GICD_INT_DEF_PRI); + gic_irq_set_prio(d, dist_prio_irq); } static bool gic_arm64_erratum_2941627_needed(struct irq_data *d) @@ -670,7 +742,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type) offset = convert_offset_index(d, GICD_ICFGR, &index); - ret = gic_configure_irq(index, type, base + offset, NULL); + ret = gic_configure_irq(index, type, base + offset); if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) { /* Misconfigured PPIs are usually not fatal */ pr_warn("GIC: PPI INTID%ld is secure or misconfigured\n", irq); @@ -753,7 +825,7 @@ static bool gic_rpr_is_nmi_prio(void) if (!gic_supports_nmi()) return false; - return unlikely(gic_read_rpr() == GICD_INT_RPR_PRI(GICD_INT_NMI_PRI)); + return unlikely(gic_read_rpr() == GICV3_PRIO_NMI); } static bool gic_irqnr_is_special(u32 irqnr) @@ -866,44 +938,6 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs __gic_handle_irq_from_irqson(regs); } -static u32 gic_get_pribits(void) -{ - u32 pribits; - - pribits = gic_read_ctlr(); - pribits &= ICC_CTLR_EL1_PRI_BITS_MASK; - pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT; - pribits++; - - return pribits; -} - -static bool gic_has_group0(void) -{ - u32 val; - u32 old_pmr; - - old_pmr = gic_read_pmr(); - - /* - * Let's find out if Group0 is under control of EL3 or not by - * setting the highest possible, non-zero priority in PMR. - * - * If SCR_EL3.FIQ is set, the priority gets shifted down in - * order for the CPU interface to set bit 7, and keep the - * actual priority in the non-secure range. In the process, it - * looses the least significant bit and the actual priority - * becomes 0x80. Reading it back returns 0, indicating that - * we're don't have access to Group0. 
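[Editorial sketch] When the distributor applies the non-secure view to priorities but ICC_PMR_EL1 does not (GICD_CTRL.DS == 0 together with SCR_EL3.FIQ == 0), gic_prio_init() pre-converts the values it will program into the distributor. Going by the table above, the conversion in the new arm-gic-v3-prio.h header should be the documented right-shift-and-set-MSB; a plausible form:

/* move a priority into the non-secure half of the range */
#define __gicv3_prio_to_ns(p)	(0x80 | ((p) >> 1))

/* e.g. a default IRQ priority of 0xa0 would be programmed as 0xd0 */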
- */ - gic_write_pmr(BIT(8 - gic_get_pribits())); - val = gic_read_pmr(); - - gic_write_pmr(old_pmr); - - return val != 0; -} - static void __init gic_dist_init(void) { unsigned int i; @@ -937,10 +971,11 @@ static void __init gic_dist_init(void) writel_relaxed(0, base + GICD_ICFGRnE + i / 4); for (i = 0; i < GIC_ESPI_NR; i += 4) - writel_relaxed(GICD_INT_DEF_PRI_X4, base + GICD_IPRIORITYRnE + i); + writel_relaxed(REPEAT_BYTE_U32(dist_prio_irq), + base + GICD_IPRIORITYRnE + i); /* Now do the common stuff */ - gic_dist_config(base, GIC_LINE_NR, NULL); + gic_dist_config(base, GIC_LINE_NR, dist_prio_irq); val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1; if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) { @@ -1119,12 +1154,6 @@ static void gic_update_rdist_properties(void) gic_data.rdists.has_vpend_valid_dirty ? "Valid+Dirty " : ""); } -/* Check whether it's single security state view */ -static inline bool gic_dist_security_disabled(void) -{ - return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS; -} - static void gic_cpu_sys_reg_init(void) { int i, cpu = smp_processor_id(); @@ -1152,18 +1181,14 @@ static void gic_cpu_sys_reg_init(void) write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1); } else if (gic_supports_nmi()) { /* - * Mismatch configuration with boot CPU, the system is likely - * to die as interrupt masking will not work properly on all - * CPUs + * Check that all CPUs use the same priority space. * - * The boot CPU calls this function before enabling NMI support, - * and as a result we'll never see this warning in the boot path - * for that CPU. + * If there's a mismatch with the boot CPU, the system is + * likely to die as interrupt masking will not work properly on + * all CPUs. */ - if (static_branch_unlikely(&gic_nonsecure_priorities)) - WARN_ON(!group0 || gic_dist_security_disabled()); - else - WARN_ON(group0 && !gic_dist_security_disabled()); + WARN_ON(group0 != cpus_have_group0); + WARN_ON(gic_dist_security_disabled() != cpus_have_security_disabled); } /* @@ -1282,7 +1307,8 @@ static void gic_cpu_init(void) for (i = 0; i < gic_data.ppi_nr + SGI_NR; i += 32) writel_relaxed(~0, rbase + GICR_IGROUPR0 + i / 8); - gic_cpu_config(rbase, gic_data.ppi_nr + SGI_NR, gic_redist_wait_for_rwp); + gic_cpu_config(rbase, gic_data.ppi_nr + SGI_NR, dist_prio_irq); + gic_redist_wait_for_rwp(); /* initialise system registers */ gic_cpu_sys_reg_init(); @@ -1293,6 +1319,18 @@ static void gic_cpu_init(void) #define MPIDR_TO_SGI_RS(mpidr) (MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT) #define MPIDR_TO_SGI_CLUSTER_ID(mpidr) ((mpidr) & ~0xFUL) +/* + * gic_starting_cpu() is called after the last point where cpuhp is allowed + * to fail. So pre check for problems earlier. + */ +static int gic_check_rdist(unsigned int cpu) +{ + if (cpumask_test_cpu(cpu, &broken_rdists)) + return -EINVAL; + + return 0; +} + static int gic_starting_cpu(unsigned int cpu) { gic_cpu_init(); @@ -1384,6 +1422,10 @@ static void __init gic_smp_init(void) }; int base_sgi; + cpuhp_setup_state_nocalls(CPUHP_BP_PREPARE_DYN, + "irqchip/arm/gicv3:checkrdist", + gic_check_rdist, NULL); + cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING, "irqchip/arm/gicv3:starting", gic_starting_cpu, NULL); @@ -1948,36 +1990,6 @@ static void gic_enable_nmi_support(void) pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n", gic_has_relaxed_pmr_sync() ? 
"relaxed" : "forced"); - /* - * How priority values are used by the GIC depends on two things: - * the security state of the GIC (controlled by the GICD_CTRL.DS bit) - * and if Group 0 interrupts can be delivered to Linux in the non-secure - * world as FIQs (controlled by the SCR_EL3.FIQ bit). These affect the - * ICC_PMR_EL1 register and the priority that software assigns to - * interrupts: - * - * GICD_CTRL.DS | SCR_EL3.FIQ | ICC_PMR_EL1 | Group 1 priority - * ----------------------------------------------------------- - * 1 | - | unchanged | unchanged - * ----------------------------------------------------------- - * 0 | 1 | non-secure | non-secure - * ----------------------------------------------------------- - * 0 | 0 | unchanged | non-secure - * - * where non-secure means that the value is right-shifted by one and the - * MSB bit set, to make it fit in the non-secure priority range. - * - * In the first two cases, where ICC_PMR_EL1 and the interrupt priority - * are both either modified or unchanged, we can use the same set of - * priorities. - * - * In the last case, where only the interrupt priorities are modified to - * be in the non-secure range, we use a different PMR value to mask IRQs - * and the rest of the values that we use remain unchanged. - */ - if (gic_has_group0() && !gic_dist_security_disabled()) - static_branch_enable(&gic_nonsecure_priorities); - static_branch_enable(&supports_pseudo_nmis); if (static_branch_likely(&supports_deactivate_key)) @@ -2058,6 +2070,7 @@ static int __init gic_init_bases(phys_addr_t dist_phys_base, gic_update_rdist_properties(); + gic_prio_init(); gic_dist_init(); gic_cpu_init(); gic_enable_nmi_support(); @@ -2065,7 +2078,7 @@ static int __init gic_init_bases(phys_addr_t dist_phys_base, gic_cpu_pm_init(); if (gic_dist_supports_lpis()) { - its_init(handle, &gic_data.rdists, gic_data.domain); + its_init(handle, &gic_data.rdists, gic_data.domain, dist_prio_irq); its_cpu_init(); its_lpi_memreserve_init(); } else { @@ -2365,8 +2378,24 @@ gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header, u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2; void __iomem *redist_base; - if (!acpi_gicc_is_usable(gicc)) + /* Neither enabled or online capable means it doesn't exist, skip it */ + if (!(gicc->flags & (ACPI_MADT_ENABLED | ACPI_MADT_GICC_ONLINE_CAPABLE))) + return 0; + + /* + * Capable but disabled CPUs can be brought online later. What about + * the redistributor? ACPI doesn't want to say! + * Virtual hotplug systems can use the MADT's "always-on" GICR entries. + * Otherwise, prevent such CPUs from being brought online. + */ + if (!(gicc->flags & ACPI_MADT_ENABLED)) { + int cpu = get_cpu_for_acpi_id(gicc->uid); + + pr_warn("CPU %u's redistributor is inaccessible: this CPU can't be brought online\n", cpu); + if (cpu >= 0) + cpumask_set_cpu(cpu, &broken_rdists); return 0; + } redist_base = ioremap(gicc->gicr_base_address, size); if (!redist_base) @@ -2413,21 +2442,15 @@ static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header, /* * If GICC is enabled and has valid gicr base address, then it means - * GICR base is presented via GICC + * GICR base is presented via GICC. The redistributor is only known to + * be accessible if the GICC is marked as enabled. If this bit is not + * set, we'd need to add the redistributor at runtime, which isn't + * supported. 
*/ - if (acpi_gicc_is_usable(gicc) && gicc->gicr_base_address) { + if (gicc->flags & ACPI_MADT_ENABLED && gicc->gicr_base_address) acpi_data.enabled_rdists++; - return 0; - } - /* - * It's perfectly valid firmware can pass disabled GICC entry, driver - * should not treat as errors, skip the entry instead of probe fail. - */ - if (!acpi_gicc_is_usable(gicc)) - return 0; - - return -ENODEV; + return 0; } static int __init gic_acpi_count_gicr_regions(void) @@ -2483,7 +2506,8 @@ static int __init gic_acpi_parse_virt_madt_gicc(union acpi_subtable_headers *hea int maint_irq_mode; static int first_madt = true; - if (!acpi_gicc_is_usable(gicc)) + if (!(gicc->flags & + (ACPI_MADT_ENABLED | ACPI_MADT_GICC_ONLINE_CAPABLE))) return 0; maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ? diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 98aa383e39db..3be7bd8cd8cd 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -303,7 +303,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type) type != IRQ_TYPE_EDGE_RISING) return -EINVAL; - ret = gic_configure_irq(gicirq, type, base + GIC_DIST_CONFIG, NULL); + ret = gic_configure_irq(gicirq, type, base + GIC_DIST_CONFIG); if (ret && gicirq < 32) { /* Misconfigured PPIs are usually not fatal */ pr_warn("GIC: PPI%ld is secure or misconfigured\n", gicirq - 16); @@ -479,7 +479,7 @@ static void gic_dist_init(struct gic_chip_data *gic) for (i = 32; i < gic_irqs; i += 4) writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4); - gic_dist_config(base, gic_irqs, NULL); + gic_dist_config(base, gic_irqs, GICD_INT_DEF_PRI); writel_relaxed(GICD_ENABLE, base + GIC_DIST_CTRL); } @@ -516,7 +516,7 @@ static int gic_cpu_init(struct gic_chip_data *gic) gic_cpu_map[i] &= ~cpu_mask; } - gic_cpu_config(dist_base, 32, NULL); + gic_cpu_config(dist_base, 32, GICD_INT_DEF_PRI); writel_relaxed(GICC_INT_PRI_THRESHOLD, base + GIC_CPU_PRIMASK); gic_cpu_if_up(gic); @@ -608,7 +608,7 @@ void gic_dist_restore(struct gic_chip_data *gic) dist_base + GIC_DIST_CONFIG + i * 4); for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++) - writel_relaxed(GICD_INT_DEF_PRI_X4, + writel_relaxed(REPEAT_BYTE_U32(GICD_INT_DEF_PRI), dist_base + GIC_DIST_PRI + i * 4); for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++) @@ -697,7 +697,7 @@ void gic_cpu_restore(struct gic_chip_data *gic) writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4); for (i = 0; i < DIV_ROUND_UP(32, 4); i++) - writel_relaxed(GICD_INT_DEF_PRI_X4, + writel_relaxed(REPEAT_BYTE_U32(GICD_INT_DEF_PRI), dist_base + GIC_DIST_PRI + i * 4); writel_relaxed(GICC_INT_PRI_THRESHOLD, cpu_base + GIC_CPU_PRIMASK); diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c index 46161f6ff289..31c3f70a5d5e 100644 --- a/drivers/irqchip/irq-hip04.c +++ b/drivers/irqchip/irq-hip04.c @@ -130,7 +130,7 @@ static int hip04_irq_set_type(struct irq_data *d, unsigned int type) raw_spin_lock(&irq_controller_lock); - ret = gic_configure_irq(irq, type, base + GIC_DIST_CONFIG, NULL); + ret = gic_configure_irq(irq, type, base + GIC_DIST_CONFIG); if (ret && irq < 32) { /* Misconfigured PPIs are usually not fatal */ pr_warn("GIC: PPI%d is secure or misconfigured\n", irq - 16); @@ -260,7 +260,7 @@ static void __init hip04_irq_dist_init(struct hip04_irq_data *intc) for (i = 32; i < nr_irqs; i += 2) writel_relaxed(cpumask, base + GIC_DIST_TARGET + ((i * 2) & ~3)); - gic_dist_config(base, nr_irqs, NULL); + gic_dist_config(base, nr_irqs, GICD_INT_DEF_PRI); writel_relaxed(1, base + GIC_DIST_CTRL); } @@ -287,7 
+287,7 @@ static void hip04_irq_cpu_init(struct hip04_irq_data *intc) if (i != cpu) hip04_cpu_map[i] &= ~cpu_mask; - gic_cpu_config(dist_base, 32, NULL); + gic_cpu_config(dist_base, 32, GICD_INT_DEF_PRI); writel_relaxed(0xf0, base + GIC_CPU_PRIMASK); writel_relaxed(1, base + GIC_CPU_CTRL); diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 4d11fc664cb0..b5d6ef430b86 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -897,7 +897,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size, sector_t sectors, struct block_device *cached_bdev, const struct block_device_operations *ops) { - struct request_queue *q; const size_t max_stripes = min_t(size_t, INT_MAX, SIZE_MAX / sizeof(atomic_t)); struct queue_limits lim = { @@ -909,6 +908,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size, .io_min = block_size, .logical_block_size = block_size, .physical_block_size = block_size, + .features = BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA, }; uint64_t n; int idx; @@ -974,13 +974,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size, d->disk->minors = BCACHE_MINORS; d->disk->fops = ops; d->disk->private_data = d; - - q = d->disk->queue; - - blk_queue_flag_set(QUEUE_FLAG_NONROT, d->disk->queue); - - blk_queue_write_cache(q, true, true); - return 0; out_bioset_exit: @@ -1423,8 +1416,8 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size) } if (bdev_io_opt(dc->bdev)) - dc->partial_stripes_expensive = - q->limits.raid_partial_stripes_expensive; + dc->partial_stripes_expensive = !!(q->limits.features & + BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE); ret = bcache_device_init(&dc->disk, block_size, bdev_nr_sectors(dc->bdev) - dc->sb.data_offset, diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 16884b585053..2d8dd9283ff4 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -3403,7 +3403,6 @@ static void set_discard_limits(struct cache *cache, struct queue_limits *limits) limits->max_hw_discard_sectors = origin_limits->max_hw_discard_sectors; limits->discard_granularity = origin_limits->discard_granularity; limits->discard_alignment = origin_limits->discard_alignment; - limits->discard_misaligned = origin_limits->discard_misaligned; } static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits) diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c index ad79b52ffc14..b4384a8b13e3 100644 --- a/drivers/md/dm-clone-target.c +++ b/drivers/md/dm-clone-target.c @@ -2059,7 +2059,6 @@ static void set_discard_limits(struct clone *clone, struct queue_limits *limits) limits->max_hw_discard_sectors = dest_limits->max_hw_discard_sectors; limits->discard_granularity = dest_limits->discard_granularity; limits->discard_alignment = dest_limits->discard_alignment; - limits->discard_misaligned = dest_limits->discard_misaligned; limits->max_discard_segments = dest_limits->max_discard_segments; } diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index 08700bfc3e23..14a44c0f8286 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -206,7 +206,6 @@ struct dm_table { bool integrity_supported:1; bool singleton:1; - unsigned integrity_added:1; /* * Indicates the rw permissions for the new logical device. 
This diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 1b7a97cc3779..6c013ceb0e5f 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1176,8 +1176,8 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti) struct blk_integrity *bi = blk_get_integrity(cc->dev->bdev->bd_disk); struct mapped_device *md = dm_table_get_md(ti->table); - /* From now we require underlying device with our integrity profile */ - if (!bi || strcasecmp(bi->profile->name, "DM-DIF-EXT-TAG")) { + /* We require an underlying device with non-PI metadata */ + if (!bi || bi->csum_type != BLK_INTEGRITY_CSUM_NONE) { ti->error = "Integrity profile not supported."; return -EINVAL; } diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 417fddebe367..2a89f8eb4713 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -350,25 +350,6 @@ static struct kmem_cache *journal_io_cache; #define DEBUG_bytes(bytes, len, msg, ...) do { } while (0) #endif -static void dm_integrity_prepare(struct request *rq) -{ -} - -static void dm_integrity_complete(struct request *rq, unsigned int nr_bytes) -{ -} - -/* - * DM Integrity profile, protection is performed layer above (dm-crypt) - */ -static const struct blk_integrity_profile dm_integrity_profile = { - .name = "DM-DIF-EXT-TAG", - .generate_fn = NULL, - .verify_fn = NULL, - .prepare_fn = dm_integrity_prepare, - .complete_fn = dm_integrity_complete, -}; - static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map); static void integrity_bio_wait(struct work_struct *w); static void dm_integrity_dtr(struct dm_target *ti); @@ -3494,6 +3475,17 @@ static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *lim limits->dma_alignment = limits->logical_block_size - 1; limits->discard_granularity = ic->sectors_per_block << SECTOR_SHIFT; } + + if (!ic->internal_hash) { + struct blk_integrity *bi = &limits->integrity; + + memset(bi, 0, sizeof(*bi)); + bi->tuple_size = ic->tag_size; + bi->tag_size = bi->tuple_size; + bi->interval_exp = + ic->sb->log2_sectors_per_block + SECTOR_SHIFT; + } + limits->max_integrity_segments = USHRT_MAX; } @@ -3650,20 +3642,6 @@ try_smaller_buffer: return 0; } -static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic) -{ - struct gendisk *disk = dm_disk(dm_table_get_md(ti->table)); - struct blk_integrity bi; - - memset(&bi, 0, sizeof(bi)); - bi.profile = &dm_integrity_profile; - bi.tuple_size = ic->tag_size; - bi.tag_size = bi.tuple_size; - bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT; - - blk_integrity_register(disk, &bi); -} - static void dm_integrity_free_page_list(struct page_list *pl) { unsigned int i; @@ -4649,9 +4627,6 @@ try_smaller_buffer: } } - if (!ic->internal_hash) - dm_integrity_set(ti, ic); - ti->num_flush_bios = 1; ti->flush_supported = true; if (ic->discard) diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index abe88d1e6735..052c00c1eb15 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -3542,7 +3542,7 @@ static void raid_status(struct dm_target *ti, status_type_t type, recovery = rs->md.recovery; state = decipher_sync_action(mddev, recovery); progress = rs_get_progress(rs, recovery, state, resync_max_sectors); - resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ? + resync_mismatches = mddev->last_sync_action == ACTION_CHECK ? atomic64_read(&mddev->resync_mismatches) : 0; /* HM FIXME: do we want another state char for raid0? 
It shows 'D'/'A'/'-' now */ diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index b2d5246cff21..92d18dcdb3e9 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -425,6 +425,13 @@ static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, q->limits.logical_block_size, q->limits.alignment_offset, (unsigned long long) start << SECTOR_SHIFT); + + /* + * Only stack the integrity profile if the target doesn't have native + * integrity support. + */ + if (!dm_target_has_integrity(ti->type)) + queue_limits_stack_integrity_bdev(limits, bdev); return 0; } @@ -572,6 +579,12 @@ int dm_split_args(int *argc, char ***argvp, char *input) return 0; } +static void dm_set_stacking_limits(struct queue_limits *limits) +{ + blk_set_stacking_limits(limits); + limits->features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT | BLK_FEAT_POLL; +} + /* * Impose necessary and sufficient conditions on a devices's table such * that any incoming bio which respects its logical_block_size can be @@ -610,7 +623,7 @@ static int validate_hardware_logical_block_alignment(struct dm_table *t, for (i = 0; i < t->num_targets; i++) { ti = dm_table_get_target(t, i); - blk_set_stacking_limits(&ti_limits); + dm_set_stacking_limits(&ti_limits); /* combine all target devices' limits */ if (ti->type->iterate_devices) @@ -702,9 +715,6 @@ int dm_table_add_target(struct dm_table *t, const char *type, t->immutable_target_type = ti->type; } - if (dm_target_has_integrity(ti->type)) - t->integrity_added = 1; - ti->table = t; ti->begin = start; ti->len = len; @@ -1014,14 +1024,13 @@ bool dm_table_request_based(struct dm_table *t) return __table_type_request_based(dm_table_get_type(t)); } -static bool dm_table_supports_poll(struct dm_table *t); - static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md) { enum dm_queue_mode type = dm_table_get_type(t); unsigned int per_io_data_size = 0, front_pad, io_front_pad; unsigned int min_pool_size = 0, pool_size; struct dm_md_mempools *pools; + unsigned int bioset_flags = 0; if (unlikely(type == DM_TYPE_NONE)) { DMERR("no table type is set, can't allocate mempools"); @@ -1038,6 +1047,9 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device * goto init_bs; } + if (md->queue->limits.features & BLK_FEAT_POLL) + bioset_flags |= BIOSET_PERCPU_CACHE; + for (unsigned int i = 0; i < t->num_targets; i++) { struct dm_target *ti = dm_table_get_target(t, i); @@ -1050,8 +1062,7 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device * io_front_pad = roundup(per_io_data_size, __alignof__(struct dm_io)) + DM_IO_BIO_OFFSET; - if (bioset_init(&pools->io_bs, pool_size, io_front_pad, - dm_table_supports_poll(t) ? BIOSET_PERCPU_CACHE : 0)) + if (bioset_init(&pools->io_bs, pool_size, io_front_pad, bioset_flags)) goto out_free_pools; if (t->integrity_supported && bioset_integrity_create(&pools->io_bs, pool_size)) @@ -1119,99 +1130,6 @@ static int dm_table_build_index(struct dm_table *t) return r; } -static bool integrity_profile_exists(struct gendisk *disk) -{ - return !!blk_get_integrity(disk); -} - -/* - * Get a disk whose integrity profile reflects the table's profile. - * Returns NULL if integrity support was inconsistent or unavailable. 
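[Editorial sketch] Integrity is now stacked through queue_limits like any other limit: dm_set_device_limits() folds each component bdev's profile into the table's limits, and incompatible profiles simply zero the stacked result rather than triggering the old two-pass register/verify dance. Approximate usage, assuming the helper returns false on mismatch as the target-level call site further down suggests:

static void stack_component(struct queue_limits *lim,
			    struct block_device *bdev)
{
	if (!queue_limits_stack_integrity_bdev(lim, bdev))
		pr_warn("integrity profile mismatch, stacked profile cleared\n");
}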
- */ -static struct gendisk *dm_table_get_integrity_disk(struct dm_table *t) -{ - struct list_head *devices = dm_table_get_devices(t); - struct dm_dev_internal *dd = NULL; - struct gendisk *prev_disk = NULL, *template_disk = NULL; - - for (unsigned int i = 0; i < t->num_targets; i++) { - struct dm_target *ti = dm_table_get_target(t, i); - - if (!dm_target_passes_integrity(ti->type)) - goto no_integrity; - } - - list_for_each_entry(dd, devices, list) { - template_disk = dd->dm_dev->bdev->bd_disk; - if (!integrity_profile_exists(template_disk)) - goto no_integrity; - else if (prev_disk && - blk_integrity_compare(prev_disk, template_disk) < 0) - goto no_integrity; - prev_disk = template_disk; - } - - return template_disk; - -no_integrity: - if (prev_disk) - DMWARN("%s: integrity not set: %s and %s profile mismatch", - dm_device_name(t->md), - prev_disk->disk_name, - template_disk->disk_name); - return NULL; -} - -/* - * Register the mapped device for blk_integrity support if the - * underlying devices have an integrity profile. But all devices may - * not have matching profiles (checking all devices isn't reliable - * during table load because this table may use other DM device(s) which - * must be resumed before they will have an initialized integity - * profile). Consequently, stacked DM devices force a 2 stage integrity - * profile validation: First pass during table load, final pass during - * resume. - */ -static int dm_table_register_integrity(struct dm_table *t) -{ - struct mapped_device *md = t->md; - struct gendisk *template_disk = NULL; - - /* If target handles integrity itself do not register it here. */ - if (t->integrity_added) - return 0; - - template_disk = dm_table_get_integrity_disk(t); - if (!template_disk) - return 0; - - if (!integrity_profile_exists(dm_disk(md))) { - t->integrity_supported = true; - /* - * Register integrity profile during table load; we can do - * this because the final profile must match during resume. - */ - blk_integrity_register(dm_disk(md), - blk_get_integrity(template_disk)); - return 0; - } - - /* - * If DM device already has an initialized integrity - * profile the new profile should not conflict. - */ - if (blk_integrity_compare(dm_disk(md), template_disk) < 0) { - DMERR("%s: conflict with existing integrity profile: %s profile mismatch", - dm_device_name(t->md), - template_disk->disk_name); - return 1; - } - - /* Preserve existing integrity profile */ - t->integrity_supported = true; - return 0; -} - #ifdef CONFIG_BLK_INLINE_ENCRYPTION struct dm_crypto_profile { @@ -1423,12 +1341,6 @@ int dm_table_complete(struct dm_table *t) return r; } - r = dm_table_register_integrity(t); - if (r) { - DMERR("could not register integrity profile."); - return r; - } - r = dm_table_construct_crypto_profile(t); if (r) { DMERR("could not construct crypto profile."); @@ -1493,14 +1405,6 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector) return &t->targets[(KEYS_PER_NODE * n) + k]; } -static int device_not_poll_capable(struct dm_target *ti, struct dm_dev *dev, - sector_t start, sector_t len, void *data) -{ - struct request_queue *q = bdev_get_queue(dev->bdev); - - return !test_bit(QUEUE_FLAG_POLL, &q->queue_flags); -} - /* * type->iterate_devices() should be called when the sanity check needs to * iterate and check all underlying data devices. 
iterate_devices() will @@ -1548,19 +1452,6 @@ static int count_device(struct dm_target *ti, struct dm_dev *dev, return 0; } -static bool dm_table_supports_poll(struct dm_table *t) -{ - for (unsigned int i = 0; i < t->num_targets; i++) { - struct dm_target *ti = dm_table_get_target(t, i); - - if (!ti->type->iterate_devices || - ti->type->iterate_devices(ti, device_not_poll_capable, NULL)) - return false; - } - - return true; -} - /* * Check whether a table has no data devices attached using each * target's iterate_devices method. @@ -1686,12 +1577,20 @@ int dm_calculate_queue_limits(struct dm_table *t, unsigned int zone_sectors = 0; bool zoned = false; - blk_set_stacking_limits(limits); + dm_set_stacking_limits(limits); + + t->integrity_supported = true; + for (unsigned int i = 0; i < t->num_targets; i++) { + struct dm_target *ti = dm_table_get_target(t, i); + + if (!dm_target_passes_integrity(ti->type)) + t->integrity_supported = false; + } for (unsigned int i = 0; i < t->num_targets; i++) { struct dm_target *ti = dm_table_get_target(t, i); - blk_set_stacking_limits(&ti_limits); + dm_set_stacking_limits(&ti_limits); if (!ti->type->iterate_devices) { /* Set I/O hints portion of queue limits */ @@ -1706,12 +1605,12 @@ int dm_calculate_queue_limits(struct dm_table *t, ti->type->iterate_devices(ti, dm_set_device_limits, &ti_limits); - if (!zoned && ti_limits.zoned) { + if (!zoned && (ti_limits.features & BLK_FEAT_ZONED)) { /* * After stacking all limits, validate all devices * in table support this zoned model and zone sectors. */ - zoned = ti_limits.zoned; + zoned = (ti_limits.features & BLK_FEAT_ZONED); zone_sectors = ti_limits.chunk_sectors; } @@ -1738,6 +1637,18 @@ combine_limits: dm_device_name(t->md), (unsigned long long) ti->begin, (unsigned long long) ti->len); + + if (t->integrity_supported || + dm_target_has_integrity(ti->type)) { + if (!queue_limits_stack_integrity(limits, &ti_limits)) { + DMWARN("%s: adding target device (start sect %llu len %llu) " + "disabled integrity support due to incompatibility", + dm_device_name(t->md), + (unsigned long long) ti->begin, + (unsigned long long) ti->len); + t->integrity_supported = false; + } + } } /* @@ -1747,12 +1658,12 @@ combine_limits: * zoned model on host-managed zoned block devices. * BUT... */ - if (limits->zoned) { + if (limits->features & BLK_FEAT_ZONED) { /* * ...IF the above limits stacking determined a zoned model * validate that all of the table's devices conform to it. */ - zoned = limits->zoned; + zoned = limits->features & BLK_FEAT_ZONED; zone_sectors = limits->chunk_sectors; } if (validate_hardware_zoned(t, zoned, zone_sectors)) @@ -1762,63 +1673,15 @@ combine_limits: } /* - * Verify that all devices have an integrity profile that matches the - * DM device's registered integrity profile. If the profiles don't - * match then unregister the DM device's integrity profile. + * Check if a target requires flush support even if none of the underlying + * devices need it (e.g. to persist target-specific metadata). */ -static void dm_table_verify_integrity(struct dm_table *t) -{ - struct gendisk *template_disk = NULL; - - if (t->integrity_added) - return; - - if (t->integrity_supported) { - /* - * Verify that the original integrity profile - * matches all the devices in this table. 
- */ - template_disk = dm_table_get_integrity_disk(t); - if (template_disk && - blk_integrity_compare(dm_disk(t->md), template_disk) >= 0) - return; - } - - if (integrity_profile_exists(dm_disk(t->md))) { - DMWARN("%s: unable to establish an integrity profile", - dm_device_name(t->md)); - blk_integrity_unregister(dm_disk(t->md)); - } -} - -static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev, - sector_t start, sector_t len, void *data) +static bool dm_table_supports_flush(struct dm_table *t) { - unsigned long flush = (unsigned long) data; - struct request_queue *q = bdev_get_queue(dev->bdev); - - return (q->queue_flags & flush); -} - -static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush) -{ - /* - * Require at least one underlying device to support flushes. - * t->devices includes internal dm devices such as mirror logs - * so we need to use iterate_devices here, which targets - * supporting flushes must provide. - */ for (unsigned int i = 0; i < t->num_targets; i++) { struct dm_target *ti = dm_table_get_target(t, i); - if (!ti->num_flush_bios) - continue; - - if (ti->flush_supported) - return true; - - if (ti->type->iterate_devices && - ti->type->iterate_devices(ti, device_flush_capable, (void *) flush)) + if (ti->num_flush_bios && ti->flush_supported) return true; } @@ -1839,20 +1702,6 @@ static int device_dax_write_cache_enabled(struct dm_target *ti, return false; } -static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev, - sector_t start, sector_t len, void *data) -{ - return !bdev_nonrot(dev->bdev); -} - -static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev, - sector_t start, sector_t len, void *data) -{ - struct request_queue *q = bdev_get_queue(dev->bdev); - - return !blk_queue_add_random(q); -} - static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { @@ -1877,12 +1726,6 @@ static bool dm_table_supports_write_zeroes(struct dm_table *t) return true; } -static int device_not_nowait_capable(struct dm_target *ti, struct dm_dev *dev, - sector_t start, sector_t len, void *data) -{ - return !bdev_nowait(dev->bdev); -} - static bool dm_table_supports_nowait(struct dm_table *t) { for (unsigned int i = 0; i < t->num_targets; i++) { @@ -1890,10 +1733,6 @@ static bool dm_table_supports_nowait(struct dm_table *t) if (!dm_target_supports_nowait(ti->type)) return false; - - if (!ti->type->iterate_devices || - ti->type->iterate_devices(ti, device_not_nowait_capable, NULL)) - return false; } return true; @@ -1950,29 +1789,25 @@ static bool dm_table_supports_secure_erase(struct dm_table *t) return true; } -static int device_requires_stable_pages(struct dm_target *ti, - struct dm_dev *dev, sector_t start, - sector_t len, void *data) -{ - return bdev_stable_writes(dev->bdev); -} - int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, struct queue_limits *limits) { - bool wc = false, fua = false; int r; - if (dm_table_supports_nowait(t)) - blk_queue_flag_set(QUEUE_FLAG_NOWAIT, q); - else - blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, q); + if (!dm_table_supports_nowait(t)) + limits->features &= ~BLK_FEAT_NOWAIT; + + /* + * The current polling implementation does not support request-based + * stacking.
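[Editorial sketch] Note that the deleted device_is_rotational(), device_is_not_random() and device_requires_stable_pages() iterators get no replacement in dm: rotational, entropy-contribution and stable-page behaviour are ordinary BLK_FEAT_* bits in this scheme, so the generic limits stacking propagates them from component devices without dm's help. What remains are dm-specific decisions, for example flush support, which is now declared by the targets themselves rather than probed from devices (the block layer drops flushes sent to a device without a write cache):

/* subtractive masking for capabilities the table cannot honour */
if (!dm_table_supports_nowait(t))
	limits->features &= ~BLK_FEAT_NOWAIT;

/* additive opt-in for behaviour the targets themselves provide */
if (dm_table_supports_flush(t))
	limits->features |= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA;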
+ */ + if (!__table_type_bio_based(t->type)) + limits->features &= ~BLK_FEAT_POLL; if (!dm_table_supports_discards(t)) { limits->max_hw_discard_sectors = 0; limits->discard_granularity = 0; limits->discard_alignment = 0; - limits->discard_misaligned = 0; } if (!dm_table_supports_write_zeroes(t)) @@ -1981,58 +1816,22 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, if (!dm_table_supports_secure_erase(t)) limits->max_secure_erase_sectors = 0; - if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) { - wc = true; - if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA))) - fua = true; - } - blk_queue_write_cache(q, wc, fua); + if (dm_table_supports_flush(t)) + limits->features |= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA; if (dm_table_supports_dax(t, device_not_dax_capable)) { - blk_queue_flag_set(QUEUE_FLAG_DAX, q); + limits->features |= BLK_FEAT_DAX; if (dm_table_supports_dax(t, device_not_dax_synchronous_capable)) set_dax_synchronous(t->md->dax_dev); } else - blk_queue_flag_clear(QUEUE_FLAG_DAX, q); + limits->features &= ~BLK_FEAT_DAX; if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL)) dax_write_cache(t->md->dax_dev, true); - /* Ensure that all underlying devices are non-rotational. */ - if (dm_table_any_dev_attr(t, device_is_rotational, NULL)) - blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); - else - blk_queue_flag_set(QUEUE_FLAG_NONROT, q); - - dm_table_verify_integrity(t); - - /* - * Some devices don't use blk_integrity but still want stable pages - * because they do their own checksumming. - * If any underlying device requires stable pages, a table must require - * them as well. Only targets that support iterate_devices are considered: - * don't want error, zero, etc to require stable pages. - */ - if (dm_table_any_dev_attr(t, device_requires_stable_pages, NULL)) - blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q); - else - blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, q); - - /* - * Determine whether or not this queue's I/O timings contribute - * to the entropy pool, Only request-based targets use this. - * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not - * have it set. - */ - if (blk_queue_add_random(q) && - dm_table_any_dev_attr(t, device_is_not_random, NULL)) - blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); - - /* - * For a zoned target, setup the zones related queue attributes - * and resources necessary for zone append emulation if necessary. - */ - if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && limits->zoned) { + /* For a zoned table, setup the zone related queue attributes. */ + if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && + (limits->features & BLK_FEAT_ZONED)) { r = dm_set_zones_restrictions(t, q, limits); if (r) return r; @@ -2042,22 +1841,18 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, if (r) return r; - dm_update_crypto_profile(q, t); - /* - * Check for request-based device is left to - * dm_mq_init_request_queue()->blk_mq_init_allocated_queue(). - * - * For bio-based device, only set QUEUE_FLAG_POLL when all - * underlying devices supporting polling. + * Now that the limits are set, check the zones mapped by the table + * and setup the resources for zone append emulation if necessary. 
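[Editorial sketch] dm_revalidate_zones() deliberately runs after the limits have been applied, because the native-versus-emulated report above reads them: a device ends up emulating zone append when it is zoned but advertises no native REQ_OP_ZONE_APPEND capability. A plausible reading of the queue_emulates_zone_append() helper used in the DMINFO message (assumed definition; the real helper lives in the block layer):

static inline bool emulates_zone_append(struct request_queue *q)
{
	/* zoned but zero max_zone_append_sectors: emulation required */
	return blk_queue_is_zoned(q) && !q->limits.max_zone_append_sectors;
}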
*/ - if (__table_type_bio_based(t->type)) { - if (dm_table_supports_poll(t)) - blk_queue_flag_set(QUEUE_FLAG_POLL, q); - else - blk_queue_flag_clear(QUEUE_FLAG_POLL, q); + if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && + (limits->features & BLK_FEAT_ZONED)) { + r = dm_revalidate_zones(t, q); + if (r) + return r; } + dm_update_crypto_profile(q, t); return 0; } diff --git a/drivers/md/dm-vdo/dm-vdo-target.c b/drivers/md/dm-vdo/dm-vdo-target.c index b423bec6458b..9d51f72a9d66 100644 --- a/drivers/md/dm-vdo/dm-vdo-target.c +++ b/drivers/md/dm-vdo/dm-vdo-target.c @@ -945,7 +945,7 @@ static void vdo_io_hints(struct dm_target *ti, struct queue_limits *limits) * The value is used by dm-thin to determine whether to pass down discards. The block layer * splits large discards on this boundary when this is set. */ - limits->max_discard_sectors = + limits->max_hw_discard_sectors = (vdo->device_config->max_discard_blocks * VDO_SECTORS_PER_BLOCK); /* diff --git a/drivers/md/dm-zone.c b/drivers/md/dm-zone.c index 5d66d916730e..c0d41c36e06e 100644 --- a/drivers/md/dm-zone.c +++ b/drivers/md/dm-zone.c @@ -13,8 +13,6 @@ #define DM_MSG_PREFIX "zone" -#define DM_ZONE_INVALID_WP_OFST UINT_MAX - /* * For internal zone reports bypassing the top BIO submission path. */ @@ -146,34 +144,27 @@ bool dm_is_zone_write(struct mapped_device *md, struct bio *bio) } /* - * Count conventional zones of a mapped zoned device. If the device - * only has conventional zones, do not expose it as zoned. - */ -static int dm_check_zoned_cb(struct blk_zone *zone, unsigned int idx, - void *data) -{ - unsigned int *nr_conv_zones = data; - - if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) - (*nr_conv_zones)++; - - return 0; -} - -/* * Revalidate the zones of a mapped device to initialize resource necessary * for zone append emulation. Note that we cannot simply use the block layer * blk_revalidate_disk_zones() function here as the mapped device is suspended * (this is called from __bind() context). */ -static int dm_revalidate_zones(struct mapped_device *md, struct dm_table *t) +int dm_revalidate_zones(struct dm_table *t, struct request_queue *q) { + struct mapped_device *md = t->md; struct gendisk *disk = md->disk; int ret; + if (!get_capacity(disk)) + return 0; + /* Revalidate only if something changed. */ - if (!disk->nr_zones || disk->nr_zones != md->nr_zones) + if (!disk->nr_zones || disk->nr_zones != md->nr_zones) { + DMINFO("%s using %s zone append", + disk->disk_name, + queue_emulates_zone_append(q) ? "emulated" : "native"); md->nr_zones = 0; + } if (md->nr_zones) return 0; @@ -220,13 +211,129 @@ static bool dm_table_supports_zone_append(struct dm_table *t) return true; } +struct dm_device_zone_count { + sector_t start; + sector_t len; + unsigned int total_nr_seq_zones; + unsigned int target_nr_seq_zones; +}; + +/* + * Count the total number of and the number of mapped sequential zones of a + * target zoned device. 
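+ * For example, a target mapping only the first half of a device with 100
+ * sequential zones would end up with total_nr_seq_zones == 100 but
+ * target_nr_seq_zones == 50.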
+ */ +static int dm_device_count_zones_cb(struct blk_zone *zone, + unsigned int idx, void *data) +{ + struct dm_device_zone_count *zc = data; + + if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) { + zc->total_nr_seq_zones++; + if (zone->start >= zc->start && + zone->start < zc->start + zc->len) + zc->target_nr_seq_zones++; + } + + return 0; +} + +static int dm_device_count_zones(struct dm_dev *dev, + struct dm_device_zone_count *zc) +{ + int ret; + + ret = blkdev_report_zones(dev->bdev, 0, BLK_ALL_ZONES, + dm_device_count_zones_cb, zc); + if (ret < 0) + return ret; + if (!ret) + return -EIO; + return 0; +} + +struct dm_zone_resource_limits { + unsigned int mapped_nr_seq_zones; + struct queue_limits *lim; + bool reliable_limits; +}; + +static int device_get_zone_resource_limits(struct dm_target *ti, + struct dm_dev *dev, sector_t start, + sector_t len, void *data) +{ + struct dm_zone_resource_limits *zlim = data; + struct gendisk *disk = dev->bdev->bd_disk; + unsigned int max_open_zones, max_active_zones; + int ret; + struct dm_device_zone_count zc = { + .start = start, + .len = len, + }; + + /* + * If the target is not the whole device, the device zone resources may + * be shared between different targets. Check this by counting the + * number of mapped sequential zones: if this number is smaller than the + * total number of sequential zones of the target device, then resource + * sharing may happen and the zone limits will not be reliable. + */ + ret = dm_device_count_zones(dev, &zc); + if (ret) { + DMERR("Count %s zones failed %d", disk->disk_name, ret); + return ret; + } + + /* + * If the target does not map any sequential zones, then we do not need + * any zone resource limits. + */ + if (!zc.target_nr_seq_zones) + return 0; + + /* + * If the target does not map all sequential zones, the limits + * will not be reliable and we cannot use REQ_OP_ZONE_RESET_ALL. + */ + if (zc.target_nr_seq_zones < zc.total_nr_seq_zones) { + zlim->reliable_limits = false; + ti->zone_reset_all_supported = false; + } + + /* + * If the target maps less sequential zones than the limit values, then + * we do not have limits for this target. + */ + max_active_zones = disk->queue->limits.max_active_zones; + if (max_active_zones >= zc.target_nr_seq_zones) + max_active_zones = 0; + zlim->lim->max_active_zones = + min_not_zero(max_active_zones, zlim->lim->max_active_zones); + + max_open_zones = disk->queue->limits.max_open_zones; + if (max_open_zones >= zc.target_nr_seq_zones) + max_open_zones = 0; + zlim->lim->max_open_zones = + min_not_zero(max_open_zones, zlim->lim->max_open_zones); + + /* + * Also count the total number of sequential zones for the mapped + * device so that when we are done inspecting all its targets, we are + * able to check if the mapped device actually has any sequential zones. 
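+	 * If mapped_nr_seq_zones is still 0 once all targets have been
+	 * inspected, the table maps only conventional zones and the mapped
+	 * device can be exposed as a regular (non-zoned) device.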
+ */
+	zlim->mapped_nr_seq_zones += zc.target_nr_seq_zones;
+
+	return 0;
+}
+
 int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q,
 		struct queue_limits *lim)
 {
 	struct mapped_device *md = t->md;
 	struct gendisk *disk = md->disk;
-	unsigned int nr_conv_zones = 0;
-	int ret;
+	struct dm_zone_resource_limits zlim = {
+		.reliable_limits = true,
+		.lim = lim,
+	};
 
 	/*
 	 * Check if zone append is natively supported, and if not, set the
@@ -240,46 +347,63 @@ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q,
 		lim->max_zone_append_sectors = 0;
 	}
 
-	if (!get_capacity(md->disk))
-		return 0;
-
 	/*
-	 * Count conventional zones to check that the mapped device will indeed
-	 * have sequential write required zones.
+	 * Determine the max open and max active zone limits for the mapped
+	 * device by inspecting the zone resource limits and the zones mapped
+	 * by each target.
 	 */
-	md->zone_revalidate_map = t;
-	ret = dm_blk_report_zones(disk, 0, UINT_MAX,
-				  dm_check_zoned_cb, &nr_conv_zones);
-	md->zone_revalidate_map = NULL;
-	if (ret < 0) {
-		DMERR("Check zoned failed %d", ret);
-		return ret;
+	for (unsigned int i = 0; i < t->num_targets; i++) {
+		struct dm_target *ti = dm_table_get_target(t, i);
+
+		/*
+		 * Assume that the target can accept REQ_OP_ZONE_RESET_ALL.
+		 * device_get_zone_resource_limits() may adjust this if one of
+		 * the devices used by the target does not have all its
+		 * sequential write required zones mapped.
+		 */
+		ti->zone_reset_all_supported = true;
+
+		if (!ti->type->iterate_devices ||
+		    ti->type->iterate_devices(ti,
+				device_get_zone_resource_limits, &zlim)) {
+			DMERR("Could not determine %s zone resource limits",
+			      disk->disk_name);
			return -ENODEV;
+		}
 	}
 
 	/*
-	 * If we only have conventional zones, expose the mapped device as
-	 * a regular device.
+	 * If we only have conventional zones mapped, expose the mapped device
+	 * as a regular device.
 	 */
-	if (nr_conv_zones >= ret) {
+	if (!zlim.mapped_nr_seq_zones) {
 		lim->max_open_zones = 0;
 		lim->max_active_zones = 0;
-		lim->zoned = false;
+		lim->max_zone_append_sectors = 0;
+		lim->zone_write_granularity = 0;
+		lim->chunk_sectors = 0;
+		lim->features &= ~BLK_FEAT_ZONED;
 		clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
+		md->nr_zones = 0;
 		disk->nr_zones = 0;
 		return 0;
 	}
 
-	if (!md->disk->nr_zones) {
-		DMINFO("%s using %s zone append",
-		       md->disk->disk_name,
-		       queue_emulates_zone_append(q) ? "emulated" : "native");
-	}
-
-	ret = dm_revalidate_zones(md, t);
-	if (ret < 0)
-		return ret;
+	/*
+	 * Warn once (when the capacity is not yet set) if the mapped device is
+	 * partially using zone resources of the target devices as that leads to
+	 * unreliable limits, i.e. if another mapped device uses the same
+	 * underlying devices, we cannot enforce zone limits to guarantee that
+	 * writing will not lead to errors. Note that we really should return
+	 * an error for such a case but there is no easy way to find out if
+	 * another mapped device uses the same underlying zoned devices.
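+	 * The get_capacity() check below limits the warning to the initial
+	 * table load, before the device capacity is first set.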
+ */ + if (!get_capacity(disk) && !zlim.reliable_limits) + DMWARN("%s zone resource limits may be unreliable", + disk->disk_name); - if (!static_key_enabled(&zoned_enabled.key)) + if (lim->features & BLK_FEAT_ZONED && + !static_key_enabled(&zoned_enabled.key)) static_branch_enable(&zoned_enabled); return 0; } @@ -306,3 +430,39 @@ void dm_zone_endio(struct dm_io *io, struct bio *clone) return; } + +static int dm_zone_need_reset_cb(struct blk_zone *zone, unsigned int idx, + void *data) +{ + /* + * For an all-zones reset, ignore conventional, empty, read-only + * and offline zones. + */ + switch (zone->cond) { + case BLK_ZONE_COND_NOT_WP: + case BLK_ZONE_COND_EMPTY: + case BLK_ZONE_COND_READONLY: + case BLK_ZONE_COND_OFFLINE: + return 0; + default: + set_bit(idx, (unsigned long *)data); + return 0; + } +} + +int dm_zone_get_reset_bitmap(struct mapped_device *md, struct dm_table *t, + sector_t sector, unsigned int nr_zones, + unsigned long *need_reset) +{ + int ret; + + ret = dm_blk_do_report_zones(md, t, sector, nr_zones, + dm_zone_need_reset_cb, need_reset); + if (ret != nr_zones) { + DMERR("Get %s zone reset bitmap failed\n", + md->disk->disk_name); + return -EIO; + } + + return 0; +} diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c index 12236e6f46f3..cd0ee144973f 100644 --- a/drivers/md/dm-zoned-target.c +++ b/drivers/md/dm-zoned-target.c @@ -1009,7 +1009,7 @@ static void dmz_io_hints(struct dm_target *ti, struct queue_limits *limits) limits->max_sectors = chunk_sectors; /* We are exposing a drive-managed zoned block device */ - limits->zoned = false; + limits->features &= ~BLK_FEAT_ZONED; } /* diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 13037d6a6f62..4b1b69e576a5 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1188,7 +1188,7 @@ static sector_t __max_io_len(struct dm_target *ti, sector_t sector, return len; return min_t(sector_t, len, min(max_sectors ? 
: queue_max_sectors(ti->table->md->queue), - blk_chunk_sectors_left(target_offset, max_granularity))); + blk_boundary_sectors_left(target_offset, max_granularity))); } static inline sector_t max_io_len(struct dm_target *ti, sector_t sector) @@ -1598,20 +1598,19 @@ static void __send_abnormal_io(struct clone_info *ci, struct dm_target *ti, static bool is_abnormal_io(struct bio *bio) { - enum req_op op = bio_op(bio); - - if (op != REQ_OP_READ && op != REQ_OP_WRITE && op != REQ_OP_FLUSH) { - switch (op) { - case REQ_OP_DISCARD: - case REQ_OP_SECURE_ERASE: - case REQ_OP_WRITE_ZEROES: - return true; - default: - break; - } + switch (bio_op(bio)) { + case REQ_OP_READ: + case REQ_OP_WRITE: + case REQ_OP_FLUSH: + return false; + case REQ_OP_DISCARD: + case REQ_OP_SECURE_ERASE: + case REQ_OP_WRITE_ZEROES: + case REQ_OP_ZONE_RESET_ALL: + return true; + default: + return false; } - - return false; } static blk_status_t __process_abnormal_io(struct clone_info *ci, @@ -1776,6 +1775,119 @@ static inline bool dm_zone_plug_bio(struct mapped_device *md, struct bio *bio) { return dm_emulate_zone_append(md) && blk_zone_plug_bio(bio, 0); } + +static blk_status_t __send_zone_reset_all_emulated(struct clone_info *ci, + struct dm_target *ti) +{ + struct bio_list blist = BIO_EMPTY_LIST; + struct mapped_device *md = ci->io->md; + unsigned int zone_sectors = md->disk->queue->limits.chunk_sectors; + unsigned long *need_reset; + unsigned int i, nr_zones, nr_reset; + unsigned int num_bios = 0; + blk_status_t sts = BLK_STS_OK; + sector_t sector = ti->begin; + struct bio *clone; + int ret; + + nr_zones = ti->len >> ilog2(zone_sectors); + need_reset = bitmap_zalloc(nr_zones, GFP_NOIO); + if (!need_reset) + return BLK_STS_RESOURCE; + + ret = dm_zone_get_reset_bitmap(md, ci->map, ti->begin, + nr_zones, need_reset); + if (ret) { + sts = BLK_STS_IOERR; + goto free_bitmap; + } + + /* If we have no zone to reset, we are done. */ + nr_reset = bitmap_weight(need_reset, nr_zones); + if (!nr_reset) + goto free_bitmap; + + atomic_add(nr_zones, &ci->io->io_count); + + for (i = 0; i < nr_zones; i++) { + + if (!test_bit(i, need_reset)) { + sector += zone_sectors; + continue; + } + + if (bio_list_empty(&blist)) { + /* This may take a while, so be nice to others */ + if (num_bios) + cond_resched(); + + /* + * We may need to reset thousands of zones, so let's + * not go crazy with the clone allocation. + */ + alloc_multiple_bios(&blist, ci, ti, min(nr_reset, 32), + NULL, GFP_NOIO); + } + + /* Get a clone and change it to a regular reset operation. 
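+		 * Clearing the REQ_OP_MASK bits of bi_opf drops the
+		 * REQ_OP_ZONE_RESET_ALL opcode while preserving the other
+		 * flag bits; OR-ing in REQ_OP_ZONE_RESET | REQ_SYNC then
+		 * turns the clone into a synchronous reset of a single zone.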
*/ + clone = bio_list_pop(&blist); + clone->bi_opf &= ~REQ_OP_MASK; + clone->bi_opf |= REQ_OP_ZONE_RESET | REQ_SYNC; + clone->bi_iter.bi_sector = sector; + clone->bi_iter.bi_size = 0; + __map_bio(clone); + + sector += zone_sectors; + num_bios++; + nr_reset--; + } + + WARN_ON_ONCE(!bio_list_empty(&blist)); + atomic_sub(nr_zones - num_bios, &ci->io->io_count); + ci->sector_count = 0; + +free_bitmap: + bitmap_free(need_reset); + + return sts; +} + +static void __send_zone_reset_all_native(struct clone_info *ci, + struct dm_target *ti) +{ + unsigned int bios; + + atomic_add(1, &ci->io->io_count); + bios = __send_duplicate_bios(ci, ti, 1, NULL, GFP_NOIO); + atomic_sub(1 - bios, &ci->io->io_count); + + ci->sector_count = 0; +} + +static blk_status_t __send_zone_reset_all(struct clone_info *ci) +{ + struct dm_table *t = ci->map; + blk_status_t sts = BLK_STS_OK; + + for (unsigned int i = 0; i < t->num_targets; i++) { + struct dm_target *ti = dm_table_get_target(t, i); + + if (ti->zone_reset_all_supported) { + __send_zone_reset_all_native(ci, ti); + continue; + } + + sts = __send_zone_reset_all_emulated(ci, ti); + if (sts != BLK_STS_OK) + break; + } + + /* Release the reference that alloc_io() took for submission. */ + atomic_sub(1, &ci->io->io_count); + + return sts; +} + #else static inline bool dm_zone_bio_needs_split(struct mapped_device *md, struct bio *bio) @@ -1786,6 +1898,10 @@ static inline bool dm_zone_plug_bio(struct mapped_device *md, struct bio *bio) { return false; } +static blk_status_t __send_zone_reset_all(struct clone_info *ci) +{ + return BLK_STS_NOTSUPP; +} #endif /* @@ -1799,9 +1915,14 @@ static void dm_split_and_process_bio(struct mapped_device *md, blk_status_t error = BLK_STS_OK; bool is_abnormal, need_split; - need_split = is_abnormal = is_abnormal_io(bio); - if (static_branch_unlikely(&zoned_enabled)) - need_split = is_abnormal || dm_zone_bio_needs_split(md, bio); + is_abnormal = is_abnormal_io(bio); + if (static_branch_unlikely(&zoned_enabled)) { + /* Special case REQ_OP_ZONE_RESET_ALL as it cannot be split. 
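+		 * A zone reset all bio carries no sector range to split on:
+		 * it applies to every zone of the device, so it is handed to
+		 * each target as a whole by __send_zone_reset_all() below.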
*/ + need_split = (bio_op(bio) != REQ_OP_ZONE_RESET_ALL) && + (is_abnormal || dm_zone_bio_needs_split(md, bio)); + } else { + need_split = is_abnormal; + } if (unlikely(need_split)) { /* @@ -1842,6 +1963,12 @@ static void dm_split_and_process_bio(struct mapped_device *md, goto out; } + if (static_branch_unlikely(&zoned_enabled) && + (bio_op(bio) == REQ_OP_ZONE_RESET_ALL)) { + error = __send_zone_reset_all(&ci); + goto out; + } + error = __split_and_process_bio(&ci); if (error || !ci.sector_count) goto out; @@ -2386,22 +2513,15 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) struct table_device *td; int r; - switch (type) { - case DM_TYPE_REQUEST_BASED: + WARN_ON_ONCE(type == DM_TYPE_NONE); + + if (type == DM_TYPE_REQUEST_BASED) { md->disk->fops = &dm_rq_blk_dops; r = dm_mq_init_request_queue(md, t); if (r) { DMERR("Cannot initialize queue for request-based dm mapped device"); return r; } - break; - case DM_TYPE_BIO_BASED: - case DM_TYPE_DAX_BIO_BASED: - blk_queue_flag_set(QUEUE_FLAG_IO_STAT, md->queue); - break; - case DM_TYPE_NONE: - WARN_ON_ONCE(true); - break; } r = dm_calculate_queue_limits(t, &limits); diff --git a/drivers/md/dm.h b/drivers/md/dm.h index 53ef8207fe2c..cc466ad5cb1d 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -103,12 +103,16 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t); */ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q, struct queue_limits *lim); +int dm_revalidate_zones(struct dm_table *t, struct request_queue *q); void dm_zone_endio(struct dm_io *io, struct bio *clone); #ifdef CONFIG_BLK_DEV_ZONED int dm_blk_report_zones(struct gendisk *disk, sector_t sector, unsigned int nr_zones, report_zones_cb cb, void *data); bool dm_is_zone_write(struct mapped_device *md, struct bio *bio); int dm_zone_map_bio(struct dm_target_io *io); +int dm_zone_get_reset_bitmap(struct mapped_device *md, struct dm_table *t, + sector_t sector, unsigned int nr_zones, + unsigned long *need_reset); #else #define dm_blk_report_zones NULL static inline bool dm_is_zone_write(struct mapped_device *md, struct bio *bio) diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c index 0a2d37eb38ef..08232d8dc815 100644 --- a/drivers/md/md-bitmap.c +++ b/drivers/md/md-bitmap.c @@ -227,6 +227,8 @@ static int __write_sb_page(struct md_rdev *rdev, struct bitmap *bitmap, struct block_device *bdev; struct mddev *mddev = bitmap->mddev; struct bitmap_storage *store = &bitmap->storage; + unsigned int bitmap_limit = (bitmap->storage.file_pages - pg_index) << + PAGE_SHIFT; loff_t sboff, offset = mddev->bitmap_info.offset; sector_t ps = pg_index * PAGE_SIZE / SECTOR_SIZE; unsigned int size = PAGE_SIZE; @@ -269,11 +271,9 @@ static int __write_sb_page(struct md_rdev *rdev, struct bitmap *bitmap, if (size == 0) /* bitmap runs in to data */ return -EINVAL; - } else { - /* DATA METADATA BITMAP - no problems */ } - md_super_write(mddev, rdev, sboff + ps, (int) size, page); + md_super_write(mddev, rdev, sboff + ps, (int)min(size, bitmap_limit), page); return 0; } diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index 8e36a0feec09..139fe2019c1d 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c @@ -1570,7 +1570,7 @@ out: return err; } -static struct md_cluster_operations cluster_ops = { +static const struct md_cluster_operations cluster_ops = { .join = join, .leave = leave, .slot_number = slot_number, diff --git a/drivers/md/md.c b/drivers/md/md.c index aff9118ff697..64693913ed18 100644 --- a/drivers/md/md.c 
+++ b/drivers/md/md.c
@@ -69,13 +69,23 @@
 #include "md-bitmap.h"
 #include "md-cluster.h"
 
+static const char *action_name[NR_SYNC_ACTIONS] = {
+	[ACTION_RESYNC]		= "resync",
+	[ACTION_RECOVER]	= "recover",
+	[ACTION_CHECK]		= "check",
+	[ACTION_REPAIR]		= "repair",
+	[ACTION_RESHAPE]	= "reshape",
+	[ACTION_FROZEN]		= "frozen",
+	[ACTION_IDLE]		= "idle",
+};
+
 /* pers_list is a list of registered personalities protected by pers_lock. */
 static LIST_HEAD(pers_list);
 static DEFINE_SPINLOCK(pers_lock);
 
 static const struct kobj_type md_ktype;
 
-struct md_cluster_operations *md_cluster_ops;
+const struct md_cluster_operations *md_cluster_ops;
 EXPORT_SYMBOL(md_cluster_ops);
 static struct module *md_cluster_mod;
 
@@ -479,7 +489,6 @@ int mddev_suspend(struct mddev *mddev, bool interruptible)
 	 */
 	WRITE_ONCE(mddev->suspended, mddev->suspended + 1);
 
-	del_timer_sync(&mddev->safemode_timer);
 	/* restrict memory reclaim I/O during raid array is suspend */
 	mddev->noio_flag = memalloc_noio_save();
 
@@ -550,13 +559,9 @@ static void md_end_flush(struct bio *bio)
 
 	rdev_dec_pending(rdev, mddev);
 
-	if (atomic_dec_and_test(&mddev->flush_pending)) {
-		/* The pair is percpu_ref_get() from md_flush_request() */
-		percpu_ref_put(&mddev->active_io);
-
+	if (atomic_dec_and_test(&mddev->flush_pending))
 		/* The pre-request flush has finished */
 		queue_work(md_wq, &mddev->flush_work);
-	}
 }
 
 static void md_submit_flush_data(struct work_struct *ws);
@@ -587,12 +592,8 @@ static void submit_flushes(struct work_struct *ws)
 			rcu_read_lock();
 		}
 	rcu_read_unlock();
-	if (atomic_dec_and_test(&mddev->flush_pending)) {
-		/* The pair is percpu_ref_get() from md_flush_request() */
-		percpu_ref_put(&mddev->active_io);
-
+	if (atomic_dec_and_test(&mddev->flush_pending))
 		queue_work(md_wq, &mddev->flush_work);
-	}
 }
 
 static void md_submit_flush_data(struct work_struct *ws)
@@ -617,8 +618,20 @@ static void md_submit_flush_data(struct work_struct *ws)
 		bio_endio(bio);
 	} else {
 		bio->bi_opf &= ~REQ_PREFLUSH;
-		md_handle_request(mddev, bio);
+
+		/*
+		 * make_request() will never return error here, it only
+		 * returns error in raid5_make_request() by dm-raid.
+		 * Since dm always splits data and flush operation into
+		 * two separate io, io size of flush submitted by dm
+		 * is always 0, make_request() will not be called here.
+		 */
+		if (WARN_ON_ONCE(!mddev->pers->make_request(mddev, bio)))
+			bio_io_error(bio);
 	}
+
+	/* The pair is percpu_ref_get() from md_flush_request() */
+	percpu_ref_put(&mddev->active_io);
 }
 
 /*
@@ -654,24 +667,22 @@ bool md_flush_request(struct mddev *mddev, struct bio *bio)
 		WARN_ON(percpu_ref_is_zero(&mddev->active_io));
 		percpu_ref_get(&mddev->active_io);
 		mddev->flush_bio = bio;
-		bio = NULL;
-	}
-	spin_unlock_irq(&mddev->lock);
-
-	if (!bio) {
+		spin_unlock_irq(&mddev->lock);
 		INIT_WORK(&mddev->flush_work, submit_flushes);
 		queue_work(md_wq, &mddev->flush_work);
-	} else {
-		/* flush was performed for some other bio while we waited. */
-		if (bio->bi_iter.bi_size == 0)
-			/* an empty barrier - all done */
-			bio_endio(bio);
-		else {
-			bio->bi_opf &= ~REQ_PREFLUSH;
-			return false;
-		}
+		return true;
 	}
-	return true;
+
+	/* flush was performed for some other bio while we waited.
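+	 * The caller's PREFLUSH has then already been satisfied: an empty
+	 * flush bio can complete right away, while a flush with data is
+	 * handled by the caller as a plain write once REQ_PREFLUSH is
+	 * cleared.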
*/ + spin_unlock_irq(&mddev->lock); + if (bio->bi_iter.bi_size == 0) { + /* pure flush without data - all done */ + bio_endio(bio); + return true; + } + + bio->bi_opf &= ~REQ_PREFLUSH; + return false; } EXPORT_SYMBOL(md_flush_request); @@ -742,7 +753,6 @@ int mddev_init(struct mddev *mddev) mutex_init(&mddev->open_mutex); mutex_init(&mddev->reconfig_mutex); - mutex_init(&mddev->sync_mutex); mutex_init(&mddev->suspend_mutex); mutex_init(&mddev->bitmap_info.mutex); INIT_LIST_HEAD(&mddev->disks); @@ -758,7 +768,7 @@ int mddev_init(struct mddev *mddev) init_waitqueue_head(&mddev->recovery_wait); mddev->reshape_position = MaxSector; mddev->reshape_backwards = 0; - mddev->last_sync_action = "none"; + mddev->last_sync_action = ACTION_IDLE; mddev->resync_min = 0; mddev->resync_max = MaxSector; mddev->level = LEVEL_NONE; @@ -2410,36 +2420,10 @@ static LIST_HEAD(pending_raid_disks); */ int md_integrity_register(struct mddev *mddev) { - struct md_rdev *rdev, *reference = NULL; - if (list_empty(&mddev->disks)) return 0; /* nothing to do */ - if (mddev_is_dm(mddev) || blk_get_integrity(mddev->gendisk)) - return 0; /* shouldn't register, or already is */ - rdev_for_each(rdev, mddev) { - /* skip spares and non-functional disks */ - if (test_bit(Faulty, &rdev->flags)) - continue; - if (rdev->raid_disk < 0) - continue; - if (!reference) { - /* Use the first rdev as the reference */ - reference = rdev; - continue; - } - /* does this rdev's profile match the reference profile? */ - if (blk_integrity_compare(reference->bdev->bd_disk, - rdev->bdev->bd_disk) < 0) - return -EINVAL; - } - if (!reference || !bdev_get_integrity(reference->bdev)) - return 0; - /* - * All component devices are integrity capable and have matching - * profiles, register the common profile for the md device. - */ - blk_integrity_register(mddev->gendisk, - bdev_get_integrity(reference->bdev)); + if (mddev_is_dm(mddev) || !blk_get_integrity(mddev->gendisk)) + return 0; /* shouldn't register */ pr_debug("md: data integrity enabled on %s\n", mdname(mddev)); if (bioset_integrity_create(&mddev->bio_set, BIO_POOL_SIZE) || @@ -2459,32 +2443,6 @@ int md_integrity_register(struct mddev *mddev) } EXPORT_SYMBOL(md_integrity_register); -/* - * Attempt to add an rdev, but only if it is consistent with the current - * integrity profile - */ -int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev) -{ - struct blk_integrity *bi_mddev; - - if (mddev_is_dm(mddev)) - return 0; - - bi_mddev = blk_get_integrity(mddev->gendisk); - - if (!bi_mddev) /* nothing to do */ - return 0; - - if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) { - pr_err("%s: incompatible integrity profile for %pg\n", - mdname(mddev), rdev->bdev); - return -ENXIO; - } - - return 0; -} -EXPORT_SYMBOL(md_integrity_add_rdev); - static bool rdev_read_only(struct md_rdev *rdev) { return bdev_read_only(rdev->bdev) || @@ -4867,30 +4825,81 @@ out_unlock: static struct md_sysfs_entry md_metadata = __ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store); -static ssize_t -action_show(struct mddev *mddev, char *page) +enum sync_action md_sync_action(struct mddev *mddev) { - char *type = "idle"; unsigned long recovery = mddev->recovery; + + /* + * frozen has the highest priority, means running sync_thread will be + * stopped immediately, and no new sync_thread can start. 
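+	 * The checks below are ordered accordingly: frozen first, then
+	 * read-only, idle, reshape, recover, and finally the resync
+	 * variants (check/repair/resync).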
+ */ if (test_bit(MD_RECOVERY_FROZEN, &recovery)) - type = "frozen"; - else if (test_bit(MD_RECOVERY_RUNNING, &recovery) || - (md_is_rdwr(mddev) && test_bit(MD_RECOVERY_NEEDED, &recovery))) { - if (test_bit(MD_RECOVERY_RESHAPE, &recovery)) - type = "reshape"; - else if (test_bit(MD_RECOVERY_SYNC, &recovery)) { - if (!test_bit(MD_RECOVERY_REQUESTED, &recovery)) - type = "resync"; - else if (test_bit(MD_RECOVERY_CHECK, &recovery)) - type = "check"; - else - type = "repair"; - } else if (test_bit(MD_RECOVERY_RECOVER, &recovery)) - type = "recover"; - else if (mddev->reshape_position != MaxSector) - type = "reshape"; + return ACTION_FROZEN; + + /* + * read-only array can't register sync_thread, and it can only + * add/remove spares. + */ + if (!md_is_rdwr(mddev)) + return ACTION_IDLE; + + /* + * idle means no sync_thread is running, and no new sync_thread is + * requested. + */ + if (!test_bit(MD_RECOVERY_RUNNING, &recovery) && + !test_bit(MD_RECOVERY_NEEDED, &recovery)) + return ACTION_IDLE; + + if (test_bit(MD_RECOVERY_RESHAPE, &recovery) || + mddev->reshape_position != MaxSector) + return ACTION_RESHAPE; + + if (test_bit(MD_RECOVERY_RECOVER, &recovery)) + return ACTION_RECOVER; + + if (test_bit(MD_RECOVERY_SYNC, &recovery)) { + /* + * MD_RECOVERY_CHECK must be paired with + * MD_RECOVERY_REQUESTED. + */ + if (test_bit(MD_RECOVERY_CHECK, &recovery)) + return ACTION_CHECK; + if (test_bit(MD_RECOVERY_REQUESTED, &recovery)) + return ACTION_REPAIR; + return ACTION_RESYNC; } - return sprintf(page, "%s\n", type); + + /* + * MD_RECOVERY_NEEDED or MD_RECOVERY_RUNNING is set, however, no + * sync_action is specified. + */ + return ACTION_IDLE; +} + +enum sync_action md_sync_action_by_name(const char *page) +{ + enum sync_action action; + + for (action = 0; action < NR_SYNC_ACTIONS; ++action) { + if (cmd_match(page, action_name[action])) + return action; + } + + return NR_SYNC_ACTIONS; +} + +const char *md_sync_action_name(enum sync_action action) +{ + return action_name[action]; +} + +static ssize_t +action_show(struct mddev *mddev, char *page) +{ + enum sync_action action = md_sync_action(mddev); + + return sprintf(page, "%s\n", md_sync_action_name(action)); } /** @@ -4899,15 +4908,10 @@ action_show(struct mddev *mddev, char *page) * @locked: if set, reconfig_mutex will still be held after this function * return; if not set, reconfig_mutex will be released after this * function return. - * @check_seq: if set, only wait for curent running sync_thread to stop, noted - * that new sync_thread can still start. 
*/ -static void stop_sync_thread(struct mddev *mddev, bool locked, bool check_seq) +static void stop_sync_thread(struct mddev *mddev, bool locked) { - int sync_seq; - - if (check_seq) - sync_seq = atomic_read(&mddev->sync_seq); + int sync_seq = atomic_read(&mddev->sync_seq); if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { if (!locked) @@ -4928,7 +4932,8 @@ static void stop_sync_thread(struct mddev *mddev, bool locked, bool check_seq) wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || - (check_seq && sync_seq != atomic_read(&mddev->sync_seq))); + (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery) && + sync_seq != atomic_read(&mddev->sync_seq))); if (locked) mddev_lock_nointr(mddev); @@ -4939,7 +4944,7 @@ void md_idle_sync_thread(struct mddev *mddev) lockdep_assert_held(&mddev->reconfig_mutex); clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); - stop_sync_thread(mddev, true, true); + stop_sync_thread(mddev, true); } EXPORT_SYMBOL_GPL(md_idle_sync_thread); @@ -4948,7 +4953,7 @@ void md_frozen_sync_thread(struct mddev *mddev) lockdep_assert_held(&mddev->reconfig_mutex); set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); - stop_sync_thread(mddev, true, false); + stop_sync_thread(mddev, true); } EXPORT_SYMBOL_GPL(md_frozen_sync_thread); @@ -4963,100 +4968,127 @@ void md_unfrozen_sync_thread(struct mddev *mddev) } EXPORT_SYMBOL_GPL(md_unfrozen_sync_thread); -static void idle_sync_thread(struct mddev *mddev) +static int mddev_start_reshape(struct mddev *mddev) { - mutex_lock(&mddev->sync_mutex); - clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); - - if (mddev_lock(mddev)) { - mutex_unlock(&mddev->sync_mutex); - return; - } - - stop_sync_thread(mddev, false, true); - mutex_unlock(&mddev->sync_mutex); -} + int ret; -static void frozen_sync_thread(struct mddev *mddev) -{ - mutex_lock(&mddev->sync_mutex); - set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + if (mddev->pers->start_reshape == NULL) + return -EINVAL; - if (mddev_lock(mddev)) { - mutex_unlock(&mddev->sync_mutex); - return; + if (mddev->reshape_position == MaxSector || + mddev->pers->check_reshape == NULL || + mddev->pers->check_reshape(mddev)) { + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + ret = mddev->pers->start_reshape(mddev); + if (ret) + return ret; + } else { + /* + * If reshape is still in progress, and md_check_recovery() can + * continue to reshape, don't restart reshape because data can + * be corrupted for raid456. 
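+		 * md_check_recovery() is expected to resume the reshape
+		 * from reshape_position once MD_RECOVERY_FROZEN is cleared.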
+ */ + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); } - stop_sync_thread(mddev, false, false); - mutex_unlock(&mddev->sync_mutex); + sysfs_notify_dirent_safe(mddev->sysfs_degraded); + return 0; } static ssize_t action_store(struct mddev *mddev, const char *page, size_t len) { + int ret; + enum sync_action action; + if (!mddev->pers || !mddev->pers->sync_request) return -EINVAL; +retry: + if (work_busy(&mddev->sync_work)) + flush_work(&mddev->sync_work); - if (cmd_match(page, "idle")) - idle_sync_thread(mddev); - else if (cmd_match(page, "frozen")) - frozen_sync_thread(mddev); - else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) - return -EBUSY; - else if (cmd_match(page, "resync")) - clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); - else if (cmd_match(page, "recover")) { - clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); - set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); - } else if (cmd_match(page, "reshape")) { - int err; - if (mddev->pers->start_reshape == NULL) - return -EINVAL; - err = mddev_lock(mddev); - if (!err) { - if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { - err = -EBUSY; - } else if (mddev->reshape_position == MaxSector || - mddev->pers->check_reshape == NULL || - mddev->pers->check_reshape(mddev)) { - clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); - err = mddev->pers->start_reshape(mddev); - } else { - /* - * If reshape is still in progress, and - * md_check_recovery() can continue to reshape, - * don't restart reshape because data can be - * corrupted for raid456. - */ - clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); - } - mddev_unlock(mddev); + ret = mddev_lock(mddev); + if (ret) + return ret; + + if (work_busy(&mddev->sync_work)) { + mddev_unlock(mddev); + goto retry; + } + + action = md_sync_action_by_name(page); + + /* TODO: mdadm rely on "idle" to start sync_thread. 
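+	 * Writing "idle" therefore not only interrupts a running sync
+	 * thread; with no thread running it clears MD_RECOVERY_FROZEN and
+	 * sets MD_RECOVERY_NEEDED below, which may start a new one.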
*/ + if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { + switch (action) { + case ACTION_FROZEN: + md_frozen_sync_thread(mddev); + ret = len; + goto out; + case ACTION_IDLE: + md_idle_sync_thread(mddev); + break; + case ACTION_RESHAPE: + case ACTION_RECOVER: + case ACTION_CHECK: + case ACTION_REPAIR: + case ACTION_RESYNC: + ret = -EBUSY; + goto out; + default: + ret = -EINVAL; + goto out; } - if (err) - return err; - sysfs_notify_dirent_safe(mddev->sysfs_degraded); } else { - if (cmd_match(page, "check")) + switch (action) { + case ACTION_FROZEN: + set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + ret = len; + goto out; + case ACTION_RESHAPE: + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + ret = mddev_start_reshape(mddev); + if (ret) + goto out; + break; + case ACTION_RECOVER: + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); + break; + case ACTION_CHECK: set_bit(MD_RECOVERY_CHECK, &mddev->recovery); - else if (!cmd_match(page, "repair")) - return -EINVAL; - clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); - set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); - set_bit(MD_RECOVERY_SYNC, &mddev->recovery); + fallthrough; + case ACTION_REPAIR: + set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); + set_bit(MD_RECOVERY_SYNC, &mddev->recovery); + fallthrough; + case ACTION_RESYNC: + case ACTION_IDLE: + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + break; + default: + ret = -EINVAL; + goto out; + } } + if (mddev->ro == MD_AUTO_READ) { /* A write to sync_action is enough to justify * canceling read-auto mode */ - flush_work(&mddev->sync_work); mddev->ro = MD_RDWR; md_wakeup_thread(mddev->sync_thread); } + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); md_wakeup_thread(mddev->thread); sysfs_notify_dirent_safe(mddev->sysfs_action); - return len; + ret = len; + +out: + mddev_unlock(mddev); + return ret; } static struct md_sysfs_entry md_scan_mode = @@ -5065,7 +5097,8 @@ __ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store); static ssize_t last_sync_action_show(struct mddev *mddev, char *page) { - return sprintf(page, "%s\n", mddev->last_sync_action); + return sprintf(page, "%s\n", + md_sync_action_name(mddev->last_sync_action)); } static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action); @@ -5755,14 +5788,20 @@ static const struct kobj_type md_ktype = { int mdp_major = 0; /* stack the limit for all rdevs into lim */ -void mddev_stack_rdev_limits(struct mddev *mddev, struct queue_limits *lim) +int mddev_stack_rdev_limits(struct mddev *mddev, struct queue_limits *lim, + unsigned int flags) { struct md_rdev *rdev; rdev_for_each(rdev, mddev) { queue_limits_stack_bdev(lim, rdev->bdev, rdev->data_offset, mddev->gendisk->disk_name); + if ((flags & MDDEV_STACK_INTEGRITY) && + !queue_limits_stack_integrity_bdev(lim, rdev->bdev)) + return -EINVAL; } + + return 0; } EXPORT_SYMBOL_GPL(mddev_stack_rdev_limits); @@ -5777,6 +5816,14 @@ int mddev_stack_new_rdev(struct mddev *mddev, struct md_rdev *rdev) lim = queue_limits_start_update(mddev->gendisk->queue); queue_limits_stack_bdev(&lim, rdev->bdev, rdev->data_offset, mddev->gendisk->disk_name); + + if (!queue_limits_stack_integrity_bdev(&lim, rdev->bdev)) { + pr_err("%s: incompatible integrity profile for %pg\n", + mdname(mddev), rdev->bdev); + queue_limits_cancel_update(mddev->gendisk->queue); + return -ENXIO; + } + return queue_limits_commit_update(mddev->gendisk->queue, &lim); } EXPORT_SYMBOL_GPL(mddev_stack_new_rdev); @@ -5806,6 +5853,14 @@ static void 
mddev_delayed_delete(struct work_struct *ws) kobject_put(&mddev->kobj); } +void md_init_stacking_limits(struct queue_limits *lim) +{ + blk_set_stacking_limits(lim); + lim->features = BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA | + BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT; +} +EXPORT_SYMBOL_GPL(md_init_stacking_limits); + struct mddev *md_alloc(dev_t dev, char *name) { /* @@ -5823,7 +5878,7 @@ struct mddev *md_alloc(dev_t dev, char *name) int partitioned; int shift; int unit; - int error ; + int error; /* * Wait for any previous instance of this device to be completely @@ -5881,7 +5936,6 @@ struct mddev *md_alloc(dev_t dev, char *name) disk->fops = &md_fops; disk->private_data = mddev; - blk_queue_write_cache(disk->queue, true, true); disk->events |= DISK_EVENT_MEDIA_CHANGE; mddev->gendisk = disk; error = add_disk(disk); @@ -6185,28 +6239,6 @@ int md_run(struct mddev *mddev) } } - if (!mddev_is_dm(mddev)) { - struct request_queue *q = mddev->gendisk->queue; - bool nonrot = true; - - rdev_for_each(rdev, mddev) { - if (rdev->raid_disk >= 0 && !bdev_nonrot(rdev->bdev)) { - nonrot = false; - break; - } - } - if (mddev->degraded) - nonrot = false; - if (nonrot) - blk_queue_flag_set(QUEUE_FLAG_NONROT, q); - else - blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); - blk_queue_flag_set(QUEUE_FLAG_IO_STAT, q); - - /* Set the NOWAIT flags if all underlying devices support it */ - if (nowait) - blk_queue_flag_set(QUEUE_FLAG_NOWAIT, q); - } if (pers->sync_request) { if (mddev->kobj.sd && sysfs_create_group(&mddev->kobj, &md_redundancy_group)) @@ -6437,7 +6469,7 @@ void md_stop_writes(struct mddev *mddev) { mddev_lock_nointr(mddev); set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); - stop_sync_thread(mddev, true, false); + stop_sync_thread(mddev, true); __md_stop_writes(mddev); mddev_unlock(mddev); } @@ -6505,7 +6537,7 @@ static int md_set_readonly(struct mddev *mddev) set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); } - stop_sync_thread(mddev, false, false); + stop_sync_thread(mddev, false); wait_event(mddev->sb_wait, !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); mddev_lock_nointr(mddev); @@ -6551,7 +6583,7 @@ static int do_md_stop(struct mddev *mddev, int mode) set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); } - stop_sync_thread(mddev, true, false); + stop_sync_thread(mddev, true); if (mddev->sysfs_active || test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { @@ -7166,15 +7198,6 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev) if (!mddev->thread) md_update_sb(mddev, 1); /* - * If the new disk does not support REQ_NOWAIT, - * disable on the whole MD. - */ - if (!bdev_nowait(rdev->bdev)) { - pr_info("%s: Disabling nowait because %pg does not support nowait\n", - mdname(mddev), rdev->bdev); - blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, mddev->gendisk->queue); - } - /* * Kick recovery, maybe this spare has to be added to the * array immediately. 
*/ @@ -7742,12 +7765,6 @@ static int md_ioctl(struct block_device *bdev, blk_mode_t mode, return get_bitmap_file(mddev, argp); } - if (cmd == HOT_REMOVE_DISK) - /* need to ensure recovery thread has run */ - wait_event_interruptible_timeout(mddev->sb_wait, - !test_bit(MD_RECOVERY_NEEDED, - &mddev->recovery), - msecs_to_jiffies(5000)); if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) { /* Need to flush page cache, and ensure no-one else opens * and writes @@ -8520,7 +8537,7 @@ int unregister_md_personality(struct md_personality *p) } EXPORT_SYMBOL(unregister_md_personality); -int register_md_cluster_operations(struct md_cluster_operations *ops, +int register_md_cluster_operations(const struct md_cluster_operations *ops, struct module *module) { int ret = 0; @@ -8641,12 +8658,12 @@ EXPORT_SYMBOL(md_done_sync); * A return value of 'false' means that the write wasn't recorded * and cannot proceed as the array is being suspend. */ -bool md_write_start(struct mddev *mddev, struct bio *bi) +void md_write_start(struct mddev *mddev, struct bio *bi) { int did_change = 0; if (bio_data_dir(bi) != WRITE) - return true; + return; BUG_ON(mddev->ro == MD_RDONLY); if (mddev->ro == MD_AUTO_READ) { @@ -8679,15 +8696,9 @@ bool md_write_start(struct mddev *mddev, struct bio *bi) if (did_change) sysfs_notify_dirent_safe(mddev->sysfs_state); if (!mddev->has_superblocks) - return true; + return; wait_event(mddev->sb_wait, - !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) || - is_md_suspended(mddev)); - if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { - percpu_ref_put(&mddev->writes_pending); - return false; - } - return true; + !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); } EXPORT_SYMBOL(md_write_start); @@ -8835,6 +8846,77 @@ void md_allow_write(struct mddev *mddev) } EXPORT_SYMBOL_GPL(md_allow_write); +static sector_t md_sync_max_sectors(struct mddev *mddev, + enum sync_action action) +{ + switch (action) { + case ACTION_RESYNC: + case ACTION_CHECK: + case ACTION_REPAIR: + atomic64_set(&mddev->resync_mismatches, 0); + fallthrough; + case ACTION_RESHAPE: + return mddev->resync_max_sectors; + case ACTION_RECOVER: + return mddev->dev_sectors; + default: + return 0; + } +} + +static sector_t md_sync_position(struct mddev *mddev, enum sync_action action) +{ + sector_t start = 0; + struct md_rdev *rdev; + + switch (action) { + case ACTION_CHECK: + case ACTION_REPAIR: + return mddev->resync_min; + case ACTION_RESYNC: + if (!mddev->bitmap) + return mddev->recovery_cp; + return 0; + case ACTION_RESHAPE: + /* + * If the original node aborts reshaping then we continue the + * reshaping, so set again to avoid restart reshape from the + * first beginning + */ + if (mddev_is_clustered(mddev) && + mddev->reshape_position != MaxSector) + return mddev->reshape_position; + return 0; + case ACTION_RECOVER: + start = MaxSector; + rcu_read_lock(); + rdev_for_each_rcu(rdev, mddev) + if (rdev->raid_disk >= 0 && + !test_bit(Journal, &rdev->flags) && + !test_bit(Faulty, &rdev->flags) && + !test_bit(In_sync, &rdev->flags) && + rdev->recovery_offset < start) + start = rdev->recovery_offset; + rcu_read_unlock(); + + /* If there is a bitmap, we need to make sure all + * writes that started before we added a spare + * complete before we start doing a recovery. + * Otherwise the write might complete and (via + * bitmap_endwrite) set a bit in the bitmap after the + * recovery has checked that bit and skipped that + * region. 
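+		 * The quiesce(1)/quiesce(0) pair below drains all in-flight
+		 * writes before the recovery start position is returned,
+		 * closing that window.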
+ */ + if (mddev->bitmap) { + mddev->pers->quiesce(mddev, 1); + mddev->pers->quiesce(mddev, 0); + } + return start; + default: + return MaxSector; + } +} + #define SYNC_MARKS 10 #define SYNC_MARK_STEP (3*HZ) #define UPDATE_FREQUENCY (5*60*HZ) @@ -8851,7 +8933,8 @@ void md_do_sync(struct md_thread *thread) sector_t last_check; int skipped = 0; struct md_rdev *rdev; - char *desc, *action = NULL; + enum sync_action action; + const char *desc; struct blk_plug plug; int ret; @@ -8882,21 +8965,9 @@ void md_do_sync(struct md_thread *thread) goto skip; } - if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { - if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) { - desc = "data-check"; - action = "check"; - } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { - desc = "requested-resync"; - action = "repair"; - } else - desc = "resync"; - } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) - desc = "reshape"; - else - desc = "recovery"; - - mddev->last_sync_action = action ?: desc; + action = md_sync_action(mddev); + desc = md_sync_action_name(action); + mddev->last_sync_action = action; /* * Before starting a resync we must have set curr_resync to @@ -8964,56 +9035,8 @@ void md_do_sync(struct md_thread *thread) spin_unlock(&all_mddevs_lock); } while (mddev->curr_resync < MD_RESYNC_DELAYED); - j = 0; - if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { - /* resync follows the size requested by the personality, - * which defaults to physical size, but can be virtual size - */ - max_sectors = mddev->resync_max_sectors; - atomic64_set(&mddev->resync_mismatches, 0); - /* we don't use the checkpoint if there's a bitmap */ - if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) - j = mddev->resync_min; - else if (!mddev->bitmap) - j = mddev->recovery_cp; - - } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { - max_sectors = mddev->resync_max_sectors; - /* - * If the original node aborts reshaping then we continue the - * reshaping, so set j again to avoid restart reshape from the - * first beginning - */ - if (mddev_is_clustered(mddev) && - mddev->reshape_position != MaxSector) - j = mddev->reshape_position; - } else { - /* recovery follows the physical size of devices */ - max_sectors = mddev->dev_sectors; - j = MaxSector; - rcu_read_lock(); - rdev_for_each_rcu(rdev, mddev) - if (rdev->raid_disk >= 0 && - !test_bit(Journal, &rdev->flags) && - !test_bit(Faulty, &rdev->flags) && - !test_bit(In_sync, &rdev->flags) && - rdev->recovery_offset < j) - j = rdev->recovery_offset; - rcu_read_unlock(); - - /* If there is a bitmap, we need to make sure all - * writes that started before we added a spare - * complete before we start doing a recovery. - * Otherwise the write might complete and (via - * bitmap_endwrite) set a bit in the bitmap after the - * recovery has checked that bit and skipped that - * region. 
- */
-		if (mddev->bitmap) {
-			mddev->pers->quiesce(mddev, 1);
-			mddev->pers->quiesce(mddev, 0);
-		}
-	}
+	max_sectors = md_sync_max_sectors(mddev, action);
+	j = md_sync_position(mddev, action);
 
 	pr_info("md: %s of RAID array %s\n", desc, mdname(mddev));
 	pr_debug("md: minimum _guaranteed_ speed: %d KB/sec/disk.\n", speed_min(mddev));
@@ -9095,7 +9118,8 @@ void md_do_sync(struct md_thread *thread)
 		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
 			break;
 
-		sectors = mddev->pers->sync_request(mddev, j, &skipped);
+		sectors = mddev->pers->sync_request(mddev, j, max_sectors,
+						    &skipped);
 		if (sectors == 0) {
 			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
 			break;
@@ -9185,7 +9209,7 @@ void md_do_sync(struct md_thread *thread)
 			mddev->curr_resync_completed = mddev->curr_resync;
 			sysfs_notify_dirent_safe(mddev->sysfs_completed);
 		}
-		mddev->pers->sync_request(mddev, max_sectors, &skipped);
+		mddev->pers->sync_request(mddev, max_sectors, max_sectors, &skipped);
 
 		if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
 		    mddev->curr_resync > MD_RESYNC_ACTIVE) {
diff --git a/drivers/md/md.h b/drivers/md/md.h
index ca085ecad504..a0d6827dced9 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -34,6 +34,61 @@
  */
 #define MD_FAILFAST	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT)
 
+/* Status of sync thread. */
+enum sync_action {
+	/*
+	 * Represented by MD_RECOVERY_SYNC, start when:
+	 * 1) after assemble, sync data from first rdev to other copies, this
+	 * must be done first before other sync actions and will only execute
+	 * once;
+	 * 2) resize the array (notice that this is not reshape), sync data for
+	 * the new range;
+	 */
+	ACTION_RESYNC,
+	/*
+	 * Represented by MD_RECOVERY_RECOVER, start when:
+	 * 1) for new replacement, sync data based on the replace rdev or
+	 * available copies from other rdev;
+	 * 2) for new member disk while the array is degraded, sync data from
+	 * other rdev;
+	 * 3) reassemble after power failure or re-add a hot removed rdev, sync
+	 * data from first rdev to other copies based on bitmap;
+	 */
+	ACTION_RECOVER,
+	/*
+	 * Represented by MD_RECOVERY_SYNC | MD_RECOVERY_REQUESTED |
+	 * MD_RECOVERY_CHECK, start when user echo "check" to sysfs api
+	 * sync_action, used to check if data copies from different rdevs are
+	 * the same. The number of mismatch sectors will be exported to user
+	 * by sysfs api mismatch_cnt;
+	 */
+	ACTION_CHECK,
+	/*
+	 * Represented by MD_RECOVERY_SYNC | MD_RECOVERY_REQUESTED, start when
+	 * user echo "repair" to sysfs api sync_action, usually paired with
+	 * ACTION_CHECK, used to force syncing data once the user has found
+	 * inconsistent data;
+	 */
+	ACTION_REPAIR,
+	/*
+	 * Represented by MD_RECOVERY_RESHAPE, start when new member disk is added
+	 * to the conf, notice that this is different from spares or
+	 * replacement;
+	 */
+	ACTION_RESHAPE,
+	/*
+	 * Represented by MD_RECOVERY_FROZEN, can be set by sysfs api sync_action
+	 * or internal usage like setting the array read-only, will forbid above
+	 * actions.
+	 */
+	ACTION_FROZEN,
+	/*
+	 * None of the above actions match.
+	 */
+	ACTION_IDLE,
+	NR_SYNC_ACTIONS,
+};
+
 /*
  * The struct embedded in rdev is used to serialize IO.
  */
@@ -371,13 +426,12 @@ struct mddev {
 	struct md_thread __rcu		*thread;	/* management thread */
 	struct md_thread __rcu		*sync_thread;	/* doing resync or reconstruct */
 
-	/* 'last_sync_action' is initialized to "none".  It is set when a
-	 * sync operation (i.e "data-check", "requested-resync", "resync",
-	 * "recovery", or "reshape") is started.  It holds this value even
+	/*
+	 * Set when a sync operation is started.
It holds this value even * when the sync thread is "frozen" (interrupted) or "idle" (stopped - * or finished). It is overwritten when a new sync operation is begun. + * or finished). It is overwritten when a new sync operation is begun. */ - char *last_sync_action; + enum sync_action last_sync_action; sector_t curr_resync; /* last block scheduled */ /* As resync requests can complete out of order, we cannot easily track * how much resync has been completed. So we occasionally pause until @@ -540,8 +594,6 @@ struct mddev { */ struct list_head deleting; - /* Used to synchronize idle and frozen for action_store() */ - struct mutex sync_mutex; /* The sequence number for sync thread */ atomic_t sync_seq; @@ -551,22 +603,46 @@ struct mddev { }; enum recovery_flags { + /* flags for sync thread running status */ + + /* + * set when one of sync action is set and new sync thread need to be + * registered, or just add/remove spares from conf. + */ + MD_RECOVERY_NEEDED, + /* sync thread is running, or about to be started */ + MD_RECOVERY_RUNNING, + /* sync thread needs to be aborted for some reason */ + MD_RECOVERY_INTR, + /* sync thread is done and is waiting to be unregistered */ + MD_RECOVERY_DONE, + /* running sync thread must abort immediately, and not restart */ + MD_RECOVERY_FROZEN, + /* waiting for pers->start() to finish */ + MD_RECOVERY_WAIT, + /* interrupted because io-error */ + MD_RECOVERY_ERROR, + + /* flags determines sync action, see details in enum sync_action */ + + /* if just this flag is set, action is resync. */ + MD_RECOVERY_SYNC, + /* + * paired with MD_RECOVERY_SYNC, if MD_RECOVERY_CHECK is not set, + * action is repair, means user requested resync. + */ + MD_RECOVERY_REQUESTED, /* - * If neither SYNC or RESHAPE are set, then it is a recovery. + * paired with MD_RECOVERY_SYNC and MD_RECOVERY_REQUESTED, action is + * check. */ - MD_RECOVERY_RUNNING, /* a thread is running, or about to be started */ - MD_RECOVERY_SYNC, /* actually doing a resync, not a recovery */ - MD_RECOVERY_RECOVER, /* doing recovery, or need to try it. 
*/ - MD_RECOVERY_INTR, /* resync needs to be aborted for some reason */ - MD_RECOVERY_DONE, /* thread is done and is waiting to be reaped */ - MD_RECOVERY_NEEDED, /* we might need to start a resync/recover */ - MD_RECOVERY_REQUESTED, /* user-space has requested a sync (used with SYNC) */ - MD_RECOVERY_CHECK, /* user-space request for check-only, no repair */ - MD_RECOVERY_RESHAPE, /* A reshape is happening */ - MD_RECOVERY_FROZEN, /* User request to abort, and not restart, any action */ - MD_RECOVERY_ERROR, /* sync-action interrupted because io-error */ - MD_RECOVERY_WAIT, /* waiting for pers->start() to finish */ - MD_RESYNCING_REMOTE, /* remote node is running resync thread */ + MD_RECOVERY_CHECK, + /* recovery, or need to try it */ + MD_RECOVERY_RECOVER, + /* reshape */ + MD_RECOVERY_RESHAPE, + /* remote node is running resync thread */ + MD_RESYNCING_REMOTE, }; enum md_ro_state { @@ -653,7 +729,8 @@ struct md_personality int (*hot_add_disk) (struct mddev *mddev, struct md_rdev *rdev); int (*hot_remove_disk) (struct mddev *mddev, struct md_rdev *rdev); int (*spare_active) (struct mddev *mddev); - sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr, int *skipped); + sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr, + sector_t max_sector, int *skipped); int (*resize) (struct mddev *mddev, sector_t sectors); sector_t (*size) (struct mddev *mddev, sector_t sectors, int raid_disks); int (*check_reshape) (struct mddev *mddev); @@ -772,7 +849,7 @@ static inline void safe_put_page(struct page *p) extern int register_md_personality(struct md_personality *p); extern int unregister_md_personality(struct md_personality *p); -extern int register_md_cluster_operations(struct md_cluster_operations *ops, +extern int register_md_cluster_operations(const struct md_cluster_operations *ops, struct module *module); extern int unregister_md_cluster_operations(void); extern int md_setup_cluster(struct mddev *mddev, int nodes); @@ -785,7 +862,10 @@ extern void md_unregister_thread(struct mddev *mddev, struct md_thread __rcu **t extern void md_wakeup_thread(struct md_thread __rcu *thread); extern void md_check_recovery(struct mddev *mddev); extern void md_reap_sync_thread(struct mddev *mddev); -extern bool md_write_start(struct mddev *mddev, struct bio *bi); +extern enum sync_action md_sync_action(struct mddev *mddev); +extern enum sync_action md_sync_action_by_name(const char *page); +extern const char *md_sync_action_name(enum sync_action action); +extern void md_write_start(struct mddev *mddev, struct bio *bi); extern void md_write_inc(struct mddev *mddev, struct bio *bi); extern void md_write_end(struct mddev *mddev); extern void md_done_sync(struct mddev *mddev, int blocks, int ok); @@ -809,11 +889,11 @@ extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev); extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors); extern int md_check_no_bitmap(struct mddev *mddev); extern int md_integrity_register(struct mddev *mddev); -extern int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev); extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale); extern int mddev_init(struct mddev *mddev); extern void mddev_destroy(struct mddev *mddev); +void md_init_stacking_limits(struct queue_limits *lim); struct mddev *md_alloc(dev_t dev, char *name); void mddev_put(struct mddev *mddev); extern int md_run(struct mddev *mddev); @@ -852,7 +932,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct 
mddev *mddev) } } -extern struct md_cluster_operations *md_cluster_ops; +extern const struct md_cluster_operations *md_cluster_ops; static inline int mddev_is_clustered(struct mddev *mddev) { return mddev->cluster_info && mddev->bitmap_info.nodes > 1; @@ -908,7 +988,9 @@ void md_autostart_arrays(int part); int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info); int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info); int do_md_run(struct mddev *mddev); -void mddev_stack_rdev_limits(struct mddev *mddev, struct queue_limits *lim); +#define MDDEV_STACK_INTEGRITY (1u << 0) +int mddev_stack_rdev_limits(struct mddev *mddev, struct queue_limits *lim, + unsigned int flags); int mddev_stack_new_rdev(struct mddev *mddev, struct md_rdev *rdev); void mddev_update_io_opt(struct mddev *mddev, unsigned int nr_stripes); diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index c5d4aeb68404..32d587524778 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -365,30 +365,30 @@ static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks return array_sectors; } -static void free_conf(struct mddev *mddev, struct r0conf *conf) -{ - kfree(conf->strip_zone); - kfree(conf->devlist); - kfree(conf); -} - static void raid0_free(struct mddev *mddev, void *priv) { struct r0conf *conf = priv; - free_conf(mddev, conf); + kfree(conf->strip_zone); + kfree(conf->devlist); + kfree(conf); } static int raid0_set_limits(struct mddev *mddev) { struct queue_limits lim; + int err; - blk_set_stacking_limits(&lim); + md_init_stacking_limits(&lim); lim.max_hw_sectors = mddev->chunk_sectors; lim.max_write_zeroes_sectors = mddev->chunk_sectors; lim.io_min = mddev->chunk_sectors << 9; lim.io_opt = lim.io_min * mddev->raid_disks; - mddev_stack_rdev_limits(mddev, &lim); + err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY); + if (err) { + queue_limits_cancel_update(mddev->gendisk->queue); + return err; + } return queue_limits_set(mddev->gendisk->queue, &lim); } @@ -415,7 +415,7 @@ static int raid0_run(struct mddev *mddev) if (!mddev_is_dm(mddev)) { ret = raid0_set_limits(mddev); if (ret) - goto out_free_conf; + return ret; } /* calculate array device size */ @@ -427,13 +427,7 @@ static int raid0_run(struct mddev *mddev) dump_zones(mddev); - ret = md_integrity_register(mddev); - if (ret) - goto out_free_conf; - return 0; -out_free_conf: - free_conf(mddev, conf); - return ret; + return md_integrity_register(mddev); } /* diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 7b8a71ca66dd..04a0c2ca1732 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1687,8 +1687,7 @@ static bool raid1_make_request(struct mddev *mddev, struct bio *bio) if (bio_data_dir(bio) == READ) raid1_read_request(mddev, bio, sectors, NULL); else { - if (!md_write_start(mddev,bio)) - return false; + md_write_start(mddev,bio); raid1_write_request(mddev, bio, sectors); } return true; @@ -1907,9 +1906,6 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev) if (mddev->recovery_disabled == conf->recovery_disabled) return -EBUSY; - if (md_integrity_add_rdev(rdev, mddev)) - return -ENXIO; - if (rdev->raid_disk >= 0) first = last = rdev->raid_disk; @@ -2757,12 +2753,12 @@ static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf) */ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, - int *skipped) + sector_t max_sector, int *skipped) { struct r1conf *conf = mddev->private; struct r1bio *r1_bio; struct bio *bio; - sector_t max_sector, 
nr_sectors; + sector_t nr_sectors; int disk = -1; int i; int wonly = -1; @@ -2778,7 +2774,6 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, if (init_resync(conf)) return 0; - max_sector = mddev->dev_sectors; if (sector_nr >= max_sector) { /* If we aborted, we need to abort the * sync on the 'current' bitmap chunk (there will @@ -3197,14 +3192,18 @@ static struct r1conf *setup_conf(struct mddev *mddev) static int raid1_set_limits(struct mddev *mddev) { struct queue_limits lim; + int err; - blk_set_stacking_limits(&lim); + md_init_stacking_limits(&lim); lim.max_write_zeroes_sectors = 0; - mddev_stack_rdev_limits(mddev, &lim); + err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY); + if (err) { + queue_limits_cancel_update(mddev->gendisk->queue); + return err; + } return queue_limits_set(mddev->gendisk->queue, &lim); } -static void raid1_free(struct mddev *mddev, void *priv); static int raid1_run(struct mddev *mddev) { struct r1conf *conf; @@ -3238,7 +3237,7 @@ static int raid1_run(struct mddev *mddev) if (!mddev_is_dm(mddev)) { ret = raid1_set_limits(mddev); if (ret) - goto abort; + return ret; } mddev->degraded = 0; @@ -3252,8 +3251,7 @@ static int raid1_run(struct mddev *mddev) */ if (conf->raid_disks - mddev->degraded < 1) { md_unregister_thread(mddev, &conf->thread); - ret = -EINVAL; - goto abort; + return -EINVAL; } if (conf->raid_disks - mddev->degraded == 1) @@ -3277,14 +3275,8 @@ static int raid1_run(struct mddev *mddev) md_set_array_sectors(mddev, raid1_size(mddev, 0, 0)); ret = md_integrity_register(mddev); - if (ret) { + if (ret) md_unregister_thread(mddev, &mddev->thread); - goto abort; - } - return 0; - -abort: - raid1_free(mddev, conf); return ret; } diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index a4556d2e46bf..2a9c4ee982e0 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1836,8 +1836,7 @@ static bool raid10_make_request(struct mddev *mddev, struct bio *bio) && md_flush_request(mddev, bio)) return true; - if (!md_write_start(mddev, bio)) - return false; + md_write_start(mddev, bio); if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) if (!raid10_handle_discard(mddev, bio)) @@ -2083,9 +2082,6 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1)) return -EINVAL; - if (md_integrity_add_rdev(rdev, mddev)) - return -ENXIO; - if (rdev->raid_disk >= 0) first = last = rdev->raid_disk; @@ -3140,12 +3136,12 @@ static void raid10_set_cluster_sync_high(struct r10conf *conf) */ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, - int *skipped) + sector_t max_sector, int *skipped) { struct r10conf *conf = mddev->private; struct r10bio *r10_bio; struct bio *biolist = NULL, *bio; - sector_t max_sector, nr_sectors; + sector_t nr_sectors; int i; int max_sync; sector_t sync_blocks; @@ -3175,10 +3171,6 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, return 0; skipped: - max_sector = mddev->dev_sectors; - if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || - test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) - max_sector = mddev->resync_max_sectors; if (sector_nr >= max_sector) { conf->cluster_sync_low = 0; conf->cluster_sync_high = 0; @@ -3980,12 +3972,17 @@ static int raid10_set_queue_limits(struct mddev *mddev) { struct r10conf *conf = mddev->private; struct queue_limits lim; + int err; - blk_set_stacking_limits(&lim); + md_init_stacking_limits(&lim); lim.max_write_zeroes_sectors = 0; lim.io_min = 
mddev->chunk_sectors << 9; lim.io_opt = lim.io_min * raid10_nr_stripes(conf); - mddev_stack_rdev_limits(mddev, &lim); + err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY); + if (err) { + queue_limits_cancel_update(mddev->gendisk->queue); + return err; + } return queue_limits_set(mddev->gendisk->queue, &lim); } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 2bd1ce9b3922..c14cf2410365 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -155,7 +155,7 @@ static int raid6_idx_to_slot(int idx, struct stripe_head *sh, return slot; } -static void print_raid5_conf (struct r5conf *conf); +static void print_raid5_conf(struct r5conf *conf); static int stripe_operations_active(struct stripe_head *sh) { @@ -5899,6 +5899,39 @@ out: return ret; } +enum reshape_loc { + LOC_NO_RESHAPE, + LOC_AHEAD_OF_RESHAPE, + LOC_INSIDE_RESHAPE, + LOC_BEHIND_RESHAPE, +}; + +static enum reshape_loc get_reshape_loc(struct mddev *mddev, + struct r5conf *conf, sector_t logical_sector) +{ + sector_t reshape_progress, reshape_safe; + /* + * Spinlock is needed as reshape_progress may be + * 64bit on a 32bit platform, and so it might be + * possible to see a half-updated value + * Of course reshape_progress could change after + * the lock is dropped, so once we get a reference + * to the stripe that we think it is, we will have + * to check again. + */ + spin_lock_irq(&conf->device_lock); + reshape_progress = conf->reshape_progress; + reshape_safe = conf->reshape_safe; + spin_unlock_irq(&conf->device_lock); + if (reshape_progress == MaxSector) + return LOC_NO_RESHAPE; + if (ahead_of_reshape(mddev, logical_sector, reshape_progress)) + return LOC_AHEAD_OF_RESHAPE; + if (ahead_of_reshape(mddev, logical_sector, reshape_safe)) + return LOC_INSIDE_RESHAPE; + return LOC_BEHIND_RESHAPE; +} + static enum stripe_result make_stripe_request(struct mddev *mddev, struct r5conf *conf, struct stripe_request_ctx *ctx, sector_t logical_sector, struct bio *bi) @@ -5913,28 +5946,14 @@ static enum stripe_result make_stripe_request(struct mddev *mddev, seq = read_seqcount_begin(&conf->gen_lock); if (unlikely(conf->reshape_progress != MaxSector)) { - /* - * Spinlock is needed as reshape_progress may be - * 64bit on a 32bit platform, and so it might be - * possible to see a half-updated value - * Of course reshape_progress could change after - * the lock is dropped, so once we get a reference - * to the stripe that we think it is, we will have - * to check again. 
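
get_reshape_loc(), added above, centralizes the snapshot-then-classify logic: the spinlock guarantees a 32-bit host never observes a torn 64-bit reshape_progress/reshape_safe value, and every caller gets back one of four positions relative to the reshape window. A hedged sketch of how a caller can wrap it; stripe_uses_previous_geometry() is a hypothetical helper, not part of the patch:

static bool stripe_uses_previous_geometry(struct mddev *mddev,
                                          struct r5conf *conf,
                                          sector_t logical_sector,
                                          bool *must_retry)
{
        enum reshape_loc loc = get_reshape_loc(mddev, conf, logical_sector);

        /* Inside the reshape window: the stripe may move under us,
         * so the request has to be retried later. */
        *must_retry = (loc == LOC_INSIDE_RESHAPE);

        /* Ahead of the reshape front: still laid out in the old
         * geometry, so address it with previous == 1. */
        return loc == LOC_AHEAD_OF_RESHAPE;
}
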
- */ - spin_lock_irq(&conf->device_lock); - if (ahead_of_reshape(mddev, logical_sector, - conf->reshape_progress)) { - previous = 1; - } else { - if (ahead_of_reshape(mddev, logical_sector, - conf->reshape_safe)) { - spin_unlock_irq(&conf->device_lock); - ret = STRIPE_SCHEDULE_AND_RETRY; - goto out; - } + enum reshape_loc loc = get_reshape_loc(mddev, conf, + logical_sector); + if (loc == LOC_INSIDE_RESHAPE) { + ret = STRIPE_SCHEDULE_AND_RETRY; + goto out; } - spin_unlock_irq(&conf->device_lock); + if (loc == LOC_AHEAD_OF_RESHAPE) + previous = 1; } new_sector = raid5_compute_sector(conf, logical_sector, previous, @@ -6078,8 +6097,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi) ctx.do_flush = bi->bi_opf & REQ_PREFLUSH; } - if (!md_write_start(mddev, bi)) - return false; + md_write_start(mddev, bi); /* * If array is degraded, better not do chunk aligned read because * later we might have to read it again in order to reconstruct @@ -6113,8 +6131,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi) /* Bail out if conflicts with reshape and REQ_NOWAIT is set */ if ((bi->bi_opf & REQ_NOWAIT) && (conf->reshape_progress != MaxSector) && - !ahead_of_reshape(mddev, logical_sector, conf->reshape_progress) && - ahead_of_reshape(mddev, logical_sector, conf->reshape_safe)) { + get_reshape_loc(mddev, conf, logical_sector) == LOC_INSIDE_RESHAPE) { bio_wouldblock_error(bi); if (rw == WRITE) md_write_end(mddev); @@ -6255,7 +6272,9 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk safepos = conf->reshape_safe; sector_div(safepos, data_disks); if (mddev->reshape_backwards) { - BUG_ON(writepos < reshape_sectors); + if (WARN_ON(writepos < reshape_sectors)) + return MaxSector; + writepos -= reshape_sectors; readpos += reshape_sectors; safepos += reshape_sectors; @@ -6273,14 +6292,18 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk * to set 'stripe_addr' which is where we will write to. 
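
The reshape_request() hunks here and just below all apply one pattern: a consistency check that used to halt the machine with BUG_ON() now logs a warning with a backtrace and abandons the reshape step by returning MaxSector, md's "no more work" sentinel. The shape of the conversion in isolation:

        /* Before: a bad writepos took the whole host down. */
        BUG_ON(writepos < reshape_sectors);

        /* After: warn once, give up on this reshape step, keep running. */
        if (WARN_ON(writepos < reshape_sectors))
                return MaxSector;
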
*/ if (mddev->reshape_backwards) { - BUG_ON(conf->reshape_progress == 0); + if (WARN_ON(conf->reshape_progress == 0)) + return MaxSector; + stripe_addr = writepos; - BUG_ON((mddev->dev_sectors & - ~((sector_t)reshape_sectors - 1)) - - reshape_sectors - stripe_addr - != sector_nr); + if (WARN_ON((mddev->dev_sectors & + ~((sector_t)reshape_sectors - 1)) - + reshape_sectors - stripe_addr != sector_nr)) + return MaxSector; } else { - BUG_ON(writepos != sector_nr + reshape_sectors); + if (WARN_ON(writepos != sector_nr + reshape_sectors)) + return MaxSector; + stripe_addr = sector_nr; } @@ -6458,11 +6481,10 @@ ret: } static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_nr, - int *skipped) + sector_t max_sector, int *skipped) { struct r5conf *conf = mddev->private; struct stripe_head *sh; - sector_t max_sector = mddev->dev_sectors; sector_t sync_blocks; int still_degraded = 0; int i; @@ -7082,12 +7104,14 @@ raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len) err = -ENODEV; else if (new != conf->skip_copy) { struct request_queue *q = mddev->gendisk->queue; + struct queue_limits lim = queue_limits_start_update(q); conf->skip_copy = new; if (new) - blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q); + lim.features |= BLK_FEAT_STABLE_WRITES; else - blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, q); + lim.features &= ~BLK_FEAT_STABLE_WRITES; + err = queue_limits_commit_update(q, &lim); } mddev_unlock_and_resume(mddev); return err ?: len; @@ -7562,11 +7586,11 @@ static struct r5conf *setup_conf(struct mddev *mddev) if (test_bit(Replacement, &rdev->flags)) { if (disk->replacement) goto abort; - RCU_INIT_POINTER(disk->replacement, rdev); + disk->replacement = rdev; } else { if (disk->rdev) goto abort; - RCU_INIT_POINTER(disk->rdev, rdev); + disk->rdev = rdev; } if (test_bit(In_sync, &rdev->flags)) { @@ -7702,13 +7726,13 @@ static int raid5_set_limits(struct mddev *mddev) */ stripe = roundup_pow_of_two(data_disks * (mddev->chunk_sectors << 9)); - blk_set_stacking_limits(&lim); + md_init_stacking_limits(&lim); lim.io_min = mddev->chunk_sectors << 9; lim.io_opt = lim.io_min * (conf->raid_disks - conf->max_degraded); - lim.raid_partial_stripes_expensive = 1; + lim.features |= BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE; lim.discard_granularity = stripe; lim.max_write_zeroes_sectors = 0; - mddev_stack_rdev_limits(mddev, &lim); + mddev_stack_rdev_limits(mddev, &lim, 0); rdev_for_each(rdev, mddev) queue_limits_stack_bdev(&lim, rdev->bdev, rdev->new_data_offset, mddev->gendisk->disk_name); @@ -8048,7 +8072,7 @@ static void raid5_status(struct seq_file *seq, struct mddev *mddev) seq_printf (seq, "]"); } -static void print_raid5_conf (struct r5conf *conf) +static void print_raid5_conf(struct r5conf *conf) { struct md_rdev *rdev; int i; @@ -8062,15 +8086,13 @@ static void print_raid5_conf (struct r5conf *conf) conf->raid_disks, conf->raid_disks - conf->mddev->degraded); - rcu_read_lock(); for (i = 0; i < conf->raid_disks; i++) { - rdev = rcu_dereference(conf->disks[i].rdev); + rdev = conf->disks[i].rdev; if (rdev) pr_debug(" disk %d, o:%d, dev:%pg\n", i, !test_bit(Faulty, &rdev->flags), rdev->bdev); } - rcu_read_unlock(); } static int raid5_spare_active(struct mddev *mddev) diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-video.c b/drivers/media/pci/intel/ipu6/ipu6-isys-video.c index c8a33e1e910c..06090cc0a476 100644 --- a/drivers/media/pci/intel/ipu6/ipu6-isys-video.c +++ b/drivers/media/pci/intel/ipu6/ipu6-isys-video.c @@ -943,7 +943,7 @@ 
ipu6_isys_query_stream_by_source(struct ipu6_isys *isys, int source, u8 vc) return NULL; if (source < 0) { - dev_err(&stream->isys->adev->auxdev.dev, + dev_err(&isys->adev->auxdev.dev, "query stream with invalid port number\n"); return NULL; } diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys.c b/drivers/media/pci/intel/ipu6/ipu6-isys.c index 8b9b77719bb1..c4aff2e2009b 100644 --- a/drivers/media/pci/intel/ipu6/ipu6-isys.c +++ b/drivers/media/pci/intel/ipu6/ipu6-isys.c @@ -799,7 +799,7 @@ static int isys_register_devices(struct ipu6_isys *isys) isys->v4l2_dev.mdev = &isys->media_dev; isys->v4l2_dev.ctrl_handler = NULL; - ret = v4l2_device_register(&pdev->dev, &isys->v4l2_dev); + ret = v4l2_device_register(dev, &isys->v4l2_dev); if (ret < 0) goto out_media_device_unregister; diff --git a/drivers/media/pci/intel/ivsc/Kconfig b/drivers/media/pci/intel/ivsc/Kconfig index 407a800c81bc..a7d9607ecdc6 100644 --- a/drivers/media/pci/intel/ivsc/Kconfig +++ b/drivers/media/pci/intel/ivsc/Kconfig @@ -4,6 +4,7 @@ config INTEL_VSC tristate "Intel Visual Sensing Controller" depends on INTEL_MEI && ACPI && VIDEO_DEV + depends on IPU_BRIDGE || !IPU_BRIDGE select MEDIA_CONTROLLER select VIDEO_V4L2_SUBDEV_API select V4L2_FWNODE diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c index 502822059498..4ce76ce6dd4d 100644 --- a/drivers/media/platform/qcom/venus/pm_helpers.c +++ b/drivers/media/platform/qcom/venus/pm_helpers.c @@ -412,10 +412,9 @@ static int vcodec_control_v4(struct venus_core *core, u32 coreid, bool enable) u32 val; int ret; - if (IS_V6(core)) { - ctrl = core->wrapper_base + WRAPPER_CORE_POWER_CONTROL_V6; - stat = core->wrapper_base + WRAPPER_CORE_POWER_STATUS_V6; - } else if (coreid == VIDC_CORE_ID_1) { + if (IS_V6(core)) + return dev_pm_genpd_set_hwmode(core->pmdomains->pd_devs[coreid], !enable); + else if (coreid == VIDC_CORE_ID_1) { ctrl = core->wrapper_base + WRAPPER_VCODEC0_MMCC_POWER_CONTROL; stat = core->wrapper_base + WRAPPER_VCODEC0_MMCC_POWER_STATUS; } else { @@ -451,9 +450,11 @@ static int poweroff_coreid(struct venus_core *core, unsigned int coreid_mask) vcodec_clks_disable(core, core->vcodec0_clks); - ret = vcodec_control_v4(core, VIDC_CORE_ID_1, false); - if (ret) - return ret; + if (!IS_V6(core)) { + ret = vcodec_control_v4(core, VIDC_CORE_ID_1, false); + if (ret) + return ret; + } ret = pm_runtime_put_sync(core->pmdomains->pd_devs[1]); if (ret < 0) @@ -467,9 +468,11 @@ static int poweroff_coreid(struct venus_core *core, unsigned int coreid_mask) vcodec_clks_disable(core, core->vcodec1_clks); - ret = vcodec_control_v4(core, VIDC_CORE_ID_2, false); - if (ret) - return ret; + if (!IS_V6(core)) { + ret = vcodec_control_v4(core, VIDC_CORE_ID_2, false); + if (ret) + return ret; + } ret = pm_runtime_put_sync(core->pmdomains->pd_devs[2]); if (ret < 0) @@ -488,9 +491,11 @@ static int poweron_coreid(struct venus_core *core, unsigned int coreid_mask) if (ret < 0) return ret; - ret = vcodec_control_v4(core, VIDC_CORE_ID_1, true); - if (ret) - return ret; + if (!IS_V6(core)) { + ret = vcodec_control_v4(core, VIDC_CORE_ID_1, true); + if (ret) + return ret; + } ret = vcodec_clks_enable(core, core->vcodec0_clks); if (ret) @@ -506,9 +511,11 @@ static int poweron_coreid(struct venus_core *core, unsigned int coreid_mask) if (ret < 0) return ret; - ret = vcodec_control_v4(core, VIDC_CORE_ID_2, true); - if (ret) - return ret; + if (!IS_V6(core)) { + ret = vcodec_control_v4(core, VIDC_CORE_ID_2, true); + if (ret) + return ret; + } ret = 
vcodec_clks_enable(core, core->vcodec1_clks); if (ret) diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig index 8efdd1f97139..c82d8d8a16ea 100644 --- a/drivers/memory/Kconfig +++ b/drivers/memory/Kconfig @@ -167,7 +167,7 @@ config FSL_CORENET_CF represents a coherency violation. config FSL_IFC - bool "Freescale IFC driver" if COMPILE_TEST + bool "Freescale IFC driver" depends on FSL_SOC || ARCH_LAYERSCAPE || SOC_LS1021A || COMPILE_TEST depends on HAS_IOMEM diff --git a/drivers/memstick/host/Kconfig b/drivers/memstick/host/Kconfig index 4113343da056..fcd2c2cc3cb4 100644 --- a/drivers/memstick/host/Kconfig +++ b/drivers/memstick/host/Kconfig @@ -44,16 +44,6 @@ config MEMSTICK_R592 To compile this driver as a module, choose M here: the module will be called r592. -config MEMSTICK_REALTEK_PCI - tristate "Realtek PCI-E Memstick Card Interface Driver" - depends on MISC_RTSX_PCI - help - Say Y here to include driver code to support Memstick card interface - of Realtek PCI-E card reader - - To compile this driver as a module, choose M here: the module will - be called rtsx_pci_ms. - config MEMSTICK_REALTEK_USB tristate "Realtek USB Memstick Card Interface Driver" depends on MISC_RTSX_USB diff --git a/drivers/memstick/host/Makefile b/drivers/memstick/host/Makefile index 1abaa03ee68c..0c90df33165d 100644 --- a/drivers/memstick/host/Makefile +++ b/drivers/memstick/host/Makefile @@ -6,5 +6,4 @@ obj-$(CONFIG_MEMSTICK_TIFM_MS) += tifm_ms.o obj-$(CONFIG_MEMSTICK_JMICRON_38X) += jmb38x_ms.o obj-$(CONFIG_MEMSTICK_R592) += r592.o -obj-$(CONFIG_MEMSTICK_REALTEK_PCI) += rtsx_pci_ms.o obj-$(CONFIG_MEMSTICK_REALTEK_USB) += rtsx_usb_ms.o diff --git a/drivers/memstick/host/rtsx_pci_ms.c b/drivers/memstick/host/rtsx_pci_ms.c deleted file mode 100644 index 980a54513e6c..000000000000 --- a/drivers/memstick/host/rtsx_pci_ms.c +++ /dev/null @@ -1,638 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* Realtek PCI-Express Memstick Card Interface driver - * - * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved. 
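
Returning to the Venus pm_helpers hunk above: on V6 hardware, per-core power control moves from hand-written MMCC wrapper-register writes to the generic power-domain layer, and the V4-era vcodec_control_v4() calls are skipped entirely under IS_V6(). dev_pm_genpd_set_hwmode() is the real API used there; the wrapper below is a hypothetical condensation of that call site, where pd_dev stands in for core->pmdomains->pd_devs[coreid]:

static int venus_coreid_power(struct device *pd_dev, bool enable)
{
        /*
         * enable == true : software is about to manage the core, so
         *                  take the domain out of hardware mode.
         * enable == false: hand power sequencing back to hardware.
         */
        return dev_pm_genpd_set_hwmode(pd_dev, !enable);
}
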
- * - * Author: - * Wei WANG <wei_wang@realsil.com.cn> - */ - -#include <linux/module.h> -#include <linux/highmem.h> -#include <linux/delay.h> -#include <linux/platform_device.h> -#include <linux/memstick.h> -#include <linux/rtsx_pci.h> -#include <asm/unaligned.h> - -struct realtek_pci_ms { - struct platform_device *pdev; - struct rtsx_pcr *pcr; - struct memstick_host *msh; - struct memstick_request *req; - - struct mutex host_mutex; - struct work_struct handle_req; - - u8 ssc_depth; - unsigned int clock; - unsigned char ifmode; - bool eject; -}; - -static inline struct device *ms_dev(struct realtek_pci_ms *host) -{ - return &(host->pdev->dev); -} - -static inline void ms_clear_error(struct realtek_pci_ms *host) -{ - rtsx_pci_write_register(host->pcr, CARD_STOP, - MS_STOP | MS_CLR_ERR, MS_STOP | MS_CLR_ERR); -} - -#ifdef DEBUG - -static void ms_print_debug_regs(struct realtek_pci_ms *host) -{ - struct rtsx_pcr *pcr = host->pcr; - u16 i; - u8 *ptr; - - /* Print MS host internal registers */ - rtsx_pci_init_cmd(pcr); - for (i = 0xFD40; i <= 0xFD44; i++) - rtsx_pci_add_cmd(pcr, READ_REG_CMD, i, 0, 0); - for (i = 0xFD52; i <= 0xFD69; i++) - rtsx_pci_add_cmd(pcr, READ_REG_CMD, i, 0, 0); - rtsx_pci_send_cmd(pcr, 100); - - ptr = rtsx_pci_get_cmd_data(pcr); - for (i = 0xFD40; i <= 0xFD44; i++) - dev_dbg(ms_dev(host), "0x%04X: 0x%02x\n", i, *(ptr++)); - for (i = 0xFD52; i <= 0xFD69; i++) - dev_dbg(ms_dev(host), "0x%04X: 0x%02x\n", i, *(ptr++)); -} - -#else - -#define ms_print_debug_regs(host) - -#endif - -static int ms_power_on(struct realtek_pci_ms *host) -{ - struct rtsx_pcr *pcr = host->pcr; - int err; - - rtsx_pci_init_cmd(pcr); - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_SELECT, 0x07, MS_MOD_SEL); - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_SHARE_MODE, - CARD_SHARE_MASK, CARD_SHARE_48_MS); - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, - MS_CLK_EN, MS_CLK_EN); - err = rtsx_pci_send_cmd(pcr, 100); - if (err < 0) - return err; - - err = rtsx_pci_card_pull_ctl_enable(pcr, RTSX_MS_CARD); - if (err < 0) - return err; - - err = rtsx_pci_card_power_on(pcr, RTSX_MS_CARD); - if (err < 0) - return err; - - /* Wait ms power stable */ - msleep(150); - - err = rtsx_pci_write_register(pcr, CARD_OE, - MS_OUTPUT_EN, MS_OUTPUT_EN); - if (err < 0) - return err; - - return 0; -} - -static int ms_power_off(struct realtek_pci_ms *host) -{ - struct rtsx_pcr *pcr = host->pcr; - int err; - - rtsx_pci_init_cmd(pcr); - - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, MS_CLK_EN, 0); - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_OE, MS_OUTPUT_EN, 0); - - err = rtsx_pci_send_cmd(pcr, 100); - if (err < 0) - return err; - - err = rtsx_pci_card_power_off(pcr, RTSX_MS_CARD); - if (err < 0) - return err; - - return rtsx_pci_card_pull_ctl_disable(pcr, RTSX_MS_CARD); -} - -static int ms_transfer_data(struct realtek_pci_ms *host, unsigned char data_dir, - u8 tpc, u8 cfg, struct scatterlist *sg) -{ - struct rtsx_pcr *pcr = host->pcr; - int err; - unsigned int length = sg->length; - u16 sec_cnt = (u16)(length / 512); - u8 val, trans_mode, dma_dir; - struct memstick_dev *card = host->msh->card; - bool pro_card = card->id.type == MEMSTICK_TYPE_PRO; - - dev_dbg(ms_dev(host), "%s: tpc = 0x%02x, data_dir = %s, length = %d\n", - __func__, tpc, (data_dir == READ) ? "READ" : "WRITE", - length); - - if (data_dir == READ) { - dma_dir = DMA_DIR_FROM_CARD; - trans_mode = pro_card ? MS_TM_AUTO_READ : MS_TM_NORMAL_READ; - } else { - dma_dir = DMA_DIR_TO_CARD; - trans_mode = pro_card ? 
MS_TM_AUTO_WRITE : MS_TM_NORMAL_WRITE; - } - - rtsx_pci_init_cmd(pcr); - - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TPC, 0xFF, tpc); - if (pro_card) { - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_SECTOR_CNT_H, - 0xFF, (u8)(sec_cnt >> 8)); - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_SECTOR_CNT_L, - 0xFF, (u8)sec_cnt); - } - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg); - - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0, - DMA_DONE_INT, DMA_DONE_INT); - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC3, 0xFF, (u8)(length >> 24)); - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC2, 0xFF, (u8)(length >> 16)); - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC1, 0xFF, (u8)(length >> 8)); - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC0, 0xFF, (u8)length); - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMACTL, - 0x03 | DMA_PACK_SIZE_MASK, dma_dir | DMA_EN | DMA_512); - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DATA_SOURCE, - 0x01, RING_BUFFER); - - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TRANSFER, - 0xFF, MS_TRANSFER_START | trans_mode); - rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, MS_TRANSFER, - MS_TRANSFER_END, MS_TRANSFER_END); - - rtsx_pci_send_cmd_no_wait(pcr); - - err = rtsx_pci_transfer_data(pcr, sg, 1, data_dir == READ, 10000); - if (err < 0) { - ms_clear_error(host); - return err; - } - - rtsx_pci_read_register(pcr, MS_TRANS_CFG, &val); - if (pro_card) { - if (val & (MS_INT_CMDNK | MS_INT_ERR | - MS_CRC16_ERR | MS_RDY_TIMEOUT)) - return -EIO; - } else { - if (val & (MS_CRC16_ERR | MS_RDY_TIMEOUT)) - return -EIO; - } - - return 0; -} - -static int ms_write_bytes(struct realtek_pci_ms *host, u8 tpc, - u8 cfg, u8 cnt, u8 *data, u8 *int_reg) -{ - struct rtsx_pcr *pcr = host->pcr; - int err, i; - - dev_dbg(ms_dev(host), "%s: tpc = 0x%02x\n", __func__, tpc); - - if (!data) - return -EINVAL; - - rtsx_pci_init_cmd(pcr); - - for (i = 0; i < cnt; i++) - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, - PPBUF_BASE2 + i, 0xFF, data[i]); - if (cnt % 2) - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, - PPBUF_BASE2 + i, 0xFF, 0xFF); - - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TPC, 0xFF, tpc); - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_BYTE_CNT, 0xFF, cnt); - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg); - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DATA_SOURCE, - 0x01, PINGPONG_BUFFER); - - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TRANSFER, - 0xFF, MS_TRANSFER_START | MS_TM_WRITE_BYTES); - rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, MS_TRANSFER, - MS_TRANSFER_END, MS_TRANSFER_END); - if (int_reg) - rtsx_pci_add_cmd(pcr, READ_REG_CMD, MS_TRANS_CFG, 0, 0); - - err = rtsx_pci_send_cmd(pcr, 5000); - if (err < 0) { - u8 val; - - rtsx_pci_read_register(pcr, MS_TRANS_CFG, &val); - dev_dbg(ms_dev(host), "MS_TRANS_CFG: 0x%02x\n", val); - - if (int_reg) - *int_reg = val & 0x0F; - - ms_print_debug_regs(host); - - ms_clear_error(host); - - if (!(tpc & 0x08)) { - if (val & MS_CRC16_ERR) - return -EIO; - } else { - if (!(val & 0x80)) { - if (val & (MS_INT_ERR | MS_INT_CMDNK)) - return -EIO; - } - } - - return -ETIMEDOUT; - } - - if (int_reg) { - u8 *ptr = rtsx_pci_get_cmd_data(pcr) + 1; - *int_reg = *ptr & 0x0F; - } - - return 0; -} - -static int ms_read_bytes(struct realtek_pci_ms *host, u8 tpc, - u8 cfg, u8 cnt, u8 *data, u8 *int_reg) -{ - struct rtsx_pcr *pcr = host->pcr; - int err, i; - u8 *ptr; - - dev_dbg(ms_dev(host), "%s: tpc = 0x%02x\n", __func__, tpc); - - if (!data) - return -EINVAL; - - rtsx_pci_init_cmd(pcr); - - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TPC, 0xFF, tpc); - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_BYTE_CNT, 0xFF, cnt); - 
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg); - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DATA_SOURCE, - 0x01, PINGPONG_BUFFER); - - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TRANSFER, - 0xFF, MS_TRANSFER_START | MS_TM_READ_BYTES); - rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, MS_TRANSFER, - MS_TRANSFER_END, MS_TRANSFER_END); - for (i = 0; i < cnt - 1; i++) - rtsx_pci_add_cmd(pcr, READ_REG_CMD, PPBUF_BASE2 + i, 0, 0); - if (cnt % 2) - rtsx_pci_add_cmd(pcr, READ_REG_CMD, PPBUF_BASE2 + cnt, 0, 0); - else - rtsx_pci_add_cmd(pcr, READ_REG_CMD, - PPBUF_BASE2 + cnt - 1, 0, 0); - if (int_reg) - rtsx_pci_add_cmd(pcr, READ_REG_CMD, MS_TRANS_CFG, 0, 0); - - err = rtsx_pci_send_cmd(pcr, 5000); - if (err < 0) { - u8 val; - - rtsx_pci_read_register(pcr, MS_TRANS_CFG, &val); - dev_dbg(ms_dev(host), "MS_TRANS_CFG: 0x%02x\n", val); - - if (int_reg) - *int_reg = val & 0x0F; - - ms_print_debug_regs(host); - - ms_clear_error(host); - - if (!(tpc & 0x08)) { - if (val & MS_CRC16_ERR) - return -EIO; - } else { - if (!(val & 0x80)) { - if (val & (MS_INT_ERR | MS_INT_CMDNK)) - return -EIO; - } - } - - return -ETIMEDOUT; - } - - ptr = rtsx_pci_get_cmd_data(pcr) + 1; - for (i = 0; i < cnt; i++) - data[i] = *ptr++; - - if (int_reg) - *int_reg = *ptr & 0x0F; - - return 0; -} - -static int rtsx_pci_ms_issue_cmd(struct realtek_pci_ms *host) -{ - struct memstick_request *req = host->req; - int err = 0; - u8 cfg = 0, int_reg; - - dev_dbg(ms_dev(host), "%s\n", __func__); - - if (req->need_card_int) { - if (host->ifmode != MEMSTICK_SERIAL) - cfg = WAIT_INT; - } - - if (req->long_data) { - err = ms_transfer_data(host, req->data_dir, - req->tpc, cfg, &(req->sg)); - } else { - if (req->data_dir == READ) { - err = ms_read_bytes(host, req->tpc, cfg, - req->data_len, req->data, &int_reg); - } else { - err = ms_write_bytes(host, req->tpc, cfg, - req->data_len, req->data, &int_reg); - } - } - if (err < 0) - return err; - - if (req->need_card_int && (host->ifmode == MEMSTICK_SERIAL)) { - err = ms_read_bytes(host, MS_TPC_GET_INT, - NO_WAIT_INT, 1, &int_reg, NULL); - if (err < 0) - return err; - } - - if (req->need_card_int) { - dev_dbg(ms_dev(host), "int_reg: 0x%02x\n", int_reg); - - if (int_reg & MS_INT_CMDNK) - req->int_reg |= MEMSTICK_INT_CMDNAK; - if (int_reg & MS_INT_BREQ) - req->int_reg |= MEMSTICK_INT_BREQ; - if (int_reg & MS_INT_ERR) - req->int_reg |= MEMSTICK_INT_ERR; - if (int_reg & MS_INT_CED) - req->int_reg |= MEMSTICK_INT_CED; - } - - return 0; -} - -static void rtsx_pci_ms_handle_req(struct work_struct *work) -{ - struct realtek_pci_ms *host = container_of(work, - struct realtek_pci_ms, handle_req); - struct rtsx_pcr *pcr = host->pcr; - struct memstick_host *msh = host->msh; - int rc; - - mutex_lock(&pcr->pcr_mutex); - - rtsx_pci_start_run(pcr); - - rtsx_pci_switch_clock(host->pcr, host->clock, host->ssc_depth, - false, true, false); - rtsx_pci_write_register(pcr, CARD_SELECT, 0x07, MS_MOD_SEL); - rtsx_pci_write_register(pcr, CARD_SHARE_MODE, - CARD_SHARE_MASK, CARD_SHARE_48_MS); - - if (!host->req) { - do { - rc = memstick_next_req(msh, &host->req); - dev_dbg(ms_dev(host), "next req %d\n", rc); - - if (!rc) - host->req->error = rtsx_pci_ms_issue_cmd(host); - } while (!rc); - } - - mutex_unlock(&pcr->pcr_mutex); -} - -static void rtsx_pci_ms_request(struct memstick_host *msh) -{ - struct realtek_pci_ms *host = memstick_priv(msh); - - dev_dbg(ms_dev(host), "--> %s\n", __func__); - - if (rtsx_pci_card_exclusive_check(host->pcr, RTSX_MS_CARD)) - return; - - schedule_work(&host->handle_req); -} - -static int 
rtsx_pci_ms_set_param(struct memstick_host *msh, - enum memstick_param param, int value) -{ - struct realtek_pci_ms *host = memstick_priv(msh); - struct rtsx_pcr *pcr = host->pcr; - unsigned int clock = 0; - u8 ssc_depth = 0; - int err; - - dev_dbg(ms_dev(host), "%s: param = %d, value = %d\n", - __func__, param, value); - - err = rtsx_pci_card_exclusive_check(host->pcr, RTSX_MS_CARD); - if (err) - return err; - - switch (param) { - case MEMSTICK_POWER: - if (value == MEMSTICK_POWER_ON) - err = ms_power_on(host); - else if (value == MEMSTICK_POWER_OFF) - err = ms_power_off(host); - else - return -EINVAL; - break; - - case MEMSTICK_INTERFACE: - if (value == MEMSTICK_SERIAL) { - clock = 19000000; - ssc_depth = RTSX_SSC_DEPTH_500K; - - err = rtsx_pci_write_register(pcr, MS_CFG, 0x58, - MS_BUS_WIDTH_1 | PUSH_TIME_DEFAULT); - if (err < 0) - return err; - } else if (value == MEMSTICK_PAR4) { - clock = 39000000; - ssc_depth = RTSX_SSC_DEPTH_1M; - - err = rtsx_pci_write_register(pcr, MS_CFG, - 0x58, MS_BUS_WIDTH_4 | PUSH_TIME_ODD); - if (err < 0) - return err; - } else { - return -EINVAL; - } - - err = rtsx_pci_switch_clock(pcr, clock, - ssc_depth, false, true, false); - if (err < 0) - return err; - - host->ssc_depth = ssc_depth; - host->clock = clock; - host->ifmode = value; - break; - } - - return 0; -} - -#ifdef CONFIG_PM - -static int rtsx_pci_ms_suspend(struct platform_device *pdev, pm_message_t state) -{ - struct realtek_pci_ms *host = platform_get_drvdata(pdev); - struct memstick_host *msh = host->msh; - - dev_dbg(ms_dev(host), "--> %s\n", __func__); - - memstick_suspend_host(msh); - return 0; -} - -static int rtsx_pci_ms_resume(struct platform_device *pdev) -{ - struct realtek_pci_ms *host = platform_get_drvdata(pdev); - struct memstick_host *msh = host->msh; - - dev_dbg(ms_dev(host), "--> %s\n", __func__); - - memstick_resume_host(msh); - return 0; -} - -#else /* CONFIG_PM */ - -#define rtsx_pci_ms_suspend NULL -#define rtsx_pci_ms_resume NULL - -#endif /* CONFIG_PM */ - -static void rtsx_pci_ms_card_event(struct platform_device *pdev) -{ - struct realtek_pci_ms *host = platform_get_drvdata(pdev); - - memstick_detect_change(host->msh); -} - -static int rtsx_pci_ms_drv_probe(struct platform_device *pdev) -{ - struct memstick_host *msh; - struct realtek_pci_ms *host; - struct rtsx_pcr *pcr; - struct pcr_handle *handle = pdev->dev.platform_data; - int rc; - - if (!handle) - return -ENXIO; - - pcr = handle->pcr; - if (!pcr) - return -ENXIO; - - dev_dbg(&(pdev->dev), - ": Realtek PCI-E Memstick controller found\n"); - - msh = memstick_alloc_host(sizeof(*host), &pdev->dev); - if (!msh) - return -ENOMEM; - - host = memstick_priv(msh); - host->pcr = pcr; - host->msh = msh; - host->pdev = pdev; - platform_set_drvdata(pdev, host); - pcr->slots[RTSX_MS_CARD].p_dev = pdev; - pcr->slots[RTSX_MS_CARD].card_event = rtsx_pci_ms_card_event; - - mutex_init(&host->host_mutex); - - INIT_WORK(&host->handle_req, rtsx_pci_ms_handle_req); - msh->request = rtsx_pci_ms_request; - msh->set_param = rtsx_pci_ms_set_param; - msh->caps = MEMSTICK_CAP_PAR4; - - rc = memstick_add_host(msh); - if (rc) { - memstick_free_host(msh); - return rc; - } - - return 0; -} - -static void rtsx_pci_ms_drv_remove(struct platform_device *pdev) -{ - struct realtek_pci_ms *host = platform_get_drvdata(pdev); - struct rtsx_pcr *pcr; - struct memstick_host *msh; - int rc; - - pcr = host->pcr; - pcr->slots[RTSX_MS_CARD].p_dev = NULL; - pcr->slots[RTSX_MS_CARD].card_event = NULL; - msh = host->msh; - host->eject = true; - 
cancel_work_sync(&host->handle_req); - - mutex_lock(&host->host_mutex); - if (host->req) { - dev_dbg(&(pdev->dev), - "%s: Controller removed during transfer\n", - dev_name(&msh->dev)); - - rtsx_pci_complete_unfinished_transfer(pcr); - - host->req->error = -ENOMEDIUM; - do { - rc = memstick_next_req(msh, &host->req); - if (!rc) - host->req->error = -ENOMEDIUM; - } while (!rc); - } - mutex_unlock(&host->host_mutex); - - memstick_remove_host(msh); - memstick_free_host(msh); - - dev_dbg(&(pdev->dev), - ": Realtek PCI-E Memstick controller has been removed\n"); -} - -static struct platform_device_id rtsx_pci_ms_ids[] = { - { - .name = DRV_NAME_RTSX_PCI_MS, - }, { - /* sentinel */ - } -}; -MODULE_DEVICE_TABLE(platform, rtsx_pci_ms_ids); - -static struct platform_driver rtsx_pci_ms_driver = { - .probe = rtsx_pci_ms_drv_probe, - .remove_new = rtsx_pci_ms_drv_remove, - .id_table = rtsx_pci_ms_ids, - .suspend = rtsx_pci_ms_suspend, - .resume = rtsx_pci_ms_resume, - .driver = { - .name = DRV_NAME_RTSX_PCI_MS, - }, -}; -module_platform_driver(rtsx_pci_ms_driver); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Wei WANG <wei_wang@realsil.com.cn>"); -MODULE_DESCRIPTION("Realtek PCI-E Memstick Card Host Driver"); diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c index 4c67e2c5a82e..a7a2bcedb37e 100644 --- a/drivers/misc/fastrpc.c +++ b/drivers/misc/fastrpc.c @@ -1238,6 +1238,7 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl, struct fastrpc_phy_page pages[1]; char *name; int err; + bool scm_done = false; struct { int pgid; u32 namelen; @@ -1289,6 +1290,7 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl, fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err); goto err_map; } + scm_done = true; } } @@ -1320,10 +1322,11 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl, goto err_invoke; kfree(args); + kfree(name); return 0; err_invoke: - if (fl->cctx->vmcount) { + if (fl->cctx->vmcount && scm_done) { u64 src_perms = 0; struct qcom_scm_vmperm dst_perms; u32 i; @@ -1693,16 +1696,20 @@ static int fastrpc_get_info_from_dsp(struct fastrpc_user *fl, uint32_t *dsp_attr { struct fastrpc_invoke_args args[2] = { 0 }; - /* Capability filled in userspace */ + /* + * Capability filled in userspace. This carries the information + * about the remoteproc support which is fetched from the remoteproc + * sysfs node by userspace. 
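
The fastrpc_get_info_from_dsp() fix that follows is a units fix: slot 0 of the capability array is filled on the kernel side, so the DSP only receives dsp_attr_buf_len - 1 slots, and the remote expects that length in bytes rather than in u32 elements (the caller is likewise switched from the byte-sized ..._LEN constant to the element count FASTRPC_MAX_DSP_ATTRIBUTES). A standalone arithmetic check, with an assumed attribute count for illustration:

#include <stdio.h>

int main(void)
{
        unsigned int len = 256; /* assumed element count, for illustration */

        len -= 1;               /* slot 0 carries the kernel-filled word */
        printf("payload: %u u32 elements = %zu bytes\n",
               len, len * sizeof(unsigned int));
        return 0;
}
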
+ */ dsp_attr_buf[0] = 0; + dsp_attr_buf_len -= 1; args[0].ptr = (u64)(uintptr_t)&dsp_attr_buf_len; args[0].length = sizeof(dsp_attr_buf_len); args[0].fd = -1; args[1].ptr = (u64)(uintptr_t)&dsp_attr_buf[1]; - args[1].length = dsp_attr_buf_len; + args[1].length = dsp_attr_buf_len * sizeof(u32); args[1].fd = -1; - fl->pd = USER_PD; return fastrpc_internal_invoke(fl, true, FASTRPC_DSP_UTILITIES_HANDLE, FASTRPC_SCALARS(0, 1, 1), args); @@ -1730,7 +1737,7 @@ static int fastrpc_get_info_from_kernel(struct fastrpc_ioctl_capability *cap, if (!dsp_attributes) return -ENOMEM; - err = fastrpc_get_info_from_dsp(fl, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES_LEN); + err = fastrpc_get_info_from_dsp(fl, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES); if (err == DSP_UNSUPPORTED_API) { dev_info(&cctx->rpdev->dev, "Warning: DSP capabilities not supported on domain: %d\n", domain); @@ -1783,7 +1790,7 @@ static int fastrpc_get_dsp_info(struct fastrpc_user *fl, char __user *argp) if (err) return err; - if (copy_to_user(argp, &cap.capability, sizeof(cap.capability))) + if (copy_to_user(argp, &cap, sizeof(cap))) return -EFAULT; return 0; @@ -2080,6 +2087,16 @@ err_invoke: return err; } +static int is_attach_rejected(struct fastrpc_user *fl) +{ + /* Check if the device node is non-secure */ + if (!fl->is_secure_dev) { + dev_dbg(&fl->cctx->rpdev->dev, "untrusted app trying to attach to privileged DSP PD\n"); + return -EACCES; + } + return 0; +} + static long fastrpc_device_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { @@ -2092,13 +2109,19 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int cmd, err = fastrpc_invoke(fl, argp); break; case FASTRPC_IOCTL_INIT_ATTACH: - err = fastrpc_init_attach(fl, ROOT_PD); + err = is_attach_rejected(fl); + if (!err) + err = fastrpc_init_attach(fl, ROOT_PD); break; case FASTRPC_IOCTL_INIT_ATTACH_SNS: - err = fastrpc_init_attach(fl, SENSORS_PD); + err = is_attach_rejected(fl); + if (!err) + err = fastrpc_init_attach(fl, SENSORS_PD); break; case FASTRPC_IOCTL_INIT_CREATE_STATIC: - err = fastrpc_init_create_static_process(fl, argp); + err = is_attach_rejected(fl); + if (!err) + err = fastrpc_init_create_static_process(fl, argp); break; case FASTRPC_IOCTL_INIT_CREATE: err = fastrpc_init_create_process(fl, argp); diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c index 5178c02b21eb..62ba01525479 100644 --- a/drivers/misc/lkdtm/bugs.c +++ b/drivers/misc/lkdtm/bugs.c @@ -286,6 +286,35 @@ static void lkdtm_HARDLOCKUP(void) cpu_relax(); } +static void __lkdtm_SMP_CALL_LOCKUP(void *unused) +{ + for (;;) + cpu_relax(); +} + +static void lkdtm_SMP_CALL_LOCKUP(void) +{ + unsigned int cpu, target; + + cpus_read_lock(); + + cpu = get_cpu(); + target = cpumask_any_but(cpu_online_mask, cpu); + + if (target >= nr_cpu_ids) { + pr_err("FAIL: no other online CPUs\n"); + goto out_put_cpus; + } + + smp_call_function_single(target, __lkdtm_SMP_CALL_LOCKUP, NULL, 1); + + pr_err("FAIL: did not hang\n"); + +out_put_cpus: + put_cpu(); + cpus_read_unlock(); +} + static void lkdtm_SPINLOCKUP(void) { /* Must be called twice to trigger. 
*/ @@ -680,6 +709,7 @@ static struct crashtype crashtypes[] = { CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE), CRASHTYPE(SOFTLOCKUP), CRASHTYPE(HARDLOCKUP), + CRASHTYPE(SMP_CALL_LOCKUP), CRASHTYPE(SPINLOCKUP), CRASHTYPE(HUNG_TASK), CRASHTYPE(OVERFLOW_SIGNED), diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c index 16695cb5e69c..7c3d8bedf90b 100644 --- a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c +++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c @@ -153,7 +153,6 @@ static int pci1xxxx_eeprom_read(void *priv_t, unsigned int off, buf[byte] = readl(rb + MMAP_EEPROM_OFFSET(EEPROM_DATA_REG)); } - ret = byte; error: release_sys_lock(priv); return ret; @@ -197,7 +196,6 @@ static int pci1xxxx_eeprom_write(void *priv_t, unsigned int off, goto error; } } - ret = byte; error: release_sys_lock(priv); return ret; @@ -258,7 +256,6 @@ static int pci1xxxx_otp_read(void *priv_t, unsigned int off, buf[byte] = readl(rb + MMAP_OTP_OFFSET(OTP_RD_DATA_OFFSET)); } - ret = byte; error: release_sys_lock(priv); return ret; @@ -315,7 +312,6 @@ static int pci1xxxx_otp_write(void *priv_t, unsigned int off, goto error; } } - ret = byte; error: release_sys_lock(priv); return ret; diff --git a/drivers/misc/mei/platform-vsc.c b/drivers/misc/mei/platform-vsc.c index 1ec65d87488a..d02f6e881139 100644 --- a/drivers/misc/mei/platform-vsc.c +++ b/drivers/misc/mei/platform-vsc.c @@ -28,8 +28,8 @@ #define MEI_VSC_MAX_MSG_SIZE 512 -#define MEI_VSC_POLL_DELAY_US (50 * USEC_PER_MSEC) -#define MEI_VSC_POLL_TIMEOUT_US (200 * USEC_PER_MSEC) +#define MEI_VSC_POLL_DELAY_US (100 * USEC_PER_MSEC) +#define MEI_VSC_POLL_TIMEOUT_US (400 * USEC_PER_MSEC) #define mei_dev_to_vsc_hw(dev) ((struct mei_vsc_hw *)((dev)->hw)) diff --git a/drivers/misc/mei/vsc-fw-loader.c b/drivers/misc/mei/vsc-fw-loader.c index 596a9d695dfc..084d0205f97d 100644 --- a/drivers/misc/mei/vsc-fw-loader.c +++ b/drivers/misc/mei/vsc-fw-loader.c @@ -204,7 +204,7 @@ struct vsc_img_frag { /** * struct vsc_fw_loader - represent vsc firmware loader - * @dev: device used to request fimware + * @dev: device used to request firmware * @tp: transport layer used with the firmware loader * @csi: CSI image * @ace: ACE image diff --git a/drivers/misc/mei/vsc-tp.c b/drivers/misc/mei/vsc-tp.c index e6a98dba8a73..1618cca9a731 100644 --- a/drivers/misc/mei/vsc-tp.c +++ b/drivers/misc/mei/vsc-tp.c @@ -331,12 +331,12 @@ int vsc_tp_rom_xfer(struct vsc_tp *tp, const void *obuf, void *ibuf, size_t len) return ret; } - ret = vsc_tp_dev_xfer(tp, tp->tx_buf, tp->rx_buf, len); + ret = vsc_tp_dev_xfer(tp, tp->tx_buf, ibuf ? 
tp->rx_buf : NULL, len); if (ret) return ret; if (ibuf) - cpu_to_be32_array(ibuf, tp->rx_buf, words); + be32_to_cpu_array(ibuf, tp->rx_buf, words); return ret; } @@ -568,6 +568,19 @@ static void vsc_tp_remove(struct spi_device *spi) free_irq(spi->irq, tp); } +static void vsc_tp_shutdown(struct spi_device *spi) +{ + struct vsc_tp *tp = spi_get_drvdata(spi); + + platform_device_unregister(tp->pdev); + + mutex_destroy(&tp->mutex); + + vsc_tp_reset(tp); + + free_irq(spi->irq, tp); +} + static const struct acpi_device_id vsc_tp_acpi_ids[] = { { "INTC1009" }, /* Raptor Lake */ { "INTC1058" }, /* Tiger Lake */ @@ -580,6 +593,7 @@ MODULE_DEVICE_TABLE(acpi, vsc_tp_acpi_ids); static struct spi_driver vsc_tp_driver = { .probe = vsc_tp_probe, .remove = vsc_tp_remove, + .shutdown = vsc_tp_shutdown, .driver = { .name = "vsc-tp", .acpi_match_table = vsc_tp_acpi_ids, diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 367509b5b646..2c9963248fcb 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -2466,8 +2466,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, struct mmc_blk_data *md; int devidx, ret; char cap_str[10]; - bool cache_enabled = false; - bool fua_enabled = false; + unsigned int features = 0; devidx = ida_alloc_max(&mmc_blk_ida, max_devices - 1, GFP_KERNEL); if (devidx < 0) { @@ -2499,7 +2498,24 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, */ md->read_only = mmc_blk_readonly(card); - md->disk = mmc_init_queue(&md->queue, card); + if (mmc_host_cmd23(card->host)) { + if ((mmc_card_mmc(card) && + card->csd.mmca_vsn >= CSD_SPEC_VER_3) || + (mmc_card_sd(card) && + card->scr.cmds & SD_SCR_CMD23_SUPPORT)) + md->flags |= MMC_BLK_CMD23; + } + + if (md->flags & MMC_BLK_CMD23 && + ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) || + card->ext_csd.rel_sectors)) { + md->flags |= MMC_BLK_REL_WR; + features |= (BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA); + } else if (mmc_cache_enabled(card->host)) { + features |= BLK_FEAT_WRITE_CACHE; + } + + md->disk = mmc_init_queue(&md->queue, card, features); if (IS_ERR(md->disk)) { ret = PTR_ERR(md->disk); goto err_kfree; @@ -2539,26 +2555,6 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, set_capacity(md->disk, size); - if (mmc_host_cmd23(card->host)) { - if ((mmc_card_mmc(card) && - card->csd.mmca_vsn >= CSD_SPEC_VER_3) || - (mmc_card_sd(card) && - card->scr.cmds & SD_SCR_CMD23_SUPPORT)) - md->flags |= MMC_BLK_CMD23; - } - - if (md->flags & MMC_BLK_CMD23 && - ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) || - card->ext_csd.rel_sectors)) { - md->flags |= MMC_BLK_REL_WR; - fua_enabled = true; - cache_enabled = true; - } - if (mmc_cache_enabled(card->host)) - cache_enabled = true; - - blk_queue_write_cache(md->queue.queue, cache_enabled, fua_enabled); - string_get_size((u64)size, 512, STRING_UNITS_2, cap_str, sizeof(cap_str)); pr_info("%s: %s %s %s%s\n", diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index a8c17b4cd737..d6c819dd68ed 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -2362,4 +2362,5 @@ static void __exit mmc_exit(void) subsys_initcall(mmc_init); module_exit(mmc_exit); +MODULE_DESCRIPTION("MMC core driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/mmc/core/pwrseq_emmc.c b/drivers/mmc/core/pwrseq_emmc.c index 3b6d69cefb4e..96fa4c508900 100644 --- a/drivers/mmc/core/pwrseq_emmc.c +++ b/drivers/mmc/core/pwrseq_emmc.c @@ -115,4 +115,5 @@ static struct platform_driver mmc_pwrseq_emmc_driver = { }; 
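
The vsc-tp hunk above fixes the direction of the byte-order conversion on the receive path: words coming off the wire are big-endian, so they must be converted into CPU order with be32_to_cpu_array(), and the RX buffer is only handed to the transfer when the caller actually supplied ibuf. A sketch of the corrected annotation, assuming tp->rx_buf holds raw big-endian words from the SPI transfer:

        size_t words = len / sizeof(__be32);
        __be32 *wire = tp->rx_buf;      /* big-endian words off the wire */
        u32 *out = ibuf;                /* what the caller consumes */

        be32_to_cpu_array(out, wire, words);    /* wire order -> CPU order */
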
module_platform_driver(mmc_pwrseq_emmc_driver); +MODULE_DESCRIPTION("Hardware reset support for eMMC"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/core/pwrseq_sd8787.c b/drivers/mmc/core/pwrseq_sd8787.c index 0c5808fc3206..f24bbd68e251 100644 --- a/drivers/mmc/core/pwrseq_sd8787.c +++ b/drivers/mmc/core/pwrseq_sd8787.c @@ -130,4 +130,5 @@ static struct platform_driver mmc_pwrseq_sd8787_driver = { }; module_platform_driver(mmc_pwrseq_sd8787_driver); +MODULE_DESCRIPTION("Power sequence support for Marvell SD8787 BT + Wifi chip"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/core/pwrseq_simple.c b/drivers/mmc/core/pwrseq_simple.c index df9588503ad0..154a8921ae75 100644 --- a/drivers/mmc/core/pwrseq_simple.c +++ b/drivers/mmc/core/pwrseq_simple.c @@ -159,4 +159,5 @@ static struct platform_driver mmc_pwrseq_simple_driver = { }; module_platform_driver(mmc_pwrseq_simple_driver); +MODULE_DESCRIPTION("Simple power sequence management for MMC"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index 241cdc2b2a2a..d0b3ca8a11f0 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -344,10 +344,12 @@ static const struct blk_mq_ops mmc_mq_ops = { }; static struct gendisk *mmc_alloc_disk(struct mmc_queue *mq, - struct mmc_card *card) + struct mmc_card *card, unsigned int features) { struct mmc_host *host = card->host; - struct queue_limits lim = { }; + struct queue_limits lim = { + .features = features, + }; struct gendisk *disk; if (mmc_can_erase(card)) @@ -376,18 +378,16 @@ static struct gendisk *mmc_alloc_disk(struct mmc_queue *mq, lim.max_segments = host->max_segs; } + if (mmc_host_is_spi(host) && host->use_spi_crc) + lim.features |= BLK_FEAT_STABLE_WRITES; + disk = blk_mq_alloc_disk(&mq->tag_set, &lim, mq); if (IS_ERR(disk)) return disk; mq->queue = disk->queue; - if (mmc_host_is_spi(host) && host->use_spi_crc) - blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, mq->queue); blk_queue_rq_timeout(mq->queue, 60 * HZ); - blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue); - blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue); - dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue)); INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler); @@ -413,10 +413,12 @@ static inline bool mmc_merge_capable(struct mmc_host *host) * mmc_init_queue - initialise a queue structure. * @mq: mmc queue * @card: mmc card to attach this queue + * @features: block layer features (BLK_FEAT_*) * * Initialise a MMC card request queue. 
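
The mmc queue changes below complete the same migration seen in the raid*_set_limits() hunks earlier: queue properties stop being toggled on a live queue via blk_queue_flag_set() and blk_queue_write_cache(), and are instead declared in struct queue_limits before the disk exists. A hedged sketch of the pattern for a hypothetical driver (all names here are illustrative):

        struct queue_limits lim = {
                .features = features,   /* e.g. BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA */
        };

        if (crc_over_data)              /* pages must not change mid-I/O */
                lim.features |= BLK_FEAT_STABLE_WRITES;

        disk = blk_mq_alloc_disk(&tag_set, &lim, drv_data);
        if (IS_ERR(disk))
                return PTR_ERR(disk);
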
*/ -struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card) +struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, + unsigned int features) { struct mmc_host *host = card->host; struct gendisk *disk; @@ -460,7 +462,7 @@ struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card) return ERR_PTR(ret); - disk = mmc_alloc_disk(mq, card); + disk = mmc_alloc_disk(mq, card, features); if (IS_ERR(disk)) blk_mq_free_tag_set(&mq->tag_set); return disk; diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h index 9ade3bcbb714..1498840a4ea0 100644 --- a/drivers/mmc/core/queue.h +++ b/drivers/mmc/core/queue.h @@ -94,7 +94,8 @@ struct mmc_queue { struct work_struct complete_work; }; -struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card); +struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, + unsigned int features); extern void mmc_cleanup_queue(struct mmc_queue *); extern void mmc_queue_suspend(struct mmc_queue *); extern void mmc_queue_resume(struct mmc_queue *); diff --git a/drivers/mmc/core/sdio_uart.c b/drivers/mmc/core/sdio_uart.c index 575ebbce378e..6b7471dba3bf 100644 --- a/drivers/mmc/core/sdio_uart.c +++ b/drivers/mmc/core/sdio_uart.c @@ -1162,4 +1162,5 @@ module_init(sdio_uart_init); module_exit(sdio_uart_exit); MODULE_AUTHOR("Nicolas Pitre"); +MODULE_DESCRIPTION("SDIO UART/GPS driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index bb0d4fb0892a..eb3ecfe05591 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -1016,7 +1016,7 @@ config MMC_SDHCI_MICROCHIP_PIC32 config MMC_SDHCI_BRCMSTB tristate "Broadcom SDIO/SD/MMC support" - depends on ARCH_BRCMSTB || BMIPS_GENERIC || COMPILE_TEST + depends on ARCH_BRCMSTB || ARCH_BCM2835 || BMIPS_GENERIC || COMPILE_TEST depends on MMC_SDHCI_PLTFM select MMC_CQHCI default ARCH_BRCMSTB || BMIPS_GENERIC diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index 8199d9620075..6490df54a6f5 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c @@ -33,6 +33,7 @@ #include <linux/pm.h> #include <linux/pm_runtime.h> #include <linux/pinctrl/consumer.h> +#include <linux/workqueue.h> #include <asm/cacheflush.h> #include <asm/io.h> @@ -272,12 +273,12 @@ struct atmel_mci_dma { * EVENT_DATA_ERROR is pending. * @stop_cmdr: Value to be loaded into CMDR when the stop command is * to be sent. - * @tasklet: Tasklet running the request state machine. + * @bh_work: Work running the request state machine. * @pending_events: Bitmask of events flagged by the interrupt handler - * to be processed by the tasklet. + * to be processed by the work. * @completed_events: Bitmask of events which the state machine has * processed. - * @state: Tasklet state. + * @state: Work state. * @queue: List of slots waiting for access to the controller. * @need_clock_update: Update the clock rate before the next request. * @need_reset: Reset controller before next request. 
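
The atmel-mci diff that follows is the first of several mechanical tasklet-to-workqueue conversions in this series (au1xmmc, cb710 and dw_mmc repeat it): the tasklet becomes a work_struct executed on system_bh_wq, which keeps the run-in-softirq semantics while using the workqueue API. The whole recipe in one place, with a generic host structure as a sketch:

#include <linux/workqueue.h>

struct myhost {
        struct work_struct bh_work;     /* was: struct tasklet_struct */
};

static void myhost_bh_work(struct work_struct *t)
{
        struct myhost *host = from_work(host, t, bh_work);

        /* ... former tasklet body: run the request state machine ... */
}

/*
 * setup:    INIT_WORK(&host->bh_work, myhost_bh_work);
 * schedule: queue_work(system_bh_wq, &host->bh_work);  (safe from IRQ context)
 * teardown: cancel_work_sync(&host->bh_work);          (replaces tasklet_kill())
 */
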
@@ -352,7 +353,7 @@ struct atmel_mci { u32 data_status; u32 stop_cmdr; - struct tasklet_struct tasklet; + struct work_struct bh_work; unsigned long pending_events; unsigned long completed_events; enum atmel_mci_state state; @@ -735,7 +736,7 @@ static void atmci_timeout_timer(struct timer_list *t) host->need_reset = 1; host->state = STATE_END_REQUEST; smp_wmb(); - tasklet_schedule(&host->tasklet); + queue_work(system_bh_wq, &host->bh_work); } static inline unsigned int atmci_ns_to_clocks(struct atmel_mci *host, @@ -958,7 +959,7 @@ static void atmci_pdc_complete(struct atmel_mci *host) dev_dbg(dev, "(%s) set pending xfer complete\n", __func__); atmci_set_pending(host, EVENT_XFER_COMPLETE); - tasklet_schedule(&host->tasklet); + queue_work(system_bh_wq, &host->bh_work); } static void atmci_dma_cleanup(struct atmel_mci *host) @@ -972,7 +973,7 @@ static void atmci_dma_cleanup(struct atmel_mci *host) } /* - * This function is called by the DMA driver from tasklet context. + * This function is called by the DMA driver from bh context. */ static void atmci_dma_complete(void *arg) { @@ -995,7 +996,7 @@ static void atmci_dma_complete(void *arg) if (data) { dev_dbg(dev, "(%s) set pending xfer complete\n", __func__); atmci_set_pending(host, EVENT_XFER_COMPLETE); - tasklet_schedule(&host->tasklet); + queue_work(system_bh_wq, &host->bh_work); /* * Regardless of what the documentation says, we have @@ -1008,7 +1009,7 @@ static void atmci_dma_complete(void *arg) * haven't seen all the potential error bits yet. * * The interrupt handler will schedule a different - * tasklet to finish things up when the data transfer + * bh work to finish things up when the data transfer * is completely done. * * We may not complete the mmc request here anyway @@ -1745,9 +1746,9 @@ static void atmci_detect_change(struct timer_list *t) } } -static void atmci_tasklet_func(struct tasklet_struct *t) +static void atmci_work_func(struct work_struct *t) { - struct atmel_mci *host = from_tasklet(host, t, tasklet); + struct atmel_mci *host = from_work(host, t, bh_work); struct mmc_request *mrq = host->mrq; struct mmc_data *data = host->data; struct device *dev = host->dev; @@ -1759,7 +1760,7 @@ static void atmci_tasklet_func(struct tasklet_struct *t) state = host->state; - dev_vdbg(dev, "tasklet: state %u pending/completed/mask %lx/%lx/%x\n", + dev_vdbg(dev, "bh_work: state %u pending/completed/mask %lx/%lx/%x\n", state, host->pending_events, host->completed_events, atmci_readl(host, ATMCI_IMR)); @@ -2118,7 +2119,7 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id) dev_dbg(dev, "set pending data error\n"); smp_wmb(); atmci_set_pending(host, EVENT_DATA_ERROR); - tasklet_schedule(&host->tasklet); + queue_work(system_bh_wq, &host->bh_work); } if (pending & ATMCI_TXBUFE) { @@ -2187,7 +2188,7 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id) smp_wmb(); dev_dbg(dev, "set pending notbusy\n"); atmci_set_pending(host, EVENT_NOTBUSY); - tasklet_schedule(&host->tasklet); + queue_work(system_bh_wq, &host->bh_work); } if (pending & ATMCI_NOTBUSY) { @@ -2196,7 +2197,7 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id) smp_wmb(); dev_dbg(dev, "set pending notbusy\n"); atmci_set_pending(host, EVENT_NOTBUSY); - tasklet_schedule(&host->tasklet); + queue_work(system_bh_wq, &host->bh_work); } if (pending & ATMCI_RXRDY) @@ -2211,7 +2212,7 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id) smp_wmb(); dev_dbg(dev, "set pending cmd rdy\n"); atmci_set_pending(host, EVENT_CMD_RDY); - tasklet_schedule(&host->tasklet); + 
queue_work(system_bh_wq, &host->bh_work); } if (pending & (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB)) @@ -2487,7 +2488,7 @@ static int atmci_probe(struct platform_device *pdev) host->mapbase = regs->start; - tasklet_setup(&host->tasklet, atmci_tasklet_func); + INIT_WORK(&host->bh_work, atmci_work_func); ret = request_irq(irq, atmci_interrupt, 0, dev_name(dev), host); if (ret) { diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c index b5a5c6a2fe8b..6e80bcb668ec 100644 --- a/drivers/mmc/host/au1xmmc.c +++ b/drivers/mmc/host/au1xmmc.c @@ -42,6 +42,7 @@ #include <linux/leds.h> #include <linux/mmc/host.h> #include <linux/slab.h> +#include <linux/workqueue.h> #include <asm/io.h> #include <asm/mach-au1x00/au1000.h> @@ -113,8 +114,8 @@ struct au1xmmc_host { int irq; - struct tasklet_struct finish_task; - struct tasklet_struct data_task; + struct work_struct finish_bh_work; + struct work_struct data_bh_work; struct au1xmmc_platform_data *platdata; struct platform_device *pdev; struct resource *ioarea; @@ -253,9 +254,9 @@ static void au1xmmc_finish_request(struct au1xmmc_host *host) mmc_request_done(host->mmc, mrq); } -static void au1xmmc_tasklet_finish(struct tasklet_struct *t) +static void au1xmmc_finish_bh_work(struct work_struct *t) { - struct au1xmmc_host *host = from_tasklet(host, t, finish_task); + struct au1xmmc_host *host = from_work(host, t, finish_bh_work); au1xmmc_finish_request(host); } @@ -363,9 +364,9 @@ static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status) au1xmmc_finish_request(host); } -static void au1xmmc_tasklet_data(struct tasklet_struct *t) +static void au1xmmc_data_bh_work(struct work_struct *t) { - struct au1xmmc_host *host = from_tasklet(host, t, data_task); + struct au1xmmc_host *host = from_work(host, t, data_bh_work); u32 status = __raw_readl(HOST_STATUS(host)); au1xmmc_data_complete(host, status); @@ -425,7 +426,7 @@ static void au1xmmc_send_pio(struct au1xmmc_host *host) if (host->flags & HOST_F_STOP) SEND_STOP(host); - tasklet_schedule(&host->data_task); + queue_work(system_bh_wq, &host->data_bh_work); } } @@ -505,7 +506,7 @@ static void au1xmmc_receive_pio(struct au1xmmc_host *host) if (host->flags & HOST_F_STOP) SEND_STOP(host); - tasklet_schedule(&host->data_task); + queue_work(system_bh_wq, &host->data_bh_work); } } @@ -561,7 +562,7 @@ static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status) if (!trans || cmd->error) { IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA | SD_CONFIG_RF); - tasklet_schedule(&host->finish_task); + queue_work(system_bh_wq, &host->finish_bh_work); return; } @@ -797,7 +798,7 @@ static irqreturn_t au1xmmc_irq(int irq, void *dev_id) IRQ_OFF(host, SD_CONFIG_NE | SD_CONFIG_TH); /* IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA | SD_CONFIG_RF); */ - tasklet_schedule(&host->finish_task); + queue_work(system_bh_wq, &host->finish_bh_work); } #if 0 else if (status & SD_STATUS_DD) { @@ -806,7 +807,7 @@ static irqreturn_t au1xmmc_irq(int irq, void *dev_id) au1xmmc_receive_pio(host); else { au1xmmc_data_complete(host, status); - /* tasklet_schedule(&host->data_task); */ + /* queue_work(system_bh_wq, &host->data_bh_work); */ } } #endif @@ -854,7 +855,7 @@ static void au1xmmc_dbdma_callback(int irq, void *dev_id) if (host->flags & HOST_F_STOP) SEND_STOP(host); - tasklet_schedule(&host->data_task); + queue_work(system_bh_wq, &host->data_bh_work); } static int au1xmmc_dbdma_init(struct au1xmmc_host *host) @@ -1039,9 +1040,9 @@ static int au1xmmc_probe(struct platform_device *pdev) if (host->platdata) mmc->caps &= 
~(host->platdata->mask_host_caps); - tasklet_setup(&host->data_task, au1xmmc_tasklet_data); + INIT_WORK(&host->data_bh_work, au1xmmc_data_bh_work); - tasklet_setup(&host->finish_task, au1xmmc_tasklet_finish); + INIT_WORK(&host->finish_bh_work, au1xmmc_finish_bh_work); if (has_dbdma()) { ret = au1xmmc_dbdma_init(host); @@ -1091,8 +1092,8 @@ out5: if (host->flags & HOST_F_DBDMA) au1xmmc_dbdma_shutdown(host); - tasklet_kill(&host->data_task); - tasklet_kill(&host->finish_task); + cancel_work_sync(&host->data_bh_work); + cancel_work_sync(&host->finish_bh_work); if (host->platdata && host->platdata->cd_setup && !(mmc->caps & MMC_CAP_NEEDS_POLL)) @@ -1135,8 +1136,8 @@ static void au1xmmc_remove(struct platform_device *pdev) __raw_writel(0, HOST_CONFIG2(host)); wmb(); /* drain writebuffer */ - tasklet_kill(&host->data_task); - tasklet_kill(&host->finish_task); + cancel_work_sync(&host->data_bh_work); + cancel_work_sync(&host->finish_bh_work); if (host->flags & HOST_F_DBDMA) au1xmmc_dbdma_shutdown(host); diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c index 0aec33b88bef..902f7f20abaa 100644 --- a/drivers/mmc/host/cb710-mmc.c +++ b/drivers/mmc/host/cb710-mmc.c @@ -493,7 +493,7 @@ static void cb710_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) if (!cb710_mmc_command(mmc, mrq->cmd) && mrq->stop) cb710_mmc_command(mmc, mrq->stop); - tasklet_schedule(&reader->finish_req_tasklet); + queue_work(system_bh_wq, &reader->finish_req_bh_work); } static int cb710_mmc_powerup(struct cb710_slot *slot) @@ -646,10 +646,10 @@ static int cb710_mmc_irq_handler(struct cb710_slot *slot) return 1; } -static void cb710_mmc_finish_request_tasklet(struct tasklet_struct *t) +static void cb710_mmc_finish_request_bh_work(struct work_struct *t) { - struct cb710_mmc_reader *reader = from_tasklet(reader, t, - finish_req_tasklet); + struct cb710_mmc_reader *reader = from_work(reader, t, + finish_req_bh_work); struct mmc_request *mrq = reader->mrq; reader->mrq = NULL; @@ -718,8 +718,8 @@ static int cb710_mmc_init(struct platform_device *pdev) reader = mmc_priv(mmc); - tasklet_setup(&reader->finish_req_tasklet, - cb710_mmc_finish_request_tasklet); + INIT_WORK(&reader->finish_req_bh_work, + cb710_mmc_finish_request_bh_work); spin_lock_init(&reader->irq_lock); cb710_dump_regs(chip, CB710_DUMP_REGS_MMC); @@ -763,7 +763,7 @@ static void cb710_mmc_exit(struct platform_device *pdev) cb710_write_port_32(slot, CB710_MMC_CONFIG_PORT, 0); cb710_write_port_16(slot, CB710_MMC_CONFIGB_PORT, 0); - tasklet_kill(&reader->finish_req_tasklet); + cancel_work_sync(&reader->finish_req_bh_work); mmc_free_host(mmc); } diff --git a/drivers/mmc/host/cb710-mmc.h b/drivers/mmc/host/cb710-mmc.h index 5e053077dbed..59abaccaad10 100644 --- a/drivers/mmc/host/cb710-mmc.h +++ b/drivers/mmc/host/cb710-mmc.h @@ -8,10 +8,11 @@ #define LINUX_CB710_MMC_H #include <linux/cb710.h> +#include <linux/workqueue.h> /* per-MMC-reader structure */ struct cb710_mmc_reader { - struct tasklet_struct finish_req_tasklet; + struct work_struct finish_req_bh_work; struct mmc_request *mrq; spinlock_t irq_lock; unsigned char last_power_mode; diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c index d7427894e0bc..9cbde800685d 100644 --- a/drivers/mmc/host/davinci_mmc.c +++ b/drivers/mmc/host/davinci_mmc.c @@ -224,6 +224,9 @@ static void davinci_fifo_data_trans(struct mmc_davinci_host *host, } p = sgm->addr; + if (n > sgm->length) + n = sgm->length; + /* NOTE: we never transfer more than rw_threshold bytes * to/from the fifo 
here; there's no I/O overlap. * This also assumes that access width( i.e. ACCWD) is 4 bytes @@ -1184,7 +1187,7 @@ static int davinci_mmcsd_probe(struct platform_device *pdev) struct mmc_davinci_host *host = NULL; struct mmc_host *mmc = NULL; struct resource *r, *mem = NULL; - int ret, irq; + int ret, irq, bus_width; size_t mem_size; const struct platform_device_id *id_entry; @@ -1314,9 +1317,14 @@ static int davinci_mmcsd_probe(struct platform_device *pdev) rename_region(mem, mmc_hostname(mmc)); + if (mmc->caps & MMC_CAP_8_BIT_DATA) + bus_width = 8; + else if (mmc->caps & MMC_CAP_4_BIT_DATA) + bus_width = 4; + else + bus_width = 1; dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n", - host->use_dma ? "DMA" : "PIO", - (mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1); + host->use_dma ? "DMA" : "PIO", bus_width); return 0; diff --git a/drivers/mmc/host/dw_mmc-bluefield.c b/drivers/mmc/host/dw_mmc-bluefield.c index 4747e5698f48..24e0b604b405 100644 --- a/drivers/mmc/host/dw_mmc-bluefield.c +++ b/drivers/mmc/host/dw_mmc-bluefield.c @@ -3,6 +3,7 @@ * Copyright (C) 2018 Mellanox Technologies. */ +#include <linux/arm-smccc.h> #include <linux/bitfield.h> #include <linux/bitops.h> #include <linux/mmc/host.h> @@ -20,6 +21,9 @@ #define BLUEFIELD_UHS_REG_EXT_SAMPLE 2 #define BLUEFIELD_UHS_REG_EXT_DRIVE 4 +/* SMC call for RST_N */ +#define BLUEFIELD_SMC_SET_EMMC_RST_N 0x82000007 + static void dw_mci_bluefield_set_ios(struct dw_mci *host, struct mmc_ios *ios) { u32 reg; @@ -34,8 +38,20 @@ static void dw_mci_bluefield_set_ios(struct dw_mci *host, struct mmc_ios *ios) mci_writel(host, UHS_REG_EXT, reg); } +static void dw_mci_bluefield_hw_reset(struct dw_mci *host) +{ + struct arm_smccc_res res = { 0 }; + + arm_smccc_smc(BLUEFIELD_SMC_SET_EMMC_RST_N, 0, 0, 0, 0, 0, 0, 0, + &res); + + if (res.a0) + pr_err("RST_N failed.\n"); +} + static const struct dw_mci_drv_data bluefield_drv_data = { - .set_ios = dw_mci_bluefield_set_ios + .set_ios = dw_mci_bluefield_set_ios, + .hw_reset = dw_mci_bluefield_hw_reset }; static const struct of_device_id dw_mci_bluefield_match[] = { diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 8e2d676b9239..2333ef4893ee 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -493,7 +493,7 @@ static void dw_mci_dmac_complete_dma(void *arg) */ if (data) { set_bit(EVENT_XFER_COMPLETE, &host->pending_events); - tasklet_schedule(&host->tasklet); + queue_work(system_bh_wq, &host->bh_work); } } @@ -1617,6 +1617,7 @@ static void dw_mci_hw_reset(struct mmc_host *mmc) { struct dw_mci_slot *slot = mmc_priv(mmc); struct dw_mci *host = slot->host; + const struct dw_mci_drv_data *drv_data = host->drv_data; int reset; if (host->use_dma == TRANS_MODE_IDMAC) @@ -1626,6 +1627,11 @@ static void dw_mci_hw_reset(struct mmc_host *mmc) SDMMC_CTRL_FIFO_RESET)) return; + if (drv_data && drv_data->hw_reset) { + drv_data->hw_reset(host); + return; + } + /* * According to eMMC spec, card reset procedure: * tRstW >= 1us: RST_n pulse width @@ -1834,7 +1840,7 @@ static enum hrtimer_restart dw_mci_fault_timer(struct hrtimer *t) if (!host->data_status) { host->data_status = SDMMC_INT_DCRC; set_bit(EVENT_DATA_ERROR, &host->pending_events); - tasklet_schedule(&host->tasklet); + queue_work(system_bh_wq, &host->bh_work); } spin_unlock_irqrestore(&host->irq_lock, flags); @@ -2056,9 +2062,9 @@ static bool dw_mci_clear_pending_data_complete(struct dw_mci *host) return true; } -static void dw_mci_tasklet_func(struct tasklet_struct *t) +static void dw_mci_work_func(struct work_struct *t) { 
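The dw_mmc hunk here — like the atmci, au1xmmc and cb710 conversions above — is one instance of a single mechanical pattern: tasklet_setup() becomes INIT_WORK(), tasklet_schedule()/tasklet_hi_schedule() become queue_work() on system_bh_wq/system_bh_highpri_wq, from_tasklet() becomes from_work(), and tasklet_kill() becomes cancel_work_sync(). A minimal sketch of the whole pattern on an invented foo_host driver (every identifier below is hypothetical, not taken from any driver in this series):

#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct foo_host {
	void __iomem *regs;
	struct work_struct finish_bh_work;	/* was: struct tasklet_struct */
};

/* BH work callback; from_work() is a container_of() wrapper, the
 * direct replacement for from_tasklet(). */
static void foo_finish_bh_work(struct work_struct *t)
{
	struct foo_host *host = from_work(host, t, finish_bh_work);

	/* complete the request; still runs in softirq (BH) context */
}

static irqreturn_t foo_irq(int irq, void *dev_id)
{
	struct foo_host *host = dev_id;

	/* was: tasklet_schedule(&host->finish_tasklet);
	 * tasklet_hi_schedule() maps to system_bh_highpri_wq instead */
	queue_work(system_bh_wq, &host->finish_bh_work);
	return IRQ_HANDLED;
}

static int foo_probe(struct foo_host *host)
{
	/* was: tasklet_setup(&host->finish_tasklet, ...); */
	INIT_WORK(&host->finish_bh_work, foo_finish_bh_work);
	return 0;
}

static void foo_remove(struct foo_host *host)
{
	/* was: tasklet_kill(); this too waits for a running callback */
	cancel_work_sync(&host->finish_bh_work);
}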
- struct dw_mci *host = from_tasklet(host, t, tasklet); + struct dw_mci *host = from_work(host, t, bh_work); struct mmc_data *data; struct mmc_command *cmd; struct mmc_request *mrq; @@ -2113,7 +2119,7 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t) * will waste a bit of time (we already know * the command was bad), it can't cause any * errors since it's possible it would have - * taken place anyway if this tasklet got + * taken place anyway if this bh work got * delayed. Allowing the transfer to take place * avoids races and keeps things simple. */ @@ -2706,7 +2712,7 @@ static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status) smp_wmb(); /* drain writebuffer */ set_bit(EVENT_CMD_COMPLETE, &host->pending_events); - tasklet_schedule(&host->tasklet); + queue_work(system_bh_wq, &host->bh_work); dw_mci_start_fault_timer(host); } @@ -2774,7 +2780,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) set_bit(EVENT_DATA_COMPLETE, &host->pending_events); - tasklet_schedule(&host->tasklet); + queue_work(system_bh_wq, &host->bh_work); spin_unlock(&host->irq_lock); } @@ -2793,7 +2799,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) dw_mci_read_data_pio(host, true); } set_bit(EVENT_DATA_COMPLETE, &host->pending_events); - tasklet_schedule(&host->tasklet); + queue_work(system_bh_wq, &host->bh_work); spin_unlock(&host->irq_lock); } @@ -3098,7 +3104,7 @@ static void dw_mci_cmd11_timer(struct timer_list *t) host->cmd_status = SDMMC_INT_RTO; set_bit(EVENT_CMD_COMPLETE, &host->pending_events); - tasklet_schedule(&host->tasklet); + queue_work(system_bh_wq, &host->bh_work); } static void dw_mci_cto_timer(struct timer_list *t) @@ -3144,7 +3150,7 @@ static void dw_mci_cto_timer(struct timer_list *t) */ host->cmd_status = SDMMC_INT_RTO; set_bit(EVENT_CMD_COMPLETE, &host->pending_events); - tasklet_schedule(&host->tasklet); + queue_work(system_bh_wq, &host->bh_work); break; default: dev_warn(host->dev, "Unexpected command timeout, state %d\n", @@ -3195,7 +3201,7 @@ static void dw_mci_dto_timer(struct timer_list *t) host->data_status = SDMMC_INT_DRTO; set_bit(EVENT_DATA_ERROR, &host->pending_events); set_bit(EVENT_DATA_COMPLETE, &host->pending_events); - tasklet_schedule(&host->tasklet); + queue_work(system_bh_wq, &host->bh_work); break; default: dev_warn(host->dev, "Unexpected data timeout, state %d\n", @@ -3435,7 +3441,7 @@ int dw_mci_probe(struct dw_mci *host) else host->fifo_reg = host->regs + DATA_240A_OFFSET; - tasklet_setup(&host->tasklet, dw_mci_tasklet_func); + INIT_WORK(&host->bh_work, dw_mci_work_func); ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt, host->irq_flags, "dw-mci", host); if (ret) diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h index 4ed81f94f7ca..6447b916990d 100644 --- a/drivers/mmc/host/dw_mmc.h +++ b/drivers/mmc/host/dw_mmc.h @@ -17,6 +17,7 @@ #include <linux/fault-inject.h> #include <linux/hrtimer.h> #include <linux/interrupt.h> +#include <linux/workqueue.h> enum dw_mci_state { STATE_IDLE = 0, @@ -89,12 +90,12 @@ struct dw_mci_dma_slave { * @stop_cmdr: Value to be loaded into CMDR when the stop command is * to be sent. * @dir_status: Direction of current transfer. - * @tasklet: Tasklet running the request state machine. + * @bh_work: Work running the request state machine. * @pending_events: Bitmask of events flagged by the interrupt handler - * to be processed by the tasklet. + * to be processed by bh work. * @completed_events: Bitmask of events which the state machine has * processed. 
- * @state: Tasklet state. + * @state: BH work state. * @queue: List of slots waiting for access to the controller. * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus * rate and timeout calculations. @@ -194,7 +195,7 @@ struct dw_mci { u32 data_status; u32 stop_cmdr; u32 dir_status; - struct tasklet_struct tasklet; + struct work_struct bh_work; unsigned long pending_events; unsigned long completed_events; enum dw_mci_state state; @@ -565,6 +566,7 @@ struct dw_mci_slot { * @execute_tuning: implementation specific tuning procedure. * @set_data_timeout: implementation specific timeout. * @get_drto_clks: implementation specific cycle count for data read timeout. + * @hw_reset: implementation specific HW reset. * * Provide controller implementation specific extensions. The usage of this * data structure is fully optional and usage of each member in this structure @@ -585,5 +587,6 @@ struct dw_mci_drv_data { void (*set_data_timeout)(struct dw_mci *host, unsigned int timeout_ns); u32 (*get_drto_clks)(struct dw_mci *host); + void (*hw_reset)(struct dw_mci *host); }; #endif /* _DW_MMC_H_ */ diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index 09d7a6a0dc1a..c9caa1ece7ef 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c @@ -1208,7 +1208,10 @@ static int mmc_spi_probe(struct spi_device *spi) * that's the only reason not to use a few MHz for f_min (until * the upper layer reads the target frequency from the CSD). */ - mmc->f_min = 400000; + if (spi->controller->min_speed_hz > 400000) + dev_warn(&spi->dev,"Controller unable to reduce bus clock to 400 KHz\n"); + + mmc->f_min = max(spi->controller->min_speed_hz, 400000); mmc->f_max = spi->max_speed_hz; host = mmc_priv(mmc); diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c index bf54776fb26c..05939f30a5ae 100644 --- a/drivers/mmc/host/of_mmc_spi.c +++ b/drivers/mmc/host/of_mmc_spi.c @@ -19,6 +19,7 @@ #include <linux/mmc/core.h> #include <linux/mmc/host.h> +MODULE_DESCRIPTION("OpenFirmware bindings for the MMC-over-SPI driver"); MODULE_LICENSE("GPL"); struct of_mmc_spi { diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c index a8ee0df47148..335350a4e99a 100644 --- a/drivers/mmc/host/omap.c +++ b/drivers/mmc/host/omap.c @@ -28,6 +28,7 @@ #include <linux/slab.h> #include <linux/gpio/consumer.h> #include <linux/platform_data/mmc-omap.h> +#include <linux/workqueue.h> #define OMAP_MMC_REG_CMD 0x00 @@ -105,7 +106,7 @@ struct mmc_omap_slot { u16 power_mode; unsigned int fclk_freq; - struct tasklet_struct cover_tasklet; + struct work_struct cover_bh_work; struct timer_list cover_timer; unsigned cover_open; @@ -873,18 +874,18 @@ void omap_mmc_notify_cover_event(struct device *dev, int num, int is_closed) sysfs_notify(&slot->mmc->class_dev.kobj, NULL, "cover_switch"); } - tasklet_hi_schedule(&slot->cover_tasklet); + queue_work(system_bh_highpri_wq, &slot->cover_bh_work); } static void mmc_omap_cover_timer(struct timer_list *t) { struct mmc_omap_slot *slot = from_timer(slot, t, cover_timer); - tasklet_schedule(&slot->cover_tasklet); + queue_work(system_bh_wq, &slot->cover_bh_work); } -static void mmc_omap_cover_handler(struct tasklet_struct *t) +static void mmc_omap_cover_bh_handler(struct work_struct *t) { - struct mmc_omap_slot *slot = from_tasklet(slot, t, cover_tasklet); + struct mmc_omap_slot *slot = from_work(slot, t, cover_bh_work); int cover_open = mmc_omap_cover_is_open(slot); mmc_detect_change(slot->mmc, 0); @@ -1314,7 +1315,7 @@ static int 
mmc_omap_new_slot(struct mmc_omap_host *host, int id) if (slot->pdata->get_cover_state != NULL) { timer_setup(&slot->cover_timer, mmc_omap_cover_timer, 0); - tasklet_setup(&slot->cover_tasklet, mmc_omap_cover_handler); + INIT_WORK(&slot->cover_bh_work, mmc_omap_cover_bh_handler); } r = mmc_add_host(mmc); @@ -1333,7 +1334,7 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id) &dev_attr_cover_switch); if (r < 0) goto err_remove_slot_name; - tasklet_schedule(&slot->cover_tasklet); + queue_work(system_bh_wq, &slot->cover_bh_work); } return 0; @@ -1356,7 +1357,7 @@ static void mmc_omap_remove_slot(struct mmc_omap_slot *slot) if (slot->pdata->get_cover_state != NULL) device_remove_file(&mmc->class_dev, &dev_attr_cover_switch); - tasklet_kill(&slot->cover_tasklet); + cancel_work_sync(&slot->cover_bh_work); del_timer_sync(&slot->cover_timer); flush_workqueue(slot->host->mmc_omap_wq); diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h index 586f94d4dbfd..f12a87442338 100644 --- a/drivers/mmc/host/renesas_sdhi.h +++ b/drivers/mmc/host/renesas_sdhi.h @@ -11,6 +11,7 @@ #include <linux/dmaengine.h> #include <linux/platform_device.h> +#include <linux/workqueue.h> #include "tmio_mmc.h" struct renesas_sdhi_scc { @@ -67,7 +68,7 @@ struct renesas_sdhi_dma { dma_filter_fn filter; void (*enable)(struct tmio_mmc_host *host, bool enable); struct completion dma_dataend; - struct tasklet_struct dma_complete; + struct work_struct dma_complete; }; struct renesas_sdhi { @@ -93,6 +94,7 @@ struct renesas_sdhi { unsigned int tap_set; struct reset_control *rstc; + struct tmio_mmc_host *host; }; #define host_to_priv(host) \ diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index 12f4faaaf4ee..04874791541f 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ b/drivers/mmc/host/renesas_sdhi_core.c @@ -970,6 +970,8 @@ int renesas_sdhi_probe(struct platform_device *pdev, if (IS_ERR(host)) return PTR_ERR(host); + priv->host = host; + if (of_data) { mmc_data->flags |= of_data->tmio_flags; mmc_data->ocr_mask = of_data->tmio_ocr_mask; @@ -1162,4 +1164,5 @@ void renesas_sdhi_remove(struct platform_device *pdev) } EXPORT_SYMBOL_GPL(renesas_sdhi_remove); +MODULE_DESCRIPTION("Renesas SDHI core driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c index 422fa63a2e99..d4b66daeda66 100644 --- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c @@ -337,7 +337,7 @@ static bool renesas_sdhi_internal_dmac_dma_irq(struct tmio_mmc_host *host) writel(status ^ dma_irqs, host->ctl + DM_CM_INFO1); set_bit(SDHI_DMA_END_FLAG_DMA, &dma_priv->end_flags); if (test_bit(SDHI_DMA_END_FLAG_ACCESS, &dma_priv->end_flags)) - tasklet_schedule(&dma_priv->dma_complete); + queue_work(system_bh_wq, &dma_priv->dma_complete); } return status & dma_irqs; @@ -352,7 +352,7 @@ renesas_sdhi_internal_dmac_dataend_dma(struct tmio_mmc_host *host) set_bit(SDHI_DMA_END_FLAG_ACCESS, &dma_priv->end_flags); if (test_bit(SDHI_DMA_END_FLAG_DMA, &dma_priv->end_flags) || host->data->error) - tasklet_schedule(&dma_priv->dma_complete); + queue_work(system_bh_wq, &dma_priv->dma_complete); } /* @@ -440,9 +440,9 @@ force_pio: renesas_sdhi_internal_dmac_enable_dma(host, false); } -static void renesas_sdhi_internal_dmac_issue_tasklet_fn(unsigned long arg) +static void renesas_sdhi_internal_dmac_issue_work_fn(struct work_struct *work) { - struct tmio_mmc_host *host = 
(struct tmio_mmc_host *)arg; + struct tmio_mmc_host *host = from_work(host, work, dma_issue); struct renesas_sdhi *priv = host_to_priv(host); tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND); @@ -454,7 +454,7 @@ static void renesas_sdhi_internal_dmac_issue_tasklet_fn(unsigned long arg) /* on CMD errors, simulate DMA end immediately */ set_bit(SDHI_DMA_END_FLAG_DMA, &priv->dma_priv.end_flags); if (test_bit(SDHI_DMA_END_FLAG_ACCESS, &priv->dma_priv.end_flags)) - tasklet_schedule(&priv->dma_priv.dma_complete); + queue_work(system_bh_wq, &priv->dma_priv.dma_complete); } } @@ -484,9 +484,11 @@ static bool renesas_sdhi_internal_dmac_complete(struct tmio_mmc_host *host) return true; } -static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg) +static void renesas_sdhi_internal_dmac_complete_work_fn(struct work_struct *work) { - struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg; + struct renesas_sdhi_dma *dma_priv = from_work(dma_priv, work, dma_complete); + struct renesas_sdhi *priv = container_of(dma_priv, typeof(*priv), dma_priv); + struct tmio_mmc_host *host = priv->host; spin_lock_irq(&host->lock); if (!renesas_sdhi_internal_dmac_complete(host)) @@ -544,12 +546,10 @@ renesas_sdhi_internal_dmac_request_dma(struct tmio_mmc_host *host, /* Each value is set to non-zero to assume "enabling" each DMA */ host->chan_rx = host->chan_tx = (void *)0xdeadbeaf; - tasklet_init(&priv->dma_priv.dma_complete, - renesas_sdhi_internal_dmac_complete_tasklet_fn, - (unsigned long)host); - tasklet_init(&host->dma_issue, - renesas_sdhi_internal_dmac_issue_tasklet_fn, - (unsigned long)host); + INIT_WORK(&priv->dma_priv.dma_complete, + renesas_sdhi_internal_dmac_complete_work_fn); + INIT_WORK(&host->dma_issue, + renesas_sdhi_internal_dmac_issue_work_fn); /* Add pre_req and post_req */ host->ops.pre_req = renesas_sdhi_internal_dmac_pre_req; diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c index 9cf7f9feab72..5a6f41318645 100644 --- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c @@ -312,9 +312,9 @@ static void renesas_sdhi_sys_dmac_start_dma(struct tmio_mmc_host *host, } } -static void renesas_sdhi_sys_dmac_issue_tasklet_fn(unsigned long priv) +static void renesas_sdhi_sys_dmac_issue_work_fn(struct work_struct *work) { - struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv; + struct tmio_mmc_host *host = from_work(host, work, dma_issue); struct dma_chan *chan = NULL; spin_lock_irq(&host->lock); @@ -401,9 +401,8 @@ static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host, goto ebouncebuf; init_completion(&priv->dma_priv.dma_dataend); - tasklet_init(&host->dma_issue, - renesas_sdhi_sys_dmac_issue_tasklet_fn, - (unsigned long)host); + INIT_WORK(&host->dma_issue, + renesas_sdhi_sys_dmac_issue_work_fn); } renesas_sdhi_sys_dmac_enable_dma(host, true); diff --git a/drivers/mmc/host/sdhci-bcm-kona.c b/drivers/mmc/host/sdhci-bcm-kona.c index cb9152c6a65d..e067c7f5c537 100644 --- a/drivers/mmc/host/sdhci-bcm-kona.c +++ b/drivers/mmc/host/sdhci-bcm-kona.c @@ -107,7 +107,7 @@ static void sdhci_bcm_kona_sd_init(struct sdhci_host *host) * Software emulation of the SD card insertion/removal. Set insert=1 for insert * and insert=0 for removal. The card detection is done by GPIO. For Broadcom * IP to function properly the bit 0 of CORESTAT register needs to be set/reset - * to generate the CD IRQ handled in sdhci.c which schedules card_tasklet. 
+* to generate the CD IRQ handled in sdhci.c */ static int sdhci_bcm_kona_sd_card_emulate(struct sdhci_host *host, int insert) { diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c index 150fb477b7cc..031a4b514d16 100644 --- a/drivers/mmc/host/sdhci-brcmstb.c +++ b/drivers/mmc/host/sdhci-brcmstb.c @@ -31,6 +31,21 @@ #define SDHCI_ARASAN_CQE_BASE_ADDR 0x200 +#define SDIO_CFG_CQ_CAPABILITY 0x4c +#define SDIO_CFG_CQ_CAPABILITY_FMUL GENMASK(13, 12) + +#define SDIO_CFG_CTRL 0x0 +#define SDIO_CFG_CTRL_SDCD_N_TEST_EN BIT(31) +#define SDIO_CFG_CTRL_SDCD_N_TEST_LEV BIT(30) + +#define SDIO_CFG_MAX_50MHZ_MODE 0x1ac +#define SDIO_CFG_MAX_50MHZ_MODE_STRAP_OVERRIDE BIT(31) +#define SDIO_CFG_MAX_50MHZ_MODE_ENABLE BIT(0) + +#define MMC_CAP_HSE_MASK (MMC_CAP2_HSX00_1_2V | MMC_CAP2_HSX00_1_8V) +/* Select all SD UHS type I SDR speed above 50MB/s */ +#define MMC_CAP_UHS_I_SDR_MASK (MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104) + struct sdhci_brcmstb_priv { void __iomem *cfg_regs; unsigned int flags; @@ -39,6 +54,7 @@ struct sdhci_brcmstb_priv { }; struct brcmstb_match_priv { + void (*cfginit)(struct sdhci_host *host); void (*hs400es)(struct mmc_host *mmc, struct mmc_ios *ios); struct sdhci_ops *ops; const unsigned int flags; @@ -169,6 +185,33 @@ static void sdhci_brcmstb_set_uhs_signaling(struct sdhci_host *host, sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); } +static void sdhci_brcmstb_cfginit_2712(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_brcmstb_priv *brcmstb_priv = sdhci_pltfm_priv(pltfm_host); + u32 reg; + + /* + * If we support a speed that requires tuning, + * then select the delay line PHY as the clock source. + */ + if ((host->mmc->caps & MMC_CAP_UHS_I_SDR_MASK) || (host->mmc->caps2 & MMC_CAP_HSE_MASK)) { + reg = readl(brcmstb_priv->cfg_regs + SDIO_CFG_MAX_50MHZ_MODE); + reg &= ~SDIO_CFG_MAX_50MHZ_MODE_ENABLE; + reg |= SDIO_CFG_MAX_50MHZ_MODE_STRAP_OVERRIDE; + writel(reg, brcmstb_priv->cfg_regs + SDIO_CFG_MAX_50MHZ_MODE); + } + + if ((host->mmc->caps & MMC_CAP_NONREMOVABLE) || + (host->mmc->caps & MMC_CAP_NEEDS_POLL)) { + /* Force presence */ + reg = readl(brcmstb_priv->cfg_regs + SDIO_CFG_CTRL); + reg &= ~SDIO_CFG_CTRL_SDCD_N_TEST_LEV; + reg |= SDIO_CFG_CTRL_SDCD_N_TEST_EN; + writel(reg, brcmstb_priv->cfg_regs + SDIO_CFG_CTRL); + } +} + static void sdhci_brcmstb_dumpregs(struct mmc_host *mmc) { sdhci_dumpregs(mmc_priv(mmc)); @@ -201,6 +244,14 @@ static struct sdhci_ops sdhci_brcmstb_ops = { .set_uhs_signaling = sdhci_set_uhs_signaling, }; +static struct sdhci_ops sdhci_brcmstb_ops_2712 = { + .set_clock = sdhci_set_clock, + .set_power = sdhci_set_power_and_bus_voltage, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, +}; + static struct sdhci_ops sdhci_brcmstb_ops_7216 = { .set_clock = sdhci_brcmstb_set_clock, .set_bus_width = sdhci_set_bus_width, @@ -215,6 +266,11 @@ static struct sdhci_ops sdhci_brcmstb_ops_74165b0 = { .set_uhs_signaling = sdhci_brcmstb_set_uhs_signaling, }; +static const struct brcmstb_match_priv match_priv_2712 = { + .cfginit = sdhci_brcmstb_cfginit_2712, + .ops = &sdhci_brcmstb_ops_2712, +}; + static struct brcmstb_match_priv match_priv_7425 = { .flags = BRCMSTB_MATCH_FLAGS_NO_64BIT | BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT, @@ -239,6 +295,7 @@ static struct brcmstb_match_priv match_priv_74165b0 = { }; static const struct of_device_id __maybe_unused sdhci_brcm_of_match[] = { + { .compatible = "brcm,bcm2712-sdhci", .data = &match_priv_2712 
}, { .compatible = "brcm,bcm7425-sdhci", .data = &match_priv_7425 }, { .compatible = "brcm,bcm7445-sdhci", .data = &match_priv_7445 }, { .compatible = "brcm,bcm7216-sdhci", .data = &match_priv_7216 }, @@ -371,6 +428,9 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev) (host->mmc->caps2 & MMC_CAP2_HS400_ES)) host->mmc_host_ops.hs400_enhanced_strobe = match_priv->hs400es; + if (match_priv->cfginit) + match_priv->cfginit(host); + /* * Supply the existing CAPS, but clear the UHS modes. This * will allow these modes to be specified by device tree diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index 40a6e2f8145a..8f0bc6dca2b0 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -201,6 +201,9 @@ /* ERR004536 is not applicable for the IP */ #define ESDHC_FLAG_SKIP_ERR004536 BIT(17) +/* The IP does not have GPIO CD wake capabilities */ +#define ESDHC_FLAG_SKIP_CD_WAKE BIT(18) + enum wp_types { ESDHC_WP_NONE, /* no WP, neither controller nor gpio */ ESDHC_WP_CONTROLLER, /* mmc controller internal WP */ @@ -298,7 +301,7 @@ static struct esdhc_soc_data usdhc_s32g2_data = { .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_MAN_TUNING | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200 | ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES - | ESDHC_FLAG_SKIP_ERR004536, + | ESDHC_FLAG_SKIP_ERR004536 | ESDHC_FLAG_SKIP_CD_WAKE, }; static struct esdhc_soc_data usdhc_imx7ulp_data = { @@ -1706,7 +1709,6 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) } pltfm_host->clk = imx_data->clk_per; - pltfm_host->clock = clk_get_rate(pltfm_host->clk); err = clk_prepare_enable(imx_data->clk_per); if (err) goto free_sdhci; @@ -1717,6 +1719,13 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) if (err) goto disable_ipg_clk; + pltfm_host->clock = clk_get_rate(pltfm_host->clk); + if (!pltfm_host->clock) { + dev_err(mmc_dev(host->mmc), "could not get clk rate\n"); + err = -EINVAL; + goto disable_ahb_clk; + } + imx_data->pinctrl = devm_pinctrl_get(&pdev->dev); if (IS_ERR(imx_data->pinctrl)) dev_warn(mmc_dev(host->mmc), "could not get pinctrl\n"); @@ -1726,7 +1735,8 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) host->mmc->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR; /* GPIO CD can be set as a wakeup source */ - host->mmc->caps |= MMC_CAP_CD_WAKE; + if (!(imx_data->socdata->flags & ESDHC_FLAG_SKIP_CD_WAKE)) + host->mmc->caps |= MMC_CAP_CD_WAKE; if (!(imx_data->socdata->flags & ESDHC_FLAG_HS200)) host->quirks2 |= SDHCI_QUIRK2_BROKEN_HS200; diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c index 39edf04fedcf..e79aa4b3b6c3 100644 --- a/drivers/mmc/host/sdhci-of-dwcmshc.c +++ b/drivers/mmc/host/sdhci-of-dwcmshc.c @@ -908,6 +908,7 @@ static const struct sdhci_ops sdhci_dwcmshc_rk35xx_ops = { .get_max_clock = rk35xx_get_max_clock, .reset = rk35xx_sdhci_reset, .adma_write_desc = dwcmshc_adma_write_desc, + .irq = dwcmshc_cqe_irq_handler, }; static const struct sdhci_ops sdhci_dwcmshc_th1520_ops = { diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 23e6ba70144c..ed45ed0bdafd 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -1319,6 +1319,23 @@ static const struct sdhci_pci_fixes sdhci_intel_mrfld_mmc = { .probe_slot = intel_mrfld_mmc_probe_slot, }; +#define JMB388_SAMPLE_COUNT 5 + +static int jmicron_jmb388_get_ro(struct mmc_host *mmc) +{ + int i, ro_count; + + ro_count = 0; + for (i = 0; i < JMB388_SAMPLE_COUNT; 
i++) { + if (sdhci_get_ro(mmc) > 0) { + if (++ro_count > JMB388_SAMPLE_COUNT / 2) + return 1; + } + msleep(30); + } + return 0; +} + static int jmicron_pmos(struct sdhci_pci_chip *chip, int on) { u8 scratch; @@ -1403,11 +1420,6 @@ static int jmicron_probe(struct sdhci_pci_chip *chip) return ret; } - /* quirk for unsable RO-detection on JM388 chips */ - if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD || - chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) - chip->quirks |= SDHCI_QUIRK_UNSTABLE_RO_DETECT; - return 0; } @@ -1462,6 +1474,11 @@ static int jmicron_probe_slot(struct sdhci_pci_slot *slot) slot->host->mmc->caps |= MMC_CAP_BUS_WIDTH_TEST; + /* Handle unstable RO-detection on JM388 chips */ + if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD || + slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) + slot->host->mmc_host_ops.get_ro = jmicron_jmb388_get_ro; + return 0; } diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 112584aa0772..4b91c9e96635 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -2513,8 +2513,9 @@ out: } EXPORT_SYMBOL_GPL(sdhci_get_cd_nogpio); -static int sdhci_check_ro(struct sdhci_host *host) +int sdhci_get_ro(struct mmc_host *mmc) { + struct sdhci_host *host = mmc_priv(mmc); bool allow_invert = false; int is_readonly; @@ -2522,10 +2523,10 @@ static int sdhci_check_ro(struct sdhci_host *host) is_readonly = 0; } else if (host->ops->get_ro) { is_readonly = host->ops->get_ro(host); - } else if (mmc_can_gpio_ro(host->mmc)) { - is_readonly = mmc_gpio_get_ro(host->mmc); + } else if (mmc_can_gpio_ro(mmc)) { + is_readonly = mmc_gpio_get_ro(mmc); /* Do not invert twice */ - allow_invert = !(host->mmc->caps2 & MMC_CAP2_RO_ACTIVE_HIGH); + allow_invert = !(mmc->caps2 & MMC_CAP2_RO_ACTIVE_HIGH); } else { is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_WRITE_PROTECT); @@ -2539,27 +2540,7 @@ static int sdhci_check_ro(struct sdhci_host *host) return is_readonly; } - -#define SAMPLE_COUNT 5 - -static int sdhci_get_ro(struct mmc_host *mmc) -{ - struct sdhci_host *host = mmc_priv(mmc); - int i, ro_count; - - if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT)) - return sdhci_check_ro(host); - - ro_count = 0; - for (i = 0; i < SAMPLE_COUNT; i++) { - if (sdhci_check_ro(host)) { - if (++ro_count > SAMPLE_COUNT / 2) - return 1; - } - msleep(30); - } - return 0; -} +EXPORT_SYMBOL_GPL(sdhci_get_ro); static void sdhci_hw_reset(struct mmc_host *mmc) { @@ -4727,6 +4708,21 @@ int sdhci_setup_host(struct sdhci_host *host) if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) { host->max_adma = 65532; /* 32-bit alignment */ mmc->max_seg_size = 65535; + /* + * sdhci_adma_table_pre() expects to define 1 DMA + * descriptor per segment, so the maximum segment size + * is set accordingly. SDHCI allows up to 64KiB per DMA + * descriptor (16-bit field), but some controllers do + * not support "zero means 65536" reducing the maximum + * for them to 65535. That is a problem if PAGE_SIZE is + * 64KiB because the block layer does not support + * max_seg_size < PAGE_SIZE, however + * sdhci_adma_table_pre() has a workaround to handle + * that case, and split the descriptor. Refer also + * comment in sdhci_adma_table_pre(). 
+ */ + if (mmc->max_seg_size < PAGE_SIZE) + mmc->max_seg_size = PAGE_SIZE; } else { mmc->max_seg_size = 65536; } diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 957c7a917ffb..f531b617f28d 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -437,8 +437,6 @@ struct sdhci_host { #define SDHCI_QUIRK_NO_HISPD_BIT (1<<29) /* Controller treats ADMA descriptors with length 0000h incorrectly */ #define SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC (1<<30) -/* The read-only detection via SDHCI_PRESENT_STATE register is unstable */ -#define SDHCI_QUIRK_UNSTABLE_RO_DETECT (1<<31) unsigned int quirks2; /* More deviations from spec. */ @@ -788,6 +786,7 @@ void sdhci_set_power_and_bus_voltage(struct sdhci_host *host, void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode, unsigned short vdd); int sdhci_get_cd_nogpio(struct mmc_host *mmc); +int sdhci_get_ro(struct mmc_host *mmc); void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq); int sdhci_request_atomic(struct mmc_host *mmc, struct mmc_request *mrq); void sdhci_set_bus_width(struct sdhci_host *host, int width); diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c index 17ad32cfc0c3..64e10f7c9faa 100644 --- a/drivers/mmc/host/sdhci_am654.c +++ b/drivers/mmc/host/sdhci_am654.c @@ -90,7 +90,7 @@ /* Command Queue Host Controller Interface Base address */ #define SDHCI_AM654_CQE_BASE_ADDR 0x200 -static struct regmap_config sdhci_am654_regmap_config = { +static const struct regmap_config sdhci_am654_regmap_config = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c index b5a2f2f25ad9..aea14bf3e2e8 100644 --- a/drivers/mmc/host/tifm_sd.c +++ b/drivers/mmc/host/tifm_sd.c @@ -13,6 +13,7 @@ #include <linux/highmem.h> #include <linux/scatterlist.h> #include <linux/module.h> +#include <linux/workqueue.h> #include <asm/io.h> #define DRIVER_NAME "tifm_sd" @@ -97,7 +98,7 @@ struct tifm_sd { unsigned int clk_div; unsigned long timeout_jiffies; - struct tasklet_struct finish_tasklet; + struct work_struct finish_bh_work; struct timer_list timer; struct mmc_request *req; @@ -463,7 +464,7 @@ static void tifm_sd_check_status(struct tifm_sd *host) } } finish_request: - tasklet_schedule(&host->finish_tasklet); + queue_work(system_bh_wq, &host->finish_bh_work); } /* Called from interrupt handler */ @@ -723,9 +724,9 @@ err_out: mmc_request_done(mmc, mrq); } -static void tifm_sd_end_cmd(struct tasklet_struct *t) +static void tifm_sd_end_cmd(struct work_struct *t) { - struct tifm_sd *host = from_tasklet(host, t, finish_tasklet); + struct tifm_sd *host = from_work(host, t, finish_bh_work); struct tifm_dev *sock = host->dev; struct mmc_host *mmc = tifm_get_drvdata(sock); struct mmc_request *mrq; @@ -960,7 +961,7 @@ static int tifm_sd_probe(struct tifm_dev *sock) */ mmc->max_busy_timeout = TIFM_MMCSD_REQ_TIMEOUT_MS; - tasklet_setup(&host->finish_tasklet, tifm_sd_end_cmd); + INIT_WORK(&host->finish_bh_work, tifm_sd_end_cmd); timer_setup(&host->timer, tifm_sd_abort, 0); mmc->ops = &tifm_sd_ops; @@ -999,7 +1000,7 @@ static void tifm_sd_remove(struct tifm_dev *sock) writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE); spin_unlock_irqrestore(&sock->lock, flags); - tasklet_kill(&host->finish_tasklet); + cancel_work_sync(&host->finish_bh_work); spin_lock_irqsave(&sock->lock, flags); if (host->req) { @@ -1009,7 +1010,7 @@ static void tifm_sd_remove(struct tifm_dev *sock) host->req->cmd->error = -ENOMEDIUM; if (host->req->stop) 
host->req->stop->error = -ENOMEDIUM; - tasklet_schedule(&host->finish_tasklet); + queue_work(system_bh_wq, &host->finish_bh_work); } spin_unlock_irqrestore(&sock->lock, flags); mmc_remove_host(mmc); diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h index de56e6534aea..a75755f31d31 100644 --- a/drivers/mmc/host/tmio_mmc.h +++ b/drivers/mmc/host/tmio_mmc.h @@ -21,6 +21,7 @@ #include <linux/scatterlist.h> #include <linux/spinlock.h> #include <linux/interrupt.h> +#include <linux/workqueue.h> #define CTL_SD_CMD 0x00 #define CTL_ARG_REG 0x04 @@ -139,9 +140,6 @@ struct tmio_mmc_host { struct mmc_host *mmc; struct mmc_host_ops ops; - /* Callbacks for clock / power control */ - void (*set_pwr)(struct platform_device *host, int state); - /* pio related stuff */ struct scatterlist *sg_ptr; struct scatterlist *sg_orig; @@ -156,7 +154,7 @@ struct tmio_mmc_host { bool dma_on; struct dma_chan *chan_rx; struct dma_chan *chan_tx; - struct tasklet_struct dma_issue; + struct work_struct dma_issue; struct scatterlist bounce_sg; u8 *bounce_buf; diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c index 93e912afd3ae..b61a6310311d 100644 --- a/drivers/mmc/host/tmio_mmc_core.c +++ b/drivers/mmc/host/tmio_mmc_core.c @@ -608,7 +608,7 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, unsigned int stat) } else { tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP); - tasklet_schedule(&host->dma_issue); + queue_work(system_bh_wq, &host->dma_issue); } } else { if (!host->dma_on) { @@ -616,7 +616,7 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, unsigned int stat) } else { tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_WRITEOP); - tasklet_schedule(&host->dma_issue); + queue_work(system_bh_wq, &host->dma_issue); } } } else { @@ -880,9 +880,6 @@ static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd) /* .set_ios() is returning void, so, no chance to report an error */ - if (host->set_pwr) - host->set_pwr(host->pdev, 1); - if (!IS_ERR(mmc->supply.vmmc)) { ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); /* @@ -916,9 +913,6 @@ static void tmio_mmc_power_off(struct tmio_mmc_host *host) if (!IS_ERR(mmc->supply.vmmc)) mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); - - if (host->set_pwr) - host->set_pwr(host->pdev, 0); } static unsigned int tmio_mmc_get_timeout_cycles(struct tmio_mmc_host *host) @@ -1160,8 +1154,6 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host) if (pdata->flags & TMIO_MMC_USE_BUSY_TIMEOUT && !_host->get_timeout_cycles) _host->get_timeout_cycles = tmio_mmc_get_timeout_cycles; - _host->set_pwr = pdata->set_pwr; - ret = tmio_mmc_init_ocr(_host); if (ret < 0) return ret; @@ -1319,4 +1311,5 @@ int tmio_mmc_host_runtime_resume(struct device *dev) EXPORT_SYMBOL_GPL(tmio_mmc_host_runtime_resume); #endif +MODULE_DESCRIPTION("TMIO MMC core driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/uniphier-sd.c b/drivers/mmc/host/uniphier-sd.c index 1404989e6151..417fd13efdfd 100644 --- a/drivers/mmc/host/uniphier-sd.c +++ b/drivers/mmc/host/uniphier-sd.c @@ -90,9 +90,9 @@ static void uniphier_sd_dma_endisable(struct tmio_mmc_host *host, int enable) } /* external DMA engine */ -static void uniphier_sd_external_dma_issue(struct tasklet_struct *t) +static void uniphier_sd_external_dma_issue(struct work_struct *t) { - struct tmio_mmc_host *host = from_tasklet(host, t, dma_issue); + struct tmio_mmc_host *host = from_work(host, t, dma_issue); struct uniphier_sd_priv *priv = uniphier_sd_priv(host); 
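The sdhci_setup_host() comment a few hunks up (the BROKEN_ADMA_ZEROLEN_DESC case) is easier to follow with concrete numbers: on a 64 KiB-page kernel the block layer does not accept max_seg_size < PAGE_SIZE, yet one ADMA descriptor there carries at most 65532 bytes, so sdhci_adma_table_pre() has to emit an extra descriptor for the tail. A standalone illustration of that split — plain userspace C invented for this note; only the two constants come from the hunk:

#include <stdio.h>

int main(void)
{
	unsigned int seg_len = 65536;	/* PAGE_SIZE on a 64 KiB-page kernel */
	unsigned int max_adma = 65532;	/* 32-bit aligned limit from the hunk */
	unsigned int offset = 0, ndesc = 0;

	/* conceptually what sdhci_adma_table_pre() must do: any piece
	 * larger than max_adma is split into an extra descriptor */
	while (offset < seg_len) {
		unsigned int len = seg_len - offset;

		if (len > max_adma)
			len = max_adma;
		printf("desc %u: offset %5u, len %5u\n", ndesc++, offset, len);
		offset += len;
	}
	/* prints two descriptors: 65532 bytes, then the 4-byte tail */
	return 0;
}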
uniphier_sd_dma_endisable(host, 1); @@ -199,7 +199,7 @@ static void uniphier_sd_external_dma_request(struct tmio_mmc_host *host, host->chan_rx = chan; host->chan_tx = chan; - tasklet_setup(&host->dma_issue, uniphier_sd_external_dma_issue); + INIT_WORK(&host->dma_issue, uniphier_sd_external_dma_issue); } static void uniphier_sd_external_dma_release(struct tmio_mmc_host *host) @@ -236,9 +236,9 @@ static const struct tmio_mmc_dma_ops uniphier_sd_external_dma_ops = { .dataend = uniphier_sd_external_dma_dataend, }; -static void uniphier_sd_internal_dma_issue(struct tasklet_struct *t) +static void uniphier_sd_internal_dma_issue(struct work_struct *t) { - struct tmio_mmc_host *host = from_tasklet(host, t, dma_issue); + struct tmio_mmc_host *host = from_work(host, t, dma_issue); unsigned long flags; spin_lock_irqsave(&host->lock, flags); @@ -317,7 +317,7 @@ static void uniphier_sd_internal_dma_request(struct tmio_mmc_host *host, host->chan_tx = (void *)0xdeadbeaf; - tasklet_setup(&host->dma_issue, uniphier_sd_internal_dma_issue); + INIT_WORK(&host->dma_issue, uniphier_sd_internal_dma_issue); } static void uniphier_sd_internal_dma_release(struct tmio_mmc_host *host) diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c index ba6044b16e07..f77457105ec3 100644 --- a/drivers/mmc/host/via-sdmmc.c +++ b/drivers/mmc/host/via-sdmmc.c @@ -12,6 +12,7 @@ #include <linux/interrupt.h> #include <linux/mmc/host.h> +#include <linux/workqueue.h> #define DRV_NAME "via_sdmmc" @@ -307,7 +308,7 @@ struct via_crdr_mmc_host { struct sdhcreg pm_sdhc_reg; struct work_struct carddet_work; - struct tasklet_struct finish_tasklet; + struct work_struct finish_bh_work; struct timer_list timer; spinlock_t lock; @@ -643,7 +644,7 @@ static void via_sdc_finish_data(struct via_crdr_mmc_host *host) if (data->stop) via_sdc_send_command(host, data->stop); else - tasklet_schedule(&host->finish_tasklet); + queue_work(system_bh_wq, &host->finish_bh_work); } static void via_sdc_finish_command(struct via_crdr_mmc_host *host) @@ -653,7 +654,7 @@ static void via_sdc_finish_command(struct via_crdr_mmc_host *host) host->cmd->error = 0; if (!host->cmd->data) - tasklet_schedule(&host->finish_tasklet); + queue_work(system_bh_wq, &host->finish_bh_work); host->cmd = NULL; } @@ -682,7 +683,7 @@ static void via_sdc_request(struct mmc_host *mmc, struct mmc_request *mrq) status = readw(host->sdhc_mmiobase + VIA_CRDR_SDSTATUS); if (!(status & VIA_CRDR_SDSTS_SLOTG) || host->reject) { host->mrq->cmd->error = -ENOMEDIUM; - tasklet_schedule(&host->finish_tasklet); + queue_work(system_bh_wq, &host->finish_bh_work); } else { via_sdc_send_command(host, mrq->cmd); } @@ -848,7 +849,7 @@ static void via_sdc_cmd_isr(struct via_crdr_mmc_host *host, u16 intmask) host->cmd->error = -EILSEQ; if (host->cmd->error) - tasklet_schedule(&host->finish_tasklet); + queue_work(system_bh_wq, &host->finish_bh_work); else if (intmask & VIA_CRDR_SDSTS_CRD) via_sdc_finish_command(host); } @@ -955,16 +956,16 @@ static void via_sdc_timeout(struct timer_list *t) sdhost->cmd->error = -ETIMEDOUT; else sdhost->mrq->cmd->error = -ETIMEDOUT; - tasklet_schedule(&sdhost->finish_tasklet); + queue_work(system_bh_wq, &sdhost->finish_bh_work); } } spin_unlock_irqrestore(&sdhost->lock, flags); } -static void via_sdc_tasklet_finish(struct tasklet_struct *t) +static void via_sdc_finish_bh_work(struct work_struct *t) { - struct via_crdr_mmc_host *host = from_tasklet(host, t, finish_tasklet); + struct via_crdr_mmc_host *host = from_work(host, t, finish_bh_work); unsigned long flags; 
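A behavioural note on the removal paths in this range (tifm_sd_remove() above, via_sd_remove() and wbsd_release_irq() below): cancel_work_sync(), like tasklet_kill(), waits for a callback that is already running, but it cannot stop the IRQ handler or a timer from queueing the work again, so the submission sources are quiesced first. A hedged sketch of that ordering, with invented names and a hypothetical register:

#include <linux/io.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

#define BAZ_INT_ENABLE	0x34	/* hypothetical register offset */

struct baz_host {
	void __iomem *regs;
	struct timer_list timer;
	struct work_struct finish_bh_work;
};

static void baz_shutdown(struct baz_host *host)
{
	/* 1. mask the hardware so the IRQ handler stops queueing work */
	writel(0, host->regs + BAZ_INT_ENABLE);

	/* 2. the timeout timer can also queue the work; stop it before
	 *    the final cancel */
	del_timer_sync(&host->timer);

	/* 3. nothing can requeue now: flush out anything still queued
	 *    or running (the cancel_work_sync() counterpart of
	 *    tasklet_kill()) */
	cancel_work_sync(&host->finish_bh_work);
}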
struct mmc_request *mrq; @@ -1005,7 +1006,7 @@ static void via_sdc_card_detect(struct work_struct *work) pr_err("%s: Card removed during transfer!\n", mmc_hostname(host->mmc)); host->mrq->cmd->error = -ENOMEDIUM; - tasklet_schedule(&host->finish_tasklet); + queue_work(system_bh_wq, &host->finish_bh_work); } spin_unlock_irqrestore(&host->lock, flags); @@ -1051,7 +1052,7 @@ static void via_init_mmc_host(struct via_crdr_mmc_host *host) INIT_WORK(&host->carddet_work, via_sdc_card_detect); - tasklet_setup(&host->finish_tasklet, via_sdc_tasklet_finish); + INIT_WORK(&host->finish_bh_work, via_sdc_finish_bh_work); addrbase = host->sdhc_mmiobase; writel(0x0, addrbase + VIA_CRDR_SDINTMASK); @@ -1193,7 +1194,7 @@ static void via_sd_remove(struct pci_dev *pcidev) sdhost->mrq->cmd->error = -ENOMEDIUM; if (sdhost->mrq->stop) sdhost->mrq->stop->error = -ENOMEDIUM; - tasklet_schedule(&sdhost->finish_tasklet); + queue_work(system_bh_wq, &sdhost->finish_bh_work); } spin_unlock_irqrestore(&sdhost->lock, flags); @@ -1203,7 +1204,7 @@ static void via_sd_remove(struct pci_dev *pcidev) del_timer_sync(&sdhost->timer); - tasklet_kill(&sdhost->finish_tasklet); + cancel_work_sync(&sdhost->finish_bh_work); /* switch off power */ gatt = readb(sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT); diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c index f0562f712d98..6e20405d0430 100644 --- a/drivers/mmc/host/wbsd.c +++ b/drivers/mmc/host/wbsd.c @@ -459,7 +459,7 @@ static void wbsd_empty_fifo(struct wbsd_host *host) * FIFO threshold interrupts properly. */ if ((data->blocks * data->blksz - data->bytes_xfered) < 16) - tasklet_schedule(&host->fifo_tasklet); + queue_work(system_bh_wq, &host->fifo_bh_work); } static void wbsd_fill_fifo(struct wbsd_host *host) @@ -524,7 +524,7 @@ static void wbsd_fill_fifo(struct wbsd_host *host) * 'FIFO empty' under certain conditions. So we * need to be a bit more pro-active. */ - tasklet_schedule(&host->fifo_tasklet); + queue_work(system_bh_wq, &host->fifo_bh_work); } static void wbsd_prepare_data(struct wbsd_host *host, struct mmc_data *data) @@ -746,7 +746,7 @@ static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq) struct mmc_command *cmd; /* - * Disable tasklets to avoid a deadlock. + * Disable bh works to avoid a deadlock. */ spin_lock_bh(&host->lock); @@ -821,7 +821,7 @@ static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq) * Dirty fix for hardware bug. */ if (host->dma == -1) - tasklet_schedule(&host->fifo_tasklet); + queue_work(system_bh_wq, &host->fifo_bh_work); spin_unlock_bh(&host->lock); @@ -961,13 +961,13 @@ static void wbsd_reset_ignore(struct timer_list *t) * Card status might have changed during the * blackout. 
*/ - tasklet_schedule(&host->card_tasklet); + queue_work(system_bh_wq, &host->card_bh_work); spin_unlock_bh(&host->lock); } /* - * Tasklets + * BH Works */ static inline struct mmc_data *wbsd_get_data(struct wbsd_host *host) @@ -987,9 +987,9 @@ static inline struct mmc_data *wbsd_get_data(struct wbsd_host *host) return host->mrq->cmd->data; } -static void wbsd_tasklet_card(struct tasklet_struct *t) +static void wbsd_card_bh_work(struct work_struct *t) { - struct wbsd_host *host = from_tasklet(host, t, card_tasklet); + struct wbsd_host *host = from_work(host, t, card_bh_work); u8 csr; int delay = -1; @@ -1020,7 +1020,7 @@ static void wbsd_tasklet_card(struct tasklet_struct *t) wbsd_reset(host); host->mrq->cmd->error = -ENOMEDIUM; - tasklet_schedule(&host->finish_tasklet); + queue_work(system_bh_wq, &host->finish_bh_work); } delay = 0; @@ -1036,9 +1036,9 @@ static void wbsd_tasklet_card(struct tasklet_struct *t) mmc_detect_change(host->mmc, msecs_to_jiffies(delay)); } -static void wbsd_tasklet_fifo(struct tasklet_struct *t) +static void wbsd_fifo_bh_work(struct work_struct *t) { - struct wbsd_host *host = from_tasklet(host, t, fifo_tasklet); + struct wbsd_host *host = from_work(host, t, fifo_bh_work); struct mmc_data *data; spin_lock(&host->lock); @@ -1060,16 +1060,16 @@ static void wbsd_tasklet_fifo(struct tasklet_struct *t) */ if (host->num_sg == 0) { wbsd_write_index(host, WBSD_IDX_FIFOEN, 0); - tasklet_schedule(&host->finish_tasklet); + queue_work(system_bh_wq, &host->finish_bh_work); } end: spin_unlock(&host->lock); } -static void wbsd_tasklet_crc(struct tasklet_struct *t) +static void wbsd_crc_bh_work(struct work_struct *t) { - struct wbsd_host *host = from_tasklet(host, t, crc_tasklet); + struct wbsd_host *host = from_work(host, t, crc_bh_work); struct mmc_data *data; spin_lock(&host->lock); @@ -1085,15 +1085,15 @@ static void wbsd_tasklet_crc(struct tasklet_struct *t) data->error = -EILSEQ; - tasklet_schedule(&host->finish_tasklet); + queue_work(system_bh_wq, &host->finish_bh_work); end: spin_unlock(&host->lock); } -static void wbsd_tasklet_timeout(struct tasklet_struct *t) +static void wbsd_timeout_bh_work(struct work_struct *t) { - struct wbsd_host *host = from_tasklet(host, t, timeout_tasklet); + struct wbsd_host *host = from_work(host, t, timeout_bh_work); struct mmc_data *data; spin_lock(&host->lock); @@ -1109,15 +1109,15 @@ static void wbsd_tasklet_timeout(struct tasklet_struct *t) data->error = -ETIMEDOUT; - tasklet_schedule(&host->finish_tasklet); + queue_work(system_bh_wq, &host->finish_bh_work); end: spin_unlock(&host->lock); } -static void wbsd_tasklet_finish(struct tasklet_struct *t) +static void wbsd_finish_bh_work(struct work_struct *t) { - struct wbsd_host *host = from_tasklet(host, t, finish_tasklet); + struct wbsd_host *host = from_work(host, t, finish_bh_work); struct mmc_data *data; spin_lock(&host->lock); @@ -1156,18 +1156,18 @@ static irqreturn_t wbsd_irq(int irq, void *dev_id) host->isr |= isr; /* - * Schedule tasklets as needed. + * Schedule bh work as needed. 
*/ if (isr & WBSD_INT_CARD) - tasklet_schedule(&host->card_tasklet); + queue_work(system_bh_wq, &host->card_bh_work); if (isr & WBSD_INT_FIFO_THRE) - tasklet_schedule(&host->fifo_tasklet); + queue_work(system_bh_wq, &host->fifo_bh_work); if (isr & WBSD_INT_CRC) - tasklet_hi_schedule(&host->crc_tasklet); + queue_work(system_bh_highpri_wq, &host->crc_bh_work); if (isr & WBSD_INT_TIMEOUT) - tasklet_hi_schedule(&host->timeout_tasklet); + queue_work(system_bh_highpri_wq, &host->timeout_bh_work); if (isr & WBSD_INT_TC) - tasklet_schedule(&host->finish_tasklet); + queue_work(system_bh_wq, &host->finish_bh_work); return IRQ_HANDLED; } @@ -1443,13 +1443,13 @@ static int wbsd_request_irq(struct wbsd_host *host, int irq) int ret; /* - * Set up tasklets. Must be done before requesting interrupt. + * Set up bh works. Must be done before requesting interrupt. */ - tasklet_setup(&host->card_tasklet, wbsd_tasklet_card); - tasklet_setup(&host->fifo_tasklet, wbsd_tasklet_fifo); - tasklet_setup(&host->crc_tasklet, wbsd_tasklet_crc); - tasklet_setup(&host->timeout_tasklet, wbsd_tasklet_timeout); - tasklet_setup(&host->finish_tasklet, wbsd_tasklet_finish); + INIT_WORK(&host->card_bh_work, wbsd_card_bh_work); + INIT_WORK(&host->fifo_bh_work, wbsd_fifo_bh_work); + INIT_WORK(&host->crc_bh_work, wbsd_crc_bh_work); + INIT_WORK(&host->timeout_bh_work, wbsd_timeout_bh_work); + INIT_WORK(&host->finish_bh_work, wbsd_finish_bh_work); /* * Allocate interrupt. @@ -1472,11 +1472,11 @@ static void wbsd_release_irq(struct wbsd_host *host) host->irq = 0; - tasklet_kill(&host->card_tasklet); - tasklet_kill(&host->fifo_tasklet); - tasklet_kill(&host->crc_tasklet); - tasklet_kill(&host->timeout_tasklet); - tasklet_kill(&host->finish_tasklet); + cancel_work_sync(&host->card_bh_work); + cancel_work_sync(&host->fifo_bh_work); + cancel_work_sync(&host->crc_bh_work); + cancel_work_sync(&host->timeout_bh_work); + cancel_work_sync(&host->finish_bh_work); } /* diff --git a/drivers/mmc/host/wbsd.h b/drivers/mmc/host/wbsd.h index be30b4d8ce4c..970886831305 100644 --- a/drivers/mmc/host/wbsd.h +++ b/drivers/mmc/host/wbsd.h @@ -171,11 +171,11 @@ struct wbsd_host int irq; /* Interrupt */ int dma; /* DMA channel */ - struct tasklet_struct card_tasklet; /* Tasklet structures */ - struct tasklet_struct fifo_tasklet; - struct tasklet_struct crc_tasklet; - struct tasklet_struct timeout_tasklet; - struct tasklet_struct finish_tasklet; + struct work_struct card_bh_work; /* Work structures */ + struct work_struct fifo_bh_work; + struct work_struct crc_bh_work; + struct work_struct timeout_bh_work; + struct work_struct finish_bh_work; struct timer_list ignore_timer; /* Ignore detection timer */ }; diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index 3caa0717d46c..47ead84407cd 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c @@ -336,6 +336,8 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new) lim.logical_block_size = tr->blksize; if (tr->discard) lim.max_hw_discard_sectors = UINT_MAX; + if (tr->flush) + lim.features |= BLK_FEAT_WRITE_CACHE; /* Create gendisk */ gd = blk_mq_alloc_disk(new->tag_set, &lim, new); @@ -372,13 +374,6 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new) /* Create the request queue */ spin_lock_init(&new->queue_lock); INIT_LIST_HEAD(&new->rq_list); - - if (tr->flush) - blk_queue_write_cache(new->rq, true, false); - - blk_queue_flag_set(QUEUE_FLAG_NONROT, new->rq); - blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, new->rq); - gd->queue = new->rq; if (new->readonly) diff --git 
a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig index cbf8ae85e1ae..614257308516 100644 --- a/drivers/mtd/nand/raw/Kconfig +++ b/drivers/mtd/nand/raw/Kconfig @@ -234,8 +234,7 @@ config MTD_NAND_FSL_IFC tristate "Freescale IFC NAND controller" depends on FSL_SOC || ARCH_LAYERSCAPE || SOC_LS1021A || COMPILE_TEST depends on HAS_IOMEM - select FSL_IFC - select MEMORY + depends on FSL_IFC help Various Freescale chips e.g P1010, include a NAND Flash machine with built-in hardware ECC capabilities. diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index d7dbbd469b89..53e16d39af4b 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -1093,28 +1093,32 @@ static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs, unsigned int offset_in_page) { struct mtd_info *mtd = nand_to_mtd(chip); + bool ident_stage = !mtd->writesize; - /* Make sure the offset is less than the actual page size. */ - if (offset_in_page > mtd->writesize + mtd->oobsize) - return -EINVAL; + /* Bypass all checks during NAND identification */ + if (likely(!ident_stage)) { + /* Make sure the offset is less than the actual page size. */ + if (offset_in_page > mtd->writesize + mtd->oobsize) + return -EINVAL; - /* - * On small page NANDs, there's a dedicated command to access the OOB - * area, and the column address is relative to the start of the OOB - * area, not the start of the page. Asjust the address accordingly. - */ - if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize) - offset_in_page -= mtd->writesize; + /* + * On small page NANDs, there's a dedicated command to access the OOB + * area, and the column address is relative to the start of the OOB + * area, not the start of the page. Asjust the address accordingly. + */ + if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize) + offset_in_page -= mtd->writesize; - /* - * The offset in page is expressed in bytes, if the NAND bus is 16-bit - * wide, then it must be divided by 2. - */ - if (chip->options & NAND_BUSWIDTH_16) { - if (WARN_ON(offset_in_page % 2)) - return -EINVAL; + /* + * The offset in page is expressed in bytes, if the NAND bus is 16-bit + * wide, then it must be divided by 2. + */ + if (chip->options & NAND_BUSWIDTH_16) { + if (WARN_ON(offset_in_page % 2)) + return -EINVAL; - offset_in_page /= 2; + offset_in_page /= 2; + } } addrs[0] = offset_in_page; @@ -1123,7 +1127,7 @@ static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs, * Small page NANDs use 1 cycle for the columns, while large page NANDs * need 2 */ - if (mtd->writesize <= 512) + if (!ident_stage && mtd->writesize <= 512) return 1; addrs[1] = offset_in_page >> 8; @@ -1436,16 +1440,19 @@ int nand_change_read_column_op(struct nand_chip *chip, unsigned int len, bool force_8bit) { struct mtd_info *mtd = nand_to_mtd(chip); + bool ident_stage = !mtd->writesize; if (len && !buf) return -EINVAL; - if (offset_in_page + len > mtd->writesize + mtd->oobsize) - return -EINVAL; + if (!ident_stage) { + if (offset_in_page + len > mtd->writesize + mtd->oobsize) + return -EINVAL; - /* Small page NANDs do not support column change. */ - if (mtd->writesize <= 512) - return -ENOTSUPP; + /* Small page NANDs do not support column change. 
*/ + if (mtd->writesize <= 512) + return -ENOTSUPP; + } if (nand_has_exec_op(chip)) { const struct nand_interface_config *conf = @@ -2173,7 +2180,7 @@ EXPORT_SYMBOL_GPL(nand_reset_op); int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len, bool force_8bit, bool check_only) { - if (!len || !buf) + if (!len || (!check_only && !buf)) return -EINVAL; if (nand_has_exec_op(chip)) { @@ -6301,6 +6308,7 @@ static const struct nand_ops rawnand_ops = { static int nand_scan_tail(struct nand_chip *chip) { struct mtd_info *mtd = nand_to_mtd(chip); + struct nand_device *base = &chip->base; struct nand_ecc_ctrl *ecc = &chip->ecc; int ret, i; @@ -6445,9 +6453,13 @@ static int nand_scan_tail(struct nand_chip *chip) if (!ecc->write_oob_raw) ecc->write_oob_raw = ecc->write_oob; - /* propagate ecc info to mtd_info */ + /* Propagate ECC info to the generic NAND and MTD layers */ mtd->ecc_strength = ecc->strength; + if (!base->ecc.ctx.conf.strength) + base->ecc.ctx.conf.strength = ecc->strength; mtd->ecc_step_size = ecc->size; + if (!base->ecc.ctx.conf.step_size) + base->ecc.ctx.conf.step_size = ecc->size; /* * Set the number of read / write steps for one page depending on ECC @@ -6455,6 +6467,8 @@ static int nand_scan_tail(struct nand_chip *chip) */ if (!ecc->steps) ecc->steps = mtd->writesize / ecc->size; + if (!base->ecc.ctx.nsteps) + base->ecc.ctx.nsteps = ecc->steps; if (ecc->steps * ecc->size != mtd->writesize) { WARN(1, "Invalid ECC parameters\n"); ret = -EINVAL; diff --git a/drivers/mtd/nand/raw/rockchip-nand-controller.c b/drivers/mtd/nand/raw/rockchip-nand-controller.c index 7baaef69d70a..55580447633b 100644 --- a/drivers/mtd/nand/raw/rockchip-nand-controller.c +++ b/drivers/mtd/nand/raw/rockchip-nand-controller.c @@ -420,13 +420,13 @@ static int rk_nfc_setup_interface(struct nand_chip *chip, int target, u32 rate, tc2rw, trwpw, trw2c; u32 temp; - if (target < 0) - return 0; - timings = nand_get_sdr_timings(conf); if (IS_ERR(timings)) return -EOPNOTSUPP; + if (target < 0) + return 0; + if (IS_ERR(nfc->nfc_clk)) rate = clk_get_rate(nfc->ahb_clk); else diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index 0cacd7027e35..bc80fb6397dc 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -1214,9 +1214,9 @@ static int bond_option_arp_ip_targets_set(struct bonding *bond, __be32 target; if (newval->string) { - if (!in4_pton(newval->string+1, -1, (u8 *)&target, -1, NULL)) { - netdev_err(bond->dev, "invalid ARP target %pI4 specified\n", - &target); + if (strlen(newval->string) < 1 || + !in4_pton(newval->string + 1, -1, (u8 *)&target, -1, NULL)) { + netdev_err(bond->dev, "invalid ARP target specified\n"); return ret; } if (newval->string[0] == '+') diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c index 7292c81fc0cd..024169461cad 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c @@ -125,6 +125,7 @@ static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leaf_err_liste static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leafimx = { .quirks = 0, + .family = KVASER_LEAF, .ops = &kvaser_usb_leaf_dev_ops, }; diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index 02f07b870f10..268949939636 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -1047,31 +1047,31 @@ static int lan9303_get_sset_count(struct dsa_switch *ds, 
int port, int sset) return ARRAY_SIZE(lan9303_mib); } -static int lan9303_phy_read(struct dsa_switch *ds, int phy, int regnum) +static int lan9303_phy_read(struct dsa_switch *ds, int port, int regnum) { struct lan9303 *chip = ds->priv; int phy_base = chip->phy_addr_base; - if (phy == phy_base) + if (port == 0) return lan9303_virt_phy_reg_read(chip, regnum); - if (phy > phy_base + 2) + if (port > 2) return -ENODEV; - return chip->ops->phy_read(chip, phy, regnum); + return chip->ops->phy_read(chip, phy_base + port, regnum); } -static int lan9303_phy_write(struct dsa_switch *ds, int phy, int regnum, +static int lan9303_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val) { struct lan9303 *chip = ds->priv; int phy_base = chip->phy_addr_base; - if (phy == phy_base) + if (port == 0) return lan9303_virt_phy_reg_write(chip, regnum, val); - if (phy > phy_base + 2) + if (port > 2) return -ENODEV; - return chip->ops->phy_write(chip, phy, regnum, val); + return chip->ops->phy_write(chip, phy_base + port, regnum, val); } static int lan9303_port_enable(struct dsa_switch *ds, int port, @@ -1099,7 +1099,7 @@ static void lan9303_port_disable(struct dsa_switch *ds, int port) vlan_vid_del(dsa_port_to_conduit(dp), htons(ETH_P_8021Q), port); lan9303_disable_processing_port(chip, port); - lan9303_phy_write(ds, chip->phy_addr_base + port, MII_BMCR, BMCR_PDOWN); + lan9303_phy_write(ds, port, MII_BMCR, BMCR_PDOWN); } static int lan9303_port_bridge_join(struct dsa_switch *ds, int port, @@ -1374,8 +1374,6 @@ static const struct dsa_switch_ops lan9303_switch_ops = { static int lan9303_register_switch(struct lan9303 *chip) { - int base; - chip->ds = devm_kzalloc(chip->dev, sizeof(*chip->ds), GFP_KERNEL); if (!chip->ds) return -ENOMEM; @@ -1385,8 +1383,7 @@ static int lan9303_register_switch(struct lan9303 *chip) chip->ds->priv = chip; chip->ds->ops = &lan9303_switch_ops; chip->ds->phylink_mac_ops = &lan9303_phylink_mac_ops; - base = chip->phy_addr_base; - chip->ds->phys_mii_mask = GENMASK(LAN9303_NUM_PORTS - 1 + base, base); + chip->ds->phys_mii_mask = GENMASK(LAN9303_NUM_PORTS - 1, 0); return dsa_register_switch(chip->ds); } diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c index a806dadc4196..20c6529ec135 100644 --- a/drivers/net/ethernet/broadcom/asp2/bcmasp.c +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c @@ -1380,6 +1380,7 @@ static int bcmasp_probe(struct platform_device *pdev) dev_err(dev, "Cannot create eth interface %d\n", i); bcmasp_remove_intfs(priv); of_node_put(intf_node); + ret = -ENOMEM; goto of_put_exit; } list_add_tail(&intf->list, &priv->intfs); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index e2a4e1088b7f..9580ab83d387 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1262,7 +1262,7 @@ enum { struct bnx2x_fw_stats_req { struct stats_query_header hdr; - struct stats_query_entry query[FP_SB_MAX_E1x+ + struct stats_query_entry query[FP_SB_MAX_E2 + BNX2X_FIRST_QUEUE_QUERY_IDX]; }; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index a6d69a45fa01..43952689bfb0 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -6146,6 +6146,24 @@ static u16 bnxt_get_max_rss_ring(struct bnxt *bp) return max_ring; } +u16 bnxt_get_max_rss_ctx_ring(struct bnxt *bp) +{ + u16 i, tbl_size, max_ring = 0; + struct bnxt_rss_ctx *rss_ctx; + + 
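The lan9303 hunks just above change the .phy_read()/.phy_write() convention from "MDIO address" to "DSA port index": the strap-dependent phy_addr_base offset is now applied inside the callbacks, which is what lets phys_mii_mask collapse to a constant GENMASK(LAN9303_NUM_PORTS - 1, 0). A small sketch of the resulting mapping (the helper is invented; the constants and arithmetic mirror the hunk):

#include <linux/bits.h>
#include <linux/errno.h>

#define LAN9303_NUM_PORTS 3

/*
 * Port 0 is served by the switch's virtual PHY; ports 1 and 2 sit on
 * the MDIO bus at phy_addr_base + port, where phy_addr_base depends
 * on the address strap pins.
 */
static int lan9303_port_to_mdio_addr(int phy_addr_base, int port)
{
	if (port <= 0 || port >= LAN9303_NUM_PORTS)
		return -ENODEV;
	return phy_addr_base + port;
}

/*
 * Because the callbacks now take port numbers, the mask of "PHYs" the
 * DSA core may touch no longer depends on the strapping:
 *   before: GENMASK(LAN9303_NUM_PORTS - 1 + base, base)  e.g. 0x1c for base 2
 *   after:  GENMASK(LAN9303_NUM_PORTS - 1, 0)            always 0x07
 */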
if (!BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) + return 0; + + tbl_size = bnxt_get_rxfh_indir_size(bp->dev); + + list_for_each_entry(rss_ctx, &bp->rss_ctx_list, list) { + for (i = 0; i < tbl_size; i++) + max_ring = max(max_ring, rss_ctx->rss_indir_tbl[i]); + } + + return max_ring; +} + int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings) { if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { @@ -12669,7 +12687,11 @@ bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx) if (!BNXT_NEW_RM(bp)) return true; - if (hwr.vnic == bp->hw_resc.resv_vnics && + /* Do not reduce VNIC and RSS ctx reservations. There is a FW + * issue that will mess up the default VNIC if we reduce the + * reservations. + */ + if (hwr.vnic <= bp->hw_resc.resv_vnics && hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs) return true; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 9cf0acfa04e5..6b10a09ee1af 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -2776,6 +2776,7 @@ int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic, void bnxt_fill_ipv6_mask(__be32 mask[4]); int bnxt_alloc_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx); void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx); +u16 bnxt_get_max_rss_ctx_ring(struct bnxt *bp); int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings); int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic); int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 8763f8a01457..79c09c1cdf93 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -961,6 +961,12 @@ static int bnxt_set_channels(struct net_device *dev, return rc; } + if (req_rx_rings < bp->rx_nr_rings && + req_rx_rings <= bnxt_get_max_rss_ctx_ring(bp)) { + netdev_warn(dev, "Can't deactivate rings used by RSS contexts\n"); + return -EINVAL; + } + if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) != bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) && netif_is_rxfh_configured(dev)) { diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 2e98a2a0bead..ce227b56cf72 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1109,6 +1109,46 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link) } /** + * e1000e_force_smbus - Force interfaces to transition to SMBUS mode. + * @hw: pointer to the HW structure + * + * Force the MAC and the PHY to SMBUS mode. Assumes semaphore already + * acquired. + * + * Return: 0 on success, negative errno on failure. 
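Aside on the bnxt hunk above: bnxt_get_max_rss_ctx_ring() walks every RSS context's indirection table for the highest ring index it references, and bnxt_set_channels() then refuses a ring-count reduction that would strand one of those rings. A minimal stand-alone C sketch of that veto logic (the table size, context count and indir[][] layout here are hypothetical stand-ins, not the driver's real structures):

    #include <stdio.h>

    #define TBL_SIZE 8
    #define NUM_CTX  2

    /* hypothetical per-context RSS indirection tables */
    static unsigned short indir[NUM_CTX][TBL_SIZE] = {
        { 0, 1, 2, 3, 0, 1, 2, 3 },   /* context 0 spreads over rings 0-3 */
        { 4, 5, 4, 5, 4, 5, 4, 5 },   /* context 1 pins rings 4-5 */
    };

    /* highest ring index referenced by any RSS context */
    static unsigned short max_rss_ctx_ring(void)
    {
        unsigned short max = 0;

        for (int c = 0; c < NUM_CTX; c++)
            for (int i = 0; i < TBL_SIZE; i++)
                if (indir[c][i] > max)
                    max = indir[c][i];
        return max;
    }

    int main(void)
    {
        int cur_rx_rings = 8, req_rx_rings = 4;

        /* mirror of the bnxt_set_channels() guard: a reduction must not
         * deactivate a ring still referenced by an RSS context */
        if (req_rx_rings < cur_rx_rings &&
            req_rx_rings <= max_rss_ctx_ring()) {
            fprintf(stderr, "can't deactivate rings used by RSS contexts\n");
            return 1;
        }
        return 0;
    }

With context 1 pinning rings 4-5, any request for 5 or fewer RX rings is rejected; 6 or more passes.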
+ **/ +static s32 e1000e_force_smbus(struct e1000_hw *hw) +{ + u16 smb_ctrl = 0; + u32 ctrl_ext; + s32 ret_val; + + /* Switching PHY interface always returns MDI error + * so disable retry mechanism to avoid wasting time + */ + e1000e_disable_phy_retry(hw); + + /* Force SMBus mode in the PHY */ + ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &smb_ctrl); + if (ret_val) { + e1000e_enable_phy_retry(hw); + return ret_val; + } + + smb_ctrl |= CV_SMB_CTRL_FORCE_SMBUS; + e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, smb_ctrl); + + e1000e_enable_phy_retry(hw); + + /* Force SMBus mode in the MAC */ + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_FORCE_SMBUS; + ew32(CTRL_EXT, ctrl_ext); + + return 0; +} + +/** * e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP * @hw: pointer to the HW structure * @to_sx: boolean indicating a system power state transition to Sx @@ -1165,6 +1205,14 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx) if (ret_val) goto out; + if (hw->mac.type != e1000_pch_mtp) { + ret_val = e1000e_force_smbus(hw); + if (ret_val) { + e_dbg("Failed to force SMBUS: %d\n", ret_val); + goto release; + } + } + /* Si workaround for ULP entry flow on i127/rev6 h/w. Enable * LPLU and disable Gig speed when entering ULP */ @@ -1225,27 +1273,12 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx) } release: - /* Switching PHY interface always returns MDI error - * so disable retry mechanism to avoid wasting time - */ - e1000e_disable_phy_retry(hw); - - /* Force SMBus mode in PHY */ - ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg); - if (ret_val) { - e1000e_enable_phy_retry(hw); - hw->phy.ops.release(hw); - goto out; + if (hw->mac.type == e1000_pch_mtp) { + ret_val = e1000e_force_smbus(hw); + if (ret_val) + e_dbg("Failed to force SMBUS over MTL system: %d\n", + ret_val); } - phy_reg |= CV_SMB_CTRL_FORCE_SMBUS; - e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg); - - e1000e_enable_phy_retry(hw); - - /* Force SMBus mode in MAC */ - mac_reg = er32(CTRL_EXT); - mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS; - ew32(CTRL_EXT, mac_reg); hw->phy.ops.release(hw); out: diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index da5c59daf8ba..3cd161c6672b 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -6363,49 +6363,49 @@ static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter) mac_data |= E1000_EXTCNF_CTRL_GATE_PHY_CFG; ew32(EXTCNF_CTRL, mac_data); - /* Enable the Dynamic Power Gating in the MAC */ - mac_data = er32(FEXTNVM7); - mac_data |= BIT(22); - ew32(FEXTNVM7, mac_data); - /* Disable disconnected cable conditioning for Power Gating */ mac_data = er32(DPGFR); mac_data |= BIT(2); ew32(DPGFR, mac_data); - /* Don't wake from dynamic Power Gating with clock request */ - mac_data = er32(FEXTNVM12); - mac_data |= BIT(12); - ew32(FEXTNVM12, mac_data); - - /* Ungate PGCB clock */ - mac_data = er32(FEXTNVM9); - mac_data &= ~BIT(28); - ew32(FEXTNVM9, mac_data); - - /* Enable K1 off to enable mPHY Power Gating */ - mac_data = er32(FEXTNVM6); - mac_data |= BIT(31); - ew32(FEXTNVM6, mac_data); - - /* Enable mPHY power gating for any link and speed */ - mac_data = er32(FEXTNVM8); - mac_data |= BIT(9); - ew32(FEXTNVM8, mac_data); - /* Enable the Dynamic Clock Gating in the DMA and MAC */ mac_data = er32(CTRL_EXT); mac_data |= E1000_CTRL_EXT_DMA_DYN_CLK_EN; ew32(CTRL_EXT, mac_data); - - /* No MAC DPG gating SLP_S0 in 
modern standby - * Switch the logic of the lanphypc to use PMC counter - */ - mac_data = er32(FEXTNVM5); - mac_data |= BIT(7); - ew32(FEXTNVM5, mac_data); } + /* Enable the Dynamic Power Gating in the MAC */ + mac_data = er32(FEXTNVM7); + mac_data |= BIT(22); + ew32(FEXTNVM7, mac_data); + + /* Don't wake from dynamic Power Gating with clock request */ + mac_data = er32(FEXTNVM12); + mac_data |= BIT(12); + ew32(FEXTNVM12, mac_data); + + /* Ungate PGCB clock */ + mac_data = er32(FEXTNVM9); + mac_data &= ~BIT(28); + ew32(FEXTNVM9, mac_data); + + /* Enable K1 off to enable mPHY Power Gating */ + mac_data = er32(FEXTNVM6); + mac_data |= BIT(31); + ew32(FEXTNVM6, mac_data); + + /* Enable mPHY power gating for any link and speed */ + mac_data = er32(FEXTNVM8); + mac_data |= BIT(9); + ew32(FEXTNVM8, mac_data); + + /* No MAC DPG gating SLP_S0 in modern standby + * Switch the logic of the lanphypc to use PMC counter + */ + mac_data = er32(FEXTNVM5); + mac_data |= BIT(7); + ew32(FEXTNVM5, mac_data); + /* Disable the time synchronization clock */ mac_data = er32(FEXTNVM7); mac_data |= BIT(31); @@ -6498,33 +6498,6 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter) } else { /* Request driver unconfigure the device from S0ix */ - /* Disable the Dynamic Power Gating in the MAC */ - mac_data = er32(FEXTNVM7); - mac_data &= 0xFFBFFFFF; - ew32(FEXTNVM7, mac_data); - - /* Disable mPHY power gating for any link and speed */ - mac_data = er32(FEXTNVM8); - mac_data &= ~BIT(9); - ew32(FEXTNVM8, mac_data); - - /* Disable K1 off */ - mac_data = er32(FEXTNVM6); - mac_data &= ~BIT(31); - ew32(FEXTNVM6, mac_data); - - /* Disable Ungate PGCB clock */ - mac_data = er32(FEXTNVM9); - mac_data |= BIT(28); - ew32(FEXTNVM9, mac_data); - - /* Cancel not waking from dynamic - * Power Gating with clock request - */ - mac_data = er32(FEXTNVM12); - mac_data &= ~BIT(12); - ew32(FEXTNVM12, mac_data); - /* Cancel disable disconnected cable conditioning * for Power Gating */ @@ -6537,13 +6510,6 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter) mac_data &= 0xFFF7FFFF; ew32(CTRL_EXT, mac_data); - /* Revert the lanphypc logic to use the internal Gbe counter - * and not the PMC counter - */ - mac_data = er32(FEXTNVM5); - mac_data &= 0xFFFFFF7F; - ew32(FEXTNVM5, mac_data); - /* Enable the periodic inband message, * Request PCIe clock in K1 page770_17[10:9] =01b */ @@ -6581,6 +6547,40 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter) mac_data &= ~BIT(31); mac_data |= BIT(0); ew32(FEXTNVM7, mac_data); + + /* Disable the Dynamic Power Gating in the MAC */ + mac_data = er32(FEXTNVM7); + mac_data &= 0xFFBFFFFF; + ew32(FEXTNVM7, mac_data); + + /* Disable mPHY power gating for any link and speed */ + mac_data = er32(FEXTNVM8); + mac_data &= ~BIT(9); + ew32(FEXTNVM8, mac_data); + + /* Disable K1 off */ + mac_data = er32(FEXTNVM6); + mac_data &= ~BIT(31); + ew32(FEXTNVM6, mac_data); + + /* Disable Ungate PGCB clock */ + mac_data = er32(FEXTNVM9); + mac_data |= BIT(28); + ew32(FEXTNVM9, mac_data); + + /* Cancel not waking from dynamic + * Power Gating with clock request + */ + mac_data = er32(FEXTNVM12); + mac_data &= ~BIT(12); + ew32(FEXTNVM12, mac_data); + + /* Revert the lanphypc logic to use the internal Gbe counter + * and not the PMC counter + */ + mac_data = er32(FEXTNVM5); + mac_data &= 0xFFFFFF7F; + ew32(FEXTNVM5, mac_data); } static int e1000e_pm_freeze(struct device *dev) diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c index 
bbcfd529399b..89d57dd911dc 100644 --- a/drivers/net/ethernet/intel/e1000e/ptp.c +++ b/drivers/net/ethernet/intel/e1000e/ptp.c @@ -124,7 +124,8 @@ static int e1000e_phc_get_syncdevicetime(ktime_t *device, sys_cycles = er32(PLTSTMPH); sys_cycles <<= 32; sys_cycles |= er32(PLTSTMPL); - *system = convert_art_to_tsc(sys_cycles); + system->cycles = sys_cycles; + system->cs_id = CSID_X86_ART; return 0; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h index ee86d2c53079..55b5bb884d73 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h @@ -109,10 +109,6 @@ static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc) -EFBIG, /* I40E_AQ_RC_EFBIG */ }; - /* aq_rc is invalid if AQ timed out */ - if (aq_ret == -EIO) - return -EAGAIN; - if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0])))) return -ERANGE; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 284c3fad5a6e..310513d9321b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -13293,6 +13293,10 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog, bool need_reset; int i; + /* VSI shall be deleted in a moment, block loading new programs */ + if (prog && test_bit(__I40E_IN_REMOVE, pf->state)) + return -EINVAL; + /* Don't allow frames that span over multiple buffers */ if (vsi->netdev->mtu > frame_size - I40E_PACKET_HDR_PAD) { NL_SET_ERR_MSG_MOD(extack, "MTU too large for linear frames and XDP prog does not support frags"); @@ -13301,14 +13305,9 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog, /* When turning XDP on->off/off->on we reset and rebuild the rings. 
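The e1000e/ptp.c change earlier in this hunk (and the matching ice, igc and stmmac hunks later in the series) reflects one pattern: instead of converting ART cycles to a TSC value in the driver, report the raw counter reading plus a clocksource ID and let the timekeeping core do the conversion. A rough stand-alone sketch of that shape (the struct and enum are simplified stand-ins for the kernel's system_counterval_t and CSID_X86_ART, not the real definitions):

    #include <stdint.h>
    #include <stdio.h>

    /* simplified stand-ins mirroring the kernel's types */
    enum clocksource_ids { CSID_GENERIC, CSID_X86_ART };

    struct system_counterval {
        uint64_t cycles;            /* raw counter reading */
        enum clocksource_ids cs_id; /* which counter it came from */
    };

    /* old style: the driver converted ART to TSC itself;
     * new style: hand back the raw ART value, tagged with its source */
    static void read_art_snapshot(struct system_counterval *sys,
                                  uint64_t hi, uint64_t lo)
    {
        sys->cycles = (hi << 32) | lo; /* e.g. PLTSTMPH/PLTSTMPL halves */
        sys->cs_id = CSID_X86_ART;
    }

    int main(void)
    {
        struct system_counterval sc;

        read_art_snapshot(&sc, 0x1, 0x2345); /* hypothetical register values */
        printf("cycles=%llu cs_id=%d\n",
               (unsigned long long)sc.cycles, sc.cs_id);
        return 0;
    }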
*/ need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog); - if (need_reset) i40e_prep_for_reset(pf); - /* VSI shall be deleted in a moment, just return EINVAL */ - if (test_bit(__I40E_IN_REMOVE, pf->state)) - return -EINVAL; - old_prog = xchg(&vsi->xdp_prog, prog); if (need_reset) { diff --git a/drivers/net/ethernet/intel/ice/ice_hwmon.c b/drivers/net/ethernet/intel/ice/ice_hwmon.c index e4c2c1bff6c0..b7aa6812510a 100644 --- a/drivers/net/ethernet/intel/ice/ice_hwmon.c +++ b/drivers/net/ethernet/intel/ice/ice_hwmon.c @@ -96,7 +96,7 @@ static bool ice_is_internal_reading_supported(struct ice_pf *pf) unsigned long sensors = pf->hw.dev_caps.supported_sensors; - return _test_bit(ICE_SENSOR_SUPPORT_E810_INT_TEMP_BIT, &sensors); + return test_bit(ICE_SENSOR_SUPPORT_E810_INT_TEMP_BIT, &sensors); }; void ice_hwmon_init(struct ice_pf *pf) diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index 0f17fc1181d2..927b623cedd5 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -1559,6 +1559,10 @@ void ice_ptp_extts_event(struct ice_pf *pf) u8 chan, tmr_idx; u32 hi, lo; + /* Don't process timestamp events if PTP is not ready */ + if (pf->ptp.state != ICE_PTP_READY) + return; + tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; /* Event time is captured by one of the two matched registers * GLTSYN_EVNT_L: 32 LSB of sampled time event @@ -1584,27 +1588,33 @@ void ice_ptp_extts_event(struct ice_pf *pf) /** * ice_ptp_cfg_extts - Configure EXTTS pin and channel * @pf: Board private structure - * @ena: true to enable; false to disable * @chan: GPIO channel (0-3) - * @gpio_pin: GPIO pin - * @extts_flags: request flags from the ptp_extts_request.flags + * @config: desired EXTTS configuration. + * @store: If set to true, the values will be stored + * + * Configure an external timestamp event on the requested channel. + * + * Return: 0 on success, -EOPNOTSUPP on unsupported flags */ -static int -ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin, - unsigned int extts_flags) +static int ice_ptp_cfg_extts(struct ice_pf *pf, unsigned int chan, + struct ice_extts_channel *config, bool store) { u32 func, aux_reg, gpio_reg, irq_reg; struct ice_hw *hw = &pf->hw; u8 tmr_idx; - if (chan > (unsigned int)pf->ptp.info.n_ext_ts) - return -EINVAL; + /* Reject requests with unsupported flags */ + if (config->flags & ~(PTP_ENABLE_FEATURE | + PTP_RISING_EDGE | + PTP_FALLING_EDGE | + PTP_STRICT_FLAGS)) + return -EOPNOTSUPP; tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; irq_reg = rd32(hw, PFINT_OICR_ENA); - if (ena) { + if (config->ena) { /* Enable the interrupt */ irq_reg |= PFINT_OICR_TSYN_EVNT_M; aux_reg = GLTSYN_AUX_IN_0_INT_ENA_M; @@ -1613,9 +1623,9 @@ ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin, #define GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE BIT(1) /* set event level to requested edge */ - if (extts_flags & PTP_FALLING_EDGE) + if (config->flags & PTP_FALLING_EDGE) aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE; - if (extts_flags & PTP_RISING_EDGE) + if (config->flags & PTP_RISING_EDGE) aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE; /* Write GPIO CTL reg.
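The flag check added above follows the standard PTP-driver idiom: whitelist the request flags the hardware can honor and fail everything else with -EOPNOTSUPP. A minimal sketch (the flag values are the linux/ptp_clock.h UAPI bits; the validate helper itself is illustrative):

    #include <errno.h>
    #include <stdio.h>

    /* UAPI external-timestamp request flags (linux/ptp_clock.h) */
    #define PTP_ENABLE_FEATURE (1 << 0)
    #define PTP_RISING_EDGE    (1 << 1)
    #define PTP_FALLING_EDGE   (1 << 2)
    #define PTP_STRICT_FLAGS   (1 << 3)

    #define SUPPORTED_EXTTS_FLAGS (PTP_ENABLE_FEATURE | PTP_RISING_EDGE | \
                                   PTP_FALLING_EDGE | PTP_STRICT_FLAGS)

    static int validate_extts_flags(unsigned int flags)
    {
        /* any bit outside the supported set fails the whole request */
        if (flags & ~SUPPORTED_EXTTS_FLAGS)
            return -EOPNOTSUPP;
        return 0;
    }

    int main(void)
    {
        printf("%d\n", validate_extts_flags(PTP_RISING_EDGE)); /* 0 */
        printf("%d\n", validate_extts_flags(1 << 5));          /* -EOPNOTSUPP */
        return 0;
    }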
@@ -1636,12 +1646,52 @@ ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin, wr32(hw, PFINT_OICR_ENA, irq_reg); wr32(hw, GLTSYN_AUX_IN(chan, tmr_idx), aux_reg); - wr32(hw, GLGEN_GPIO_CTL(gpio_pin), gpio_reg); + wr32(hw, GLGEN_GPIO_CTL(config->gpio_pin), gpio_reg); + + if (store) + memcpy(&pf->ptp.extts_channels[chan], config, sizeof(*config)); return 0; } /** + * ice_ptp_disable_all_extts - Disable all EXTTS channels + * @pf: Board private structure + */ +static void ice_ptp_disable_all_extts(struct ice_pf *pf) +{ + struct ice_extts_channel extts_cfg = {}; + int i; + + for (i = 0; i < pf->ptp.info.n_ext_ts; i++) { + if (pf->ptp.extts_channels[i].ena) { + extts_cfg.gpio_pin = pf->ptp.extts_channels[i].gpio_pin; + extts_cfg.ena = false; + ice_ptp_cfg_extts(pf, i, &extts_cfg, false); + } + } + + synchronize_irq(pf->oicr_irq.virq); +} + +/** + * ice_ptp_enable_all_extts - Enable all EXTTS channels + * @pf: Board private structure + * + * Called during reset to restore user configuration. + */ +static void ice_ptp_enable_all_extts(struct ice_pf *pf) +{ + int i; + + for (i = 0; i < pf->ptp.info.n_ext_ts; i++) { + if (pf->ptp.extts_channels[i].ena) + ice_ptp_cfg_extts(pf, i, &pf->ptp.extts_channels[i], + false); + } +} + +/** * ice_ptp_cfg_clkout - Configure clock to generate periodic wave * @pf: Board private structure * @chan: GPIO channel (0-3) @@ -1659,6 +1709,9 @@ static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan, u32 func, val, gpio_pin; u8 tmr_idx; + if (config && config->flags & ~PTP_PEROUT_PHASE) + return -EOPNOTSUPP; + tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; /* 0. Reset mode & out_en in AUX_OUT */ @@ -1795,17 +1848,18 @@ ice_ptp_gpio_enable_e810(struct ptp_clock_info *info, struct ptp_clock_request *rq, int on) { struct ice_pf *pf = ptp_info_to_pf(info); - struct ice_perout_channel clk_cfg = {0}; bool sma_pres = false; unsigned int chan; u32 gpio_pin; - int err; if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) sma_pres = true; switch (rq->type) { case PTP_CLK_REQ_PEROUT: + { + struct ice_perout_channel clk_cfg = {}; + chan = rq->perout.index; if (sma_pres) { if (chan == ice_pin_desc_e810t[SMA1].chan) @@ -1825,15 +1879,19 @@ ice_ptp_gpio_enable_e810(struct ptp_clock_info *info, clk_cfg.gpio_pin = chan; } + clk_cfg.flags = rq->perout.flags; clk_cfg.period = ((rq->perout.period.sec * NSEC_PER_SEC) + rq->perout.period.nsec); clk_cfg.start_time = ((rq->perout.start.sec * NSEC_PER_SEC) + rq->perout.start.nsec); clk_cfg.ena = !!on; - err = ice_ptp_cfg_clkout(pf, chan, &clk_cfg, true); - break; + return ice_ptp_cfg_clkout(pf, chan, &clk_cfg, true); + } case PTP_CLK_REQ_EXTTS: + { + struct ice_extts_channel extts_cfg = {}; + chan = rq->extts.index; if (sma_pres) { if (chan < ice_pin_desc_e810t[SMA2].chan) @@ -1849,14 +1907,15 @@ ice_ptp_gpio_enable_e810(struct ptp_clock_info *info, gpio_pin = chan; } - err = ice_ptp_cfg_extts(pf, !!on, chan, gpio_pin, - rq->extts.flags); - break; + extts_cfg.flags = rq->extts.flags; + extts_cfg.gpio_pin = gpio_pin; + extts_cfg.ena = !!on; + + return ice_ptp_cfg_extts(pf, chan, &extts_cfg, true); + } default: return -EOPNOTSUPP; } - - return err; } /** @@ -1869,26 +1928,32 @@ static int ice_ptp_gpio_enable_e823(struct ptp_clock_info *info, struct ptp_clock_request *rq, int on) { struct ice_pf *pf = ptp_info_to_pf(info); - struct ice_perout_channel clk_cfg = {0}; - int err; switch (rq->type) { case PTP_CLK_REQ_PPS: + { + struct ice_perout_channel clk_cfg = {}; + + clk_cfg.flags = rq->perout.flags; 
clk_cfg.gpio_pin = PPS_PIN_INDEX; clk_cfg.period = NSEC_PER_SEC; clk_cfg.ena = !!on; - err = ice_ptp_cfg_clkout(pf, PPS_CLK_GEN_CHAN, &clk_cfg, true); - break; + return ice_ptp_cfg_clkout(pf, PPS_CLK_GEN_CHAN, &clk_cfg, true); + } case PTP_CLK_REQ_EXTTS: - err = ice_ptp_cfg_extts(pf, !!on, rq->extts.index, - TIME_SYNC_PIN_INDEX, rq->extts.flags); - break; + { + struct ice_extts_channel extts_cfg = {}; + + extts_cfg.flags = rq->extts.flags; + extts_cfg.gpio_pin = TIME_SYNC_PIN_INDEX; + extts_cfg.ena = !!on; + + return ice_ptp_cfg_extts(pf, rq->extts.index, &extts_cfg, true); + } default: return -EOPNOTSUPP; } - - return err; } /** @@ -2091,7 +2156,8 @@ ice_ptp_get_syncdevicetime(ktime_t *device, hh_ts_lo = rd32(hw, GLHH_ART_TIME_L); hh_ts_hi = rd32(hw, GLHH_ART_TIME_H); hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo; - *system = convert_art_ns_to_tsc(hh_ts); + system->cycles = hh_ts; + system->cs_id = CSID_X86_ART; /* Read Device source clock time */ hh_ts_lo = rd32(hw, GLTSYN_HHTIME_L(tmr_idx)); hh_ts_hi = rd32(hw, GLTSYN_HHTIME_H(tmr_idx)); @@ -2720,6 +2786,10 @@ static int ice_ptp_rebuild_owner(struct ice_pf *pf) ice_ptp_restart_all_phy(pf); } + /* Re-enable all periodic outputs and external timestamp events */ + ice_ptp_enable_all_clkout(pf); + ice_ptp_enable_all_extts(pf); + return 0; } @@ -3275,6 +3345,8 @@ void ice_ptp_release(struct ice_pf *pf) ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx); + ice_ptp_disable_all_extts(pf); + kthread_cancel_delayed_work_sync(&pf->ptp.work); ice_ptp_port_phy_stop(&pf->ptp.port); diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h index 3af20025043a..e2af9749061c 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp.h @@ -29,10 +29,17 @@ enum ice_ptp_pin_e810t { struct ice_perout_channel { bool ena; u32 gpio_pin; + u32 flags; u64 period; u64 start_time; }; +struct ice_extts_channel { + bool ena; + u32 gpio_pin; + u32 flags; +}; + /* The ice hardware captures Tx hardware timestamps in the PHY. The timestamp * is stored in a buffer of registers. Depending on the specific hardware, * this buffer might be shared across multiple PHY ports. 
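The store/replay bookkeeping introduced above (the new store parameter plus the extts_channels[] array) is what lets ice_ptp_disable_all_extts() quiesce the pins on release and ice_ptp_enable_all_extts() restore user intent after a reset. A compact stand-alone model of that pattern (types, channel count and the hw_program_extts() stub are hypothetical):

    #include <stdbool.h>
    #include <stdio.h>

    #define N_CHANNELS 4

    struct extts_channel {
        bool ena;
        unsigned int gpio_pin;
        unsigned int flags;
    };

    static struct extts_channel channels[N_CHANNELS];

    /* stand-in for the register programming done by ice_ptp_cfg_extts() */
    static void hw_program_extts(unsigned int chan,
                                 const struct extts_channel *cfg)
    {
        printf("chan %u: %s pin %u\n", chan,
               cfg->ena ? "enable" : "disable", cfg->gpio_pin);
    }

    static void cfg_extts(unsigned int chan, const struct extts_channel *cfg,
                          bool store)
    {
        hw_program_extts(chan, cfg);
        if (store)                 /* remember user intent across resets */
            channels[chan] = *cfg;
    }

    /* reset path: replay every channel the user had enabled */
    static void enable_all_extts(void)
    {
        for (unsigned int i = 0; i < N_CHANNELS; i++)
            if (channels[i].ena)
                cfg_extts(i, &channels[i], false);
    }

    int main(void)
    {
        struct extts_channel req = { .ena = true, .gpio_pin = 1, .flags = 0 };

        cfg_extts(0, &req, true); /* user request: programmed and stored */
        enable_all_extts();       /* after reset: replayed, not re-stored */
        return 0;
    }

Replaying with store == false is the key design choice: the reset path reprograms hardware without mutating the saved user configuration.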
@@ -226,6 +233,7 @@ enum ice_ptp_state { * @ext_ts_irq: the external timestamp IRQ in use * @kworker: kwork thread for handling periodic work * @perout_channels: periodic output data + * @extts_channels: channels for external timestamps * @info: structure defining PTP hardware capabilities * @clock: pointer to registered PTP clock device * @tstamp_config: hardware timestamping configuration @@ -249,6 +257,7 @@ struct ice_ptp { u8 ext_ts_irq; struct kthread_worker *kworker; struct ice_perout_channel perout_channels[GLTSYN_TGT_H_IDX_MAX]; + struct ice_extts_channel extts_channels[GLTSYN_TGT_H_IDX_MAX]; struct ptp_clock_info info; struct ptp_clock *clock; struct hwtstamp_config tstamp_config; diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c index 1bb026232efc..946edbad4302 100644 --- a/drivers/net/ethernet/intel/igc/igc_ptp.c +++ b/drivers/net/ethernet/intel/igc/igc_ptp.c @@ -938,7 +938,11 @@ static bool igc_is_crosststamp_supported(struct igc_adapter *adapter) static struct system_counterval_t igc_device_tstamp_to_system(u64 tstamp) { #if IS_ENABLED(CONFIG_X86_TSC) && !defined(CONFIG_UML) - return convert_art_ns_to_tsc(tstamp); + return (struct system_counterval_t) { + .cs_id = CSID_X86_ART, + .cycles = tstamp, + .use_nsecs = true, + }; #else return (struct system_counterval_t) { }; #endif diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index 5352fee62d2b..0b9982804370 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -217,9 +217,9 @@ ltq_etop_free_channel(struct net_device *dev, struct ltq_etop_chan *ch) if (ch->dma.irq) free_irq(ch->dma.irq, priv); if (IS_RX(ch->idx)) { - int desc; + struct ltq_dma_channel *dma = &ch->dma; - for (desc = 0; desc < LTQ_DESC_NUM; desc++) + for (dma->desc = 0; dma->desc < LTQ_DESC_NUM; dma->desc++) dev_kfree_skb_any(ch->skb[ch->dma.desc]); } } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index 4a77f6fe2622..05b84581d5c5 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -1745,7 +1745,7 @@ struct cpt_lf_alloc_req_msg { u16 nix_pf_func; u16 sso_pf_func; u16 eng_grpmsk; - int blkaddr; + u8 blkaddr; u8 ctx_ilen_valid : 1; u8 ctx_ilen : 7; }; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h index d883157393ea..6c3aca6f278d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h @@ -63,8 +63,13 @@ enum npc_kpu_lb_ltype { NPC_LT_LB_CUSTOM1 = 0xF, }; +/* Don't modify ltypes up to IP6_EXT, otherwise length and checksum of IP + * headers may not be checked correctly. IPv4 ltypes and IPv6 ltypes must + * differ only at bit 0 so mask 0xE can be used to detect extended headers. 
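That bit-0 pairing is easy to verify numerically. With the reordered enum, NPC_LT_LC_IP = 2 and NPC_LT_LC_IP_OPT = 3 differ only in bit 0 (likewise NPC_LT_LC_IP6 = 4 and NPC_LT_LC_IP6_EXT = 5), so a single masked compare matches both the plain and the extended header. A small demonstration of the mask arithmetic used later in rvu_nix.c:

    #include <assert.h>
    #include <stdio.h>

    /* values per the reordered enum npc_kpu_lc_ltype */
    enum { LT_PTP = 1, LT_IP, LT_IP_OPT, LT_IP6, LT_IP6_EXT };

    /* (~(a ^ b)) & 0xf keeps exactly the bits on which a and b agree */
    #define MATCH_MSK(a, b) ((~((a) ^ (b))) & 0xf)

    int main(void)
    {
        unsigned ip_msk  = MATCH_MSK(LT_IP, LT_IP_OPT);   /* 0xE */
        unsigned ip6_msk = MATCH_MSK(LT_IP6, LT_IP6_EXT); /* 0xE */

        assert(ip_msk == 0xE && ip6_msk == 0xE);

        /* a masked compare against LT_IP now matches both IPv4 ltypes ... */
        assert((LT_IP & ip_msk) == (LT_IP_OPT & ip_msk));
        /* ... while still rejecting unrelated ltypes such as IPv6 */
        assert((LT_IP & ip_msk) != (LT_IP6 & ip_msk));

        printf("ip mask 0x%X, ip6 mask 0x%X\n", ip_msk, ip6_msk);
        return 0;
    }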
+ */ enum npc_kpu_lc_ltype { - NPC_LT_LC_IP = 1, + NPC_LT_LC_PTP = 1, + NPC_LT_LC_IP, NPC_LT_LC_IP_OPT, NPC_LT_LC_IP6, NPC_LT_LC_IP6_EXT, @@ -72,7 +77,6 @@ enum npc_kpu_lc_ltype { NPC_LT_LC_RARP, NPC_LT_LC_MPLS, NPC_LT_LC_NSH, - NPC_LT_LC_PTP, NPC_LT_LC_FCOE, NPC_LT_LC_NGIO, NPC_LT_LC_CUSTOM0 = 0xE, diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index ff78251f92d4..5f661e67ccbc 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -1643,7 +1643,7 @@ static int rvu_check_rsrc_availability(struct rvu *rvu, if (req->ssow > block->lf.max) { dev_err(&rvu->pdev->dev, "Func 0x%x: Invalid SSOW req, %d > max %d\n", - pcifunc, req->sso, block->lf.max); + pcifunc, req->ssow, block->lf.max); return -EINVAL; } mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c index f047185f38e0..3e09d2285814 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c @@ -696,7 +696,8 @@ int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req, struct cpt_rd_wr_reg_msg *rsp) { - int blkaddr; + u64 offset = req->reg_offset; + int blkaddr, lf; blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr); if (blkaddr < 0) @@ -707,17 +708,25 @@ int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu, !is_cpt_vf(rvu, req->hdr.pcifunc)) return CPT_AF_ERR_ACCESS_DENIED; - rsp->reg_offset = req->reg_offset; - rsp->ret_val = req->ret_val; - rsp->is_write = req->is_write; - if (!is_valid_offset(rvu, req)) return CPT_AF_ERR_ACCESS_DENIED; + /* Translate local LF used by VFs to global CPT LF */ + lf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], req->hdr.pcifunc, + (offset & 0xFFF) >> 3); + + /* Translate local LF's offset to global CPT LF's offset */ + offset &= 0xFF000; + offset += lf << 3; + + rsp->reg_offset = offset; + rsp->ret_val = req->ret_val; + rsp->is_write = req->is_write; + if (req->is_write) - rvu_write64(rvu, blkaddr, req->reg_offset, req->val); + rvu_write64(rvu, blkaddr, offset, req->val); else - rsp->val = rvu_read64(rvu, blkaddr, req->reg_offset); + rsp->val = rvu_read64(rvu, blkaddr, offset); return 0; } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index 00af8888e329..3dc828cf6c5a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -3864,6 +3864,11 @@ static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg) return -ERANGE; } +/* Mask to match ipv6(NPC_LT_LC_IP6) and ipv6 ext(NPC_LT_LC_IP6_EXT) */ +#define NPC_LT_LC_IP6_MATCH_MSK ((~(NPC_LT_LC_IP6 ^ NPC_LT_LC_IP6_EXT)) & 0xf) +/* Mask to match both ipv4(NPC_LT_LC_IP) and ipv4 ext(NPC_LT_LC_IP_OPT) */ +#define NPC_LT_LC_IP_MATCH_MSK ((~(NPC_LT_LC_IP ^ NPC_LT_LC_IP_OPT)) & 0xf) + static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) { int idx, nr_field, key_off, field_marker, keyoff_marker; @@ -3933,7 +3938,7 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) field->hdr_offset = 9; /* offset */ field->bytesm1 = 0; /* 1 byte */ field->ltype_match = NPC_LT_LC_IP; - field->ltype_mask = 0xF; + field->ltype_mask = NPC_LT_LC_IP_MATCH_MSK; break; case NIX_FLOW_KEY_TYPE_IPV4: case NIX_FLOW_KEY_TYPE_INNR_IPV4: @@ -3960,8 +3965,7 @@ static int 
set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) field->bytesm1 = 3; /* DIP, 4 bytes */ } } - - field->ltype_mask = 0xF; /* Match only IPv4 */ + field->ltype_mask = NPC_LT_LC_IP_MATCH_MSK; keyoff_marker = false; break; case NIX_FLOW_KEY_TYPE_IPV6: @@ -3990,7 +3994,7 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) field->bytesm1 = 15; /* DIP,16 bytes */ } } - field->ltype_mask = 0xF; /* Match only IPv6 */ + field->ltype_mask = NPC_LT_LC_IP6_MATCH_MSK; break; case NIX_FLOW_KEY_TYPE_TCP: case NIX_FLOW_KEY_TYPE_UDP: diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c index 31aebeb2e285..25989c79c92e 100644 --- a/drivers/net/ethernet/mediatek/mtk_star_emac.c +++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c @@ -1524,6 +1524,7 @@ static int mtk_star_probe(struct platform_device *pdev) { struct device_node *of_node; struct mtk_star_priv *priv; + struct phy_device *phydev; struct net_device *ndev; struct device *dev; void __iomem *base; @@ -1649,6 +1650,12 @@ static int mtk_star_probe(struct platform_device *pdev) netif_napi_add(ndev, &priv->rx_napi, mtk_star_rx_poll); netif_napi_add_tx(ndev, &priv->tx_napi, mtk_star_tx_poll); + phydev = of_phy_find_device(priv->phy_node); + if (phydev) { + phydev->mac_managed_pm = true; + put_device(&phydev->mdio.dev); + } + return devm_register_netdev(dev, ndev); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index c54fd01ea635..3d274599015b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -989,7 +989,12 @@ static void mlx5e_xfrm_update_stats(struct xfrm_state *x) struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x); struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule; struct net *net = dev_net(x->xso.dev); + u64 trailer_packets = 0, trailer_bytes = 0; + u64 replay_packets = 0, replay_bytes = 0; + u64 auth_packets = 0, auth_bytes = 0; + u64 success_packets, success_bytes; u64 packets, bytes, lastuse; + size_t headers; lockdep_assert(lockdep_is_held(&x->lock) || lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_cfg_mutex) || @@ -999,26 +1004,43 @@ static void mlx5e_xfrm_update_stats(struct xfrm_state *x) return; if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) { - mlx5_fc_query_cached(ipsec_rule->auth.fc, &bytes, &packets, &lastuse); - x->stats.integrity_failed += packets; - XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR, packets); - - mlx5_fc_query_cached(ipsec_rule->trailer.fc, &bytes, &packets, &lastuse); - XFRM_ADD_STATS(net, LINUX_MIB_XFRMINHDRERROR, packets); + mlx5_fc_query_cached(ipsec_rule->auth.fc, &auth_bytes, + &auth_packets, &lastuse); + x->stats.integrity_failed += auth_packets; + XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR, auth_packets); + + mlx5_fc_query_cached(ipsec_rule->trailer.fc, &trailer_bytes, + &trailer_packets, &lastuse); + XFRM_ADD_STATS(net, LINUX_MIB_XFRMINHDRERROR, trailer_packets); } if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET) return; - mlx5_fc_query_cached(ipsec_rule->fc, &bytes, &packets, &lastuse); - x->curlft.packets += packets; - x->curlft.bytes += bytes; - if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) { - mlx5_fc_query_cached(ipsec_rule->replay.fc, &bytes, &packets, &lastuse); - x->stats.replay += packets; - XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR, packets); + mlx5_fc_query_cached(ipsec_rule->replay.fc, 
&replay_bytes, + &replay_packets, &lastuse); + x->stats.replay += replay_packets; + XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR, replay_packets); } + + mlx5_fc_query_cached(ipsec_rule->fc, &bytes, &packets, &lastuse); + success_packets = packets - auth_packets - trailer_packets - replay_packets; + x->curlft.packets += success_packets; + /* NIC counts all bytes passed through flow steering and doesn't have + * the ability to count payload data size which is needed for SA. + * + * To overcome this HW limitation, let's approximate the payload size + * by removing always available headers. + */ + headers = sizeof(struct ethhdr); + if (sa_entry->attrs.family == AF_INET) + headers += sizeof(struct iphdr); + else + headers += sizeof(struct ipv6hdr); + + success_bytes = bytes - auth_bytes - trailer_bytes - replay_bytes; + x->curlft.bytes += success_bytes - headers * success_packets; } static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index a605eae56685..eedbcba22689 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -5868,6 +5868,11 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv) kfree(priv->htb_qos_sq_stats[i]); kvfree(priv->htb_qos_sq_stats); + if (priv->mqprio_rl) { + mlx5e_mqprio_rl_cleanup(priv->mqprio_rl); + mlx5e_mqprio_rl_free(priv->mqprio_rl); + } + memset(priv, 0, sizeof(*priv)); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 5693986ae656..ac1565c0c8af 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -1197,9 +1197,7 @@ static int get_num_eqs(struct mlx5_core_dev *dev) if (!mlx5_core_is_eth_enabled(dev) && mlx5_eth_supported(dev)) return 1; - max_dev_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ? - MLX5_CAP_GEN(dev, max_num_eqs) : - 1 << MLX5_CAP_GEN(dev, log_max_eq); + max_dev_eqs = mlx5_max_eq_cap_get(dev); num_eqs = min_t(int, mlx5_irq_table_get_num_comp(eq_table->irq_table), max_dev_eqs - MLX5_MAX_ASYNC_EQS); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c index 50d2ea323979..a436ce895e45 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c @@ -6,6 +6,9 @@ #include "helper.h" #include "ofld.h" +static int +acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport); + static bool esw_acl_ingress_prio_tag_enabled(struct mlx5_eswitch *esw, const struct mlx5_vport *vport) @@ -123,18 +126,31 @@ static int esw_acl_ingress_src_port_drop_create(struct mlx5_eswitch *esw, { struct mlx5_flow_act flow_act = {}; struct mlx5_flow_handle *flow_rule; + bool created = false; int err = 0; + if (!vport->ingress.acl) { + err = acl_ingress_ofld_setup(esw, vport); + if (err) + return err; + created = true; + } + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP; flow_act.fg = vport->ingress.offloads.drop_grp; flow_rule = mlx5_add_flow_rules(vport->ingress.acl, NULL, &flow_act, NULL, 0); if (IS_ERR(flow_rule)) { err = PTR_ERR(flow_rule); - goto out; + goto err_out; } vport->ingress.offloads.drop_rule = flow_rule; -out: + + return 0; +err_out: + /* Only destroy ingress acl created in this function.
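The payload approximation in mlx5e_xfrm_update_stats() earlier in this hunk is simple enough to check by hand: subtract the error counters from the totals, then strip the always-present L2/L3 headers from every remaining packet. A tiny sketch of the arithmetic (header sizes hard-coded here rather than taken from struct ethhdr/iphdr/ipv6hdr):

    #include <stdint.h>
    #include <stdio.h>

    /* fixed headers always present on counted packets */
    #define ETH_HDR  14
    #define IPV4_HDR 20
    #define IPV6_HDR 40

    static uint64_t payload_bytes(uint64_t total_bytes, uint64_t total_pkts,
                                  uint64_t err_bytes, uint64_t err_pkts,
                                  int ipv6)
    {
        uint64_t hdr = ETH_HDR + (ipv6 ? IPV6_HDR : IPV4_HDR);
        uint64_t ok_pkts = total_pkts - err_pkts;   /* auth+trailer+replay */
        uint64_t ok_bytes = total_bytes - err_bytes;

        /* strip the always-present headers from every good packet */
        return ok_bytes - hdr * ok_pkts;
    }

    int main(void)
    {
        /* e.g. 10 packets totalling 1000 bytes on the wire, one 100-byte
         * packet dropped: IPv4 payload estimate = 900 - 34 * 9 = 594 */
        printf("%llu\n",
               (unsigned long long)payload_bytes(1000, 10, 100, 1, 0));
        return 0;
    }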
*/ + if (created) + esw_acl_ingress_ofld_cleanup(esw, vport); return err; } @@ -299,16 +315,12 @@ static void esw_acl_ingress_ofld_groups_destroy(struct mlx5_vport *vport) } } -int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw, - struct mlx5_vport *vport) +static int +acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { int num_ftes = 0; int err; - if (!mlx5_eswitch_vport_match_metadata_enabled(esw) && - !esw_acl_ingress_prio_tag_enabled(esw, vport)) - return 0; - esw_acl_ingress_allow_rule_destroy(vport); if (mlx5_eswitch_vport_match_metadata_enabled(esw)) @@ -347,6 +359,15 @@ group_err: return err; } +int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport) +{ + if (!mlx5_eswitch_vport_match_metadata_enabled(esw) && + !esw_acl_ingress_prio_tag_enabled(esw, vport)) + return 0; + + return acl_ingress_ofld_setup(esw, vport); +} + void esw_acl_ingress_ofld_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 592143d5e1da..72949cb85244 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -4600,20 +4600,26 @@ mlx5_devlink_port_fn_max_io_eqs_get(struct devlink_port *port, u32 *max_io_eqs, return -EOPNOTSUPP; } + if (!MLX5_CAP_GEN_2(esw->dev, max_num_eqs_24b)) { + NL_SET_ERR_MSG_MOD(extack, + "Device doesn't support getting the max number of EQs"); + return -EOPNOTSUPP; + } + query_ctx = kzalloc(query_out_sz, GFP_KERNEL); if (!query_ctx) return -ENOMEM; mutex_lock(&esw->state_lock); err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx, - MLX5_CAP_GENERAL); + MLX5_CAP_GENERAL_2); if (err) { NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps"); goto out; } hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability); - max_eqs = MLX5_GET(cmd_hca_cap, hca_caps, max_num_eqs); + max_eqs = MLX5_GET(cmd_hca_cap_2, hca_caps, max_num_eqs_24b); if (max_eqs < MLX5_ESW_MAX_CTRL_EQS) *max_io_eqs = 0; else @@ -4644,6 +4650,12 @@ mlx5_devlink_port_fn_max_io_eqs_set(struct devlink_port *port, u32 max_io_eqs, return -EOPNOTSUPP; } + if (!MLX5_CAP_GEN_2(esw->dev, max_num_eqs_24b)) { + NL_SET_ERR_MSG_MOD(extack, + "Device doesn't support changing the max number of EQs"); + return -EOPNOTSUPP; + } + if (check_add_overflow(max_io_eqs, MLX5_ESW_MAX_CTRL_EQS, &max_eqs)) { NL_SET_ERR_MSG_MOD(extack, "Supplied value out of range"); return -EINVAL; @@ -4655,17 +4667,17 @@ mlx5_devlink_port_fn_max_io_eqs_set(struct devlink_port *port, u32 max_io_eqs, mutex_lock(&esw->state_lock); err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx, - MLX5_CAP_GENERAL); + MLX5_CAP_GENERAL_2); if (err) { NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps"); goto out; } hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability); - MLX5_SET(cmd_hca_cap, hca_caps, max_num_eqs, max_eqs); + MLX5_SET(cmd_hca_cap_2, hca_caps, max_num_eqs_24b, max_eqs); err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport_num, - MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE); + MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE2); if (err) NL_SET_ERR_MSG_MOD(extack, "Failed setting HCA caps"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index c38342b9f320..a7fd18888b6e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -383,4 +383,14 @@ static inline int mlx5_vport_to_func_id(const struct mlx5_core_dev *dev, u16 vpo : vport; } +static inline int mlx5_max_eq_cap_get(const struct mlx5_core_dev *dev) +{ + if (MLX5_CAP_GEN_2(dev, max_num_eqs_24b)) + return MLX5_CAP_GEN_2(dev, max_num_eqs_24b); + + if (MLX5_CAP_GEN(dev, max_num_eqs)) + return MLX5_CAP_GEN(dev, max_num_eqs); + + return 1 << MLX5_CAP_GEN(dev, log_max_eq); +} #endif /* __MLX5_CORE_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c index fb8787e30d3f..401d39069680 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c @@ -711,9 +711,7 @@ int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table) int mlx5_irq_table_create(struct mlx5_core_dev *dev) { - int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ? - MLX5_CAP_GEN(dev, max_num_eqs) : - 1 << MLX5_CAP_GEN(dev, log_max_eq); + int num_eqs = mlx5_max_eq_cap_get(dev); int total_vec; int pcif_vec; int req_vec; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c index 025e0db983fe..b032d5a4b3b8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c @@ -1484,6 +1484,7 @@ err_type_file_file_validate: vfree(types_info->data); err_data_alloc: kfree(types_info); + linecards->types_info = NULL; return err; } diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c index 6453c92f0fa7..7fa1820db9cc 100644 --- a/drivers/net/ethernet/micrel/ks8851_common.c +++ b/drivers/net/ethernet/micrel/ks8851_common.c @@ -352,11 +352,11 @@ static irqreturn_t ks8851_irq(int irq, void *_ks) netif_dbg(ks, intr, ks->netdev, "%s: txspace %d\n", __func__, tx_space); - spin_lock(&ks->statelock); + spin_lock_bh(&ks->statelock); ks->tx_space = tx_space; if (netif_queue_stopped(ks->netdev)) netif_wake_queue(ks->netdev); - spin_unlock(&ks->statelock); + spin_unlock_bh(&ks->statelock); } if (status & IRQ_SPIBEI) { @@ -482,6 +482,7 @@ static int ks8851_net_open(struct net_device *dev) ks8851_wrreg16(ks, KS_IER, ks->rc_ier); ks->queued_len = 0; + ks->tx_space = ks8851_rdreg16(ks, KS_TXMIR); netif_start_queue(ks->netdev); netif_dbg(ks, ifup, ks->netdev, "network device up\n"); @@ -635,14 +636,14 @@ static void ks8851_set_rx_mode(struct net_device *dev) /* schedule work to do the actual set of the data if needed */ - spin_lock(&ks->statelock); + spin_lock_bh(&ks->statelock); if (memcmp(&rxctrl, &ks->rxctrl, sizeof(rxctrl)) != 0) { memcpy(&ks->rxctrl, &rxctrl, sizeof(ks->rxctrl)); schedule_work(&ks->rxctrl_work); } - spin_unlock(&ks->statelock); + spin_unlock_bh(&ks->statelock); } static int ks8851_set_mac_address(struct net_device *dev, void *addr) @@ -1101,7 +1102,6 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev, int ret; ks->netdev = netdev; - ks->tx_space = 6144; ks->gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); ret = PTR_ERR_OR_ZERO(ks->gpio); diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c index 670c1de966db..3062cc0f9199 100644 --- a/drivers/net/ethernet/micrel/ks8851_spi.c +++ b/drivers/net/ethernet/micrel/ks8851_spi.c @@ -340,10 +340,10 @@ static void ks8851_tx_work(struct work_struct *work) tx_space = ks8851_rdreg16_spi(ks, KS_TXMIR); - spin_lock(&ks->statelock); + 
spin_lock_bh(&ks->statelock); ks->queued_len -= dequeued_len; ks->tx_space = tx_space; - spin_unlock(&ks->statelock); + spin_unlock_bh(&ks->statelock); ks8851_unlock_spi(ks, &flags); } diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c index dcab638c57fe..24c90d8f5a44 100644 --- a/drivers/net/ethernet/renesas/rswitch.c +++ b/drivers/net/ethernet/renesas/rswitch.c @@ -871,13 +871,13 @@ static void rswitch_tx_free(struct net_device *ndev) dma_rmb(); skb = gq->skbs[gq->dirty]; if (skb) { + rdev->ndev->stats.tx_packets++; + rdev->ndev->stats.tx_bytes += skb->len; dma_unmap_single(ndev->dev.parent, gq->unmap_addrs[gq->dirty], skb->len, DMA_TO_DEVICE); dev_kfree_skb_any(gq->skbs[gq->dirty]); gq->skbs[gq->dirty] = NULL; - rdev->ndev->stats.tx_packets++; - rdev->ndev->stats.tx_bytes += skb->len; } desc->desc.die_dt = DT_EEMPTY; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c index 60283543ffc8..e73fa34237d3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c @@ -390,10 +390,11 @@ static int intel_crosststamp(ktime_t *device, *device = ns_to_ktime(ptp_time); read_unlock_irqrestore(&priv->ptp_lock, flags); get_arttime(priv->mii, intel_priv->mdio_adhoc_addr, &art_time); - *system = convert_art_to_tsc(art_time); + system->cycles = art_time; } system->cycles *= intel_priv->crossts_adj; + system->cs_id = CSID_X86_ART; priv->plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN; return 0; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c index 65d7370b47d5..466c4002f00d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -272,7 +272,7 @@ static const struct ethqos_emac_por emac_v4_0_0_por[] = { static const struct ethqos_emac_driver_data emac_v4_0_0_data = { .por = emac_v4_0_0_por, - .num_por = ARRAY_SIZE(emac_v3_0_0_por), + .num_por = ARRAY_SIZE(emac_v4_0_0_por), .rgmii_config_loopback_en = false, .has_emac_ge_3 = true, .link_clk_name = "phyaux", diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index b3afc7cb7d72..c58782c41417 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -7662,9 +7662,10 @@ int stmmac_dvr_probe(struct device *device, #ifdef STMMAC_VLAN_TAG_USED /* Both mac100 and gmac support receive VLAN tag detection */ ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX; - ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; - priv->hw->hw_vlan_en = true; - + if (priv->plat->has_gmac4) { + ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; + priv->hw->hw_vlan_en = true; + } if (priv->dma_cap.vlhash) { ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER; diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c index 7c4b6881a93f..d1b682ce9c6d 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c @@ -1959,6 +1959,7 @@ int wx_sw_init(struct wx *wx) } bitmap_zero(wx->state, WX_STATE_NBITS); + wx->misc_irq_domain = false; return 0; } diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c index 68bde91b67a0..81bedc8ee8d4 100644 --- 
a/drivers/net/ethernet/wangxun/libwx/wx_lib.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c @@ -1686,6 +1686,7 @@ static int wx_set_interrupt_capability(struct wx *wx) } pdev->irq = pci_irq_vector(pdev, 0); + wx->num_q_vectors = 1; return 0; } @@ -1996,7 +1997,8 @@ void wx_free_irq(struct wx *wx) int vector; if (!(pdev->msix_enabled)) { - free_irq(pdev->irq, wx); + if (!wx->misc_irq_domain) + free_irq(pdev->irq, wx); return; } @@ -2011,7 +2013,7 @@ void wx_free_irq(struct wx *wx) free_irq(entry->vector, q_vector); } - if (wx->mac.type == wx_mac_em) + if (!wx->misc_irq_domain) free_irq(wx->msix_entry->vector, wx); } EXPORT_SYMBOL(wx_free_irq); @@ -2026,6 +2028,9 @@ int wx_setup_isb_resources(struct wx *wx) { struct pci_dev *pdev = wx->pdev; + if (wx->isb_mem) + return 0; + wx->isb_mem = dma_alloc_coherent(&pdev->dev, sizeof(u32) * 4, &wx->isb_dma, @@ -2385,7 +2390,6 @@ static void wx_free_all_tx_resources(struct wx *wx) void wx_free_resources(struct wx *wx) { - wx_free_isb_resources(wx); wx_free_all_rx_resources(wx); wx_free_all_tx_resources(wx); } diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h index 5aaf7b1fa2db..0df7f5712b6f 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h @@ -1058,6 +1058,7 @@ struct wx { dma_addr_t isb_dma; u32 *isb_mem; u32 isb_tag[WX_ISB_MAX]; + bool misc_irq_domain; #define WX_MAX_RETA_ENTRIES 128 #define WX_RSS_INDIR_TBL_MAX 64 diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c index e894e01d030d..af30ca0312b8 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c @@ -387,6 +387,7 @@ err_dis_phy: err_free_irq: wx_free_irq(wx); err_free_resources: + wx_free_isb_resources(wx); wx_free_resources(wx); return err; } @@ -408,6 +409,7 @@ static int ngbe_close(struct net_device *netdev) ngbe_down(wx); wx_free_irq(wx); + wx_free_isb_resources(wx); wx_free_resources(wx); phylink_disconnect_phy(wx->phylink); wx_control_hw(wx, false); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c index b3e3605d1edb..a4cf682dca65 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c @@ -27,57 +27,19 @@ void txgbe_irq_enable(struct wx *wx, bool queues) } /** - * txgbe_intr - msi/legacy mode Interrupt Handler - * @irq: interrupt number - * @data: pointer to a network interface device structure - **/ -static irqreturn_t txgbe_intr(int __always_unused irq, void *data) -{ - struct wx_q_vector *q_vector; - struct wx *wx = data; - struct pci_dev *pdev; - u32 eicr; - - q_vector = wx->q_vector[0]; - pdev = wx->pdev; - - eicr = wx_misc_isb(wx, WX_ISB_VEC0); - if (!eicr) { - /* shared interrupt alert! - * the interrupt that we masked before the ICR read. - */ - if (netif_running(wx->netdev)) - txgbe_irq_enable(wx, true); - return IRQ_NONE; /* Not our interrupt */ - } - wx->isb_mem[WX_ISB_VEC0] = 0; - if (!(pdev->msi_enabled)) - wr32(wx, WX_PX_INTA, 1); - - wx->isb_mem[WX_ISB_MISC] = 0; - /* would disable interrupts here but it is auto disabled */ - napi_schedule_irqoff(&q_vector->napi); - - /* re-enable link(maybe) and non-queue interrupts, no flush. 
- * txgbe_poll will re-enable the queue interrupts - */ - if (netif_running(wx->netdev)) - txgbe_irq_enable(wx, false); - - return IRQ_HANDLED; -} - -/** - * txgbe_request_msix_irqs - Initialize MSI-X interrupts + * txgbe_request_queue_irqs - Initialize MSI-X queue interrupts * @wx: board private structure * - * Allocate MSI-X vectors and request interrupts from the kernel. + * Allocate MSI-X queue vectors and request interrupts from the kernel. **/ -static int txgbe_request_msix_irqs(struct wx *wx) +int txgbe_request_queue_irqs(struct wx *wx) { struct net_device *netdev = wx->netdev; int vector, err; + if (!wx->pdev->msix_enabled) + return 0; + for (vector = 0; vector < wx->num_q_vectors; vector++) { struct wx_q_vector *q_vector = wx->q_vector[vector]; struct msix_entry *entry = &wx->msix_q_entries[vector]; @@ -110,34 +72,6 @@ free_queue_irqs: return err; } -/** - * txgbe_request_irq - initialize interrupts - * @wx: board private structure - * - * Attempt to configure interrupts using the best available - * capabilities of the hardware and kernel. - **/ -int txgbe_request_irq(struct wx *wx) -{ - struct net_device *netdev = wx->netdev; - struct pci_dev *pdev = wx->pdev; - int err; - - if (pdev->msix_enabled) - err = txgbe_request_msix_irqs(wx); - else if (pdev->msi_enabled) - err = request_irq(wx->pdev->irq, &txgbe_intr, 0, - netdev->name, wx); - else - err = request_irq(wx->pdev->irq, &txgbe_intr, IRQF_SHARED, - netdev->name, wx); - - if (err) - wx_err(wx, "request_irq failed, Error %d\n", err); - - return err; -} - static int txgbe_request_gpio_irq(struct txgbe *txgbe) { txgbe->gpio_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_GPIO); @@ -178,6 +112,36 @@ static const struct irq_domain_ops txgbe_misc_irq_domain_ops = { static irqreturn_t txgbe_misc_irq_handle(int irq, void *data) { + struct wx_q_vector *q_vector; + struct txgbe *txgbe = data; + struct wx *wx = txgbe->wx; + u32 eicr; + + if (wx->pdev->msix_enabled) + return IRQ_WAKE_THREAD; + + eicr = wx_misc_isb(wx, WX_ISB_VEC0); + if (!eicr) { + /* shared interrupt alert! + * the interrupt that we masked before the ICR read. 
+ */ + if (netif_running(wx->netdev)) + txgbe_irq_enable(wx, true); + return IRQ_NONE; /* Not our interrupt */ + } + wx->isb_mem[WX_ISB_VEC0] = 0; + if (!(wx->pdev->msi_enabled)) + wr32(wx, WX_PX_INTA, 1); + + /* would disable interrupts here but it is auto disabled */ + q_vector = wx->q_vector[0]; + napi_schedule_irqoff(&q_vector->napi); + + return IRQ_WAKE_THREAD; +} + +static irqreturn_t txgbe_misc_irq_thread_fn(int irq, void *data) +{ struct txgbe *txgbe = data; struct wx *wx = txgbe->wx; unsigned int nhandled = 0; @@ -223,6 +187,7 @@ void txgbe_free_misc_irq(struct txgbe *txgbe) int txgbe_setup_misc_irq(struct txgbe *txgbe) { + unsigned long flags = IRQF_ONESHOT; struct wx *wx = txgbe->wx; int hwirq, err; @@ -236,14 +201,17 @@ int txgbe_setup_misc_irq(struct txgbe *txgbe) irq_create_mapping(txgbe->misc.domain, hwirq); txgbe->misc.chip = txgbe_irq_chip; - if (wx->pdev->msix_enabled) + if (wx->pdev->msix_enabled) { txgbe->misc.irq = wx->msix_entry->vector; - else + } else { txgbe->misc.irq = wx->pdev->irq; + if (!wx->pdev->msi_enabled) + flags |= IRQF_SHARED; + } - err = request_threaded_irq(txgbe->misc.irq, NULL, - txgbe_misc_irq_handle, - IRQF_ONESHOT, + err = request_threaded_irq(txgbe->misc.irq, txgbe_misc_irq_handle, + txgbe_misc_irq_thread_fn, + flags, wx->netdev->name, txgbe); if (err) goto del_misc_irq; @@ -256,6 +224,8 @@ int txgbe_setup_misc_irq(struct txgbe *txgbe) if (err) goto free_gpio_irq; + wx->misc_irq_domain = true; + return 0; free_gpio_irq: diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h index b77945e7a0f2..e6285b94625e 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h @@ -2,6 +2,6 @@ /* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. */ void txgbe_irq_enable(struct wx *wx, bool queues); -int txgbe_request_irq(struct wx *wx); +int txgbe_request_queue_irqs(struct wx *wx); void txgbe_free_misc_irq(struct txgbe *txgbe); int txgbe_setup_misc_irq(struct txgbe *txgbe); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index 8c7a74981b90..ca74d9422065 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -294,9 +294,9 @@ static int txgbe_open(struct net_device *netdev) wx_configure(wx); - err = txgbe_request_irq(wx); + err = txgbe_request_queue_irqs(wx); if (err) - goto err_free_isb; + goto err_free_resources; /* Notify the stack of the actual queue counts. 
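The txgbe rework above funnels MSI/INTx and MSI-X through a single threaded handler: the hard handler does only the urgent queue-vector kick and returns IRQ_WAKE_THREAD, the slow misc work runs in the thread function, and IRQF_SHARED is added only for legacy INTx, where the line may be shared. A schematic kernel-context sketch of that registration pattern (module boilerplate omitted; the names here are placeholders, not the txgbe symbols):

    #include <linux/interrupt.h>

    /* hard handler: runs in IRQ context, keep it minimal */
    static irqreturn_t misc_irq_handle(int irq, void *data)
    {
        /* MSI-X: nothing to demultiplex, go straight to the thread;
         * MSI/INTx: ack the cause and kick NAPI first, then wake it */
        return IRQ_WAKE_THREAD;
    }

    /* thread handler: slow-path work (link, GPIO, ...) runs here */
    static irqreturn_t misc_irq_thread_fn(int irq, void *data)
    {
        return IRQ_HANDLED;
    }

    static int setup_misc_irq(int irq, bool msi_or_msix, void *dev)
    {
        unsigned long flags = IRQF_ONESHOT;

        /* a legacy INTx line may be shared with other devices */
        if (!msi_or_msix)
            flags |= IRQF_SHARED;

        return request_threaded_irq(irq, misc_irq_handle,
                                    misc_irq_thread_fn, flags,
                                    "misc", dev);
    }

IRQF_ONESHOT keeps the line masked until the thread finishes, which is what makes the wake-thread split safe on a level-triggered shared line.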
*/ err = netif_set_real_num_tx_queues(netdev, wx->num_tx_queues); @@ -313,8 +313,8 @@ err_free_irq: wx_free_irq(wx); -err_free_isb: - wx_free_isb_resources(wx); +err_free_resources: + wx_free_resources(wx); err_reset: txgbe_reset(wx); @@ -729,6 +729,7 @@ static void txgbe_remove(struct pci_dev *pdev) txgbe_remove_phy(txgbe); txgbe_free_misc_irq(txgbe); + wx_free_isb_resources(wx); pci_release_selected_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM)); diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c index ffe7f463e16e..ef6df0e37bea 100644 --- a/drivers/net/ntb_netdev.c +++ b/drivers/net/ntb_netdev.c @@ -119,7 +119,7 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data, skb->protocol = eth_type_trans(skb, ndev); skb->ip_summed = CHECKSUM_NONE; - if (__netif_rx(skb) == NET_RX_DROP) { + if (netif_rx(skb) == NET_RX_DROP) { ndev->stats.rx_errors++; ndev->stats.rx_dropped++; } else { diff --git a/drivers/net/phy/aquantia/aquantia.h b/drivers/net/phy/aquantia/aquantia.h index 1c19ae74ad2b..4830b25e6c7d 100644 --- a/drivers/net/phy/aquantia/aquantia.h +++ b/drivers/net/phy/aquantia/aquantia.h @@ -6,6 +6,9 @@ * Author: Heiner Kallweit <hkallweit1@gmail.com> */ +#ifndef AQUANTIA_H +#define AQUANTIA_H + #include <linux/device.h> #include <linux/phy.h> @@ -120,3 +123,5 @@ static inline int aqr_hwmon_probe(struct phy_device *phydev) { return 0; } #endif int aqr_firmware_load(struct phy_device *phydev); + +#endif /* AQUANTIA_H */ diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c index a838b61cd844..a35528497a57 100644 --- a/drivers/net/phy/microchip_t1.c +++ b/drivers/net/phy/microchip_t1.c @@ -748,7 +748,7 @@ static int lan87xx_cable_test_report(struct phy_device *phydev) ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A, lan87xx_cable_test_report_trans(detect)); - return 0; + return phy_init_hw(phydev); } static int lan87xx_cable_test_get_status(struct phy_device *phydev, diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 0a65b6d690fe..eb9acfcaeb09 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -70,6 +70,7 @@ #define MPHDRLEN_SSN 4 /* ditto with short sequence numbers */ #define PPP_PROTO_LEN 2 +#define PPP_LCP_HDRLEN 4 /* * An instance of /dev/ppp can be associated with either a ppp @@ -493,6 +494,15 @@ static ssize_t ppp_read(struct file *file, char __user *buf, return ret; } +static bool ppp_check_packet(struct sk_buff *skb, size_t count) +{ + /* LCP packets must include LCP header which is 4 bytes long:
+ */ + return get_unaligned_be16(skb->data) != PPP_LCP || + count >= PPP_PROTO_LEN + PPP_LCP_HDRLEN; +} + static ssize_t ppp_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { @@ -515,6 +525,11 @@ static ssize_t ppp_write(struct file *file, const char __user *buf, kfree_skb(skb); goto out; } + ret = -EINVAL; + if (unlikely(!ppp_check_packet(skb, count))) { + kfree_skb(skb); + goto out; + } switch (pf->kind) { case INTERFACE: diff --git a/drivers/net/wireguard/allowedips.c b/drivers/net/wireguard/allowedips.c index 0ba714ca5185..4b8528206cc8 100644 --- a/drivers/net/wireguard/allowedips.c +++ b/drivers/net/wireguard/allowedips.c @@ -15,8 +15,8 @@ static void swap_endian(u8 *dst, const u8 *src, u8 bits) if (bits == 32) { *(u32 *)dst = be32_to_cpu(*(const __be32 *)src); } else if (bits == 128) { - ((u64 *)dst)[0] = be64_to_cpu(((const __be64 *)src)[0]); - ((u64 *)dst)[1] = be64_to_cpu(((const __be64 *)src)[1]); + ((u64 *)dst)[0] = get_unaligned_be64(src); + ((u64 *)dst)[1] = get_unaligned_be64(src + 8); } } diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h index 1ea4f874e367..7eb76724b3ed 100644 --- a/drivers/net/wireguard/queueing.h +++ b/drivers/net/wireguard/queueing.h @@ -124,10 +124,10 @@ static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id) */ static inline int wg_cpumask_next_online(int *last_cpu) { - int cpu = cpumask_next(*last_cpu, cpu_online_mask); + int cpu = cpumask_next(READ_ONCE(*last_cpu), cpu_online_mask); if (cpu >= nr_cpu_ids) cpu = cpumask_first(cpu_online_mask); - *last_cpu = cpu; + WRITE_ONCE(*last_cpu, cpu); return cpu; } diff --git a/drivers/net/wireguard/send.c b/drivers/net/wireguard/send.c index 0d48e0f4a1ba..26e09c30d596 100644 --- a/drivers/net/wireguard/send.c +++ b/drivers/net/wireguard/send.c @@ -222,7 +222,7 @@ void wg_packet_send_keepalive(struct wg_peer *peer) { struct sk_buff *skb; - if (skb_queue_empty(&peer->staged_packet_queue)) { + if (skb_queue_empty_lockless(&peer->staged_packet_queue)) { skb = alloc_skb(DATA_PACKET_HEAD_ROOM + MESSAGE_MINIMUM_LENGTH, GFP_ATOMIC); if (unlikely(!skb)) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 18ce060df9b5..dac6155ae1bd 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -654,7 +654,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES; if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_KEK_KCK_MATERIAL, - IWL_FW_CMD_VER_UNKNOWN) == 3) + IWL_FW_CMD_VER_UNKNOWN) >= 3) hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK; if (fw_has_api(&mvm->fw->ucode_capa, @@ -1656,7 +1656,8 @@ static void iwl_mvm_prevent_esr_done_wk(struct wiphy *wiphy, struct iwl_mvm_vif *mvmvif = container_of(wk, struct iwl_mvm_vif, prevent_esr_done_wk.work); struct iwl_mvm *mvm = mvmvif->mvm; - struct ieee80211_vif *vif = iwl_mvm_get_bss_vif(mvm); + struct ieee80211_vif *vif = + container_of((void *)mvmvif, struct ieee80211_vif, drv_priv); mutex_lock(&mvm->mutex); iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_PREVENTION); @@ -1682,7 +1683,8 @@ static void iwl_mvm_unblock_esr_tpt(struct wiphy *wiphy, struct wiphy_work *wk) struct iwl_mvm_vif *mvmvif = container_of(wk, struct iwl_mvm_vif, unblock_esr_tpt_wk); struct iwl_mvm *mvm = mvmvif->mvm; - struct ieee80211_vif *vif = iwl_mvm_get_bss_vif(mvm); + struct ieee80211_vif *vif = + container_of((void *)mvmvif, struct 
ieee80211_vif, drv_priv); mutex_lock(&mvm->mutex); iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_TPT); @@ -6410,11 +6412,9 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, if (sync) { lockdep_assert_held(&mvm->mutex); ret = wait_event_timeout(mvm->rx_sync_waitq, - READ_ONCE(mvm->queue_sync_state) == 0 || - iwl_mvm_is_radio_hw_killed(mvm), + READ_ONCE(mvm->queue_sync_state) == 0, SYNC_RX_QUEUE_TIMEOUT); - WARN_ONCE(!ret && !iwl_mvm_is_radio_hw_killed(mvm), - "queue sync: failed to sync, state is 0x%lx, cookie %d\n", + WARN_ONCE(!ret, "queue sync: failed to sync, state is 0x%lx, cookie %d\n", mvm->queue_sync_state, mvm->queue_sync_cookie); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 53283d052e18..d343432474db 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -153,7 +153,7 @@ static void iwl_mvm_rx_esr_mode_notif(struct iwl_mvm *mvm, struct ieee80211_vif *vif = iwl_mvm_get_bss_vif(mvm); /* FW recommendations is only for entering EMLSR */ - if (!vif || iwl_mvm_vif_from_mac80211(vif)->esr_active) + if (IS_ERR_OR_NULL(vif) || iwl_mvm_vif_from_mac80211(vif)->esr_active) return; if (le32_to_cpu(notif->action) == ESR_RECOMMEND_ENTER) @@ -1912,12 +1912,10 @@ static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) bool rfkill_safe_init_done = READ_ONCE(mvm->rfkill_safe_init_done); bool unified = iwl_mvm_has_unified_ucode(mvm); - if (state) { + if (state) set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); - wake_up(&mvm->rx_sync_waitq); - } else { + else clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); - } iwl_mvm_set_rfkill_state(mvm); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index 4fa8066a89b6..6e933907f985 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -557,12 +557,10 @@ struct iwl_mvm_stat_data_all_macs { }; static void iwl_mvm_update_link_sig(struct ieee80211_vif *vif, int sig, - struct iwl_mvm_vif_link_info *link_info) + struct iwl_mvm_vif_link_info *link_info, + struct ieee80211_bss_conf *bss_conf) { struct iwl_mvm *mvm = iwl_mvm_vif_from_mac80211(vif)->mvm; - struct ieee80211_bss_conf *bss_conf = - iwl_mvm_rcu_fw_link_id_to_link_conf(mvm, link_info->fw_link_id, - false); int thold = bss_conf->cqm_rssi_thold; int hyst = bss_conf->cqm_rssi_hyst; int last_event; @@ -670,7 +668,7 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac, mvmvif->deflink.beacon_stats.num_beacons; /* This is used in pre-MLO API so use deflink */ - iwl_mvm_update_link_sig(vif, sig, &mvmvif->deflink); + iwl_mvm_update_link_sig(vif, sig, &mvmvif->deflink, &vif->bss_conf); } static void iwl_mvm_stat_iterator_all_macs(void *_data, u8 *mac, @@ -705,7 +703,7 @@ static void iwl_mvm_stat_iterator_all_macs(void *_data, u8 *mac, sig = -le32_to_cpu(mac_stats->beacon_filter_average_energy); /* This is used in pre-MLO API so use deflink */ - iwl_mvm_update_link_sig(vif, sig, &mvmvif->deflink); + iwl_mvm_update_link_sig(vif, sig, &mvmvif->deflink, &vif->bss_conf); } static inline void @@ -921,7 +919,8 @@ iwl_mvm_stat_iterator_all_links(struct iwl_mvm *mvm, mvmvif->link[link_id]->beacon_stats.num_beacons; sig = -le32_to_cpu(link_stats->beacon_filter_average_energy); - iwl_mvm_update_link_sig(bss_conf->vif, sig, link_info); + iwl_mvm_update_link_sig(bss_conf->vif, sig, link_info, + bss_conf); if (WARN_ONCE(mvmvif->id >= MAC_INDEX_AUX, 
"invalid mvmvif id: %d", mvmvif->id)) @@ -967,7 +966,7 @@ static void iwl_mvm_update_esr_mode_tpt(struct iwl_mvm *mvm) lockdep_assert_held(&mvm->mutex); - if (!bss_vif) + if (IS_ERR_OR_NULL(bss_vif)) return; mvmvif = iwl_mvm_vif_from_mac80211(bss_vif); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index 61a4638d1be2..f8b08f98daa0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c @@ -638,7 +638,7 @@ out: } static int iwl_mvm_tzone_set_trip_temp(struct thermal_zone_device *device, - int trip, int temp) + const struct thermal_trip *trip, int temp) { struct iwl_mvm *mvm = thermal_zone_device_priv(device); int ret; diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c index f1085ccb7eed..7719e4f3e2a2 100644 --- a/drivers/net/wireless/microchip/wilc1000/hif.c +++ b/drivers/net/wireless/microchip/wilc1000/hif.c @@ -382,7 +382,8 @@ wilc_parse_join_bss_param(struct cfg80211_bss *bss, struct ieee80211_p2p_noa_attr noa_attr; const struct cfg80211_bss_ies *ies; struct wilc_join_bss_param *param; - u8 rates_len = 0, ies_len; + u8 rates_len = 0; + int ies_len; int ret; param = kzalloc(sizeof(*param), GFP_KERNEL); diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c index a939fd89a7f5..92fc2d456c2c 100644 --- a/drivers/net/wireless/ti/wlcore/cmd.c +++ b/drivers/net/wireless/ti/wlcore/cmd.c @@ -1566,13 +1566,6 @@ int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif, cpu_to_le32(wl1271_tx_enabled_rates_get(wl, sta_rates, wlvif->band)); - if (!cmd->supported_rates) { - wl1271_debug(DEBUG_CMD, - "peer has no supported rates yet, configuring basic rates: 0x%x", - wlvif->basic_rate_set); - cmd->supported_rates = cpu_to_le32(wlvif->basic_rate_set); - } - wl1271_debug(DEBUG_CMD, "new peer rates=0x%x queues=0x%x", cmd->supported_rates, sta->uapsd_queues); diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index ef12169f8044..492cd7aef44f 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -5139,19 +5139,23 @@ static int wl12xx_update_sta_state(struct wl1271 *wl, /* Add station (AP mode) */ if (is_ap && - old_state == IEEE80211_STA_NOTEXIST && - new_state == IEEE80211_STA_NONE) { + old_state == IEEE80211_STA_AUTH && + new_state == IEEE80211_STA_ASSOC) { ret = wl12xx_sta_add(wl, wlvif, sta); if (ret) return ret; + wl_sta->fw_added = true; + wlcore_update_inconn_sta(wl, wlvif, wl_sta, true); } /* Remove station (AP mode) */ if (is_ap && - old_state == IEEE80211_STA_NONE && - new_state == IEEE80211_STA_NOTEXIST) { + old_state == IEEE80211_STA_ASSOC && + new_state == IEEE80211_STA_AUTH) { + wl_sta->fw_added = false; + /* must not fail */ wl12xx_sta_remove(wl, wlvif, sta); @@ -5165,11 +5169,6 @@ static int wl12xx_update_sta_state(struct wl1271 *wl, if (ret < 0) return ret; - /* reconfigure rates */ - ret = wl12xx_cmd_add_peer(wl, wlvif, sta, wl_sta->hlid); - if (ret < 0) - return ret; - ret = wl1271_acx_set_ht_capabilities(wl, &sta->deflink.ht_cap, true, wl_sta->hlid); diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c index 7bd3ce2f0804..464587d16ab2 100644 --- a/drivers/net/wireless/ti/wlcore/tx.c +++ b/drivers/net/wireless/ti/wlcore/tx.c @@ -140,11 +140,8 @@ EXPORT_SYMBOL(wl12xx_is_dummy_packet); static u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif, struct 
sk_buff *skb, struct ieee80211_sta *sta) { - if (sta) { - struct wl1271_station *wl_sta; - - wl_sta = (struct wl1271_station *)sta->drv_priv; - return wl_sta->hlid; + if (sta && wl1271_station(sta)->fw_added) { + return wl1271_station(sta)->hlid; } else { struct ieee80211_hdr *hdr; diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h index eefae3f867b9..817a8a61cac6 100644 --- a/drivers/net/wireless/ti/wlcore/wlcore_i.h +++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h @@ -324,6 +324,7 @@ struct wl12xx_rx_filter { struct wl1271_station { u8 hlid; + bool fw_added; bool in_connection; /* @@ -335,6 +336,11 @@ struct wl1271_station { u64 total_freed_pkts; }; +static inline struct wl1271_station *wl1271_station(struct ieee80211_sta *sta) +{ + return (struct wl1271_station *)sta->drv_priv; +} + struct wl12xx_vif { struct wl1271 *wl; struct list_head list; diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index 1e5aedaf8c7b..e79c06d65bb7 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c @@ -1501,9 +1501,15 @@ static int btt_blk_init(struct btt *btt) .logical_block_size = btt->sector_size, .max_hw_sectors = UINT_MAX, .max_integrity_segments = 1, + .features = BLK_FEAT_SYNCHRONOUS, }; int rc; + if (btt_meta_size(btt) && IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) { + lim.integrity.tuple_size = btt_meta_size(btt); + lim.integrity.tag_size = btt_meta_size(btt); + } + btt->btt_disk = blk_alloc_disk(&lim, NUMA_NO_NODE); if (IS_ERR(btt->btt_disk)) return PTR_ERR(btt->btt_disk); @@ -1513,17 +1519,6 @@ static int btt_blk_init(struct btt *btt) btt->btt_disk->fops = &btt_fops; btt->btt_disk->private_data = btt; - blk_queue_flag_set(QUEUE_FLAG_NONROT, btt->btt_disk->queue); - blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, btt->btt_disk->queue); - - if (btt_meta_size(btt) && IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) { - struct blk_integrity bi = { - .tuple_size = btt_meta_size(btt), - .tag_size = btt_meta_size(btt), - }; - blk_integrity_register(btt->btt_disk, &bi); - } - set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9); rc = device_add_disk(&btt->nd_btt->dev, btt->btt_disk, NULL); if (rc) diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 598fe2e89bda..1dd74c969d5a 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -455,6 +455,8 @@ static int pmem_attach_disk(struct device *dev, .logical_block_size = pmem_sector_size(ndns), .physical_block_size = PAGE_SIZE, .max_hw_sectors = UINT_MAX, + .features = BLK_FEAT_WRITE_CACHE | + BLK_FEAT_SYNCHRONOUS, }; int nid = dev_to_node(dev), fua; struct resource *res = &nsio->res; @@ -463,7 +465,6 @@ static int pmem_attach_disk(struct device *dev, struct dax_device *dax_dev; struct nd_pfn_sb *pfn_sb; struct pmem_device *pmem; - struct request_queue *q; struct gendisk *disk; void *addr; int rc; @@ -495,6 +496,10 @@ static int pmem_attach_disk(struct device *dev, dev_warn(dev, "unable to guarantee persistence of writes\n"); fua = 0; } + if (fua) + lim.features |= BLK_FEAT_FUA; + if (is_nd_pfn(dev)) + lim.features |= BLK_FEAT_DAX; if (!devm_request_mem_region(dev, res->start, resource_size(res), dev_name(&ndns->dev))) { @@ -505,7 +510,6 @@ static int pmem_attach_disk(struct device *dev, disk = blk_alloc_disk(&lim, nid); if (IS_ERR(disk)) return PTR_ERR(disk); - q = disk->queue; pmem->disk = disk; pmem->pgmap.owner = pmem; @@ -543,12 +547,6 @@ static int pmem_attach_disk(struct device *dev, } pmem->virt_addr = addr; - blk_queue_write_cache(q, true, fua); - 
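
For context on the wl12xx_update_sta_state() and fw_added changes above: mac80211 moves stations through a fixed state ladder, and this series defers the firmware peer add/remove from the NOTEXIST/NONE boundary to the AUTH/ASSOC one, with fw_added recording which side of that boundary a station is on. A schematic sketch; the enum mirrors mac80211's ieee80211_sta_state, and the helper name is illustrative:

#include <stdbool.h>

enum sta_state {	/* mirrors enum ieee80211_sta_state */
	STA_NOTEXIST,
	STA_NONE,
	STA_AUTH,
	STA_ASSOC,	/* firmware peer now added on AUTH -> ASSOC */
	STA_AUTHORIZED,
};

/* Only stations at ASSOC or above own a firmware host link id (hlid);
 * the TX path falls back to a per-vif default link otherwise. */
static bool fw_should_have_peer(enum sta_state state)
{
	return state >= STA_ASSOC;
}
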
blk_queue_flag_set(QUEUE_FLAG_NONROT, q); - blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, q); - if (pmem->pfn_flags & PFN_MAP) - blk_queue_flag_set(QUEUE_FLAG_DAX, q); - disk->fops = &pmem_fops; disk->private_data = pmem; nvdimm_namespace_disk_name(ndns, disk->disk_name); diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig index b309c8be720f..a3caef75aa0a 100644 --- a/drivers/nvme/host/Kconfig +++ b/drivers/nvme/host/Kconfig @@ -1,7 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only config NVME_CORE tristate - select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY config BLK_DEV_NVME tristate "NVM Express block device" diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c index 0cfa39361d3b..b1387dc459a3 100644 --- a/drivers/nvme/host/apple.c +++ b/drivers/nvme/host/apple.c @@ -1388,7 +1388,7 @@ static void devm_apple_nvme_mempool_destroy(void *data) mempool_destroy(data); } -static int apple_nvme_probe(struct platform_device *pdev) +static struct apple_nvme *apple_nvme_alloc(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct apple_nvme *anv; @@ -1396,7 +1396,7 @@ static int apple_nvme_probe(struct platform_device *pdev) anv = devm_kzalloc(dev, sizeof(*anv), GFP_KERNEL); if (!anv) - return -ENOMEM; + return ERR_PTR(-ENOMEM); anv->dev = get_device(dev); anv->adminq.is_adminq = true; @@ -1516,10 +1516,30 @@ static int apple_nvme_probe(struct platform_device *pdev) goto put_dev; } + return anv; +put_dev: + put_device(anv->dev); + return ERR_PTR(ret); +} + +static int apple_nvme_probe(struct platform_device *pdev) +{ + struct apple_nvme *anv; + int ret; + + anv = apple_nvme_alloc(pdev); + if (IS_ERR(anv)) + return PTR_ERR(anv); + + ret = nvme_add_ctrl(&anv->ctrl); + if (ret) + goto out_put_ctrl; + anv->ctrl.admin_q = blk_mq_alloc_queue(&anv->admin_tagset, NULL, NULL); if (IS_ERR(anv->ctrl.admin_q)) { ret = -ENOMEM; - goto put_dev; + anv->ctrl.admin_q = NULL; + goto out_uninit_ctrl; } nvme_reset_ctrl(&anv->ctrl); @@ -1527,8 +1547,10 @@ static int apple_nvme_probe(struct platform_device *pdev) return 0; -put_dev: - put_device(anv->dev); +out_uninit_ctrl: + nvme_uninit_ctrl(&anv->ctrl); +out_put_ctrl: + nvme_put_ctrl(&anv->ctrl); return ret; } diff --git a/drivers/nvme/host/constants.c b/drivers/nvme/host/constants.c index 6f2ebb5fcdb0..2b9e6cfaf2a8 100644 --- a/drivers/nvme/host/constants.c +++ b/drivers/nvme/host/constants.c @@ -173,7 +173,7 @@ static const char * const nvme_statuses[] = { const char *nvme_get_error_status_str(u16 status) { - status &= 0x7ff; + status &= NVME_SCT_SC_MASK; if (status < ARRAY_SIZE(nvme_statuses) && nvme_statuses[status]) return nvme_statuses[status]; return "Unknown"; diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 782090ce0bc1..19917253ba7b 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -110,7 +110,7 @@ struct workqueue_struct *nvme_delete_wq; EXPORT_SYMBOL_GPL(nvme_delete_wq); static LIST_HEAD(nvme_subsystems); -static DEFINE_MUTEX(nvme_subsystems_lock); +DEFINE_MUTEX(nvme_subsystems_lock); static DEFINE_IDA(nvme_instance_ida); static dev_t nvme_ctrl_base_chr_devt; @@ -261,7 +261,7 @@ void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl) static blk_status_t nvme_error_status(u16 status) { - switch (status & 0x7ff) { + switch (status & NVME_SCT_SC_MASK) { case NVME_SC_SUCCESS: return BLK_STS_OK; case NVME_SC_CAP_EXCEEDED: @@ -307,7 +307,7 @@ static void nvme_retry_req(struct request *req) u16 crd; /* The mask and shift result must be <= 3 */ - crd = (nvme_req(req)->status & 
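
The btt and pmem conversions above follow one pattern repeated throughout this series: queue features are declared up front in struct queue_limits and handed to blk_alloc_disk(), instead of being toggled with blk_queue_flag_set() after the disk already exists. A condensed sketch of the pmem case, using only the calls and flags that appear in the diff itself:

struct queue_limits lim = {
	.logical_block_size	= pmem_sector_size(ndns),
	.features		= BLK_FEAT_WRITE_CACHE | BLK_FEAT_SYNCHRONOUS,
};

/* conditional features are OR-ed in before the disk is allocated */
if (fua)
	lim.features |= BLK_FEAT_FUA;
if (is_nd_pfn(dev))
	lim.features |= BLK_FEAT_DAX;

disk = blk_alloc_disk(&lim, nid);
if (IS_ERR(disk))
	return PTR_ERR(disk);

The design point is that limits and features now land in one commit, so the queue never exists in a half-configured state between allocation and flag setup.
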
NVME_SC_CRD) >> 11; + crd = (nvme_req(req)->status & NVME_STATUS_CRD) >> 11; if (crd) delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100; @@ -329,10 +329,10 @@ static void nvme_log_error(struct request *req) nvme_sect_to_lba(ns->head, blk_rq_pos(req)), blk_rq_bytes(req) >> ns->head->lba_shift, nvme_get_error_status_str(nr->status), - nr->status >> 8 & 7, /* Status Code Type */ - nr->status & 0xff, /* Status Code */ - nr->status & NVME_SC_MORE ? "MORE " : "", - nr->status & NVME_SC_DNR ? "DNR " : ""); + NVME_SCT(nr->status), /* Status Code Type */ + nr->status & NVME_SC_MASK, /* Status Code */ + nr->status & NVME_STATUS_MORE ? "MORE " : "", + nr->status & NVME_STATUS_DNR ? "DNR " : ""); return; } @@ -341,10 +341,10 @@ static void nvme_log_error(struct request *req) nvme_get_admin_opcode_str(nr->cmd->common.opcode), nr->cmd->common.opcode, nvme_get_error_status_str(nr->status), - nr->status >> 8 & 7, /* Status Code Type */ - nr->status & 0xff, /* Status Code */ - nr->status & NVME_SC_MORE ? "MORE " : "", - nr->status & NVME_SC_DNR ? "DNR " : ""); + NVME_SCT(nr->status), /* Status Code Type */ + nr->status & NVME_SC_MASK, /* Status Code */ + nr->status & NVME_STATUS_MORE ? "MORE " : "", + nr->status & NVME_STATUS_DNR ? "DNR " : ""); } static void nvme_log_err_passthru(struct request *req) @@ -359,10 +359,10 @@ static void nvme_log_err_passthru(struct request *req) nvme_get_admin_opcode_str(nr->cmd->common.opcode), nr->cmd->common.opcode, nvme_get_error_status_str(nr->status), - nr->status >> 8 & 7, /* Status Code Type */ - nr->status & 0xff, /* Status Code */ - nr->status & NVME_SC_MORE ? "MORE " : "", - nr->status & NVME_SC_DNR ? "DNR " : "", + NVME_SCT(nr->status), /* Status Code Type */ + nr->status & NVME_SC_MASK, /* Status Code */ + nr->status & NVME_STATUS_MORE ? "MORE " : "", + nr->status & NVME_STATUS_DNR ? "DNR " : "", nr->cmd->common.cdw10, nr->cmd->common.cdw11, nr->cmd->common.cdw12, @@ -384,11 +384,11 @@ static inline enum nvme_disposition nvme_decide_disposition(struct request *req) return COMPLETE; if (blk_noretry_request(req) || - (nvme_req(req)->status & NVME_SC_DNR) || + (nvme_req(req)->status & NVME_STATUS_DNR) || nvme_req(req)->retries >= nvme_max_retries) return COMPLETE; - if ((nvme_req(req)->status & 0x7ff) == NVME_SC_AUTH_REQUIRED) + if ((nvme_req(req)->status & NVME_SCT_SC_MASK) == NVME_SC_AUTH_REQUIRED) return AUTHENTICATE; if (req->cmd_flags & REQ_NVME_MPATH) { @@ -927,6 +927,36 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns, return BLK_STS_OK; } +/* + * NVMe does not support a dedicated command to issue an atomic write. A write + * which does adhere to the device atomic limits will silently be executed + * non-atomically. The request issuer should ensure that the write is within + * the queue atomic writes limits, but just validate this in case it is not. 
+ */ +static bool nvme_valid_atomic_write(struct request *req) +{ + struct request_queue *q = req->q; + u32 boundary_bytes = queue_atomic_write_boundary_bytes(q); + + if (blk_rq_bytes(req) > queue_atomic_write_unit_max_bytes(q)) + return false; + + if (boundary_bytes) { + u64 mask = boundary_bytes - 1, imask = ~mask; + u64 start = blk_rq_pos(req) << SECTOR_SHIFT; + u64 end = start + blk_rq_bytes(req) - 1; + + /* If greater then must be crossing a boundary */ + if (blk_rq_bytes(req) > boundary_bytes) + return false; + + if ((start & imask) != (end & imask)) + return false; + } + + return true; +} + static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns, struct request *req, struct nvme_command *cmnd, enum nvme_opcode op) @@ -942,6 +972,9 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns, if (req->cmd_flags & REQ_RAHEAD) dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH; + if (req->cmd_flags & REQ_ATOMIC && !nvme_valid_atomic_write(req)) + return BLK_STS_INVAL; + cmnd->rw.opcode = op; cmnd->rw.flags = 0; cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id); @@ -1224,7 +1257,7 @@ EXPORT_SYMBOL_NS_GPL(nvme_passthru_end, NVME_TARGET_PASSTHRU); /* * Recommended frequency for KATO commands per NVMe 1.4 section 7.12.1: - * + * * The host should send Keep Alive commands at half of the Keep Alive Timeout * accounting for transport roundtrip times [..]. */ @@ -1724,11 +1757,12 @@ int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo) return 0; } -static bool nvme_init_integrity(struct gendisk *disk, struct nvme_ns_head *head) +static bool nvme_init_integrity(struct gendisk *disk, struct nvme_ns_head *head, + struct queue_limits *lim) { - struct blk_integrity integrity = { }; + struct blk_integrity *bi = &lim->integrity; - blk_integrity_unregister(disk); + memset(bi, 0, sizeof(*bi)); if (!head->ms) return true; @@ -1745,17 +1779,16 @@ static bool nvme_init_integrity(struct gendisk *disk, struct nvme_ns_head *head) case NVME_NS_DPS_PI_TYPE3: switch (head->guard_type) { case NVME_NVM_NS_16B_GUARD: - integrity.profile = &t10_pi_type3_crc; - integrity.tag_size = sizeof(u16) + sizeof(u32); - integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; + bi->csum_type = BLK_INTEGRITY_CSUM_CRC; + bi->tag_size = sizeof(u16) + sizeof(u32); + bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE; break; case NVME_NVM_NS_64B_GUARD: - integrity.profile = &ext_pi_type3_crc64; - integrity.tag_size = sizeof(u16) + 6; - integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; + bi->csum_type = BLK_INTEGRITY_CSUM_CRC64; + bi->tag_size = sizeof(u16) + 6; + bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE; break; default: - integrity.profile = NULL; break; } break; @@ -1763,28 +1796,27 @@ static bool nvme_init_integrity(struct gendisk *disk, struct nvme_ns_head *head) case NVME_NS_DPS_PI_TYPE2: switch (head->guard_type) { case NVME_NVM_NS_16B_GUARD: - integrity.profile = &t10_pi_type1_crc; - integrity.tag_size = sizeof(u16); - integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; + bi->csum_type = BLK_INTEGRITY_CSUM_CRC; + bi->tag_size = sizeof(u16); + bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE | + BLK_INTEGRITY_REF_TAG; break; case NVME_NVM_NS_64B_GUARD: - integrity.profile = &ext_pi_type1_crc64; - integrity.tag_size = sizeof(u16); - integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; + bi->csum_type = BLK_INTEGRITY_CSUM_CRC64; + bi->tag_size = sizeof(u16); + bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE | + BLK_INTEGRITY_REF_TAG; break; default: - integrity.profile = NULL; break; } break; default: - integrity.profile = NULL; break; } - integrity.tuple_size = 
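
The boundary test in nvme_valid_atomic_write() above relies on boundary_bytes being a power of two: masking start and end with ~(boundary - 1) yields the boundary-aligned block each byte falls in, and a mismatch means the write straddles a boundary. A self-contained sketch of that arithmetic with worked numbers:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* boundary must be a power of two */
static bool crosses_boundary(uint64_t start, uint64_t len, uint64_t boundary)
{
	uint64_t mask = boundary - 1, imask = ~mask;
	uint64_t end = start + len - 1;

	/* start and end fall into different boundary-sized blocks? */
	return (start & imask) != (end & imask);
}

int main(void)
{
	/* a 1 KiB write at byte offset 3584 straddles the 4 KiB line */
	assert(crosses_boundary(3584, 1024, 4096));
	/* the same write at offset 0 stays inside the first block */
	assert(!crosses_boundary(0, 1024, 4096));
	return 0;
}
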
head->ms; - integrity.pi_offset = head->pi_offset; - blk_integrity_register(disk, &integrity); + bi->tuple_size = head->ms; + bi->pi_offset = head->pi_offset; return true; } @@ -1922,6 +1954,23 @@ static void nvme_configure_metadata(struct nvme_ctrl *ctrl, } } + +static void nvme_update_atomic_write_disk_info(struct nvme_ns *ns, + struct nvme_id_ns *id, struct queue_limits *lim, + u32 bs, u32 atomic_bs) +{ + unsigned int boundary = 0; + + if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf) { + if (le16_to_cpu(id->nabspf)) + boundary = (le16_to_cpu(id->nabspf) + 1) * bs; + } + lim->atomic_write_hw_max = atomic_bs; + lim->atomic_write_hw_boundary = boundary; + lim->atomic_write_hw_unit_min = bs; + lim->atomic_write_hw_unit_max = rounddown_pow_of_two(atomic_bs); +} + static u32 nvme_max_drv_segments(struct nvme_ctrl *ctrl) { return ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> SECTOR_SHIFT) + 1; @@ -1968,13 +2017,16 @@ static bool nvme_update_disk_info(struct nvme_ns *ns, struct nvme_id_ns *id, atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs; else atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs; + + nvme_update_atomic_write_disk_info(ns, id, lim, bs, atomic_bs); } if (id->nsfeat & NVME_NS_FEAT_IO_OPT) { /* NPWG = Namespace Preferred Write Granularity */ phys_bs = bs * (1 + le16_to_cpu(id->npwg)); /* NOWS = Namespace Optimal Write Size */ - io_opt = bs * (1 + le16_to_cpu(id->nows)); + if (id->nows) + io_opt = bs * (1 + le16_to_cpu(id->nows)); } /* @@ -2058,7 +2110,6 @@ static int nvme_update_ns_info_generic(struct nvme_ns *ns, static int nvme_update_ns_info_block(struct nvme_ns *ns, struct nvme_ns_info *info) { - bool vwc = ns->ctrl->vwc & NVME_CTRL_VWC_PRESENT; struct queue_limits lim; struct nvme_id_ns_nvm *nvm = NULL; struct nvme_zone_info zi = {}; @@ -2107,11 +2158,11 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns, if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && ns->head->ids.csi == NVME_CSI_ZNS) nvme_update_zone_info(ns, &lim, &zi); - ret = queue_limits_commit_update(ns->disk->queue, &lim); - if (ret) { - blk_mq_unfreeze_queue(ns->disk->queue); - goto out; - } + + if (ns->ctrl->vwc & NVME_CTRL_VWC_PRESENT) + lim.features |= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA; + else + lim.features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA); /* * Register a metadata profile for PI, or the plain non-integrity NVMe @@ -2119,9 +2170,15 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns, * I/O to namespaces with metadata except when the namespace supports * PI, as it can strip/insert in that case. 
*/ - if (!nvme_init_integrity(ns->disk, ns->head)) + if (!nvme_init_integrity(ns->disk, ns->head, &lim)) capacity = 0; + ret = queue_limits_commit_update(ns->disk->queue, &lim); + if (ret) { + blk_mq_unfreeze_queue(ns->disk->queue); + goto out; + } + set_capacity_and_notify(ns->disk, capacity); /* @@ -2133,7 +2190,6 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns, if ((id->dlfeat & 0x7) == 0x1 && (id->dlfeat & (1 << 3))) ns->head->features |= NVME_NS_DEAC; set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info)); - blk_queue_write_cache(ns->disk->queue, vwc, vwc); set_bit(NVME_NS_READY, &ns->flags); blk_mq_unfreeze_queue(ns->disk->queue); @@ -2193,14 +2249,6 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info) struct queue_limits lim; blk_mq_freeze_queue(ns->head->disk->queue); - if (unsupported) - ns->head->disk->flags |= GENHD_FL_HIDDEN; - else - nvme_init_integrity(ns->head->disk, ns->head); - set_capacity_and_notify(ns->head->disk, get_capacity(ns->disk)); - set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info)); - nvme_mpath_revalidate_paths(ns); - /* * queue_limits mixes values that are the hardware limitations * for bio splitting with what is the device configuration. @@ -2223,13 +2271,48 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info) lim.io_opt = ns_lim->io_opt; queue_limits_stack_bdev(&lim, ns->disk->part0, 0, ns->head->disk->disk_name); + if (unsupported) + ns->head->disk->flags |= GENHD_FL_HIDDEN; + else + nvme_init_integrity(ns->head->disk, ns->head, &lim); ret = queue_limits_commit_update(ns->head->disk->queue, &lim); + + set_capacity_and_notify(ns->head->disk, get_capacity(ns->disk)); + set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info)); + nvme_mpath_revalidate_paths(ns); + blk_mq_unfreeze_queue(ns->head->disk->queue); } return ret; } +int nvme_ns_get_unique_id(struct nvme_ns *ns, u8 id[16], + enum blk_unique_id type) +{ + struct nvme_ns_ids *ids = &ns->head->ids; + + if (type != BLK_UID_EUI64) + return -EINVAL; + + if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) { + memcpy(id, &ids->nguid, sizeof(ids->nguid)); + return sizeof(ids->nguid); + } + if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) { + memcpy(id, &ids->eui64, sizeof(ids->eui64)); + return sizeof(ids->eui64); + } + + return -EINVAL; +} + +static int nvme_get_unique_id(struct gendisk *disk, u8 id[16], + enum blk_unique_id type) +{ + return nvme_ns_get_unique_id(disk->private_data, id, type); +} + #ifdef CONFIG_BLK_SED_OPAL static int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len, bool send) @@ -2285,6 +2368,7 @@ const struct block_device_operations nvme_bdev_ops = { .open = nvme_open, .release = nvme_release, .getgeo = nvme_getgeo, + .get_unique_id = nvme_get_unique_id, .report_zones = nvme_report_zones, .pr_ops = &nvme_pr_ops, }; @@ -3721,6 +3805,7 @@ static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns) static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info) { + struct queue_limits lim = { }; struct nvme_ns *ns; struct gendisk *disk; int node = ctrl->numa_node; @@ -3729,7 +3814,13 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info) if (!ns) return; - disk = blk_mq_alloc_disk(ctrl->tagset, NULL, ns); + if (ctrl->opts && ctrl->opts->data_digest) + lim.features |= BLK_FEAT_STABLE_WRITES; + if (ctrl->ops->supports_pci_p2pdma && + ctrl->ops->supports_pci_p2pdma(ctrl)) + lim.features |= BLK_FEAT_PCI_P2PDMA; + + disk = blk_mq_alloc_disk(ctrl->tagset, &lim, 
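
nvme_ns_get_unique_id() above prefers NGUID over EUI-64 and treats an all-zero identifier as absent, using the kernel's memchr_inv() for the all-zero test. A user-space stand-in with the same contract (the real helper is a kernel library function; semantics assumed identical):

#include <stdbool.h>
#include <stddef.h>

/* Returns the first byte differing from c, or NULL if all n bytes
 * match; so "is this ID all zero?" becomes a single call. */
static const void *memchr_inv_sketch(const void *p, int c, size_t n)
{
	const unsigned char *s = p;
	size_t i;

	for (i = 0; i < n; i++)
		if (s[i] != (unsigned char)c)
			return s + i;
	return NULL;
}

static bool id_present(const unsigned char *id, size_t len)
{
	return memchr_inv_sketch(id, 0, len) != NULL;
}
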
ns); if (IS_ERR(disk)) goto out_free_ns; disk->fops = &nvme_bdev_ops; @@ -3737,15 +3828,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info) ns->disk = disk; ns->queue = disk->queue; - - if (ctrl->opts && ctrl->opts->data_digest) - blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue); - - blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue); - if (ctrl->ops->supports_pci_p2pdma && - ctrl->ops->supports_pci_p2pdma(ctrl)) - blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue); - ns->ctrl = ctrl; kref_init(&ns->kref); @@ -3887,7 +3969,7 @@ static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid) static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_info *info) { - int ret = NVME_SC_INVALID_NS | NVME_SC_DNR; + int ret = NVME_SC_INVALID_NS | NVME_STATUS_DNR; if (!nvme_ns_ids_equal(&ns->head->ids, &info->ids)) { dev_err(ns->ctrl->device, @@ -3903,7 +3985,7 @@ out: * * TODO: we should probably schedule a delayed retry here. */ - if (ret > 0 && (ret & NVME_SC_DNR)) + if (ret > 0 && (ret & NVME_STATUS_DNR)) nvme_ns_remove(ns); } @@ -4095,7 +4177,7 @@ static void nvme_scan_work(struct work_struct *work) * they report) but don't actually support it. */ ret = nvme_scan_ns_list(ctrl); - if (ret > 0 && ret & NVME_SC_DNR) + if (ret > 0 && ret & NVME_STATUS_DNR) nvme_scan_ns_sequential(ctrl); } mutex_unlock(&ctrl->scan_lock); @@ -4489,13 +4571,15 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set, return ret; if (ctrl->ops->flags & NVME_F_FABRICS) { - ctrl->connect_q = blk_mq_alloc_queue(set, NULL, NULL); + struct queue_limits lim = { + .features = BLK_FEAT_SKIP_TAGSET_QUIESCE, + }; + + ctrl->connect_q = blk_mq_alloc_queue(set, &lim, NULL); if (IS_ERR(ctrl->connect_q)) { ret = PTR_ERR(ctrl->connect_q); goto out_free_tag_set; } - blk_queue_flag_set(QUEUE_FLAG_SKIP_TAGSET_QUIESCE, - ctrl->connect_q); } ctrl->tagset = set; @@ -4613,6 +4697,9 @@ static void nvme_free_ctrl(struct device *dev) * Initialize a NVMe controller structures. This needs to be called during * earliest initialization so that we have the initialized structured around * during probing. + * + * On success, the caller must use the nvme_put_ctrl() to release this when + * needed, which also invokes the ops->free_ctrl() callback. */ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, const struct nvme_ctrl_ops *ops, unsigned long quirks) @@ -4661,6 +4748,12 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, goto out; ctrl->instance = ret; + ret = nvme_auth_init_ctrl(ctrl); + if (ret) + goto out_release_instance; + + nvme_mpath_init_ctrl(ctrl); + device_initialize(&ctrl->ctrl_device); ctrl->device = &ctrl->ctrl_device; ctrl->device->devt = MKDEV(MAJOR(nvme_ctrl_base_chr_devt), @@ -4673,16 +4766,36 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, ctrl->device->groups = nvme_dev_attr_groups; ctrl->device->release = nvme_free_ctrl; dev_set_drvdata(ctrl->device, ctrl); + + return ret; + +out_release_instance: + ida_free(&nvme_instance_ida, ctrl->instance); +out: + if (ctrl->discard_page) + __free_page(ctrl->discard_page); + cleanup_srcu_struct(&ctrl->srcu); + return ret; +} +EXPORT_SYMBOL_GPL(nvme_init_ctrl); + +/* + * On success, returns with an elevated controller reference and caller must + * use nvme_uninit_ctrl() to properly free resources associated with the ctrl. 
+ */ +int nvme_add_ctrl(struct nvme_ctrl *ctrl) +{ + int ret; + ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance); if (ret) - goto out_release_instance; + return ret; - nvme_get_ctrl(ctrl); cdev_init(&ctrl->cdev, &nvme_dev_fops); - ctrl->cdev.owner = ops->module; + ctrl->cdev.owner = ctrl->ops->module; ret = cdev_device_add(&ctrl->cdev, ctrl->device); if (ret) - goto out_free_name; + return ret; /* * Initialize latency tolerance controls. The sysfs files won't @@ -4693,28 +4806,11 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, min(default_ps_max_latency_us, (unsigned long)S32_MAX)); nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device)); - nvme_mpath_init_ctrl(ctrl); - ret = nvme_auth_init_ctrl(ctrl); - if (ret) - goto out_free_cdev; + nvme_get_ctrl(ctrl); return 0; -out_free_cdev: - nvme_fault_inject_fini(&ctrl->fault_inject); - dev_pm_qos_hide_latency_tolerance(ctrl->device); - cdev_device_del(&ctrl->cdev, ctrl->device); -out_free_name: - nvme_put_ctrl(ctrl); - kfree_const(ctrl->device->kobj.name); -out_release_instance: - ida_free(&nvme_instance_ida, ctrl->instance); -out: - if (ctrl->discard_page) - __free_page(ctrl->discard_page); - cleanup_srcu_struct(&ctrl->srcu); - return ret; } -EXPORT_SYMBOL_GPL(nvme_init_ctrl); +EXPORT_SYMBOL_GPL(nvme_add_ctrl); /* let I/O to all namespaces fail in preparation for surprise removal */ void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl) diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index ceb9c0ed3120..44e342a46f39 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -187,7 +187,7 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val) if (unlikely(ret != 0)) dev_err(ctrl->device, "Property Get error: %d, offset %#x\n", - ret > 0 ? ret & ~NVME_SC_DNR : ret, off); + ret > 0 ? ret & ~NVME_STATUS_DNR : ret, off); return ret; } @@ -233,7 +233,7 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val) if (unlikely(ret != 0)) dev_err(ctrl->device, "Property Get error: %d, offset %#x\n", - ret > 0 ? ret & ~NVME_SC_DNR : ret, off); + ret > 0 ? ret & ~NVME_STATUS_DNR : ret, off); return ret; } EXPORT_SYMBOL_GPL(nvmf_reg_read64); @@ -275,11 +275,26 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val) if (unlikely(ret)) dev_err(ctrl->device, "Property Set error: %d, offset %#x\n", - ret > 0 ? ret & ~NVME_SC_DNR : ret, off); + ret > 0 ? ret & ~NVME_STATUS_DNR : ret, off); return ret; } EXPORT_SYMBOL_GPL(nvmf_reg_write32); +int nvmf_subsystem_reset(struct nvme_ctrl *ctrl) +{ + int ret; + + if (!nvme_wait_reset(ctrl)) + return -EBUSY; + + ret = ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, NVME_SUBSYS_RESET); + if (ret) + return ret; + + return nvme_try_sched_reset(ctrl); +} +EXPORT_SYMBOL_GPL(nvmf_subsystem_reset); + /** * nvmf_log_connect_error() - Error-parsing-diagnostic print out function for * connect() errors. 
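
nvmf_subsystem_reset() above gives every fabrics transport the same NSSR write path that PCIe gains later in this series. The value it writes, spelled inline as 0x4E564D65 in the old nvme_reset_subsystem() and named NVME_SUBSYS_RESET here, is simply the ASCII bytes 'NVMe', as this small check demonstrates:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint32_t nssr_magic = 0x4E564D65;	/* NVME_SUBSYS_RESET */

	assert((nssr_magic >> 24) == 'N');
	assert(((nssr_magic >> 16) & 0xff) == 'V');
	assert(((nssr_magic >> 8) & 0xff) == 'M');
	assert((nssr_magic & 0xff) == 'e');
	return 0;
}
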
@@ -295,7 +310,7 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl, int errval, int offset, struct nvme_command *cmd, struct nvmf_connect_data *data) { - int err_sctype = errval & ~NVME_SC_DNR; + int err_sctype = errval & ~NVME_STATUS_DNR; if (errval < 0) { dev_err(ctrl->device, @@ -573,7 +588,7 @@ EXPORT_SYMBOL_GPL(nvmf_connect_io_queue); */ bool nvmf_should_reconnect(struct nvme_ctrl *ctrl, int status) { - if (status > 0 && (status & NVME_SC_DNR)) + if (status > 0 && (status & NVME_STATUS_DNR)) return false; if (status == -EKEYREJECTED) diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h index 602135910ae9..21d75dc4a3a0 100644 --- a/drivers/nvme/host/fabrics.h +++ b/drivers/nvme/host/fabrics.h @@ -217,6 +217,7 @@ static inline unsigned int nvmf_nr_io_queues(struct nvmf_ctrl_options *opts) int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val); int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val); int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val); +int nvmf_subsystem_reset(struct nvme_ctrl *ctrl); int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl); int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid); int nvmf_register_transport(struct nvmf_transport_ops *ops); diff --git a/drivers/nvme/host/fault_inject.c b/drivers/nvme/host/fault_inject.c index 1ba10a5c656d..1d1b6441a339 100644 --- a/drivers/nvme/host/fault_inject.c +++ b/drivers/nvme/host/fault_inject.c @@ -75,7 +75,7 @@ void nvme_should_fail(struct request *req) /* inject status code and DNR bit */ status = fault_inject->status; if (fault_inject->dont_retry) - status |= NVME_SC_DNR; + status |= NVME_STATUS_DNR; nvme_req(req)->status = status; } } diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index f0b081332749..b81af7919e94 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -3132,7 +3132,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) if (ctrl->ctrl.icdoff) { dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n", ctrl->ctrl.icdoff); - ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + ret = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; goto out_stop_keep_alive; } @@ -3140,7 +3140,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) if (!nvme_ctrl_sgl_supported(&ctrl->ctrl)) { dev_err(ctrl->ctrl.device, "Mandatory sgls are not supported!\n"); - ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + ret = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; goto out_stop_keep_alive; } @@ -3325,7 +3325,7 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status) queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay); } else { if (portptr->port_state == FC_OBJSTATE_ONLINE) { - if (status > 0 && (status & NVME_SC_DNR)) + if (status > 0 && (status & NVME_STATUS_DNR)) dev_warn(ctrl->ctrl.device, "NVME-FC{%d}: reconnect failure\n", ctrl->cnum); @@ -3382,6 +3382,7 @@ static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = { .reg_read32 = nvmf_reg_read32, .reg_read64 = nvmf_reg_read64, .reg_write32 = nvmf_reg_write32, + .subsystem_reset = nvmf_subsystem_reset, .free_ctrl = nvme_fc_free_ctrl, .submit_async_event = nvme_fc_submit_async_event, .delete_ctrl = nvme_fc_delete_ctrl, @@ -3444,12 +3445,11 @@ nvme_fc_existing_controller(struct nvme_fc_rport *rport, return found; } -static struct nvme_ctrl * -nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, +static struct nvme_fc_ctrl * +nvme_fc_alloc_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, struct nvme_fc_lport *lport, struct nvme_fc_rport *rport) { struct 
nvme_fc_ctrl *ctrl; - unsigned long flags; int ret, idx, ctrl_loss_tmo; if (!(rport->remoteport.port_role & @@ -3538,7 +3538,35 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, if (lport->dev) ctrl->ctrl.numa_node = dev_to_node(lport->dev); - /* at this point, teardown path changes to ref counting on nvme ctrl */ + return ctrl; + +out_free_queues: + kfree(ctrl->queues); +out_free_ida: + put_device(ctrl->dev); + ida_free(&nvme_fc_ctrl_cnt, ctrl->cnum); +out_free_ctrl: + kfree(ctrl); +out_fail: + /* exit via here doesn't follow ctlr ref points */ + return ERR_PTR(ret); +} + +static struct nvme_ctrl * +nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, + struct nvme_fc_lport *lport, struct nvme_fc_rport *rport) +{ + struct nvme_fc_ctrl *ctrl; + unsigned long flags; + int ret; + + ctrl = nvme_fc_alloc_ctrl(dev, opts, lport, rport); + if (IS_ERR(ctrl)) + return ERR_CAST(ctrl); + + ret = nvme_add_ctrl(&ctrl->ctrl); + if (ret) + goto out_put_ctrl; ret = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set, &nvme_fc_admin_mq_ops, @@ -3584,6 +3612,7 @@ fail_ctrl: /* initiate nvme ctrl ref counting teardown */ nvme_uninit_ctrl(&ctrl->ctrl); +out_put_ctrl: /* Remove core ctrl ref. */ nvme_put_ctrl(&ctrl->ctrl); @@ -3597,20 +3626,8 @@ fail_ctrl: nvme_fc_rport_get(rport); return ERR_PTR(-EIO); - -out_free_queues: - kfree(ctrl->queues); -out_free_ida: - put_device(ctrl->dev); - ida_free(&nvme_fc_ctrl_cnt, ctrl->cnum); -out_free_ctrl: - kfree(ctrl); -out_fail: - /* exit via here doesn't follow ctlr ref points */ - return ERR_PTR(ret); } - struct nvmet_fc_traddr { u64 nn; u64 pn; diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index d8b6b4648eaf..91d9eb3c22ef 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -17,6 +17,7 @@ MODULE_PARM_DESC(multipath, static const char *nvme_iopolicy_names[] = { [NVME_IOPOLICY_NUMA] = "numa", [NVME_IOPOLICY_RR] = "round-robin", + [NVME_IOPOLICY_QD] = "queue-depth", }; static int iopolicy = NVME_IOPOLICY_NUMA; @@ -29,6 +30,8 @@ static int nvme_set_iopolicy(const char *val, const struct kernel_param *kp) iopolicy = NVME_IOPOLICY_NUMA; else if (!strncmp(val, "round-robin", 11)) iopolicy = NVME_IOPOLICY_RR; + else if (!strncmp(val, "queue-depth", 11)) + iopolicy = NVME_IOPOLICY_QD; else return -EINVAL; @@ -43,7 +46,7 @@ static int nvme_get_iopolicy(char *buf, const struct kernel_param *kp) module_param_call(iopolicy, nvme_set_iopolicy, nvme_get_iopolicy, &iopolicy, 0644); MODULE_PARM_DESC(iopolicy, - "Default multipath I/O policy; 'numa' (default) or 'round-robin'"); + "Default multipath I/O policy; 'numa' (default), 'round-robin' or 'queue-depth'"); void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys) { @@ -83,7 +86,7 @@ void nvme_mpath_start_freeze(struct nvme_subsystem *subsys) void nvme_failover_req(struct request *req) { struct nvme_ns *ns = req->q->queuedata; - u16 status = nvme_req(req)->status & 0x7ff; + u16 status = nvme_req(req)->status & NVME_SCT_SC_MASK; unsigned long flags; struct bio *bio; @@ -128,6 +131,11 @@ void nvme_mpath_start_request(struct request *rq) struct nvme_ns *ns = rq->q->queuedata; struct gendisk *disk = ns->head->disk; + if (READ_ONCE(ns->head->subsys->iopolicy) == NVME_IOPOLICY_QD) { + atomic_inc(&ns->ctrl->nr_active); + nvme_req(rq)->flags |= NVME_MPATH_CNT_ACTIVE; + } + if (!blk_queue_io_stat(disk->queue) || blk_rq_is_passthrough(rq)) return; @@ -141,6 +149,9 @@ void nvme_mpath_end_request(struct request *rq) { struct nvme_ns 
*ns = rq->q->queuedata; + if (nvme_req(rq)->flags & NVME_MPATH_CNT_ACTIVE) + atomic_dec_if_positive(&ns->ctrl->nr_active); + if (!(nvme_req(rq)->flags & NVME_MPATH_IO_STATS)) return; bdev_end_io_acct(ns->head->disk->part0, req_op(rq), @@ -291,10 +302,15 @@ static struct nvme_ns *nvme_next_ns(struct nvme_ns_head *head, return list_first_or_null_rcu(&head->list, struct nvme_ns, siblings); } -static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head, - int node, struct nvme_ns *old) +static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head) { struct nvme_ns *ns, *found = NULL; + int node = numa_node_id(); + struct nvme_ns *old = srcu_dereference(head->current_path[node], + &head->srcu); + + if (unlikely(!old)) + return __nvme_find_path(head, node); if (list_is_singular(&head->list)) { if (nvme_path_is_disabled(old)) @@ -334,13 +350,49 @@ out: return found; } +static struct nvme_ns *nvme_queue_depth_path(struct nvme_ns_head *head) +{ + struct nvme_ns *best_opt = NULL, *best_nonopt = NULL, *ns; + unsigned int min_depth_opt = UINT_MAX, min_depth_nonopt = UINT_MAX; + unsigned int depth; + + list_for_each_entry_rcu(ns, &head->list, siblings) { + if (nvme_path_is_disabled(ns)) + continue; + + depth = atomic_read(&ns->ctrl->nr_active); + + switch (ns->ana_state) { + case NVME_ANA_OPTIMIZED: + if (depth < min_depth_opt) { + min_depth_opt = depth; + best_opt = ns; + } + break; + case NVME_ANA_NONOPTIMIZED: + if (depth < min_depth_nonopt) { + min_depth_nonopt = depth; + best_nonopt = ns; + } + break; + default: + break; + } + + if (min_depth_opt == 0) + return best_opt; + } + + return best_opt ? best_opt : best_nonopt; +} + static inline bool nvme_path_is_optimized(struct nvme_ns *ns) { return nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE && ns->ana_state == NVME_ANA_OPTIMIZED; } -inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head) +static struct nvme_ns *nvme_numa_path(struct nvme_ns_head *head) { int node = numa_node_id(); struct nvme_ns *ns; @@ -348,14 +400,23 @@ inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head) ns = srcu_dereference(head->current_path[node], &head->srcu); if (unlikely(!ns)) return __nvme_find_path(head, node); - - if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_RR) - return nvme_round_robin_path(head, node, ns); if (unlikely(!nvme_path_is_optimized(ns))) return __nvme_find_path(head, node); return ns; } +inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head) +{ + switch (READ_ONCE(head->subsys->iopolicy)) { + case NVME_IOPOLICY_QD: + return nvme_queue_depth_path(head); + case NVME_IOPOLICY_RR: + return nvme_round_robin_path(head); + default: + return nvme_numa_path(head); + } +} + static bool nvme_available_path(struct nvme_ns_head *head) { struct nvme_ns *ns; @@ -427,6 +488,21 @@ static void nvme_ns_head_release(struct gendisk *disk) nvme_put_ns_head(disk->private_data); } +static int nvme_ns_head_get_unique_id(struct gendisk *disk, u8 id[16], + enum blk_unique_id type) +{ + struct nvme_ns_head *head = disk->private_data; + struct nvme_ns *ns; + int srcu_idx, ret = -EWOULDBLOCK; + + srcu_idx = srcu_read_lock(&head->srcu); + ns = nvme_find_path(head); + if (ns) + ret = nvme_ns_get_unique_id(ns, id, type); + srcu_read_unlock(&head->srcu, srcu_idx); + return ret; +} + #ifdef CONFIG_BLK_DEV_ZONED static int nvme_ns_head_report_zones(struct gendisk *disk, sector_t sector, unsigned int nr_zones, report_zones_cb cb, void *data) @@ -454,6 +530,7 @@ const struct block_device_operations nvme_ns_head_ops = { .ioctl = 
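
nvme_queue_depth_path() above is the core of the new queue-depth iopolicy: nr_active counts in-flight requests per controller, and the selector picks the least-loaded ANA-optimized path, falling back to the least-loaded non-optimized one. A stripped-down sketch of the selection loop; the types are illustrative, not the driver's:

#include <limits.h>
#include <stdbool.h>
#include <stddef.h>

struct path {
	int nr_active;		/* in-flight requests on this controller */
	bool optimized;		/* ANA optimized state */
	bool disabled;
};

static struct path *pick_least_busy(struct path *paths, int n)
{
	struct path *best_opt = NULL, *best_nonopt = NULL;
	int min_opt = INT_MAX, min_nonopt = INT_MAX;
	int i;

	for (i = 0; i < n; i++) {
		struct path *p = &paths[i];

		if (p->disabled)
			continue;
		if (p->optimized) {
			if (p->nr_active < min_opt) {
				min_opt = p->nr_active;
				best_opt = p;
			}
		} else if (p->nr_active < min_nonopt) {
			min_nonopt = p->nr_active;
			best_nonopt = p;
		}
		if (min_opt == 0)
			break;	/* an idle optimized path cannot be beaten */
	}
	return best_opt ? best_opt : best_nonopt;
}

Unlike round-robin, this adapts when one controller's queue backs up, at the cost of one atomic counter update per request, which is why the counter is only touched when the policy is active.
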
nvme_ns_head_ioctl, .compat_ioctl = blkdev_compat_ptr_ioctl, .getgeo = nvme_getgeo, + .get_unique_id = nvme_ns_head_get_unique_id, .report_zones = nvme_ns_head_report_zones, .pr_ops = &nvme_pr_ops, }; @@ -521,7 +598,6 @@ static void nvme_requeue_work(struct work_struct *work) int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) { struct queue_limits lim; - bool vwc = false; mutex_init(&head->lock); bio_list_init(&head->requeue_list); @@ -539,6 +615,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) blk_set_stacking_limits(&lim); lim.dma_alignment = 3; + lim.features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT | BLK_FEAT_POLL; if (head->ids.csi != NVME_CSI_ZNS) lim.max_zone_append_sectors = 0; @@ -549,24 +626,6 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) head->disk->private_data = head; sprintf(head->disk->disk_name, "nvme%dn%d", ctrl->subsys->instance, head->instance); - - blk_queue_flag_set(QUEUE_FLAG_NONROT, head->disk->queue); - blk_queue_flag_set(QUEUE_FLAG_NOWAIT, head->disk->queue); - blk_queue_flag_set(QUEUE_FLAG_IO_STAT, head->disk->queue); - /* - * This assumes all controllers that refer to a namespace either - * support poll queues or not. That is not a strict guarantee, - * but if the assumption is wrong the effect is only suboptimal - * performance but not correctness problem. - */ - if (ctrl->tagset->nr_maps > HCTX_TYPE_POLL && - ctrl->tagset->map[HCTX_TYPE_POLL].nr_queues) - blk_queue_flag_set(QUEUE_FLAG_POLL, head->disk->queue); - - /* we need to propagate up the VMC settings */ - if (ctrl->vwc & NVME_CTRL_VWC_PRESENT) - vwc = true; - blk_queue_write_cache(head->disk->queue, vwc, vwc); return 0; } @@ -803,6 +862,29 @@ static ssize_t nvme_subsys_iopolicy_show(struct device *dev, nvme_iopolicy_names[READ_ONCE(subsys->iopolicy)]); } +static void nvme_subsys_iopolicy_update(struct nvme_subsystem *subsys, + int iopolicy) +{ + struct nvme_ctrl *ctrl; + int old_iopolicy = READ_ONCE(subsys->iopolicy); + + if (old_iopolicy == iopolicy) + return; + + WRITE_ONCE(subsys->iopolicy, iopolicy); + + /* iopolicy changes clear the mpath by design */ + mutex_lock(&nvme_subsystems_lock); + list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) + nvme_mpath_clear_ctrl_paths(ctrl); + mutex_unlock(&nvme_subsystems_lock); + + pr_notice("subsysnqn %s iopolicy changed from %s to %s\n", + subsys->subnqn, + nvme_iopolicy_names[old_iopolicy], + nvme_iopolicy_names[iopolicy]); +} + static ssize_t nvme_subsys_iopolicy_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { @@ -812,7 +894,7 @@ static ssize_t nvme_subsys_iopolicy_store(struct device *dev, for (i = 0; i < ARRAY_SIZE(nvme_iopolicy_names); i++) { if (sysfs_streq(buf, nvme_iopolicy_names[i])) { - WRITE_ONCE(subsys->iopolicy, i); + nvme_subsys_iopolicy_update(subsys, i); return count; } } @@ -875,9 +957,6 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid) nvme_mpath_set_live(ns); } - if (blk_queue_stable_writes(ns->queue) && ns->head->disk) - blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, - ns->head->disk->queue); #ifdef CONFIG_BLK_DEV_ZONED if (blk_queue_is_zoned(ns->queue) && ns->head->disk) ns->head->disk->nr_zones = ns->disk->nr_zones; @@ -923,6 +1002,9 @@ int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) !(ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA)) return 0; + /* initialize this in the identify path to cover controller resets */ + atomic_set(&ctrl->nr_active, 0); + if 
(!ctrl->max_namespaces || ctrl->max_namespaces > le32_to_cpu(id->nn)) { dev_err(ctrl->device, diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 68b400f9c42d..f900e44243ae 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -49,6 +49,7 @@ extern unsigned int admin_timeout; extern struct workqueue_struct *nvme_wq; extern struct workqueue_struct *nvme_reset_wq; extern struct workqueue_struct *nvme_delete_wq; +extern struct mutex nvme_subsystems_lock; /* * List of workarounds for devices that required behavior not specified in @@ -195,6 +196,7 @@ enum { NVME_REQ_CANCELLED = (1 << 0), NVME_REQ_USERCMD = (1 << 1), NVME_MPATH_IO_STATS = (1 << 2), + NVME_MPATH_CNT_ACTIVE = (1 << 3), }; static inline struct nvme_request *nvme_req(struct request *req) @@ -360,6 +362,7 @@ struct nvme_ctrl { size_t ana_log_size; struct timer_list anatt_timer; struct work_struct ana_work; + atomic_t nr_active; #endif #ifdef CONFIG_NVME_HOST_AUTH @@ -408,6 +411,7 @@ static inline enum nvme_ctrl_state nvme_ctrl_state(struct nvme_ctrl *ctrl) enum nvme_iopolicy { NVME_IOPOLICY_NUMA, NVME_IOPOLICY_RR, + NVME_IOPOLICY_QD, }; struct nvme_subsystem { @@ -551,6 +555,7 @@ struct nvme_ctrl_ops { int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val); void (*free_ctrl)(struct nvme_ctrl *ctrl); void (*submit_async_event)(struct nvme_ctrl *ctrl); + int (*subsystem_reset)(struct nvme_ctrl *ctrl); void (*delete_ctrl)(struct nvme_ctrl *ctrl); void (*stop_ctrl)(struct nvme_ctrl *ctrl); int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size); @@ -649,18 +654,9 @@ int nvme_try_sched_reset(struct nvme_ctrl *ctrl); static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl) { - int ret; - - if (!ctrl->subsystem) + if (!ctrl->subsystem || !ctrl->ops->subsystem_reset) return -ENOTTY; - if (!nvme_wait_reset(ctrl)) - return -EBUSY; - - ret = ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65); - if (ret) - return ret; - - return nvme_try_sched_reset(ctrl); + return ctrl->ops->subsystem_reset(ctrl); } /* @@ -689,7 +685,7 @@ static inline u32 nvme_bytes_to_numd(size_t len) static inline bool nvme_is_ana_error(u16 status) { - switch (status & 0x7ff) { + switch (status & NVME_SCT_SC_MASK) { case NVME_SC_ANA_TRANSITION: case NVME_SC_ANA_INACCESSIBLE: case NVME_SC_ANA_PERSISTENT_LOSS: @@ -702,7 +698,7 @@ static inline bool nvme_is_ana_error(u16 status) static inline bool nvme_is_path_error(u16 status) { /* check for a status code type of 'path related status' */ - return (status & 0x700) == 0x300; + return (status & NVME_SCT_MASK) == NVME_SCT_PATH; } /* @@ -792,6 +788,7 @@ int nvme_disable_ctrl(struct nvme_ctrl *ctrl, bool shutdown); int nvme_enable_ctrl(struct nvme_ctrl *ctrl); int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, const struct nvme_ctrl_ops *ops, unsigned long quirks); +int nvme_add_ctrl(struct nvme_ctrl *ctrl); void nvme_uninit_ctrl(struct nvme_ctrl *ctrl); void nvme_start_ctrl(struct nvme_ctrl *ctrl); void nvme_stop_ctrl(struct nvme_ctrl *ctrl); @@ -877,7 +874,7 @@ enum { NVME_SUBMIT_NOWAIT = (__force nvme_submit_flags_t)(1 << 1), /* Set BLK_MQ_REQ_RESERVED when allocating request */ NVME_SUBMIT_RESERVED = (__force nvme_submit_flags_t)(1 << 2), - /* Retry command when NVME_SC_DNR is not set in the result */ + /* Retry command when NVME_STATUS_DNR is not set in the result */ NVME_SUBMIT_RETRY = (__force nvme_submit_flags_t)(1 << 3), }; @@ -1062,6 +1059,9 @@ static inline bool nvme_disk_is_ns_head(struct gendisk *disk) } #endif /* CONFIG_NVME_MULTIPATH */ +int 
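
The NVME_SC_* to NVME_STATUS_* renames and the new masks used throughout these hunks all describe the driver-side status word, the CQE status field with the phase bit shifted away. The mask values in this sketch are taken from the uses visible in this series (0x7ff, 0x700, 0x300, the >> 11 CRD shift); the MORE/DNR bit positions follow the long-standing NVME_SC_MORE/NVME_SC_DNR defines:

#include <assert.h>
#include <stdint.h>

#define SC_MASK		0x00ff	/* Status Code */
#define SCT_MASK	0x0700	/* Status Code Type */
#define SCT_SC_MASK	0x07ff	/* SCT + SC, e.g. for retry decisions */
#define CRD_SHIFT	11	/* Command Retry Delay index, 2 bits */
#define MORE_BIT	0x2000
#define DNR_BIT		0x4000	/* Do Not Retry */
#define SCT_PATH	0x0300	/* path-related status type */

int main(void)
{
	uint16_t status = 0x4302;	/* DNR | path-type SCT | SC 0x02 */

	assert(status & DNR_BIT);
	assert((status & SCT_MASK) == SCT_PATH);
	assert((status & SCT_SC_MASK) == 0x0302);
	assert(((status >> 8) & 7) == 3);	/* what NVME_SCT() extracts */
	return 0;
}
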
nvme_ns_get_unique_id(struct nvme_ns *ns, u8 id[16], + enum blk_unique_id type); + struct nvme_zone_info { u64 zone_size; unsigned int max_open_zones; diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 102a9fb0c65f..21f8e1b9801f 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -826,9 +826,9 @@ static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req, struct nvme_command *cmnd) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + struct bio_vec bv = rq_integrity_vec(req); - iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req), - rq_dma_dir(req), 0); + iod->meta_dma = dma_map_bvec(dev->dev, &bv, rq_dma_dir(req), 0); if (dma_mapping_error(dev->dev, iod->meta_dma)) return BLK_STS_IOERR; cmnd->rw.metadata = cpu_to_le64(iod->meta_dma); @@ -967,7 +967,7 @@ static __always_inline void nvme_pci_unmap_rq(struct request *req) struct nvme_iod *iod = blk_mq_rq_to_pdu(req); dma_unmap_page(dev->dev, iod->meta_dma, - rq_integrity_vec(req)->bv_len, rq_dma_dir(req)); + rq_integrity_vec(req).bv_len, rq_dma_dir(req)); } if (blk_rq_nr_phys_segments(req)) @@ -1143,6 +1143,41 @@ static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl) spin_unlock(&nvmeq->sq_lock); } +static int nvme_pci_subsystem_reset(struct nvme_ctrl *ctrl) +{ + struct nvme_dev *dev = to_nvme_dev(ctrl); + int ret = 0; + + /* + * Taking the shutdown_lock ensures the BAR mapping is not being + * altered by reset_work. Holding this lock before the RESETTING state + * change, if successful, also ensures nvme_remove won't be able to + * proceed to iounmap until we're done. + */ + mutex_lock(&dev->shutdown_lock); + if (!dev->bar_mapped_size) { + ret = -ENODEV; + goto unlock; + } + + if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) { + ret = -EBUSY; + goto unlock; + } + + writel(NVME_SUBSYS_RESET, dev->bar + NVME_REG_NSSR); + nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE); + + /* + * Read controller status to flush the previous write and trigger a + * pcie read error. 
+ */ + readl(dev->bar + NVME_REG_CSTS); +unlock: + mutex_unlock(&dev->shutdown_lock); + return ret; +} + static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id) { struct nvme_command c = { }; @@ -2859,6 +2894,7 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { .reg_read64 = nvme_pci_reg_read64, .free_ctrl = nvme_pci_free_ctrl, .submit_async_event = nvme_pci_submit_async_event, + .subsystem_reset = nvme_pci_subsystem_reset, .get_address = nvme_pci_get_address, .print_device_info = nvme_pci_print_device_info, .supports_pci_p2pdma = nvme_pci_supports_pci_p2pdma, @@ -3015,6 +3051,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (IS_ERR(dev)) return PTR_ERR(dev); + result = nvme_add_ctrl(&dev->ctrl); + if (result) + goto out_put_ctrl; + result = nvme_dev_map(dev); if (result) goto out_uninit_ctrl; @@ -3101,6 +3141,7 @@ out_dev_unmap: nvme_dev_unmap(dev); out_uninit_ctrl: nvme_uninit_ctrl(&dev->ctrl); +out_put_ctrl: nvme_put_ctrl(&dev->ctrl); return result; } diff --git a/drivers/nvme/host/pr.c b/drivers/nvme/host/pr.c index 8fa1ffcdaed4..7347ddf85f00 100644 --- a/drivers/nvme/host/pr.c +++ b/drivers/nvme/host/pr.c @@ -72,12 +72,12 @@ static int nvme_send_ns_pr_command(struct nvme_ns *ns, struct nvme_command *c, return nvme_submit_sync_cmd(ns->queue, c, data, data_len); } -static int nvme_sc_to_pr_err(int nvme_sc) +static int nvme_status_to_pr_err(int status) { - if (nvme_is_path_error(nvme_sc)) + if (nvme_is_path_error(status)) return PR_STS_PATH_FAILED; - switch (nvme_sc & 0x7ff) { + switch (status & NVME_SCT_SC_MASK) { case NVME_SC_SUCCESS: return PR_STS_SUCCESS; case NVME_SC_RESERVATION_CONFLICT: @@ -121,7 +121,7 @@ static int nvme_pr_command(struct block_device *bdev, u32 cdw10, if (ret < 0) return ret; - return nvme_sc_to_pr_err(ret); + return nvme_status_to_pr_err(ret); } static int nvme_pr_register(struct block_device *bdev, u64 old, @@ -196,7 +196,7 @@ retry: if (ret < 0) return ret; - return nvme_sc_to_pr_err(ret); + return nvme_status_to_pr_err(ret); } static int nvme_pr_read_keys(struct block_device *bdev, diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 51a62b0c645a..2eb33842f971 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -2201,6 +2201,7 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = { .reg_read32 = nvmf_reg_read32, .reg_read64 = nvmf_reg_read64, .reg_write32 = nvmf_reg_write32, + .subsystem_reset = nvmf_subsystem_reset, .free_ctrl = nvme_rdma_free_ctrl, .submit_async_event = nvme_rdma_submit_async_event, .delete_ctrl = nvme_rdma_delete_ctrl, @@ -2237,12 +2238,11 @@ nvme_rdma_existing_controller(struct nvmf_ctrl_options *opts) return found; } -static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, +static struct nvme_rdma_ctrl *nvme_rdma_alloc_ctrl(struct device *dev, struct nvmf_ctrl_options *opts) { struct nvme_rdma_ctrl *ctrl; int ret; - bool changed; ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); if (!ctrl) @@ -2304,6 +2304,30 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, if (ret) goto out_kfree_queues; + return ctrl; + +out_kfree_queues: + kfree(ctrl->queues); +out_free_ctrl: + kfree(ctrl); + return ERR_PTR(ret); +} + +static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, + struct nvmf_ctrl_options *opts) +{ + struct nvme_rdma_ctrl *ctrl; + bool changed; + int ret; + + ctrl = nvme_rdma_alloc_ctrl(dev, opts); + if (IS_ERR(ctrl)) + return ERR_CAST(ctrl); + + ret = nvme_add_ctrl(&ctrl->ctrl); + if (ret) + goto 
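
The rdma create_ctrl split here (and the tcp one below) follows the shape introduced for PCIe, Apple, and FC above: a pure allocation step, then nvme_add_ctrl(), then queue setup, each failure jumping to an unwind label in strict reverse order. A runnable schematic of that shape; every name is a hypothetical stand-in, not a kernel symbol:

#include <stdlib.h>

struct ctrl { int refs; };

static struct ctrl *ctrl_alloc(void)
{
	struct ctrl *c = calloc(1, sizeof(*c));

	if (c)
		c->refs = 1;	/* caller owns one reference */
	return c;
}

static void ctrl_put(struct ctrl *c)
{
	if (c && --c->refs == 0)
		free(c);
}

static int ctrl_add(struct ctrl *c) { (void)c; return 0; }	/* stub: cdev + name */
static void ctrl_uninit(struct ctrl *c) { (void)c; }		/* stub: undo ctrl_add */
static int setup_io_queues(struct ctrl *c) { (void)c; return -1; } /* stub: fails */

static struct ctrl *create_ctrl(void)
{
	struct ctrl *c = ctrl_alloc();

	if (!c)
		return NULL;
	if (ctrl_add(c))
		goto out_put;
	if (setup_io_queues(c))
		goto out_uninit;
	return c;

out_uninit:
	ctrl_uninit(c);	/* undoes ctrl_add() */
out_put:
	ctrl_put(c);	/* drops the allocation reference */
	return NULL;
}
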
out_put_ctrl; + changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING); WARN_ON_ONCE(!changed); @@ -2322,15 +2346,11 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, out_uninit_ctrl: nvme_uninit_ctrl(&ctrl->ctrl); +out_put_ctrl: nvme_put_ctrl(&ctrl->ctrl); if (ret > 0) ret = -EIO; return ERR_PTR(ret); -out_kfree_queues: - kfree(ctrl->queues); -out_free_ctrl: - kfree(ctrl); - return ERR_PTR(ret); } static struct nvmf_transport_ops nvme_rdma_transport = { diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 8b5e4327fe83..a2a47d3ab99f 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -2662,6 +2662,7 @@ static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = { .reg_read32 = nvmf_reg_read32, .reg_read64 = nvmf_reg_read64, .reg_write32 = nvmf_reg_write32, + .subsystem_reset = nvmf_subsystem_reset, .free_ctrl = nvme_tcp_free_ctrl, .submit_async_event = nvme_tcp_submit_async_event, .delete_ctrl = nvme_tcp_delete_ctrl, @@ -2686,7 +2687,7 @@ nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts) return found; } -static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev, +static struct nvme_tcp_ctrl *nvme_tcp_alloc_ctrl(struct device *dev, struct nvmf_ctrl_options *opts) { struct nvme_tcp_ctrl *ctrl; @@ -2761,6 +2762,28 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev, if (ret) goto out_kfree_queues; + return ctrl; +out_kfree_queues: + kfree(ctrl->queues); +out_free_ctrl: + kfree(ctrl); + return ERR_PTR(ret); +} + +static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev, + struct nvmf_ctrl_options *opts) +{ + struct nvme_tcp_ctrl *ctrl; + int ret; + + ctrl = nvme_tcp_alloc_ctrl(dev, opts); + if (IS_ERR(ctrl)) + return ERR_CAST(ctrl); + + ret = nvme_add_ctrl(&ctrl->ctrl); + if (ret) + goto out_put_ctrl; + if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { WARN_ON_ONCE(1); ret = -EINTR; @@ -2782,15 +2805,11 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev, out_uninit_ctrl: nvme_uninit_ctrl(&ctrl->ctrl); +out_put_ctrl: nvme_put_ctrl(&ctrl->ctrl); if (ret > 0) ret = -EIO; return ERR_PTR(ret); -out_kfree_queues: - kfree(ctrl->queues); -out_free_ctrl: - kfree(ctrl); - return ERR_PTR(ret); } static struct nvmf_transport_ops nvme_tcp_transport = { diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c index 77aa0f440a6d..9a06f9d98cd6 100644 --- a/drivers/nvme/host/zns.c +++ b/drivers/nvme/host/zns.c @@ -108,13 +108,12 @@ free_data: void nvme_update_zone_info(struct nvme_ns *ns, struct queue_limits *lim, struct nvme_zone_info *zi) { - lim->zoned = 1; + lim->features |= BLK_FEAT_ZONED; lim->max_open_zones = zi->max_open_zones; lim->max_active_zones = zi->max_active_zones; lim->max_zone_append_sectors = ns->ctrl->max_zone_append; lim->chunk_sectors = ns->head->zsze = nvme_lba_to_sect(ns->head, zi->zone_size); - blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ns->queue); } static void *nvme_zns_alloc_report_buffer(struct nvme_ns *ns, diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig index 872dd1a0acd8..46be031f91b4 100644 --- a/drivers/nvme/target/Kconfig +++ b/drivers/nvme/target/Kconfig @@ -6,7 +6,6 @@ config NVME_TARGET depends on CONFIGFS_FS select NVME_KEYRING if NVME_TARGET_TCP_TLS select KEYS if NVME_TARGET_TCP_TLS - select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY select SGL_ALLOC help This enabled target side support for the NVMe protocol, that is @@ -18,6 +17,15 @@ config NVME_TARGET To configure the NVMe target you probably want to 
use the nvmetcli tool from http://git.infradead.org/users/hch/nvmetcli.git. +config NVME_TARGET_DEBUGFS + bool "NVMe Target debugfs support" + depends on NVME_TARGET + help + This enables debugfs support to display the connected controllers + to each subsystem + + If unsure, say N. + config NVME_TARGET_PASSTHRU bool "NVMe Target Passthrough support" depends on NVME_TARGET diff --git a/drivers/nvme/target/Makefile b/drivers/nvme/target/Makefile index c66820102493..c402c44350b2 100644 --- a/drivers/nvme/target/Makefile +++ b/drivers/nvme/target/Makefile @@ -11,6 +11,7 @@ obj-$(CONFIG_NVME_TARGET_TCP) += nvmet-tcp.o nvmet-y += core.o configfs.o admin-cmd.o fabrics-cmd.o \ discovery.o io-cmd-file.o io-cmd-bdev.o +nvmet-$(CONFIG_NVME_TARGET_DEBUGFS) += debugfs.o nvmet-$(CONFIG_NVME_TARGET_PASSTHRU) += passthru.o nvmet-$(CONFIG_BLK_DEV_ZONED) += zns.o nvmet-$(CONFIG_NVME_TARGET_AUTH) += fabrics-cmd-auth.o auth.o diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index f5b7054a4a05..f7e1156ac7ec 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -344,7 +344,7 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req) pr_debug("unhandled lid %d on qid %d\n", req->cmd->get_log_page.lid, req->sq->qid); req->error_loc = offsetof(struct nvme_get_log_page_command, lid); - nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR); + nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_STATUS_DNR); } static void nvmet_execute_identify_ctrl(struct nvmet_req *req) @@ -496,7 +496,7 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req) if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) { req->error_loc = offsetof(struct nvme_identify, nsid); - status = NVME_SC_INVALID_NS | NVME_SC_DNR; + status = NVME_SC_INVALID_NS | NVME_STATUS_DNR; goto out; } @@ -662,7 +662,7 @@ static void nvmet_execute_identify_desclist(struct nvmet_req *req) if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off, off) != NVME_IDENTIFY_DATA_SIZE - off) - status = NVME_SC_INTERNAL | NVME_SC_DNR; + status = NVME_SC_INTERNAL | NVME_STATUS_DNR; out: nvmet_req_complete(req, status); @@ -724,7 +724,7 @@ static void nvmet_execute_identify(struct nvmet_req *req) pr_debug("unhandled identify cns %d on qid %d\n", req->cmd->identify.cns, req->sq->qid); req->error_loc = offsetof(struct nvme_identify, cns); - nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR); + nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_STATUS_DNR); } /* @@ -807,7 +807,7 @@ u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask) if (val32 & ~mask) { req->error_loc = offsetof(struct nvme_common_command, cdw11); - return NVME_SC_INVALID_FIELD | NVME_SC_DNR; + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; } WRITE_ONCE(req->sq->ctrl->aen_enabled, val32); @@ -833,7 +833,7 @@ void nvmet_execute_set_features(struct nvmet_req *req) ncqr = (cdw11 >> 16) & 0xffff; nsqr = cdw11 & 0xffff; if (ncqr == 0xffff || nsqr == 0xffff) { - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; break; } nvmet_set_result(req, @@ -846,14 +846,14 @@ void nvmet_execute_set_features(struct nvmet_req *req) status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL); break; case NVME_FEAT_HOST_ID: - status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR; + status = NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR; break; case NVME_FEAT_WRITE_PROTECT: status = nvmet_set_feat_write_protect(req); break; default: req->error_loc = offsetof(struct 
nvme_common_command, cdw10); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; break; } @@ -939,7 +939,7 @@ void nvmet_execute_get_features(struct nvmet_req *req) if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) { req->error_loc = offsetof(struct nvme_common_command, cdw11); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; break; } @@ -952,7 +952,7 @@ void nvmet_execute_get_features(struct nvmet_req *req) default: req->error_loc = offsetof(struct nvme_common_command, cdw10); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; break; } @@ -969,7 +969,7 @@ void nvmet_execute_async_event(struct nvmet_req *req) mutex_lock(&ctrl->lock); if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) { mutex_unlock(&ctrl->lock); - nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR); + nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_STATUS_DNR); return; } ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req; @@ -1006,7 +1006,7 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req) if (nvme_is_fabrics(cmd)) return nvmet_parse_fabrics_admin_cmd(req); if (unlikely(!nvmet_check_auth_status(req))) - return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR; + return NVME_SC_AUTH_REQUIRED | NVME_STATUS_DNR; if (nvmet_is_disc_subsys(nvmet_req_subsys(req))) return nvmet_parse_discovery_cmd(req); diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c index 7d2633940f9b..8bc3f431c77f 100644 --- a/drivers/nvme/target/auth.c +++ b/drivers/nvme/target/auth.c @@ -314,7 +314,7 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response, req->sq->dhchap_c1, challenge, shash_len); if (ret) - goto out_free_response; + goto out_free_challenge; } pr_debug("ctrl %d qid %d host response seq %u transaction %d\n", @@ -325,7 +325,7 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response, GFP_KERNEL); if (!shash) { ret = -ENOMEM; - goto out_free_response; + goto out_free_challenge; } shash->tfm = shash_tfm; ret = crypto_shash_init(shash); @@ -361,9 +361,10 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response, goto out; ret = crypto_shash_final(shash, response); out: + kfree(shash); +out_free_challenge: if (challenge != req->sq->dhchap_c1) kfree(challenge); - kfree(shash); out_free_response: nvme_auth_free_key(transformed_key); out_free_tfm: @@ -427,14 +428,14 @@ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response, req->sq->dhchap_c2, challenge, shash_len); if (ret) - goto out_free_response; + goto out_free_challenge; } shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(shash_tfm), GFP_KERNEL); if (!shash) { ret = -ENOMEM; - goto out_free_response; + goto out_free_challenge; } shash->tfm = shash_tfm; @@ -471,9 +472,10 @@ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response, goto out; ret = crypto_shash_final(shash, response); out: + kfree(shash); +out_free_challenge: if (challenge != req->sq->dhchap_c2) kfree(challenge); - kfree(shash); out_free_response: nvme_auth_free_key(transformed_key); out_free_tfm: diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 4ff460ba2826..ed2424f8a396 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -16,6 +16,7 @@ #include "trace.h" #include "nvmet.h" +#include "debugfs.h" struct kmem_cache *nvmet_bvec_cache; struct workqueue_struct *buffered_io_wq; @@ -55,18 +56,18 @@ inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno) return 
NVME_SC_SUCCESS; case -ENOSPC: req->error_loc = offsetof(struct nvme_rw_command, length); - return NVME_SC_CAP_EXCEEDED | NVME_SC_DNR; + return NVME_SC_CAP_EXCEEDED | NVME_STATUS_DNR; case -EREMOTEIO: req->error_loc = offsetof(struct nvme_rw_command, slba); - return NVME_SC_LBA_RANGE | NVME_SC_DNR; + return NVME_SC_LBA_RANGE | NVME_STATUS_DNR; case -EOPNOTSUPP: req->error_loc = offsetof(struct nvme_common_command, opcode); switch (req->cmd->common.opcode) { case nvme_cmd_dsm: case nvme_cmd_write_zeroes: - return NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR; + return NVME_SC_ONCS_NOT_SUPPORTED | NVME_STATUS_DNR; default: - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; } break; case -ENODATA: @@ -76,7 +77,7 @@ inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno) fallthrough; default: req->error_loc = offsetof(struct nvme_common_command, opcode); - return NVME_SC_INTERNAL | NVME_SC_DNR; + return NVME_SC_INTERNAL | NVME_STATUS_DNR; } } @@ -86,7 +87,7 @@ u16 nvmet_report_invalid_opcode(struct nvmet_req *req) req->sq->qid); req->error_loc = offsetof(struct nvme_common_command, opcode); - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; } static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port, @@ -97,7 +98,7 @@ u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf, { if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) { req->error_loc = offsetof(struct nvme_common_command, dptr); - return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR; + return NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR; } return 0; } @@ -106,7 +107,7 @@ u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len) { if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) { req->error_loc = offsetof(struct nvme_common_command, dptr); - return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR; + return NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR; } return 0; } @@ -115,7 +116,7 @@ u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len) { if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) { req->error_loc = offsetof(struct nvme_common_command, dptr); - return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR; + return NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR; } return 0; } @@ -145,7 +146,7 @@ static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl) while (ctrl->nr_async_event_cmds) { req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds]; mutex_unlock(&ctrl->lock); - nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR); + nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_STATUS_DNR); mutex_lock(&ctrl->lock); } mutex_unlock(&ctrl->lock); @@ -444,7 +445,7 @@ u16 nvmet_req_find_ns(struct nvmet_req *req) req->error_loc = offsetof(struct nvme_common_command, nsid); if (nvmet_subsys_nsid_exists(subsys, nsid)) return NVME_SC_INTERNAL_PATH_ERROR; - return NVME_SC_INVALID_NS | NVME_SC_DNR; + return NVME_SC_INVALID_NS | NVME_STATUS_DNR; } percpu_ref_get(&req->ns->ref); @@ -904,7 +905,7 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req) return nvmet_parse_fabrics_io_cmd(req); if (unlikely(!nvmet_check_auth_status(req))) - return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR; + return NVME_SC_AUTH_REQUIRED | NVME_STATUS_DNR; ret = nvmet_check_ctrl_status(req); if (unlikely(ret)) @@ -967,7 +968,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, /* no support for fused commands yet */ if (unlikely(flags & (NVME_CMD_FUSE_FIRST | 
NVME_CMD_FUSE_SECOND))) { req->error_loc = offsetof(struct nvme_common_command, flags); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; goto fail; } @@ -978,7 +979,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, */ if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) { req->error_loc = offsetof(struct nvme_common_command, flags); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; goto fail; } @@ -996,7 +997,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, trace_nvmet_req_init(req, req->cmd); if (unlikely(!percpu_ref_tryget_live(&sq->ref))) { - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; goto fail; } @@ -1023,7 +1024,7 @@ bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len) { if (unlikely(len != req->transfer_len)) { req->error_loc = offsetof(struct nvme_common_command, dptr); - nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR); + nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR); return false; } @@ -1035,7 +1036,7 @@ bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len) { if (unlikely(data_len > req->transfer_len)) { req->error_loc = offsetof(struct nvme_common_command, dptr); - nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR); + nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR); return false; } @@ -1304,18 +1305,18 @@ u16 nvmet_check_ctrl_status(struct nvmet_req *req) if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) { pr_err("got cmd %d while CC.EN == 0 on qid = %d\n", req->cmd->common.opcode, req->sq->qid); - return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR; + return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR; } if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) { pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n", req->cmd->common.opcode, req->sq->qid); - return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR; + return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR; } if (unlikely(!nvmet_check_auth_status(req))) { pr_warn("qid %d not authenticated\n", req->sq->qid); - return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR; + return NVME_SC_AUTH_REQUIRED | NVME_STATUS_DNR; } return 0; } @@ -1389,7 +1390,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, int ret; u16 status; - status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; + status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR; subsys = nvmet_find_get_subsys(req->port, subsysnqn); if (!subsys) { pr_warn("connect request for invalid subsystem %s!\n", @@ -1405,7 +1406,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, hostnqn, subsysnqn); req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn); up_read(&nvmet_config_sem); - status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR; + status = NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvme_common_command, dptr); goto out_put_subsystem; } @@ -1456,7 +1457,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, subsys->cntlid_min, subsys->cntlid_max, GFP_KERNEL); if (ret < 0) { - status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR; + status = NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR; goto out_free_sqs; } ctrl->cntlid = ret; @@ -1479,6 +1480,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, mutex_lock(&subsys->lock); list_add_tail(&ctrl->subsys_entry, &subsys->ctrls); nvmet_setup_p2p_ns_map(ctrl, req); + 
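/*
 * Editor's note on the NVME_SC_DNR -> NVME_STATUS_DNR churn running through
 * the target files above and below: the rename is mechanical. DNR is the
 * "Do Not Retry" bit of the NVMe completion status; it is still ORed into
 * the status value exactly as before, the macro was only renamed to make
 * clear that it is a status-field flag rather than a status code. The
 * companion change in pr.c swaps the magic 0x7ff mask for a named constant.
 * A minimal sketch of the two bit operations (not part of the patch):
 *
 *	u16 sc = status & NVME_SCT_SC_MASK;	// status code (and type) bits
 *	bool dnr = status & NVME_STATUS_DNR;	// host should not retry
 */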
nvmet_debugfs_ctrl_setup(ctrl); mutex_unlock(&subsys->lock); *ctrlp = ctrl; @@ -1513,6 +1515,8 @@ static void nvmet_ctrl_free(struct kref *ref) nvmet_destroy_auth(ctrl); + nvmet_debugfs_ctrl_free(ctrl); + ida_free(&cntlid_ida, ctrl->cntlid); nvmet_async_events_free(ctrl); @@ -1539,6 +1543,14 @@ void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl) } EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error); +ssize_t nvmet_ctrl_host_traddr(struct nvmet_ctrl *ctrl, + char *traddr, size_t traddr_len) +{ + if (!ctrl->ops->host_traddr) + return -EOPNOTSUPP; + return ctrl->ops->host_traddr(ctrl, traddr, traddr_len); +} + static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port, const char *subsysnqn) { @@ -1633,8 +1645,14 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn, INIT_LIST_HEAD(&subsys->ctrls); INIT_LIST_HEAD(&subsys->hosts); + ret = nvmet_debugfs_subsys_setup(subsys); + if (ret) + goto free_subsysnqn; + return subsys; +free_subsysnqn: + kfree(subsys->subsysnqn); free_fr: kfree(subsys->firmware_rev); free_mn: @@ -1651,6 +1669,8 @@ static void nvmet_subsys_free(struct kref *ref) WARN_ON_ONCE(!xa_empty(&subsys->namespaces)); + nvmet_debugfs_subsys_free(subsys); + xa_destroy(&subsys->namespaces); nvmet_passthru_subsys_free(subsys); @@ -1705,11 +1725,18 @@ static int __init nvmet_init(void) if (error) goto out_free_nvmet_work_queue; - error = nvmet_init_configfs(); + error = nvmet_init_debugfs(); if (error) goto out_exit_discovery; + + error = nvmet_init_configfs(); + if (error) + goto out_exit_debugfs; + return 0; +out_exit_debugfs: + nvmet_exit_debugfs(); out_exit_discovery: nvmet_exit_discovery(); out_free_nvmet_work_queue: @@ -1726,6 +1753,7 @@ out_destroy_bvec_cache: static void __exit nvmet_exit(void) { nvmet_exit_configfs(); + nvmet_exit_debugfs(); nvmet_exit_discovery(); ida_destroy(&cntlid_ida); destroy_workqueue(nvmet_wq); diff --git a/drivers/nvme/target/debugfs.c b/drivers/nvme/target/debugfs.c new file mode 100644 index 000000000000..cb2befc8619e --- /dev/null +++ b/drivers/nvme/target/debugfs.c @@ -0,0 +1,202 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DebugFS interface for the NVMe target. 
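+ *
+ * Editor's note (not part of the patch): from the debugfs_create_dir() and
+ * debugfs_create_file() calls below, the resulting hierarchy is
+ *
+ *   /sys/kernel/debug/nvmet/<subsysnqn>/ctrl<cntlid>/
+ *       port  hostnqn  kato  state  host_traddr
+ *
+ * where all files are read-only for root except "state", which also accepts
+ * the string "fatal" to trigger a controller fatal error.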
+ * Copyright (c) 2022-2024 Shadow + * Copyright (c) 2024 SUSE LLC + */ + +#include <linux/debugfs.h> +#include <linux/fs.h> +#include <linux/init.h> +#include <linux/kernel.h> + +#include "nvmet.h" +#include "debugfs.h" + +struct dentry *nvmet_debugfs; + +#define NVMET_DEBUGFS_ATTR(field) \ + static int field##_open(struct inode *inode, struct file *file) \ + { return single_open(file, field##_show, inode->i_private); } \ + \ + static const struct file_operations field##_fops = { \ + .open = field##_open, \ + .read = seq_read, \ + .release = single_release, \ + } + +#define NVMET_DEBUGFS_RW_ATTR(field) \ + static int field##_open(struct inode *inode, struct file *file) \ + { return single_open(file, field##_show, inode->i_private); } \ + \ + static const struct file_operations field##_fops = { \ + .open = field##_open, \ + .read = seq_read, \ + .write = field##_write, \ + .release = single_release, \ + } + +static int nvmet_ctrl_hostnqn_show(struct seq_file *m, void *p) +{ + struct nvmet_ctrl *ctrl = m->private; + + seq_puts(m, ctrl->hostnqn); + return 0; +} +NVMET_DEBUGFS_ATTR(nvmet_ctrl_hostnqn); + +static int nvmet_ctrl_kato_show(struct seq_file *m, void *p) +{ + struct nvmet_ctrl *ctrl = m->private; + + seq_printf(m, "%d\n", ctrl->kato); + return 0; +} +NVMET_DEBUGFS_ATTR(nvmet_ctrl_kato); + +static int nvmet_ctrl_port_show(struct seq_file *m, void *p) +{ + struct nvmet_ctrl *ctrl = m->private; + + seq_printf(m, "%d\n", le16_to_cpu(ctrl->port->disc_addr.portid)); + return 0; +} +NVMET_DEBUGFS_ATTR(nvmet_ctrl_port); + +static const char *const csts_state_names[] = { + [NVME_CSTS_RDY] = "ready", + [NVME_CSTS_CFS] = "fatal", + [NVME_CSTS_NSSRO] = "reset", + [NVME_CSTS_SHST_OCCUR] = "shutdown", + [NVME_CSTS_SHST_CMPLT] = "completed", + [NVME_CSTS_PP] = "paused", +}; + +static int nvmet_ctrl_state_show(struct seq_file *m, void *p) +{ + struct nvmet_ctrl *ctrl = m->private; + bool sep = false; + int i; + + for (i = 0; i < 7; i++) { + int state = BIT(i); + + if (!(ctrl->csts & state)) + continue; + if (sep) + seq_puts(m, "|"); + sep = true; + if (csts_state_names[state]) + seq_puts(m, csts_state_names[state]); + else + seq_printf(m, "%d", state); + } + if (sep) + seq_printf(m, "\n"); + return 0; +} + +static ssize_t nvmet_ctrl_state_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct seq_file *m = file->private_data; + struct nvmet_ctrl *ctrl = m->private; + char reset[16]; + + if (count >= sizeof(reset)) + return -EINVAL; + if (copy_from_user(reset, buf, count)) + return -EFAULT; + if (!memcmp(reset, "fatal", 5)) + nvmet_ctrl_fatal_error(ctrl); + else + return -EINVAL; + return count; +} +NVMET_DEBUGFS_RW_ATTR(nvmet_ctrl_state); + +static int nvmet_ctrl_host_traddr_show(struct seq_file *m, void *p) +{ + struct nvmet_ctrl *ctrl = m->private; + ssize_t size; + char buf[NVMF_TRADDR_SIZE + 1]; + + size = nvmet_ctrl_host_traddr(ctrl, buf, NVMF_TRADDR_SIZE); + if (size < 0) { + buf[0] = '\0'; + size = 0; + } + buf[size] = '\0'; + seq_printf(m, "%s\n", buf); + return 0; +} +NVMET_DEBUGFS_ATTR(nvmet_ctrl_host_traddr); + +int nvmet_debugfs_ctrl_setup(struct nvmet_ctrl *ctrl) +{ + char name[32]; + struct dentry *parent = ctrl->subsys->debugfs_dir; + int ret; + + if (!parent) + return -ENODEV; + snprintf(name, sizeof(name), "ctrl%d", ctrl->cntlid); + ctrl->debugfs_dir = debugfs_create_dir(name, parent); + if (IS_ERR(ctrl->debugfs_dir)) { + ret = PTR_ERR(ctrl->debugfs_dir); + ctrl->debugfs_dir = NULL; + return ret; + } + debugfs_create_file("port", S_IRUSR, 
ctrl->debugfs_dir, ctrl, + &nvmet_ctrl_port_fops); + debugfs_create_file("hostnqn", S_IRUSR, ctrl->debugfs_dir, ctrl, + &nvmet_ctrl_hostnqn_fops); + debugfs_create_file("kato", S_IRUSR, ctrl->debugfs_dir, ctrl, + &nvmet_ctrl_kato_fops); + debugfs_create_file("state", S_IRUSR | S_IWUSR, ctrl->debugfs_dir, ctrl, + &nvmet_ctrl_state_fops); + debugfs_create_file("host_traddr", S_IRUSR, ctrl->debugfs_dir, ctrl, + &nvmet_ctrl_host_traddr_fops); + return 0; +} + +void nvmet_debugfs_ctrl_free(struct nvmet_ctrl *ctrl) +{ + debugfs_remove_recursive(ctrl->debugfs_dir); +} + +int nvmet_debugfs_subsys_setup(struct nvmet_subsys *subsys) +{ + int ret = 0; + + subsys->debugfs_dir = debugfs_create_dir(subsys->subsysnqn, + nvmet_debugfs); + if (IS_ERR(subsys->debugfs_dir)) { + ret = PTR_ERR(subsys->debugfs_dir); + subsys->debugfs_dir = NULL; + } + return ret; +} + +void nvmet_debugfs_subsys_free(struct nvmet_subsys *subsys) +{ + debugfs_remove_recursive(subsys->debugfs_dir); +} + +int __init nvmet_init_debugfs(void) +{ + struct dentry *parent; + + parent = debugfs_create_dir("nvmet", NULL); + if (IS_ERR(parent)) { + pr_warn("%s: failed to create debugfs directory\n", "nvmet"); + return PTR_ERR(parent); + } + nvmet_debugfs = parent; + return 0; +} + +void nvmet_exit_debugfs(void) +{ + debugfs_remove_recursive(nvmet_debugfs); +} diff --git a/drivers/nvme/target/debugfs.h b/drivers/nvme/target/debugfs.h new file mode 100644 index 000000000000..cfb8bbf6a297 --- /dev/null +++ b/drivers/nvme/target/debugfs.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * DebugFS interface for the NVMe target. + * Copyright (c) 2022-2024 Shadow + * Copyright (c) 2024 SUSE LLC + */ +#ifndef NVMET_DEBUGFS_H +#define NVMET_DEBUGFS_H + +#include <linux/types.h> + +#ifdef CONFIG_NVME_TARGET_DEBUGFS +int nvmet_debugfs_subsys_setup(struct nvmet_subsys *subsys); +void nvmet_debugfs_subsys_free(struct nvmet_subsys *subsys); +int nvmet_debugfs_ctrl_setup(struct nvmet_ctrl *ctrl); +void nvmet_debugfs_ctrl_free(struct nvmet_ctrl *ctrl); + +int __init nvmet_init_debugfs(void); +void nvmet_exit_debugfs(void); +#else +static inline int nvmet_debugfs_subsys_setup(struct nvmet_subsys *subsys) +{ + return 0; +} +static inline void nvmet_debugfs_subsys_free(struct nvmet_subsys *subsys){} + +static inline int nvmet_debugfs_ctrl_setup(struct nvmet_ctrl *ctrl) +{ + return 0; +} +static inline void nvmet_debugfs_ctrl_free(struct nvmet_ctrl *ctrl) {} + +static inline int __init nvmet_init_debugfs(void) +{ + return 0; +} + +static inline void nvmet_exit_debugfs(void) {} + +#endif + +#endif /* NVMET_DEBUGFS_H */ diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c index ce54da8c6b36..28843df5fa7c 100644 --- a/drivers/nvme/target/discovery.c +++ b/drivers/nvme/target/discovery.c @@ -179,7 +179,7 @@ static void nvmet_execute_disc_get_log_page(struct nvmet_req *req) if (req->cmd->get_log_page.lid != NVME_LOG_DISC) { req->error_loc = offsetof(struct nvme_get_log_page_command, lid); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; goto out; } @@ -187,7 +187,7 @@ static void nvmet_execute_disc_get_log_page(struct nvmet_req *req) if (offset & 0x3) { req->error_loc = offsetof(struct nvme_get_log_page_command, lpo); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; goto out; } @@ -256,7 +256,7 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req) if (req->cmd->identify.cns != NVME_ID_CNS_CTRL) { req->error_loc 
= offsetof(struct nvme_identify, cns); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; goto out; } @@ -320,7 +320,7 @@ static void nvmet_execute_disc_set_features(struct nvmet_req *req) default: req->error_loc = offsetof(struct nvme_common_command, cdw10); - stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + stat = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; break; } @@ -345,7 +345,7 @@ static void nvmet_execute_disc_get_features(struct nvmet_req *req) default: req->error_loc = offsetof(struct nvme_common_command, cdw10); - stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + stat = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; break; } @@ -361,7 +361,7 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req) cmd->common.opcode); req->error_loc = offsetof(struct nvme_common_command, opcode); - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; } switch (cmd->common.opcode) { @@ -386,7 +386,7 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req) default: pr_debug("unhandled cmd %d\n", cmd->common.opcode); req->error_loc = offsetof(struct nvme_common_command, opcode); - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; } } diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c index cb34d644ed08..3f2857c17d95 100644 --- a/drivers/nvme/target/fabrics-cmd-auth.c +++ b/drivers/nvme/target/fabrics-cmd-auth.c @@ -189,26 +189,26 @@ void nvmet_execute_auth_send(struct nvmet_req *req) u8 dhchap_status; if (req->cmd->auth_send.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) { - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvmf_auth_send_command, secp); goto done; } if (req->cmd->auth_send.spsp0 != 0x01) { - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvmf_auth_send_command, spsp0); goto done; } if (req->cmd->auth_send.spsp1 != 0x01) { - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvmf_auth_send_command, spsp1); goto done; } tl = le32_to_cpu(req->cmd->auth_send.tl); if (!tl) { - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvmf_auth_send_command, tl); goto done; @@ -437,26 +437,26 @@ void nvmet_execute_auth_receive(struct nvmet_req *req) u16 status = 0; if (req->cmd->auth_receive.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) { - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvmf_auth_receive_command, secp); goto done; } if (req->cmd->auth_receive.spsp0 != 0x01) { - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvmf_auth_receive_command, spsp0); goto done; } if (req->cmd->auth_receive.spsp1 != 0x01) { - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvmf_auth_receive_command, spsp1); goto done; } al = le32_to_cpu(req->cmd->auth_receive.al); if (!al) { - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvmf_auth_receive_command, al); goto done; diff --git 
a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c index 69d77d34bec1..c4b2eddd5666 100644 --- a/drivers/nvme/target/fabrics-cmd.c +++ b/drivers/nvme/target/fabrics-cmd.c @@ -18,7 +18,7 @@ static void nvmet_execute_prop_set(struct nvmet_req *req) if (req->cmd->prop_set.attrib & 1) { req->error_loc = offsetof(struct nvmf_property_set_command, attrib); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; goto out; } @@ -29,7 +29,7 @@ static void nvmet_execute_prop_set(struct nvmet_req *req) default: req->error_loc = offsetof(struct nvmf_property_set_command, offset); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; } out: nvmet_req_complete(req, status); @@ -50,7 +50,7 @@ static void nvmet_execute_prop_get(struct nvmet_req *req) val = ctrl->cap; break; default: - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; break; } } else { @@ -65,7 +65,7 @@ static void nvmet_execute_prop_get(struct nvmet_req *req) val = ctrl->csts; break; default: - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; break; } } @@ -105,7 +105,7 @@ u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req) pr_debug("received unknown capsule type 0x%x\n", cmd->fabrics.fctype); req->error_loc = offsetof(struct nvmf_common_command, fctype); - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; } return 0; @@ -128,7 +128,7 @@ u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req) pr_debug("received unknown capsule type 0x%x\n", cmd->fabrics.fctype); req->error_loc = offsetof(struct nvmf_common_command, fctype); - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; } return 0; @@ -147,14 +147,14 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req) pr_warn("queue size zero!\n"); req->error_loc = offsetof(struct nvmf_connect_command, sqsize); req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize); - ret = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; + ret = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR; goto err; } if (ctrl->sqs[qid] != NULL) { pr_warn("qid %u has already been created\n", qid); req->error_loc = offsetof(struct nvmf_connect_command, qid); - return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR; + return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR; } /* for fabrics, this value applies to only the I/O Submission Queues */ @@ -163,14 +163,14 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req) sqsize, mqes, ctrl->cntlid); req->error_loc = offsetof(struct nvmf_connect_command, sqsize); req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize); - return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; + return NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR; } old = cmpxchg(&req->sq->ctrl, NULL, ctrl); if (old) { pr_warn("queue already connected!\n"); req->error_loc = offsetof(struct nvmf_connect_command, opcode); - return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR; + return NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR; } /* note: convert queue size from 0's-based value to 1's-based value */ @@ -230,14 +230,14 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req) pr_warn("invalid connect version (%d).\n", le16_to_cpu(c->recfmt)); req->error_loc = offsetof(struct nvmf_connect_command, recfmt); - status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR; + status = NVME_SC_CONNECT_FORMAT | 
NVME_STATUS_DNR; goto out; } if (unlikely(d->cntlid != cpu_to_le16(0xffff))) { pr_warn("connect attempt for invalid controller ID %#x\n", d->cntlid); - status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; + status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR; req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid); goto out; } @@ -257,7 +257,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req) dhchap_status); nvmet_ctrl_put(ctrl); if (dhchap_status == NVME_AUTH_DHCHAP_FAILURE_FAILED) - status = (NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR); + status = (NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR); else status = NVME_SC_INTERNAL; goto out; @@ -305,7 +305,7 @@ static void nvmet_execute_io_connect(struct nvmet_req *req) if (c->recfmt != 0) { pr_warn("invalid connect version (%d).\n", le16_to_cpu(c->recfmt)); - status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR; + status = NVME_SC_CONNECT_FORMAT | NVME_STATUS_DNR; goto out; } @@ -314,13 +314,13 @@ static void nvmet_execute_io_connect(struct nvmet_req *req) ctrl = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn, le16_to_cpu(d->cntlid), req); if (!ctrl) { - status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; + status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR; goto out; } if (unlikely(qid > ctrl->subsys->max_qid)) { pr_warn("invalid queue id (%d)\n", qid); - status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; + status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR; req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(qid); goto out_ctrl_put; } @@ -350,13 +350,13 @@ u16 nvmet_parse_connect_cmd(struct nvmet_req *req) pr_debug("invalid command 0x%x on unconnected queue.\n", cmd->fabrics.opcode); req->error_loc = offsetof(struct nvme_common_command, opcode); - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; } if (cmd->fabrics.fctype != nvme_fabrics_type_connect) { pr_debug("invalid capsule type 0x%x on unconnected queue.\n", cmd->fabrics.fctype); req->error_loc = offsetof(struct nvmf_common_command, fctype); - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; } if (cmd->connect.qid == 0) diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index 381b4394731f..3ef4beacde32 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c @@ -2934,6 +2934,38 @@ nvmet_fc_discovery_chg(struct nvmet_port *port) tgtport->ops->discovery_event(&tgtport->fc_target_port); } +static ssize_t +nvmet_fc_host_traddr(struct nvmet_ctrl *ctrl, + char *traddr, size_t traddr_size) +{ + struct nvmet_sq *sq = ctrl->sqs[0]; + struct nvmet_fc_tgt_queue *queue = + container_of(sq, struct nvmet_fc_tgt_queue, nvme_sq); + struct nvmet_fc_tgtport *tgtport = queue->assoc ? queue->assoc->tgtport : NULL; + struct nvmet_fc_hostport *hostport = queue->assoc ? 
queue->assoc->hostport : NULL; + u64 wwnn, wwpn; + ssize_t ret = 0; + + if (!tgtport || !nvmet_fc_tgtport_get(tgtport)) + return -ENODEV; + if (!hostport || !nvmet_fc_hostport_get(hostport)) { + ret = -ENODEV; + goto out_put; + } + + if (tgtport->ops->host_traddr) { + ret = tgtport->ops->host_traddr(hostport->hosthandle, &wwnn, &wwpn); + if (ret) + goto out_put_host; + ret = snprintf(traddr, traddr_size, "nn-0x%llx:pn-0x%llx", wwnn, wwpn); + } +out_put_host: + nvmet_fc_hostport_put(hostport); +out_put: + nvmet_fc_tgtport_put(tgtport); + return ret; +} + static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = { .owner = THIS_MODULE, .type = NVMF_TRTYPE_FC, @@ -2943,6 +2975,7 @@ static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = { .queue_response = nvmet_fc_fcp_nvme_cmd_done, .delete_ctrl = nvmet_fc_delete_ctrl, .discovery_chg = nvmet_fc_discovery_chg, + .host_traddr = nvmet_fc_host_traddr, }; static int __init nvmet_fc_init_module(void) diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c index 913cd2ec7a6f..e1abb27927ff 100644 --- a/drivers/nvme/target/fcloop.c +++ b/drivers/nvme/target/fcloop.c @@ -492,6 +492,16 @@ fcloop_t2h_host_release(void *hosthandle) /* host handle ignored for now */ } +static int +fcloop_t2h_host_traddr(void *hosthandle, u64 *wwnn, u64 *wwpn) +{ + struct fcloop_rport *rport = hosthandle; + + *wwnn = rport->lport->localport->node_name; + *wwpn = rport->lport->localport->port_name; + return 0; +} + /* * Simulate reception of RSCN and converting it to a initiator transport * call to rescan a remote port. @@ -1074,6 +1084,7 @@ static struct nvmet_fc_target_template tgttemplate = { .ls_req = fcloop_t2h_ls_req, .ls_abort = fcloop_t2h_ls_abort, .host_release = fcloop_t2h_host_release, + .host_traddr = fcloop_t2h_host_traddr, .max_hw_queues = FCLOOP_HW_QUEUES, .max_sgl_segments = FCLOOP_SGL_SEGS, .max_dif_sgl_segments = FCLOOP_SGL_SEGS, diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c index 6426aac2634a..0bda83d0fc3e 100644 --- a/drivers/nvme/target/io-cmd-bdev.c +++ b/drivers/nvme/target/io-cmd-bdev.c @@ -61,15 +61,17 @@ static void nvmet_bdev_ns_enable_integrity(struct nvmet_ns *ns) { struct blk_integrity *bi = bdev_get_integrity(ns->bdev); - if (bi) { + if (!bi) + return; + + if (bi->csum_type == BLK_INTEGRITY_CSUM_CRC) { ns->metadata_size = bi->tuple_size; - if (bi->profile == &t10_pi_type1_crc) + if (bi->flags & BLK_INTEGRITY_REF_TAG) ns->pi_type = NVME_NS_DPS_PI_TYPE1; - else if (bi->profile == &t10_pi_type3_crc) - ns->pi_type = NVME_NS_DPS_PI_TYPE3; else - /* Unsupported metadata type */ - ns->metadata_size = 0; + ns->pi_type = NVME_NS_DPS_PI_TYPE3; + } else { + ns->metadata_size = 0; } } @@ -102,7 +104,7 @@ int nvmet_bdev_ns_enable(struct nvmet_ns *ns) ns->pi_type = 0; ns->metadata_size = 0; - if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY_T10)) + if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) nvmet_bdev_ns_enable_integrity(ns); if (bdev_is_zoned(ns->bdev)) { @@ -135,11 +137,11 @@ u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts) */ switch (blk_sts) { case BLK_STS_NOSPC: - status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR; + status = NVME_SC_CAP_EXCEEDED | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvme_rw_command, length); break; case BLK_STS_TARGET: - status = NVME_SC_LBA_RANGE | NVME_SC_DNR; + status = NVME_SC_LBA_RANGE | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvme_rw_command, slba); break; case BLK_STS_NOTSUPP: @@ -147,10 +149,10 @@ u16 blk_to_nvme_status(struct nvmet_req 
*req, blk_status_t blk_sts) switch (req->cmd->common.opcode) { case nvme_cmd_dsm: case nvme_cmd_write_zeroes: - status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR; + status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_STATUS_DNR; break; default: - status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + status = NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; } break; case BLK_STS_MEDIUM: @@ -159,7 +161,7 @@ u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts) break; case BLK_STS_IOERR: default: - status = NVME_SC_INTERNAL | NVME_SC_DNR; + status = NVME_SC_INTERNAL | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvme_common_command, opcode); } @@ -356,7 +358,7 @@ u16 nvmet_bdev_flush(struct nvmet_req *req) return 0; if (blkdev_issue_flush(req->ns->bdev)) - return NVME_SC_INTERNAL | NVME_SC_DNR; + return NVME_SC_INTERNAL | NVME_STATUS_DNR; return 0; } diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index e589915ddef8..e32790d8fc26 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -555,6 +555,10 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev, goto out; } + ret = nvme_add_ctrl(&ctrl->ctrl); + if (ret) + goto out_put_ctrl; + if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) WARN_ON_ONCE(1); @@ -611,6 +615,7 @@ out_free_queues: kfree(ctrl->queues); out_uninit_ctrl: nvme_uninit_ctrl(&ctrl->ctrl); +out_put_ctrl: nvme_put_ctrl(&ctrl->ctrl); out: if (ret > 0) diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index 2f22b07eab29..190f55e6d753 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h @@ -230,7 +230,9 @@ struct nvmet_ctrl { struct device *p2p_client; struct radix_tree_root p2p_ns_map; - +#ifdef CONFIG_NVME_TARGET_DEBUGFS + struct dentry *debugfs_dir; +#endif spinlock_t error_lock; u64 err_counter; struct nvme_error_slot slots[NVMET_ERROR_LOG_SLOTS]; @@ -262,7 +264,9 @@ struct nvmet_subsys { struct list_head hosts; bool allow_any_host; - +#ifdef CONFIG_NVME_TARGET_DEBUGFS + struct dentry *debugfs_dir; +#endif u16 max_qid; u64 ver; @@ -350,6 +354,8 @@ struct nvmet_fabrics_ops { void (*delete_ctrl)(struct nvmet_ctrl *ctrl); void (*disc_traddr)(struct nvmet_req *req, struct nvmet_port *port, char *traddr); + ssize_t (*host_traddr)(struct nvmet_ctrl *ctrl, + char *traddr, size_t traddr_len); u16 (*install_queue)(struct nvmet_sq *nvme_sq); void (*discovery_chg)(struct nvmet_port *port); u8 (*get_mdts)(const struct nvmet_ctrl *ctrl); @@ -498,6 +504,8 @@ struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn, struct nvmet_req *req); void nvmet_ctrl_put(struct nvmet_ctrl *ctrl); u16 nvmet_check_ctrl_status(struct nvmet_req *req); +ssize_t nvmet_ctrl_host_traddr(struct nvmet_ctrl *ctrl, + char *traddr, size_t traddr_len); struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn, enum nvme_subsys_type type); diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c index f003782d4ecf..24d0e2418d2e 100644 --- a/drivers/nvme/target/passthru.c +++ b/drivers/nvme/target/passthru.c @@ -306,7 +306,7 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req) ns = nvme_find_get_ns(ctrl, nsid); if (unlikely(!ns)) { pr_err("failed to get passthru ns nsid:%u\n", nsid); - status = NVME_SC_INVALID_NS | NVME_SC_DNR; + status = NVME_SC_INVALID_NS | NVME_STATUS_DNR; goto out; } @@ -426,7 +426,7 @@ u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req) * emulated in the future if regular targets grow support for * this feature. 
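 * (Editor's note: the DNR bit in the status returned here tells the host
 * not to retry the unsupported reservation command.)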
*/ - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; } return nvmet_setup_passthru_command(req); @@ -478,7 +478,7 @@ static u16 nvmet_passthru_get_set_features(struct nvmet_req *req) case NVME_FEAT_RESV_PERSIST: /* No reservations, see nvmet_parse_passthru_io_cmd() */ default: - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; } } @@ -546,7 +546,7 @@ u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req) req->p.use_workqueue = true; return NVME_SC_SUCCESS; } - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; case NVME_ID_CNS_NS: req->execute = nvmet_passthru_execute_cmd; req->p.use_workqueue = true; @@ -558,7 +558,7 @@ u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req) req->p.use_workqueue = true; return NVME_SC_SUCCESS; } - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; default: return nvmet_setup_passthru_command(req); } diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 689bb5d3cfdc..1eff8ca6a5f1 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -852,12 +852,12 @@ static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp) if (!nvme_is_write(rsp->req.cmd)) { rsp->req.error_loc = offsetof(struct nvme_common_command, opcode); - return NVME_SC_INVALID_FIELD | NVME_SC_DNR; + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; } if (off + len > rsp->queue->dev->inline_data_size) { pr_err("invalid inline data offset!\n"); - return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR; + return NVME_SC_SGL_INVALID_OFFSET | NVME_STATUS_DNR; } /* no data command? */ @@ -919,7 +919,7 @@ static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp) pr_err("invalid SGL subtype: %#x\n", sgl->type); rsp->req.error_loc = offsetof(struct nvme_common_command, dptr); - return NVME_SC_INVALID_FIELD | NVME_SC_DNR; + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; } case NVME_KEY_SGL_FMT_DATA_DESC: switch (sgl->type & 0xf) { @@ -931,12 +931,12 @@ static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp) pr_err("invalid SGL subtype: %#x\n", sgl->type); rsp->req.error_loc = offsetof(struct nvme_common_command, dptr); - return NVME_SC_INVALID_FIELD | NVME_SC_DNR; + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; } default: pr_err("invalid SGL type: %#x\n", sgl->type); rsp->req.error_loc = offsetof(struct nvme_common_command, dptr); - return NVME_SC_SGL_INVALID_TYPE | NVME_SC_DNR; + return NVME_SC_SGL_INVALID_TYPE | NVME_STATUS_DNR; } } @@ -2000,6 +2000,17 @@ static void nvmet_rdma_disc_port_addr(struct nvmet_req *req, } } +static ssize_t nvmet_rdma_host_port_addr(struct nvmet_ctrl *ctrl, + char *traddr, size_t traddr_len) +{ + struct nvmet_sq *nvme_sq = ctrl->sqs[0]; + struct nvmet_rdma_queue *queue = + container_of(nvme_sq, struct nvmet_rdma_queue, nvme_sq); + + return snprintf(traddr, traddr_len, "%pISc", + (struct sockaddr *)&queue->cm_id->route.addr.dst_addr); +} + static u8 nvmet_rdma_get_mdts(const struct nvmet_ctrl *ctrl) { if (ctrl->pi_support) @@ -2024,6 +2035,7 @@ static const struct nvmet_fabrics_ops nvmet_rdma_ops = { .queue_response = nvmet_rdma_queue_response, .delete_ctrl = nvmet_rdma_delete_ctrl, .disc_traddr = nvmet_rdma_disc_port_addr, + .host_traddr = nvmet_rdma_host_port_addr, .get_mdts = nvmet_rdma_get_mdts, .get_max_queue_size = nvmet_rdma_get_max_queue_size, }; diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index 
380f22ee3ebb..5bff0d5464d1 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -416,10 +416,10 @@ static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd) if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET)) { if (!nvme_is_write(cmd->req.cmd)) - return NVME_SC_INVALID_FIELD | NVME_SC_DNR; + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; if (len > cmd->req.port->inline_data_size) - return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR; + return NVME_SC_SGL_INVALID_OFFSET | NVME_STATUS_DNR; cmd->pdu_len = len; } cmd->req.transfer_len += len; @@ -2167,6 +2167,19 @@ static void nvmet_tcp_disc_port_addr(struct nvmet_req *req, } } +static ssize_t nvmet_tcp_host_port_addr(struct nvmet_ctrl *ctrl, + char *traddr, size_t traddr_len) +{ + struct nvmet_sq *sq = ctrl->sqs[0]; + struct nvmet_tcp_queue *queue = + container_of(sq, struct nvmet_tcp_queue, nvme_sq); + + if (queue->sockaddr_peer.ss_family == AF_UNSPEC) + return -EINVAL; + return snprintf(traddr, traddr_len, "%pISc", + (struct sockaddr *)&queue->sockaddr_peer); +} + static const struct nvmet_fabrics_ops nvmet_tcp_ops = { .owner = THIS_MODULE, .type = NVMF_TRTYPE_TCP, @@ -2177,6 +2190,7 @@ static const struct nvmet_fabrics_ops nvmet_tcp_ops = { .delete_ctrl = nvmet_tcp_delete_ctrl, .install_queue = nvmet_tcp_install_queue, .disc_traddr = nvmet_tcp_disc_port_addr, + .host_traddr = nvmet_tcp_host_port_addr, }; static int __init nvmet_tcp_init(void) diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c index 0021d06041c1..af9e13be7678 100644 --- a/drivers/nvme/target/zns.c +++ b/drivers/nvme/target/zns.c @@ -100,7 +100,7 @@ void nvmet_execute_identify_ns_zns(struct nvmet_req *req) if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) { req->error_loc = offsetof(struct nvme_identify, nsid); - status = NVME_SC_INVALID_NS | NVME_SC_DNR; + status = NVME_SC_INVALID_NS | NVME_STATUS_DNR; goto out; } @@ -121,7 +121,7 @@ void nvmet_execute_identify_ns_zns(struct nvmet_req *req) } if (!bdev_is_zoned(req->ns->bdev)) { - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvme_identify, nsid); goto out; } @@ -158,17 +158,17 @@ static u16 nvmet_bdev_validate_zone_mgmt_recv(struct nvmet_req *req) if (sect >= get_capacity(req->ns->bdev->bd_disk)) { req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, slba); - return NVME_SC_LBA_RANGE | NVME_SC_DNR; + return NVME_SC_LBA_RANGE | NVME_STATUS_DNR; } if (out_bufsize < sizeof(struct nvme_zone_report)) { req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, numd); - return NVME_SC_INVALID_FIELD | NVME_SC_DNR; + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; } if (req->cmd->zmr.zra != NVME_ZRA_ZONE_REPORT) { req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, zra); - return NVME_SC_INVALID_FIELD | NVME_SC_DNR; + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; } switch (req->cmd->zmr.pr) { @@ -177,7 +177,7 @@ static u16 nvmet_bdev_validate_zone_mgmt_recv(struct nvmet_req *req) break; default: req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, pr); - return NVME_SC_INVALID_FIELD | NVME_SC_DNR; + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; } switch (req->cmd->zmr.zrasf) { @@ -193,7 +193,7 @@ static u16 nvmet_bdev_validate_zone_mgmt_recv(struct nvmet_req *req) default: req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, zrasf); - return NVME_SC_INVALID_FIELD | NVME_SC_DNR; + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; } return NVME_SC_SUCCESS; @@ -341,7 
+341,7 @@ static u16 blkdev_zone_mgmt_errno_to_nvme_status(int ret) return NVME_SC_SUCCESS; case -EINVAL: case -EIO: - return NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR; + return NVME_SC_ZONE_INVALID_TRANSITION | NVME_STATUS_DNR; default: return NVME_SC_INTERNAL; } @@ -463,7 +463,7 @@ static u16 nvmet_bdev_execute_zmgmt_send_all(struct nvmet_req *req) default: /* this is needed to quiet compiler warning */ req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa); - return NVME_SC_INVALID_FIELD | NVME_SC_DNR; + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; } return NVME_SC_SUCCESS; @@ -481,7 +481,7 @@ static void nvmet_bdev_zmgmt_send_work(struct work_struct *w) if (op == REQ_OP_LAST) { req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa); - status = NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR; + status = NVME_SC_ZONE_INVALID_TRANSITION | NVME_STATUS_DNR; goto out; } @@ -493,13 +493,13 @@ static void nvmet_bdev_zmgmt_send_work(struct work_struct *w) if (sect >= get_capacity(bdev->bd_disk)) { req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba); - status = NVME_SC_LBA_RANGE | NVME_SC_DNR; + status = NVME_SC_LBA_RANGE | NVME_STATUS_DNR; goto out; } if (sect & (zone_sectors - 1)) { req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; goto out; } @@ -551,13 +551,13 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req) if (sect >= get_capacity(req->ns->bdev->bd_disk)) { req->error_loc = offsetof(struct nvme_rw_command, slba); - status = NVME_SC_LBA_RANGE | NVME_SC_DNR; + status = NVME_SC_LBA_RANGE | NVME_STATUS_DNR; goto out; } if (sect & (bdev_zone_sectors(req->ns->bdev) - 1)) { req->error_loc = offsetof(struct nvme_rw_command, slba); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; goto out; } @@ -590,7 +590,7 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req) } if (total_len != nvmet_rw_data_len(req)) { - status = NVME_SC_INTERNAL | NVME_SC_DNR; + status = NVME_SC_INTERNAL | NVME_STATUS_DNR; goto out_put_bio; } diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index e1ec3b7200d7..f8dd7eb40fbe 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -396,10 +396,9 @@ static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem, if (!config->base_dev) return -EINVAL; - if (config->type == NVMEM_TYPE_FRAM) - bin_attr_nvmem_eeprom_compat.attr.name = "fram"; - nvmem->eeprom = bin_attr_nvmem_eeprom_compat; + if (config->type == NVMEM_TYPE_FRAM) + nvmem->eeprom.attr.name = "fram"; nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem); nvmem->eeprom.size = nvmem->size; #ifdef CONFIG_DEBUG_LOCK_ALLOC @@ -463,7 +462,7 @@ static int nvmem_populate_sysfs_cells(struct nvmem_device *nvmem) "%s@%x,%x", entry->name, entry->offset, entry->bit_offset); - attrs[i].attr.mode = 0444; + attrs[i].attr.mode = 0444 & nvmem_bin_attr_get_umode(nvmem); attrs[i].size = entry->bytes; attrs[i].read = &nvmem_cell_attr_read; attrs[i].private = entry; diff --git a/drivers/nvmem/meson-efuse.c b/drivers/nvmem/meson-efuse.c index 33678d0af2c2..6c2f80e166e2 100644 --- a/drivers/nvmem/meson-efuse.c +++ b/drivers/nvmem/meson-efuse.c @@ -18,18 +18,24 @@ static int meson_efuse_read(void *context, unsigned int offset, void *val, size_t bytes) { struct meson_sm_firmware *fw = context; + int ret; - return meson_sm_call_read(fw, (u8 *)val, bytes, SM_EFUSE_READ, offset, - bytes, 0, 0, 0); + ret = 
meson_sm_call_read(fw, (u8 *)val, bytes, SM_EFUSE_READ, offset, + bytes, 0, 0, 0); + + return ret < 0 ? ret : 0; } static int meson_efuse_write(void *context, unsigned int offset, void *val, size_t bytes) { struct meson_sm_firmware *fw = context; + int ret; + + ret = meson_sm_call_write(fw, (u8 *)val, bytes, SM_EFUSE_WRITE, offset, + bytes, 0, 0, 0); - return meson_sm_call_write(fw, (u8 *)val, bytes, SM_EFUSE_WRITE, offset, - bytes, 0, 0, 0); + return ret < 0 ? ret : 0; } static const struct of_device_id meson_efuse_match[] = { diff --git a/drivers/nvmem/rmem.c b/drivers/nvmem/rmem.c index 752d0bf4445e..7f907c5a445e 100644 --- a/drivers/nvmem/rmem.c +++ b/drivers/nvmem/rmem.c @@ -46,7 +46,10 @@ static int rmem_read(void *context, unsigned int offset, memunmap(addr); - return count; + if (count < 0) + return count; + + return count == bytes ? 0 : -EIO; } static int rmem_probe(struct platform_device *pdev) diff --git a/drivers/of/irq.c b/drivers/of/irq.c index 462375b293e4..c94203ce65bb 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c @@ -81,7 +81,8 @@ EXPORT_SYMBOL_GPL(of_irq_find_parent); /* * These interrupt controllers abuse interrupt-map for unspeakable * reasons and rely on the core code to *ignore* it (the drivers do - * their own parsing of the property). + * their own parsing of the property). The PAsemi entry covers a + * non-sensical interrupt-map that is better left ignored. * * If you think of adding to the list for something *new*, think * again. There is a high chance that you will be sent back to the @@ -95,6 +96,7 @@ static const char * const of_irq_imap_abusers[] = { "fsl,ls1043a-extirq", "fsl,ls1088a-extirq", "renesas,rza1-irqc", + "pasemi,rootbus", NULL, }; @@ -293,20 +295,8 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq) imaplen -= imap - oldimap; pr_debug(" -> imaplen=%d\n", imaplen); } - if (!match) { - if (intc) { - /* - * The PASEMI Nemo is a known offender, so - * let's only warn for anyone else. - */ - WARN(!IS_ENABLED(CONFIG_PPC_PASEMI), - "%pOF interrupt-map failed, using interrupt-controller\n", - ipar); - return 0; - } - + if (!match) goto fail; - } /* * Successfully parsed an interrupt-map translation; copy new diff --git a/drivers/opp/core.c b/drivers/opp/core.c index cb4611fe1b5b..5f4598246a87 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -1102,8 +1102,7 @@ static int _set_required_opps(struct device *dev, struct opp_table *opp_table, return 0; } -static int _set_opp_level(struct device *dev, struct opp_table *opp_table, - struct dev_pm_opp *opp) +static int _set_opp_level(struct device *dev, struct dev_pm_opp *opp) { unsigned int level = 0; int ret = 0; @@ -1171,7 +1170,7 @@ static int _disable_opp_table(struct device *dev, struct opp_table *opp_table) if (opp_table->regulators) regulator_disable(opp_table->regulators[0]); - ret = _set_opp_level(dev, opp_table, NULL); + ret = _set_opp_level(dev, NULL); if (ret) goto out; @@ -1220,7 +1219,7 @@ static int _set_opp(struct device *dev, struct opp_table *opp_table, return ret; } - ret = _set_opp_level(dev, opp_table, opp); + ret = _set_opp_level(dev, opp); if (ret) return ret; @@ -1267,7 +1266,7 @@ static int _set_opp(struct device *dev, struct opp_table *opp_table, return ret; } - ret = _set_opp_level(dev, opp_table, opp); + ret = _set_opp_level(dev, opp); if (ret) return ret; @@ -2443,8 +2442,10 @@ static int _opp_attach_genpd(struct opp_table *opp_table, struct device *dev, * Cross check it again and fix if required. 
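 * (Editor's note: the error-path change below routes the PTR_ERR() failure
 * through the shared "err" unwind label instead of returning directly,
 * presumably so genpd devices attached earlier in the loop are detached
 * again; the label body itself is outside this hunk, so that is assumed.)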
*/ gdev = dev_to_genpd_dev(virt_dev); - if (IS_ERR(gdev)) - return PTR_ERR(gdev); + if (IS_ERR(gdev)) { + ret = PTR_ERR(gdev); + goto err; + } genpd_table = _find_opp_table(gdev); if (!IS_ERR(genpd_table)) { diff --git a/drivers/opp/of.c b/drivers/opp/of.c index 282eb5966fd0..55c8cfef97d4 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -1444,6 +1444,38 @@ put_required_np: EXPORT_SYMBOL_GPL(of_get_required_opp_performance_state); /** + * dev_pm_opp_of_has_required_opp - Find out if a required-opps exists. + * @dev: The device to investigate. + * + * Returns true if the device's node has a "operating-points-v2" property and if + * the corresponding node for the opp-table describes opp nodes that uses the + * "required-opps" property. + * + * Return: True if a required-opps is present, else false. + */ +bool dev_pm_opp_of_has_required_opp(struct device *dev) +{ + struct device_node *opp_np, *np; + int count; + + opp_np = _opp_of_get_opp_desc_node(dev->of_node, 0); + if (!opp_np) + return false; + + np = of_get_next_available_child(opp_np, NULL); + of_node_put(opp_np); + if (!np) { + dev_warn(dev, "Empty OPP table\n"); + return false; + } + + count = of_count_phandle_with_args(np, "required-opps", NULL); + of_node_put(np); + + return count > 0; +} + +/** * dev_pm_opp_get_of_node() - Gets the DT node corresponding to an opp * @opp: opp for which DT node has to be returned for * diff --git a/drivers/opp/ti-opp-supply.c b/drivers/opp/ti-opp-supply.c index e3b97cd1fbbf..ec0056a4bb13 100644 --- a/drivers/opp/ti-opp-supply.c +++ b/drivers/opp/ti-opp-supply.c @@ -393,10 +393,12 @@ static int ti_opp_supply_probe(struct platform_device *pdev) } ret = dev_pm_opp_set_config_regulators(cpu_dev, ti_opp_config_regulators); - if (ret < 0) + if (ret < 0) { _free_optimized_voltages(dev, &opp_data); + return ret; + } - return ret; + return 0; } static struct platform_driver ti_opp_supply_driver = { diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index d35001589d88..aa4d1833f442 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig @@ -296,5 +296,6 @@ source "drivers/pci/hotplug/Kconfig" source "drivers/pci/controller/Kconfig" source "drivers/pci/endpoint/Kconfig" source "drivers/pci/switch/Kconfig" +source "drivers/pci/pwrctl/Kconfig" endif diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile index 175302036890..8ddad57934a6 100644 --- a/drivers/pci/Makefile +++ b/drivers/pci/Makefile @@ -9,6 +9,7 @@ obj-$(CONFIG_PCI) += access.o bus.o probe.o host-bridge.o \ obj-$(CONFIG_PCI) += msi/ obj-$(CONFIG_PCI) += pcie/ +obj-$(CONFIG_PCI) += pwrctl/ ifdef CONFIG_PCI obj-$(CONFIG_PROC_FS) += proc.o diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index 826b5016a101..8765e2d2aafa 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c @@ -12,6 +12,7 @@ #include <linux/errno.h> #include <linux/ioport.h> #include <linux/of.h> +#include <linux/of_platform.h> #include <linux/proc_fs.h> #include <linux/slab.h> @@ -354,6 +355,14 @@ void pci_bus_add_device(struct pci_dev *dev) pci_warn(dev, "device attach failed (%d)\n", retval); pci_dev_assign_added(dev, true); + + if (IS_ENABLED(CONFIG_OF) && pci_is_bridge(dev)) { + retval = of_platform_populate(dev->dev.of_node, NULL, NULL, + &dev->dev); + if (retval) + pci_err(dev, "failed to populate child OF nodes (%d)\n", + retval); + } } EXPORT_SYMBOL_GPL(pci_bus_add_device); diff --git a/drivers/pci/of.c b/drivers/pci/of.c index 51e3dd0ea5ab..b908fe1ae951 100644 --- a/drivers/pci/of.c +++ b/drivers/pci/of.c @@ -6,6 +6,7 @@ */ #define pr_fmt(fmt) "PCI: OF: " fmt 
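/*
 * Editor's note on the <linux/cleanup.h> include added next: it supplies the
 * scope-based cleanup helpers that the reworked pci_set_of_node() below
 * relies on. A pointer declared with __free(device_node) has of_node_put()
 * called on it automatically when it goes out of scope, and no_free_ptr()
 * releases ownership so the pointer survives the scope. A minimal sketch,
 * with the path and the use() consumer being assumptions:
 *
 *	struct device_node *np __free(device_node) =
 *		of_find_node_by_path("/chosen");
 *	if (!np)
 *		return;		// no explicit of_node_put() needed here
 *	use(np);		// np is still put automatically on return
 */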
+#include <linux/cleanup.h> #include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/pci.h> @@ -13,6 +14,7 @@ #include <linux/of_irq.h> #include <linux/of_address.h> #include <linux/of_pci.h> +#include <linux/platform_device.h> #include "pci.h" #ifdef CONFIG_PCI @@ -25,16 +27,20 @@ */ int pci_set_of_node(struct pci_dev *dev) { - struct device_node *node; - if (!dev->bus->dev.of_node) return 0; - node = of_pci_find_child_device(dev->bus->dev.of_node, dev->devfn); + struct device_node *node __free(device_node) = + of_pci_find_child_device(dev->bus->dev.of_node, dev->devfn); if (!node) return 0; - device_set_node(&dev->dev, of_fwnode_handle(node)); + struct device *pdev __free(put_device) = + bus_find_device_by_of_node(&platform_bus_type, node); + if (pdev) + dev->bus->dev.of_node_reused = true; + + device_set_node(&dev->dev, of_fwnode_handle(no_free_ptr(node))); return 0; } diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 5fbabb4e3425..4c367f13acdc 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -3069,7 +3069,9 @@ int pci_host_probe(struct pci_host_bridge *bridge) struct pci_bus *bus, *child; int ret; + pci_lock_rescan_remove(); ret = pci_scan_root_bus_bridge(bridge); + pci_unlock_rescan_remove(); if (ret < 0) { dev_err(bridge->dev.parent, "Scanning root bridge failed"); return ret; diff --git a/drivers/pci/pwrctl/Kconfig b/drivers/pci/pwrctl/Kconfig new file mode 100644 index 000000000000..f1b824955d4b --- /dev/null +++ b/drivers/pci/pwrctl/Kconfig @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: GPL-2.0-only + +menu "PCI Power control drivers" + +config PCI_PWRCTL + tristate + +config PCI_PWRCTL_PWRSEQ + tristate "PCI Power Control driver using the Power Sequencing subsystem" + select POWER_SEQUENCING + select PCI_PWRCTL + default m if ((ATH11K_PCI || ATH12K) && ARCH_QCOM) + help + Enable support for the PCI power control driver for device + drivers using the Power Sequencing subsystem. + +endmenu diff --git a/drivers/pci/pwrctl/Makefile b/drivers/pci/pwrctl/Makefile new file mode 100644 index 000000000000..d308aae4800c --- /dev/null +++ b/drivers/pci/pwrctl/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0-only + +obj-$(CONFIG_PCI_PWRCTL) += pci-pwrctl-core.o +pci-pwrctl-core-y := core.o + +obj-$(CONFIG_PCI_PWRCTL_PWRSEQ) += pci-pwrctl-pwrseq.o diff --git a/drivers/pci/pwrctl/core.c b/drivers/pci/pwrctl/core.c new file mode 100644 index 000000000000..feca26ad2f6a --- /dev/null +++ b/drivers/pci/pwrctl/core.c @@ -0,0 +1,137 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2024 Linaro Ltd. + */ + +#include <linux/device.h> +#include <linux/export.h> +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/pci-pwrctl.h> +#include <linux/property.h> +#include <linux/slab.h> + +static int pci_pwrctl_notify(struct notifier_block *nb, unsigned long action, + void *data) +{ + struct pci_pwrctl *pwrctl = container_of(nb, struct pci_pwrctl, nb); + struct device *dev = data; + + if (dev_fwnode(dev) != dev_fwnode(pwrctl->dev)) + return NOTIFY_DONE; + + switch (action) { + case BUS_NOTIFY_ADD_DEVICE: + /* + * We will have two struct device objects bound to two different + * drivers on different buses but consuming the same DT node. We + * must not bind the pins twice in this case but only once for + * the first device to be added. + * + * If we got here then the PCI device is the second after the + * power control platform device. Mark its OF node as reused. 
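+ *
+ * Marking the node as reused also makes the driver core skip the
+ * default pinctrl setup for this device, so the pins stay claimed
+ * by the platform device that bound first.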
+ */
+ dev->of_node_reused = true;
+ break;
+ case BUS_NOTIFY_BOUND_DRIVER:
+ pwrctl->link = device_link_add(dev, pwrctl->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER);
+ if (!pwrctl->link)
+ dev_err(pwrctl->dev, "Failed to add device link\n");
+ break;
+ case BUS_NOTIFY_UNBOUND_DRIVER:
+ if (pwrctl->link)
+ device_link_remove(dev, pwrctl->dev);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+/**
+ * pci_pwrctl_device_set_ready() - Notify the pwrctl subsystem that the PCI
+ * device is powered up and ready to be detected.
+ *
+ * @pwrctl: PCI power control data.
+ *
+ * Returns:
+ * 0 on success, negative error number on error.
+ *
+ * Note:
+ * This function returning 0 doesn't mean the device was detected. It means
+ * that the bus rescan was successfully started. The device will get bound to
+ * its PCI driver asynchronously.
+ */
+int pci_pwrctl_device_set_ready(struct pci_pwrctl *pwrctl)
+{
+ int ret;
+
+ if (!pwrctl->dev)
+ return -ENODEV;
+
+ pwrctl->nb.notifier_call = pci_pwrctl_notify;
+ ret = bus_register_notifier(&pci_bus_type, &pwrctl->nb);
+ if (ret)
+ return ret;
+
+ pci_lock_rescan_remove();
+ pci_rescan_bus(to_pci_dev(pwrctl->dev->parent)->bus);
+ pci_unlock_rescan_remove();
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_pwrctl_device_set_ready);
+
+/**
+ * pci_pwrctl_device_unset_ready() - Notify the pwrctl subsystem that the PCI
+ * device is about to be powered down.
+ *
+ * @pwrctl: PCI power control data.
+ */
+void pci_pwrctl_device_unset_ready(struct pci_pwrctl *pwrctl)
+{
+ /*
+ * We don't have to delete the link here. Typically, this function
+ * is only called when the power control device is being detached. If
+ * it is being detached then the child PCI device must have already
+ * been unbound too or the device core wouldn't let us unbind.
+ */
+ bus_unregister_notifier(&pci_bus_type, &pwrctl->nb);
+}
+EXPORT_SYMBOL_GPL(pci_pwrctl_device_unset_ready);
+
+static void devm_pci_pwrctl_device_unset_ready(void *data)
+{
+ struct pci_pwrctl *pwrctl = data;
+
+ pci_pwrctl_device_unset_ready(pwrctl);
+}
+
+/**
+ * devm_pci_pwrctl_device_set_ready - Managed variant of
+ * pci_pwrctl_device_set_ready().
+ *
+ * @dev: Device managing this pwrctl provider.
+ * @pwrctl: PCI power control data.
+ *
+ * Returns:
+ * 0 on success, negative error number on error.
+ */
+int devm_pci_pwrctl_device_set_ready(struct device *dev,
+ struct pci_pwrctl *pwrctl)
+{
+ int ret;
+
+ ret = pci_pwrctl_device_set_ready(pwrctl);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev,
+ devm_pci_pwrctl_device_unset_ready,
+ pwrctl);
+}
+EXPORT_SYMBOL_GPL(devm_pci_pwrctl_device_set_ready);
+
+MODULE_AUTHOR("Bartosz Golaszewski <bartosz.golaszewski@linaro.org>");
+MODULE_DESCRIPTION("PCI Device Power Control core driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pci/pwrctl/pci-pwrctl-pwrseq.c b/drivers/pci/pwrctl/pci-pwrctl-pwrseq.c new file mode 100644 index 000000000000..c7a113a76c0c --- /dev/null +++ b/drivers/pci/pwrctl/pci-pwrctl-pwrseq.c @@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Linaro Ltd.
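+ *
+ * Glue between the PCI pwrctl core and the power sequencing subsystem:
+ * power the device on at probe time, then kick off a bus rescan so the
+ * PCI core can enumerate it. The platform device this driver binds to
+ * is created when the PCI core populates the child OF nodes of the
+ * bridge, along the lines of this illustrative DT sketch:
+ *
+ *	pcie@0 {
+ *		wifi@0 {
+ *			compatible = "pci17cb,1101";
+ *		};
+ *	};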
+ */ + +#include <linux/device.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/pci-pwrctl.h> +#include <linux/platform_device.h> +#include <linux/pwrseq/consumer.h> +#include <linux/slab.h> +#include <linux/types.h> + +struct pci_pwrctl_pwrseq_data { + struct pci_pwrctl ctx; + struct pwrseq_desc *pwrseq; +}; + +static void devm_pci_pwrctl_pwrseq_power_off(void *data) +{ + struct pwrseq_desc *pwrseq = data; + + pwrseq_power_off(pwrseq); +} + +static int pci_pwrctl_pwrseq_probe(struct platform_device *pdev) +{ + struct pci_pwrctl_pwrseq_data *data; + struct device *dev = &pdev->dev; + int ret; + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->pwrseq = devm_pwrseq_get(dev, of_device_get_match_data(dev)); + if (IS_ERR(data->pwrseq)) + return dev_err_probe(dev, PTR_ERR(data->pwrseq), + "Failed to get the power sequencer\n"); + + ret = pwrseq_power_on(data->pwrseq); + if (ret) + return dev_err_probe(dev, ret, + "Failed to power-on the device\n"); + + ret = devm_add_action_or_reset(dev, devm_pci_pwrctl_pwrseq_power_off, + data->pwrseq); + if (ret) + return ret; + + data->ctx.dev = dev; + + ret = devm_pci_pwrctl_device_set_ready(dev, &data->ctx); + if (ret) + return dev_err_probe(dev, ret, + "Failed to register the pwrctl wrapper\n"); + + return 0; +} + +static const struct of_device_id pci_pwrctl_pwrseq_of_match[] = { + { + /* ATH11K in QCA6390 package. */ + .compatible = "pci17cb,1101", + .data = "wlan", + }, + { + /* ATH12K in WCN7850 package. */ + .compatible = "pci17cb,1107", + .data = "wlan", + }, + { } +}; +MODULE_DEVICE_TABLE(of, pci_pwrctl_pwrseq_of_match); + +static struct platform_driver pci_pwrctl_pwrseq_driver = { + .driver = { + .name = "pci-pwrctl-pwrseq", + .of_match_table = pci_pwrctl_pwrseq_of_match, + }, + .probe = pci_pwrctl_pwrseq_probe, +}; +module_platform_driver(pci_pwrctl_pwrseq_driver); + +MODULE_AUTHOR("Bartosz Golaszewski <bartosz.golaszewski@linaro.org>"); +MODULE_DESCRIPTION("Generic PCI Power Control module for power sequenced devices"); +MODULE_LICENSE("GPL"); diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c index d749ea8250d6..910387e5bdbf 100644 --- a/drivers/pci/remove.c +++ b/drivers/pci/remove.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/pci.h> #include <linux/module.h> +#include <linux/of_platform.h> #include "pci.h" static void pci_free_resources(struct pci_dev *dev) @@ -18,7 +19,7 @@ static void pci_stop_dev(struct pci_dev *dev) pci_pme_active(dev, false); if (pci_dev_is_added(dev)) { - + of_platform_depopulate(&dev->dev); device_release_driver(&dev->dev); pci_proc_detach_device(dev); pci_remove_sysfs_dev_files(dev); diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig index 7526a9e714fa..aa9530b4064f 100644 --- a/drivers/perf/Kconfig +++ b/drivers/perf/Kconfig @@ -56,6 +56,18 @@ config ARM_PMU Say y if you want to use CPU performance monitors on ARM-based systems. 
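+# These three entries carry the classic 32-bit ARM PMU implementations
+# into drivers/perf; each is default-on whenever ARM_PMU and the
+# matching CPU support are both enabled.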
+config ARM_V6_PMU
+ depends on ARM_PMU && (CPU_V6 || CPU_V6K)
+ def_bool y
+
+config ARM_V7_PMU
+ depends on ARM_PMU && CPU_V7
+ def_bool y
+
+config ARM_XSCALE_PMU
+ depends on ARM_PMU && CPU_XSCALE
+ def_bool y
+
 config RISCV_PMU
 depends on RISCV
 bool "RISC-V PMU framework"
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile index 29b1c28203ef..d43df81d52f7 100644 --- a/drivers/perf/Makefile +++ b/drivers/perf/Makefile @@ -6,6 +6,9 @@ obj-$(CONFIG_ARM_DSU_PMU) += arm_dsu_pmu.o obj-$(CONFIG_ARM_PMU) += arm_pmu.o arm_pmu_platform.o obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o obj-$(CONFIG_ARM_PMUV3) += arm_pmuv3.o
+obj-$(CONFIG_ARM_V6_PMU) += arm_v6_pmu.o
+obj-$(CONFIG_ARM_V7_PMU) += arm_v7_pmu.o
+obj-$(CONFIG_ARM_XSCALE_PMU) += arm_xscale_pmu.o
 obj-$(CONFIG_ARM_SMMU_V3_PMU) += arm_smmuv3_pmu.o obj-$(CONFIG_FSL_IMX8_DDR_PMU) += fsl_imx8_ddr_perf.o obj-$(CONFIG_FSL_IMX9_DDR_PMU) += fsl_imx9_ddr_perf.o
diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c index 86ef31ac7503..5c66b9278862 100644 --- a/drivers/perf/arm-ccn.c +++ b/drivers/perf/arm-ccn.c @@ -1561,4 +1561,5 @@ module_init(arm_ccn_init); module_exit(arm_ccn_exit);
 MODULE_AUTHOR("Pawel Moll <pawel.moll@arm.com>");
+MODULE_DESCRIPTION("ARM CCN (Cache Coherent Network) Performance Monitor Driver");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c index e26ad1d3ed0b..c932d9d355cf 100644 --- a/drivers/perf/arm-cmn.c +++ b/drivers/perf/arm-cmn.c @@ -174,9 +174,8 @@ #define CMN_CONFIG_WP_COMBINE GENMASK_ULL(30, 27) #define CMN_CONFIG_WP_DEV_SEL GENMASK_ULL(50, 48) #define CMN_CONFIG_WP_CHN_SEL GENMASK_ULL(55, 51)
-/* Note that we don't yet support the tertiary match group on newer IPs */
-#define CMN_CONFIG_WP_GRP BIT_ULL(56)
-#define CMN_CONFIG_WP_EXCLUSIVE BIT_ULL(57)
+#define CMN_CONFIG_WP_GRP GENMASK_ULL(57, 56)
+#define CMN_CONFIG_WP_EXCLUSIVE BIT_ULL(58)
 #define CMN_CONFIG1_WP_VAL GENMASK_ULL(63, 0) #define CMN_CONFIG2_WP_MASK GENMASK_ULL(63, 0)
@@ -590,6 +589,13 @@ struct arm_cmn_hw_event { s8 dtc_idx[CMN_MAX_DTCS]; u8 num_dns; u8 dtm_offset;
+
+ /*
+ * WP config registers are divided into UP and DOWN events. We only
+ * need to keep track of one of them.
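+ * Each set bit in wp_idx records that the event claimed the second
+ * (odd) watchpoint of its pair on that node, so that start, stop
+ * and clear can recover the index that event_add assigned.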
+ */ + DECLARE_BITMAP(wp_idx, CMN_MAX_XPS); + bool wide_sel; enum cmn_filter_select filter_sel; }; @@ -617,6 +623,17 @@ static unsigned int arm_cmn_get_index(u64 x[], unsigned int pos) return (x[pos / 32] >> ((pos % 32) * 2)) & 3; } +static void arm_cmn_set_wp_idx(unsigned long *wp_idx, unsigned int pos, bool val) +{ + if (val) + set_bit(pos, wp_idx); +} + +static unsigned int arm_cmn_get_wp_idx(unsigned long *wp_idx, unsigned int pos) +{ + return test_bit(pos, wp_idx); +} + struct arm_cmn_event_attr { struct device_attribute attr; enum cmn_model model; @@ -1336,12 +1353,37 @@ static const struct attribute_group *arm_cmn_attr_groups[] = { NULL }; -static int arm_cmn_wp_idx(struct perf_event *event) +static int arm_cmn_find_free_wp_idx(struct arm_cmn_dtm *dtm, + struct perf_event *event) +{ + int wp_idx = CMN_EVENT_EVENTID(event); + + if (dtm->wp_event[wp_idx] >= 0) + if (dtm->wp_event[++wp_idx] >= 0) + return -ENOSPC; + + return wp_idx; +} + +static int arm_cmn_get_assigned_wp_idx(struct perf_event *event, + struct arm_cmn_hw_event *hw, + unsigned int pos) +{ + return CMN_EVENT_EVENTID(event) + arm_cmn_get_wp_idx(hw->wp_idx, pos); +} + +static void arm_cmn_claim_wp_idx(struct arm_cmn_dtm *dtm, + struct perf_event *event, + unsigned int dtc, int wp_idx, + unsigned int pos) { - return CMN_EVENT_EVENTID(event) + CMN_EVENT_WP_GRP(event); + struct arm_cmn_hw_event *hw = to_cmn_hw(event); + + dtm->wp_event[wp_idx] = hw->dtc_idx[dtc]; + arm_cmn_set_wp_idx(hw->wp_idx, pos, wp_idx - CMN_EVENT_EVENTID(event)); } -static u32 arm_cmn_wp_config(struct perf_event *event) +static u32 arm_cmn_wp_config(struct perf_event *event, int wp_idx) { u32 config; u32 dev = CMN_EVENT_WP_DEV_SEL(event); @@ -1351,6 +1393,10 @@ static u32 arm_cmn_wp_config(struct perf_event *event) u32 combine = CMN_EVENT_WP_COMBINE(event); bool is_cmn600 = to_cmn(event->pmu)->part == PART_CMN600; + /* CMN-600 supports only primary and secondary matching groups */ + if (is_cmn600) + grp &= 1; + config = FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_DEV_SEL, dev) | FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_CHN_SEL, chn) | FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_GRP, grp) | @@ -1358,7 +1404,9 @@ static u32 arm_cmn_wp_config(struct perf_event *event) if (exc) config |= is_cmn600 ? CMN600_WPn_CONFIG_WP_EXCLUSIVE : CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE; - if (combine && !grp) + + /* wp_combine is available only on WP0 and WP2 */ + if (combine && !(wp_idx & 0x1)) config |= is_cmn600 ? 
CMN600_WPn_CONFIG_WP_COMBINE : CMN_DTM_WPn_CONFIG_WP_COMBINE; return config; @@ -1520,12 +1568,12 @@ static void arm_cmn_event_start(struct perf_event *event, int flags) writeq_relaxed(CMN_CC_INIT, cmn->dtc[i].base + CMN_DT_PMCCNTR); cmn->dtc[i].cc_active = true; } else if (type == CMN_TYPE_WP) { - int wp_idx = arm_cmn_wp_idx(event); u64 val = CMN_EVENT_WP_VAL(event); u64 mask = CMN_EVENT_WP_MASK(event); for_each_hw_dn(hw, dn, i) { void __iomem *base = dn->pmu_base + CMN_DTM_OFFSET(hw->dtm_offset); + int wp_idx = arm_cmn_get_assigned_wp_idx(event, hw, i); writeq_relaxed(val, base + CMN_DTM_WPn_VAL(wp_idx)); writeq_relaxed(mask, base + CMN_DTM_WPn_MASK(wp_idx)); @@ -1550,10 +1598,9 @@ static void arm_cmn_event_stop(struct perf_event *event, int flags) i = hw->dtc_idx[0]; cmn->dtc[i].cc_active = false; } else if (type == CMN_TYPE_WP) { - int wp_idx = arm_cmn_wp_idx(event); - for_each_hw_dn(hw, dn, i) { void __iomem *base = dn->pmu_base + CMN_DTM_OFFSET(hw->dtm_offset); + int wp_idx = arm_cmn_get_assigned_wp_idx(event, hw, i); writeq_relaxed(0, base + CMN_DTM_WPn_MASK(wp_idx)); writeq_relaxed(~0ULL, base + CMN_DTM_WPn_VAL(wp_idx)); @@ -1571,10 +1618,23 @@ struct arm_cmn_val { u8 dtm_count[CMN_MAX_DTMS]; u8 occupid[CMN_MAX_DTMS][SEL_MAX]; u8 wp[CMN_MAX_DTMS][4]; + u8 wp_combine[CMN_MAX_DTMS][2]; int dtc_count[CMN_MAX_DTCS]; bool cycles; }; +static int arm_cmn_val_find_free_wp_config(struct perf_event *event, + struct arm_cmn_val *val, int dtm) +{ + int wp_idx = CMN_EVENT_EVENTID(event); + + if (val->wp[dtm][wp_idx]) + if (val->wp[dtm][++wp_idx]) + return -ENOSPC; + + return wp_idx; +} + static void arm_cmn_val_add_event(struct arm_cmn *cmn, struct arm_cmn_val *val, struct perf_event *event) { @@ -1606,8 +1666,9 @@ static void arm_cmn_val_add_event(struct arm_cmn *cmn, struct arm_cmn_val *val, if (type != CMN_TYPE_WP) continue; - wp_idx = arm_cmn_wp_idx(event); - val->wp[dtm][wp_idx] = CMN_EVENT_WP_COMBINE(event) + 1; + wp_idx = arm_cmn_val_find_free_wp_config(event, val, dtm); + val->wp[dtm][wp_idx] = 1; + val->wp_combine[dtm][wp_idx >> 1] += !!CMN_EVENT_WP_COMBINE(event); } } @@ -1631,6 +1692,7 @@ static int arm_cmn_validate_group(struct arm_cmn *cmn, struct perf_event *event) return -ENOMEM; arm_cmn_val_add_event(cmn, val, leader); + for_each_sibling_event(sibling, leader) arm_cmn_val_add_event(cmn, val, sibling); @@ -1645,7 +1707,7 @@ static int arm_cmn_validate_group(struct arm_cmn *cmn, struct perf_event *event) goto done; for_each_hw_dn(hw, dn, i) { - int wp_idx, wp_cmb, dtm = dn->dtm, sel = hw->filter_sel; + int wp_idx, dtm = dn->dtm, sel = hw->filter_sel; if (val->dtm_count[dtm] == CMN_DTM_NUM_COUNTERS) goto done; @@ -1657,12 +1719,12 @@ static int arm_cmn_validate_group(struct arm_cmn *cmn, struct perf_event *event) if (type != CMN_TYPE_WP) continue; - wp_idx = arm_cmn_wp_idx(event); - if (val->wp[dtm][wp_idx]) + wp_idx = arm_cmn_val_find_free_wp_config(event, val, dtm); + if (wp_idx < 0) goto done; - wp_cmb = val->wp[dtm][wp_idx ^ 1]; - if (wp_cmb && wp_cmb != CMN_EVENT_WP_COMBINE(event) + 1) + if (wp_idx & 1 && + val->wp_combine[dtm][wp_idx >> 1] != !!CMN_EVENT_WP_COMBINE(event)) goto done; } @@ -1773,8 +1835,11 @@ static void arm_cmn_event_clear(struct arm_cmn *cmn, struct perf_event *event, struct arm_cmn_dtm *dtm = &cmn->dtms[hw->dn[i].dtm] + hw->dtm_offset; unsigned int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i); - if (type == CMN_TYPE_WP) - dtm->wp_event[arm_cmn_wp_idx(event)] = -1; + if (type == CMN_TYPE_WP) { + int wp_idx = arm_cmn_get_assigned_wp_idx(event, hw, i); + + 
dtm->wp_event[wp_idx] = -1; + } if (hw->filter_sel > SEL_NONE) hw->dn[i].occupid[hw->filter_sel].count--; @@ -1783,6 +1848,7 @@ static void arm_cmn_event_clear(struct arm_cmn *cmn, struct perf_event *event, writel_relaxed(dtm->pmu_config_low, dtm->base + CMN_DTM_PMU_CONFIG); } memset(hw->dtm_idx, 0, sizeof(hw->dtm_idx)); + memset(hw->wp_idx, 0, sizeof(hw->wp_idx)); for_each_hw_dtc_idx(hw, j, idx) cmn->dtc[j].counters[idx] = NULL; @@ -1836,19 +1902,23 @@ static int arm_cmn_event_add(struct perf_event *event, int flags) if (type == CMN_TYPE_XP) { input_sel = CMN__PMEVCNT0_INPUT_SEL_XP + dtm_idx; } else if (type == CMN_TYPE_WP) { - int tmp, wp_idx = arm_cmn_wp_idx(event); - u32 cfg = arm_cmn_wp_config(event); + int tmp, wp_idx; + u32 cfg; - if (dtm->wp_event[wp_idx] >= 0) + wp_idx = arm_cmn_find_free_wp_idx(dtm, event); + if (wp_idx < 0) goto free_dtms; + cfg = arm_cmn_wp_config(event, wp_idx); + tmp = dtm->wp_event[wp_idx ^ 1]; if (tmp >= 0 && CMN_EVENT_WP_COMBINE(event) != CMN_EVENT_WP_COMBINE(cmn->dtc[d].counters[tmp])) goto free_dtms; input_sel = CMN__PMEVCNT0_INPUT_SEL_WP + wp_idx; - dtm->wp_event[wp_idx] = hw->dtc_idx[d]; + + arm_cmn_claim_wp_idx(dtm, event, d, wp_idx, i); writel_relaxed(cfg, dtm->base + CMN_DTM_WPn_CONFIG(wp_idx)); } else { struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id); diff --git a/drivers/perf/arm_cspmu/ampere_cspmu.c b/drivers/perf/arm_cspmu/ampere_cspmu.c index f146a455e838..f72f5689923c 100644 --- a/drivers/perf/arm_cspmu/ampere_cspmu.c +++ b/drivers/perf/arm_cspmu/ampere_cspmu.c @@ -269,4 +269,5 @@ static void __exit ampere_cspmu_exit(void) module_init(ampere_cspmu_init); module_exit(ampere_cspmu_exit); +MODULE_DESCRIPTION("Ampere SoC Performance Monitor Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/perf/arm_cspmu/arm_cspmu.c b/drivers/perf/arm_cspmu/arm_cspmu.c index c318dc909767..2158a5975c90 100644 --- a/drivers/perf/arm_cspmu/arm_cspmu.c +++ b/drivers/perf/arm_cspmu/arm_cspmu.c @@ -1427,4 +1427,5 @@ EXPORT_SYMBOL_GPL(arm_cspmu_impl_unregister); module_init(arm_cspmu_init); module_exit(arm_cspmu_exit); +MODULE_DESCRIPTION("ARM CoreSight Architecture Performance Monitor Driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/perf/arm_cspmu/nvidia_cspmu.c b/drivers/perf/arm_cspmu/nvidia_cspmu.c index 5b84b701ad62..d0ef611240aa 100644 --- a/drivers/perf/arm_cspmu/nvidia_cspmu.c +++ b/drivers/perf/arm_cspmu/nvidia_cspmu.c @@ -417,4 +417,5 @@ static void __exit nvidia_cspmu_exit(void) module_init(nvidia_cspmu_init); module_exit(nvidia_cspmu_exit); +MODULE_DESCRIPTION("NVIDIA Coresight Architecture Performance Monitor Driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c index 23fa6c5da82c..cf0430c266a6 100644 --- a/drivers/perf/arm_pmuv3.c +++ b/drivers/perf/arm_pmuv3.c @@ -25,8 +25,6 @@ #include <linux/smp.h> #include <linux/nmi.h> -#include <asm/arm_pmuv3.h> - /* ARMv8 Cortex-A53 specific event types. 
*/ #define ARMV8_A53_PERFCTR_PREF_LINEFILL 0xC2 @@ -338,6 +336,11 @@ static bool armv8pmu_event_want_user_access(struct perf_event *event) return ATTR_CFG_GET_FLD(&event->attr, rdpmc); } +static u32 armv8pmu_event_get_threshold(struct perf_event_attr *attr) +{ + return ATTR_CFG_GET_FLD(attr, threshold); +} + static u8 armv8pmu_event_threshold_control(struct perf_event_attr *attr) { u8 th_compare = ATTR_CFG_GET_FLD(attr, threshold_compare); @@ -941,7 +944,8 @@ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc, unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT; /* Always prefer to place a cycle counter into the cycle counter. */ - if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) { + if ((evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) && + !armv8pmu_event_get_threshold(&event->attr)) { if (!test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask)) return ARMV8_IDX_CYCLE_COUNTER; else if (armv8pmu_event_is_64bit(event) && @@ -1033,13 +1037,13 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event, * If FEAT_PMUv3_TH isn't implemented, then THWIDTH (threshold_max) will * be 0 and will also trigger this check, preventing it from being used. */ - th = ATTR_CFG_GET_FLD(attr, threshold); + th = armv8pmu_event_get_threshold(attr); if (th > threshold_max(cpu_pmu)) { pr_debug("PMU event threshold exceeds max value\n"); return -EINVAL; } - if (IS_ENABLED(CONFIG_ARM64) && th) { + if (th) { config_base |= FIELD_PREP(ARMV8_PMU_EVTYPE_TH, th); config_base |= FIELD_PREP(ARMV8_PMU_EVTYPE_TC, armv8pmu_event_threshold_control(attr)); @@ -1340,14 +1344,20 @@ PMUV3_INIT_SIMPLE(armv9_cortex_a520) PMUV3_INIT_SIMPLE(armv9_cortex_a710) PMUV3_INIT_SIMPLE(armv9_cortex_a715) PMUV3_INIT_SIMPLE(armv9_cortex_a720) +PMUV3_INIT_SIMPLE(armv9_cortex_a725) PMUV3_INIT_SIMPLE(armv8_cortex_x1) PMUV3_INIT_SIMPLE(armv9_cortex_x2) PMUV3_INIT_SIMPLE(armv9_cortex_x3) PMUV3_INIT_SIMPLE(armv9_cortex_x4) +PMUV3_INIT_SIMPLE(armv9_cortex_x925) PMUV3_INIT_SIMPLE(armv8_neoverse_e1) PMUV3_INIT_SIMPLE(armv8_neoverse_n1) PMUV3_INIT_SIMPLE(armv9_neoverse_n2) +PMUV3_INIT_SIMPLE(armv9_neoverse_n3) PMUV3_INIT_SIMPLE(armv8_neoverse_v1) +PMUV3_INIT_SIMPLE(armv8_neoverse_v2) +PMUV3_INIT_SIMPLE(armv8_neoverse_v3) +PMUV3_INIT_SIMPLE(armv8_neoverse_v3ae) PMUV3_INIT_SIMPLE(armv8_nvidia_carmel) PMUV3_INIT_SIMPLE(armv8_nvidia_denver) @@ -1379,14 +1389,20 @@ static const struct of_device_id armv8_pmu_of_device_ids[] = { {.compatible = "arm,cortex-a710-pmu", .data = armv9_cortex_a710_pmu_init}, {.compatible = "arm,cortex-a715-pmu", .data = armv9_cortex_a715_pmu_init}, {.compatible = "arm,cortex-a720-pmu", .data = armv9_cortex_a720_pmu_init}, + {.compatible = "arm,cortex-a725-pmu", .data = armv9_cortex_a725_pmu_init}, {.compatible = "arm,cortex-x1-pmu", .data = armv8_cortex_x1_pmu_init}, {.compatible = "arm,cortex-x2-pmu", .data = armv9_cortex_x2_pmu_init}, {.compatible = "arm,cortex-x3-pmu", .data = armv9_cortex_x3_pmu_init}, {.compatible = "arm,cortex-x4-pmu", .data = armv9_cortex_x4_pmu_init}, + {.compatible = "arm,cortex-x925-pmu", .data = armv9_cortex_x925_pmu_init}, {.compatible = "arm,neoverse-e1-pmu", .data = armv8_neoverse_e1_pmu_init}, {.compatible = "arm,neoverse-n1-pmu", .data = armv8_neoverse_n1_pmu_init}, {.compatible = "arm,neoverse-n2-pmu", .data = armv9_neoverse_n2_pmu_init}, + {.compatible = "arm,neoverse-n3-pmu", .data = armv9_neoverse_n3_pmu_init}, {.compatible = "arm,neoverse-v1-pmu", .data = armv8_neoverse_v1_pmu_init}, + {.compatible = "arm,neoverse-v2-pmu", .data = armv8_neoverse_v2_pmu_init}, + 
{.compatible = "arm,neoverse-v3-pmu", .data = armv8_neoverse_v3_pmu_init}, + {.compatible = "arm,neoverse-v3ae-pmu", .data = armv8_neoverse_v3ae_pmu_init}, {.compatible = "cavium,thunder-pmu", .data = armv8_cavium_thunder_pmu_init}, {.compatible = "brcm,vulcan-pmu", .data = armv8_brcm_vulcan_pmu_init}, {.compatible = "nvidia,carmel-pmu", .data = armv8_nvidia_carmel_pmu_init}, diff --git a/drivers/perf/arm_v6_pmu.c b/drivers/perf/arm_v6_pmu.c new file mode 100644 index 000000000000..0bb685b4bac5 --- /dev/null +++ b/drivers/perf/arm_v6_pmu.c @@ -0,0 +1,430 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ARMv6 Performance counter handling code. + * + * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles + * + * ARMv6 has 2 configurable performance counters and a single cycle counter. + * They all share a single reset bit but can be written to zero so we can use + * that for a reset. + * + * The counters can't be individually enabled or disabled so when we remove + * one event and replace it with another we could get spurious counts from the + * wrong event. However, we can take advantage of the fact that the + * performance counters can export events to the event bus, and the event bus + * itself can be monitored. This requires that we *don't* export the events to + * the event bus. The procedure for disabling a configurable counter is: + * - change the counter to count the ETMEXTOUT[0] signal (0x20). This + * effectively stops the counter from counting. + * - disable the counter's interrupt generation (each counter has it's + * own interrupt enable bit). + * Once stopped, the counter value can be written as 0 to reset. + * + * To enable a counter: + * - enable the counter's interrupt generation. + * - set the new event type. + * + * Note: the dedicated cycle counter only counts cycles and can't be + * enabled/disabled independently of the others. When we want to disable the + * cycle counter, we have to just disable the interrupt reporting and start + * ignoring that counter. When re-enabling, we have to reset the value and + * enable the interrupt. + */ + +#include <asm/cputype.h> +#include <asm/irq_regs.h> + +#include <linux/of.h> +#include <linux/perf/arm_pmu.h> +#include <linux/platform_device.h> + +enum armv6_perf_types { + ARMV6_PERFCTR_ICACHE_MISS = 0x0, + ARMV6_PERFCTR_IBUF_STALL = 0x1, + ARMV6_PERFCTR_DDEP_STALL = 0x2, + ARMV6_PERFCTR_ITLB_MISS = 0x3, + ARMV6_PERFCTR_DTLB_MISS = 0x4, + ARMV6_PERFCTR_BR_EXEC = 0x5, + ARMV6_PERFCTR_BR_MISPREDICT = 0x6, + ARMV6_PERFCTR_INSTR_EXEC = 0x7, + ARMV6_PERFCTR_DCACHE_HIT = 0x9, + ARMV6_PERFCTR_DCACHE_ACCESS = 0xA, + ARMV6_PERFCTR_DCACHE_MISS = 0xB, + ARMV6_PERFCTR_DCACHE_WBACK = 0xC, + ARMV6_PERFCTR_SW_PC_CHANGE = 0xD, + ARMV6_PERFCTR_MAIN_TLB_MISS = 0xF, + ARMV6_PERFCTR_EXPL_D_ACCESS = 0x10, + ARMV6_PERFCTR_LSU_FULL_STALL = 0x11, + ARMV6_PERFCTR_WBUF_DRAINED = 0x12, + ARMV6_PERFCTR_CPU_CYCLES = 0xFF, + ARMV6_PERFCTR_NOP = 0x20, +}; + +enum armv6_counters { + ARMV6_CYCLE_COUNTER = 0, + ARMV6_COUNTER0, + ARMV6_COUNTER1, +}; + +/* + * The hardware events that we support. We do support cache operations but + * we have harvard caches and no way to combine instruction and data + * accesses/misses in hardware. 
+ */ +static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = { + PERF_MAP_ALL_UNSUPPORTED, + [PERF_COUNT_HW_CPU_CYCLES] = ARMV6_PERFCTR_CPU_CYCLES, + [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6_PERFCTR_INSTR_EXEC, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC, + [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6_PERFCTR_BR_MISPREDICT, + [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV6_PERFCTR_IBUF_STALL, + [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV6_PERFCTR_LSU_FULL_STALL, +}; + +static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { + PERF_CACHE_MAP_ALL_UNSUPPORTED, + + /* + * The performance counters don't differentiate between read and write + * accesses/misses so this isn't strictly correct, but it's the best we + * can do. Writes and reads get combined. + */ + [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS, + [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS, + [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS, + [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS, + + [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS, + + /* + * The ARM performance counters can count micro DTLB misses, micro ITLB + * misses and main TLB misses. There isn't an event for TLB misses, so + * use the micro misses here and if users want the main TLB misses they + * can use a raw counter. + */ + [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS, + [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS, + + [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS, + [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS, +}; + +static inline unsigned long +armv6_pmcr_read(void) +{ + u32 val; + asm volatile("mrc p15, 0, %0, c15, c12, 0" : "=r"(val)); + return val; +} + +static inline void +armv6_pmcr_write(unsigned long val) +{ + asm volatile("mcr p15, 0, %0, c15, c12, 0" : : "r"(val)); +} + +#define ARMV6_PMCR_ENABLE (1 << 0) +#define ARMV6_PMCR_CTR01_RESET (1 << 1) +#define ARMV6_PMCR_CCOUNT_RESET (1 << 2) +#define ARMV6_PMCR_CCOUNT_DIV (1 << 3) +#define ARMV6_PMCR_COUNT0_IEN (1 << 4) +#define ARMV6_PMCR_COUNT1_IEN (1 << 5) +#define ARMV6_PMCR_CCOUNT_IEN (1 << 6) +#define ARMV6_PMCR_COUNT0_OVERFLOW (1 << 8) +#define ARMV6_PMCR_COUNT1_OVERFLOW (1 << 9) +#define ARMV6_PMCR_CCOUNT_OVERFLOW (1 << 10) +#define ARMV6_PMCR_EVT_COUNT0_SHIFT 20 +#define ARMV6_PMCR_EVT_COUNT0_MASK (0xFF << ARMV6_PMCR_EVT_COUNT0_SHIFT) +#define ARMV6_PMCR_EVT_COUNT1_SHIFT 12 +#define ARMV6_PMCR_EVT_COUNT1_MASK (0xFF << ARMV6_PMCR_EVT_COUNT1_SHIFT) + +#define ARMV6_PMCR_OVERFLOWED_MASK \ + (ARMV6_PMCR_COUNT0_OVERFLOW | ARMV6_PMCR_COUNT1_OVERFLOW | \ + ARMV6_PMCR_CCOUNT_OVERFLOW) + +static inline int +armv6_pmcr_has_overflowed(unsigned long pmcr) +{ + return pmcr & ARMV6_PMCR_OVERFLOWED_MASK; +} + +static inline int +armv6_pmcr_counter_has_overflowed(unsigned long pmcr, + enum armv6_counters counter) +{ + int ret = 0; + + if (ARMV6_CYCLE_COUNTER == counter) + ret = pmcr & ARMV6_PMCR_CCOUNT_OVERFLOW; + else if (ARMV6_COUNTER0 == counter) + ret = pmcr & ARMV6_PMCR_COUNT0_OVERFLOW; + else if (ARMV6_COUNTER1 == counter) + ret = pmcr & ARMV6_PMCR_COUNT1_OVERFLOW; + else + WARN_ONCE(1, "invalid counter number (%d)\n", counter); + + return ret; +} + +static inline u64 armv6pmu_read_counter(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + int counter = hwc->idx; + unsigned long value = 0; + + if 
(ARMV6_CYCLE_COUNTER == counter) + asm volatile("mrc p15, 0, %0, c15, c12, 1" : "=r"(value)); + else if (ARMV6_COUNTER0 == counter) + asm volatile("mrc p15, 0, %0, c15, c12, 2" : "=r"(value)); + else if (ARMV6_COUNTER1 == counter) + asm volatile("mrc p15, 0, %0, c15, c12, 3" : "=r"(value)); + else + WARN_ONCE(1, "invalid counter number (%d)\n", counter); + + return value; +} + +static inline void armv6pmu_write_counter(struct perf_event *event, u64 value) +{ + struct hw_perf_event *hwc = &event->hw; + int counter = hwc->idx; + + if (ARMV6_CYCLE_COUNTER == counter) + asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r"(value)); + else if (ARMV6_COUNTER0 == counter) + asm volatile("mcr p15, 0, %0, c15, c12, 2" : : "r"(value)); + else if (ARMV6_COUNTER1 == counter) + asm volatile("mcr p15, 0, %0, c15, c12, 3" : : "r"(value)); + else + WARN_ONCE(1, "invalid counter number (%d)\n", counter); +} + +static void armv6pmu_enable_event(struct perf_event *event) +{ + unsigned long val, mask, evt; + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + if (ARMV6_CYCLE_COUNTER == idx) { + mask = 0; + evt = ARMV6_PMCR_CCOUNT_IEN; + } else if (ARMV6_COUNTER0 == idx) { + mask = ARMV6_PMCR_EVT_COUNT0_MASK; + evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT0_SHIFT) | + ARMV6_PMCR_COUNT0_IEN; + } else if (ARMV6_COUNTER1 == idx) { + mask = ARMV6_PMCR_EVT_COUNT1_MASK; + evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT1_SHIFT) | + ARMV6_PMCR_COUNT1_IEN; + } else { + WARN_ONCE(1, "invalid counter number (%d)\n", idx); + return; + } + + /* + * Mask out the current event and set the counter to count the event + * that we're interested in. + */ + val = armv6_pmcr_read(); + val &= ~mask; + val |= evt; + armv6_pmcr_write(val); +} + +static irqreturn_t +armv6pmu_handle_irq(struct arm_pmu *cpu_pmu) +{ + unsigned long pmcr = armv6_pmcr_read(); + struct perf_sample_data data; + struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events); + struct pt_regs *regs; + int idx; + + if (!armv6_pmcr_has_overflowed(pmcr)) + return IRQ_NONE; + + regs = get_irq_regs(); + + /* + * The interrupts are cleared by writing the overflow flags back to + * the control register. All of the other bits don't have any effect + * if they are rewritten, so write the whole value back. + */ + armv6_pmcr_write(pmcr); + + for (idx = 0; idx < cpu_pmu->num_events; ++idx) { + struct perf_event *event = cpuc->events[idx]; + struct hw_perf_event *hwc; + + /* Ignore if we don't have an event. */ + if (!event) + continue; + + /* + * We have a single interrupt for all counters. Check that + * each counter has overflowed before we process it. + */ + if (!armv6_pmcr_counter_has_overflowed(pmcr, idx)) + continue; + + hwc = &event->hw; + armpmu_event_update(event); + perf_sample_data_init(&data, 0, hwc->last_period); + if (!armpmu_event_set_period(event)) + continue; + + if (perf_event_overflow(event, &data, regs)) + cpu_pmu->disable(event); + } + + /* + * Handle the pending perf events. + * + * Note: this call *must* be run with interrupts disabled. For + * platforms that can have the PMU interrupts raised as an NMI, this + * will not work. 
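+ * The armv6 PMU interrupt is never requested as an NMI, though, so
+ * running the pending work straight from the handler is safe here.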
+ */ + irq_work_run(); + + return IRQ_HANDLED; +} + +static void armv6pmu_start(struct arm_pmu *cpu_pmu) +{ + unsigned long val; + + val = armv6_pmcr_read(); + val |= ARMV6_PMCR_ENABLE; + armv6_pmcr_write(val); +} + +static void armv6pmu_stop(struct arm_pmu *cpu_pmu) +{ + unsigned long val; + + val = armv6_pmcr_read(); + val &= ~ARMV6_PMCR_ENABLE; + armv6_pmcr_write(val); +} + +static int +armv6pmu_get_event_idx(struct pmu_hw_events *cpuc, + struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + /* Always place a cycle counter into the cycle counter. */ + if (ARMV6_PERFCTR_CPU_CYCLES == hwc->config_base) { + if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask)) + return -EAGAIN; + + return ARMV6_CYCLE_COUNTER; + } else { + /* + * For anything other than a cycle counter, try and use + * counter0 and counter1. + */ + if (!test_and_set_bit(ARMV6_COUNTER1, cpuc->used_mask)) + return ARMV6_COUNTER1; + + if (!test_and_set_bit(ARMV6_COUNTER0, cpuc->used_mask)) + return ARMV6_COUNTER0; + + /* The counters are all in use. */ + return -EAGAIN; + } +} + +static void armv6pmu_clear_event_idx(struct pmu_hw_events *cpuc, + struct perf_event *event) +{ + clear_bit(event->hw.idx, cpuc->used_mask); +} + +static void armv6pmu_disable_event(struct perf_event *event) +{ + unsigned long val, mask, evt; + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + if (ARMV6_CYCLE_COUNTER == idx) { + mask = ARMV6_PMCR_CCOUNT_IEN; + evt = 0; + } else if (ARMV6_COUNTER0 == idx) { + mask = ARMV6_PMCR_COUNT0_IEN | ARMV6_PMCR_EVT_COUNT0_MASK; + evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT; + } else if (ARMV6_COUNTER1 == idx) { + mask = ARMV6_PMCR_COUNT1_IEN | ARMV6_PMCR_EVT_COUNT1_MASK; + evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT1_SHIFT; + } else { + WARN_ONCE(1, "invalid counter number (%d)\n", idx); + return; + } + + /* + * Mask out the current event and set the counter to count the number + * of ETM bus signal assertion cycles. The external reporting should + * be disabled and so this should never increment. 
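+ * (The NOP event, 0x20, is the ETMEXTOUT[0] selection described at
+ * the top of this file: with export to the event bus disabled it
+ * never asserts.)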
+ */ + val = armv6_pmcr_read(); + val &= ~mask; + val |= evt; + armv6_pmcr_write(val); +} + +static int armv6_map_event(struct perf_event *event) +{ + return armpmu_map_event(event, &armv6_perf_map, + &armv6_perf_cache_map, 0xFF); +} + +static void armv6pmu_init(struct arm_pmu *cpu_pmu) +{ + cpu_pmu->handle_irq = armv6pmu_handle_irq; + cpu_pmu->enable = armv6pmu_enable_event; + cpu_pmu->disable = armv6pmu_disable_event; + cpu_pmu->read_counter = armv6pmu_read_counter; + cpu_pmu->write_counter = armv6pmu_write_counter; + cpu_pmu->get_event_idx = armv6pmu_get_event_idx; + cpu_pmu->clear_event_idx = armv6pmu_clear_event_idx; + cpu_pmu->start = armv6pmu_start; + cpu_pmu->stop = armv6pmu_stop; + cpu_pmu->map_event = armv6_map_event; + cpu_pmu->num_events = 3; +} + +static int armv6_1136_pmu_init(struct arm_pmu *cpu_pmu) +{ + armv6pmu_init(cpu_pmu); + cpu_pmu->name = "armv6_1136"; + return 0; +} + +static int armv6_1176_pmu_init(struct arm_pmu *cpu_pmu) +{ + armv6pmu_init(cpu_pmu); + cpu_pmu->name = "armv6_1176"; + return 0; +} + +static const struct of_device_id armv6_pmu_of_device_ids[] = { + {.compatible = "arm,arm1176-pmu", .data = armv6_1176_pmu_init}, + {.compatible = "arm,arm1136-pmu", .data = armv6_1136_pmu_init}, + { /* sentinel value */ } +}; + +static int armv6_pmu_device_probe(struct platform_device *pdev) +{ + return arm_pmu_device_probe(pdev, armv6_pmu_of_device_ids, NULL); +} + +static struct platform_driver armv6_pmu_driver = { + .driver = { + .name = "armv6-pmu", + .of_match_table = armv6_pmu_of_device_ids, + }, + .probe = armv6_pmu_device_probe, +}; + +builtin_platform_driver(armv6_pmu_driver); diff --git a/drivers/perf/arm_v7_pmu.c b/drivers/perf/arm_v7_pmu.c new file mode 100644 index 000000000000..928ac3d626ed --- /dev/null +++ b/drivers/perf/arm_v7_pmu.c @@ -0,0 +1,1994 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code. + * + * ARMv7 support: Jean Pihet <jpihet@mvista.com> + * 2010 (c) MontaVista Software, LLC. + * + * Copied from ARMv6 code, with the low level code inspired + * by the ARMv7 Oprofile code. + * + * Cortex-A8 has up to 4 configurable performance counters and + * a single cycle counter. + * Cortex-A9 has up to 31 configurable performance counters and + * a single cycle counter. + * + * All counters can be enabled/disabled and IRQ masked separately. The cycle + * counter and all 4 performance counters together can be reset separately. + */ + +#include <asm/cp15.h> +#include <asm/cputype.h> +#include <asm/irq_regs.h> +#include <asm/vfp.h> +#include "../vfp/vfpinstr.h" + +#include <linux/of.h> +#include <linux/perf/arm_pmu.h> +#include <linux/platform_device.h> + +/* + * Common ARMv7 event types + * + * Note: An implementation may not be able to count all of these events + * but the encodings are considered to be `reserved' in the case that + * they are not available. + */ +#define ARMV7_PERFCTR_PMNC_SW_INCR 0x00 +#define ARMV7_PERFCTR_L1_ICACHE_REFILL 0x01 +#define ARMV7_PERFCTR_ITLB_REFILL 0x02 +#define ARMV7_PERFCTR_L1_DCACHE_REFILL 0x03 +#define ARMV7_PERFCTR_L1_DCACHE_ACCESS 0x04 +#define ARMV7_PERFCTR_DTLB_REFILL 0x05 +#define ARMV7_PERFCTR_MEM_READ 0x06 +#define ARMV7_PERFCTR_MEM_WRITE 0x07 +#define ARMV7_PERFCTR_INSTR_EXECUTED 0x08 +#define ARMV7_PERFCTR_EXC_TAKEN 0x09 +#define ARMV7_PERFCTR_EXC_EXECUTED 0x0A +#define ARMV7_PERFCTR_CID_WRITE 0x0B + +/* + * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS. 
+ * It counts: + * - all (taken) branch instructions, + * - instructions that explicitly write the PC, + * - exception generating instructions. + */ +#define ARMV7_PERFCTR_PC_WRITE 0x0C +#define ARMV7_PERFCTR_PC_IMM_BRANCH 0x0D +#define ARMV7_PERFCTR_PC_PROC_RETURN 0x0E +#define ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS 0x0F +#define ARMV7_PERFCTR_PC_BRANCH_MIS_PRED 0x10 +#define ARMV7_PERFCTR_CLOCK_CYCLES 0x11 +#define ARMV7_PERFCTR_PC_BRANCH_PRED 0x12 + +/* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */ +#define ARMV7_PERFCTR_MEM_ACCESS 0x13 +#define ARMV7_PERFCTR_L1_ICACHE_ACCESS 0x14 +#define ARMV7_PERFCTR_L1_DCACHE_WB 0x15 +#define ARMV7_PERFCTR_L2_CACHE_ACCESS 0x16 +#define ARMV7_PERFCTR_L2_CACHE_REFILL 0x17 +#define ARMV7_PERFCTR_L2_CACHE_WB 0x18 +#define ARMV7_PERFCTR_BUS_ACCESS 0x19 +#define ARMV7_PERFCTR_MEM_ERROR 0x1A +#define ARMV7_PERFCTR_INSTR_SPEC 0x1B +#define ARMV7_PERFCTR_TTBR_WRITE 0x1C +#define ARMV7_PERFCTR_BUS_CYCLES 0x1D + +#define ARMV7_PERFCTR_CPU_CYCLES 0xFF + +/* ARMv7 Cortex-A8 specific event types */ +#define ARMV7_A8_PERFCTR_L2_CACHE_ACCESS 0x43 +#define ARMV7_A8_PERFCTR_L2_CACHE_REFILL 0x44 +#define ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS 0x50 +#define ARMV7_A8_PERFCTR_STALL_ISIDE 0x56 + +/* ARMv7 Cortex-A9 specific event types */ +#define ARMV7_A9_PERFCTR_INSTR_CORE_RENAME 0x68 +#define ARMV7_A9_PERFCTR_STALL_ICACHE 0x60 +#define ARMV7_A9_PERFCTR_STALL_DISPATCH 0x66 + +/* ARMv7 Cortex-A5 specific event types */ +#define ARMV7_A5_PERFCTR_PREFETCH_LINEFILL 0xc2 +#define ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP 0xc3 + +/* ARMv7 Cortex-A15 specific event types */ +#define ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ 0x40 +#define ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE 0x41 +#define ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ 0x42 +#define ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE 0x43 + +#define ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ 0x4C +#define ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE 0x4D + +#define ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ 0x50 +#define ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE 0x51 +#define ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ 0x52 +#define ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE 0x53 + +#define ARMV7_A15_PERFCTR_PC_WRITE_SPEC 0x76 + +/* ARMv7 Cortex-A12 specific event types */ +#define ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ 0x40 +#define ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE 0x41 + +#define ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ 0x50 +#define ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE 0x51 + +#define ARMV7_A12_PERFCTR_PC_WRITE_SPEC 0x76 + +#define ARMV7_A12_PERFCTR_PF_TLB_REFILL 0xe7 + +/* ARMv7 Krait specific event types */ +#define KRAIT_PMRESR0_GROUP0 0xcc +#define KRAIT_PMRESR1_GROUP0 0xd0 +#define KRAIT_PMRESR2_GROUP0 0xd4 +#define KRAIT_VPMRESR0_GROUP0 0xd8 + +#define KRAIT_PERFCTR_L1_ICACHE_ACCESS 0x10011 +#define KRAIT_PERFCTR_L1_ICACHE_MISS 0x10010 + +#define KRAIT_PERFCTR_L1_ITLB_ACCESS 0x12222 +#define KRAIT_PERFCTR_L1_DTLB_ACCESS 0x12210 + +/* ARMv7 Scorpion specific event types */ +#define SCORPION_LPM0_GROUP0 0x4c +#define SCORPION_LPM1_GROUP0 0x50 +#define SCORPION_LPM2_GROUP0 0x54 +#define SCORPION_L2LPM_GROUP0 0x58 +#define SCORPION_VLPM_GROUP0 0x5c + +#define SCORPION_ICACHE_ACCESS 0x10053 +#define SCORPION_ICACHE_MISS 0x10052 + +#define SCORPION_DTLB_ACCESS 0x12013 +#define SCORPION_DTLB_MISS 0x12012 + +#define SCORPION_ITLB_MISS 0x12021 + +/* + * Cortex-A8 HW events mapping + * + * The hardware events that we support. 
We do support cache operations but + * we have harvard caches and no way to combine instruction and data + * accesses/misses in hardware. + */ +static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = { + PERF_MAP_ALL_UNSUPPORTED, + [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, + [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, + [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, + [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, + [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A8_PERFCTR_STALL_ISIDE, +}; + +static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { + PERF_CACHE_MAP_ALL_UNSUPPORTED, + + /* + * The performance counters don't differentiate between read and write + * accesses/misses so this isn't strictly correct, but it's the best we + * can do. Writes and reads get combined. + */ + [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, + [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, + [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, + [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, + + [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS, + [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, + + [C(LL)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS, + [C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL, + [C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS, + [C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL, + + [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, + [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, + + [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, + [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, + + [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, + [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, + [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, +}; + +/* + * Cortex-A9 HW events mapping + */ +static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = { + PERF_MAP_ALL_UNSUPPORTED, + [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, + [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_A9_PERFCTR_INSTR_CORE_RENAME, + [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, + [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, + [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A9_PERFCTR_STALL_ICACHE, + [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV7_A9_PERFCTR_STALL_DISPATCH, +}; + +static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { + PERF_CACHE_MAP_ALL_UNSUPPORTED, + + /* + * The performance counters don't differentiate between read and write + * accesses/misses so this isn't strictly correct, but it's the best we + * can do. Writes and reads get combined. 
+ */ + [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, + [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, + [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, + [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, + + [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, + + [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, + [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, + + [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, + [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, + + [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, + [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, + [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, +}; + +/* + * Cortex-A5 HW events mapping + */ +static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = { + PERF_MAP_ALL_UNSUPPORTED, + [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, + [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, + [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, + [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, + [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, +}; + +static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { + PERF_CACHE_MAP_ALL_UNSUPPORTED, + + [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, + [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, + [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, + [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, + [C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL, + [C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP, + + [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, + [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, + /* + * The prefetch counters don't differentiate between the I side and the + * D side. 
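+ * The same linefill events are therefore wired up under both the
+ * C(L1D) and C(L1I) prefetch slots.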
+ */ + [C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL, + [C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP, + + [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, + [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, + + [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, + [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, + + [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, + [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, + [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, +}; + +/* + * Cortex-A15 HW events mapping + */ +static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = { + PERF_MAP_ALL_UNSUPPORTED, + [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, + [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, + [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, + [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_A15_PERFCTR_PC_WRITE_SPEC, + [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES, +}; + +static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { + PERF_CACHE_MAP_ALL_UNSUPPORTED, + + [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ, + [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ, + [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE, + [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE, + + /* + * Not all performance counters differentiate between read and write + * accesses/misses so we're not always strictly correct, but it's the + * best we can do. Writes and reads get combined in these cases. 
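+ * On the A15 that only applies to the L1I, ITLB and BPU rows; the
+ * L1D, L2 and DTLB rows use distinct read/write events.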
+ */ + [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, + [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, + + [C(LL)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ, + [C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ, + [C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE, + [C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE, + + [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ, + [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE, + + [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, + [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, + + [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, + [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, + [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, +}; + +/* + * Cortex-A7 HW events mapping + */ +static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = { + PERF_MAP_ALL_UNSUPPORTED, + [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, + [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, + [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, + [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, + [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES, +}; + +static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { + PERF_CACHE_MAP_ALL_UNSUPPORTED, + + /* + * The performance counters don't differentiate between read and write + * accesses/misses so this isn't strictly correct, but it's the best we + * can do. Writes and reads get combined. 
+ */ + [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, + [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, + [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, + [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, + + [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, + [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, + + [C(LL)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_CACHE_ACCESS, + [C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL, + [C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_CACHE_ACCESS, + [C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL, + + [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, + [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, + + [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, + [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, + + [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, + [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, + [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, +}; + +/* + * Cortex-A12 HW events mapping + */ +static const unsigned armv7_a12_perf_map[PERF_COUNT_HW_MAX] = { + PERF_MAP_ALL_UNSUPPORTED, + [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, + [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, + [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, + [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_A12_PERFCTR_PC_WRITE_SPEC, + [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES, +}; + +static const unsigned armv7_a12_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { + PERF_CACHE_MAP_ALL_UNSUPPORTED, + + [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ, + [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, + [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE, + [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, + + /* + * Not all performance counters differentiate between read and write + * accesses/misses so we're not always strictly correct, but it's the + * best we can do. Writes and reads get combined in these cases. 
+ */ + [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, + [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, + + [C(LL)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ, + [C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL, + [C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE, + [C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL, + + [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, + [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, + [C(DTLB)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV7_A12_PERFCTR_PF_TLB_REFILL, + + [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, + [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, + + [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, + [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, + [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, +}; + +/* + * Krait HW events mapping + */ +static const unsigned krait_perf_map[PERF_COUNT_HW_MAX] = { + PERF_MAP_ALL_UNSUPPORTED, + [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, + [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, + [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES, +}; + +static const unsigned krait_perf_map_no_branch[PERF_COUNT_HW_MAX] = { + PERF_MAP_ALL_UNSUPPORTED, + [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, + [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, + [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES, +}; + +static const unsigned krait_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { + PERF_CACHE_MAP_ALL_UNSUPPORTED, + + /* + * The performance counters don't differentiate between read and write + * accesses/misses so this isn't strictly correct, but it's the best we + * can do. Writes and reads get combined. 
+ */
+	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+	[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+
+	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_ICACHE_ACCESS,
+	[C(L1I)][C(OP_READ)][C(RESULT_MISS)] = KRAIT_PERFCTR_L1_ICACHE_MISS,
+
+	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_DTLB_ACCESS,
+	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_DTLB_ACCESS,
+
+	[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_ITLB_ACCESS,
+	[C(ITLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_ITLB_ACCESS,
+
+	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+	[C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+};
+
+/*
+ * Scorpion HW events mapping
+ */
+static const unsigned scorpion_perf_map[PERF_COUNT_HW_MAX] = {
+	PERF_MAP_ALL_UNSUPPORTED,
+	[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
+	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
+	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+	[PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
+};
+
+static const unsigned scorpion_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+					[PERF_COUNT_HW_CACHE_OP_MAX]
+					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+	PERF_CACHE_MAP_ALL_UNSUPPORTED,
+	/*
+	 * The performance counters don't differentiate between read and write
+	 * accesses/misses so this isn't strictly correct, but it's the best we
+	 * can do. Writes and reads get combined.
+	 */
+	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+	[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_ICACHE_ACCESS,
+	[C(L1I)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ICACHE_MISS,
+	/*
+	 * Only ITLB misses and DTLB refills are supported. If users want the
+	 * DTLB refill misses, a raw counter must be used.
+ */ + [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS, + [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_DTLB_MISS, + [C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS, + [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_DTLB_MISS, + [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ITLB_MISS, + [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_ITLB_MISS, + [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, + [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, + [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, +}; + +PMU_FORMAT_ATTR(event, "config:0-7"); + +static struct attribute *armv7_pmu_format_attrs[] = { + &format_attr_event.attr, + NULL, +}; + +static struct attribute_group armv7_pmu_format_attr_group = { + .name = "format", + .attrs = armv7_pmu_format_attrs, +}; + +#define ARMV7_EVENT_ATTR_RESOLVE(m) #m +#define ARMV7_EVENT_ATTR(name, config) \ + PMU_EVENT_ATTR_STRING(name, armv7_event_attr_##name, \ + "event=" ARMV7_EVENT_ATTR_RESOLVE(config)) + +ARMV7_EVENT_ATTR(sw_incr, ARMV7_PERFCTR_PMNC_SW_INCR); +ARMV7_EVENT_ATTR(l1i_cache_refill, ARMV7_PERFCTR_L1_ICACHE_REFILL); +ARMV7_EVENT_ATTR(l1i_tlb_refill, ARMV7_PERFCTR_ITLB_REFILL); +ARMV7_EVENT_ATTR(l1d_cache_refill, ARMV7_PERFCTR_L1_DCACHE_REFILL); +ARMV7_EVENT_ATTR(l1d_cache, ARMV7_PERFCTR_L1_DCACHE_ACCESS); +ARMV7_EVENT_ATTR(l1d_tlb_refill, ARMV7_PERFCTR_DTLB_REFILL); +ARMV7_EVENT_ATTR(ld_retired, ARMV7_PERFCTR_MEM_READ); +ARMV7_EVENT_ATTR(st_retired, ARMV7_PERFCTR_MEM_WRITE); +ARMV7_EVENT_ATTR(inst_retired, ARMV7_PERFCTR_INSTR_EXECUTED); +ARMV7_EVENT_ATTR(exc_taken, ARMV7_PERFCTR_EXC_TAKEN); +ARMV7_EVENT_ATTR(exc_return, ARMV7_PERFCTR_EXC_EXECUTED); +ARMV7_EVENT_ATTR(cid_write_retired, ARMV7_PERFCTR_CID_WRITE); +ARMV7_EVENT_ATTR(pc_write_retired, ARMV7_PERFCTR_PC_WRITE); +ARMV7_EVENT_ATTR(br_immed_retired, ARMV7_PERFCTR_PC_IMM_BRANCH); +ARMV7_EVENT_ATTR(br_return_retired, ARMV7_PERFCTR_PC_PROC_RETURN); +ARMV7_EVENT_ATTR(unaligned_ldst_retired, ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS); +ARMV7_EVENT_ATTR(br_mis_pred, ARMV7_PERFCTR_PC_BRANCH_MIS_PRED); +ARMV7_EVENT_ATTR(cpu_cycles, ARMV7_PERFCTR_CLOCK_CYCLES); +ARMV7_EVENT_ATTR(br_pred, ARMV7_PERFCTR_PC_BRANCH_PRED); + +static struct attribute *armv7_pmuv1_event_attrs[] = { + &armv7_event_attr_sw_incr.attr.attr, + &armv7_event_attr_l1i_cache_refill.attr.attr, + &armv7_event_attr_l1i_tlb_refill.attr.attr, + &armv7_event_attr_l1d_cache_refill.attr.attr, + &armv7_event_attr_l1d_cache.attr.attr, + &armv7_event_attr_l1d_tlb_refill.attr.attr, + &armv7_event_attr_ld_retired.attr.attr, + &armv7_event_attr_st_retired.attr.attr, + &armv7_event_attr_inst_retired.attr.attr, + &armv7_event_attr_exc_taken.attr.attr, + &armv7_event_attr_exc_return.attr.attr, + &armv7_event_attr_cid_write_retired.attr.attr, + &armv7_event_attr_pc_write_retired.attr.attr, + &armv7_event_attr_br_immed_retired.attr.attr, + &armv7_event_attr_br_return_retired.attr.attr, + &armv7_event_attr_unaligned_ldst_retired.attr.attr, + &armv7_event_attr_br_mis_pred.attr.attr, + &armv7_event_attr_cpu_cycles.attr.attr, + &armv7_event_attr_br_pred.attr.attr, + NULL, +}; + +static struct attribute_group armv7_pmuv1_events_attr_group = { + .name = "events", + .attrs = armv7_pmuv1_event_attrs, +}; + +ARMV7_EVENT_ATTR(mem_access, ARMV7_PERFCTR_MEM_ACCESS); +ARMV7_EVENT_ATTR(l1i_cache, ARMV7_PERFCTR_L1_ICACHE_ACCESS); +ARMV7_EVENT_ATTR(l1d_cache_wb, 
ARMV7_PERFCTR_L1_DCACHE_WB); +ARMV7_EVENT_ATTR(l2d_cache, ARMV7_PERFCTR_L2_CACHE_ACCESS); +ARMV7_EVENT_ATTR(l2d_cache_refill, ARMV7_PERFCTR_L2_CACHE_REFILL); +ARMV7_EVENT_ATTR(l2d_cache_wb, ARMV7_PERFCTR_L2_CACHE_WB); +ARMV7_EVENT_ATTR(bus_access, ARMV7_PERFCTR_BUS_ACCESS); +ARMV7_EVENT_ATTR(memory_error, ARMV7_PERFCTR_MEM_ERROR); +ARMV7_EVENT_ATTR(inst_spec, ARMV7_PERFCTR_INSTR_SPEC); +ARMV7_EVENT_ATTR(ttbr_write_retired, ARMV7_PERFCTR_TTBR_WRITE); +ARMV7_EVENT_ATTR(bus_cycles, ARMV7_PERFCTR_BUS_CYCLES); + +static struct attribute *armv7_pmuv2_event_attrs[] = { + &armv7_event_attr_sw_incr.attr.attr, + &armv7_event_attr_l1i_cache_refill.attr.attr, + &armv7_event_attr_l1i_tlb_refill.attr.attr, + &armv7_event_attr_l1d_cache_refill.attr.attr, + &armv7_event_attr_l1d_cache.attr.attr, + &armv7_event_attr_l1d_tlb_refill.attr.attr, + &armv7_event_attr_ld_retired.attr.attr, + &armv7_event_attr_st_retired.attr.attr, + &armv7_event_attr_inst_retired.attr.attr, + &armv7_event_attr_exc_taken.attr.attr, + &armv7_event_attr_exc_return.attr.attr, + &armv7_event_attr_cid_write_retired.attr.attr, + &armv7_event_attr_pc_write_retired.attr.attr, + &armv7_event_attr_br_immed_retired.attr.attr, + &armv7_event_attr_br_return_retired.attr.attr, + &armv7_event_attr_unaligned_ldst_retired.attr.attr, + &armv7_event_attr_br_mis_pred.attr.attr, + &armv7_event_attr_cpu_cycles.attr.attr, + &armv7_event_attr_br_pred.attr.attr, + &armv7_event_attr_mem_access.attr.attr, + &armv7_event_attr_l1i_cache.attr.attr, + &armv7_event_attr_l1d_cache_wb.attr.attr, + &armv7_event_attr_l2d_cache.attr.attr, + &armv7_event_attr_l2d_cache_refill.attr.attr, + &armv7_event_attr_l2d_cache_wb.attr.attr, + &armv7_event_attr_bus_access.attr.attr, + &armv7_event_attr_memory_error.attr.attr, + &armv7_event_attr_inst_spec.attr.attr, + &armv7_event_attr_ttbr_write_retired.attr.attr, + &armv7_event_attr_bus_cycles.attr.attr, + NULL, +}; + +static struct attribute_group armv7_pmuv2_events_attr_group = { + .name = "events", + .attrs = armv7_pmuv2_event_attrs, +}; + +/* + * Perf Events' indices + */ +#define ARMV7_IDX_CYCLE_COUNTER 0 +#define ARMV7_IDX_COUNTER0 1 +#define ARMV7_IDX_COUNTER_LAST(cpu_pmu) \ + (ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1) + +#define ARMV7_MAX_COUNTERS 32 +#define ARMV7_COUNTER_MASK (ARMV7_MAX_COUNTERS - 1) + +/* + * ARMv7 low level PMNC access + */ + +/* + * Perf Event to low level counters mapping + */ +#define ARMV7_IDX_TO_COUNTER(x) \ + (((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK) + +/* + * Per-CPU PMNC: config reg + */ +#define ARMV7_PMNC_E (1 << 0) /* Enable all counters */ +#define ARMV7_PMNC_P (1 << 1) /* Reset all counters */ +#define ARMV7_PMNC_C (1 << 2) /* Cycle counter reset */ +#define ARMV7_PMNC_D (1 << 3) /* CCNT counts every 64th cpu cycle */ +#define ARMV7_PMNC_X (1 << 4) /* Export to ETM */ +#define ARMV7_PMNC_DP (1 << 5) /* Disable CCNT if non-invasive debug*/ +#define ARMV7_PMNC_N_SHIFT 11 /* Number of counters supported */ +#define ARMV7_PMNC_N_MASK 0x1f +#define ARMV7_PMNC_MASK 0x3f /* Mask for writable bits */ + +/* + * FLAG: counters overflow flag status reg + */ +#define ARMV7_FLAG_MASK 0xffffffff /* Mask for writable bits */ +#define ARMV7_OVERFLOWED_MASK ARMV7_FLAG_MASK + +/* + * PMXEVTYPER: Event selection reg + */ +#define ARMV7_EVTYPE_MASK 0xc80000ff /* Mask for writable bits */ +#define ARMV7_EVTYPE_EVENT 0xff /* Mask for EVENT bits */ + +/* + * Event filters for PMUv2 + */ +#define ARMV7_EXCLUDE_PL1 BIT(31) +#define ARMV7_EXCLUDE_USER BIT(30) +#define ARMV7_INCLUDE_HYP BIT(27) + 
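The three filter bits above share the PMXEVTYPER word with the 8-bit event number. A minimal standalone sketch of how such a value is composed (the helper name and free-standing constants are illustrative; the patch itself builds config_base in armv7pmu_set_event_filter() and masks it in armv7_pmnc_write_evtsel()):

#include <stdint.h>

#define EVTYPE_MASK	0xc80000ffU	/* writable bits: 31, 30, 27 and 7:0 */
#define EXCLUDE_PL1	(1U << 31)
#define EXCLUDE_USER	(1U << 30)
#define INCLUDE_HYP	(1U << 27)

/* Compose the value a PMUv2 counter's PMXEVTYPER would be given. */
static uint32_t pmxevtyper(uint8_t event, int no_user, int no_kernel, int hyp)
{
	uint32_t val = event;		/* EVENT field, bits 7:0 */

	if (no_user)
		val |= EXCLUDE_USER;	/* don't count at PL0 */
	if (no_kernel)
		val |= EXCLUDE_PL1;	/* don't count at PL1 */
	if (hyp)
		val |= INCLUDE_HYP;	/* also count at PL2 */

	return val & EVTYPE_MASK;	/* drop the non-writable bits */
}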
+/* + * Secure debug enable reg + */ +#define ARMV7_SDER_SUNIDEN BIT(1) /* Permit non-invasive debug */ + +static inline u32 armv7_pmnc_read(void) +{ + u32 val; + asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val)); + return val; +} + +static inline void armv7_pmnc_write(u32 val) +{ + val &= ARMV7_PMNC_MASK; + isb(); + asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val)); +} + +static inline int armv7_pmnc_has_overflowed(u32 pmnc) +{ + return pmnc & ARMV7_OVERFLOWED_MASK; +} + +static inline int armv7_pmnc_counter_valid(struct arm_pmu *cpu_pmu, int idx) +{ + return idx >= ARMV7_IDX_CYCLE_COUNTER && + idx <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); +} + +static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx) +{ + return pmnc & BIT(ARMV7_IDX_TO_COUNTER(idx)); +} + +static inline void armv7_pmnc_select_counter(int idx) +{ + u32 counter = ARMV7_IDX_TO_COUNTER(idx); + asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter)); + isb(); +} + +static inline u64 armv7pmu_read_counter(struct perf_event *event) +{ + struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + u32 value = 0; + + if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) { + pr_err("CPU%u reading wrong counter %d\n", + smp_processor_id(), idx); + } else if (idx == ARMV7_IDX_CYCLE_COUNTER) { + asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value)); + } else { + armv7_pmnc_select_counter(idx); + asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value)); + } + + return value; +} + +static inline void armv7pmu_write_counter(struct perf_event *event, u64 value) +{ + struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) { + pr_err("CPU%u writing wrong counter %d\n", + smp_processor_id(), idx); + } else if (idx == ARMV7_IDX_CYCLE_COUNTER) { + asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" ((u32)value)); + } else { + armv7_pmnc_select_counter(idx); + asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" ((u32)value)); + } +} + +static inline void armv7_pmnc_write_evtsel(int idx, u32 val) +{ + armv7_pmnc_select_counter(idx); + val &= ARMV7_EVTYPE_MASK; + asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val)); +} + +static inline void armv7_pmnc_enable_counter(int idx) +{ + u32 counter = ARMV7_IDX_TO_COUNTER(idx); + asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter))); +} + +static inline void armv7_pmnc_disable_counter(int idx) +{ + u32 counter = ARMV7_IDX_TO_COUNTER(idx); + asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter))); +} + +static inline void armv7_pmnc_enable_intens(int idx) +{ + u32 counter = ARMV7_IDX_TO_COUNTER(idx); + asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter))); +} + +static inline void armv7_pmnc_disable_intens(int idx) +{ + u32 counter = ARMV7_IDX_TO_COUNTER(idx); + asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter))); + isb(); + /* Clear the overflow flag in case an interrupt is pending. 
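+	 * The overflow flag register is write-one-to-clear, so writing
+	 * BIT(counter) acknowledges only this counter's pending overflow.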
*/ + asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter))); + isb(); +} + +static inline u32 armv7_pmnc_getreset_flags(void) +{ + u32 val; + + /* Read */ + asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val)); + + /* Write to clear flags */ + val &= ARMV7_FLAG_MASK; + asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val)); + + return val; +} + +#ifdef DEBUG +static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu) +{ + u32 val; + unsigned int cnt; + + pr_info("PMNC registers dump:\n"); + + asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val)); + pr_info("PMNC =0x%08x\n", val); + + asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val)); + pr_info("CNTENS=0x%08x\n", val); + + asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val)); + pr_info("INTENS=0x%08x\n", val); + + asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val)); + pr_info("FLAGS =0x%08x\n", val); + + asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val)); + pr_info("SELECT=0x%08x\n", val); + + asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val)); + pr_info("CCNT =0x%08x\n", val); + + for (cnt = ARMV7_IDX_COUNTER0; + cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) { + armv7_pmnc_select_counter(cnt); + asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val)); + pr_info("CNT[%d] count =0x%08x\n", + ARMV7_IDX_TO_COUNTER(cnt), val); + asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val)); + pr_info("CNT[%d] evtsel=0x%08x\n", + ARMV7_IDX_TO_COUNTER(cnt), val); + } +} +#endif + +static void armv7pmu_enable_event(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); + int idx = hwc->idx; + + if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) { + pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n", + smp_processor_id(), idx); + return; + } + + /* + * Enable counter and interrupt, and set the counter to count + * the event that we're interested in. + */ + + /* + * Disable counter + */ + armv7_pmnc_disable_counter(idx); + + /* + * Set event (if destined for PMNx counters) + * We only need to set the event for the cycle counter if we + * have the ability to perform event filtering. + */ + if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER) + armv7_pmnc_write_evtsel(idx, hwc->config_base); + + /* + * Enable interrupt for this counter + */ + armv7_pmnc_enable_intens(idx); + + /* + * Enable counter + */ + armv7_pmnc_enable_counter(idx); +} + +static void armv7pmu_disable_event(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); + int idx = hwc->idx; + + if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) { + pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n", + smp_processor_id(), idx); + return; + } + + /* + * Disable counter and interrupt + */ + + /* + * Disable counter + */ + armv7_pmnc_disable_counter(idx); + + /* + * Disable interrupt for this counter + */ + armv7_pmnc_disable_intens(idx); +} + +static irqreturn_t armv7pmu_handle_irq(struct arm_pmu *cpu_pmu) +{ + u32 pmnc; + struct perf_sample_data data; + struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events); + struct pt_regs *regs; + int idx; + + /* + * Get and reset the IRQ flags + */ + pmnc = armv7_pmnc_getreset_flags(); + + /* + * Did an overflow occur? 
+ */ + if (!armv7_pmnc_has_overflowed(pmnc)) + return IRQ_NONE; + + /* + * Handle the counter(s) overflow(s) + */ + regs = get_irq_regs(); + + for (idx = 0; idx < cpu_pmu->num_events; ++idx) { + struct perf_event *event = cpuc->events[idx]; + struct hw_perf_event *hwc; + + /* Ignore if we don't have an event. */ + if (!event) + continue; + + /* + * We have a single interrupt for all counters. Check that + * each counter has overflowed before we process it. + */ + if (!armv7_pmnc_counter_has_overflowed(pmnc, idx)) + continue; + + hwc = &event->hw; + armpmu_event_update(event); + perf_sample_data_init(&data, 0, hwc->last_period); + if (!armpmu_event_set_period(event)) + continue; + + if (perf_event_overflow(event, &data, regs)) + cpu_pmu->disable(event); + } + + /* + * Handle the pending perf events. + * + * Note: this call *must* be run with interrupts disabled. For + * platforms that can have the PMU interrupts raised as an NMI, this + * will not work. + */ + irq_work_run(); + + return IRQ_HANDLED; +} + +static void armv7pmu_start(struct arm_pmu *cpu_pmu) +{ + /* Enable all counters */ + armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E); +} + +static void armv7pmu_stop(struct arm_pmu *cpu_pmu) +{ + /* Disable all counters */ + armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E); +} + +static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc, + struct perf_event *event) +{ + int idx; + struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + unsigned long evtype = hwc->config_base & ARMV7_EVTYPE_EVENT; + + /* Always place a cycle counter into the cycle counter. */ + if (evtype == ARMV7_PERFCTR_CPU_CYCLES) { + if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask)) + return -EAGAIN; + + return ARMV7_IDX_CYCLE_COUNTER; + } + + /* + * For anything other than a cycle counter, try and use + * the events counters + */ + for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) { + if (!test_and_set_bit(idx, cpuc->used_mask)) + return idx; + } + + /* The counters are all in use. */ + return -EAGAIN; +} + +static void armv7pmu_clear_event_idx(struct pmu_hw_events *cpuc, + struct perf_event *event) +{ + clear_bit(event->hw.idx, cpuc->used_mask); +} + +/* + * Add an event filter to a given event. This will only work for PMUv2 PMUs. + */ +static int armv7pmu_set_event_filter(struct hw_perf_event *event, + struct perf_event_attr *attr) +{ + unsigned long config_base = 0; + + if (attr->exclude_idle) { + pr_debug("ARM performance counters do not support mode exclusion\n"); + return -EOPNOTSUPP; + } + if (attr->exclude_user) + config_base |= ARMV7_EXCLUDE_USER; + if (attr->exclude_kernel) + config_base |= ARMV7_EXCLUDE_PL1; + if (!attr->exclude_hv) + config_base |= ARMV7_INCLUDE_HYP; + + /* + * Install the filter into config_base as this is used to + * construct the event type. + */ + event->config_base = config_base; + + return 0; +} + +static void armv7pmu_reset(void *info) +{ + struct arm_pmu *cpu_pmu = (struct arm_pmu *)info; + u32 idx, nb_cnt = cpu_pmu->num_events, val; + + if (cpu_pmu->secure_access) { + asm volatile("mrc p15, 0, %0, c1, c1, 1" : "=r" (val)); + val |= ARMV7_SDER_SUNIDEN; + asm volatile("mcr p15, 0, %0, c1, c1, 1" : : "r" (val)); + } + + /* The counter and interrupt enable registers are unknown at reset. 
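+	 * Disable each one explicitly before writing PMNC; the P and C bits
+	 * written below then reset the event counters and the cycle counter.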
*/ + for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) { + armv7_pmnc_disable_counter(idx); + armv7_pmnc_disable_intens(idx); + } + + /* Initialize & Reset PMNC: C and P bits */ + armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C); +} + +static int armv7_a8_map_event(struct perf_event *event) +{ + return armpmu_map_event(event, &armv7_a8_perf_map, + &armv7_a8_perf_cache_map, 0xFF); +} + +static int armv7_a9_map_event(struct perf_event *event) +{ + return armpmu_map_event(event, &armv7_a9_perf_map, + &armv7_a9_perf_cache_map, 0xFF); +} + +static int armv7_a5_map_event(struct perf_event *event) +{ + return armpmu_map_event(event, &armv7_a5_perf_map, + &armv7_a5_perf_cache_map, 0xFF); +} + +static int armv7_a15_map_event(struct perf_event *event) +{ + return armpmu_map_event(event, &armv7_a15_perf_map, + &armv7_a15_perf_cache_map, 0xFF); +} + +static int armv7_a7_map_event(struct perf_event *event) +{ + return armpmu_map_event(event, &armv7_a7_perf_map, + &armv7_a7_perf_cache_map, 0xFF); +} + +static int armv7_a12_map_event(struct perf_event *event) +{ + return armpmu_map_event(event, &armv7_a12_perf_map, + &armv7_a12_perf_cache_map, 0xFF); +} + +static int krait_map_event(struct perf_event *event) +{ + return armpmu_map_event(event, &krait_perf_map, + &krait_perf_cache_map, 0xFFFFF); +} + +static int krait_map_event_no_branch(struct perf_event *event) +{ + return armpmu_map_event(event, &krait_perf_map_no_branch, + &krait_perf_cache_map, 0xFFFFF); +} + +static int scorpion_map_event(struct perf_event *event) +{ + return armpmu_map_event(event, &scorpion_perf_map, + &scorpion_perf_cache_map, 0xFFFFF); +} + +static void armv7pmu_init(struct arm_pmu *cpu_pmu) +{ + cpu_pmu->handle_irq = armv7pmu_handle_irq; + cpu_pmu->enable = armv7pmu_enable_event; + cpu_pmu->disable = armv7pmu_disable_event; + cpu_pmu->read_counter = armv7pmu_read_counter; + cpu_pmu->write_counter = armv7pmu_write_counter; + cpu_pmu->get_event_idx = armv7pmu_get_event_idx; + cpu_pmu->clear_event_idx = armv7pmu_clear_event_idx; + cpu_pmu->start = armv7pmu_start; + cpu_pmu->stop = armv7pmu_stop; + cpu_pmu->reset = armv7pmu_reset; +}; + +static void armv7_read_num_pmnc_events(void *info) +{ + int *nb_cnt = info; + + /* Read the nb of CNTx counters supported from PMNC */ + *nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK; + + /* Add the CPU cycles counter */ + *nb_cnt += 1; +} + +static int armv7_probe_num_events(struct arm_pmu *arm_pmu) +{ + return smp_call_function_any(&arm_pmu->supported_cpus, + armv7_read_num_pmnc_events, + &arm_pmu->num_events, 1); +} + +static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu) +{ + armv7pmu_init(cpu_pmu); + cpu_pmu->name = "armv7_cortex_a8"; + cpu_pmu->map_event = armv7_a8_map_event; + cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = + &armv7_pmuv1_events_attr_group; + cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = + &armv7_pmu_format_attr_group; + return armv7_probe_num_events(cpu_pmu); +} + +static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu) +{ + armv7pmu_init(cpu_pmu); + cpu_pmu->name = "armv7_cortex_a9"; + cpu_pmu->map_event = armv7_a9_map_event; + cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = + &armv7_pmuv1_events_attr_group; + cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = + &armv7_pmu_format_attr_group; + return armv7_probe_num_events(cpu_pmu); +} + +static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu) +{ + armv7pmu_init(cpu_pmu); + cpu_pmu->name = "armv7_cortex_a5"; + cpu_pmu->map_event = armv7_a5_map_event; + 
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = + &armv7_pmuv1_events_attr_group; + cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = + &armv7_pmu_format_attr_group; + return armv7_probe_num_events(cpu_pmu); +} + +static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu) +{ + armv7pmu_init(cpu_pmu); + cpu_pmu->name = "armv7_cortex_a15"; + cpu_pmu->map_event = armv7_a15_map_event; + cpu_pmu->set_event_filter = armv7pmu_set_event_filter; + cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = + &armv7_pmuv2_events_attr_group; + cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = + &armv7_pmu_format_attr_group; + return armv7_probe_num_events(cpu_pmu); +} + +static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu) +{ + armv7pmu_init(cpu_pmu); + cpu_pmu->name = "armv7_cortex_a7"; + cpu_pmu->map_event = armv7_a7_map_event; + cpu_pmu->set_event_filter = armv7pmu_set_event_filter; + cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = + &armv7_pmuv2_events_attr_group; + cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = + &armv7_pmu_format_attr_group; + return armv7_probe_num_events(cpu_pmu); +} + +static int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu) +{ + armv7pmu_init(cpu_pmu); + cpu_pmu->name = "armv7_cortex_a12"; + cpu_pmu->map_event = armv7_a12_map_event; + cpu_pmu->set_event_filter = armv7pmu_set_event_filter; + cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = + &armv7_pmuv2_events_attr_group; + cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = + &armv7_pmu_format_attr_group; + return armv7_probe_num_events(cpu_pmu); +} + +static int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu) +{ + int ret = armv7_a12_pmu_init(cpu_pmu); + cpu_pmu->name = "armv7_cortex_a17"; + cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = + &armv7_pmuv2_events_attr_group; + cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = + &armv7_pmu_format_attr_group; + return ret; +} + +/* + * Krait Performance Monitor Region Event Selection Register (PMRESRn) + * + * 31 30 24 16 8 0 + * +--------------------------------+ + * PMRESR0 | EN | CC | CC | CC | CC | N = 1, R = 0 + * +--------------------------------+ + * PMRESR1 | EN | CC | CC | CC | CC | N = 1, R = 1 + * +--------------------------------+ + * PMRESR2 | EN | CC | CC | CC | CC | N = 1, R = 2 + * +--------------------------------+ + * VPMRESR0 | EN | CC | CC | CC | CC | N = 2, R = ? + * +--------------------------------+ + * EN | G=3 | G=2 | G=1 | G=0 + * + * Event Encoding: + * + * hwc->config_base = 0xNRCCG + * + * N = prefix, 1 for Krait CPU (PMRESRn), 2 for Venum VFP (VPMRESR) + * R = region register + * CC = class of events the group G is choosing from + * G = group or particular event + * + * Example: 0x12021 is a Krait CPU event in PMRESR2's group 1 with code 2 + * + * A region (R) corresponds to a piece of the CPU (execution unit, instruction + * unit, etc.) while the event code (CC) corresponds to a particular class of + * events (interrupts for example). An event code is broken down into + * groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for + * example). 
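+ *
+ * Decoding that example with the EVENT_* macros below:
+ *   EVENT_REGION(0x12021) = (0x12021 >> 12) & 0xf  = 2    (PMRESR2)
+ *   EVENT_CODE(0x12021)   = (0x12021 >> 4)  & 0xff = 0x02 (class CC)
+ *   EVENT_GROUP(0x12021)  =  0x12021        & 0xf  = 1    (group G)
+ *   EVENT_CPU(0x12021)    = true, since bit 16 (KRAIT_EVENT) is set (N = 1)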
+ */ + +#define KRAIT_EVENT (1 << 16) +#define VENUM_EVENT (2 << 16) +#define KRAIT_EVENT_MASK (KRAIT_EVENT | VENUM_EVENT) +#define PMRESRn_EN BIT(31) + +#define EVENT_REGION(event) (((event) >> 12) & 0xf) /* R */ +#define EVENT_GROUP(event) ((event) & 0xf) /* G */ +#define EVENT_CODE(event) (((event) >> 4) & 0xff) /* CC */ +#define EVENT_VENUM(event) (!!(event & VENUM_EVENT)) /* N=2 */ +#define EVENT_CPU(event) (!!(event & KRAIT_EVENT)) /* N=1 */ + +static u32 krait_read_pmresrn(int n) +{ + u32 val; + + switch (n) { + case 0: + asm volatile("mrc p15, 1, %0, c9, c15, 0" : "=r" (val)); + break; + case 1: + asm volatile("mrc p15, 1, %0, c9, c15, 1" : "=r" (val)); + break; + case 2: + asm volatile("mrc p15, 1, %0, c9, c15, 2" : "=r" (val)); + break; + default: + BUG(); /* Should be validated in krait_pmu_get_event_idx() */ + } + + return val; +} + +static void krait_write_pmresrn(int n, u32 val) +{ + switch (n) { + case 0: + asm volatile("mcr p15, 1, %0, c9, c15, 0" : : "r" (val)); + break; + case 1: + asm volatile("mcr p15, 1, %0, c9, c15, 1" : : "r" (val)); + break; + case 2: + asm volatile("mcr p15, 1, %0, c9, c15, 2" : : "r" (val)); + break; + default: + BUG(); /* Should be validated in krait_pmu_get_event_idx() */ + } +} + +static u32 venum_read_pmresr(void) +{ + u32 val; + asm volatile("mrc p10, 7, %0, c11, c0, 0" : "=r" (val)); + return val; +} + +static void venum_write_pmresr(u32 val) +{ + asm volatile("mcr p10, 7, %0, c11, c0, 0" : : "r" (val)); +} + +static void venum_pre_pmresr(u32 *venum_orig_val, u32 *fp_orig_val) +{ + u32 venum_new_val; + u32 fp_new_val; + + BUG_ON(preemptible()); + /* CPACR Enable CP10 and CP11 access */ + *venum_orig_val = get_copro_access(); + venum_new_val = *venum_orig_val | CPACC_SVC(10) | CPACC_SVC(11); + set_copro_access(venum_new_val); + + /* Enable FPEXC */ + *fp_orig_val = fmrx(FPEXC); + fp_new_val = *fp_orig_val | FPEXC_EN; + fmxr(FPEXC, fp_new_val); +} + +static void venum_post_pmresr(u32 venum_orig_val, u32 fp_orig_val) +{ + BUG_ON(preemptible()); + /* Restore FPEXC */ + fmxr(FPEXC, fp_orig_val); + isb(); + /* Restore CPACR */ + set_copro_access(venum_orig_val); +} + +static u32 krait_get_pmresrn_event(unsigned int region) +{ + static const u32 pmresrn_table[] = { KRAIT_PMRESR0_GROUP0, + KRAIT_PMRESR1_GROUP0, + KRAIT_PMRESR2_GROUP0 }; + return pmresrn_table[region]; +} + +static void krait_evt_setup(int idx, u32 config_base) +{ + u32 val; + u32 mask; + u32 vval, fval; + unsigned int region = EVENT_REGION(config_base); + unsigned int group = EVENT_GROUP(config_base); + unsigned int code = EVENT_CODE(config_base); + unsigned int group_shift; + bool venum_event = EVENT_VENUM(config_base); + + group_shift = group * 8; + mask = 0xff << group_shift; + + /* Configure evtsel for the region and group */ + if (venum_event) + val = KRAIT_VPMRESR0_GROUP0; + else + val = krait_get_pmresrn_event(region); + val += group; + /* Mix in mode-exclusion bits */ + val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1); + armv7_pmnc_write_evtsel(idx, val); + + if (venum_event) { + venum_pre_pmresr(&vval, &fval); + val = venum_read_pmresr(); + val &= ~mask; + val |= code << group_shift; + val |= PMRESRn_EN; + venum_write_pmresr(val); + venum_post_pmresr(vval, fval); + } else { + val = krait_read_pmresrn(region); + val &= ~mask; + val |= code << group_shift; + val |= PMRESRn_EN; + krait_write_pmresrn(region, val); + } +} + +static u32 clear_pmresrn_group(u32 val, int group) +{ + u32 mask; + int group_shift; + + group_shift = group * 8; + mask = 0xff << group_shift; 
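+	/* e.g. group 2 gives group_shift 16 and mask 0x00ff0000 (bits 23:16) */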
+	val &= ~mask;
+
+	/* Don't clear enable bit if entire region isn't disabled */
+	if (val & ~PMRESRn_EN)
+		return val |= PMRESRn_EN;
+
+	return 0;
+}
+
+static void krait_clearpmu(u32 config_base)
+{
+	u32 val;
+	u32 vval, fval;
+	unsigned int region = EVENT_REGION(config_base);
+	unsigned int group = EVENT_GROUP(config_base);
+	bool venum_event = EVENT_VENUM(config_base);
+
+	if (venum_event) {
+		venum_pre_pmresr(&vval, &fval);
+		val = venum_read_pmresr();
+		val = clear_pmresrn_group(val, group);
+		venum_write_pmresr(val);
+		venum_post_pmresr(vval, fval);
+	} else {
+		val = krait_read_pmresrn(region);
+		val = clear_pmresrn_group(val, group);
+		krait_write_pmresrn(region, val);
+	}
+}
+
+static void krait_pmu_disable_event(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+
+	/* Disable counter and interrupt */
+
+	/* Disable counter */
+	armv7_pmnc_disable_counter(idx);
+
+	/*
+	 * Clear pmresr code (if destined for PMNx counters)
+	 */
+	if (hwc->config_base & KRAIT_EVENT_MASK)
+		krait_clearpmu(hwc->config_base);
+
+	/* Disable interrupt for this counter */
+	armv7_pmnc_disable_intens(idx);
+}
+
+static void krait_pmu_enable_event(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+
+	/*
+	 * Enable counter and interrupt, and set the counter to count
+	 * the event that we're interested in.
+	 */
+
+	/* Disable counter */
+	armv7_pmnc_disable_counter(idx);
+
+	/*
+	 * Set event (if destined for PMNx counters)
+	 * We set the event for the cycle counter because we
+	 * have the ability to perform event filtering.
+	 */
+	if (hwc->config_base & KRAIT_EVENT_MASK)
+		krait_evt_setup(idx, hwc->config_base);
+	else
+		armv7_pmnc_write_evtsel(idx, hwc->config_base);
+
+	/* Enable interrupt for this counter */
+	armv7_pmnc_enable_intens(idx);
+
+	/* Enable counter */
+	armv7_pmnc_enable_counter(idx);
+}
+
+static void krait_pmu_reset(void *info)
+{
+	u32 vval, fval;
+	struct arm_pmu *cpu_pmu = info;
+	u32 idx, nb_cnt = cpu_pmu->num_events;
+
+	armv7pmu_reset(info);
+
+	/* Clear all pmresrs */
+	krait_write_pmresrn(0, 0);
+	krait_write_pmresrn(1, 0);
+	krait_write_pmresrn(2, 0);
+
+	venum_pre_pmresr(&vval, &fval);
+	venum_write_pmresr(0);
+	venum_post_pmresr(vval, fval);
+
+	/* Reset PMxEVCNTCR to sane default */
+	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
+		armv7_pmnc_select_counter(idx);
+		asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
+	}
+
+}
+
+static int krait_event_to_bit(struct perf_event *event, unsigned int region,
+			      unsigned int group)
+{
+	int bit;
+	struct hw_perf_event *hwc = &event->hw;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+
+	if (hwc->config_base & VENUM_EVENT)
+		bit = KRAIT_VPMRESR0_GROUP0;
+	else
+		bit = krait_get_pmresrn_event(region);
+	bit -= krait_get_pmresrn_event(0);
+	bit += group;
+	/*
+	 * Lower bits are reserved for use by the counters (see
+	 * armv7pmu_get_event_idx() for more info)
+	 */
+	bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;
+
+	return bit;
+}
+
+/*
+ * We check for column exclusion constraints here.
+ * Two events can't use the same group within a pmresr register.
+ */ +static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc, + struct perf_event *event) +{ + int idx; + int bit = -1; + struct hw_perf_event *hwc = &event->hw; + unsigned int region = EVENT_REGION(hwc->config_base); + unsigned int code = EVENT_CODE(hwc->config_base); + unsigned int group = EVENT_GROUP(hwc->config_base); + bool venum_event = EVENT_VENUM(hwc->config_base); + bool krait_event = EVENT_CPU(hwc->config_base); + + if (venum_event || krait_event) { + /* Ignore invalid events */ + if (group > 3 || region > 2) + return -EINVAL; + if (venum_event && (code & 0xe0)) + return -EINVAL; + + bit = krait_event_to_bit(event, region, group); + if (test_and_set_bit(bit, cpuc->used_mask)) + return -EAGAIN; + } + + idx = armv7pmu_get_event_idx(cpuc, event); + if (idx < 0 && bit >= 0) + clear_bit(bit, cpuc->used_mask); + + return idx; +} + +static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc, + struct perf_event *event) +{ + int bit; + struct hw_perf_event *hwc = &event->hw; + unsigned int region = EVENT_REGION(hwc->config_base); + unsigned int group = EVENT_GROUP(hwc->config_base); + bool venum_event = EVENT_VENUM(hwc->config_base); + bool krait_event = EVENT_CPU(hwc->config_base); + + armv7pmu_clear_event_idx(cpuc, event); + if (venum_event || krait_event) { + bit = krait_event_to_bit(event, region, group); + clear_bit(bit, cpuc->used_mask); + } +} + +static int krait_pmu_init(struct arm_pmu *cpu_pmu) +{ + armv7pmu_init(cpu_pmu); + cpu_pmu->name = "armv7_krait"; + /* Some early versions of Krait don't support PC write events */ + if (of_property_read_bool(cpu_pmu->plat_device->dev.of_node, + "qcom,no-pc-write")) + cpu_pmu->map_event = krait_map_event_no_branch; + else + cpu_pmu->map_event = krait_map_event; + cpu_pmu->set_event_filter = armv7pmu_set_event_filter; + cpu_pmu->reset = krait_pmu_reset; + cpu_pmu->enable = krait_pmu_enable_event; + cpu_pmu->disable = krait_pmu_disable_event; + cpu_pmu->get_event_idx = krait_pmu_get_event_idx; + cpu_pmu->clear_event_idx = krait_pmu_clear_event_idx; + return armv7_probe_num_events(cpu_pmu); +} + +/* + * Scorpion Local Performance Monitor Register (LPMn) + * + * 31 30 24 16 8 0 + * +--------------------------------+ + * LPM0 | EN | CC | CC | CC | CC | N = 1, R = 0 + * +--------------------------------+ + * LPM1 | EN | CC | CC | CC | CC | N = 1, R = 1 + * +--------------------------------+ + * LPM2 | EN | CC | CC | CC | CC | N = 1, R = 2 + * +--------------------------------+ + * L2LPM | EN | CC | CC | CC | CC | N = 1, R = 3 + * +--------------------------------+ + * VLPM | EN | CC | CC | CC | CC | N = 2, R = ? + * +--------------------------------+ + * EN | G=3 | G=2 | G=1 | G=0 + * + * + * Event Encoding: + * + * hwc->config_base = 0xNRCCG + * + * N = prefix, 1 for Scorpion CPU (LPMn/L2LPM), 2 for Venum VFP (VLPM) + * R = region register + * CC = class of events the group G is choosing from + * G = group or particular event + * + * Example: 0x12021 is a Scorpion CPU event in LPM2's group 1 with code 2 + * + * A region (R) corresponds to a piece of the CPU (execution unit, instruction + * unit, etc.) while the event code (CC) corresponds to a particular class of + * events (interrupts for example). An event code is broken down into + * groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for + * example). 
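+ *
+ * Scorpion reuses the Krait EVENT_* macros: a hypothetical encoding
+ * 0x13021 would decode to a Scorpion CPU event (N = 1) in L2LPM's
+ * (R = 3) group 1 with code 2.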
+ */ + +static u32 scorpion_read_pmresrn(int n) +{ + u32 val; + + switch (n) { + case 0: + asm volatile("mrc p15, 0, %0, c15, c0, 0" : "=r" (val)); + break; + case 1: + asm volatile("mrc p15, 1, %0, c15, c0, 0" : "=r" (val)); + break; + case 2: + asm volatile("mrc p15, 2, %0, c15, c0, 0" : "=r" (val)); + break; + case 3: + asm volatile("mrc p15, 3, %0, c15, c2, 0" : "=r" (val)); + break; + default: + BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */ + } + + return val; +} + +static void scorpion_write_pmresrn(int n, u32 val) +{ + switch (n) { + case 0: + asm volatile("mcr p15, 0, %0, c15, c0, 0" : : "r" (val)); + break; + case 1: + asm volatile("mcr p15, 1, %0, c15, c0, 0" : : "r" (val)); + break; + case 2: + asm volatile("mcr p15, 2, %0, c15, c0, 0" : : "r" (val)); + break; + case 3: + asm volatile("mcr p15, 3, %0, c15, c2, 0" : : "r" (val)); + break; + default: + BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */ + } +} + +static u32 scorpion_get_pmresrn_event(unsigned int region) +{ + static const u32 pmresrn_table[] = { SCORPION_LPM0_GROUP0, + SCORPION_LPM1_GROUP0, + SCORPION_LPM2_GROUP0, + SCORPION_L2LPM_GROUP0 }; + return pmresrn_table[region]; +} + +static void scorpion_evt_setup(int idx, u32 config_base) +{ + u32 val; + u32 mask; + u32 vval, fval; + unsigned int region = EVENT_REGION(config_base); + unsigned int group = EVENT_GROUP(config_base); + unsigned int code = EVENT_CODE(config_base); + unsigned int group_shift; + bool venum_event = EVENT_VENUM(config_base); + + group_shift = group * 8; + mask = 0xff << group_shift; + + /* Configure evtsel for the region and group */ + if (venum_event) + val = SCORPION_VLPM_GROUP0; + else + val = scorpion_get_pmresrn_event(region); + val += group; + /* Mix in mode-exclusion bits */ + val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1); + armv7_pmnc_write_evtsel(idx, val); + + asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0)); + + if (venum_event) { + venum_pre_pmresr(&vval, &fval); + val = venum_read_pmresr(); + val &= ~mask; + val |= code << group_shift; + val |= PMRESRn_EN; + venum_write_pmresr(val); + venum_post_pmresr(vval, fval); + } else { + val = scorpion_read_pmresrn(region); + val &= ~mask; + val |= code << group_shift; + val |= PMRESRn_EN; + scorpion_write_pmresrn(region, val); + } +} + +static void scorpion_clearpmu(u32 config_base) +{ + u32 val; + u32 vval, fval; + unsigned int region = EVENT_REGION(config_base); + unsigned int group = EVENT_GROUP(config_base); + bool venum_event = EVENT_VENUM(config_base); + + if (venum_event) { + venum_pre_pmresr(&vval, &fval); + val = venum_read_pmresr(); + val = clear_pmresrn_group(val, group); + venum_write_pmresr(val); + venum_post_pmresr(vval, fval); + } else { + val = scorpion_read_pmresrn(region); + val = clear_pmresrn_group(val, group); + scorpion_write_pmresrn(region, val); + } +} + +static void scorpion_pmu_disable_event(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + /* Disable counter and interrupt */ + + /* Disable counter */ + armv7_pmnc_disable_counter(idx); + + /* + * Clear pmresr code (if destined for PMNx counters) + */ + if (hwc->config_base & KRAIT_EVENT_MASK) + scorpion_clearpmu(hwc->config_base); + + /* Disable interrupt for this counter */ + armv7_pmnc_disable_intens(idx); +} + +static void scorpion_pmu_enable_event(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + /* + * Enable counter and interrupt, and set the counter to count + * 
the event that we're interested in.
+	 */
+
+	/* Disable counter */
+	armv7_pmnc_disable_counter(idx);
+
+	/*
+	 * Set event (if destined for PMNx counters)
+	 * We don't set the event for the cycle counter because we
+	 * don't have the ability to perform event filtering.
+	 */
+	if (hwc->config_base & KRAIT_EVENT_MASK)
+		scorpion_evt_setup(idx, hwc->config_base);
+	else if (idx != ARMV7_IDX_CYCLE_COUNTER)
+		armv7_pmnc_write_evtsel(idx, hwc->config_base);
+
+	/* Enable interrupt for this counter */
+	armv7_pmnc_enable_intens(idx);
+
+	/* Enable counter */
+	armv7_pmnc_enable_counter(idx);
+}
+
+static void scorpion_pmu_reset(void *info)
+{
+	u32 vval, fval;
+	struct arm_pmu *cpu_pmu = info;
+	u32 idx, nb_cnt = cpu_pmu->num_events;
+
+	armv7pmu_reset(info);
+
+	/* Clear all pmresrs */
+	scorpion_write_pmresrn(0, 0);
+	scorpion_write_pmresrn(1, 0);
+	scorpion_write_pmresrn(2, 0);
+	scorpion_write_pmresrn(3, 0);
+
+	venum_pre_pmresr(&vval, &fval);
+	venum_write_pmresr(0);
+	venum_post_pmresr(vval, fval);
+
+	/* Reset PMxEVCNTCR to sane default */
+	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
+		armv7_pmnc_select_counter(idx);
+		asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
+	}
+}
+
+static int scorpion_event_to_bit(struct perf_event *event, unsigned int region,
+				 unsigned int group)
+{
+	int bit;
+	struct hw_perf_event *hwc = &event->hw;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+
+	if (hwc->config_base & VENUM_EVENT)
+		bit = SCORPION_VLPM_GROUP0;
+	else
+		bit = scorpion_get_pmresrn_event(region);
+	bit -= scorpion_get_pmresrn_event(0);
+	bit += group;
+	/*
+	 * Lower bits are reserved for use by the counters (see
+	 * armv7pmu_get_event_idx() for more info)
+	 */
+	bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;
+
+	return bit;
+}
+
+/*
+ * We check for column exclusion constraints here.
+ * Two events can't use the same group within a pmresr register.
+ */ +static int scorpion_pmu_get_event_idx(struct pmu_hw_events *cpuc, + struct perf_event *event) +{ + int idx; + int bit = -1; + struct hw_perf_event *hwc = &event->hw; + unsigned int region = EVENT_REGION(hwc->config_base); + unsigned int group = EVENT_GROUP(hwc->config_base); + bool venum_event = EVENT_VENUM(hwc->config_base); + bool scorpion_event = EVENT_CPU(hwc->config_base); + + if (venum_event || scorpion_event) { + /* Ignore invalid events */ + if (group > 3 || region > 3) + return -EINVAL; + + bit = scorpion_event_to_bit(event, region, group); + if (test_and_set_bit(bit, cpuc->used_mask)) + return -EAGAIN; + } + + idx = armv7pmu_get_event_idx(cpuc, event); + if (idx < 0 && bit >= 0) + clear_bit(bit, cpuc->used_mask); + + return idx; +} + +static void scorpion_pmu_clear_event_idx(struct pmu_hw_events *cpuc, + struct perf_event *event) +{ + int bit; + struct hw_perf_event *hwc = &event->hw; + unsigned int region = EVENT_REGION(hwc->config_base); + unsigned int group = EVENT_GROUP(hwc->config_base); + bool venum_event = EVENT_VENUM(hwc->config_base); + bool scorpion_event = EVENT_CPU(hwc->config_base); + + armv7pmu_clear_event_idx(cpuc, event); + if (venum_event || scorpion_event) { + bit = scorpion_event_to_bit(event, region, group); + clear_bit(bit, cpuc->used_mask); + } +} + +static int scorpion_pmu_init(struct arm_pmu *cpu_pmu) +{ + armv7pmu_init(cpu_pmu); + cpu_pmu->name = "armv7_scorpion"; + cpu_pmu->map_event = scorpion_map_event; + cpu_pmu->reset = scorpion_pmu_reset; + cpu_pmu->enable = scorpion_pmu_enable_event; + cpu_pmu->disable = scorpion_pmu_disable_event; + cpu_pmu->get_event_idx = scorpion_pmu_get_event_idx; + cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx; + return armv7_probe_num_events(cpu_pmu); +} + +static int scorpion_mp_pmu_init(struct arm_pmu *cpu_pmu) +{ + armv7pmu_init(cpu_pmu); + cpu_pmu->name = "armv7_scorpion_mp"; + cpu_pmu->map_event = scorpion_map_event; + cpu_pmu->reset = scorpion_pmu_reset; + cpu_pmu->enable = scorpion_pmu_enable_event; + cpu_pmu->disable = scorpion_pmu_disable_event; + cpu_pmu->get_event_idx = scorpion_pmu_get_event_idx; + cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx; + return armv7_probe_num_events(cpu_pmu); +} + +static const struct of_device_id armv7_pmu_of_device_ids[] = { + {.compatible = "arm,cortex-a17-pmu", .data = armv7_a17_pmu_init}, + {.compatible = "arm,cortex-a15-pmu", .data = armv7_a15_pmu_init}, + {.compatible = "arm,cortex-a12-pmu", .data = armv7_a12_pmu_init}, + {.compatible = "arm,cortex-a9-pmu", .data = armv7_a9_pmu_init}, + {.compatible = "arm,cortex-a8-pmu", .data = armv7_a8_pmu_init}, + {.compatible = "arm,cortex-a7-pmu", .data = armv7_a7_pmu_init}, + {.compatible = "arm,cortex-a5-pmu", .data = armv7_a5_pmu_init}, + {.compatible = "qcom,krait-pmu", .data = krait_pmu_init}, + {.compatible = "qcom,scorpion-pmu", .data = scorpion_pmu_init}, + {.compatible = "qcom,scorpion-mp-pmu", .data = scorpion_mp_pmu_init}, + {}, +}; + +static int armv7_pmu_device_probe(struct platform_device *pdev) +{ + return arm_pmu_device_probe(pdev, armv7_pmu_of_device_ids, NULL); +} + +static struct platform_driver armv7_pmu_driver = { + .driver = { + .name = "armv7-pmu", + .of_match_table = armv7_pmu_of_device_ids, + .suppress_bind_attrs = true, + }, + .probe = armv7_pmu_device_probe, +}; + +builtin_platform_driver(armv7_pmu_driver); diff --git a/drivers/perf/arm_xscale_pmu.c b/drivers/perf/arm_xscale_pmu.c new file mode 100644 index 000000000000..3d8b72d6b37f --- /dev/null +++ b/drivers/perf/arm_xscale_pmu.c @@ 
-0,0 +1,745 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ARMv5 [xscale] Performance counter handling code.
+ *
+ * Copyright (C) 2010, ARM Ltd., Will Deacon <will.deacon@arm.com>
+ *
+ * Based on the previous xscale OProfile code.
+ *
+ * There are two variants of the xscale PMU that we support:
+ *	- xscale1pmu: 2 event counters and a cycle counter
+ *	- xscale2pmu: 4 event counters and a cycle counter
+ * The two variants share event definitions, but have different
+ * PMU structures.
+ */
+
+#include <asm/cputype.h>
+#include <asm/irq_regs.h>
+
+#include <linux/of.h>
+#include <linux/perf/arm_pmu.h>
+#include <linux/platform_device.h>
+
+enum xscale_perf_types {
+	XSCALE_PERFCTR_ICACHE_MISS		= 0x00,
+	XSCALE_PERFCTR_ICACHE_NO_DELIVER	= 0x01,
+	XSCALE_PERFCTR_DATA_STALL		= 0x02,
+	XSCALE_PERFCTR_ITLB_MISS		= 0x03,
+	XSCALE_PERFCTR_DTLB_MISS		= 0x04,
+	XSCALE_PERFCTR_BRANCH			= 0x05,
+	XSCALE_PERFCTR_BRANCH_MISS		= 0x06,
+	XSCALE_PERFCTR_INSTRUCTION		= 0x07,
+	XSCALE_PERFCTR_DCACHE_FULL_STALL	= 0x08,
+	XSCALE_PERFCTR_DCACHE_FULL_STALL_CONTIG	= 0x09,
+	XSCALE_PERFCTR_DCACHE_ACCESS		= 0x0A,
+	XSCALE_PERFCTR_DCACHE_MISS		= 0x0B,
+	XSCALE_PERFCTR_DCACHE_WRITE_BACK	= 0x0C,
+	XSCALE_PERFCTR_PC_CHANGED		= 0x0D,
+	XSCALE_PERFCTR_BCU_REQUEST		= 0x10,
+	XSCALE_PERFCTR_BCU_FULL			= 0x11,
+	XSCALE_PERFCTR_BCU_DRAIN		= 0x12,
+	XSCALE_PERFCTR_BCU_ECC_NO_ELOG		= 0x14,
+	XSCALE_PERFCTR_BCU_1_BIT_ERR		= 0x15,
+	XSCALE_PERFCTR_RMW			= 0x16,
+	/* XSCALE_PERFCTR_CCNT is not hardware defined */
+	XSCALE_PERFCTR_CCNT			= 0xFE,
+	XSCALE_PERFCTR_UNUSED			= 0xFF,
+};
+
+enum xscale_counters {
+	XSCALE_CYCLE_COUNTER	= 0,
+	XSCALE_COUNTER0,
+	XSCALE_COUNTER1,
+	XSCALE_COUNTER2,
+	XSCALE_COUNTER3,
+};
+
+static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = {
+	PERF_MAP_ALL_UNSUPPORTED,
+	[PERF_COUNT_HW_CPU_CYCLES]		= XSCALE_PERFCTR_CCNT,
+	[PERF_COUNT_HW_INSTRUCTIONS]		= XSCALE_PERFCTR_INSTRUCTION,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= XSCALE_PERFCTR_BRANCH,
+	[PERF_COUNT_HW_BRANCH_MISSES]		= XSCALE_PERFCTR_BRANCH_MISS,
+	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= XSCALE_PERFCTR_ICACHE_NO_DELIVER,
+};
+
+static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+					   [PERF_COUNT_HW_CACHE_OP_MAX]
+					   [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+	PERF_CACHE_MAP_ALL_UNSUPPORTED,
+
+	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= XSCALE_PERFCTR_DCACHE_ACCESS,
+	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= XSCALE_PERFCTR_DCACHE_MISS,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= XSCALE_PERFCTR_DCACHE_ACCESS,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= XSCALE_PERFCTR_DCACHE_MISS,
+
+	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= XSCALE_PERFCTR_ICACHE_MISS,
+
+	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= XSCALE_PERFCTR_DTLB_MISS,
+	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= XSCALE_PERFCTR_DTLB_MISS,
+
+	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= XSCALE_PERFCTR_ITLB_MISS,
+	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= XSCALE_PERFCTR_ITLB_MISS,
+};
+
+#define XSCALE_PMU_ENABLE	0x001
+#define XSCALE_PMN_RESET	0x002
+#define XSCALE_CCNT_RESET	0x004
+#define XSCALE_PMU_RESET	(XSCALE_CCNT_RESET | XSCALE_PMN_RESET)
+#define XSCALE_PMU_CNT64	0x008
+
+#define XSCALE1_OVERFLOWED_MASK	0x700
+#define XSCALE1_CCOUNT_OVERFLOW	0x400
+#define XSCALE1_COUNT0_OVERFLOW	0x100
+#define XSCALE1_COUNT1_OVERFLOW	0x200
+#define XSCALE1_CCOUNT_INT_EN	0x040
+#define XSCALE1_COUNT0_INT_EN	0x010
+#define XSCALE1_COUNT1_INT_EN	0x020
+#define XSCALE1_COUNT0_EVT_SHFT	12
+#define XSCALE1_COUNT0_EVT_MASK	(0xff << XSCALE1_COUNT0_EVT_SHFT)
+#define XSCALE1_COUNT1_EVT_SHFT	20
+#define XSCALE1_COUNT1_EVT_MASK	(0xff <<
XSCALE1_COUNT1_EVT_SHFT) + +static inline u32 +xscale1pmu_read_pmnc(void) +{ + u32 val; + asm volatile("mrc p14, 0, %0, c0, c0, 0" : "=r" (val)); + return val; +} + +static inline void +xscale1pmu_write_pmnc(u32 val) +{ + /* upper 4bits and 7, 11 are write-as-0 */ + val &= 0xffff77f; + asm volatile("mcr p14, 0, %0, c0, c0, 0" : : "r" (val)); +} + +static inline int +xscale1_pmnc_counter_has_overflowed(unsigned long pmnc, + enum xscale_counters counter) +{ + int ret = 0; + + switch (counter) { + case XSCALE_CYCLE_COUNTER: + ret = pmnc & XSCALE1_CCOUNT_OVERFLOW; + break; + case XSCALE_COUNTER0: + ret = pmnc & XSCALE1_COUNT0_OVERFLOW; + break; + case XSCALE_COUNTER1: + ret = pmnc & XSCALE1_COUNT1_OVERFLOW; + break; + default: + WARN_ONCE(1, "invalid counter number (%d)\n", counter); + } + + return ret; +} + +static irqreturn_t +xscale1pmu_handle_irq(struct arm_pmu *cpu_pmu) +{ + unsigned long pmnc; + struct perf_sample_data data; + struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events); + struct pt_regs *regs; + int idx; + + /* + * NOTE: there's an A stepping erratum that states if an overflow + * bit already exists and another occurs, the previous + * Overflow bit gets cleared. There's no workaround. + * Fixed in B stepping or later. + */ + pmnc = xscale1pmu_read_pmnc(); + + /* + * Write the value back to clear the overflow flags. Overflow + * flags remain in pmnc for use below. We also disable the PMU + * while we process the interrupt. + */ + xscale1pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE); + + if (!(pmnc & XSCALE1_OVERFLOWED_MASK)) + return IRQ_NONE; + + regs = get_irq_regs(); + + for (idx = 0; idx < cpu_pmu->num_events; ++idx) { + struct perf_event *event = cpuc->events[idx]; + struct hw_perf_event *hwc; + + if (!event) + continue; + + if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx)) + continue; + + hwc = &event->hw; + armpmu_event_update(event); + perf_sample_data_init(&data, 0, hwc->last_period); + if (!armpmu_event_set_period(event)) + continue; + + if (perf_event_overflow(event, &data, regs)) + cpu_pmu->disable(event); + } + + irq_work_run(); + + /* + * Re-enable the PMU. 
+ */ + pmnc = xscale1pmu_read_pmnc() | XSCALE_PMU_ENABLE; + xscale1pmu_write_pmnc(pmnc); + + return IRQ_HANDLED; +} + +static void xscale1pmu_enable_event(struct perf_event *event) +{ + unsigned long val, mask, evt; + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + switch (idx) { + case XSCALE_CYCLE_COUNTER: + mask = 0; + evt = XSCALE1_CCOUNT_INT_EN; + break; + case XSCALE_COUNTER0: + mask = XSCALE1_COUNT0_EVT_MASK; + evt = (hwc->config_base << XSCALE1_COUNT0_EVT_SHFT) | + XSCALE1_COUNT0_INT_EN; + break; + case XSCALE_COUNTER1: + mask = XSCALE1_COUNT1_EVT_MASK; + evt = (hwc->config_base << XSCALE1_COUNT1_EVT_SHFT) | + XSCALE1_COUNT1_INT_EN; + break; + default: + WARN_ONCE(1, "invalid counter number (%d)\n", idx); + return; + } + + val = xscale1pmu_read_pmnc(); + val &= ~mask; + val |= evt; + xscale1pmu_write_pmnc(val); +} + +static void xscale1pmu_disable_event(struct perf_event *event) +{ + unsigned long val, mask, evt; + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + switch (idx) { + case XSCALE_CYCLE_COUNTER: + mask = XSCALE1_CCOUNT_INT_EN; + evt = 0; + break; + case XSCALE_COUNTER0: + mask = XSCALE1_COUNT0_INT_EN | XSCALE1_COUNT0_EVT_MASK; + evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT0_EVT_SHFT; + break; + case XSCALE_COUNTER1: + mask = XSCALE1_COUNT1_INT_EN | XSCALE1_COUNT1_EVT_MASK; + evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT1_EVT_SHFT; + break; + default: + WARN_ONCE(1, "invalid counter number (%d)\n", idx); + return; + } + + val = xscale1pmu_read_pmnc(); + val &= ~mask; + val |= evt; + xscale1pmu_write_pmnc(val); +} + +static int +xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc, + struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + if (XSCALE_PERFCTR_CCNT == hwc->config_base) { + if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask)) + return -EAGAIN; + + return XSCALE_CYCLE_COUNTER; + } else { + if (!test_and_set_bit(XSCALE_COUNTER1, cpuc->used_mask)) + return XSCALE_COUNTER1; + + if (!test_and_set_bit(XSCALE_COUNTER0, cpuc->used_mask)) + return XSCALE_COUNTER0; + + return -EAGAIN; + } +} + +static void xscalepmu_clear_event_idx(struct pmu_hw_events *cpuc, + struct perf_event *event) +{ + clear_bit(event->hw.idx, cpuc->used_mask); +} + +static void xscale1pmu_start(struct arm_pmu *cpu_pmu) +{ + unsigned long val; + + val = xscale1pmu_read_pmnc(); + val |= XSCALE_PMU_ENABLE; + xscale1pmu_write_pmnc(val); +} + +static void xscale1pmu_stop(struct arm_pmu *cpu_pmu) +{ + unsigned long val; + + val = xscale1pmu_read_pmnc(); + val &= ~XSCALE_PMU_ENABLE; + xscale1pmu_write_pmnc(val); +} + +static inline u64 xscale1pmu_read_counter(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + int counter = hwc->idx; + u32 val = 0; + + switch (counter) { + case XSCALE_CYCLE_COUNTER: + asm volatile("mrc p14, 0, %0, c1, c0, 0" : "=r" (val)); + break; + case XSCALE_COUNTER0: + asm volatile("mrc p14, 0, %0, c2, c0, 0" : "=r" (val)); + break; + case XSCALE_COUNTER1: + asm volatile("mrc p14, 0, %0, c3, c0, 0" : "=r" (val)); + break; + } + + return val; +} + +static inline void xscale1pmu_write_counter(struct perf_event *event, u64 val) +{ + struct hw_perf_event *hwc = &event->hw; + int counter = hwc->idx; + + switch (counter) { + case XSCALE_CYCLE_COUNTER: + asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val)); + break; + case XSCALE_COUNTER0: + asm volatile("mcr p14, 0, %0, c2, c0, 0" : : "r" (val)); + break; + case XSCALE_COUNTER1: + asm volatile("mcr p14, 0, %0, c3, c0, 0" : : "r" (val)); + break; + } +} + 
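Since xscale1 keeps the event selectors and all enable/interrupt bits in the single PMNC register, programming a counter is one read-modify-write of that word. A standalone sketch of the bit packing for counter 0 (hypothetical helper, mirroring the XSCALE1_COUNT0_* shifts and masks defined above):

#include <stdint.h>

#define COUNT0_EVT_SHFT	12			/* XSCALE1_COUNT0_EVT_SHFT */
#define COUNT0_EVT_MASK	(0xffU << COUNT0_EVT_SHFT)
#define COUNT0_INT_EN	0x010U

/* Pack an event into counter 0's field and arm its overflow interrupt. */
static uint32_t pmnc_program_count0(uint32_t pmnc, uint8_t event)
{
	pmnc &= ~COUNT0_EVT_MASK;			/* clear the old event */
	pmnc |= (uint32_t)event << COUNT0_EVT_SHFT;	/* new event, bits 19:12 */
	pmnc |= COUNT0_INT_EN;				/* enable overflow IRQ */
	return pmnc;
}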
+static int xscale_map_event(struct perf_event *event) +{ + return armpmu_map_event(event, &xscale_perf_map, + &xscale_perf_cache_map, 0xFF); +} + +static int xscale1pmu_init(struct arm_pmu *cpu_pmu) +{ + cpu_pmu->name = "armv5_xscale1"; + cpu_pmu->handle_irq = xscale1pmu_handle_irq; + cpu_pmu->enable = xscale1pmu_enable_event; + cpu_pmu->disable = xscale1pmu_disable_event; + cpu_pmu->read_counter = xscale1pmu_read_counter; + cpu_pmu->write_counter = xscale1pmu_write_counter; + cpu_pmu->get_event_idx = xscale1pmu_get_event_idx; + cpu_pmu->clear_event_idx = xscalepmu_clear_event_idx; + cpu_pmu->start = xscale1pmu_start; + cpu_pmu->stop = xscale1pmu_stop; + cpu_pmu->map_event = xscale_map_event; + cpu_pmu->num_events = 3; + + return 0; +} + +#define XSCALE2_OVERFLOWED_MASK 0x01f +#define XSCALE2_CCOUNT_OVERFLOW 0x001 +#define XSCALE2_COUNT0_OVERFLOW 0x002 +#define XSCALE2_COUNT1_OVERFLOW 0x004 +#define XSCALE2_COUNT2_OVERFLOW 0x008 +#define XSCALE2_COUNT3_OVERFLOW 0x010 +#define XSCALE2_CCOUNT_INT_EN 0x001 +#define XSCALE2_COUNT0_INT_EN 0x002 +#define XSCALE2_COUNT1_INT_EN 0x004 +#define XSCALE2_COUNT2_INT_EN 0x008 +#define XSCALE2_COUNT3_INT_EN 0x010 +#define XSCALE2_COUNT0_EVT_SHFT 0 +#define XSCALE2_COUNT0_EVT_MASK (0xff << XSCALE2_COUNT0_EVT_SHFT) +#define XSCALE2_COUNT1_EVT_SHFT 8 +#define XSCALE2_COUNT1_EVT_MASK (0xff << XSCALE2_COUNT1_EVT_SHFT) +#define XSCALE2_COUNT2_EVT_SHFT 16 +#define XSCALE2_COUNT2_EVT_MASK (0xff << XSCALE2_COUNT2_EVT_SHFT) +#define XSCALE2_COUNT3_EVT_SHFT 24 +#define XSCALE2_COUNT3_EVT_MASK (0xff << XSCALE2_COUNT3_EVT_SHFT) + +static inline u32 +xscale2pmu_read_pmnc(void) +{ + u32 val; + asm volatile("mrc p14, 0, %0, c0, c1, 0" : "=r" (val)); + /* bits 1-2 and 4-23 are read-unpredictable */ + return val & 0xff000009; +} + +static inline void +xscale2pmu_write_pmnc(u32 val) +{ + /* bits 4-23 are write-as-0, 24-31 are write ignored */ + val &= 0xf; + asm volatile("mcr p14, 0, %0, c0, c1, 0" : : "r" (val)); +} + +static inline u32 +xscale2pmu_read_overflow_flags(void) +{ + u32 val; + asm volatile("mrc p14, 0, %0, c5, c1, 0" : "=r" (val)); + return val; +} + +static inline void +xscale2pmu_write_overflow_flags(u32 val) +{ + asm volatile("mcr p14, 0, %0, c5, c1, 0" : : "r" (val)); +} + +static inline u32 +xscale2pmu_read_event_select(void) +{ + u32 val; + asm volatile("mrc p14, 0, %0, c8, c1, 0" : "=r" (val)); + return val; +} + +static inline void +xscale2pmu_write_event_select(u32 val) +{ + asm volatile("mcr p14, 0, %0, c8, c1, 0" : : "r"(val)); +} + +static inline u32 +xscale2pmu_read_int_enable(void) +{ + u32 val; + asm volatile("mrc p14, 0, %0, c4, c1, 0" : "=r" (val)); + return val; +} + +static void +xscale2pmu_write_int_enable(u32 val) +{ + asm volatile("mcr p14, 0, %0, c4, c1, 0" : : "r" (val)); +} + +static inline int +xscale2_pmnc_counter_has_overflowed(unsigned long of_flags, + enum xscale_counters counter) +{ + int ret = 0; + + switch (counter) { + case XSCALE_CYCLE_COUNTER: + ret = of_flags & XSCALE2_CCOUNT_OVERFLOW; + break; + case XSCALE_COUNTER0: + ret = of_flags & XSCALE2_COUNT0_OVERFLOW; + break; + case XSCALE_COUNTER1: + ret = of_flags & XSCALE2_COUNT1_OVERFLOW; + break; + case XSCALE_COUNTER2: + ret = of_flags & XSCALE2_COUNT2_OVERFLOW; + break; + case XSCALE_COUNTER3: + ret = of_flags & XSCALE2_COUNT3_OVERFLOW; + break; + default: + WARN_ONCE(1, "invalid counter number (%d)\n", counter); + } + + return ret; +} + +static irqreturn_t +xscale2pmu_handle_irq(struct arm_pmu *cpu_pmu) +{ + unsigned long pmnc, of_flags; + struct perf_sample_data data; 
+ struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events); + struct pt_regs *regs; + int idx; + + /* Disable the PMU. */ + pmnc = xscale2pmu_read_pmnc(); + xscale2pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE); + + /* Check the overflow flag register. */ + of_flags = xscale2pmu_read_overflow_flags(); + if (!(of_flags & XSCALE2_OVERFLOWED_MASK)) + return IRQ_NONE; + + /* Clear the overflow bits. */ + xscale2pmu_write_overflow_flags(of_flags); + + regs = get_irq_regs(); + + for (idx = 0; idx < cpu_pmu->num_events; ++idx) { + struct perf_event *event = cpuc->events[idx]; + struct hw_perf_event *hwc; + + if (!event) + continue; + + if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx)) + continue; + + hwc = &event->hw; + armpmu_event_update(event); + perf_sample_data_init(&data, 0, hwc->last_period); + if (!armpmu_event_set_period(event)) + continue; + + if (perf_event_overflow(event, &data, regs)) + cpu_pmu->disable(event); + } + + irq_work_run(); + + /* + * Re-enable the PMU. + */ + pmnc = xscale2pmu_read_pmnc() | XSCALE_PMU_ENABLE; + xscale2pmu_write_pmnc(pmnc); + + return IRQ_HANDLED; +} + +static void xscale2pmu_enable_event(struct perf_event *event) +{ + unsigned long ien, evtsel; + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + ien = xscale2pmu_read_int_enable(); + evtsel = xscale2pmu_read_event_select(); + + switch (idx) { + case XSCALE_CYCLE_COUNTER: + ien |= XSCALE2_CCOUNT_INT_EN; + break; + case XSCALE_COUNTER0: + ien |= XSCALE2_COUNT0_INT_EN; + evtsel &= ~XSCALE2_COUNT0_EVT_MASK; + evtsel |= hwc->config_base << XSCALE2_COUNT0_EVT_SHFT; + break; + case XSCALE_COUNTER1: + ien |= XSCALE2_COUNT1_INT_EN; + evtsel &= ~XSCALE2_COUNT1_EVT_MASK; + evtsel |= hwc->config_base << XSCALE2_COUNT1_EVT_SHFT; + break; + case XSCALE_COUNTER2: + ien |= XSCALE2_COUNT2_INT_EN; + evtsel &= ~XSCALE2_COUNT2_EVT_MASK; + evtsel |= hwc->config_base << XSCALE2_COUNT2_EVT_SHFT; + break; + case XSCALE_COUNTER3: + ien |= XSCALE2_COUNT3_INT_EN; + evtsel &= ~XSCALE2_COUNT3_EVT_MASK; + evtsel |= hwc->config_base << XSCALE2_COUNT3_EVT_SHFT; + break; + default: + WARN_ONCE(1, "invalid counter number (%d)\n", idx); + return; + } + + xscale2pmu_write_event_select(evtsel); + xscale2pmu_write_int_enable(ien); +} + +static void xscale2pmu_disable_event(struct perf_event *event) +{ + unsigned long ien, evtsel, of_flags; + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + ien = xscale2pmu_read_int_enable(); + evtsel = xscale2pmu_read_event_select(); + + switch (idx) { + case XSCALE_CYCLE_COUNTER: + ien &= ~XSCALE2_CCOUNT_INT_EN; + of_flags = XSCALE2_CCOUNT_OVERFLOW; + break; + case XSCALE_COUNTER0: + ien &= ~XSCALE2_COUNT0_INT_EN; + evtsel &= ~XSCALE2_COUNT0_EVT_MASK; + evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT; + of_flags = XSCALE2_COUNT0_OVERFLOW; + break; + case XSCALE_COUNTER1: + ien &= ~XSCALE2_COUNT1_INT_EN; + evtsel &= ~XSCALE2_COUNT1_EVT_MASK; + evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT; + of_flags = XSCALE2_COUNT1_OVERFLOW; + break; + case XSCALE_COUNTER2: + ien &= ~XSCALE2_COUNT2_INT_EN; + evtsel &= ~XSCALE2_COUNT2_EVT_MASK; + evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT; + of_flags = XSCALE2_COUNT2_OVERFLOW; + break; + case XSCALE_COUNTER3: + ien &= ~XSCALE2_COUNT3_INT_EN; + evtsel &= ~XSCALE2_COUNT3_EVT_MASK; + evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT; + of_flags = XSCALE2_COUNT3_OVERFLOW; + break; + default: + WARN_ONCE(1, "invalid counter number (%d)\n", idx); + return; + } + + 
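+	/*
+	 * Note the ordering below: the event select is rewritten first, then
+	 * the interrupt enable bit is dropped, and finally the counter's
+	 * pending overflow flag is cleared, so a stale flag cannot raise an
+	 * interrupt once the counter is reused.
+	 */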
xscale2pmu_write_event_select(evtsel); + xscale2pmu_write_int_enable(ien); + xscale2pmu_write_overflow_flags(of_flags); +} + +static int +xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc, + struct perf_event *event) +{ + int idx = xscale1pmu_get_event_idx(cpuc, event); + if (idx >= 0) + goto out; + + if (!test_and_set_bit(XSCALE_COUNTER3, cpuc->used_mask)) + idx = XSCALE_COUNTER3; + else if (!test_and_set_bit(XSCALE_COUNTER2, cpuc->used_mask)) + idx = XSCALE_COUNTER2; +out: + return idx; +} + +static void xscale2pmu_start(struct arm_pmu *cpu_pmu) +{ + unsigned long val; + + val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64; + val |= XSCALE_PMU_ENABLE; + xscale2pmu_write_pmnc(val); +} + +static void xscale2pmu_stop(struct arm_pmu *cpu_pmu) +{ + unsigned long val; + + val = xscale2pmu_read_pmnc(); + val &= ~XSCALE_PMU_ENABLE; + xscale2pmu_write_pmnc(val); +} + +static inline u64 xscale2pmu_read_counter(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + int counter = hwc->idx; + u32 val = 0; + + switch (counter) { + case XSCALE_CYCLE_COUNTER: + asm volatile("mrc p14, 0, %0, c1, c1, 0" : "=r" (val)); + break; + case XSCALE_COUNTER0: + asm volatile("mrc p14, 0, %0, c0, c2, 0" : "=r" (val)); + break; + case XSCALE_COUNTER1: + asm volatile("mrc p14, 0, %0, c1, c2, 0" : "=r" (val)); + break; + case XSCALE_COUNTER2: + asm volatile("mrc p14, 0, %0, c2, c2, 0" : "=r" (val)); + break; + case XSCALE_COUNTER3: + asm volatile("mrc p14, 0, %0, c3, c2, 0" : "=r" (val)); + break; + } + + return val; +} + +static inline void xscale2pmu_write_counter(struct perf_event *event, u64 val) +{ + struct hw_perf_event *hwc = &event->hw; + int counter = hwc->idx; + + switch (counter) { + case XSCALE_CYCLE_COUNTER: + asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val)); + break; + case XSCALE_COUNTER0: + asm volatile("mcr p14, 0, %0, c0, c2, 0" : : "r" (val)); + break; + case XSCALE_COUNTER1: + asm volatile("mcr p14, 0, %0, c1, c2, 0" : : "r" (val)); + break; + case XSCALE_COUNTER2: + asm volatile("mcr p14, 0, %0, c2, c2, 0" : : "r" (val)); + break; + case XSCALE_COUNTER3: + asm volatile("mcr p14, 0, %0, c3, c2, 0" : : "r" (val)); + break; + } +} + +static int xscale2pmu_init(struct arm_pmu *cpu_pmu) +{ + cpu_pmu->name = "armv5_xscale2"; + cpu_pmu->handle_irq = xscale2pmu_handle_irq; + cpu_pmu->enable = xscale2pmu_enable_event; + cpu_pmu->disable = xscale2pmu_disable_event; + cpu_pmu->read_counter = xscale2pmu_read_counter; + cpu_pmu->write_counter = xscale2pmu_write_counter; + cpu_pmu->get_event_idx = xscale2pmu_get_event_idx; + cpu_pmu->clear_event_idx = xscalepmu_clear_event_idx; + cpu_pmu->start = xscale2pmu_start; + cpu_pmu->stop = xscale2pmu_stop; + cpu_pmu->map_event = xscale_map_event; + cpu_pmu->num_events = 5; + + return 0; +} + +static const struct pmu_probe_info xscale_pmu_probe_table[] = { + XSCALE_PMU_PROBE(ARM_CPU_XSCALE_ARCH_V1, xscale1pmu_init), + XSCALE_PMU_PROBE(ARM_CPU_XSCALE_ARCH_V2, xscale2pmu_init), + { /* sentinel value */ } +}; + +static int xscale_pmu_device_probe(struct platform_device *pdev) +{ + return arm_pmu_device_probe(pdev, NULL, xscale_pmu_probe_table); +} + +static struct platform_driver xscale_pmu_driver = { + .driver = { + .name = "xscale-pmu", + }, + .probe = xscale_pmu_device_probe, +}; + +builtin_platform_driver(xscale_pmu_driver); diff --git a/drivers/perf/cxl_pmu.c b/drivers/perf/cxl_pmu.c index 1f93a66eff5b..43d68b69e630 100644 --- a/drivers/perf/cxl_pmu.c +++ b/drivers/perf/cxl_pmu.c @@ -972,6 +972,7 @@ static __exit void cxl_pmu_exit(void) 
cpuhp_remove_multi_state(cxl_pmu_cpuhp_state_num); } +MODULE_DESCRIPTION("CXL Performance Monitor Driver"); MODULE_LICENSE("GPL"); MODULE_IMPORT_NS(CXL); module_init(cxl_pmu_init); diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c index 1bbdb29743c4..746b92330ca7 100644 --- a/drivers/perf/fsl_imx8_ddr_perf.c +++ b/drivers/perf/fsl_imx8_ddr_perf.c @@ -850,4 +850,5 @@ static struct platform_driver imx_ddr_pmu_driver = { }; module_platform_driver(imx_ddr_pmu_driver); +MODULE_DESCRIPTION("Freescale i.MX8 DDR Performance Monitor Driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/perf/fsl_imx9_ddr_perf.c b/drivers/perf/fsl_imx9_ddr_perf.c index 72c2d3074cde..69f920b1caf2 100644 --- a/drivers/perf/fsl_imx9_ddr_perf.c +++ b/drivers/perf/fsl_imx9_ddr_perf.c @@ -11,14 +11,24 @@ #include <linux/perf_event.h> /* Performance monitor configuration */ -#define PMCFG1 0x00 -#define PMCFG1_RD_TRANS_FILT_EN BIT(31) -#define PMCFG1_WR_TRANS_FILT_EN BIT(30) -#define PMCFG1_RD_BT_FILT_EN BIT(29) -#define PMCFG1_ID_MASK GENMASK(17, 0) +#define PMCFG1 0x00 +#define MX93_PMCFG1_RD_TRANS_FILT_EN BIT(31) +#define MX93_PMCFG1_WR_TRANS_FILT_EN BIT(30) +#define MX93_PMCFG1_RD_BT_FILT_EN BIT(29) +#define MX93_PMCFG1_ID_MASK GENMASK(17, 0) -#define PMCFG2 0x04 -#define PMCFG2_ID GENMASK(17, 0) +#define MX95_PMCFG1_WR_BEAT_FILT_EN BIT(31) +#define MX95_PMCFG1_RD_BEAT_FILT_EN BIT(30) + +#define PMCFG2 0x04 +#define MX93_PMCFG2_ID GENMASK(17, 0) + +#define PMCFG3 0x08 +#define PMCFG4 0x0C +#define PMCFG5 0x10 +#define PMCFG6 0x14 +#define MX95_PMCFG_ID_MASK GENMASK(9, 0) +#define MX95_PMCFG_ID GENMASK(25, 16) /* Global control register affects all counters and takes priority over local control registers */ #define PMGC0 0x40 @@ -41,6 +51,10 @@ #define NUM_COUNTERS 11 #define CYCLES_COUNTER 0 +#define CYCLES_EVENT_ID 0 + +#define CONFIG_EVENT_MASK GENMASK(7, 0) +#define CONFIG_COUNTER_MASK GENMASK(23, 16) #define to_ddr_pmu(p) container_of(p, struct ddr_pmu, pmu) @@ -71,8 +85,23 @@ static const struct imx_ddr_devtype_data imx93_devtype_data = { .identifier = "imx93", }; +static const struct imx_ddr_devtype_data imx95_devtype_data = { + .identifier = "imx95", +}; + +static inline bool is_imx93(struct ddr_pmu *pmu) +{ + return pmu->devtype_data == &imx93_devtype_data; +} + +static inline bool is_imx95(struct ddr_pmu *pmu) +{ + return pmu->devtype_data == &imx95_devtype_data; +} + static const struct of_device_id imx_ddr_pmu_dt_ids[] = { - {.compatible = "fsl,imx93-ddr-pmu", .data = &imx93_devtype_data}, + { .compatible = "fsl,imx93-ddr-pmu", .data = &imx93_devtype_data }, + { .compatible = "fsl,imx95-ddr-pmu", .data = &imx95_devtype_data }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, imx_ddr_pmu_dt_ids); @@ -118,21 +147,40 @@ static const struct attribute_group ddr_perf_cpumask_attr_group = { .attrs = ddr_perf_cpumask_attrs, }; +struct imx9_pmu_events_attr { + struct device_attribute attr; + u64 id; + const void *devtype_data; +}; + static ssize_t ddr_pmu_event_show(struct device *dev, struct device_attribute *attr, char *page) { - struct perf_pmu_events_attr *pmu_attr; + struct imx9_pmu_events_attr *pmu_attr; - pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr); + pmu_attr = container_of(attr, struct imx9_pmu_events_attr, attr); return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id); } -#define IMX9_DDR_PMU_EVENT_ATTR(_name, _id) \ - (&((struct perf_pmu_events_attr[]) { \ +#define COUNTER_OFFSET_IN_EVENT 8 +#define ID(counter, id) ((counter << COUNTER_OFFSET_IN_EVENT) | 
id) + +#define DDR_PMU_EVENT_ATTR_COMM(_name, _id, _data) \ + (&((struct imx9_pmu_events_attr[]) { \ { .attr = __ATTR(_name, 0444, ddr_pmu_event_show, NULL),\ - .id = _id, } \ + .id = _id, \ + .devtype_data = _data, } \ })[0].attr.attr) +#define IMX9_DDR_PMU_EVENT_ATTR(_name, _id) \ + DDR_PMU_EVENT_ATTR_COMM(_name, _id, NULL) + +#define IMX93_DDR_PMU_EVENT_ATTR(_name, _id) \ + DDR_PMU_EVENT_ATTR_COMM(_name, _id, &imx93_devtype_data) + +#define IMX95_DDR_PMU_EVENT_ATTR(_name, _id) \ + DDR_PMU_EVENT_ATTR_COMM(_name, _id, &imx95_devtype_data) + static struct attribute *ddr_perf_events_attrs[] = { /* counter0 cycles event */ IMX9_DDR_PMU_EVENT_ATTR(cycles, 0), @@ -159,90 +207,114 @@ static struct attribute *ddr_perf_events_attrs[] = { IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_29, 63), /* counter1 specific events */ - IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_0, 64), - IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_1, 65), - IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_2, 66), - IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_3, 67), - IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_4, 68), - IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_5, 69), - IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_6, 70), - IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_7, 71), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_0, ID(1, 64)), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_1, ID(1, 65)), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_2, ID(1, 66)), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_3, ID(1, 67)), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_4, ID(1, 68)), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_5, ID(1, 69)), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_6, ID(1, 70)), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_7, ID(1, 71)), /* counter2 specific events */ - IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_0, 64), - IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_1, 65), - IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_2, 66), - IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_3, 67), - IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_4, 68), - IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_5, 69), - IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_6, 70), - IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_7, 71), - IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_empty, 72), - IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pm_rd_trans_filt, 73), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_0, ID(2, 64)), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_1, ID(2, 65)), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_2, ID(2, 66)), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_3, ID(2, 67)), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_4, ID(2, 68)), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_5, ID(2, 69)), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_6, ID(2, 70)), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_7, ID(2, 71)), + IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_empty, ID(2, 72)), + IMX93_DDR_PMU_EVENT_ATTR(eddrtq_pm_rd_trans_filt, ID(2, 73)), /* imx93 specific*/ + IMX95_DDR_PMU_EVENT_ATTR(eddrtq_pm_wr_beat_filt, ID(2, 73)), /* imx95 specific*/ /* counter3 specific events */ - IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_0, 64), - IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_1, 65), - IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_2, 66), - IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_3, 67), - IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_4, 68), - IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_5, 69), - IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_6, 70), - IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_7, 71), - IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_full, 72), - IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pm_wr_trans_filt, 73), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_0, ID(3, 64)), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_1, ID(3, 65)), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_2, ID(3, 66)), + 
IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_3, ID(3, 67)), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_4, ID(3, 68)), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_5, ID(3, 69)), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_6, ID(3, 70)), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_7, ID(3, 71)), + IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_full, ID(3, 72)), + IMX93_DDR_PMU_EVENT_ATTR(eddrtq_pm_wr_trans_filt, ID(3, 73)), /* imx93 specific*/ + IMX95_DDR_PMU_EVENT_ATTR(eddrtq_pm_rd_beat_filt2, ID(3, 73)), /* imx95 specific*/ /* counter4 specific events */ - IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_0, 64), - IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_1, 65), - IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_2, 66), - IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_3, 67), - IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_4, 68), - IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_5, 69), - IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_6, 70), - IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_7, 71), - IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq2_rmw, 72), - IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pm_rd_beat_filt, 73), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_0, ID(4, 64)), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_1, ID(4, 65)), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_2, ID(4, 66)), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_3, ID(4, 67)), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_4, ID(4, 68)), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_5, ID(4, 69)), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_6, ID(4, 70)), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_7, ID(4, 71)), + IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq2_rmw, ID(4, 72)), + IMX93_DDR_PMU_EVENT_ATTR(eddrtq_pm_rd_beat_filt, ID(4, 73)), /* imx93 specific*/ + IMX95_DDR_PMU_EVENT_ATTR(eddrtq_pm_rd_beat_filt1, ID(4, 73)), /* imx95 specific*/ /* counter5 specific events */ - IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_0, 64), - IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_1, 65), - IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_2, 66), - IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_3, 67), - IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_4, 68), - IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_5, 69), - IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_6, 70), - IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_7, 71), - IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq1, 72), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_0, ID(5, 64)), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_1, ID(5, 65)), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_2, ID(5, 66)), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_3, ID(5, 67)), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_4, ID(5, 68)), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_5, ID(5, 69)), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_6, ID(5, 70)), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_7, ID(5, 71)), + IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq1, ID(5, 72)), + IMX95_DDR_PMU_EVENT_ATTR(eddrtq_pm_rd_beat_filt0, ID(5, 73)), /* imx95 specific*/ /* counter6 specific events */ - IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_end_0, 64), - IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq2, 72), + IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_end_0, ID(6, 64)), + IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq2, ID(6, 72)), /* counter7 specific events */ - IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_1_2_full, 64), - IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_wrq0, 65), + IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_1_2_full, ID(7, 64)), + IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_wrq0, ID(7, 65)), /* counter8 specific events */ - IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_bias_switched, 64), - 
IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_1_4_full, 65), + IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_bias_switched, ID(8, 64)), + IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_1_4_full, ID(8, 65)), /* counter9 specific events */ - IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_wrq1, 65), - IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_3_4_full, 66), + IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_wrq1, ID(9, 65)), + IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_3_4_full, ID(9, 66)), /* counter10 specific events */ - IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_misc_mrk, 65), - IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq0, 66), + IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_misc_mrk, ID(10, 65)), + IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq0, ID(10, 66)), NULL, }; +static umode_t +ddr_perf_events_attrs_is_visible(struct kobject *kobj, + struct attribute *attr, int unused) +{ + struct pmu *pmu = dev_get_drvdata(kobj_to_dev(kobj)); + struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu); + struct imx9_pmu_events_attr *eattr; + + eattr = container_of(attr, typeof(*eattr), attr.attr); + + if (!eattr->devtype_data) + return attr->mode; + + if (eattr->devtype_data != ddr_pmu->devtype_data) + return 0; + + return attr->mode; +} + static const struct attribute_group ddr_perf_events_attr_group = { .name = "events", .attrs = ddr_perf_events_attrs, + .is_visible = ddr_perf_events_attrs_is_visible, }; -PMU_FORMAT_ATTR(event, "config:0-7"); +PMU_FORMAT_ATTR(event, "config:0-7,16-23"); PMU_FORMAT_ATTR(counter, "config:8-15"); PMU_FORMAT_ATTR(axi_id, "config1:0-17"); PMU_FORMAT_ATTR(axi_mask, "config2:0-17"); @@ -339,8 +411,10 @@ static void ddr_perf_counter_local_config(struct ddr_pmu *pmu, int config, int counter, bool enable) { u32 ctrl_a; + int event; ctrl_a = readl_relaxed(pmu->base + PMLCA(counter)); + event = FIELD_GET(CONFIG_EVENT_MASK, config); if (enable) { ctrl_a |= PMLCA_FC; @@ -352,7 +426,7 @@ static void ddr_perf_counter_local_config(struct ddr_pmu *pmu, int config, ctrl_a &= ~PMLCA_FC; ctrl_a |= PMLCA_CE; ctrl_a &= ~FIELD_PREP(PMLCA_EVENT, 0x7F); - ctrl_a |= FIELD_PREP(PMLCA_EVENT, (config & 0x000000FF)); + ctrl_a |= FIELD_PREP(PMLCA_EVENT, event); writel(ctrl_a, pmu->base + PMLCA(counter)); } else { /* Freeze counter. */ @@ -361,39 +435,79 @@ static void ddr_perf_counter_local_config(struct ddr_pmu *pmu, int config, } } -static void ddr_perf_monitor_config(struct ddr_pmu *pmu, int cfg, int cfg1, int cfg2) +static void imx93_ddr_perf_monitor_config(struct ddr_pmu *pmu, int event, + int counter, int axi_id, int axi_mask) { u32 pmcfg1, pmcfg2; - int event, counter; - - event = cfg & 0x000000FF; - counter = (cfg & 0x0000FF00) >> 8; + u32 mask[] = { MX93_PMCFG1_RD_TRANS_FILT_EN, + MX93_PMCFG1_WR_TRANS_FILT_EN, + MX93_PMCFG1_RD_BT_FILT_EN }; pmcfg1 = readl_relaxed(pmu->base + PMCFG1); - if (counter == 2 && event == 73) - pmcfg1 |= PMCFG1_RD_TRANS_FILT_EN; - else if (counter == 2 && event != 73) - pmcfg1 &= ~PMCFG1_RD_TRANS_FILT_EN; + if (counter >= 2 && counter <= 4) + pmcfg1 = event == 73 ? 
pmcfg1 | mask[counter - 2] :
+			 pmcfg1 & ~mask[counter - 2];
 
-	if (counter == 3 && event == 73)
-		pmcfg1 |= PMCFG1_WR_TRANS_FILT_EN;
-	else if (counter == 3 && event != 73)
-		pmcfg1 &= ~PMCFG1_WR_TRANS_FILT_EN;
+	pmcfg1 &= ~FIELD_PREP(MX93_PMCFG1_ID_MASK, 0x3FFFF);
+	pmcfg1 |= FIELD_PREP(MX93_PMCFG1_ID_MASK, axi_mask);
+	writel_relaxed(pmcfg1, pmu->base + PMCFG1);
 
-	if (counter == 4 && event == 73)
-		pmcfg1 |= PMCFG1_RD_BT_FILT_EN;
-	else if (counter == 4 && event != 73)
-		pmcfg1 &= ~PMCFG1_RD_BT_FILT_EN;
+	pmcfg2 = readl_relaxed(pmu->base + PMCFG2);
+	pmcfg2 &= ~FIELD_PREP(MX93_PMCFG2_ID, 0x3FFFF);
+	pmcfg2 |= FIELD_PREP(MX93_PMCFG2_ID, axi_id);
+	writel_relaxed(pmcfg2, pmu->base + PMCFG2);
+}
 
-	pmcfg1 &= ~FIELD_PREP(PMCFG1_ID_MASK, 0x3FFFF);
-	pmcfg1 |= FIELD_PREP(PMCFG1_ID_MASK, cfg2);
-	writel(pmcfg1, pmu->base + PMCFG1);
+static void imx95_ddr_perf_monitor_config(struct ddr_pmu *pmu, int event,
+					  int counter, int axi_id, int axi_mask)
+{
+	u32 pmcfg1, pmcfg, offset = 0;
 
-	pmcfg2 = readl_relaxed(pmu->base + PMCFG2);
-	pmcfg2 &= ~FIELD_PREP(PMCFG2_ID, 0x3FFFF);
-	pmcfg2 |= FIELD_PREP(PMCFG2_ID, cfg1);
-	writel(pmcfg2, pmu->base + PMCFG2);
+	pmcfg1 = readl_relaxed(pmu->base + PMCFG1);
+
+	if (event == 73) {
+		switch (counter) {
+		case 2:
+			pmcfg1 |= MX95_PMCFG1_WR_BEAT_FILT_EN;
+			offset = PMCFG3;
+			break;
+		case 3:
+			pmcfg1 |= MX95_PMCFG1_RD_BEAT_FILT_EN;
+			offset = PMCFG4;
+			break;
+		case 4:
+			pmcfg1 |= MX95_PMCFG1_RD_BEAT_FILT_EN;
+			offset = PMCFG5;
+			break;
+		case 5:
+			pmcfg1 |= MX95_PMCFG1_RD_BEAT_FILT_EN;
+			offset = PMCFG6;
+			break;
+		}
+	} else {
+		switch (counter) {
+		case 2:
+			pmcfg1 &= ~MX95_PMCFG1_WR_BEAT_FILT_EN;
+			break;
+		case 3:
+		case 4:
+		case 5:
+			pmcfg1 &= ~MX95_PMCFG1_RD_BEAT_FILT_EN;
+			break;
+		}
+	}
+
+	writel_relaxed(pmcfg1, pmu->base + PMCFG1);
+
+	if (offset) {
+		pmcfg = readl_relaxed(pmu->base + offset);
+		pmcfg &= ~(FIELD_PREP(MX95_PMCFG_ID_MASK, 0x3FF) |
+			   FIELD_PREP(MX95_PMCFG_ID, 0x3FF));
+		pmcfg |= (FIELD_PREP(MX95_PMCFG_ID_MASK, axi_mask) |
+			  FIELD_PREP(MX95_PMCFG_ID, axi_id));
+		writel_relaxed(pmcfg, pmu->base + offset);
+	}
 }
 
 static void ddr_perf_event_update(struct perf_event *event)
@@ -460,6 +574,28 @@ static void ddr_perf_event_start(struct perf_event *event, int flags)
 	hwc->state = 0;
 }
 
+static int ddr_perf_alloc_counter(struct ddr_pmu *pmu, int event, int counter)
+{
+	int i;
+
+	if (event == CYCLES_EVENT_ID) {
+		// The cycles counter is dedicated to the cycles event.
+		if (pmu->events[CYCLES_COUNTER] == NULL)
+			return CYCLES_COUNTER;
+	} else if (counter != 0) {
+		// A counter-specific event must use its own counter.
+		if (pmu->events[counter] == NULL)
+			return counter;
+	} else {
+		// Auto-allocate a counter for a reference event.
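+		// (Counter 0 is reserved for cycles, so scan upward from 1.)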
+ for (i = 1; i < NUM_COUNTERS; i++) + if (pmu->events[i] == NULL) + return i; + } + + return -ENOENT; +} + static int ddr_perf_event_add(struct perf_event *event, int flags) { struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); @@ -467,21 +603,33 @@ static int ddr_perf_event_add(struct perf_event *event, int flags) int cfg = event->attr.config; int cfg1 = event->attr.config1; int cfg2 = event->attr.config2; - int counter; + int event_id, counter; - counter = (cfg & 0x0000FF00) >> 8; + event_id = FIELD_GET(CONFIG_EVENT_MASK, cfg); + counter = FIELD_GET(CONFIG_COUNTER_MASK, cfg); + + counter = ddr_perf_alloc_counter(pmu, event_id, counter); + if (counter < 0) { + dev_dbg(pmu->dev, "There are not enough counters\n"); + return -EOPNOTSUPP; + } pmu->events[counter] = event; pmu->active_events++; hwc->idx = counter; hwc->state |= PERF_HES_STOPPED; + if (is_imx93(pmu)) + /* read trans, write trans, read beat */ + imx93_ddr_perf_monitor_config(pmu, event_id, counter, cfg1, cfg2); + + if (is_imx95(pmu)) + /* write beat, read beat2, read beat1, read beat */ + imx95_ddr_perf_monitor_config(pmu, event_id, counter, cfg1, cfg2); + if (flags & PERF_EF_START) ddr_perf_event_start(event, flags); - /* read trans, write trans, read beat */ - ddr_perf_monitor_config(pmu, cfg, cfg1, cfg2); - return 0; } @@ -501,9 +649,11 @@ static void ddr_perf_event_del(struct perf_event *event, int flags) { struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; + int counter = hwc->idx; ddr_perf_event_stop(event, PERF_EF_UPDATE); + pmu->events[counter] = NULL; pmu->active_events--; hwc->idx = -1; } diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c index 6392cbedcd06..918cdc31de57 100644 --- a/drivers/perf/hisilicon/hisi_uncore_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c @@ -537,4 +537,5 @@ void hisi_pmu_init(struct hisi_pmu *hisi_pmu, struct module *module) } EXPORT_SYMBOL_GPL(hisi_pmu_init); +MODULE_DESCRIPTION("HiSilicon SoC uncore Performance Monitor driver framework"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/perf/marvell_cn10k_ddr_pmu.c b/drivers/perf/marvell_cn10k_ddr_pmu.c index e2abca188dbe..94f1ebcd2a27 100644 --- a/drivers/perf/marvell_cn10k_ddr_pmu.c +++ b/drivers/perf/marvell_cn10k_ddr_pmu.c @@ -763,4 +763,5 @@ module_init(cn10k_ddr_pmu_init); module_exit(cn10k_ddr_pmu_exit); MODULE_AUTHOR("Bharat Bhushan <bbhushan2@marvell.com>"); +MODULE_DESCRIPTION("Marvell CN10K DRAM Subsystem (DSS) Performance Monitor Driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/perf/riscv_pmu.c b/drivers/perf/riscv_pmu.c index 78c490e0505a..0a02e85a8951 100644 --- a/drivers/perf/riscv_pmu.c +++ b/drivers/perf/riscv_pmu.c @@ -167,7 +167,7 @@ u64 riscv_pmu_event_update(struct perf_event *event) unsigned long cmask; u64 oldval, delta; - if (!rvpmu->ctr_read) + if (!rvpmu->ctr_read || (hwc->state & PERF_HES_UPTODATE)) return 0; cmask = riscv_pmu_ctr_get_width_mask(event); diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c index a2e4005e1fd0..4e842dcedfba 100644 --- a/drivers/perf/riscv_pmu_sbi.c +++ b/drivers/perf/riscv_pmu_sbi.c @@ -20,6 +20,7 @@ #include <linux/cpu_pm.h> #include <linux/sched/clock.h> #include <linux/soc/andes/irq.h> +#include <linux/workqueue.h> #include <asm/errata_list.h> #include <asm/sbi.h> @@ -114,7 +115,7 @@ struct sbi_pmu_event_data { }; }; -static const struct sbi_pmu_event_data pmu_hw_event_map[] = { +static struct sbi_pmu_event_data pmu_hw_event_map[] = { [PERF_COUNT_HW_CPU_CYCLES] = 
{.hw_gen_event = { SBI_PMU_HW_CPU_CYCLES, SBI_PMU_EVENT_TYPE_HW, 0}}, @@ -148,7 +149,7 @@ static const struct sbi_pmu_event_data pmu_hw_event_map[] = { }; #define C(x) PERF_COUNT_HW_CACHE_##x -static const struct sbi_pmu_event_data pmu_cache_event_map[PERF_COUNT_HW_CACHE_MAX] +static struct sbi_pmu_event_data pmu_cache_event_map[PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = { [C(L1D)] = { @@ -293,6 +294,34 @@ static const struct sbi_pmu_event_data pmu_cache_event_map[PERF_COUNT_HW_CACHE_M }, }; +static void pmu_sbi_check_event(struct sbi_pmu_event_data *edata) +{ + struct sbiret ret; + + ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, + 0, cmask, 0, edata->event_idx, 0, 0); + if (!ret.error) { + sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, + ret.value, 0x1, SBI_PMU_STOP_FLAG_RESET, 0, 0, 0); + } else if (ret.error == SBI_ERR_NOT_SUPPORTED) { + /* This event cannot be monitored by any counter */ + edata->event_idx = -EINVAL; + } +} + +static void pmu_sbi_check_std_events(struct work_struct *work) +{ + for (int i = 0; i < ARRAY_SIZE(pmu_hw_event_map); i++) + pmu_sbi_check_event(&pmu_hw_event_map[i]); + + for (int i = 0; i < ARRAY_SIZE(pmu_cache_event_map); i++) + for (int j = 0; j < ARRAY_SIZE(pmu_cache_event_map[i]); j++) + for (int k = 0; k < ARRAY_SIZE(pmu_cache_event_map[i][j]); k++) + pmu_sbi_check_event(&pmu_cache_event_map[i][j][k]); +} + +static DECLARE_WORK(check_std_events_work, pmu_sbi_check_std_events); + static int pmu_sbi_ctr_get_width(int idx) { return pmu_ctr_list[idx].width; @@ -478,6 +507,12 @@ static int pmu_sbi_event_map(struct perf_event *event, u64 *econfig) u64 raw_config_val; int ret; + /* + * Ensure we are finished checking standard hardware events for + * validity before allowing userspace to configure any events. + */ + flush_work(&check_std_events_work); + switch (type) { case PERF_TYPE_HARDWARE: if (config >= PERF_COUNT_HW_MAX) @@ -762,7 +797,7 @@ static inline void pmu_sbi_stop_all(struct riscv_pmu *pmu) * which may include counters that are not enabled yet. 
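 * Stopping with SBI_PMU_STOP_FLAG_RESET (added below) should also clear
 * any counter-event mappings left behind by firmware or a previous
 * kernel, rather than merely freezing the counters.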
*/ sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, - 0, pmu->cmask, 0, 0, 0, 0); + 0, pmu->cmask, SBI_PMU_STOP_FLAG_RESET, 0, 0, 0); } static inline void pmu_sbi_stop_hw_ctrs(struct riscv_pmu *pmu) @@ -1359,6 +1394,9 @@ static int pmu_sbi_device_probe(struct platform_device *pdev) if (ret) goto out_unregister; + /* Asynchronously check which standard events are available */ + schedule_work(&check_std_events_work); + return 0; out_unregister: diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c index fbab6ac0f0d1..7594f64eb737 100644 --- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c +++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c @@ -188,6 +188,9 @@ static void rcar_gen3_enable_vbus_ctrl(struct rcar_gen3_chan *ch, int vbus) dev_vdbg(ch->dev, "%s: %08x, %d\n", __func__, val, vbus); if (ch->soc_no_adp_ctrl) { + if (ch->vbus) + regulator_hardware_enable(ch->vbus, vbus); + vbus_ctrl_reg = USB2_VBCTRL; vbus_ctrl_val = USB2_VBCTRL_VBOUT; } @@ -718,7 +721,10 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev) phy_set_drvdata(channel->rphys[i].phy, &channel->rphys[i]); } - channel->vbus = devm_regulator_get_optional(dev, "vbus"); + if (channel->soc_no_adp_ctrl && channel->is_otg_channel) + channel->vbus = devm_regulator_get_exclusive(dev, "vbus"); + else + channel->vbus = devm_regulator_get_optional(dev, "vbus"); if (IS_ERR(channel->vbus)) { if (PTR_ERR(channel->vbus) == -EPROBE_DEFER) { ret = PTR_ERR(channel->vbus); diff --git a/drivers/pinctrl/pinctrl-da9062.c b/drivers/pinctrl/pinctrl-da9062.c index 22e3cd2cc963..6f44a13b90ce 100644 --- a/drivers/pinctrl/pinctrl-da9062.c +++ b/drivers/pinctrl/pinctrl-da9062.c @@ -139,7 +139,7 @@ static int da9062_gpio_direction_input(struct gpio_chip *gc, { struct da9062_pctl *pctl = gpiochip_get_data(gc); struct regmap *regmap = pctl->da9062->regmap; - struct gpio_desc *desc = gpiochip_get_desc(gc, offset); + struct gpio_desc *desc = gpio_device_get_desc(gc->gpiodev, offset); unsigned int gpi_type; int ret; diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig index 81a298517df2..960fd6a82450 100644 --- a/drivers/platform/Kconfig +++ b/drivers/platform/Kconfig @@ -7,6 +7,8 @@ source "drivers/platform/goldfish/Kconfig" source "drivers/platform/chrome/Kconfig" +source "drivers/platform/cznic/Kconfig" + source "drivers/platform/mellanox/Kconfig" source "drivers/platform/olpc/Kconfig" diff --git a/drivers/platform/Makefile b/drivers/platform/Makefile index fbbe4f77aa5d..bf69cc8d7429 100644 --- a/drivers/platform/Makefile +++ b/drivers/platform/Makefile @@ -10,5 +10,6 @@ obj-$(CONFIG_MIPS) += mips/ obj-$(CONFIG_OLPC_EC) += olpc/ obj-$(CONFIG_GOLDFISH) += goldfish/ obj-$(CONFIG_CHROME_PLATFORMS) += chrome/ +obj-$(CONFIG_CZNIC_PLATFORMS) += cznic/ obj-$(CONFIG_SURFACE_PLATFORMS) += surface/ obj-$(CONFIG_ARM64) += arm64/ diff --git a/drivers/platform/chrome/cros_ec.c b/drivers/platform/chrome/cros_ec.c index 47d19f7e295a..e821b3d39590 100644 --- a/drivers/platform/chrome/cros_ec.c +++ b/drivers/platform/chrome/cros_ec.c @@ -388,8 +388,8 @@ EXPORT_SYMBOL(cros_ec_suspend_late); */ int cros_ec_suspend(struct cros_ec_device *ec_dev) { - cros_ec_send_suspend_event(ec_dev); - cros_ec_disable_irq(ec_dev); + cros_ec_suspend_prepare(ec_dev); + cros_ec_suspend_late(ec_dev); return 0; } EXPORT_SYMBOL(cros_ec_suspend); diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c index e1d313246beb..4525ad1b59f4 100644 --- a/drivers/platform/chrome/cros_ec_debugfs.c +++ 
b/drivers/platform/chrome/cros_ec_debugfs.c
@@ -26,6 +26,10 @@
 #define CIRC_ADD(idx, size, value)	(((idx) + (value)) & ((size) - 1))
 
+static unsigned int log_poll_period_ms = LOG_POLL_SEC * MSEC_PER_SEC;
+module_param(log_poll_period_ms, uint, 0644);
+MODULE_PARM_DESC(log_poll_period_ms, "EC log polling period (ms)");
+
 /* waitqueue for log readers */
 static DECLARE_WAIT_QUEUE_HEAD(cros_ec_debugfs_log_wq);
 
@@ -57,7 +61,7 @@ struct cros_ec_debugfs {
 
 /*
  * We need to make sure that the EC log buffer on the UART is large enough,
- * so that it is unlikely enough to overlow within LOG_POLL_SEC.
+ * so that it is unlikely to overflow within log_poll_period_ms.
  */
 static void cros_ec_console_log_work(struct work_struct *__work)
 {
@@ -119,7 +123,7 @@ static void cros_ec_console_log_work(struct work_struct *__work)
 
 resched:
 	schedule_delayed_work(&debug_info->log_poll_work,
-			      msecs_to_jiffies(LOG_POLL_SEC * 1000));
+			      msecs_to_jiffies(log_poll_period_ms));
 }
 
 static int cros_ec_console_log_open(struct inode *inode, struct file *file)
@@ -330,6 +334,7 @@ static int ec_read_version_supported(struct cros_ec_dev *ec)
 	if (!msg)
 		return 0;
 
+	msg->version = 1;
 	msg->command = EC_CMD_GET_CMD_VERSIONS + ec->cmd_offset;
 	msg->outsize = sizeof(*params);
 	msg->insize = sizeof(*response);
diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
index ddfbfec44f4c..f0470248b109 100644
--- a/drivers/platform/chrome/cros_ec_lpc.c
+++ b/drivers/platform/chrome/cros_ec_lpc.c
@@ -39,6 +39,16 @@ static bool cros_ec_lpc_acpi_device_found;
 * be used as the base port for EC mapped memory.
 */
 #define CROS_EC_LPC_QUIRK_REMAP_MEMORY	BIT(0)
+/*
+ * Indicates that lpc_driver_data.quirk_acpi_id should be used to find
+ * the ACPI device.
+ */
+#define CROS_EC_LPC_QUIRK_ACPI_ID	BIT(1)
+/*
+ * Indicates that lpc_driver_data.quirk_aml_mutex_name should be used
+ * to find an AML mutex to protect access to Microchip EC.
+ */
+#define CROS_EC_LPC_QUIRK_AML_MUTEX	BIT(2)
 
 /**
  * struct lpc_driver_data - driver data attached to a DMI device ID to indicate
 * @quirks: a bitfield composed of quirks from CROS_EC_LPC_QUIRK_*
 * @quirk_mmio_memory_base: The first I/O port addressing EC mapped memory (used
 *                          when quirk ...REMAP_MEMORY is set.)
+ * @quirk_acpi_id: An ACPI HID to be used to find the ACPI device.
+ * @quirk_aml_mutex_name: The name of an AML mutex to be used to protect access
+ *                        to Microchip EC.
 */
 struct lpc_driver_data {
 	u32 quirks;
 	u16 quirk_mmio_memory_base;
+	const char *quirk_acpi_id;
+	const char *quirk_aml_mutex_name;
 };
 
 /**
@@ -62,14 +77,16 @@ struct cros_ec_lpc {
 
 /**
 * struct lpc_driver_ops - LPC driver operations
- * @read: Copy length bytes from EC address offset into buffer dest. Returns
- *        the 8-bit checksum of all bytes read.
- * @write: Copy length bytes from buffer msg into EC address offset. Returns
- *         the 8-bit checksum of all bytes written.
+ * @read: Copy length bytes from EC address offset into buffer dest.
+ *        Returns a negative error code on error, or the 8-bit checksum
+ *        of all bytes read.
+ * @write: Copy length bytes from buffer msg into EC address offset.
+ *         Returns a negative error code on error, or the 8-bit checksum
+ *         of all bytes written.
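+ *
+ * Since a checksum always fits in 0-255, callers can tell the two cases
+ * apart by testing for a negative return, as done throughout this file:
+ *
+ *	ret = cros_ec_lpc_ops.read(offset, length, dest);
+ *	if (ret < 0)
+ *		goto done;
+ *	sum += ret;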
*/ struct lpc_driver_ops { - u8 (*read)(unsigned int offset, unsigned int length, u8 *dest); - u8 (*write)(unsigned int offset, unsigned int length, const u8 *msg); + int (*read)(unsigned int offset, unsigned int length, u8 *dest); + int (*write)(unsigned int offset, unsigned int length, const u8 *msg); }; static struct lpc_driver_ops cros_ec_lpc_ops = { }; @@ -78,10 +95,10 @@ static struct lpc_driver_ops cros_ec_lpc_ops = { }; * A generic instance of the read function of struct lpc_driver_ops, used for * the LPC EC. */ -static u8 cros_ec_lpc_read_bytes(unsigned int offset, unsigned int length, - u8 *dest) +static int cros_ec_lpc_read_bytes(unsigned int offset, unsigned int length, + u8 *dest) { - int sum = 0; + u8 sum = 0; int i; for (i = 0; i < length; ++i) { @@ -97,10 +114,10 @@ static u8 cros_ec_lpc_read_bytes(unsigned int offset, unsigned int length, * A generic instance of the write function of struct lpc_driver_ops, used for * the LPC EC. */ -static u8 cros_ec_lpc_write_bytes(unsigned int offset, unsigned int length, - const u8 *msg) +static int cros_ec_lpc_write_bytes(unsigned int offset, unsigned int length, + const u8 *msg) { - int sum = 0; + u8 sum = 0; int i; for (i = 0; i < length; ++i) { @@ -116,13 +133,13 @@ static u8 cros_ec_lpc_write_bytes(unsigned int offset, unsigned int length, * An instance of the read function of struct lpc_driver_ops, used for the * MEC variant of LPC EC. */ -static u8 cros_ec_lpc_mec_read_bytes(unsigned int offset, unsigned int length, - u8 *dest) +static int cros_ec_lpc_mec_read_bytes(unsigned int offset, unsigned int length, + u8 *dest) { int in_range = cros_ec_lpc_mec_in_range(offset, length); if (in_range < 0) - return 0; + return in_range; return in_range ? cros_ec_lpc_io_bytes_mec(MEC_IO_READ, @@ -135,13 +152,13 @@ static u8 cros_ec_lpc_mec_read_bytes(unsigned int offset, unsigned int length, * An instance of the write function of struct lpc_driver_ops, used for the * MEC variant of LPC EC. */ -static u8 cros_ec_lpc_mec_write_bytes(unsigned int offset, unsigned int length, - const u8 *msg) +static int cros_ec_lpc_mec_write_bytes(unsigned int offset, unsigned int length, + const u8 *msg) { int in_range = cros_ec_lpc_mec_in_range(offset, length); if (in_range < 0) - return 0; + return in_range; return in_range ? 
cros_ec_lpc_io_bytes_mec(MEC_IO_WRITE, @@ -154,11 +171,14 @@ static int ec_response_timed_out(void) { unsigned long one_second = jiffies + HZ; u8 data; + int ret; usleep_range(200, 300); do { - if (!(cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_CMD, 1, &data) & - EC_LPC_STATUS_BUSY_MASK)) + ret = cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_CMD, 1, &data); + if (ret < 0) + return ret; + if (!(data & EC_LPC_STATUS_BUSY_MASK)) return 0; usleep_range(100, 200); } while (time_before(jiffies, one_second)); @@ -179,28 +199,41 @@ static int cros_ec_pkt_xfer_lpc(struct cros_ec_device *ec, goto done; /* Write buffer */ - cros_ec_lpc_ops.write(EC_LPC_ADDR_HOST_PACKET, ret, ec->dout); + ret = cros_ec_lpc_ops.write(EC_LPC_ADDR_HOST_PACKET, ret, ec->dout); + if (ret < 0) + goto done; /* Here we go */ sum = EC_COMMAND_PROTOCOL_3; - cros_ec_lpc_ops.write(EC_LPC_ADDR_HOST_CMD, 1, &sum); + ret = cros_ec_lpc_ops.write(EC_LPC_ADDR_HOST_CMD, 1, &sum); + if (ret < 0) + goto done; - if (ec_response_timed_out()) { + ret = ec_response_timed_out(); + if (ret < 0) + goto done; + if (ret) { dev_warn(ec->dev, "EC response timed out\n"); ret = -EIO; goto done; } /* Check result */ - msg->result = cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_DATA, 1, &sum); + ret = cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_DATA, 1, &sum); + if (ret < 0) + goto done; + msg->result = ret; ret = cros_ec_check_result(ec, msg); if (ret) goto done; /* Read back response */ dout = (u8 *)&response; - sum = cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_PACKET, sizeof(response), + ret = cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_PACKET, sizeof(response), dout); + if (ret < 0) + goto done; + sum = ret; msg->result = response.result; @@ -213,9 +246,12 @@ static int cros_ec_pkt_xfer_lpc(struct cros_ec_device *ec, } /* Read response and process checksum */ - sum += cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_PACKET + - sizeof(response), response.data_len, - msg->data); + ret = cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_PACKET + + sizeof(response), response.data_len, + msg->data); + if (ret < 0) + goto done; + sum += ret; if (sum) { dev_err(ec->dev, @@ -255,32 +291,47 @@ static int cros_ec_cmd_xfer_lpc(struct cros_ec_device *ec, sum = msg->command + args.flags + args.command_version + args.data_size; /* Copy data and update checksum */ - sum += cros_ec_lpc_ops.write(EC_LPC_ADDR_HOST_PARAM, msg->outsize, - msg->data); + ret = cros_ec_lpc_ops.write(EC_LPC_ADDR_HOST_PARAM, msg->outsize, + msg->data); + if (ret < 0) + goto done; + sum += ret; /* Finalize checksum and write args */ args.checksum = sum; - cros_ec_lpc_ops.write(EC_LPC_ADDR_HOST_ARGS, sizeof(args), - (u8 *)&args); + ret = cros_ec_lpc_ops.write(EC_LPC_ADDR_HOST_ARGS, sizeof(args), + (u8 *)&args); + if (ret < 0) + goto done; /* Here we go */ sum = msg->command; - cros_ec_lpc_ops.write(EC_LPC_ADDR_HOST_CMD, 1, &sum); + ret = cros_ec_lpc_ops.write(EC_LPC_ADDR_HOST_CMD, 1, &sum); + if (ret < 0) + goto done; - if (ec_response_timed_out()) { + ret = ec_response_timed_out(); + if (ret < 0) + goto done; + if (ret) { dev_warn(ec->dev, "EC response timed out\n"); ret = -EIO; goto done; } /* Check result */ - msg->result = cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_DATA, 1, &sum); + ret = cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_DATA, 1, &sum); + if (ret < 0) + goto done; + msg->result = ret; ret = cros_ec_check_result(ec, msg); if (ret) goto done; /* Read back args */ - cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_ARGS, sizeof(args), (u8 *)&args); + ret = cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_ARGS, sizeof(args), (u8 *)&args); + if (ret < 0) + goto done; if 
(args.data_size > msg->insize) { dev_err(ec->dev, @@ -294,8 +345,11 @@ static int cros_ec_cmd_xfer_lpc(struct cros_ec_device *ec, sum = msg->command + args.flags + args.command_version + args.data_size; /* Read response and update checksum */ - sum += cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_PARAM, args.data_size, - msg->data); + ret = cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_PARAM, args.data_size, + msg->data); + if (ret < 0) + goto done; + sum += ret; /* Verify checksum */ if (args.checksum != sum) { @@ -320,19 +374,24 @@ static int cros_ec_lpc_readmem(struct cros_ec_device *ec, unsigned int offset, int i = offset; char *s = dest; int cnt = 0; + int ret; if (offset >= EC_MEMMAP_SIZE - bytes) return -EINVAL; /* fixed length */ if (bytes) { - cros_ec_lpc_ops.read(ec_lpc->mmio_memory_base + offset, bytes, s); + ret = cros_ec_lpc_ops.read(ec_lpc->mmio_memory_base + offset, bytes, s); + if (ret < 0) + return ret; return bytes; } /* string */ for (; i < EC_MEMMAP_SIZE; i++, s++) { - cros_ec_lpc_ops.read(ec_lpc->mmio_memory_base + i, 1, s); + ret = cros_ec_lpc_ops.read(ec_lpc->mmio_memory_base + i, 1, s); + if (ret < 0) + return ret; cnt++; if (!*s) break; @@ -374,6 +433,26 @@ static void cros_ec_lpc_acpi_notify(acpi_handle device, u32 value, void *data) pm_system_wakeup(); } +static acpi_status cros_ec_lpc_parse_device(acpi_handle handle, u32 level, + void *context, void **retval) +{ + *(struct acpi_device **)context = acpi_fetch_acpi_dev(handle); + return AE_CTRL_TERMINATE; +} + +static struct acpi_device *cros_ec_lpc_get_device(const char *id) +{ + struct acpi_device *adev = NULL; + acpi_status status = acpi_get_devices(id, cros_ec_lpc_parse_device, + &adev, NULL); + if (ACPI_FAILURE(status)) { + pr_warn(DRV_NAME ": Looking for %s failed\n", id); + return NULL; + } + + return adev; +} + static int cros_ec_lpc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -401,6 +480,27 @@ static int cros_ec_lpc_probe(struct platform_device *pdev) if (quirks & CROS_EC_LPC_QUIRK_REMAP_MEMORY) ec_lpc->mmio_memory_base = driver_data->quirk_mmio_memory_base; + + if (quirks & CROS_EC_LPC_QUIRK_ACPI_ID) { + adev = cros_ec_lpc_get_device(driver_data->quirk_acpi_id); + if (!adev) { + dev_err(dev, "failed to get ACPI device '%s'", + driver_data->quirk_acpi_id); + return -ENODEV; + } + ACPI_COMPANION_SET(dev, adev); + } + + if (quirks & CROS_EC_LPC_QUIRK_AML_MUTEX) { + const char *name + = driver_data->quirk_aml_mutex_name; + ret = cros_ec_lpc_mec_acpi_mutex(ACPI_COMPANION(dev), name); + if (ret) { + dev_err(dev, "failed to get AML mutex '%s'", name); + return ret; + } + dev_info(dev, "got AML mutex '%s'", name); + } } /* @@ -425,7 +525,9 @@ static int cros_ec_lpc_probe(struct platform_device *pdev) */ cros_ec_lpc_ops.read = cros_ec_lpc_mec_read_bytes; cros_ec_lpc_ops.write = cros_ec_lpc_mec_write_bytes; - cros_ec_lpc_ops.read(EC_LPC_ADDR_MEMMAP + EC_MEMMAP_ID, 2, buf); + ret = cros_ec_lpc_ops.read(EC_LPC_ADDR_MEMMAP + EC_MEMMAP_ID, 2, buf); + if (ret < 0) + return ret; if (buf[0] != 'E' || buf[1] != 'C') { if (!devm_request_region(dev, ec_lpc->mmio_memory_base, EC_MEMMAP_SIZE, dev_name(dev))) { @@ -436,8 +538,10 @@ static int cros_ec_lpc_probe(struct platform_device *pdev) /* Re-assign read/write operations for the non MEC variant */ cros_ec_lpc_ops.read = cros_ec_lpc_read_bytes; cros_ec_lpc_ops.write = cros_ec_lpc_write_bytes; - cros_ec_lpc_ops.read(ec_lpc->mmio_memory_base + EC_MEMMAP_ID, 2, - buf); + ret = cros_ec_lpc_ops.read(ec_lpc->mmio_memory_base + EC_MEMMAP_ID, 2, + buf); + if (ret < 0) 
+ return ret; if (buf[0] != 'E' || buf[1] != 'C') { dev_err(dev, "EC ID not detected\n"); return -ENODEV; @@ -532,6 +636,12 @@ static const struct lpc_driver_data framework_laptop_amd_lpc_driver_data __initc .quirk_mmio_memory_base = 0xE00, }; +static const struct lpc_driver_data framework_laptop_11_lpc_driver_data __initconst = { + .quirks = CROS_EC_LPC_QUIRK_ACPI_ID|CROS_EC_LPC_QUIRK_AML_MUTEX, + .quirk_acpi_id = "PNP0C09", + .quirk_aml_mutex_name = "ECMT", +}; + static const struct dmi_system_id cros_ec_lpc_dmi_table[] __initconst = { { /* @@ -600,6 +710,7 @@ static const struct dmi_system_id cros_ec_lpc_dmi_table[] __initconst = { DMI_MATCH(DMI_SYS_VENDOR, "Framework"), DMI_MATCH(DMI_PRODUCT_NAME, "Laptop"), }, + .driver_data = (void *)&framework_laptop_11_lpc_driver_data, }, { /* sentinel */ } }; @@ -661,23 +772,12 @@ static struct platform_device cros_ec_lpc_device = { .name = DRV_NAME }; -static acpi_status cros_ec_lpc_parse_device(acpi_handle handle, u32 level, - void *context, void **retval) -{ - *(bool *)context = true; - return AE_CTRL_TERMINATE; -} - static int __init cros_ec_lpc_init(void) { int ret; - acpi_status status; const struct dmi_system_id *dmi_match; - status = acpi_get_devices(ACPI_DRV_NAME, cros_ec_lpc_parse_device, - &cros_ec_lpc_acpi_device_found, NULL); - if (ACPI_FAILURE(status)) - pr_warn(DRV_NAME ": Looking for %s failed\n", ACPI_DRV_NAME); + cros_ec_lpc_acpi_device_found = !!cros_ec_lpc_get_device(ACPI_DRV_NAME); dmi_match = dmi_first_match(cros_ec_lpc_dmi_table); diff --git a/drivers/platform/chrome/cros_ec_lpc_mec.c b/drivers/platform/chrome/cros_ec_lpc_mec.c index 0d9c79b270ce..a56584171168 100644 --- a/drivers/platform/chrome/cros_ec_lpc_mec.c +++ b/drivers/platform/chrome/cros_ec_lpc_mec.c @@ -10,14 +10,66 @@ #include "cros_ec_lpc_mec.h" +#define ACPI_LOCK_DELAY_MS 500 + /* * This mutex must be held while accessing the EMI unit. We can't rely on the * EC mutex because memmap data may be accessed without it being held. */ static DEFINE_MUTEX(io_mutex); +/* + * An alternative mutex to be used when the ACPI AML code may also + * access memmap data. When set, this mutex is used in preference to + * io_mutex. + */ +static acpi_handle aml_mutex; + static u16 mec_emi_base, mec_emi_end; /** + * cros_ec_lpc_mec_lock() - Acquire mutex for EMI + * + * @return: Negative error code, or zero for success + */ +static int cros_ec_lpc_mec_lock(void) +{ + bool success; + + if (!aml_mutex) { + mutex_lock(&io_mutex); + return 0; + } + + success = ACPI_SUCCESS(acpi_acquire_mutex(aml_mutex, + NULL, ACPI_LOCK_DELAY_MS)); + if (!success) + return -EBUSY; + + return 0; +} + +/** + * cros_ec_lpc_mec_unlock() - Release mutex for EMI + * + * @return: Negative error code, or zero for success + */ +static int cros_ec_lpc_mec_unlock(void) +{ + bool success; + + if (!aml_mutex) { + mutex_unlock(&io_mutex); + return 0; + } + + success = ACPI_SUCCESS(acpi_release_mutex(aml_mutex, NULL)); + if (!success) + return -EBUSY; + + return 0; +} + +/** * cros_ec_lpc_mec_emi_write_address() - Initialize EMI at a given address. 
* * @addr: Starting read / write address @@ -41,9 +93,6 @@ static void cros_ec_lpc_mec_emi_write_address(u16 addr, */ int cros_ec_lpc_mec_in_range(unsigned int offset, unsigned int length) { - if (length == 0) - return -EINVAL; - if (WARN_ON(mec_emi_base == 0 || mec_emi_end == 0)) return -EINVAL; @@ -67,16 +116,21 @@ int cros_ec_lpc_mec_in_range(unsigned int offset, unsigned int length) * @length: Number of bytes to read / write * @buf: Destination / source buffer * - * Return: 8-bit checksum of all bytes read / written + * @return: A negative error code on error, or 8-bit checksum of all + * bytes read / written */ -u8 cros_ec_lpc_io_bytes_mec(enum cros_ec_lpc_mec_io_type io_type, - unsigned int offset, unsigned int length, - u8 *buf) +int cros_ec_lpc_io_bytes_mec(enum cros_ec_lpc_mec_io_type io_type, + unsigned int offset, unsigned int length, + u8 *buf) { int i = 0; int io_addr; u8 sum = 0; enum cros_ec_lpc_mec_emi_access_mode access, new_access; + int ret; + + if (length == 0) + return 0; /* Return checksum of 0 if window is not initialized */ WARN_ON(mec_emi_base == 0 || mec_emi_end == 0); @@ -92,7 +146,9 @@ u8 cros_ec_lpc_io_bytes_mec(enum cros_ec_lpc_mec_io_type io_type, else access = ACCESS_TYPE_LONG_AUTO_INCREMENT; - mutex_lock(&io_mutex); + ret = cros_ec_lpc_mec_lock(); + if (ret) + return ret; /* Initialize I/O at desired address */ cros_ec_lpc_mec_emi_write_address(offset, access); @@ -134,7 +190,9 @@ u8 cros_ec_lpc_io_bytes_mec(enum cros_ec_lpc_mec_io_type io_type, } done: - mutex_unlock(&io_mutex); + ret = cros_ec_lpc_mec_unlock(); + if (ret) + return ret; return sum; } @@ -146,3 +204,18 @@ void cros_ec_lpc_mec_init(unsigned int base, unsigned int end) mec_emi_end = end; } EXPORT_SYMBOL(cros_ec_lpc_mec_init); + +int cros_ec_lpc_mec_acpi_mutex(struct acpi_device *adev, const char *pathname) +{ + int status; + + if (!adev) + return -ENOENT; + + status = acpi_get_handle(adev->handle, pathname, &aml_mutex); + if (ACPI_FAILURE(status)) + return -ENOENT; + + return 0; +} +EXPORT_SYMBOL(cros_ec_lpc_mec_acpi_mutex); diff --git a/drivers/platform/chrome/cros_ec_lpc_mec.h b/drivers/platform/chrome/cros_ec_lpc_mec.h index 9d0521b23e8a..69f9d8786f61 100644 --- a/drivers/platform/chrome/cros_ec_lpc_mec.h +++ b/drivers/platform/chrome/cros_ec_lpc_mec.h @@ -8,6 +8,8 @@ #ifndef __CROS_EC_LPC_MEC_H #define __CROS_EC_LPC_MEC_H +#include <linux/acpi.h> + enum cros_ec_lpc_mec_emi_access_mode { /* 8-bit access */ ACCESS_TYPE_BYTE = 0x0, @@ -46,6 +48,15 @@ enum cros_ec_lpc_mec_io_type { void cros_ec_lpc_mec_init(unsigned int base, unsigned int end); /** + * cros_ec_lpc_mec_acpi_mutex() - Find and set ACPI mutex for MEC + * + * @adev: Parent ACPI device + * @pathname: Name of AML mutex + * @return: Negative error code, or zero for success + */ +int cros_ec_lpc_mec_acpi_mutex(struct acpi_device *adev, const char *pathname); + +/** * cros_ec_lpc_mec_in_range() - Determine if addresses are in MEC EMI range. 
* * @offset: Address offset @@ -64,9 +75,10 @@ int cros_ec_lpc_mec_in_range(unsigned int offset, unsigned int length); * @length: Number of bytes to read / write * @buf: Destination / source buffer * - * @return 8-bit checksum of all bytes read / written + * @return: A negative error code on error, or 8-bit checksum of all + * bytes read / written */ -u8 cros_ec_lpc_io_bytes_mec(enum cros_ec_lpc_mec_io_type io_type, - unsigned int offset, unsigned int length, u8 *buf); +int cros_ec_lpc_io_bytes_mec(enum cros_ec_lpc_mec_io_type io_type, + unsigned int offset, unsigned int length, u8 *buf); #endif /* __CROS_EC_LPC_MEC_H */ diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c index 945b1b15a04c..f776fd42244f 100644 --- a/drivers/platform/chrome/cros_ec_proto.c +++ b/drivers/platform/chrome/cros_ec_proto.c @@ -5,6 +5,7 @@ #include <linux/delay.h> #include <linux/device.h> +#include <linux/limits.h> #include <linux/module.h> #include <linux/platform_data/cros_ec_commands.h> #include <linux/platform_data/cros_ec_proto.h> @@ -239,13 +240,12 @@ int cros_ec_check_result(struct cros_ec_device *ec_dev, } EXPORT_SYMBOL(cros_ec_check_result); -/* +/** * cros_ec_get_host_event_wake_mask * * Get the mask of host events that cause wake from suspend. * * @ec_dev: EC device to call - * @msg: message structure to use * @mask: result when function returns 0. * * LOCKING: @@ -427,13 +427,12 @@ exit: return ret; } -/* +/** * cros_ec_get_host_command_version_mask * * Get the version mask of a given command. * * @ec_dev: EC device to call - * @msg: message structure to use * @cmd: command to get the version of. * @mask: result when function returns 0. * @@ -686,7 +685,7 @@ EXPORT_SYMBOL(cros_ec_cmd_xfer_status); static int get_next_event_xfer(struct cros_ec_device *ec_dev, struct cros_ec_command *msg, - struct ec_response_get_next_event_v1 *event, + struct ec_response_get_next_event_v3 *event, int version, uint32_t size) { int ret; @@ -709,11 +708,12 @@ static int get_next_event(struct cros_ec_device *ec_dev) { struct { struct cros_ec_command msg; - struct ec_response_get_next_event_v1 event; + struct ec_response_get_next_event_v3 event; } __packed buf; struct cros_ec_command *msg = &buf.msg; - struct ec_response_get_next_event_v1 *event = &buf.event; - const int cmd_version = ec_dev->mkbp_event_supported - 1; + struct ec_response_get_next_event_v3 *event = &buf.event; + int cmd_version = ec_dev->mkbp_event_supported - 1; + u32 size; memset(msg, 0, sizeof(*msg)); if (ec_dev->suspended) { @@ -721,12 +721,20 @@ static int get_next_event(struct cros_ec_device *ec_dev) return -EHOSTDOWN; } - if (cmd_version == 0) - return get_next_event_xfer(ec_dev, msg, event, 0, - sizeof(struct ec_response_get_next_event)); + if (cmd_version == 0) { + size = sizeof(struct ec_response_get_next_event); + } else if (cmd_version < 3) { + size = sizeof(struct ec_response_get_next_event_v1); + } else { + /* + * The max version we support is v3. So, we speak v3 even if the + * EC says it supports v4+. + */ + cmd_version = 3; + size = sizeof(struct ec_response_get_next_event_v3); + } - return get_next_event_xfer(ec_dev, msg, event, cmd_version, - sizeof(struct ec_response_get_next_event_v1)); + return get_next_event_xfer(ec_dev, msg, event, cmd_version, size); } static int get_keyboard_state_event(struct cros_ec_device *ec_dev) @@ -1035,3 +1043,64 @@ error: return ret; } EXPORT_SYMBOL_GPL(cros_ec_cmd); + +/** + * cros_ec_cmd_readmem - Read from EC memory. 
+ * + * @ec_dev: EC device + * @offset: Is within EC_LPC_ADDR_MEMMAP region. + * @size: Number of bytes to read. + * @dest: EC command output data + * + * Return: >= 0 on success, negative error number on failure. + */ +int cros_ec_cmd_readmem(struct cros_ec_device *ec_dev, u8 offset, u8 size, void *dest) +{ + struct ec_params_read_memmap params = {}; + + if (!size) + return -EINVAL; + + if (ec_dev->cmd_readmem) + return ec_dev->cmd_readmem(ec_dev, offset, size, dest); + + params.offset = offset; + params.size = size; + return cros_ec_cmd(ec_dev, 0, EC_CMD_READ_MEMMAP, + ¶ms, sizeof(params), dest, size); +} +EXPORT_SYMBOL_GPL(cros_ec_cmd_readmem); + +/** + * cros_ec_get_cmd_versions - Get supported version mask. + * + * @ec_dev: EC device + * @cmd: Command to test + * + * Return: version mask on success, negative error number on failure. + */ +int cros_ec_get_cmd_versions(struct cros_ec_device *ec_dev, u16 cmd) +{ + struct ec_params_get_cmd_versions req_v0; + struct ec_params_get_cmd_versions_v1 req_v1; + struct ec_response_get_cmd_versions resp; + int ret; + + if (cmd <= U8_MAX) { + req_v0.cmd = cmd; + ret = cros_ec_cmd(ec_dev, 0, EC_CMD_GET_CMD_VERSIONS, + &req_v0, sizeof(req_v0), &resp, sizeof(resp)); + } else { + req_v1.cmd = cmd; + ret = cros_ec_cmd(ec_dev, 1, EC_CMD_GET_CMD_VERSIONS, + &req_v1, sizeof(req_v1), &resp, sizeof(resp)); + } + + if (ret == -EINVAL) + return 0; /* Command not implemented */ + else if (ret < 0) + return ret; + else + return resp.version_mask; +} +EXPORT_SYMBOL_GPL(cros_ec_get_cmd_versions); diff --git a/drivers/platform/chrome/cros_ec_proto_test.c b/drivers/platform/chrome/cros_ec_proto_test.c index 41378c2ee6a0..7ca9895a0065 100644 --- a/drivers/platform/chrome/cros_ec_proto_test.c +++ b/drivers/platform/chrome/cros_ec_proto_test.c @@ -2060,17 +2060,17 @@ static void cros_ec_proto_test_get_next_event_no_mkbp_event(struct kunit *test) /* For get_keyboard_state_event(). 
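 * The mock payload below is sized for the v3 data union, matching the
 * v3 response layout the protocol core now uses.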
*/ { - union ec_response_get_next_data_v1 *data; + union ec_response_get_next_data_v3 *data; mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data)); KUNIT_ASSERT_PTR_NE(test, mock, NULL); - data = (union ec_response_get_next_data_v1 *)mock->o_data; + data = (union ec_response_get_next_data_v3 *)mock->o_data; data->host_event = 0xbeef; } ret = cros_ec_get_next_event(ec_dev, &wake_event, &more_events); - KUNIT_EXPECT_EQ(test, ret, sizeof(union ec_response_get_next_data_v1)); + KUNIT_EXPECT_EQ(test, ret, sizeof(union ec_response_get_next_data_v3)); KUNIT_EXPECT_EQ(test, ec_dev->event_data.event_type, EC_MKBP_EVENT_KEY_MATRIX); KUNIT_EXPECT_EQ(test, ec_dev->event_data.data.host_event, 0xbeef); @@ -2085,7 +2085,7 @@ static void cros_ec_proto_test_get_next_event_no_mkbp_event(struct kunit *test) KUNIT_EXPECT_EQ(test, mock->msg.version, 0); KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_MKBP_STATE); - KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(union ec_response_get_next_data_v1)); + KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(union ec_response_get_next_data_v3)); KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0); } } @@ -2740,4 +2740,5 @@ static struct kunit_suite cros_ec_proto_test_suite = { kunit_test_suite(cros_ec_proto_test_suite); +MODULE_DESCRIPTION("Kunit tests for ChromeOS Embedded Controller protocol"); MODULE_LICENSE("GPL"); diff --git a/drivers/platform/chrome/wilco_ec/mailbox.c b/drivers/platform/chrome/wilco_ec/mailbox.c index 0f98358ea824..4d8273b47cde 100644 --- a/drivers/platform/chrome/wilco_ec/mailbox.c +++ b/drivers/platform/chrome/wilco_ec/mailbox.c @@ -117,13 +117,17 @@ static int wilco_ec_transfer(struct wilco_ec_device *ec, struct wilco_ec_request *rq) { struct wilco_ec_response *rs; - u8 checksum; + int ret; u8 flag; /* Write request header, then data */ - cros_ec_lpc_io_bytes_mec(MEC_IO_WRITE, 0, sizeof(*rq), (u8 *)rq); - cros_ec_lpc_io_bytes_mec(MEC_IO_WRITE, sizeof(*rq), msg->request_size, - msg->request_data); + ret = cros_ec_lpc_io_bytes_mec(MEC_IO_WRITE, 0, sizeof(*rq), (u8 *)rq); + if (ret < 0) + return ret; + ret = cros_ec_lpc_io_bytes_mec(MEC_IO_WRITE, sizeof(*rq), msg->request_size, + msg->request_data); + if (ret < 0) + return ret; /* Start the command */ outb(EC_MAILBOX_START_COMMAND, ec->io_command->start); @@ -149,10 +153,12 @@ static int wilco_ec_transfer(struct wilco_ec_device *ec, /* Read back response */ rs = ec->data_buffer; - checksum = cros_ec_lpc_io_bytes_mec(MEC_IO_READ, 0, - sizeof(*rs) + EC_MAILBOX_DATA_SIZE, - (u8 *)rs); - if (checksum) { + ret = cros_ec_lpc_io_bytes_mec(MEC_IO_READ, 0, + sizeof(*rs) + EC_MAILBOX_DATA_SIZE, + (u8 *)rs); + if (ret < 0) + return ret; + if (ret) { dev_dbg(ec->dev, "bad packet checksum 0x%02x\n", rs->checksum); return -EBADMSG; } diff --git a/drivers/platform/cznic/Kconfig b/drivers/platform/cznic/Kconfig new file mode 100644 index 000000000000..cb0d4d686d8a --- /dev/null +++ b/drivers/platform/cznic/Kconfig @@ -0,0 +1,50 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# For a description of the syntax of this configuration file, +# see Documentation/kbuild/kconfig-language.rst. +# + +menuconfig CZNIC_PLATFORMS + bool "Platform support for CZ.NIC's Turris hardware" + help + Say Y here to be able to choose driver support for CZ.NIC's Turris + devices. This option alone does not add any kernel code. 
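+# Note: the driver entries inside the "if CZNIC_PLATFORMS" block below
+# remain independent tristate options; this menu entry only gates their
+# availability.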
+ +if CZNIC_PLATFORMS + +config TURRIS_OMNIA_MCU + tristate "Turris Omnia MCU driver" + depends on MACH_ARMADA_38X || COMPILE_TEST + depends on I2C + depends on OF + depends on WATCHDOG + depends on GPIOLIB + depends on HW_RANDOM + depends on RTC_CLASS + depends on WATCHDOG_CORE + select GPIOLIB_IRQCHIP + help + Say Y here to add support for the features implemented by the + microcontroller on the CZ.NIC's Turris Omnia SOHO router. + The features include: + - board poweroff into true low power mode (with voltage regulators + disabled) and the ability to configure wake up from this mode (via + rtcwake) + - true random number generator (if available on the MCU) + - MCU watchdog + - GPIO pins + - to get front button press events (the front button can be + configured either to generate press events to the CPU or to change + front LEDs panel brightness) + - to enable / disable USB port voltage regulators and to detect + USB overcurrent + - to detect MiniPCIe / mSATA card presence in MiniPCIe port 0 + - to configure resets of various peripherals on board revisions 32+ + - to enable / disable the VHV voltage regulator to the SOC in order + to be able to program SOC's OTP on board revisions 32+ + - to get input from the LED output pins of the WAN ethernet PHY, LAN + switch and MiniPCIe ports + To compile this driver as a module, choose M here; the module will be + called turris-omnia-mcu. + +endif # CZNIC_PLATFORMS diff --git a/drivers/platform/cznic/Makefile b/drivers/platform/cznic/Makefile new file mode 100644 index 000000000000..eae4c6b341ff --- /dev/null +++ b/drivers/platform/cznic/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0-only + +obj-$(CONFIG_TURRIS_OMNIA_MCU) += turris-omnia-mcu.o +turris-omnia-mcu-y := turris-omnia-mcu-base.o +turris-omnia-mcu-y += turris-omnia-mcu-gpio.o +turris-omnia-mcu-y += turris-omnia-mcu-sys-off-wakeup.o +turris-omnia-mcu-y += turris-omnia-mcu-trng.o +turris-omnia-mcu-y += turris-omnia-mcu-watchdog.o diff --git a/drivers/platform/cznic/turris-omnia-mcu-base.c b/drivers/platform/cznic/turris-omnia-mcu-base.c new file mode 100644 index 000000000000..c68a7a84a951 --- /dev/null +++ b/drivers/platform/cznic/turris-omnia-mcu-base.c @@ -0,0 +1,408 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * CZ.NIC's Turris Omnia MCU driver + * + * 2024 by Marek Behún <kabel@kernel.org> + */ + +#include <linux/array_size.h> +#include <linux/bits.h> +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/hex.h> +#include <linux/i2c.h> +#include <linux/module.h> +#include <linux/string.h> +#include <linux/sysfs.h> +#include <linux/types.h> + +#include <linux/turris-omnia-mcu-interface.h> +#include "turris-omnia-mcu.h" + +#define OMNIA_FW_VERSION_LEN 20 +#define OMNIA_FW_VERSION_HEX_LEN (2 * OMNIA_FW_VERSION_LEN + 1) +#define OMNIA_BOARD_INFO_LEN 16 + +int omnia_cmd_write_read(const struct i2c_client *client, + void *cmd, unsigned int cmd_len, + void *reply, unsigned int reply_len) +{ + struct i2c_msg msgs[2]; + int ret, num; + + msgs[0].addr = client->addr; + msgs[0].flags = 0; + msgs[0].len = cmd_len; + msgs[0].buf = cmd; + num = 1; + + if (reply_len) { + msgs[1].addr = client->addr; + msgs[1].flags = I2C_M_RD; + msgs[1].len = reply_len; + msgs[1].buf = reply; + num++; + } + + ret = i2c_transfer(client->adapter, msgs, num); + if (ret < 0) + return ret; + if (ret != num) + return -EIO; + + return 0; +} + +static int omnia_get_version_hash(struct omnia_mcu *mcu, bool bootloader, + char version[static OMNIA_FW_VERSION_HEX_LEN]) +{ + u8 
reply[OMNIA_FW_VERSION_LEN]; + char *p; + int err; + + err = omnia_cmd_read(mcu->client, + bootloader ? OMNIA_CMD_GET_FW_VERSION_BOOT + : OMNIA_CMD_GET_FW_VERSION_APP, + reply, sizeof(reply)); + if (err) + return err; + + p = bin2hex(version, reply, OMNIA_FW_VERSION_LEN); + *p = '\0'; + + return 0; +} + +static ssize_t fw_version_hash_show(struct device *dev, char *buf, + bool bootloader) +{ + struct omnia_mcu *mcu = dev_get_drvdata(dev); + char version[OMNIA_FW_VERSION_HEX_LEN]; + int err; + + err = omnia_get_version_hash(mcu, bootloader, version); + if (err) + return err; + + return sysfs_emit(buf, "%s\n", version); +} + +static ssize_t fw_version_hash_application_show(struct device *dev, + struct device_attribute *a, + char *buf) +{ + return fw_version_hash_show(dev, buf, false); +} +static DEVICE_ATTR_RO(fw_version_hash_application); + +static ssize_t fw_version_hash_bootloader_show(struct device *dev, + struct device_attribute *a, + char *buf) +{ + return fw_version_hash_show(dev, buf, true); +} +static DEVICE_ATTR_RO(fw_version_hash_bootloader); + +static ssize_t fw_features_show(struct device *dev, struct device_attribute *a, + char *buf) +{ + struct omnia_mcu *mcu = dev_get_drvdata(dev); + + return sysfs_emit(buf, "0x%x\n", mcu->features); +} +static DEVICE_ATTR_RO(fw_features); + +static ssize_t mcu_type_show(struct device *dev, struct device_attribute *a, + char *buf) +{ + struct omnia_mcu *mcu = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%s\n", mcu->type); +} +static DEVICE_ATTR_RO(mcu_type); + +static ssize_t reset_selector_show(struct device *dev, + struct device_attribute *a, char *buf) +{ + u8 reply; + int err; + + err = omnia_cmd_read_u8(to_i2c_client(dev), OMNIA_CMD_GET_RESET, + &reply); + if (err) + return err; + + return sysfs_emit(buf, "%d\n", reply); +} +static DEVICE_ATTR_RO(reset_selector); + +static ssize_t serial_number_show(struct device *dev, + struct device_attribute *a, char *buf) +{ + struct omnia_mcu *mcu = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%016llX\n", mcu->board_serial_number); +} +static DEVICE_ATTR_RO(serial_number); + +static ssize_t first_mac_address_show(struct device *dev, + struct device_attribute *a, char *buf) +{ + struct omnia_mcu *mcu = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%pM\n", mcu->board_first_mac); +} +static DEVICE_ATTR_RO(first_mac_address); + +static ssize_t board_revision_show(struct device *dev, + struct device_attribute *a, char *buf) +{ + struct omnia_mcu *mcu = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%u\n", mcu->board_revision); +} +static DEVICE_ATTR_RO(board_revision); + +static struct attribute *omnia_mcu_base_attrs[] = { + &dev_attr_fw_version_hash_application.attr, + &dev_attr_fw_version_hash_bootloader.attr, + &dev_attr_fw_features.attr, + &dev_attr_mcu_type.attr, + &dev_attr_reset_selector.attr, + &dev_attr_serial_number.attr, + &dev_attr_first_mac_address.attr, + &dev_attr_board_revision.attr, + NULL +}; + +static umode_t omnia_mcu_base_attrs_visible(struct kobject *kobj, + struct attribute *a, int n) +{ + struct device *dev = kobj_to_dev(kobj); + struct omnia_mcu *mcu = dev_get_drvdata(dev); + + if ((a == &dev_attr_serial_number.attr || + a == &dev_attr_first_mac_address.attr || + a == &dev_attr_board_revision.attr) && + !(mcu->features & OMNIA_FEAT_BOARD_INFO)) + return 0; + + return a->mode; +} + +static const struct attribute_group omnia_mcu_base_group = { + .attrs = omnia_mcu_base_attrs, + .is_visible = omnia_mcu_base_attrs_visible, +}; + +static const struct attribute_group 
*omnia_mcu_groups[] = { + &omnia_mcu_base_group, + &omnia_mcu_gpio_group, + &omnia_mcu_poweroff_group, + NULL +}; + +static void omnia_mcu_print_version_hash(struct omnia_mcu *mcu, bool bootloader) +{ + const char *type = bootloader ? "bootloader" : "application"; + struct device *dev = &mcu->client->dev; + char version[OMNIA_FW_VERSION_HEX_LEN]; + int err; + + err = omnia_get_version_hash(mcu, bootloader, version); + if (err) { + dev_err(dev, "Cannot read MCU %s firmware version: %d\n", + type, err); + return; + } + + dev_info(dev, "MCU %s firmware version hash: %s\n", type, version); +} + +static const char *omnia_status_to_mcu_type(u16 status) +{ + switch (status & OMNIA_STS_MCU_TYPE_MASK) { + case OMNIA_STS_MCU_TYPE_STM32: + return "STM32"; + case OMNIA_STS_MCU_TYPE_GD32: + return "GD32"; + case OMNIA_STS_MCU_TYPE_MKL: + return "MKL"; + default: + return "unknown"; + } +} + +static void omnia_info_missing_feature(struct device *dev, const char *feature) +{ + dev_info(dev, + "Your board's MCU firmware does not support the %s feature.\n", + feature); +} + +static int omnia_mcu_read_features(struct omnia_mcu *mcu) +{ + static const struct { + u16 mask; + const char *name; + } features[] = { +#define _DEF_FEAT(_n, _m) { OMNIA_FEAT_ ## _n, _m } + _DEF_FEAT(EXT_CMDS, "extended control and status"), + _DEF_FEAT(WDT_PING, "watchdog pinging"), + _DEF_FEAT(LED_STATE_EXT_MASK, "peripheral LED pins reading"), + _DEF_FEAT(NEW_INT_API, "new interrupt API"), + _DEF_FEAT(POWEROFF_WAKEUP, "poweroff and wakeup"), + _DEF_FEAT(TRNG, "true random number generator"), +#undef _DEF_FEAT + }; + struct i2c_client *client = mcu->client; + struct device *dev = &client->dev; + bool suggest_fw_upgrade = false; + u16 status; + int err; + + /* status word holds MCU type, which we need below */ + err = omnia_cmd_read_u16(client, OMNIA_CMD_GET_STATUS_WORD, &status); + if (err) + return err; + + /* + * Check whether MCU firmware supports the OMNIA_CMD_GET_FEATURES + * command. + */ + if (status & OMNIA_STS_FEATURES_SUPPORTED) { + /* try read 32-bit features */ + err = omnia_cmd_read_u32(client, OMNIA_CMD_GET_FEATURES, + &mcu->features); + if (err) { + /* try read 16-bit features */ + u16 features16; + + err = omnia_cmd_read_u16(client, OMNIA_CMD_GET_FEATURES, + &features16); + if (err) + return err; + + mcu->features = features16; + } else { + if (mcu->features & OMNIA_FEAT_FROM_BIT_16_INVALID) + mcu->features &= GENMASK(15, 0); + } + } else { + dev_info(dev, + "Your board's MCU firmware does not support feature reading.\n"); + suggest_fw_upgrade = true; + } + + mcu->type = omnia_status_to_mcu_type(status); + dev_info(dev, "MCU type %s%s\n", mcu->type, + (mcu->features & OMNIA_FEAT_PERIPH_MCU) ? + ", with peripheral resets wired" : ""); + + omnia_mcu_print_version_hash(mcu, true); + + if (mcu->features & OMNIA_FEAT_BOOTLOADER) + dev_warn(dev, + "MCU is running bootloader firmware. 
Was firmware upgrade interrupted?\n"); + else + omnia_mcu_print_version_hash(mcu, false); + + for (unsigned int i = 0; i < ARRAY_SIZE(features); i++) { + if (mcu->features & features[i].mask) + continue; + + omnia_info_missing_feature(dev, features[i].name); + suggest_fw_upgrade = true; + } + + if (suggest_fw_upgrade) + dev_info(dev, + "Consider upgrading MCU firmware with the omnia-mcutool utility.\n"); + + return 0; +} + +static int omnia_mcu_read_board_info(struct omnia_mcu *mcu) +{ + u8 reply[1 + OMNIA_BOARD_INFO_LEN]; + int err; + + err = omnia_cmd_read(mcu->client, OMNIA_CMD_BOARD_INFO_GET, reply, + sizeof(reply)); + if (err) + return err; + + if (reply[0] != OMNIA_BOARD_INFO_LEN) + return -EIO; + + mcu->board_serial_number = get_unaligned_le64(&reply[1]); + + /* we can't use ether_addr_copy() because reply is not u16-aligned */ + memcpy(mcu->board_first_mac, &reply[9], sizeof(mcu->board_first_mac)); + + mcu->board_revision = reply[15]; + + return 0; +} + +static int omnia_mcu_probe(struct i2c_client *client) +{ + struct device *dev = &client->dev; + struct omnia_mcu *mcu; + int err; + + if (!client->irq) + return dev_err_probe(dev, -EINVAL, "IRQ resource not found\n"); + + mcu = devm_kzalloc(dev, sizeof(*mcu), GFP_KERNEL); + if (!mcu) + return -ENOMEM; + + mcu->client = client; + i2c_set_clientdata(client, mcu); + + err = omnia_mcu_read_features(mcu); + if (err) + return dev_err_probe(dev, err, + "Cannot determine MCU supported features\n"); + + if (mcu->features & OMNIA_FEAT_BOARD_INFO) { + err = omnia_mcu_read_board_info(mcu); + if (err) + return dev_err_probe(dev, err, + "Cannot read board info\n"); + } + + err = omnia_mcu_register_sys_off_and_wakeup(mcu); + if (err) + return err; + + err = omnia_mcu_register_watchdog(mcu); + if (err) + return err; + + err = omnia_mcu_register_gpiochip(mcu); + if (err) + return err; + + return omnia_mcu_register_trng(mcu); +} + +static const struct of_device_id of_omnia_mcu_match[] = { + { .compatible = "cznic,turris-omnia-mcu" }, + {} +}; + +static struct i2c_driver omnia_mcu_driver = { + .probe = omnia_mcu_probe, + .driver = { + .name = "turris-omnia-mcu", + .of_match_table = of_omnia_mcu_match, + .dev_groups = omnia_mcu_groups, + }, +}; +module_i2c_driver(omnia_mcu_driver); + +MODULE_AUTHOR("Marek Behun <kabel@kernel.org>"); +MODULE_DESCRIPTION("CZ.NIC's Turris Omnia MCU"); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/cznic/turris-omnia-mcu-gpio.c b/drivers/platform/cznic/turris-omnia-mcu-gpio.c new file mode 100644 index 000000000000..91da56a704c7 --- /dev/null +++ b/drivers/platform/cznic/turris-omnia-mcu-gpio.c @@ -0,0 +1,1095 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * CZ.NIC's Turris Omnia MCU GPIO and IRQ driver + * + * 2024 by Marek Behún <kabel@kernel.org> + */ + +#include <linux/array_size.h> +#include <linux/bitfield.h> +#include <linux/bitops.h> +#include <linux/bug.h> +#include <linux/cleanup.h> +#include <linux/device.h> +#include <linux/devm-helpers.h> +#include <linux/errno.h> +#include <linux/gpio/driver.h> +#include <linux/i2c.h> +#include <linux/interrupt.h> +#include <linux/mutex.h> +#include <linux/sysfs.h> +#include <linux/types.h> +#include <linux/workqueue.h> +#include <asm/unaligned.h> + +#include <linux/turris-omnia-mcu-interface.h> +#include "turris-omnia-mcu.h" + +#define OMNIA_CMD_INT_ARG_LEN 8 +#define FRONT_BUTTON_RELEASE_DELAY_MS 50 + +static const char * const omnia_mcu_gpio_templates[64] = { + /* GPIOs with value read from the 16-bit wide status */ + [4] = "MiniPCIe0 Card Detect", + [5] = 
"MiniPCIe0 mSATA Indicator", + [6] = "Front USB3 port over-current", + [7] = "Rear USB3 port over-current", + [8] = "Front USB3 port power", + [9] = "Rear USB3 port power", + [12] = "Front Button", + + /* GPIOs with value read from the 32-bit wide extended status */ + [16] = "SFP nDET", + [28] = "MiniPCIe0 LED", + [29] = "MiniPCIe1 LED", + [30] = "MiniPCIe2 LED", + [31] = "MiniPCIe0 PAN LED", + [32] = "MiniPCIe1 PAN LED", + [33] = "MiniPCIe2 PAN LED", + [34] = "WAN PHY LED0", + [35] = "WAN PHY LED1", + [36] = "LAN switch p0 LED0", + [37] = "LAN switch p0 LED1", + [38] = "LAN switch p1 LED0", + [39] = "LAN switch p1 LED1", + [40] = "LAN switch p2 LED0", + [41] = "LAN switch p2 LED1", + [42] = "LAN switch p3 LED0", + [43] = "LAN switch p3 LED1", + [44] = "LAN switch p4 LED0", + [45] = "LAN switch p4 LED1", + [46] = "LAN switch p5 LED0", + [47] = "LAN switch p5 LED1", + + /* GPIOs with value read from the 16-bit wide extended control status */ + [48] = "eMMC nRESET", + [49] = "LAN switch nRESET", + [50] = "WAN PHY nRESET", + [51] = "MiniPCIe0 nPERST", + [52] = "MiniPCIe1 nPERST", + [53] = "MiniPCIe2 nPERST", + [54] = "WAN PHY SFP mux", + [56] = "VHV power disable", +}; + +struct omnia_gpio { + u8 cmd; + u8 ctl_cmd; + u8 bit; + u8 ctl_bit; + u8 int_bit; + u16 feat; + u16 feat_mask; +}; + +#define OMNIA_GPIO_INVALID_INT_BIT 0xff + +#define _DEF_GPIO(_cmd, _ctl_cmd, _bit, _ctl_bit, _int_bit, _feat, _feat_mask) \ + { \ + .cmd = _cmd, \ + .ctl_cmd = _ctl_cmd, \ + .bit = _bit, \ + .ctl_bit = _ctl_bit, \ + .int_bit = (_int_bit) < 0 ? OMNIA_GPIO_INVALID_INT_BIT \ + : (_int_bit), \ + .feat = _feat, \ + .feat_mask = _feat_mask, \ + } + +#define _DEF_GPIO_STS(_name) \ + _DEF_GPIO(OMNIA_CMD_GET_STATUS_WORD, 0, __bf_shf(OMNIA_STS_ ## _name), \ + 0, __bf_shf(OMNIA_INT_ ## _name), 0, 0) + +#define _DEF_GPIO_CTL(_name) \ + _DEF_GPIO(OMNIA_CMD_GET_STATUS_WORD, OMNIA_CMD_GENERAL_CONTROL, \ + __bf_shf(OMNIA_STS_ ## _name), __bf_shf(OMNIA_CTL_ ## _name), \ + -1, 0, 0) + +#define _DEF_GPIO_EXT_STS(_name, _feat) \ + _DEF_GPIO(OMNIA_CMD_GET_EXT_STATUS_DWORD, 0, \ + __bf_shf(OMNIA_EXT_STS_ ## _name), 0, \ + __bf_shf(OMNIA_INT_ ## _name), \ + OMNIA_FEAT_ ## _feat | OMNIA_FEAT_EXT_CMDS, \ + OMNIA_FEAT_ ## _feat | OMNIA_FEAT_EXT_CMDS) + +#define _DEF_GPIO_EXT_STS_LED(_name, _ledext) \ + _DEF_GPIO(OMNIA_CMD_GET_EXT_STATUS_DWORD, 0, \ + __bf_shf(OMNIA_EXT_STS_ ## _name), 0, \ + __bf_shf(OMNIA_INT_ ## _name), \ + OMNIA_FEAT_LED_STATE_ ## _ledext, \ + OMNIA_FEAT_LED_STATE_EXT_MASK) + +#define _DEF_GPIO_EXT_STS_LEDALL(_name) \ + _DEF_GPIO(OMNIA_CMD_GET_EXT_STATUS_DWORD, 0, \ + __bf_shf(OMNIA_EXT_STS_ ## _name), 0, \ + __bf_shf(OMNIA_INT_ ## _name), \ + OMNIA_FEAT_LED_STATE_EXT_MASK, 0) + +#define _DEF_GPIO_EXT_CTL(_name, _feat) \ + _DEF_GPIO(OMNIA_CMD_GET_EXT_CONTROL_STATUS, OMNIA_CMD_EXT_CONTROL, \ + __bf_shf(OMNIA_EXT_CTL_ ## _name), \ + __bf_shf(OMNIA_EXT_CTL_ ## _name), -1, \ + OMNIA_FEAT_ ## _feat | OMNIA_FEAT_EXT_CMDS, \ + OMNIA_FEAT_ ## _feat | OMNIA_FEAT_EXT_CMDS) + +#define _DEF_INT(_name) \ + _DEF_GPIO(0, 0, 0, 0, __bf_shf(OMNIA_INT_ ## _name), 0, 0) + +static inline bool is_int_bit_valid(const struct omnia_gpio *gpio) +{ + return gpio->int_bit != OMNIA_GPIO_INVALID_INT_BIT; +} + +static const struct omnia_gpio omnia_gpios[64] = { + /* GPIOs with value read from the 16-bit wide status */ + [4] = _DEF_GPIO_STS(CARD_DET), + [5] = _DEF_GPIO_STS(MSATA_IND), + [6] = _DEF_GPIO_STS(USB30_OVC), + [7] = _DEF_GPIO_STS(USB31_OVC), + [8] = _DEF_GPIO_CTL(USB30_PWRON), + [9] = _DEF_GPIO_CTL(USB31_PWRON), + + /* brightness 
changed interrupt, no GPIO */ + [11] = _DEF_INT(BRIGHTNESS_CHANGED), + + [12] = _DEF_GPIO_STS(BUTTON_PRESSED), + + /* TRNG interrupt, no GPIO */ + [13] = _DEF_INT(TRNG), + + /* MESSAGE_SIGNED interrupt, no GPIO */ + [14] = _DEF_INT(MESSAGE_SIGNED), + + /* GPIOs with value read from the 32-bit wide extended status */ + [16] = _DEF_GPIO_EXT_STS(SFP_nDET, PERIPH_MCU), + [28] = _DEF_GPIO_EXT_STS_LEDALL(WLAN0_MSATA_LED), + [29] = _DEF_GPIO_EXT_STS_LEDALL(WLAN1_LED), + [30] = _DEF_GPIO_EXT_STS_LEDALL(WLAN2_LED), + [31] = _DEF_GPIO_EXT_STS_LED(WPAN0_LED, EXT), + [32] = _DEF_GPIO_EXT_STS_LED(WPAN1_LED, EXT), + [33] = _DEF_GPIO_EXT_STS_LED(WPAN2_LED, EXT), + [34] = _DEF_GPIO_EXT_STS_LEDALL(WAN_LED0), + [35] = _DEF_GPIO_EXT_STS_LED(WAN_LED1, EXT_V32), + [36] = _DEF_GPIO_EXT_STS_LEDALL(LAN0_LED0), + [37] = _DEF_GPIO_EXT_STS_LEDALL(LAN0_LED1), + [38] = _DEF_GPIO_EXT_STS_LEDALL(LAN1_LED0), + [39] = _DEF_GPIO_EXT_STS_LEDALL(LAN1_LED1), + [40] = _DEF_GPIO_EXT_STS_LEDALL(LAN2_LED0), + [41] = _DEF_GPIO_EXT_STS_LEDALL(LAN2_LED1), + [42] = _DEF_GPIO_EXT_STS_LEDALL(LAN3_LED0), + [43] = _DEF_GPIO_EXT_STS_LEDALL(LAN3_LED1), + [44] = _DEF_GPIO_EXT_STS_LEDALL(LAN4_LED0), + [45] = _DEF_GPIO_EXT_STS_LEDALL(LAN4_LED1), + [46] = _DEF_GPIO_EXT_STS_LEDALL(LAN5_LED0), + [47] = _DEF_GPIO_EXT_STS_LEDALL(LAN5_LED1), + + /* GPIOs with value read from the 16-bit wide extended control status */ + [48] = _DEF_GPIO_EXT_CTL(nRES_MMC, PERIPH_MCU), + [49] = _DEF_GPIO_EXT_CTL(nRES_LAN, PERIPH_MCU), + [50] = _DEF_GPIO_EXT_CTL(nRES_PHY, PERIPH_MCU), + [51] = _DEF_GPIO_EXT_CTL(nPERST0, PERIPH_MCU), + [52] = _DEF_GPIO_EXT_CTL(nPERST1, PERIPH_MCU), + [53] = _DEF_GPIO_EXT_CTL(nPERST2, PERIPH_MCU), + [54] = _DEF_GPIO_EXT_CTL(PHY_SFP, PERIPH_MCU), + [56] = _DEF_GPIO_EXT_CTL(nVHV_CTRL, PERIPH_MCU), +}; + +/* mapping from interrupts to indexes of GPIOs in the omnia_gpios array */ +const u8 omnia_int_to_gpio_idx[32] = { + [__bf_shf(OMNIA_INT_CARD_DET)] = 4, + [__bf_shf(OMNIA_INT_MSATA_IND)] = 5, + [__bf_shf(OMNIA_INT_USB30_OVC)] = 6, + [__bf_shf(OMNIA_INT_USB31_OVC)] = 7, + [__bf_shf(OMNIA_INT_BUTTON_PRESSED)] = 12, + [__bf_shf(OMNIA_INT_TRNG)] = 13, + [__bf_shf(OMNIA_INT_MESSAGE_SIGNED)] = 14, + [__bf_shf(OMNIA_INT_SFP_nDET)] = 16, + [__bf_shf(OMNIA_INT_BRIGHTNESS_CHANGED)] = 11, + [__bf_shf(OMNIA_INT_WLAN0_MSATA_LED)] = 28, + [__bf_shf(OMNIA_INT_WLAN1_LED)] = 29, + [__bf_shf(OMNIA_INT_WLAN2_LED)] = 30, + [__bf_shf(OMNIA_INT_WPAN0_LED)] = 31, + [__bf_shf(OMNIA_INT_WPAN1_LED)] = 32, + [__bf_shf(OMNIA_INT_WPAN2_LED)] = 33, + [__bf_shf(OMNIA_INT_WAN_LED0)] = 34, + [__bf_shf(OMNIA_INT_WAN_LED1)] = 35, + [__bf_shf(OMNIA_INT_LAN0_LED0)] = 36, + [__bf_shf(OMNIA_INT_LAN0_LED1)] = 37, + [__bf_shf(OMNIA_INT_LAN1_LED0)] = 38, + [__bf_shf(OMNIA_INT_LAN1_LED1)] = 39, + [__bf_shf(OMNIA_INT_LAN2_LED0)] = 40, + [__bf_shf(OMNIA_INT_LAN2_LED1)] = 41, + [__bf_shf(OMNIA_INT_LAN3_LED0)] = 42, + [__bf_shf(OMNIA_INT_LAN3_LED1)] = 43, + [__bf_shf(OMNIA_INT_LAN4_LED0)] = 44, + [__bf_shf(OMNIA_INT_LAN4_LED1)] = 45, + [__bf_shf(OMNIA_INT_LAN5_LED0)] = 46, + [__bf_shf(OMNIA_INT_LAN5_LED1)] = 47, +}; + +/* index of PHY_SFP GPIO in the omnia_gpios array */ +#define OMNIA_GPIO_PHY_SFP_OFFSET 54 + +static int omnia_ctl_cmd_locked(struct omnia_mcu *mcu, u8 cmd, u16 val, u16 mask) +{ + unsigned int len; + u8 buf[5]; + + buf[0] = cmd; + + switch (cmd) { + case OMNIA_CMD_GENERAL_CONTROL: + buf[1] = val; + buf[2] = mask; + len = 3; + break; + + case OMNIA_CMD_EXT_CONTROL: + put_unaligned_le16(val, &buf[1]); + put_unaligned_le16(mask, &buf[3]); + len = 5; + break; + + default: + 
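+ /* only the two control commands above carry a control mask */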
BUG(); + } + + return omnia_cmd_write(mcu->client, buf, len); +} + +static int omnia_ctl_cmd(struct omnia_mcu *mcu, u8 cmd, u16 val, u16 mask) +{ + guard(mutex)(&mcu->lock); + + return omnia_ctl_cmd_locked(mcu, cmd, val, mask); +} + +static int omnia_gpio_request(struct gpio_chip *gc, unsigned int offset) +{ + if (!omnia_gpios[offset].cmd) + return -EINVAL; + + return 0; +} + +static int omnia_gpio_get_direction(struct gpio_chip *gc, unsigned int offset) +{ + struct omnia_mcu *mcu = gpiochip_get_data(gc); + + if (offset == OMNIA_GPIO_PHY_SFP_OFFSET) { + int val; + + scoped_guard(mutex, &mcu->lock) { + val = omnia_cmd_read_bit(mcu->client, + OMNIA_CMD_GET_EXT_CONTROL_STATUS, + OMNIA_EXT_CTL_PHY_SFP_AUTO); + if (val < 0) + return val; + } + + if (val) + return GPIO_LINE_DIRECTION_IN; + + return GPIO_LINE_DIRECTION_OUT; + } + + if (omnia_gpios[offset].ctl_cmd) + return GPIO_LINE_DIRECTION_OUT; + + return GPIO_LINE_DIRECTION_IN; +} + +static int omnia_gpio_direction_input(struct gpio_chip *gc, unsigned int offset) +{ + const struct omnia_gpio *gpio = &omnia_gpios[offset]; + struct omnia_mcu *mcu = gpiochip_get_data(gc); + + if (offset == OMNIA_GPIO_PHY_SFP_OFFSET) + return omnia_ctl_cmd(mcu, OMNIA_CMD_EXT_CONTROL, + OMNIA_EXT_CTL_PHY_SFP_AUTO, + OMNIA_EXT_CTL_PHY_SFP_AUTO); + + if (gpio->ctl_cmd) + return -ENOTSUPP; + + return 0; +} + +static int omnia_gpio_direction_output(struct gpio_chip *gc, + unsigned int offset, int value) +{ + const struct omnia_gpio *gpio = &omnia_gpios[offset]; + struct omnia_mcu *mcu = gpiochip_get_data(gc); + u16 val, mask; + + if (!gpio->ctl_cmd) + return -ENOTSUPP; + + mask = BIT(gpio->ctl_bit); + val = value ? mask : 0; + + if (offset == OMNIA_GPIO_PHY_SFP_OFFSET) + mask |= OMNIA_EXT_CTL_PHY_SFP_AUTO; + + return omnia_ctl_cmd(mcu, gpio->ctl_cmd, val, mask); +} + +static int omnia_gpio_get(struct gpio_chip *gc, unsigned int offset) +{ + const struct omnia_gpio *gpio = &omnia_gpios[offset]; + struct omnia_mcu *mcu = gpiochip_get_data(gc); + + /* + * If firmware does not support the new interrupt API, we are informed + * of every change of the status word by an interrupt from MCU and save + * its value in the interrupt service routine. Simply return the saved + * value. + */ + if (gpio->cmd == OMNIA_CMD_GET_STATUS_WORD && + !(mcu->features & OMNIA_FEAT_NEW_INT_API)) + return test_bit(gpio->bit, &mcu->last_status); + + guard(mutex)(&mcu->lock); + + /* + * If firmware does support the new interrupt API, we may have cached + * the value of a GPIO in the interrupt service routine. If not, read + * the relevant bit now. 
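+ * (Only GPIOs with both rising and falling edge interrupts enabled are cached: the interrupt service routine stores the level implied by the last reported edge in mcu->cached and marks it in mcu->is_cached, saving a slow I2C transaction here.)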
+ */ + if (is_int_bit_valid(gpio) && test_bit(gpio->int_bit, &mcu->is_cached)) + return test_bit(gpio->int_bit, &mcu->cached); + + return omnia_cmd_read_bit(mcu->client, gpio->cmd, BIT(gpio->bit)); +} + +static unsigned long * +_relevant_field_for_sts_cmd(u8 cmd, unsigned long *sts, unsigned long *ext_sts, + unsigned long *ext_ctl) +{ + switch (cmd) { + case OMNIA_CMD_GET_STATUS_WORD: + return sts; + case OMNIA_CMD_GET_EXT_STATUS_DWORD: + return ext_sts; + case OMNIA_CMD_GET_EXT_CONTROL_STATUS: + return ext_ctl; + default: + return NULL; + } +} + +static int omnia_gpio_get_multiple(struct gpio_chip *gc, unsigned long *mask, + unsigned long *bits) +{ + unsigned long sts = 0, ext_sts = 0, ext_ctl = 0, *field; + struct omnia_mcu *mcu = gpiochip_get_data(gc); + struct i2c_client *client = mcu->client; + unsigned int i; + int err; + + /* determine which bits to read from the 3 possible commands */ + for_each_set_bit(i, mask, ARRAY_SIZE(omnia_gpios)) { + field = _relevant_field_for_sts_cmd(omnia_gpios[i].cmd, + &sts, &ext_sts, &ext_ctl); + if (!field) + continue; + + __set_bit(omnia_gpios[i].bit, field); + } + + guard(mutex)(&mcu->lock); + + if (mcu->features & OMNIA_FEAT_NEW_INT_API) { + /* read relevant bits from status */ + err = omnia_cmd_read_bits(client, OMNIA_CMD_GET_STATUS_WORD, + sts, &sts); + if (err) + return err; + } else { + /* + * Use status word value cached in the interrupt service routine + * if firmware does not support the new interrupt API. + */ + sts = mcu->last_status; + } + + /* read relevant bits from extended status */ + err = omnia_cmd_read_bits(client, OMNIA_CMD_GET_EXT_STATUS_DWORD, + ext_sts, &ext_sts); + if (err) + return err; + + /* read relevant bits from extended control */ + err = omnia_cmd_read_bits(client, OMNIA_CMD_GET_EXT_CONTROL_STATUS, + ext_ctl, &ext_ctl); + if (err) + return err; + + /* assign relevant bits in result */ + for_each_set_bit(i, mask, ARRAY_SIZE(omnia_gpios)) { + field = _relevant_field_for_sts_cmd(omnia_gpios[i].cmd, + &sts, &ext_sts, &ext_ctl); + if (!field) + continue; + + __assign_bit(i, bits, test_bit(omnia_gpios[i].bit, field)); + } + + return 0; +} + +static void omnia_gpio_set(struct gpio_chip *gc, unsigned int offset, int value) +{ + const struct omnia_gpio *gpio = &omnia_gpios[offset]; + struct omnia_mcu *mcu = gpiochip_get_data(gc); + u16 val, mask; + + if (!gpio->ctl_cmd) + return; + + mask = BIT(gpio->ctl_bit); + val = value ? 
mask : 0; + + omnia_ctl_cmd(mcu, gpio->ctl_cmd, val, mask); +} + +static void omnia_gpio_set_multiple(struct gpio_chip *gc, unsigned long *mask, + unsigned long *bits) +{ + unsigned long ctl = 0, ctl_mask = 0, ext_ctl = 0, ext_ctl_mask = 0; + struct omnia_mcu *mcu = gpiochip_get_data(gc); + unsigned int i; + + for_each_set_bit(i, mask, ARRAY_SIZE(omnia_gpios)) { + unsigned long *field, *field_mask; + u8 bit = omnia_gpios[i].ctl_bit; + + switch (omnia_gpios[i].ctl_cmd) { + case OMNIA_CMD_GENERAL_CONTROL: + field = &ctl; + field_mask = &ctl_mask; + break; + case OMNIA_CMD_EXT_CONTROL: + field = &ext_ctl; + field_mask = &ext_ctl_mask; + break; + default: + field = field_mask = NULL; + break; + } + + if (!field) + continue; + + __set_bit(bit, field_mask); + __assign_bit(bit, field, test_bit(i, bits)); + } + + guard(mutex)(&mcu->lock); + + if (ctl_mask) + omnia_ctl_cmd_locked(mcu, OMNIA_CMD_GENERAL_CONTROL, + ctl, ctl_mask); + + if (ext_ctl_mask) + omnia_ctl_cmd_locked(mcu, OMNIA_CMD_EXT_CONTROL, + ext_ctl, ext_ctl_mask); +} + +static bool omnia_gpio_available(struct omnia_mcu *mcu, + const struct omnia_gpio *gpio) +{ + if (gpio->feat_mask) + return (mcu->features & gpio->feat_mask) == gpio->feat; + + if (gpio->feat) + return mcu->features & gpio->feat; + + return true; +} + +static int omnia_gpio_init_valid_mask(struct gpio_chip *gc, + unsigned long *valid_mask, + unsigned int ngpios) +{ + struct omnia_mcu *mcu = gpiochip_get_data(gc); + + for (unsigned int i = 0; i < ngpios; i++) { + const struct omnia_gpio *gpio = &omnia_gpios[i]; + + if (gpio->cmd || is_int_bit_valid(gpio)) + __assign_bit(i, valid_mask, + omnia_gpio_available(mcu, gpio)); + else + __clear_bit(i, valid_mask); + } + + return 0; +} + +static int omnia_gpio_of_xlate(struct gpio_chip *gc, + const struct of_phandle_args *gpiospec, + u32 *flags) +{ + u32 bank, gpio; + + if (WARN_ON(gpiospec->args_count != 3)) + return -EINVAL; + + if (flags) + *flags = gpiospec->args[2]; + + bank = gpiospec->args[0]; + gpio = gpiospec->args[1]; + + switch (bank) { + case 0: + return gpio < 16 ? gpio : -EINVAL; + case 1: + return gpio < 32 ? 16 + gpio : -EINVAL; + case 2: + return gpio < 16 ? 
48 + gpio : -EINVAL; + default: + return -EINVAL; + } +} + +static void omnia_irq_shutdown(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct omnia_mcu *mcu = gpiochip_get_data(gc); + irq_hw_number_t hwirq = irqd_to_hwirq(d); + u8 bit = omnia_gpios[hwirq].int_bit; + + __clear_bit(bit, &mcu->rising); + __clear_bit(bit, &mcu->falling); +} + +static void omnia_irq_mask(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct omnia_mcu *mcu = gpiochip_get_data(gc); + irq_hw_number_t hwirq = irqd_to_hwirq(d); + u8 bit = omnia_gpios[hwirq].int_bit; + + if (!omnia_gpios[hwirq].cmd) + __clear_bit(bit, &mcu->rising); + __clear_bit(bit, &mcu->mask); + gpiochip_disable_irq(gc, hwirq); +} + +static void omnia_irq_unmask(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct omnia_mcu *mcu = gpiochip_get_data(gc); + irq_hw_number_t hwirq = irqd_to_hwirq(d); + u8 bit = omnia_gpios[hwirq].int_bit; + + gpiochip_enable_irq(gc, hwirq); + __set_bit(bit, &mcu->mask); + if (!omnia_gpios[hwirq].cmd) + __set_bit(bit, &mcu->rising); +} + +static int omnia_irq_set_type(struct irq_data *d, unsigned int type) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct omnia_mcu *mcu = gpiochip_get_data(gc); + irq_hw_number_t hwirq = irqd_to_hwirq(d); + struct device *dev = &mcu->client->dev; + u8 bit = omnia_gpios[hwirq].int_bit; + + if (!(type & IRQ_TYPE_EDGE_BOTH)) { + dev_err(dev, "irq %u: unsupported type %u\n", d->irq, type); + return -EINVAL; + } + + __assign_bit(bit, &mcu->rising, type & IRQ_TYPE_EDGE_RISING); + __assign_bit(bit, &mcu->falling, type & IRQ_TYPE_EDGE_FALLING); + + return 0; +} + +static void omnia_irq_bus_lock(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct omnia_mcu *mcu = gpiochip_get_data(gc); + + /* nothing to do if MCU firmware does not support new interrupt API */ + if (!(mcu->features & OMNIA_FEAT_NEW_INT_API)) + return; + + mutex_lock(&mcu->lock); +} + +/** + * omnia_mask_interleave - Interleaves the bytes from @rising and @falling + * @dst: the destination u8 array of interleaved bytes + * @rising: rising mask + * @falling: falling mask + * + * Interleaves the little-endian bytes from @rising and @falling words. + * + * If @rising = (r0, r1, r2, r3) and @falling = (f0, f1, f2, f3), the result is + * @dst = (r0, f0, r1, f1, r2, f2, r3, f3). + * + * The MCU receives an interrupt mask and reports a pending interrupt bitmap in + * this interleaved format. The rationale behind this is that the low-indexed + * bits are more important - in many cases, the user will be interested only in + * interrupts with indexes 0 to 7, and so the system can stop reading after + * first 2 bytes (r0, f0), to save time on the slow I2C bus. + * + * Feel free to remove this function and its inverse, omnia_mask_deinterleave, + * and use an appropriate bitmap_*() function once such a function exists. 
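+ * Example: @rising = 0x00000102 and @falling = 0x00000001 interleave to @dst = (0x02, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00).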
+ */ +static void +omnia_mask_interleave(u8 *dst, unsigned long rising, unsigned long falling) +{ + for (unsigned int i = 0; i < sizeof(u32); i++) { + dst[2 * i] = rising >> (8 * i); + dst[2 * i + 1] = falling >> (8 * i); + } +} + +/** + * omnia_mask_deinterleave - Deinterleaves the bytes into @rising and @falling + * @src: the source u8 array containing the interleaved bytes + * @rising: pointer where to store the rising mask gathered from @src + * @falling: pointer where to store the falling mask gathered from @src + * + * This is the inverse function to omnia_mask_interleave. + */ +static void omnia_mask_deinterleave(const u8 *src, unsigned long *rising, + unsigned long *falling) +{ + *rising = *falling = 0; + + for (unsigned int i = 0; i < sizeof(u32); i++) { + *rising |= src[2 * i] << (8 * i); + *falling |= src[2 * i + 1] << (8 * i); + } +} + +static void omnia_irq_bus_sync_unlock(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct omnia_mcu *mcu = gpiochip_get_data(gc); + struct device *dev = &mcu->client->dev; + u8 cmd[1 + OMNIA_CMD_INT_ARG_LEN]; + unsigned long rising, falling; + int err; + + /* nothing to do if MCU firmware does not support new interrupt API */ + if (!(mcu->features & OMNIA_FEAT_NEW_INT_API)) + return; + + cmd[0] = OMNIA_CMD_SET_INT_MASK; + + rising = mcu->rising & mcu->mask; + falling = mcu->falling & mcu->mask; + + /* interleave the rising and falling bytes into the command arguments */ + omnia_mask_interleave(&cmd[1], rising, falling); + + dev_dbg(dev, "set int mask %8ph\n", &cmd[1]); + + err = omnia_cmd_write(mcu->client, cmd, sizeof(cmd)); + if (err) { + dev_err(dev, "Cannot set mask: %d\n", err); + goto unlock; + } + + /* + * Remember which GPIOs have both rising and falling interrupts enabled. + * For those we will cache their value so that .get() method is faster. + * We also need to forget cached values of GPIOs that aren't cached + * anymore. + */ + mcu->both = rising & falling; + mcu->is_cached &= mcu->both; + +unlock: + mutex_unlock(&mcu->lock); +} + +static const struct irq_chip omnia_mcu_irq_chip = { + .name = "Turris Omnia MCU interrupts", + .irq_shutdown = omnia_irq_shutdown, + .irq_mask = omnia_irq_mask, + .irq_unmask = omnia_irq_unmask, + .irq_set_type = omnia_irq_set_type, + .irq_bus_lock = omnia_irq_bus_lock, + .irq_bus_sync_unlock = omnia_irq_bus_sync_unlock, + .flags = IRQCHIP_IMMUTABLE, + GPIOCHIP_IRQ_RESOURCE_HELPERS, +}; + +static void omnia_irq_init_valid_mask(struct gpio_chip *gc, + unsigned long *valid_mask, + unsigned int ngpios) +{ + struct omnia_mcu *mcu = gpiochip_get_data(gc); + + for (unsigned int i = 0; i < ngpios; i++) { + const struct omnia_gpio *gpio = &omnia_gpios[i]; + + if (is_int_bit_valid(gpio)) + __assign_bit(i, valid_mask, + omnia_gpio_available(mcu, gpio)); + else + __clear_bit(i, valid_mask); + } +} + +static int omnia_irq_init_hw(struct gpio_chip *gc) +{ + struct omnia_mcu *mcu = gpiochip_get_data(gc); + u8 cmd[1 + OMNIA_CMD_INT_ARG_LEN] = {}; + + cmd[0] = OMNIA_CMD_SET_INT_MASK; + + return omnia_cmd_write(mcu->client, cmd, sizeof(cmd)); +} + +/* + * Determine how many bytes we need to read from the reply to the + * OMNIA_CMD_GET_INT_AND_CLEAR command in order to retrieve all unmasked + * interrupts. 
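+ * If, for example, only interrupts 0 to 7 are unmasked in both masks, the highest set bit of each mask lies in byte 0, so reading the first 2 interleaved bytes (r0, f0) is sufficient.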
+ */ +static unsigned int +omnia_irq_compute_pending_length(unsigned long rising, unsigned long falling) +{ + return max(omnia_compute_reply_length(rising, true, 0), + omnia_compute_reply_length(falling, true, 1)); +} + +static bool omnia_irq_read_pending_new(struct omnia_mcu *mcu, + unsigned long *pending) +{ + struct device *dev = &mcu->client->dev; + u8 reply[OMNIA_CMD_INT_ARG_LEN] = {}; + unsigned long rising, falling; + unsigned int len; + int err; + + len = omnia_irq_compute_pending_length(mcu->rising & mcu->mask, + mcu->falling & mcu->mask); + if (!len) + return false; + + guard(mutex)(&mcu->lock); + + err = omnia_cmd_read(mcu->client, OMNIA_CMD_GET_INT_AND_CLEAR, reply, + len); + if (err) { + dev_err(dev, "Cannot read pending IRQs: %d\n", err); + return false; + } + + /* deinterleave the reply bytes into rising and falling */ + omnia_mask_deinterleave(reply, &rising, &falling); + + rising &= mcu->mask; + falling &= mcu->mask; + *pending = rising | falling; + + /* cache values for GPIOs that have both edges enabled */ + mcu->is_cached &= ~(rising & falling); + mcu->is_cached |= mcu->both & (rising ^ falling); + mcu->cached = (mcu->cached | rising) & ~falling; + + return true; +} + +static int omnia_read_status_word_old_fw(struct omnia_mcu *mcu, + unsigned long *status) +{ + u16 raw_status; + int err; + + err = omnia_cmd_read_u16(mcu->client, OMNIA_CMD_GET_STATUS_WORD, + &raw_status); + if (err) + return err; + + /* + * Old firmware has a bug wherein it never resets the USB port + * overcurrent bits back to zero. Ignore them. + */ + *status = raw_status & ~(OMNIA_STS_USB30_OVC | OMNIA_STS_USB31_OVC); + + return 0; +} + +static void button_release_emul_fn(struct work_struct *work) +{ + struct omnia_mcu *mcu = container_of(to_delayed_work(work), + struct omnia_mcu, + button_release_emul_work); + + mcu->button_pressed_emul = false; + generic_handle_irq_safe(mcu->client->irq); +} + +static void +fill_int_from_sts(unsigned long *rising, unsigned long *falling, + unsigned long rising_sts, unsigned long falling_sts, + unsigned long sts_bit, unsigned long int_bit) +{ + if (rising_sts & sts_bit) + *rising |= int_bit; + if (falling_sts & sts_bit) + *falling |= int_bit; +} + +static bool omnia_irq_read_pending_old(struct omnia_mcu *mcu, + unsigned long *pending) +{ + unsigned long status, rising_sts, falling_sts, rising, falling; + struct device *dev = &mcu->client->dev; + int err; + + guard(mutex)(&mcu->lock); + + err = omnia_read_status_word_old_fw(mcu, &status); + if (err) { + dev_err(dev, "Cannot read pending IRQs: %d\n", err); + return false; + } + + /* + * The old firmware triggers an interrupt whenever status word changes, + * but does not inform about which bits rose or fell. We need to compute + * this here by comparing with the last status word value. + * + * The OMNIA_STS_BUTTON_PRESSED bit needs special handling, because the + * old firmware clears the OMNIA_STS_BUTTON_PRESSED bit on successful + * completion of the OMNIA_CMD_GET_STATUS_WORD command, resulting in + * another interrupt: + * - first we get an interrupt, we read the status word where + * OMNIA_STS_BUTTON_PRESSED is present, + * - MCU clears the OMNIA_STS_BUTTON_PRESSED bit because we read the + * status word, + * - we get another interrupt because the status word changed again + * (the OMNIA_STS_BUTTON_PRESSED bit was cleared). + * + * The gpiolib-cdev, gpiolib-sysfs and gpio-keys input driver all call + * the gpiochip's .get() method after an edge event on a requested GPIO + * occurs. 
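+ * If the OMNIA_STS_BUTTON_PRESSED bit were already cleared again by the time they read it, the press would look spurious to them.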
+ * + * We ensure that the .get() method reads 1 for the button GPIO for some + * time. + */ + + if (status & OMNIA_STS_BUTTON_PRESSED) { + mcu->button_pressed_emul = true; + mod_delayed_work(system_wq, &mcu->button_release_emul_work, + msecs_to_jiffies(FRONT_BUTTON_RELEASE_DELAY_MS)); + } else if (mcu->button_pressed_emul) { + status |= OMNIA_STS_BUTTON_PRESSED; + } + + rising_sts = ~mcu->last_status & status; + falling_sts = mcu->last_status & ~status; + + mcu->last_status = status; + + /* + * Fill in the relevant interrupt bits from status bits for CARD_DET, + * MSATA_IND and BUTTON_PRESSED. + */ + rising = 0; + falling = 0; + fill_int_from_sts(&rising, &falling, rising_sts, falling_sts, + OMNIA_STS_CARD_DET, OMNIA_INT_CARD_DET); + fill_int_from_sts(&rising, &falling, rising_sts, falling_sts, + OMNIA_STS_MSATA_IND, OMNIA_INT_MSATA_IND); + fill_int_from_sts(&rising, &falling, rising_sts, falling_sts, + OMNIA_STS_BUTTON_PRESSED, OMNIA_INT_BUTTON_PRESSED); + + /* Use only bits that are enabled */ + rising &= mcu->rising & mcu->mask; + falling &= mcu->falling & mcu->mask; + *pending = rising | falling; + + return true; +} + +static bool omnia_irq_read_pending(struct omnia_mcu *mcu, + unsigned long *pending) +{ + if (mcu->features & OMNIA_FEAT_NEW_INT_API) + return omnia_irq_read_pending_new(mcu, pending); + else + return omnia_irq_read_pending_old(mcu, pending); +} + +static irqreturn_t omnia_irq_thread_handler(int irq, void *dev_id) +{ + struct omnia_mcu *mcu = dev_id; + struct irq_domain *domain; + unsigned long pending; + unsigned int i; + + if (!omnia_irq_read_pending(mcu, &pending)) + return IRQ_NONE; + + domain = mcu->gc.irq.domain; + + for_each_set_bit(i, &pending, 32) { + unsigned int nested_irq; + + nested_irq = irq_find_mapping(domain, omnia_int_to_gpio_idx[i]); + + handle_nested_irq(nested_irq); + } + + return IRQ_RETVAL(pending); +} + +static const char * const front_button_modes[] = { "mcu", "cpu" }; + +static ssize_t front_button_mode_show(struct device *dev, + struct device_attribute *a, char *buf) +{ + struct omnia_mcu *mcu = dev_get_drvdata(dev); + int val; + + if (mcu->features & OMNIA_FEAT_NEW_INT_API) { + val = omnia_cmd_read_bit(mcu->client, OMNIA_CMD_GET_STATUS_WORD, + OMNIA_STS_BUTTON_MODE); + if (val < 0) + return val; + } else { + val = !!(mcu->last_status & OMNIA_STS_BUTTON_MODE); + } + + return sysfs_emit(buf, "%s\n", front_button_modes[val]); +} + +static ssize_t front_button_mode_store(struct device *dev, + struct device_attribute *a, + const char *buf, size_t count) +{ + struct omnia_mcu *mcu = dev_get_drvdata(dev); + int err, i; + + i = sysfs_match_string(front_button_modes, buf); + if (i < 0) + return i; + + err = omnia_ctl_cmd_locked(mcu, OMNIA_CMD_GENERAL_CONTROL, + i ? 
OMNIA_CTL_BUTTON_MODE : 0, + OMNIA_CTL_BUTTON_MODE); + if (err) + return err; + + return count; +} +static DEVICE_ATTR_RW(front_button_mode); + +static struct attribute *omnia_mcu_gpio_attrs[] = { + &dev_attr_front_button_mode.attr, + NULL +}; + +const struct attribute_group omnia_mcu_gpio_group = { + .attrs = omnia_mcu_gpio_attrs, +}; + +int omnia_mcu_register_gpiochip(struct omnia_mcu *mcu) +{ + bool new_api = mcu->features & OMNIA_FEAT_NEW_INT_API; + struct device *dev = &mcu->client->dev; + unsigned long irqflags; + int err; + + err = devm_mutex_init(dev, &mcu->lock); + if (err) + return err; + + mcu->gc.request = omnia_gpio_request; + mcu->gc.get_direction = omnia_gpio_get_direction; + mcu->gc.direction_input = omnia_gpio_direction_input; + mcu->gc.direction_output = omnia_gpio_direction_output; + mcu->gc.get = omnia_gpio_get; + mcu->gc.get_multiple = omnia_gpio_get_multiple; + mcu->gc.set = omnia_gpio_set; + mcu->gc.set_multiple = omnia_gpio_set_multiple; + mcu->gc.init_valid_mask = omnia_gpio_init_valid_mask; + mcu->gc.can_sleep = true; + mcu->gc.names = omnia_mcu_gpio_templates; + mcu->gc.base = -1; + mcu->gc.ngpio = ARRAY_SIZE(omnia_gpios); + mcu->gc.label = "Turris Omnia MCU GPIOs"; + mcu->gc.parent = dev; + mcu->gc.owner = THIS_MODULE; + mcu->gc.of_gpio_n_cells = 3; + mcu->gc.of_xlate = omnia_gpio_of_xlate; + + gpio_irq_chip_set_chip(&mcu->gc.irq, &omnia_mcu_irq_chip); + /* This will let us handle the parent IRQ in the driver */ + mcu->gc.irq.parent_handler = NULL; + mcu->gc.irq.num_parents = 0; + mcu->gc.irq.parents = NULL; + mcu->gc.irq.default_type = IRQ_TYPE_NONE; + mcu->gc.irq.handler = handle_bad_irq; + mcu->gc.irq.threaded = true; + if (new_api) + mcu->gc.irq.init_hw = omnia_irq_init_hw; + mcu->gc.irq.init_valid_mask = omnia_irq_init_valid_mask; + + err = devm_gpiochip_add_data(dev, &mcu->gc, mcu); + if (err) + return dev_err_probe(dev, err, "Cannot add GPIO chip\n"); + + /* + * Before requesting the interrupt, if firmware does not support the new + * interrupt API, we need to cache the value of the status word, so that + * when it changes, we may compare the new value with the cached one in + * the interrupt handler. + */ + if (!new_api) { + err = omnia_read_status_word_old_fw(mcu, &mcu->last_status); + if (err) + return dev_err_probe(dev, err, + "Cannot read status word\n"); + + INIT_DELAYED_WORK(&mcu->button_release_emul_work, + button_release_emul_fn); + } + + irqflags = IRQF_ONESHOT; + if (new_api) + irqflags |= IRQF_TRIGGER_LOW; + else + irqflags |= IRQF_TRIGGER_FALLING; + + err = devm_request_threaded_irq(dev, mcu->client->irq, NULL, + omnia_irq_thread_handler, irqflags, + "turris-omnia-mcu", mcu); + if (err) + return dev_err_probe(dev, err, "Cannot request IRQ\n"); + + if (!new_api) { + /* + * The button_release_emul_work has to be initialized before the + * thread is requested, and on driver remove it needs to be + * canceled before the thread is freed. Therefore we can't use + * devm_delayed_work_autocancel() directly, because the order + * devm_delayed_work_autocancel(); + * devm_request_threaded_irq(); + * would cause improper release order: + * free_irq(); + * cancel_delayed_work_sync(); + * Instead we first initialize the work above, and only now + * after IRQ is requested we add the work devm action. 
+ */ + err = devm_add_action(dev, devm_delayed_work_drop, + &mcu->button_release_emul_work); + if (err) + return err; + } + + return 0; +} diff --git a/drivers/platform/cznic/turris-omnia-mcu-sys-off-wakeup.c b/drivers/platform/cznic/turris-omnia-mcu-sys-off-wakeup.c new file mode 100644 index 000000000000..0e8ab15b6037 --- /dev/null +++ b/drivers/platform/cznic/turris-omnia-mcu-sys-off-wakeup.c @@ -0,0 +1,260 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * CZ.NIC's Turris Omnia MCU system off and RTC wakeup driver + * + * This is not a true RTC driver (in the sense that it does not provide a + * real-time clock), rather the MCU implements a wakeup from powered off state + * at a specified time relative to MCU boot, and we expose this feature via RTC + * alarm, so that it can be used via the rtcwake command, which is the standard + * Linux command for this. + * + * 2024 by Marek Behún <kabel@kernel.org> + */ + +#include <linux/crc32.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/kstrtox.h> +#include <linux/reboot.h> +#include <linux/rtc.h> +#include <linux/sysfs.h> +#include <linux/types.h> + +#include <linux/turris-omnia-mcu-interface.h> +#include "turris-omnia-mcu.h" + +static int omnia_get_uptime_wakeup(const struct i2c_client *client, u32 *uptime, + u32 *wakeup) +{ + __le32 reply[2]; + int err; + + err = omnia_cmd_read(client, OMNIA_CMD_GET_UPTIME_AND_WAKEUP, reply, + sizeof(reply)); + if (err) + return err; + + if (uptime) + *uptime = le32_to_cpu(reply[0]); + + if (wakeup) + *wakeup = le32_to_cpu(reply[1]); + + return 0; +} + +static int omnia_read_time(struct device *dev, struct rtc_time *tm) +{ + u32 uptime; + int err; + + err = omnia_get_uptime_wakeup(to_i2c_client(dev), &uptime, NULL); + if (err) + return err; + + rtc_time64_to_tm(uptime, tm); + + return 0; +} + +static int omnia_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + struct i2c_client *client = to_i2c_client(dev); + struct omnia_mcu *mcu = i2c_get_clientdata(client); + u32 wakeup; + int err; + + err = omnia_get_uptime_wakeup(client, NULL, &wakeup); + if (err) + return err; + + alrm->enabled = !!wakeup; + rtc_time64_to_tm(wakeup ?: mcu->rtc_alarm, &alrm->time); + + return 0; +} + +static int omnia_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + struct i2c_client *client = to_i2c_client(dev); + struct omnia_mcu *mcu = i2c_get_clientdata(client); + + mcu->rtc_alarm = rtc_tm_to_time64(&alrm->time); + + if (alrm->enabled) + return omnia_cmd_write_u32(client, OMNIA_CMD_SET_WAKEUP, + mcu->rtc_alarm); + + return 0; +} + +static int omnia_alarm_irq_enable(struct device *dev, unsigned int enabled) +{ + struct i2c_client *client = to_i2c_client(dev); + struct omnia_mcu *mcu = i2c_get_clientdata(client); + + return omnia_cmd_write_u32(client, OMNIA_CMD_SET_WAKEUP, + enabled ? 
mcu->rtc_alarm : 0); +} + +static const struct rtc_class_ops omnia_rtc_ops = { + .read_time = omnia_read_time, + .read_alarm = omnia_read_alarm, + .set_alarm = omnia_set_alarm, + .alarm_irq_enable = omnia_alarm_irq_enable, +}; + +static int omnia_power_off(struct sys_off_data *data) +{ + struct omnia_mcu *mcu = data->cb_data; + __be32 tmp; + u8 cmd[9]; + u16 arg; + int err; + + if (mcu->front_button_poweron) + arg = OMNIA_CMD_POWER_OFF_POWERON_BUTTON; + else + arg = 0; + + cmd[0] = OMNIA_CMD_POWER_OFF; + put_unaligned_le16(OMNIA_CMD_POWER_OFF_MAGIC, &cmd[1]); + put_unaligned_le16(arg, &cmd[3]); + + /* + * Although all values from and to MCU are passed in little-endian, the + * MCU's CRC unit uses big-endian CRC32 polynomial (0x04c11db7), so we + * need to use crc32_be() here. + */ + tmp = cpu_to_be32(get_unaligned_le32(&cmd[1])); + put_unaligned_le32(crc32_be(~0, (void *)&tmp, sizeof(tmp)), &cmd[5]); + + err = omnia_cmd_write(mcu->client, cmd, sizeof(cmd)); + if (err) + dev_err(&mcu->client->dev, + "Unable to send the poweroff command: %d\n", err); + + return NOTIFY_DONE; +} + +static int omnia_restart(struct sys_off_data *data) +{ + struct omnia_mcu *mcu = data->cb_data; + u8 cmd[3]; + int err; + + cmd[0] = OMNIA_CMD_GENERAL_CONTROL; + + if (reboot_mode == REBOOT_HARD) + cmd[1] = cmd[2] = OMNIA_CTL_HARD_RST; + else + cmd[1] = cmd[2] = OMNIA_CTL_LIGHT_RST; + + err = omnia_cmd_write(mcu->client, cmd, sizeof(cmd)); + if (err) + dev_err(&mcu->client->dev, + "Unable to send the restart command: %d\n", err); + + /* + * MCU needs a little bit to process the I2C command, otherwise it will + * do a light reset based on SOC SYSRES_OUT pin. + */ + mdelay(1); + + return NOTIFY_DONE; +} + +static ssize_t front_button_poweron_show(struct device *dev, + struct device_attribute *a, char *buf) +{ + struct omnia_mcu *mcu = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%d\n", mcu->front_button_poweron); +} + +static ssize_t front_button_poweron_store(struct device *dev, + struct device_attribute *a, + const char *buf, size_t count) +{ + struct omnia_mcu *mcu = dev_get_drvdata(dev); + bool val; + int err; + + err = kstrtobool(buf, &val); + if (err) + return err; + + mcu->front_button_poweron = val; + + return count; +} +static DEVICE_ATTR_RW(front_button_poweron); + +static struct attribute *omnia_mcu_poweroff_attrs[] = { + &dev_attr_front_button_poweron.attr, + NULL +}; + +static umode_t poweroff_attrs_visible(struct kobject *kobj, struct attribute *a, + int n) +{ + struct device *dev = kobj_to_dev(kobj); + struct omnia_mcu *mcu = dev_get_drvdata(dev); + + if (mcu->features & OMNIA_FEAT_POWEROFF_WAKEUP) + return a->mode; + + return 0; +} + +const struct attribute_group omnia_mcu_poweroff_group = { + .attrs = omnia_mcu_poweroff_attrs, + .is_visible = poweroff_attrs_visible, +}; + +int omnia_mcu_register_sys_off_and_wakeup(struct omnia_mcu *mcu) +{ + struct device *dev = &mcu->client->dev; + int err; + + /* MCU restart is always available */ + err = devm_register_sys_off_handler(dev, SYS_OFF_MODE_RESTART, + SYS_OFF_PRIO_FIRMWARE, + omnia_restart, mcu); + if (err) + return dev_err_probe(dev, err, + "Cannot register system restart handler\n"); + + /* + * Poweroff and wakeup are available only if POWEROFF_WAKEUP feature is + * present. 
+ */ + if (!(mcu->features & OMNIA_FEAT_POWEROFF_WAKEUP)) + return 0; + + err = devm_register_sys_off_handler(dev, SYS_OFF_MODE_POWER_OFF, + SYS_OFF_PRIO_FIRMWARE, + omnia_power_off, mcu); + if (err) + return dev_err_probe(dev, err, + "Cannot register system power off handler\n"); + + mcu->rtcdev = devm_rtc_allocate_device(dev); + if (IS_ERR(mcu->rtcdev)) + return dev_err_probe(dev, PTR_ERR(mcu->rtcdev), + "Cannot allocate RTC device\n"); + + mcu->rtcdev->ops = &omnia_rtc_ops; + mcu->rtcdev->range_max = U32_MAX; + set_bit(RTC_FEATURE_ALARM_WAKEUP_ONLY, mcu->rtcdev->features); + + err = devm_rtc_register_device(mcu->rtcdev); + if (err) + return dev_err_probe(dev, err, "Cannot register RTC device\n"); + + mcu->front_button_poweron = true; + + return 0; +} diff --git a/drivers/platform/cznic/turris-omnia-mcu-trng.c b/drivers/platform/cznic/turris-omnia-mcu-trng.c new file mode 100644 index 000000000000..ad953fb3c37a --- /dev/null +++ b/drivers/platform/cznic/turris-omnia-mcu-trng.c @@ -0,0 +1,103 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * CZ.NIC's Turris Omnia MCU TRNG driver + * + * 2024 by Marek Behún <kabel@kernel.org> + */ + +#include <linux/bitfield.h> +#include <linux/completion.h> +#include <linux/container_of.h> +#include <linux/errno.h> +#include <linux/gpio/consumer.h> +#include <linux/gpio/driver.h> +#include <linux/hw_random.h> +#include <linux/i2c.h> +#include <linux/interrupt.h> +#include <linux/minmax.h> +#include <linux/string.h> +#include <linux/types.h> + +#include <linux/turris-omnia-mcu-interface.h> +#include "turris-omnia-mcu.h" + +#define OMNIA_CMD_TRNG_MAX_ENTROPY_LEN 64 + +static irqreturn_t omnia_trng_irq_handler(int irq, void *dev_id) +{ + struct omnia_mcu *mcu = dev_id; + + complete(&mcu->trng_entropy_ready); + + return IRQ_HANDLED; +} + +static int omnia_trng_read(struct hwrng *rng, void *data, size_t max, bool wait) +{ + struct omnia_mcu *mcu = container_of(rng, struct omnia_mcu, trng); + u8 reply[1 + OMNIA_CMD_TRNG_MAX_ENTROPY_LEN]; + int err, bytes; + + if (!wait && !completion_done(&mcu->trng_entropy_ready)) + return 0; + + do { + if (wait_for_completion_interruptible(&mcu->trng_entropy_ready)) + return -ERESTARTSYS; + + err = omnia_cmd_read(mcu->client, + OMNIA_CMD_TRNG_COLLECT_ENTROPY, + reply, sizeof(reply)); + if (err) + return err; + + bytes = min3(reply[0], max, OMNIA_CMD_TRNG_MAX_ENTROPY_LEN); + } while (wait && !bytes); + + memcpy(data, &reply[1], bytes); + + return bytes; +} + +int omnia_mcu_register_trng(struct omnia_mcu *mcu) +{ + struct device *dev = &mcu->client->dev; + u8 irq_idx, dummy; + int irq, err; + + if (!(mcu->features & OMNIA_FEAT_TRNG)) + return 0; + + irq_idx = omnia_int_to_gpio_idx[__bf_shf(OMNIA_INT_TRNG)]; + irq = gpiod_to_irq(gpio_device_get_desc(mcu->gc.gpiodev, irq_idx)); + if (!irq) + return dev_err_probe(dev, -ENXIO, "Cannot get TRNG IRQ\n"); + + /* + * If someone else cleared the TRNG interrupt but did not read the + * entropy, a new interrupt won't be generated, and entropy collection + * will be stuck. Ensure an interrupt will be generated by executing + * the collect entropy command (and discarding the result). 
+ */ + err = omnia_cmd_read(mcu->client, OMNIA_CMD_TRNG_COLLECT_ENTROPY, + &dummy, 1); + if (err) + return err; + + init_completion(&mcu->trng_entropy_ready); + + err = devm_request_threaded_irq(dev, irq, NULL, omnia_trng_irq_handler, + IRQF_ONESHOT, "turris-omnia-mcu-trng", + mcu); + if (err) + return dev_err_probe(dev, err, "Cannot request TRNG IRQ\n"); + + mcu->trng.name = "turris-omnia-mcu-trng"; + mcu->trng.read = omnia_trng_read; + + err = devm_hwrng_register(dev, &mcu->trng); + if (err) + return dev_err_probe(dev, err, "Cannot register TRNG\n"); + + return 0; +} diff --git a/drivers/platform/cznic/turris-omnia-mcu-watchdog.c b/drivers/platform/cznic/turris-omnia-mcu-watchdog.c new file mode 100644 index 000000000000..3ad146ec1d80 --- /dev/null +++ b/drivers/platform/cznic/turris-omnia-mcu-watchdog.c @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * CZ.NIC's Turris Omnia MCU watchdog driver + * + * 2024 by Marek Behún <kabel@kernel.org> + */ + +#include <linux/bitops.h> +#include <linux/device.h> +#include <linux/i2c.h> +#include <linux/moduleparam.h> +#include <linux/types.h> +#include <linux/units.h> +#include <linux/watchdog.h> + +#include <linux/turris-omnia-mcu-interface.h> +#include "turris-omnia-mcu.h" + +#define WATCHDOG_TIMEOUT 120 + +static unsigned int timeout; +module_param(timeout, int, 0); +MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds"); + +static bool nowayout = WATCHDOG_NOWAYOUT; +module_param(nowayout, bool, 0); +MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" + __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); + +static int omnia_wdt_start(struct watchdog_device *wdt) +{ + struct omnia_mcu *mcu = watchdog_get_drvdata(wdt); + + return omnia_cmd_write_u8(mcu->client, OMNIA_CMD_SET_WATCHDOG_STATE, 1); +} + +static int omnia_wdt_stop(struct watchdog_device *wdt) +{ + struct omnia_mcu *mcu = watchdog_get_drvdata(wdt); + + return omnia_cmd_write_u8(mcu->client, OMNIA_CMD_SET_WATCHDOG_STATE, 0); +} + +static int omnia_wdt_ping(struct watchdog_device *wdt) +{ + struct omnia_mcu *mcu = watchdog_get_drvdata(wdt); + + return omnia_cmd_write_u8(mcu->client, OMNIA_CMD_SET_WATCHDOG_STATE, 1); +} + +static int omnia_wdt_set_timeout(struct watchdog_device *wdt, + unsigned int timeout) +{ + struct omnia_mcu *mcu = watchdog_get_drvdata(wdt); + + return omnia_cmd_write_u16(mcu->client, OMNIA_CMD_SET_WDT_TIMEOUT, + timeout * DECI); +} + +static unsigned int omnia_wdt_get_timeleft(struct watchdog_device *wdt) +{ + struct omnia_mcu *mcu = watchdog_get_drvdata(wdt); + u16 timeleft; + int err; + + err = omnia_cmd_read_u16(mcu->client, OMNIA_CMD_GET_WDT_TIMELEFT, + &timeleft); + if (err) { + dev_err(&mcu->client->dev, "Cannot get watchdog timeleft: %d\n", + err); + return 0; + } + + return timeleft / DECI; +} + +static const struct watchdog_info omnia_wdt_info = { + .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, + .identity = "Turris Omnia MCU Watchdog", +}; + +static const struct watchdog_ops omnia_wdt_ops = { + .owner = THIS_MODULE, + .start = omnia_wdt_start, + .stop = omnia_wdt_stop, + .ping = omnia_wdt_ping, + .set_timeout = omnia_wdt_set_timeout, + .get_timeleft = omnia_wdt_get_timeleft, +}; + +int omnia_mcu_register_watchdog(struct omnia_mcu *mcu) +{ + struct device *dev = &mcu->client->dev; + u8 state; + int err; + + if (!(mcu->features & OMNIA_FEAT_WDT_PING)) + return 0; + + mcu->wdt.info = &omnia_wdt_info; + mcu->wdt.ops = &omnia_wdt_ops; + mcu->wdt.parent = dev; + mcu->wdt.min_timeout = 1; + mcu->wdt.max_timeout 
= 65535 / DECI; + + mcu->wdt.timeout = WATCHDOG_TIMEOUT; + watchdog_init_timeout(&mcu->wdt, timeout, dev); + + watchdog_set_drvdata(&mcu->wdt, mcu); + + omnia_wdt_set_timeout(&mcu->wdt, mcu->wdt.timeout); + + err = omnia_cmd_read_u8(mcu->client, OMNIA_CMD_GET_WATCHDOG_STATE, + &state); + if (err) + return dev_err_probe(dev, err, + "Cannot get MCU watchdog state\n"); + + if (state) + set_bit(WDOG_HW_RUNNING, &mcu->wdt.status); + + watchdog_set_nowayout(&mcu->wdt, nowayout); + watchdog_stop_on_reboot(&mcu->wdt); + err = devm_watchdog_register_device(dev, &mcu->wdt); + if (err) + return dev_err_probe(dev, err, + "Cannot register MCU watchdog\n"); + + return 0; +} diff --git a/drivers/platform/cznic/turris-omnia-mcu.h b/drivers/platform/cznic/turris-omnia-mcu.h new file mode 100644 index 000000000000..2ca56ae13aa9 --- /dev/null +++ b/drivers/platform/cznic/turris-omnia-mcu.h @@ -0,0 +1,194 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * CZ.NIC's Turris Omnia MCU driver + * + * 2024 by Marek Behún <kabel@kernel.org> + */ + +#ifndef __TURRIS_OMNIA_MCU_H +#define __TURRIS_OMNIA_MCU_H + +#include <linux/bitops.h> +#include <linux/completion.h> +#include <linux/gpio/driver.h> +#include <linux/hw_random.h> +#include <linux/if_ether.h> +#include <linux/mutex.h> +#include <linux/types.h> +#include <linux/watchdog.h> +#include <linux/workqueue.h> +#include <asm/byteorder.h> +#include <asm/unaligned.h> + +struct i2c_client; +struct rtc_device; + +struct omnia_mcu { + struct i2c_client *client; + const char *type; + u32 features; + + /* board information */ + u64 board_serial_number; + u8 board_first_mac[ETH_ALEN]; + u8 board_revision; + + /* GPIO chip */ + struct gpio_chip gc; + struct mutex lock; + unsigned long mask, rising, falling, both, cached, is_cached; + /* Old MCU firmware handling needs the following */ + struct delayed_work button_release_emul_work; + unsigned long last_status; + bool button_pressed_emul; + + /* RTC device for configuring wake-up */ + struct rtc_device *rtcdev; + u32 rtc_alarm; + bool front_button_poweron; + + /* MCU watchdog */ + struct watchdog_device wdt; + + /* true random number generator */ + struct hwrng trng; + struct completion trng_entropy_ready; +}; + +int omnia_cmd_write_read(const struct i2c_client *client, + void *cmd, unsigned int cmd_len, + void *reply, unsigned int reply_len); + +static inline int omnia_cmd_write(const struct i2c_client *client, void *cmd, + unsigned int len) +{ + return omnia_cmd_write_read(client, cmd, len, NULL, 0); +} + +static inline int omnia_cmd_write_u8(const struct i2c_client *client, u8 cmd, + u8 val) +{ + u8 buf[2] = { cmd, val }; + + return omnia_cmd_write(client, buf, sizeof(buf)); +} + +static inline int omnia_cmd_write_u16(const struct i2c_client *client, u8 cmd, + u16 val) +{ + u8 buf[3]; + + buf[0] = cmd; + put_unaligned_le16(val, &buf[1]); + + return omnia_cmd_write(client, buf, sizeof(buf)); +} + +static inline int omnia_cmd_write_u32(const struct i2c_client *client, u8 cmd, + u32 val) +{ + u8 buf[5]; + + buf[0] = cmd; + put_unaligned_le32(val, &buf[1]); + + return omnia_cmd_write(client, buf, sizeof(buf)); +} + +static inline int omnia_cmd_read(const struct i2c_client *client, u8 cmd, + void *reply, unsigned int len) +{ + return omnia_cmd_write_read(client, &cmd, 1, reply, len); +} + +static inline unsigned int +omnia_compute_reply_length(unsigned long mask, bool interleaved, + unsigned int offset) +{ + if (!mask) + return 0; + + return ((__fls(mask) >> 3) << interleaved) + 1 + offset; +} + +/* Returns 0 on success */ 
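+/* Reads only as many reply bytes as are needed to cover the highest set bit of @bits. */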
+static inline int omnia_cmd_read_bits(const struct i2c_client *client, u8 cmd, + unsigned long bits, unsigned long *dst) +{ + __le32 reply; + int err; + + if (!bits) { + *dst = 0; + return 0; + } + + err = omnia_cmd_read(client, cmd, &reply, + omnia_compute_reply_length(bits, false, 0)); + if (err) + return err; + + *dst = le32_to_cpu(reply) & bits; + + return 0; +} + +static inline int omnia_cmd_read_bit(const struct i2c_client *client, u8 cmd, + unsigned long bit) +{ + unsigned long reply; + int err; + + err = omnia_cmd_read_bits(client, cmd, bit, &reply); + if (err) + return err; + + return !!reply; +} + +static inline int omnia_cmd_read_u32(const struct i2c_client *client, u8 cmd, + u32 *dst) +{ + __le32 reply; + int err; + + err = omnia_cmd_read(client, cmd, &reply, sizeof(reply)); + if (err) + return err; + + *dst = le32_to_cpu(reply); + + return 0; +} + +static inline int omnia_cmd_read_u16(const struct i2c_client *client, u8 cmd, + u16 *dst) +{ + __le16 reply; + int err; + + err = omnia_cmd_read(client, cmd, &reply, sizeof(reply)); + if (err) + return err; + + *dst = le16_to_cpu(reply); + + return 0; +} + +static inline int omnia_cmd_read_u8(const struct i2c_client *client, u8 cmd, + u8 *reply) +{ + return omnia_cmd_read(client, cmd, reply, sizeof(*reply)); +} + +extern const u8 omnia_int_to_gpio_idx[32]; +extern const struct attribute_group omnia_mcu_gpio_group; +extern const struct attribute_group omnia_mcu_poweroff_group; + +int omnia_mcu_register_gpiochip(struct omnia_mcu *mcu); +int omnia_mcu_register_sys_off_and_wakeup(struct omnia_mcu *mcu); +int omnia_mcu_register_trng(struct omnia_mcu *mcu); +int omnia_mcu_register_watchdog(struct omnia_mcu *mcu); + +#endif /* __TURRIS_OMNIA_MCU_H */ diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index 3a8d8df89186..78a5aac2dcfd 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -3271,7 +3271,7 @@ static const char *find_hci_method(acpi_handle handle) */ #define QUIRK_HCI_HOTKEY_QUICKSTART BIT(1) -static const struct dmi_system_id toshiba_dmi_quirks[] = { +static const struct dmi_system_id toshiba_dmi_quirks[] __initconst = { { /* Toshiba Portégé R700 */ /* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */ @@ -3299,6 +3299,7 @@ static const struct dmi_system_id toshiba_dmi_quirks[] = { }, .driver_data = (void *)(QUIRK_TURN_ON_PANEL_ON_RESUME | QUIRK_HCI_HOTKEY_QUICKSTART), }, + { } }; static int toshiba_acpi_add(struct acpi_device *acpi_dev) @@ -3306,8 +3307,6 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev) struct toshiba_acpi_dev *dev; const char *hci_method; u32 dummy; - const struct dmi_system_id *dmi_id; - long quirks = 0; int ret = 0; if (toshiba_acpi) @@ -3460,16 +3459,6 @@ iio_error: } #endif - dmi_id = dmi_first_match(toshiba_dmi_quirks); - if (dmi_id) - quirks = (long)dmi_id->driver_data; - - if (turn_on_panel_on_resume == -1) - turn_on_panel_on_resume = !!(quirks & QUIRK_TURN_ON_PANEL_ON_RESUME); - - if (hci_hotkey_quickstart == -1) - hci_hotkey_quickstart = !!(quirks & QUIRK_HCI_HOTKEY_QUICKSTART); - toshiba_wwan_available(dev); if (dev->wwan_supported) toshiba_acpi_setup_wwan_rfkill(dev); @@ -3618,10 +3607,27 @@ static struct acpi_driver toshiba_acpi_driver = { .drv.pm = &toshiba_acpi_pm, }; +static void __init toshiba_dmi_init(void) +{ + const struct dmi_system_id *dmi_id; + long quirks = 0; + + dmi_id = dmi_first_match(toshiba_dmi_quirks); + if (dmi_id) + quirks = (long)dmi_id->driver_data; + + if (turn_on_panel_on_resume == 
-1) + turn_on_panel_on_resume = !!(quirks & QUIRK_TURN_ON_PANEL_ON_RESUME); + + if (hci_hotkey_quickstart == -1) + hci_hotkey_quickstart = !!(quirks & QUIRK_HCI_HOTKEY_QUICKSTART); +} + static int __init toshiba_acpi_init(void) { int ret; + toshiba_dmi_init(); toshiba_proc_dir = proc_mkdir(PROC_TOSHIBA, acpi_root_dir); if (!toshiba_proc_dir) { pr_err("Unable to create proc dir " PROC_TOSHIBA "\n"); diff --git a/drivers/pmdomain/amlogic/meson-ee-pwrc.c b/drivers/pmdomain/amlogic/meson-ee-pwrc.c index fcec6eb610e4..fbb2b4103930 100644 --- a/drivers/pmdomain/amlogic/meson-ee-pwrc.c +++ b/drivers/pmdomain/amlogic/meson-ee-pwrc.c @@ -648,4 +648,5 @@ static struct platform_driver meson_ee_pwrc_driver = { }, }; module_platform_driver(meson_ee_pwrc_driver); +MODULE_DESCRIPTION("Amlogic Meson Everything-Else Power Domains driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/pmdomain/amlogic/meson-gx-pwrc-vpu.c b/drivers/pmdomain/amlogic/meson-gx-pwrc-vpu.c index 33df520eab95..6028e91664a4 100644 --- a/drivers/pmdomain/amlogic/meson-gx-pwrc-vpu.c +++ b/drivers/pmdomain/amlogic/meson-gx-pwrc-vpu.c @@ -376,4 +376,5 @@ static struct platform_driver meson_gx_pwrc_vpu_driver = { }, }; module_platform_driver(meson_gx_pwrc_vpu_driver); +MODULE_DESCRIPTION("Amlogic Meson GX Power Domains driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/pmdomain/amlogic/meson-secure-pwrc.c b/drivers/pmdomain/amlogic/meson-secure-pwrc.c index 4d5bda0d60fc..42ce41a2fe3a 100644 --- a/drivers/pmdomain/amlogic/meson-secure-pwrc.c +++ b/drivers/pmdomain/amlogic/meson-secure-pwrc.c @@ -14,6 +14,8 @@ #include <dt-bindings/power/amlogic,c3-pwrc.h> #include <dt-bindings/power/meson-s4-power.h> #include <dt-bindings/power/amlogic,t7-pwrc.h> +#include <dt-bindings/power/amlogic,a4-pwrc.h> +#include <dt-bindings/power/amlogic,a5-pwrc.h> #include <linux/arm-smccc.h> #include <linux/firmware/meson/meson_sm.h> #include <linux/module.h> @@ -45,7 +47,7 @@ struct meson_secure_pwrc_domain_desc { struct meson_secure_pwrc_domain_data { unsigned int count; - struct meson_secure_pwrc_domain_desc *domains; + const struct meson_secure_pwrc_domain_desc *domains; }; static bool pwrc_secure_is_off(struct meson_secure_pwrc_domain *pwrc_domain) @@ -109,7 +111,7 @@ static int meson_secure_pwrc_on(struct generic_pm_domain *domain) .parent = __parent, \ } -static struct meson_secure_pwrc_domain_desc a1_pwrc_domains[] = { +static const struct meson_secure_pwrc_domain_desc a1_pwrc_domains[] = { SEC_PD(DSPA, 0), SEC_PD(DSPB, 0), /* UART should keep working in ATF after suspend and before resume */ @@ -136,7 +138,41 @@ static struct meson_secure_pwrc_domain_desc a1_pwrc_domains[] = { SEC_PD(RSA, 0), }; -static struct meson_secure_pwrc_domain_desc c3_pwrc_domains[] = { +static const struct meson_secure_pwrc_domain_desc a4_pwrc_domains[] = { + SEC_PD(A4_AUDIO, 0), + SEC_PD(A4_SDIOA, 0), + SEC_PD(A4_EMMC, 0), + SEC_PD(A4_USB_COMB, 0), + SEC_PD(A4_ETH, 0), + SEC_PD(A4_VOUT, 0), + SEC_PD(A4_AUDIO_PDM, 0), + /* DMC is for DDR PHY ana/dig and DMC, and should be always on */ + SEC_PD(A4_DMC, GENPD_FLAG_ALWAYS_ON), + /* WRAP is secure_top, a lot of modules are included, and should be always on */ + SEC_PD(A4_SYS_WRAP, GENPD_FLAG_ALWAYS_ON), + SEC_PD(A4_AO_I2C_S, 0), + SEC_PD(A4_AO_UART, 0), + /* IR is wake up trigger source, and should be always on */ + SEC_PD(A4_AO_IR, GENPD_FLAG_ALWAYS_ON), +}; + +static const struct meson_secure_pwrc_domain_desc a5_pwrc_domains[] = { + SEC_PD(A5_NNA, 0), + SEC_PD(A5_AUDIO, 0), + SEC_PD(A5_SDIOA, 0), + SEC_PD(A5_EMMC, 0), + 
SEC_PD(A5_USB_COMB, 0), + SEC_PD(A5_ETH, 0), + SEC_PD(A5_RSA, 0), + SEC_PD(A5_AUDIO_PDM, 0), + /* DMC is for DDR PHY ana/dig and DMC, and should be always on */ + SEC_PD(A5_DMC, GENPD_FLAG_ALWAYS_ON), + /* WRAP is secure_top, a lot of modules are included, and should be always on */ + SEC_PD(A5_SYS_WRAP, GENPD_FLAG_ALWAYS_ON), + SEC_PD(A5_DSPA, 0), +}; + +static const struct meson_secure_pwrc_domain_desc c3_pwrc_domains[] = { SEC_PD(C3_NNA, 0), SEC_PD(C3_AUDIO, 0), SEC_PD(C3_SDIOA, 0), @@ -153,7 +189,7 @@ static struct meson_secure_pwrc_domain_desc c3_pwrc_domains[] = { SEC_PD(C3_VCODEC, 0), }; -static struct meson_secure_pwrc_domain_desc s4_pwrc_domains[] = { +static const struct meson_secure_pwrc_domain_desc s4_pwrc_domains[] = { SEC_PD(S4_DOS_HEVC, 0), SEC_PD(S4_DOS_VDEC, 0), SEC_PD(S4_VPU_HDMI, 0), @@ -165,7 +201,7 @@ static struct meson_secure_pwrc_domain_desc s4_pwrc_domains[] = { SEC_PD(S4_AUDIO, 0), }; -static struct meson_secure_pwrc_domain_desc t7_pwrc_domains[] = { +static const struct meson_secure_pwrc_domain_desc t7_pwrc_domains[] = { SEC_PD(T7_DSPA, 0), SEC_PD(T7_DSPB, 0), TOP_PD(T7_DOS_HCODEC, 0, PWRC_T7_NIC3_ID), @@ -311,6 +347,16 @@ static struct meson_secure_pwrc_domain_data meson_secure_a1_pwrc_data = { .count = ARRAY_SIZE(a1_pwrc_domains), }; +static struct meson_secure_pwrc_domain_data amlogic_secure_a4_pwrc_data = { + .domains = a4_pwrc_domains, + .count = ARRAY_SIZE(a4_pwrc_domains), +}; + +static struct meson_secure_pwrc_domain_data amlogic_secure_a5_pwrc_data = { + .domains = a5_pwrc_domains, + .count = ARRAY_SIZE(a5_pwrc_domains), +}; + static struct meson_secure_pwrc_domain_data amlogic_secure_c3_pwrc_data = { .domains = c3_pwrc_domains, .count = ARRAY_SIZE(c3_pwrc_domains), @@ -332,6 +378,14 @@ static const struct of_device_id meson_secure_pwrc_match_table[] = { .data = &meson_secure_a1_pwrc_data, }, { + .compatible = "amlogic,a4-pwrc", + .data = &amlogic_secure_a4_pwrc_data, + }, + { + .compatible = "amlogic,a5-pwrc", + .data = &amlogic_secure_a5_pwrc_data, + }, + { .compatible = "amlogic,c3-pwrc", .data = &amlogic_secure_c3_pwrc_data, }, @@ -355,4 +409,5 @@ static struct platform_driver meson_secure_pwrc_driver = { }, }; module_platform_driver(meson_secure_pwrc_driver); +MODULE_DESCRIPTION("Amlogic Meson Secure Power Domains driver"); MODULE_LICENSE("Dual MIT/GPL"); diff --git a/drivers/pmdomain/arm/scmi_pm_domain.c b/drivers/pmdomain/arm/scmi_pm_domain.c index 0e05a79de82d..a7784a8bb5db 100644 --- a/drivers/pmdomain/arm/scmi_pm_domain.c +++ b/drivers/pmdomain/arm/scmi_pm_domain.c @@ -102,6 +102,7 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev) scmi_pd->genpd.name = scmi_pd->name; scmi_pd->genpd.power_off = scmi_pd_power_off; scmi_pd->genpd.power_on = scmi_pd_power_on; + scmi_pd->genpd.flags = GENPD_FLAG_ACTIVE_WAKEUP; pm_genpd_init(&scmi_pd->genpd, NULL, state == SCMI_POWER_STATE_GENERIC_OFF); diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c index 623d15b68707..7a61aa88c061 100644 --- a/drivers/pmdomain/core.c +++ b/drivers/pmdomain/core.c @@ -588,6 +588,68 @@ void dev_pm_genpd_synced_poweroff(struct device *dev) } EXPORT_SYMBOL_GPL(dev_pm_genpd_synced_poweroff); +/** + * dev_pm_genpd_set_hwmode() - Set the HW mode for the device and its PM domain. + * + * @dev: Device for which the HW-mode should be changed. + * @enable: Value to set or unset the HW-mode. + * + * Some PM domains can rely on HW signals to control the power for a device. 
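+ * A typical case is a domain that can be kept powered by a hardware vote
+ * while its consumer device is actively used, avoiding software-driven
+ * on/off cycles on the hot path (an illustrative example, not a statement
+ * about any particular platform).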
To + * allow a consumer driver to switch the behaviour for its device in runtime, + * which may be beneficial from a latency or energy point of view, this function + * may be called. + * + * It is assumed that the users guarantee that the genpd wouldn't be detached + * while this routine is getting called. + * + * Return: Returns 0 on success and negative error values on failures. + */ +int dev_pm_genpd_set_hwmode(struct device *dev, bool enable) +{ + struct generic_pm_domain *genpd; + int ret = 0; + + genpd = dev_to_genpd_safe(dev); + if (!genpd) + return -ENODEV; + + if (!genpd->set_hwmode_dev) + return -EOPNOTSUPP; + + genpd_lock(genpd); + + if (dev_gpd_data(dev)->hw_mode == enable) + goto out; + + ret = genpd->set_hwmode_dev(genpd, dev, enable); + if (!ret) + dev_gpd_data(dev)->hw_mode = enable; + +out: + genpd_unlock(genpd); + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_genpd_set_hwmode); + +/** + * dev_pm_genpd_get_hwmode() - Get the HW mode setting for the device. + * + * @dev: Device for which the current HW-mode setting should be fetched. + * + * This helper function allows consumer drivers to fetch the current HW mode + * setting of its the device. + * + * It is assumed that the users guarantee that the genpd wouldn't be detached + * while this routine is getting called. + * + * Return: Returns the HW mode setting of device from SW cached hw_mode. + */ +bool dev_pm_genpd_get_hwmode(struct device *dev) +{ + return dev_gpd_data(dev)->hw_mode; +} +EXPORT_SYMBOL_GPL(dev_pm_genpd_get_hwmode); + static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed) { unsigned int state_idx = genpd->state_idx; @@ -1687,6 +1749,8 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, gpd_data->cpu = genpd_get_cpu(genpd, base_dev); + gpd_data->hw_mode = genpd->get_hwmode_dev ? genpd->get_hwmode_dev(genpd, dev) : false; + ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0; if (ret) goto out; @@ -2079,7 +2143,7 @@ static void genpd_free_data(struct generic_pm_domain *genpd) static void genpd_lock_init(struct generic_pm_domain *genpd) { - if (genpd->flags & GENPD_FLAG_IRQ_SAFE) { + if (genpd_is_irq_safe(genpd)) { spin_lock_init(&genpd->slock); genpd->lock_ops = &genpd_spin_ops; } else { @@ -3120,6 +3184,15 @@ static void rtpm_status_str(struct seq_file *s, struct device *dev) seq_printf(s, "%-25s ", p); } +static void mode_status_str(struct seq_file *s, struct device *dev) +{ + struct generic_pm_domain_data *gpd_data; + + gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); + + seq_printf(s, "%20s", gpd_data->hw_mode ? 
"HW" : "SW"); +} + static void perf_status_str(struct seq_file *s, struct device *dev) { struct generic_pm_domain_data *gpd_data; @@ -3178,6 +3251,7 @@ static int genpd_summary_one(struct seq_file *s, seq_printf(s, "\n %-50s ", kobj_path); rtpm_status_str(s, pm_data->dev); perf_status_str(s, pm_data->dev); + mode_status_str(s, pm_data->dev); kfree(kobj_path); } @@ -3194,8 +3268,8 @@ static int summary_show(struct seq_file *s, void *data) int ret = 0; seq_puts(s, "domain status children performance\n"); - seq_puts(s, " /device runtime status\n"); - seq_puts(s, "----------------------------------------------------------------------------------------------\n"); + seq_puts(s, " /device runtime status managed by\n"); + seq_puts(s, "------------------------------------------------------------------------------------------------------------\n"); ret = mutex_lock_interruptible(&gpd_list_lock); if (ret) diff --git a/drivers/pmdomain/qcom/rpmhpd.c b/drivers/pmdomain/qcom/rpmhpd.c index de9121ef4216..d2cb4271a1ca 100644 --- a/drivers/pmdomain/qcom/rpmhpd.c +++ b/drivers/pmdomain/qcom/rpmhpd.c @@ -40,6 +40,7 @@ * @addr: Resource address as looped up using resource name from * cmd-db * @state_synced: Indicator that sync_state has been invoked for the rpmhpd resource + * @skip_retention_level: Indicate that retention level should not be used for the power domain */ struct rpmhpd { struct device *dev; @@ -56,6 +57,7 @@ struct rpmhpd { const char *res_name; u32 addr; bool state_synced; + bool skip_retention_level; }; struct rpmhpd_desc { @@ -173,6 +175,7 @@ static struct rpmhpd mxc = { .pd = { .name = "mxc", }, .peer = &mxc_ao, .res_name = "mxc.lvl", + .skip_retention_level = true, }; static struct rpmhpd mxc_ao = { @@ -180,6 +183,7 @@ static struct rpmhpd mxc_ao = { .active_only = true, .peer = &mxc, .res_name = "mxc.lvl", + .skip_retention_level = true, }; static struct rpmhpd nsp = { @@ -819,6 +823,9 @@ static int rpmhpd_update_level_mapping(struct rpmhpd *rpmhpd) return -EINVAL; for (i = 0; i < rpmhpd->level_count; i++) { + if (rpmhpd->skip_retention_level && buf[i] == RPMH_REGULATOR_LEVEL_RETENTION) + continue; + rpmhpd->level[i] = buf[i]; /* Remember the first corner with non-zero level */ diff --git a/drivers/pmdomain/renesas/rmobile-sysc.c b/drivers/pmdomain/renesas/rmobile-sysc.c index 0b77f37787d5..5848e79aa438 100644 --- a/drivers/pmdomain/renesas/rmobile-sysc.c +++ b/drivers/pmdomain/renesas/rmobile-sysc.c @@ -268,9 +268,7 @@ static int __init rmobile_add_pm_domains(void __iomem *base, struct device_node *parent, struct generic_pm_domain *genpd_parent) { - struct device_node *np; - - for_each_child_of_node(parent, np) { + for_each_child_of_node_scoped(parent, np) { struct rmobile_pm_domain *pd; u32 idx = ~0; @@ -279,10 +277,8 @@ static int __init rmobile_add_pm_domains(void __iomem *base, } pd = kzalloc(sizeof(*pd), GFP_KERNEL); - if (!pd) { - of_node_put(np); + if (!pd) return -ENOMEM; - } pd->genpd.name = np->name; pd->base = base; diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig index 696bf77a7042..9a8e44ca9ae4 100644 --- a/drivers/power/Kconfig +++ b/drivers/power/Kconfig @@ -1,3 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only source "drivers/power/reset/Kconfig" +source "drivers/power/sequencing/Kconfig" source "drivers/power/supply/Kconfig" diff --git a/drivers/power/Makefile b/drivers/power/Makefile index effbf0377f32..962a2cd30a51 100644 --- a/drivers/power/Makefile +++ b/drivers/power/Makefile @@ -1,3 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_POWER_RESET) 
+= reset/
+obj-$(CONFIG_POWER_SEQUENCING)	+= sequencing/
 obj-$(CONFIG_POWER_SUPPLY)	+= supply/
diff --git a/drivers/power/sequencing/Kconfig b/drivers/power/sequencing/Kconfig
new file mode 100644
index 000000000000..c9f1cdb66524
--- /dev/null
+++ b/drivers/power/sequencing/Kconfig
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+menuconfig POWER_SEQUENCING
+	tristate "Power Sequencing support"
+	help
+	  Say Y here to enable the Power Sequencing subsystem.
+
+	  This subsystem is designed to control power to devices that share
+	  complex resources and/or require specific power sequences to be run
+	  during power-up.
+
+	  If unsure, say no.
+
+if POWER_SEQUENCING
+
+config POWER_SEQUENCING_QCOM_WCN
+	tristate "Qualcomm WCN family PMU driver"
+	default m if ARCH_QCOM
+	help
+	  Say Y here to enable the power sequencing driver for Qualcomm
+	  WCN Bluetooth/WLAN chipsets.
+
+	  Typically, a package from the Qualcomm WCN family contains the BT
+	  and WLAN modules whose power is controlled by the PMU module. As the
+	  former two share the power-up sequence which is executed by the PMU,
+	  this driver is needed for correct power control, or else we'd risk
+	  not respecting the required delays between enabling Bluetooth and
+	  WLAN.
+
+endif
diff --git a/drivers/power/sequencing/Makefile b/drivers/power/sequencing/Makefile
new file mode 100644
index 000000000000..2eec2df7912d
--- /dev/null
+++ b/drivers/power/sequencing/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_POWER_SEQUENCING)		+= pwrseq-core.o
+pwrseq-core-y				:= core.o
+
+obj-$(CONFIG_POWER_SEQUENCING_QCOM_WCN)	+= pwrseq-qcom-wcn.o
diff --git a/drivers/power/sequencing/core.c b/drivers/power/sequencing/core.c
new file mode 100644
index 000000000000..9c32b07a55e7
--- /dev/null
+++ b/drivers/power/sequencing/core.c
@@ -0,0 +1,1105 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Linaro Ltd.
+ */
+
+#include <linux/bug.h>
+#include <linux/cleanup.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/idr.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/property.h>
+#include <linux/pwrseq/consumer.h>
+#include <linux/pwrseq/provider.h>
+#include <linux/radix-tree.h>
+#include <linux/rwsem.h>
+#include <linux/slab.h>
+
+/*
+ * Power-sequencing framework for Linux.
+ *
+ * This subsystem allows power sequence providers to register a set of targets
+ * that consumers may request and power up or down.
+ *
+ * Glossary:
+ *
+ * Unit - a unit is a discrete chunk of a power sequence. For instance, one
+ * unit may enable a set of regulators, another may enable a specific GPIO.
+ * Units can define dependencies in the form of other units that must be
+ * enabled before the unit itself can be.
+ *
+ * Target - a target is a set of units (composed of the "final" unit and its
+ * dependencies) that a consumer selects by its name when requesting a handle
+ * to the power sequencer. Via the dependency system, multiple targets may
+ * share the same parts of a power sequence but ignore parts that are
+ * irrelevant.
+ *
+ * Descriptor - a handle passed by the pwrseq core to every consumer that
+ * serves as the entry point to the provider layer. It ensures coherence
+ * between different users and keeps reference counting consistent.
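+ *
+ * As a sketch of how these concepts compose (names taken from the Qualcomm
+ * WCN driver added in this commit): a "bluetooth" and a "wlan" target may
+ * both name final units that depend on a shared "regulators-enable" unit.
+ * Whichever consumer powers up first runs the shared part of the sequence;
+ * the second one only takes a reference by bumping the unit's enable count.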
+ *
+ * Each provider must define a .match() callback whose role is to determine
+ * whether a potential consumer is in fact associated with this sequencer.
+ * This allows creating abstraction layers on top of regular device-tree
+ * resources like regulators, clocks and other nodes connected to the consumer
+ * via phandle.
+ */
+
+static DEFINE_IDA(pwrseq_ida);
+
+/*
+ * Protects the device list on the pwrseq bus from concurrent modifications
+ * but allows simultaneous read-only access.
+ */
+static DECLARE_RWSEM(pwrseq_sem);
+
+/**
+ * struct pwrseq_unit - Private power-sequence unit data.
+ * @ref: Reference count for this object. When it goes to 0, the object is
+ *       destroyed.
+ * @name: Name of this unit.
+ * @list: Link to siblings on the list of all units of a single sequencer.
+ * @deps: List of units on which this unit depends.
+ * @enable: Callback running the part of the power-on sequence provided by
+ *          this unit.
+ * @disable: Callback running the part of the power-off sequence provided
+ *           by this unit.
+ * @enable_count: Current number of users that enabled this unit. May be the
+ *                consumer of the power sequencer or other units that depend
+ *                on this one.
+ */
+struct pwrseq_unit {
+	struct kref ref;
+	const char *name;
+	struct list_head list;
+	struct list_head deps;
+	pwrseq_power_state_func enable;
+	pwrseq_power_state_func disable;
+	unsigned int enable_count;
+};
+
+static struct pwrseq_unit *pwrseq_unit_new(const struct pwrseq_unit_data *data)
+{
+	struct pwrseq_unit *unit;
+
+	unit = kzalloc(sizeof(*unit), GFP_KERNEL);
+	if (!unit)
+		return NULL;
+
+	unit->name = kstrdup_const(data->name, GFP_KERNEL);
+	if (!unit->name) {
+		kfree(unit);
+		return NULL;
+	}
+
+	kref_init(&unit->ref);
+	INIT_LIST_HEAD(&unit->deps);
+	unit->enable = data->enable;
+	unit->disable = data->disable;
+
+	return unit;
+}
+
+static struct pwrseq_unit *pwrseq_unit_get(struct pwrseq_unit *unit)
+{
+	kref_get(&unit->ref);
+
+	return unit;
+}
+
+static void pwrseq_unit_release(struct kref *ref);
+
+static void pwrseq_unit_put(struct pwrseq_unit *unit)
+{
+	kref_put(&unit->ref, pwrseq_unit_release);
+}
+
+/**
+ * struct pwrseq_unit_dep - Wrapper around a reference to the unit structure,
+ *                          allowing it to be kept on multiple dependency
+ *                          lists in different units.
+ * @list: Siblings on the list.
+ * @unit: Address of the referenced unit.
+ */
+struct pwrseq_unit_dep {
+	struct list_head list;
+	struct pwrseq_unit *unit;
+};
+
+static struct pwrseq_unit_dep *pwrseq_unit_dep_new(struct pwrseq_unit *unit)
+{
+	struct pwrseq_unit_dep *dep;
+
+	dep = kzalloc(sizeof(*dep), GFP_KERNEL);
+	if (!dep)
+		return NULL;
+
+	dep->unit = unit;
+
+	return dep;
+}
+
+static void pwrseq_unit_dep_free(struct pwrseq_unit_dep *ref)
+{
+	pwrseq_unit_put(ref->unit);
+	kfree(ref);
+}
+
+static void pwrseq_unit_free_deps(struct list_head *list)
+{
+	struct pwrseq_unit_dep *dep, *next;
+
+	list_for_each_entry_safe(dep, next, list, list) {
+		list_del(&dep->list);
+		pwrseq_unit_dep_free(dep);
+	}
+}
+
+static void pwrseq_unit_release(struct kref *ref)
+{
+	struct pwrseq_unit *unit = container_of(ref, struct pwrseq_unit, ref);
+
+	pwrseq_unit_free_deps(&unit->deps);
+	list_del(&unit->list);
+	kfree_const(unit->name);
+	kfree(unit);
+}
+
+/**
+ * struct pwrseq_target - Private power-sequence target data.
+ * @list: Siblings on the list of all targets exposed by a power sequencer.
+ * @name: Name of the target.
+ * @unit: Final unit for this target.
+ * @post_enable: Callback run after the target unit has been enabled, *after* + * the state lock has been released. It's useful for implementing + * boot-up delays without blocking other users from powering up + * using the same power sequencer. + */ +struct pwrseq_target { + struct list_head list; + const char *name; + struct pwrseq_unit *unit; + pwrseq_power_state_func post_enable; +}; + +static struct pwrseq_target * +pwrseq_target_new(const struct pwrseq_target_data *data) +{ + struct pwrseq_target *target; + + target = kzalloc(sizeof(*target), GFP_KERNEL); + if (!target) + return NULL; + + target->name = kstrdup_const(data->name, GFP_KERNEL); + if (!target->name) { + kfree(target); + return NULL; + } + + target->post_enable = data->post_enable; + + return target; +} + +static void pwrseq_target_free(struct pwrseq_target *target) +{ + pwrseq_unit_put(target->unit); + kfree_const(target->name); + kfree(target); +} + +/** + * struct pwrseq_device - Private power sequencing data. + * @dev: Device struct associated with this sequencer. + * @id: Device ID. + * @owner: Prevents removal of active power sequencing providers. + * @rw_lock: Protects the device from being unregistered while in use. + * @state_lock: Prevents multiple users running the power sequence at the same + * time. + * @match: Power sequencer matching callback. + * @targets: List of targets exposed by this sequencer. + * @units: List of all units supported by this sequencer. + */ +struct pwrseq_device { + struct device dev; + int id; + struct module *owner; + struct rw_semaphore rw_lock; + struct mutex state_lock; + pwrseq_match_func match; + struct list_head targets; + struct list_head units; +}; + +static struct pwrseq_device *to_pwrseq_device(struct device *dev) +{ + return container_of(dev, struct pwrseq_device, dev); +} + +static struct pwrseq_device *pwrseq_device_get(struct pwrseq_device *pwrseq) +{ + get_device(&pwrseq->dev); + + return pwrseq; +} + +static void pwrseq_device_put(struct pwrseq_device *pwrseq) +{ + put_device(&pwrseq->dev); +} + +/** + * struct pwrseq_desc - Wraps access to the pwrseq_device and ensures that one + * user cannot break the reference counting for others. + * @pwrseq: Reference to the power sequencing device. + * @target: Reference to the target this descriptor allows to control. + * @powered_on: Power state set by the holder of the descriptor (not necessarily + * corresponding to the actual power state of the device). 
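+ *
+ * The per-descriptor flag makes pwrseq_power_on() and pwrseq_power_off()
+ * idempotent for a single user: repeated calls return 0 early and do not
+ * touch the enable counts of the underlying units.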
+ */ +struct pwrseq_desc { + struct pwrseq_device *pwrseq; + struct pwrseq_target *target; + bool powered_on; +}; + +static const struct bus_type pwrseq_bus = { + .name = "pwrseq", +}; + +static void pwrseq_release(struct device *dev) +{ + struct pwrseq_device *pwrseq = to_pwrseq_device(dev); + struct pwrseq_target *target, *pos; + + list_for_each_entry_safe(target, pos, &pwrseq->targets, list) { + list_del(&target->list); + pwrseq_target_free(target); + } + + mutex_destroy(&pwrseq->state_lock); + ida_free(&pwrseq_ida, pwrseq->id); + kfree(pwrseq); +} + +static const struct device_type pwrseq_device_type = { + .name = "power_sequencer", + .release = pwrseq_release, +}; + +static int pwrseq_check_unit_deps(const struct pwrseq_unit_data *data, + struct radix_tree_root *visited_units) +{ + const struct pwrseq_unit_data *tmp, **cur; + int ret; + + ret = radix_tree_insert(visited_units, (unsigned long)data, + (void *)data); + if (ret) + return ret; + + for (cur = data->deps; cur && *cur; cur++) { + tmp = radix_tree_lookup(visited_units, (unsigned long)*cur); + if (tmp) { + WARN(1, "Circular dependency in power sequencing flow detected!\n"); + return -EINVAL; + } + + ret = pwrseq_check_unit_deps(*cur, visited_units); + if (ret) + return ret; + } + + return 0; +} + +static int pwrseq_check_target_deps(const struct pwrseq_target_data *data) +{ + struct radix_tree_root visited_units; + struct radix_tree_iter iter; + void __rcu **slot; + int ret; + + if (!data->unit) + return -EINVAL; + + INIT_RADIX_TREE(&visited_units, GFP_KERNEL); + ret = pwrseq_check_unit_deps(data->unit, &visited_units); + radix_tree_for_each_slot(slot, &visited_units, &iter, 0) + radix_tree_delete(&visited_units, iter.index); + + return ret; +} + +static int pwrseq_unit_setup_deps(const struct pwrseq_unit_data **data, + struct list_head *dep_list, + struct list_head *unit_list, + struct radix_tree_root *processed_units); + +static struct pwrseq_unit * +pwrseq_unit_setup(const struct pwrseq_unit_data *data, + struct list_head *unit_list, + struct radix_tree_root *processed_units) +{ + struct pwrseq_unit *unit; + int ret; + + unit = radix_tree_lookup(processed_units, (unsigned long)data); + if (unit) + return pwrseq_unit_get(unit); + + unit = pwrseq_unit_new(data); + if (!unit) + return ERR_PTR(-ENOMEM); + + if (data->deps) { + ret = pwrseq_unit_setup_deps(data->deps, &unit->deps, + unit_list, processed_units); + if (ret) { + pwrseq_unit_put(unit); + return ERR_PTR(ret); + } + } + + ret = radix_tree_insert(processed_units, (unsigned long)data, unit); + if (ret) { + pwrseq_unit_put(unit); + return ERR_PTR(ret); + } + + list_add_tail(&unit->list, unit_list); + + return unit; +} + +static int pwrseq_unit_setup_deps(const struct pwrseq_unit_data **data, + struct list_head *dep_list, + struct list_head *unit_list, + struct radix_tree_root *processed_units) +{ + const struct pwrseq_unit_data *pos; + struct pwrseq_unit_dep *dep; + struct pwrseq_unit *unit; + int i; + + for (i = 0; data[i]; i++) { + pos = data[i]; + + unit = pwrseq_unit_setup(pos, unit_list, processed_units); + if (IS_ERR(unit)) + return PTR_ERR(unit); + + dep = pwrseq_unit_dep_new(unit); + if (!dep) { + pwrseq_unit_put(unit); + return -ENOMEM; + } + + list_add_tail(&dep->list, dep_list); + } + + return 0; +} + +static int pwrseq_do_setup_targets(const struct pwrseq_target_data **data, + struct pwrseq_device *pwrseq, + struct radix_tree_root *processed_units) +{ + const struct pwrseq_target_data *pos; + struct pwrseq_target *target; + int ret, i; + + for (i = 0; data[i]; 
i++) { + pos = data[i]; + + ret = pwrseq_check_target_deps(pos); + if (ret) + return ret; + + target = pwrseq_target_new(pos); + if (!target) + return -ENOMEM; + + target->unit = pwrseq_unit_setup(pos->unit, &pwrseq->units, + processed_units); + if (IS_ERR(target->unit)) { + ret = PTR_ERR(target->unit); + pwrseq_target_free(target); + return ret; + } + + list_add_tail(&target->list, &pwrseq->targets); + } + + return 0; +} + +static int pwrseq_setup_targets(const struct pwrseq_target_data **targets, + struct pwrseq_device *pwrseq) +{ + struct radix_tree_root processed_units; + struct radix_tree_iter iter; + void __rcu **slot; + int ret; + + INIT_RADIX_TREE(&processed_units, GFP_KERNEL); + ret = pwrseq_do_setup_targets(targets, pwrseq, &processed_units); + radix_tree_for_each_slot(slot, &processed_units, &iter, 0) + radix_tree_delete(&processed_units, iter.index); + + return ret; +} + +/** + * pwrseq_device_register() - Register a new power sequencer. + * @config: Configuration of the new power sequencing device. + * + * The config structure is only used during the call and can be freed after + * the function returns. The config structure *must* have the parent device + * as well as the match() callback and at least one target set. + * + * Returns: + * Returns the address of the new pwrseq device or ERR_PTR() on failure. + */ +struct pwrseq_device * +pwrseq_device_register(const struct pwrseq_config *config) +{ + struct pwrseq_device *pwrseq; + int ret, id; + + if (!config->parent || !config->match || !config->targets || + !config->targets[0]) + return ERR_PTR(-EINVAL); + + pwrseq = kzalloc(sizeof(*pwrseq), GFP_KERNEL); + if (!pwrseq) + return ERR_PTR(-ENOMEM); + + pwrseq->dev.type = &pwrseq_device_type; + pwrseq->dev.bus = &pwrseq_bus; + pwrseq->dev.parent = config->parent; + device_set_node(&pwrseq->dev, dev_fwnode(config->parent)); + dev_set_drvdata(&pwrseq->dev, config->drvdata); + + id = ida_alloc(&pwrseq_ida, GFP_KERNEL); + if (id < 0) { + kfree(pwrseq); + return ERR_PTR(id); + } + + pwrseq->id = id; + + /* + * From this point onwards the device's release() callback is + * responsible for freeing resources. + */ + device_initialize(&pwrseq->dev); + + ret = dev_set_name(&pwrseq->dev, "pwrseq.%d", pwrseq->id); + if (ret) + goto err_put_pwrseq; + + pwrseq->owner = config->owner ?: THIS_MODULE; + pwrseq->match = config->match; + + init_rwsem(&pwrseq->rw_lock); + mutex_init(&pwrseq->state_lock); + INIT_LIST_HEAD(&pwrseq->targets); + INIT_LIST_HEAD(&pwrseq->units); + + ret = pwrseq_setup_targets(config->targets, pwrseq); + if (ret) + goto err_put_pwrseq; + + scoped_guard(rwsem_write, &pwrseq_sem) { + ret = device_add(&pwrseq->dev); + if (ret) + goto err_put_pwrseq; + } + + return pwrseq; + +err_put_pwrseq: + pwrseq_device_put(pwrseq); + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(pwrseq_device_register); + +/** + * pwrseq_device_unregister() - Unregister the power sequencer. + * @pwrseq: Power sequencer to unregister. 
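+ *
+ * Consumers still holding a descriptor keep the underlying struct device
+ * alive via its reference count: this function deletes the device from the
+ * bus, but the final cleanup only runs from the release() callback once the
+ * last reference is dropped.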
+ */ +void pwrseq_device_unregister(struct pwrseq_device *pwrseq) +{ + struct device *dev = &pwrseq->dev; + struct pwrseq_target *target; + + scoped_guard(mutex, &pwrseq->state_lock) { + guard(rwsem_write)(&pwrseq->rw_lock); + + list_for_each_entry(target, &pwrseq->targets, list) + WARN(target->unit->enable_count, + "REMOVING POWER SEQUENCER WITH ACTIVE USERS\n"); + + guard(rwsem_write)(&pwrseq_sem); + + device_del(dev); + } + + pwrseq_device_put(pwrseq); +} +EXPORT_SYMBOL_GPL(pwrseq_device_unregister); + +static void devm_pwrseq_device_unregister(void *data) +{ + struct pwrseq_device *pwrseq = data; + + pwrseq_device_unregister(pwrseq); +} + +/** + * devm_pwrseq_device_register() - Managed variant of pwrseq_device_register(). + * @dev: Managing device. + * @config: Configuration of the new power sequencing device. + * + * Returns: + * Returns the address of the new pwrseq device or ERR_PTR() on failure. + */ +struct pwrseq_device * +devm_pwrseq_device_register(struct device *dev, + const struct pwrseq_config *config) +{ + struct pwrseq_device *pwrseq; + int ret; + + pwrseq = pwrseq_device_register(config); + if (IS_ERR(pwrseq)) + return pwrseq; + + ret = devm_add_action_or_reset(dev, devm_pwrseq_device_unregister, + pwrseq); + if (ret) + return ERR_PTR(ret); + + return pwrseq; +} +EXPORT_SYMBOL_GPL(devm_pwrseq_device_register); + +/** + * pwrseq_device_get_drvdata() - Get the driver private data associated with + * this sequencer. + * @pwrseq: Power sequencer object. + * + * Returns: + * Address of the private driver data. + */ +void *pwrseq_device_get_drvdata(struct pwrseq_device *pwrseq) +{ + return dev_get_drvdata(&pwrseq->dev); +} +EXPORT_SYMBOL_GPL(pwrseq_device_get_drvdata); + +struct pwrseq_match_data { + struct pwrseq_desc *desc; + struct device *dev; + const char *target; +}; + +static int pwrseq_match_device(struct device *pwrseq_dev, void *data) +{ + struct pwrseq_device *pwrseq = to_pwrseq_device(pwrseq_dev); + struct pwrseq_match_data *match_data = data; + struct pwrseq_target *target; + int ret; + + lockdep_assert_held_read(&pwrseq_sem); + + guard(rwsem_read)(&pwrseq->rw_lock); + if (!device_is_registered(&pwrseq->dev)) + return 0; + + ret = pwrseq->match(pwrseq, match_data->dev); + if (ret <= 0) + return ret; + + /* We got the matching device, let's find the right target. */ + list_for_each_entry(target, &pwrseq->targets, list) { + if (strcmp(target->name, match_data->target)) + continue; + + match_data->desc->target = target; + } + + /* + * This device does not have this target. No point in deferring as it + * will not get a new target dynamically later. + */ + if (!match_data->desc->target) + return -ENOENT; + + if (!try_module_get(pwrseq->owner)) + return -EPROBE_DEFER; + + match_data->desc->pwrseq = pwrseq_device_get(pwrseq); + + return 1; +} + +/** + * pwrseq_get() - Get the power sequencer associated with this device. + * @dev: Device for which to get the sequencer. + * @target: Name of the target exposed by the sequencer this device wants to + * reach. + * + * Returns: + * New power sequencer descriptor for use by the consumer driver or ERR_PTR() + * on failure. 
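+ *
+ * A minimal consumer sketch (assuming the sequencer exposes a "bluetooth"
+ * target, as the Qualcomm WCN provider added in this commit does):
+ *
+ *	struct pwrseq_desc *desc;
+ *	int ret;
+ *
+ *	desc = pwrseq_get(dev, "bluetooth");
+ *	if (IS_ERR(desc))
+ *		return PTR_ERR(desc);
+ *
+ *	ret = pwrseq_power_on(desc);
+ *	if (ret)
+ *		return ret;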
+ */
+struct pwrseq_desc *pwrseq_get(struct device *dev, const char *target)
+{
+	struct pwrseq_match_data match_data;
+	int ret;
+
+	struct pwrseq_desc *desc __free(kfree) = kzalloc(sizeof(*desc),
+							 GFP_KERNEL);
+	if (!desc)
+		return ERR_PTR(-ENOMEM);
+
+	match_data.desc = desc;
+	match_data.dev = dev;
+	match_data.target = target;
+
+	guard(rwsem_read)(&pwrseq_sem);
+
+	ret = bus_for_each_dev(&pwrseq_bus, NULL, &match_data,
+			       pwrseq_match_device);
+	if (ret < 0)
+		return ERR_PTR(ret);
+	if (ret == 0)
+		/* No device matched. */
+		return ERR_PTR(-EPROBE_DEFER);
+
+	return_ptr(desc);
+}
+EXPORT_SYMBOL_GPL(pwrseq_get);
+
+/**
+ * pwrseq_put() - Release the power sequencer descriptor.
+ * @desc: Descriptor to release.
+ */
+void pwrseq_put(struct pwrseq_desc *desc)
+{
+	struct pwrseq_device *pwrseq;
+
+	if (!desc)
+		return;
+
+	pwrseq = desc->pwrseq;
+
+	if (desc->powered_on)
+		pwrseq_power_off(desc);
+
+	kfree(desc);
+	module_put(pwrseq->owner);
+	pwrseq_device_put(pwrseq);
+}
+EXPORT_SYMBOL_GPL(pwrseq_put);
+
+static void devm_pwrseq_put(void *data)
+{
+	struct pwrseq_desc *desc = data;
+
+	pwrseq_put(desc);
+}
+
+/**
+ * devm_pwrseq_get() - Managed variant of pwrseq_get().
+ * @dev: Device for which to get the sequencer and which also manages its
+ *       lifetime.
+ * @target: Name of the target exposed by the sequencer this device wants to
+ *          reach.
+ *
+ * Returns:
+ * New power sequencer descriptor for use by the consumer driver or ERR_PTR()
+ * on failure.
+ */
+struct pwrseq_desc *devm_pwrseq_get(struct device *dev, const char *target)
+{
+	struct pwrseq_desc *desc;
+	int ret;
+
+	desc = pwrseq_get(dev, target);
+	if (IS_ERR(desc))
+		return desc;
+
+	ret = devm_add_action_or_reset(dev, devm_pwrseq_put, desc);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return desc;
+}
+EXPORT_SYMBOL_GPL(devm_pwrseq_get);
+
+static int pwrseq_unit_enable(struct pwrseq_device *pwrseq,
+			      struct pwrseq_unit *unit);
+static int pwrseq_unit_disable(struct pwrseq_device *pwrseq,
+			       struct pwrseq_unit *unit);
+
+static int pwrseq_unit_enable_deps(struct pwrseq_device *pwrseq,
+				   struct list_head *list)
+{
+	struct pwrseq_unit_dep *pos;
+	int ret = 0;
+
+	list_for_each_entry(pos, list, list) {
+		ret = pwrseq_unit_enable(pwrseq, pos->unit);
+		if (ret) {
+			list_for_each_entry_continue_reverse(pos, list, list)
+				pwrseq_unit_disable(pwrseq, pos->unit);
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static int pwrseq_unit_disable_deps(struct pwrseq_device *pwrseq,
+				    struct list_head *list)
+{
+	struct pwrseq_unit_dep *pos;
+	int ret = 0;
+
+	list_for_each_entry_reverse(pos, list, list) {
+		ret = pwrseq_unit_disable(pwrseq, pos->unit);
+		if (ret) {
+			list_for_each_entry_continue(pos, list, list)
+				pwrseq_unit_enable(pwrseq, pos->unit);
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static int pwrseq_unit_enable(struct pwrseq_device *pwrseq,
+			      struct pwrseq_unit *unit)
+{
+	int ret;
+
+	lockdep_assert_held_read(&pwrseq->rw_lock);
+	lockdep_assert_held(&pwrseq->state_lock);
+
+	if (unit->enable_count != 0) {
+		unit->enable_count++;
+		return 0;
+	}
+
+	ret = pwrseq_unit_enable_deps(pwrseq, &unit->deps);
+	if (ret) {
+		dev_err(&pwrseq->dev,
+			"Failed to enable dependencies before power-on for unit '%s': %d\n",
+			unit->name, ret);
+		return ret;
+	}
+
+	if (unit->enable) {
+		ret = unit->enable(pwrseq);
+		if (ret) {
+			dev_err(&pwrseq->dev,
+				"Failed to enable unit '%s': %d\n",
+				unit->name, ret);
+			pwrseq_unit_disable_deps(pwrseq, &unit->deps);
+			return ret;
+		}
+	}
+
+	unit->enable_count++;
+
+	return 0;
+}
+
+static int
pwrseq_unit_disable(struct pwrseq_device *pwrseq,
+			       struct pwrseq_unit *unit)
+{
+	int ret;
+
+	lockdep_assert_held_read(&pwrseq->rw_lock);
+	lockdep_assert_held(&pwrseq->state_lock);
+
+	if (unit->enable_count == 0) {
+		WARN(1, "Unmatched power-off for unit '%s'\n",
+		     unit->name);
+		return -EBUSY;
+	}
+
+	if (unit->enable_count != 1) {
+		unit->enable_count--;
+		return 0;
+	}
+
+	if (unit->disable) {
+		ret = unit->disable(pwrseq);
+		if (ret) {
+			dev_err(&pwrseq->dev,
+				"Failed to disable unit '%s': %d\n",
+				unit->name, ret);
+			return ret;
+		}
+	}
+
+	ret = pwrseq_unit_disable_deps(pwrseq, &unit->deps);
+	if (ret) {
+		dev_err(&pwrseq->dev,
+			"Failed to disable dependencies after power-off for unit '%s': %d\n",
+			unit->name, ret);
+		if (unit->enable)
+			unit->enable(pwrseq);
+		return ret;
+	}
+
+	unit->enable_count--;
+
+	return 0;
+}
+
+/**
+ * pwrseq_power_on() - Issue a power-on request on behalf of the consumer
+ *                     device.
+ * @desc: Descriptor referencing the power sequencer.
+ *
+ * This function tells the power sequencer that the consumer wants to be
+ * powered up. The sequencer may already have powered up the device, in which
+ * case the function returns 0. If the power-up sequence is already in
+ * progress, the function will block until it's done and return 0. If this is
+ * the first request, the device will be powered up.
+ *
+ * Returns:
+ * 0 on success, negative error number on failure.
+ */
+int pwrseq_power_on(struct pwrseq_desc *desc)
+{
+	struct pwrseq_device *pwrseq;
+	struct pwrseq_target *target;
+	struct pwrseq_unit *unit;
+	int ret;
+
+	might_sleep();
+
+	if (!desc || desc->powered_on)
+		return 0;
+
+	pwrseq = desc->pwrseq;
+	target = desc->target;
+	unit = target->unit;
+
+	guard(rwsem_read)(&pwrseq->rw_lock);
+	if (!device_is_registered(&pwrseq->dev))
+		return -ENODEV;
+
+	scoped_guard(mutex, &pwrseq->state_lock) {
+		ret = pwrseq_unit_enable(pwrseq, unit);
+		if (!ret)
+			desc->powered_on = true;
+	}
+
+	/* Don't run the post-enable hook if the power-up itself failed. */
+	if (ret)
+		return ret;
+
+	if (target->post_enable) {
+		ret = target->post_enable(pwrseq);
+		if (ret) {
+			/* pwrseq_unit_disable() expects the state lock held. */
+			scoped_guard(mutex, &pwrseq->state_lock) {
+				pwrseq_unit_disable(pwrseq, unit);
+				desc->powered_on = false;
+			}
+		}
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(pwrseq_power_on);
+
+/**
+ * pwrseq_power_off() - Issue a power-off request on behalf of the consumer
+ *                      device.
+ * @desc: Descriptor referencing the power sequencer.
+ *
+ * This undoes the effects of pwrseq_power_on(). It issues a power-off request
+ * on behalf of the consumer, and when the last remaining user does so, the
+ * power-down sequence will be started. If one is in progress, the function
+ * will block until it's complete and then return.
+ *
+ * Returns:
+ * 0 on success, negative error number on failure.
+ */
+int pwrseq_power_off(struct pwrseq_desc *desc)
+{
+	struct pwrseq_device *pwrseq;
+	struct pwrseq_unit *unit;
+	int ret;
+
+	might_sleep();
+
+	if (!desc || !desc->powered_on)
+		return 0;
+
+	pwrseq = desc->pwrseq;
+	unit = desc->target->unit;
+
+	guard(rwsem_read)(&pwrseq->rw_lock);
+	if (!device_is_registered(&pwrseq->dev))
+		return -ENODEV;
+
+	guard(mutex)(&pwrseq->state_lock);
+
+	ret = pwrseq_unit_disable(pwrseq, unit);
+	if (!ret)
+		desc->powered_on = false;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(pwrseq_power_off);
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+
+struct pwrseq_debugfs_count_ctx {
+	struct device *dev;
+	loff_t index;
+};
+
+static int pwrseq_debugfs_seq_count(struct device *dev, void *data)
+{
+	struct pwrseq_debugfs_count_ctx *ctx = data;
+
+	ctx->dev = dev;
+
+	return ctx->index-- ?
0 : 1; +} + +static void *pwrseq_debugfs_seq_start(struct seq_file *seq, loff_t *pos) +{ + struct pwrseq_debugfs_count_ctx ctx; + + ctx.dev = NULL; + ctx.index = *pos; + + /* + * We're holding the lock for the entire printout so no need to fiddle + * with device reference count. + */ + down_read(&pwrseq_sem); + + bus_for_each_dev(&pwrseq_bus, NULL, &ctx, pwrseq_debugfs_seq_count); + if (!ctx.index) + return NULL; + + return ctx.dev; +} + +static void *pwrseq_debugfs_seq_next(struct seq_file *seq, void *data, + loff_t *pos) +{ + struct device *curr = data; + + ++*pos; + + struct device *next __free(put_device) = + bus_find_next_device(&pwrseq_bus, curr); + return next; +} + +static void pwrseq_debugfs_seq_show_target(struct seq_file *seq, + struct pwrseq_target *target) +{ + seq_printf(seq, " target: [%s] (target unit: [%s])\n", + target->name, target->unit->name); +} + +static void pwrseq_debugfs_seq_show_unit(struct seq_file *seq, + struct pwrseq_unit *unit) +{ + struct pwrseq_unit_dep *ref; + + seq_printf(seq, " unit: [%s] - enable count: %u\n", + unit->name, unit->enable_count); + + if (list_empty(&unit->deps)) + return; + + seq_puts(seq, " dependencies:\n"); + list_for_each_entry(ref, &unit->deps, list) + seq_printf(seq, " [%s]\n", ref->unit->name); +} + +static int pwrseq_debugfs_seq_show(struct seq_file *seq, void *data) +{ + struct device *dev = data; + struct pwrseq_device *pwrseq = to_pwrseq_device(dev); + struct pwrseq_target *target; + struct pwrseq_unit *unit; + + seq_printf(seq, "%s:\n", dev_name(dev)); + + seq_puts(seq, " targets:\n"); + list_for_each_entry(target, &pwrseq->targets, list) + pwrseq_debugfs_seq_show_target(seq, target); + + seq_puts(seq, " units:\n"); + list_for_each_entry(unit, &pwrseq->units, list) + pwrseq_debugfs_seq_show_unit(seq, unit); + + return 0; +} + +static void pwrseq_debugfs_seq_stop(struct seq_file *seq, void *data) +{ + up_read(&pwrseq_sem); +} + +static const struct seq_operations pwrseq_debugfs_sops = { + .start = pwrseq_debugfs_seq_start, + .next = pwrseq_debugfs_seq_next, + .show = pwrseq_debugfs_seq_show, + .stop = pwrseq_debugfs_seq_stop, +}; +DEFINE_SEQ_ATTRIBUTE(pwrseq_debugfs); + +static struct dentry *pwrseq_debugfs_dentry; + +#endif /* CONFIG_DEBUG_FS */ + +static int __init pwrseq_init(void) +{ + int ret; + + ret = bus_register(&pwrseq_bus); + if (ret) { + pr_err("Failed to register the power sequencer bus\n"); + return ret; + } + +#if IS_ENABLED(CONFIG_DEBUG_FS) + pwrseq_debugfs_dentry = debugfs_create_file("pwrseq", 0444, NULL, NULL, + &pwrseq_debugfs_fops); +#endif /* CONFIG_DEBUG_FS */ + + return 0; +} +subsys_initcall(pwrseq_init); + +static void __exit pwrseq_exit(void) +{ +#if IS_ENABLED(CONFIG_DEBUG_FS) + debugfs_remove_recursive(pwrseq_debugfs_dentry); +#endif /* CONFIG_DEBUG_FS */ + + bus_unregister(&pwrseq_bus); +} +module_exit(pwrseq_exit); + +MODULE_AUTHOR("Bartosz Golaszewski <bartosz.golaszewski@linaro.org>"); +MODULE_DESCRIPTION("Power Sequencing subsystem core"); +MODULE_LICENSE("GPL"); diff --git a/drivers/power/sequencing/pwrseq-qcom-wcn.c b/drivers/power/sequencing/pwrseq-qcom-wcn.c new file mode 100644 index 000000000000..42dacfda745e --- /dev/null +++ b/drivers/power/sequencing/pwrseq-qcom-wcn.c @@ -0,0 +1,336 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2024 Linaro Ltd. 
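+ *
+ * Driver for the PMU module of Qualcomm WCN Bluetooth/WLAN chipsets. It
+ * exposes "bluetooth" and "wlan" power sequencing targets whose final units
+ * depend on shared regulator and clock units, so whichever subsystem probes
+ * first runs the common part of the power-up sequence.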
+ */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/gpio/consumer.h> +#include <linux/jiffies.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/regulator/consumer.h> +#include <linux/pwrseq/provider.h> +#include <linux/string.h> +#include <linux/types.h> + +struct pwrseq_qcom_wcn_pdata { + const char *const *vregs; + size_t num_vregs; + unsigned int pwup_delay_ms; + unsigned int gpio_enable_delay_ms; +}; + +struct pwrseq_qcom_wcn_ctx { + struct pwrseq_device *pwrseq; + struct device_node *of_node; + const struct pwrseq_qcom_wcn_pdata *pdata; + struct regulator_bulk_data *regs; + struct gpio_desc *bt_gpio; + struct gpio_desc *wlan_gpio; + struct clk *clk; + unsigned long last_gpio_enable_jf; +}; + +static void pwrseq_qcom_wcn_ensure_gpio_delay(struct pwrseq_qcom_wcn_ctx *ctx) +{ + unsigned long diff_jiffies; + unsigned int diff_msecs; + + if (!ctx->pdata->gpio_enable_delay_ms) + return; + + diff_jiffies = jiffies - ctx->last_gpio_enable_jf; + diff_msecs = jiffies_to_msecs(diff_jiffies); + + if (diff_msecs < ctx->pdata->gpio_enable_delay_ms) + msleep(ctx->pdata->gpio_enable_delay_ms - diff_msecs); +} + +static int pwrseq_qcom_wcn_vregs_enable(struct pwrseq_device *pwrseq) +{ + struct pwrseq_qcom_wcn_ctx *ctx = pwrseq_device_get_drvdata(pwrseq); + + return regulator_bulk_enable(ctx->pdata->num_vregs, ctx->regs); +} + +static int pwrseq_qcom_wcn_vregs_disable(struct pwrseq_device *pwrseq) +{ + struct pwrseq_qcom_wcn_ctx *ctx = pwrseq_device_get_drvdata(pwrseq); + + return regulator_bulk_disable(ctx->pdata->num_vregs, ctx->regs); +} + +static const struct pwrseq_unit_data pwrseq_qcom_wcn_vregs_unit_data = { + .name = "regulators-enable", + .enable = pwrseq_qcom_wcn_vregs_enable, + .disable = pwrseq_qcom_wcn_vregs_disable, +}; + +static int pwrseq_qcom_wcn_clk_enable(struct pwrseq_device *pwrseq) +{ + struct pwrseq_qcom_wcn_ctx *ctx = pwrseq_device_get_drvdata(pwrseq); + + return clk_prepare_enable(ctx->clk); +} + +static int pwrseq_qcom_wcn_clk_disable(struct pwrseq_device *pwrseq) +{ + struct pwrseq_qcom_wcn_ctx *ctx = pwrseq_device_get_drvdata(pwrseq); + + clk_disable_unprepare(ctx->clk); + + return 0; +} + +static const struct pwrseq_unit_data pwrseq_qcom_wcn_clk_unit_data = { + .name = "clock-enable", + .enable = pwrseq_qcom_wcn_clk_enable, + .disable = pwrseq_qcom_wcn_clk_disable, +}; + +static const struct pwrseq_unit_data *pwrseq_qcom_wcn_unit_deps[] = { + &pwrseq_qcom_wcn_vregs_unit_data, + &pwrseq_qcom_wcn_clk_unit_data, + NULL +}; + +static int pwrseq_qcom_wcn_bt_enable(struct pwrseq_device *pwrseq) +{ + struct pwrseq_qcom_wcn_ctx *ctx = pwrseq_device_get_drvdata(pwrseq); + + pwrseq_qcom_wcn_ensure_gpio_delay(ctx); + gpiod_set_value_cansleep(ctx->bt_gpio, 1); + ctx->last_gpio_enable_jf = jiffies; + + return 0; +} + +static int pwrseq_qcom_wcn_bt_disable(struct pwrseq_device *pwrseq) +{ + struct pwrseq_qcom_wcn_ctx *ctx = pwrseq_device_get_drvdata(pwrseq); + + gpiod_set_value_cansleep(ctx->bt_gpio, 0); + + return 0; +} + +static const struct pwrseq_unit_data pwrseq_qcom_wcn_bt_unit_data = { + .name = "bluetooth-enable", + .deps = pwrseq_qcom_wcn_unit_deps, + .enable = pwrseq_qcom_wcn_bt_enable, + .disable = pwrseq_qcom_wcn_bt_disable, +}; + +static int pwrseq_qcom_wcn_wlan_enable(struct pwrseq_device *pwrseq) +{ + struct pwrseq_qcom_wcn_ctx *ctx = pwrseq_device_get_drvdata(pwrseq); + + pwrseq_qcom_wcn_ensure_gpio_delay(ctx); + 
gpiod_set_value_cansleep(ctx->wlan_gpio, 1);
+	ctx->last_gpio_enable_jf = jiffies;
+
+	return 0;
+}
+
+static int pwrseq_qcom_wcn_wlan_disable(struct pwrseq_device *pwrseq)
+{
+	struct pwrseq_qcom_wcn_ctx *ctx = pwrseq_device_get_drvdata(pwrseq);
+
+	gpiod_set_value_cansleep(ctx->wlan_gpio, 0);
+
+	return 0;
+}
+
+static const struct pwrseq_unit_data pwrseq_qcom_wcn_wlan_unit_data = {
+	.name = "wlan-enable",
+	.deps = pwrseq_qcom_wcn_unit_deps,
+	.enable = pwrseq_qcom_wcn_wlan_enable,
+	.disable = pwrseq_qcom_wcn_wlan_disable,
+};
+
+static int pwrseq_qcom_wcn_pwup_delay(struct pwrseq_device *pwrseq)
+{
+	struct pwrseq_qcom_wcn_ctx *ctx = pwrseq_device_get_drvdata(pwrseq);
+
+	if (ctx->pdata->pwup_delay_ms)
+		msleep(ctx->pdata->pwup_delay_ms);
+
+	return 0;
+}
+
+static const struct pwrseq_target_data pwrseq_qcom_wcn_bt_target_data = {
+	.name = "bluetooth",
+	.unit = &pwrseq_qcom_wcn_bt_unit_data,
+	.post_enable = pwrseq_qcom_wcn_pwup_delay,
+};
+
+static const struct pwrseq_target_data pwrseq_qcom_wcn_wlan_target_data = {
+	.name = "wlan",
+	.unit = &pwrseq_qcom_wcn_wlan_unit_data,
+	.post_enable = pwrseq_qcom_wcn_pwup_delay,
+};
+
+static const struct pwrseq_target_data *pwrseq_qcom_wcn_targets[] = {
+	&pwrseq_qcom_wcn_bt_target_data,
+	&pwrseq_qcom_wcn_wlan_target_data,
+	NULL
+};
+
+static const char *const pwrseq_qca6390_vregs[] = {
+	"vddio",
+	"vddaon",
+	"vddpmu",
+	"vddrfa0p95",
+	"vddrfa1p3",
+	"vddrfa1p9",
+	"vddpcie1p3",
+	"vddpcie1p9",
+};
+
+static const struct pwrseq_qcom_wcn_pdata pwrseq_qca6390_of_data = {
+	.vregs = pwrseq_qca6390_vregs,
+	.num_vregs = ARRAY_SIZE(pwrseq_qca6390_vregs),
+	.pwup_delay_ms = 60,
+	.gpio_enable_delay_ms = 100,
+};
+
+static const char *const pwrseq_wcn7850_vregs[] = {
+	"vdd",
+	"vddio",
+	"vddio1p2",
+	"vddaon",
+	"vdddig",
+	"vddrfa1p2",
+	"vddrfa1p8",
+};
+
+static const struct pwrseq_qcom_wcn_pdata pwrseq_wcn7850_of_data = {
+	.vregs = pwrseq_wcn7850_vregs,
+	.num_vregs = ARRAY_SIZE(pwrseq_wcn7850_vregs),
+	.pwup_delay_ms = 50,
+};
+
+static int pwrseq_qcom_wcn_match(struct pwrseq_device *pwrseq,
+				 struct device *dev)
+{
+	struct pwrseq_qcom_wcn_ctx *ctx = pwrseq_device_get_drvdata(pwrseq);
+	struct device_node *dev_node = dev->of_node;
+
+	/*
+	 * The PMU supplies power to the Bluetooth and WLAN modules. Both
+	 * consume the PMU AON output, so check the presence of the
+	 * 'vddaon-supply' property and whether it leads us to the right
+	 * device.
+	 */
+	if (!of_property_present(dev_node, "vddaon-supply"))
+		return 0;
+
+	struct device_node *reg_node __free(device_node) =
+			of_parse_phandle(dev_node, "vddaon-supply", 0);
+	if (!reg_node)
+		return 0;
+
+	/*
+	 * `reg_node` is the PMU AON regulator; its parent is the `regulators`
+	 * node and its grandparent is the PMU device node that we're
+	 * looking for.
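+	 *
+	 * A device-tree sketch of the expected topology (node and label
+	 * names are illustrative only):
+	 *
+	 *	pmu {
+	 *		compatible = "qcom,qca6390-pmu";
+	 *		regulators {
+	 *			vreg_pmu_aon: ldo0 { };
+	 *		};
+	 *	};
+	 *
+	 *	wifi@0 {
+	 *		vddaon-supply = <&vreg_pmu_aon>;
+	 *	};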
+ */ + if (!reg_node->parent || !reg_node->parent->parent || + reg_node->parent->parent != ctx->of_node) + return 0; + + return 1; +} + +static int pwrseq_qcom_wcn_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct pwrseq_qcom_wcn_ctx *ctx; + struct pwrseq_config config; + int i, ret; + + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->of_node = dev->of_node; + + ctx->pdata = of_device_get_match_data(dev); + if (!ctx->pdata) + return dev_err_probe(dev, -ENODEV, + "Failed to obtain platform data\n"); + + ctx->regs = devm_kcalloc(dev, ctx->pdata->num_vregs, + sizeof(*ctx->regs), GFP_KERNEL); + if (!ctx->regs) + return -ENOMEM; + + for (i = 0; i < ctx->pdata->num_vregs; i++) + ctx->regs[i].supply = ctx->pdata->vregs[i]; + + ret = devm_regulator_bulk_get(dev, ctx->pdata->num_vregs, ctx->regs); + if (ret < 0) + return dev_err_probe(dev, ret, + "Failed to get all regulators\n"); + + ctx->bt_gpio = devm_gpiod_get_optional(dev, "bt-enable", GPIOD_OUT_LOW); + if (IS_ERR(ctx->bt_gpio)) + return dev_err_probe(dev, PTR_ERR(ctx->bt_gpio), + "Failed to get the Bluetooth enable GPIO\n"); + + ctx->wlan_gpio = devm_gpiod_get_optional(dev, "wlan-enable", + GPIOD_OUT_LOW); + if (IS_ERR(ctx->wlan_gpio)) + return dev_err_probe(dev, PTR_ERR(ctx->wlan_gpio), + "Failed to get the WLAN enable GPIO\n"); + + ctx->clk = devm_clk_get_optional(dev, NULL); + if (IS_ERR(ctx->clk)) + return dev_err_probe(dev, PTR_ERR(ctx->clk), + "Failed to get the reference clock\n"); + + memset(&config, 0, sizeof(config)); + + config.parent = dev; + config.owner = THIS_MODULE; + config.drvdata = ctx; + config.match = pwrseq_qcom_wcn_match; + config.targets = pwrseq_qcom_wcn_targets; + + ctx->pwrseq = devm_pwrseq_device_register(dev, &config); + if (IS_ERR(ctx->pwrseq)) + return dev_err_probe(dev, PTR_ERR(ctx->pwrseq), + "Failed to register the power sequencer\n"); + + return 0; +} + +static const struct of_device_id pwrseq_qcom_wcn_of_match[] = { + { + .compatible = "qcom,qca6390-pmu", + .data = &pwrseq_qca6390_of_data, + }, + { + .compatible = "qcom,wcn7850-pmu", + .data = &pwrseq_wcn7850_of_data, + }, + { } +}; +MODULE_DEVICE_TABLE(of, pwrseq_qcom_wcn_of_match); + +static struct platform_driver pwrseq_qcom_wcn_driver = { + .driver = { + .name = "pwrseq-qcom_wcn", + .of_match_table = pwrseq_qcom_wcn_of_match, + }, + .probe = pwrseq_qcom_wcn_probe, +}; +module_platform_driver(pwrseq_qcom_wcn_driver); + +MODULE_AUTHOR("Bartosz Golaszewski <bartosz.golaszewski@linaro.org>"); +MODULE_DESCRIPTION("Qualcomm WCN PMU power sequencing driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig index 3e31375491d5..f6321a42aa53 100644 --- a/drivers/power/supply/Kconfig +++ b/drivers/power/supply/Kconfig @@ -860,6 +860,18 @@ config CHARGER_CROS_PCHG the peripheral charge ports from the EC and converts that into power_supply properties. +config CHARGER_CROS_CONTROL + tristate "ChromeOS EC based charge control" + depends on MFD_CROS_EC_DEV + depends on ACPI_BATTERY + default MFD_CROS_EC_DEV + help + Say Y here to enable ChromeOS EC based battery charge control. + This driver can manage charge thresholds and behaviour. + + This driver can also be built as a module. If so, the module will be + called cros_charge-control. 
+
 config CHARGER_SC2731
 	tristate "Spreadtrum SC2731 charger driver"
 	depends on MFD_SC27XX_PMIC || COMPILE_TEST
diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile
index 58b567278034..31ca6653a564 100644
--- a/drivers/power/supply/Makefile
+++ b/drivers/power/supply/Makefile
@@ -100,6 +100,7 @@ obj-$(CONFIG_CHARGER_TPS65090)	+= tps65090-charger.o
 obj-$(CONFIG_CHARGER_TPS65217)	+= tps65217_charger.o
 obj-$(CONFIG_AXP288_FUEL_GAUGE) += axp288_fuel_gauge.o
 obj-$(CONFIG_AXP288_CHARGER)	+= axp288_charger.o
+obj-$(CONFIG_CHARGER_CROS_CONTROL)	+= cros_charge-control.o
 obj-$(CONFIG_CHARGER_CROS_USBPD)	+= cros_usbpd-charger.o
 obj-$(CONFIG_CHARGER_CROS_PCHG)	+= cros_peripheral_charger.o
 obj-$(CONFIG_CHARGER_SC2731)	+= sc2731_charger.o
diff --git a/drivers/power/supply/cros_charge-control.c b/drivers/power/supply/cros_charge-control.c
new file mode 100644
index 000000000000..17c53591ce19
--- /dev/null
+++ b/drivers/power/supply/cros_charge-control.c
@@ -0,0 +1,352 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * ChromeOS EC driver for charge control
+ *
+ * Copyright (C) 2024 Thomas Weißschuh <linux@weissschuh.net>
+ */
+#include <acpi/battery.h>
+#include <linux/container_of.h>
+#include <linux/dmi.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#define EC_CHARGE_CONTROL_BEHAVIOURS	(BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO) |		\
+					 BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE) |	\
+					 BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_FORCE_DISCHARGE))
+
+enum CROS_CHCTL_ATTR {
+	CROS_CHCTL_ATTR_START_THRESHOLD,
+	CROS_CHCTL_ATTR_END_THRESHOLD,
+	CROS_CHCTL_ATTR_CHARGE_BEHAVIOUR,
+	_CROS_CHCTL_ATTR_COUNT
+};
+
+/*
+ * Semantics of data *returned* from the EC API and Linux sysfs differ
+ * slightly; moreover, the v1 API cannot return any data.
+ * To match the expected sysfs API, data is never read back from the EC but
+ * cached in the driver.
+ *
+ * Changes to the EC bypassing the driver will not be reflected in sysfs.
+ * Any change to "charge_behaviour" will synchronize the EC with the driver state.
+ */
+
+struct cros_chctl_priv {
+	struct cros_ec_device *cros_ec;
+	struct acpi_battery_hook battery_hook;
+	struct power_supply *hooked_battery;
+	u8 cmd_version;
+
+	/* The callbacks need to access this priv structure.
+	 * As neither the struct device nor power_supply are under the driver's
+	 * control, embed the attributes within priv to use with container_of().
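+	 *
+	 * A sysfs handler then recovers priv without any global state, e.g.
+	 * (mirroring cros_chctl_attr_to_priv() below):
+	 *
+	 *	struct device_attribute *dev_attr =
+	 *		container_of(attr, struct device_attribute, attr);
+	 *	struct cros_chctl_priv *priv =
+	 *		container_of(dev_attr, struct cros_chctl_priv,
+	 *			     device_attrs[idx]);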
+ */ + struct device_attribute device_attrs[_CROS_CHCTL_ATTR_COUNT]; + struct attribute *attributes[_CROS_CHCTL_ATTR_COUNT]; + struct attribute_group group; + + enum power_supply_charge_behaviour current_behaviour; + u8 current_start_threshold, current_end_threshold; +}; + +static int cros_chctl_send_charge_control_cmd(struct cros_ec_device *cros_ec, + u8 cmd_version, struct ec_params_charge_control *req) +{ + static const u8 outsizes[] = { + [1] = offsetof(struct ec_params_charge_control, cmd), + [2] = sizeof(struct ec_params_charge_control), + [3] = sizeof(struct ec_params_charge_control), + }; + + struct { + struct cros_ec_command msg; + union { + struct ec_params_charge_control req; + struct ec_response_charge_control resp; + } __packed data; + } __packed buf = { + .msg = { + .command = EC_CMD_CHARGE_CONTROL, + .version = cmd_version, + .insize = 0, + .outsize = outsizes[cmd_version], + }, + .data.req = *req, + }; + + return cros_ec_cmd_xfer_status(cros_ec, &buf.msg); +} + +static int cros_chctl_configure_ec(struct cros_chctl_priv *priv) +{ + struct ec_params_charge_control req = {}; + + req.cmd = EC_CHARGE_CONTROL_CMD_SET; + + switch (priv->current_behaviour) { + case POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO: + req.mode = CHARGE_CONTROL_NORMAL; + break; + case POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE: + req.mode = CHARGE_CONTROL_IDLE; + break; + case POWER_SUPPLY_CHARGE_BEHAVIOUR_FORCE_DISCHARGE: + req.mode = CHARGE_CONTROL_DISCHARGE; + break; + default: + return -EINVAL; + } + + if (priv->current_behaviour == POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO && + !(priv->current_start_threshold == 0 && priv->current_end_threshold == 100)) { + req.sustain_soc.lower = priv->current_start_threshold; + req.sustain_soc.upper = priv->current_end_threshold; + } else { + /* Disable charging limits */ + req.sustain_soc.lower = -1; + req.sustain_soc.upper = -1; + } + + return cros_chctl_send_charge_control_cmd(priv->cros_ec, priv->cmd_version, &req); +} + +static struct cros_chctl_priv *cros_chctl_attr_to_priv(struct attribute *attr, + enum CROS_CHCTL_ATTR idx) +{ + struct device_attribute *dev_attr = container_of(attr, struct device_attribute, attr); + + return container_of(dev_attr, struct cros_chctl_priv, device_attrs[idx]); +} + +static ssize_t cros_chctl_store_threshold(struct device *dev, struct cros_chctl_priv *priv, + int is_end_threshold, const char *buf, size_t count) +{ + int ret, val; + + ret = kstrtoint(buf, 10, &val); + if (ret < 0) + return ret; + if (val < 0 || val > 100) + return -EINVAL; + + if (is_end_threshold) { + if (val <= priv->current_start_threshold) + return -EINVAL; + priv->current_end_threshold = val; + } else { + if (val >= priv->current_end_threshold) + return -EINVAL; + priv->current_start_threshold = val; + } + + if (priv->current_behaviour == POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO) { + ret = cros_chctl_configure_ec(priv); + if (ret < 0) + return ret; + } + + return count; +} + +static ssize_t charge_control_start_threshold_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(&attr->attr, + CROS_CHCTL_ATTR_START_THRESHOLD); + + return sysfs_emit(buf, "%u\n", (unsigned int)priv->current_start_threshold); +} + +static ssize_t charge_control_start_threshold_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(&attr->attr, + CROS_CHCTL_ATTR_START_THRESHOLD); + + return cros_chctl_store_threshold(dev, priv, 0, 
buf, count); +} + +static ssize_t charge_control_end_threshold_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(&attr->attr, + CROS_CHCTL_ATTR_END_THRESHOLD); + + return sysfs_emit(buf, "%u\n", (unsigned int)priv->current_end_threshold); +} + +static ssize_t charge_control_end_threshold_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(&attr->attr, + CROS_CHCTL_ATTR_END_THRESHOLD); + + return cros_chctl_store_threshold(dev, priv, 1, buf, count); +} + +static ssize_t charge_behaviour_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(&attr->attr, + CROS_CHCTL_ATTR_CHARGE_BEHAVIOUR); + + return power_supply_charge_behaviour_show(dev, EC_CHARGE_CONTROL_BEHAVIOURS, + priv->current_behaviour, buf); +} + +static ssize_t charge_behaviour_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(&attr->attr, + CROS_CHCTL_ATTR_CHARGE_BEHAVIOUR); + int ret; + + ret = power_supply_charge_behaviour_parse(EC_CHARGE_CONTROL_BEHAVIOURS, buf); + if (ret < 0) + return ret; + + priv->current_behaviour = ret; + + ret = cros_chctl_configure_ec(priv); + if (ret < 0) + return ret; + + return count; +} + +static umode_t cros_chtl_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n) +{ + struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(attr, n); + + if (priv->cmd_version < 2) { + if (n == CROS_CHCTL_ATTR_START_THRESHOLD) + return 0; + if (n == CROS_CHCTL_ATTR_END_THRESHOLD) + return 0; + } + + return attr->mode; +} + +static int cros_chctl_add_battery(struct power_supply *battery, struct acpi_battery_hook *hook) +{ + struct cros_chctl_priv *priv = container_of(hook, struct cros_chctl_priv, battery_hook); + + if (priv->hooked_battery) + return 0; + + priv->hooked_battery = battery; + return device_add_group(&battery->dev, &priv->group); +} + +static int cros_chctl_remove_battery(struct power_supply *battery, struct acpi_battery_hook *hook) +{ + struct cros_chctl_priv *priv = container_of(hook, struct cros_chctl_priv, battery_hook); + + if (priv->hooked_battery == battery) { + device_remove_group(&battery->dev, &priv->group); + priv->hooked_battery = NULL; + } + + return 0; +} + +static bool probe_with_fwk_charge_control; +module_param(probe_with_fwk_charge_control, bool, 0644); +MODULE_PARM_DESC(probe_with_fwk_charge_control, + "Probe the driver in the presence of the custom Framework EC charge control"); + +static int cros_chctl_fwk_charge_control_versions(struct cros_ec_device *cros_ec) +{ + if (!dmi_match(DMI_SYS_VENDOR, "Framework")) + return 0; + + return cros_ec_get_cmd_versions(cros_ec, 0x3E03 /* FW_EC_CMD_CHARGE_LIMIT */); +} + +static int cros_chctl_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct cros_ec_dev *ec_dev = dev_get_drvdata(dev->parent); + struct cros_ec_device *cros_ec = ec_dev->ec_dev; + struct cros_chctl_priv *priv; + size_t i; + int ret; + + ret = cros_chctl_fwk_charge_control_versions(cros_ec); + if (ret < 0) + return ret; + if (ret > 0 && !probe_with_fwk_charge_control) { + dev_info(dev, "Framework charge control detected, preventing load\n"); + return -ENODEV; + } + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + ret = 
cros_ec_get_cmd_versions(cros_ec, EC_CMD_CHARGE_CONTROL); + if (ret < 0) + return ret; + else if (ret & EC_VER_MASK(3)) + priv->cmd_version = 3; + else if (ret & EC_VER_MASK(2)) + priv->cmd_version = 2; + else if (ret & EC_VER_MASK(1)) + priv->cmd_version = 1; + else + return -ENODEV; + + dev_dbg(dev, "Command version: %u\n", (unsigned int)priv->cmd_version); + + priv->cros_ec = cros_ec; + priv->device_attrs[CROS_CHCTL_ATTR_START_THRESHOLD] = + (struct device_attribute)__ATTR_RW(charge_control_start_threshold); + priv->device_attrs[CROS_CHCTL_ATTR_END_THRESHOLD] = + (struct device_attribute)__ATTR_RW(charge_control_end_threshold); + priv->device_attrs[CROS_CHCTL_ATTR_CHARGE_BEHAVIOUR] = + (struct device_attribute)__ATTR_RW(charge_behaviour); + for (i = 0; i < _CROS_CHCTL_ATTR_COUNT; i++) { + sysfs_attr_init(&priv->device_attrs[i].attr); + priv->attributes[i] = &priv->device_attrs[i].attr; + } + priv->group.is_visible = cros_chtl_attr_is_visible; + priv->group.attrs = priv->attributes; + + priv->battery_hook.name = dev_name(dev); + priv->battery_hook.add_battery = cros_chctl_add_battery; + priv->battery_hook.remove_battery = cros_chctl_remove_battery; + + priv->current_behaviour = POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO; + priv->current_start_threshold = 0; + priv->current_end_threshold = 100; + + /* Bring EC into well-known state */ + ret = cros_chctl_configure_ec(priv); + if (ret < 0) + return ret; + + return devm_battery_hook_register(dev, &priv->battery_hook); +} + +static const struct platform_device_id cros_chctl_id[] = { + { "cros-charge-control", 0 }, + {} +}; + +static struct platform_driver cros_chctl_driver = { + .driver.name = "cros-charge-control", + .probe = cros_chctl_probe, + .id_table = cros_chctl_id, +}; +module_platform_driver(cros_chctl_driver); + +MODULE_DEVICE_TABLE(platform, cros_chctl_id); +MODULE_DESCRIPTION("ChromeOS EC charge control"); +MODULE_AUTHOR("Thomas Weißschuh <linux@weissschuh.net>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/powercap/idle_inject.c b/drivers/powercap/idle_inject.c index e18a2cc4e46a..bafc59904ed3 100644 --- a/drivers/powercap/idle_inject.c +++ b/drivers/powercap/idle_inject.c @@ -127,7 +127,7 @@ static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer) struct idle_inject_device *ii_dev = container_of(timer, struct idle_inject_device, timer); - if (!ii_dev->update || (ii_dev->update && ii_dev->update())) + if (!ii_dev->update || ii_dev->update()) idle_inject_wakeup(ii_dev); duration_us = READ_ONCE(ii_dev->run_duration_us); diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c index aac0744011a3..3cffa6c79538 100644 --- a/drivers/powercap/intel_rapl_common.c +++ b/drivers/powercap/intel_rapl_common.c @@ -1222,66 +1222,66 @@ static const struct rapl_defaults rapl_defaults_amd = { }; static const struct x86_cpu_id rapl_ids[] __initconst = { - X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE, &rapl_defaults_core), - X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X, &rapl_defaults_core), - - X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE, &rapl_defaults_core), - X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X, &rapl_defaults_core), - - X86_MATCH_INTEL_FAM6_MODEL(HASWELL, &rapl_defaults_core), - X86_MATCH_INTEL_FAM6_MODEL(HASWELL_L, &rapl_defaults_core), - X86_MATCH_INTEL_FAM6_MODEL(HASWELL_G, &rapl_defaults_core), - X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, &rapl_defaults_hsw_server), - - X86_MATCH_INTEL_FAM6_MODEL(BROADWELL, &rapl_defaults_core), - X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G, &rapl_defaults_core), - 
X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, &rapl_defaults_core), - X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, &rapl_defaults_hsw_server), - - X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, &rapl_defaults_core), - X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, &rapl_defaults_core), - X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &rapl_defaults_hsw_server), - X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, &rapl_defaults_core), - X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &rapl_defaults_core), - X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L, &rapl_defaults_core), - X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &rapl_defaults_core), - X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, &rapl_defaults_core), - X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_NNPI, &rapl_defaults_core), - X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &rapl_defaults_hsw_server), - X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &rapl_defaults_hsw_server), - X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &rapl_defaults_core), - X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &rapl_defaults_core), - X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &rapl_defaults_core), - X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &rapl_defaults_core), - X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, &rapl_defaults_core), - X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &rapl_defaults_core), - X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &rapl_defaults_core), - X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, &rapl_defaults_core), - X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, &rapl_defaults_core), - X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &rapl_defaults_core), - X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, &rapl_defaults_core), - X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE, &rapl_defaults_core), - X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, &rapl_defaults_core), - X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &rapl_defaults_spr_server), - X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &rapl_defaults_spr_server), - X86_MATCH_INTEL_FAM6_MODEL(LUNARLAKE_M, &rapl_defaults_core), - X86_MATCH_INTEL_FAM6_MODEL(ARROWLAKE_H, &rapl_defaults_core), - X86_MATCH_INTEL_FAM6_MODEL(ARROWLAKE, &rapl_defaults_core), - X86_MATCH_INTEL_FAM6_MODEL(LAKEFIELD, &rapl_defaults_core), - - X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, &rapl_defaults_byt), - X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, &rapl_defaults_cht), - X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID, &rapl_defaults_tng), - X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT_MID, &rapl_defaults_ann), - X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &rapl_defaults_core), - X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &rapl_defaults_core), - X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &rapl_defaults_core), - X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, &rapl_defaults_core), - X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &rapl_defaults_core), - X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, &rapl_defaults_core), - - X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &rapl_defaults_hsw_server), - X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &rapl_defaults_hsw_server), + X86_MATCH_VFM(INTEL_SANDYBRIDGE, &rapl_defaults_core), + X86_MATCH_VFM(INTEL_SANDYBRIDGE_X, &rapl_defaults_core), + + X86_MATCH_VFM(INTEL_IVYBRIDGE, &rapl_defaults_core), + X86_MATCH_VFM(INTEL_IVYBRIDGE_X, &rapl_defaults_core), + + X86_MATCH_VFM(INTEL_HASWELL, &rapl_defaults_core), + X86_MATCH_VFM(INTEL_HASWELL_L, &rapl_defaults_core), + X86_MATCH_VFM(INTEL_HASWELL_G, &rapl_defaults_core), + X86_MATCH_VFM(INTEL_HASWELL_X, &rapl_defaults_hsw_server), + + X86_MATCH_VFM(INTEL_BROADWELL, &rapl_defaults_core), + X86_MATCH_VFM(INTEL_BROADWELL_G, &rapl_defaults_core), + X86_MATCH_VFM(INTEL_BROADWELL_D, &rapl_defaults_core), + X86_MATCH_VFM(INTEL_BROADWELL_X, 
&rapl_defaults_hsw_server), + + X86_MATCH_VFM(INTEL_SKYLAKE, &rapl_defaults_core), + X86_MATCH_VFM(INTEL_SKYLAKE_L, &rapl_defaults_core), + X86_MATCH_VFM(INTEL_SKYLAKE_X, &rapl_defaults_hsw_server), + X86_MATCH_VFM(INTEL_KABYLAKE_L, &rapl_defaults_core), + X86_MATCH_VFM(INTEL_KABYLAKE, &rapl_defaults_core), + X86_MATCH_VFM(INTEL_CANNONLAKE_L, &rapl_defaults_core), + X86_MATCH_VFM(INTEL_ICELAKE_L, &rapl_defaults_core), + X86_MATCH_VFM(INTEL_ICELAKE, &rapl_defaults_core), + X86_MATCH_VFM(INTEL_ICELAKE_NNPI, &rapl_defaults_core), + X86_MATCH_VFM(INTEL_ICELAKE_X, &rapl_defaults_hsw_server), + X86_MATCH_VFM(INTEL_ICELAKE_D, &rapl_defaults_hsw_server), + X86_MATCH_VFM(INTEL_COMETLAKE_L, &rapl_defaults_core), + X86_MATCH_VFM(INTEL_COMETLAKE, &rapl_defaults_core), + X86_MATCH_VFM(INTEL_TIGERLAKE_L, &rapl_defaults_core), + X86_MATCH_VFM(INTEL_TIGERLAKE, &rapl_defaults_core), + X86_MATCH_VFM(INTEL_ROCKETLAKE, &rapl_defaults_core), + X86_MATCH_VFM(INTEL_ALDERLAKE, &rapl_defaults_core), + X86_MATCH_VFM(INTEL_ALDERLAKE_L, &rapl_defaults_core), + X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, &rapl_defaults_core), + X86_MATCH_VFM(INTEL_RAPTORLAKE, &rapl_defaults_core), + X86_MATCH_VFM(INTEL_RAPTORLAKE_P, &rapl_defaults_core), + X86_MATCH_VFM(INTEL_RAPTORLAKE_S, &rapl_defaults_core), + X86_MATCH_VFM(INTEL_METEORLAKE, &rapl_defaults_core), + X86_MATCH_VFM(INTEL_METEORLAKE_L, &rapl_defaults_core), + X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &rapl_defaults_spr_server), + X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &rapl_defaults_spr_server), + X86_MATCH_VFM(INTEL_LUNARLAKE_M, &rapl_defaults_core), + X86_MATCH_VFM(INTEL_ARROWLAKE_H, &rapl_defaults_core), + X86_MATCH_VFM(INTEL_ARROWLAKE, &rapl_defaults_core), + X86_MATCH_VFM(INTEL_LAKEFIELD, &rapl_defaults_core), + + X86_MATCH_VFM(INTEL_ATOM_SILVERMONT, &rapl_defaults_byt), + X86_MATCH_VFM(INTEL_ATOM_AIRMONT, &rapl_defaults_cht), + X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_MID, &rapl_defaults_tng), + X86_MATCH_VFM(INTEL_ATOM_AIRMONT_MID, &rapl_defaults_ann), + X86_MATCH_VFM(INTEL_ATOM_GOLDMONT, &rapl_defaults_core), + X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_PLUS, &rapl_defaults_core), + X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_D, &rapl_defaults_core), + X86_MATCH_VFM(INTEL_ATOM_TREMONT, &rapl_defaults_core), + X86_MATCH_VFM(INTEL_ATOM_TREMONT_D, &rapl_defaults_core), + X86_MATCH_VFM(INTEL_ATOM_TREMONT_L, &rapl_defaults_core), + + X86_MATCH_VFM(INTEL_XEON_PHI_KNL, &rapl_defaults_hsw_server), + X86_MATCH_VFM(INTEL_XEON_PHI_KNM, &rapl_defaults_hsw_server), X86_MATCH_VENDOR_FAM(AMD, 0x17, &rapl_defaults_amd), X86_MATCH_VENDOR_FAM(AMD, 0x19, &rapl_defaults_amd), diff --git a/drivers/powercap/intel_rapl_msr.c b/drivers/powercap/intel_rapl_msr.c index 35cb152fa9aa..733a36f67fbc 100644 --- a/drivers/powercap/intel_rapl_msr.c +++ b/drivers/powercap/intel_rapl_msr.c @@ -139,14 +139,14 @@ static int rapl_msr_write_raw(int cpu, struct reg_action *ra) /* List of verified CPUs. 
*/ static const struct x86_cpu_id pl4_support_ids[] = { - X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, NULL), - X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, NULL), - X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, NULL), - X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, NULL), - X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, NULL), - X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, NULL), - X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE, NULL), - X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, NULL), + X86_MATCH_VFM(INTEL_TIGERLAKE_L, NULL), + X86_MATCH_VFM(INTEL_ALDERLAKE, NULL), + X86_MATCH_VFM(INTEL_ALDERLAKE_L, NULL), + X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, NULL), + X86_MATCH_VFM(INTEL_RAPTORLAKE, NULL), + X86_MATCH_VFM(INTEL_RAPTORLAKE_P, NULL), + X86_MATCH_VFM(INTEL_METEORLAKE, NULL), + X86_MATCH_VFM(INTEL_METEORLAKE_L, NULL), {} }; diff --git a/drivers/ptp/ptp_vmw.c b/drivers/ptp/ptp_vmw.c index 7ec90359428a..20ab05c4daa8 100644 --- a/drivers/ptp/ptp_vmw.c +++ b/drivers/ptp/ptp_vmw.c @@ -14,7 +14,6 @@ #include <asm/hypervisor.h> #include <asm/vmware.h> -#define VMWARE_MAGIC 0x564D5868 #define VMWARE_CMD_PCLK(nr) ((nr << 16) | 97) #define VMWARE_CMD_PCLK_GETTIME VMWARE_CMD_PCLK(0) @@ -24,15 +23,10 @@ static struct ptp_clock *ptp_vmw_clock; static int ptp_vmw_pclk_read(u64 *ns) { - u32 ret, nsec_hi, nsec_lo, unused1, unused2, unused3; - - asm volatile (VMWARE_HYPERCALL : - "=a"(ret), "=b"(nsec_hi), "=c"(nsec_lo), "=d"(unused1), - "=S"(unused2), "=D"(unused3) : - "a"(VMWARE_MAGIC), "b"(0), - "c"(VMWARE_CMD_PCLK_GETTIME), "d"(0) : - "memory"); + u32 ret, nsec_hi, nsec_lo; + ret = vmware_hypercall3(VMWARE_CMD_PCLK_GETTIME, 0, + &nsec_hi, &nsec_lo); if (ret == 0) *ns = ((u64)nsec_hi << 32) | nsec_lo; return ret; diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig index 1dd7921194f5..3e53838990f5 100644 --- a/drivers/pwm/Kconfig +++ b/drivers/pwm/Kconfig @@ -94,6 +94,19 @@ config PWM_ATMEL_TCB To compile this driver as a module, choose M here: the module will be called pwm-atmel-tcb. +config PWM_AXI_PWMGEN + tristate "Analog Devices AXI PWM generator" + depends on MICROBLAZE || NIOS2 || ARCH_ZYNQ || ARCH_ZYNQMP || ARCH_INTEL_SOCFPGA || COMPILE_TEST + select REGMAP_MMIO + help + This enables support for the Analog Devices AXI PWM generator. + + This is a configurable PWM generator with variable pulse width and + period. + + To compile this driver as a module, choose M here: the module will be + called pwm-axi-pwmgen. + config PWM_BCM_IPROC tristate "iProc PWM support" depends on ARCH_BCM_IPROC || COMPILE_TEST @@ -223,6 +236,17 @@ config PWM_FSL_FTM To compile this driver as a module, choose M here: the module will be called pwm-fsl-ftm. +config PWM_GPIO + tristate "GPIO PWM support" + depends on GPIOLIB + depends on HIGH_RES_TIMERS + help + Generic PWM framework driver for software PWM toggling a GPIO pin + from kernel high-resolution timers. + + To compile this driver as a module, choose M here: the module + will be called pwm-gpio. 
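To its users pwm-gpio is an ordinary pwm_chip, so it is driven through the regular PWM state API. A minimal consumer sketch (hypothetical caller and period value), assuming devm_pwm_get() and pwm_apply_might_sleep() from <linux/pwm.h>:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pwm.h>

static int example_pwm_setup(struct device *dev)
{
	struct pwm_device *pwm;
	struct pwm_state state;

	pwm = devm_pwm_get(dev, NULL);
	if (IS_ERR(pwm))
		return PTR_ERR(pwm);

	/* Start from the reference args, then program a 50% duty cycle. */
	pwm_init_state(pwm, &state);
	state.period = 1000000;			/* 1 ms, in nanoseconds */
	state.duty_cycle = state.period / 2;
	state.enabled = true;

	return pwm_apply_might_sleep(pwm, &state);
}

Since the driver sets chip->atomic, callers that cannot sleep may use pwm_apply_atomic() instead; in either case period and duty_cycle are rounded down to multiples of hrtimer_resolution, see pwm_gpio_round() in the driver below.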
+ config PWM_HIBVT tristate "HiSilicon BVT PWM support" depends on ARCH_HISI || COMPILE_TEST diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile index 90913519f11a..0be4f3e6dd43 100644 --- a/drivers/pwm/Makefile +++ b/drivers/pwm/Makefile @@ -5,6 +5,7 @@ obj-$(CONFIG_PWM_APPLE) += pwm-apple.o obj-$(CONFIG_PWM_ATMEL) += pwm-atmel.o obj-$(CONFIG_PWM_ATMEL_HLCDC_PWM) += pwm-atmel-hlcdc.o obj-$(CONFIG_PWM_ATMEL_TCB) += pwm-atmel-tcb.o +obj-$(CONFIG_PWM_AXI_PWMGEN) += pwm-axi-pwmgen.o obj-$(CONFIG_PWM_BCM_IPROC) += pwm-bcm-iproc.o obj-$(CONFIG_PWM_BCM_KONA) += pwm-bcm-kona.o obj-$(CONFIG_PWM_BCM2835) += pwm-bcm2835.o @@ -18,6 +19,7 @@ obj-$(CONFIG_PWM_DWC_CORE) += pwm-dwc-core.o obj-$(CONFIG_PWM_DWC) += pwm-dwc.o obj-$(CONFIG_PWM_EP93XX) += pwm-ep93xx.o obj-$(CONFIG_PWM_FSL_FTM) += pwm-fsl-ftm.o +obj-$(CONFIG_PWM_GPIO) += pwm-gpio.o obj-$(CONFIG_PWM_HIBVT) += pwm-hibvt.o obj-$(CONFIG_PWM_IMG) += pwm-img.o obj-$(CONFIG_PWM_IMX1) += pwm-imx1.o diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index 18574857641e..8acbcf5b6673 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -6,6 +6,8 @@ * Copyright (C) 2011-2012 Avionic Design GmbH */ +#define DEFAULT_SYMBOL_NAMESPACE PWM + #include <linux/acpi.h> #include <linux/module.h> #include <linux/idr.h> @@ -135,6 +137,25 @@ static void pwm_apply_debug(struct pwm_device *pwm, } } +static bool pwm_state_valid(const struct pwm_state *state) +{ + /* + * For a disabled state all other state description is irrelevant and + * and supposed to be ignored. So also ignore any strange values and + * consider the state ok. + */ + if (state->enabled) + return true; + + if (!state->period) + return false; + + if (state->duty_cycle > state->period) + return false; + + return true; +} + /** * __pwm_apply() - atomically apply a new state to a PWM device * @pwm: PWM device @@ -145,9 +166,25 @@ static int __pwm_apply(struct pwm_device *pwm, const struct pwm_state *state) struct pwm_chip *chip; int err; - if (!pwm || !state || !state->period || - state->duty_cycle > state->period) + if (!pwm || !state) + return -EINVAL; + + if (!pwm_state_valid(state)) { + /* + * Allow to transition from one invalid state to another. + * This ensures that you can e.g. change the polarity while + * the period is zero. (This happens on stm32 when the hardware + * is in its poweron default state.) This greatly simplifies + * working with the sysfs API where you can only change one + * parameter at a time. 
+ */ + if (!pwm_state_valid(&pwm->state)) { + pwm->state = *state; + return 0; + } + return -EINVAL; + } chip = pwm->chip; @@ -291,19 +328,15 @@ EXPORT_SYMBOL_GPL(pwm_adjust_config); int pwm_capture(struct pwm_device *pwm, struct pwm_capture *result, unsigned long timeout) { - int err; - if (!pwm || !pwm->chip->ops) return -EINVAL; if (!pwm->chip->ops->capture) return -ENOSYS; - mutex_lock(&pwm_lock); - err = pwm->chip->ops->capture(pwm->chip, pwm, result, timeout); - mutex_unlock(&pwm_lock); + guard(mutex)(&pwm_lock); - return err; + return pwm->chip->ops->capture(pwm->chip, pwm, result, timeout); } EXPORT_SYMBOL_GPL(pwm_capture); @@ -315,19 +348,15 @@ static struct pwm_chip *pwmchip_find_by_name(const char *name) if (!name) return NULL; - mutex_lock(&pwm_lock); + guard(mutex)(&pwm_lock); idr_for_each_entry_ul(&pwm_chips, chip, tmp, id) { const char *chip_name = dev_name(pwmchip_parent(chip)); - if (chip_name && strcmp(chip_name, name) == 0) { - mutex_unlock(&pwm_lock); + if (chip_name && strcmp(chip_name, name) == 0) return chip; - } } - mutex_unlock(&pwm_lock); - return NULL; } @@ -394,9 +423,9 @@ err_get_device: * chip. A negative error code is returned if the index is not valid for the * specified PWM chip or if the PWM device cannot be requested. */ -struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip, - unsigned int index, - const char *label) +static struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip, + unsigned int index, + const char *label) { struct pwm_device *pwm; int err; @@ -404,18 +433,16 @@ struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip, if (!chip || index >= chip->npwm) return ERR_PTR(-EINVAL); - mutex_lock(&pwm_lock); + guard(mutex)(&pwm_lock); + pwm = &chip->pwms[index]; err = pwm_device_request(pwm, label); if (err < 0) - pwm = ERR_PTR(err); + return ERR_PTR(err); - mutex_unlock(&pwm_lock); return pwm; } -EXPORT_SYMBOL_GPL(pwm_request_from_chip); - struct pwm_device * of_pwm_xlate_with_flags(struct pwm_chip *chip, const struct of_phandle_args *args) @@ -511,11 +538,11 @@ static ssize_t period_store(struct device *pwm_dev, if (ret) return ret; - mutex_lock(&export->lock); + guard(mutex)(&export->lock); + pwm_get_state(pwm, &state); state.period = val; ret = pwm_apply_might_sleep(pwm, &state); - mutex_unlock(&export->lock); return ret ? : size; } @@ -546,11 +573,11 @@ static ssize_t duty_cycle_store(struct device *pwm_dev, if (ret) return ret; - mutex_lock(&export->lock); + guard(mutex)(&export->lock); + pwm_get_state(pwm, &state); state.duty_cycle = val; ret = pwm_apply_might_sleep(pwm, &state); - mutex_unlock(&export->lock); return ret ? : size; } @@ -580,7 +607,7 @@ static ssize_t enable_store(struct device *pwm_dev, if (ret) return ret; - mutex_lock(&export->lock); + guard(mutex)(&export->lock); pwm_get_state(pwm, &state); @@ -592,14 +619,11 @@ static ssize_t enable_store(struct device *pwm_dev, state.enabled = true; break; default: - ret = -EINVAL; - goto unlock; + return -EINVAL; } ret = pwm_apply_might_sleep(pwm, &state); -unlock: - mutex_unlock(&export->lock); return ret ? : size; } @@ -643,11 +667,11 @@ static ssize_t polarity_store(struct device *pwm_dev, else return -EINVAL; - mutex_lock(&export->lock); + guard(mutex)(&export->lock); + pwm_get_state(pwm, &state); state.polarity = polarity; ret = pwm_apply_might_sleep(pwm, &state); - mutex_unlock(&export->lock); return ret ? 
: size; } @@ -1102,11 +1126,11 @@ int __pwmchip_add(struct pwm_chip *chip, struct module *owner) chip->owner = owner; - mutex_lock(&pwm_lock); + guard(mutex)(&pwm_lock); ret = idr_alloc(&pwm_chips, chip, 0, 0, GFP_KERNEL); if (ret < 0) - goto err_idr_alloc; + return ret; chip->id = ret; @@ -1119,8 +1143,6 @@ int __pwmchip_add(struct pwm_chip *chip, struct module *owner) if (ret) goto err_device_add; - mutex_unlock(&pwm_lock); - return 0; err_device_add: @@ -1128,9 +1150,6 @@ err_device_add: of_pwmchip_remove(chip); idr_remove(&pwm_chips, chip->id); -err_idr_alloc: - - mutex_unlock(&pwm_lock); return ret; } @@ -1149,11 +1168,8 @@ void pwmchip_remove(struct pwm_chip *chip) if (IS_ENABLED(CONFIG_OF)) of_pwmchip_remove(chip); - mutex_lock(&pwm_lock); - - idr_remove(&pwm_chips, chip->id); - - mutex_unlock(&pwm_lock); + scoped_guard(mutex, &pwm_lock) + idr_remove(&pwm_chips, chip->id); device_del(&chip->dev); } @@ -1209,15 +1225,11 @@ static struct pwm_chip *fwnode_to_pwmchip(struct fwnode_handle *fwnode) struct pwm_chip *chip; unsigned long id, tmp; - mutex_lock(&pwm_lock); + guard(mutex)(&pwm_lock); idr_for_each_entry_ul(&pwm_chips, chip, tmp, id) - if (pwmchip_parent(chip) && device_match_fwnode(pwmchip_parent(chip), fwnode)) { - mutex_unlock(&pwm_lock); + if (pwmchip_parent(chip) && device_match_fwnode(pwmchip_parent(chip), fwnode)) return chip; - } - - mutex_unlock(&pwm_lock); return ERR_PTR(-EPROBE_DEFER); } @@ -1366,14 +1378,12 @@ static LIST_HEAD(pwm_lookup_list); */ void pwm_add_table(struct pwm_lookup *table, size_t num) { - mutex_lock(&pwm_lookup_lock); + guard(mutex)(&pwm_lookup_lock); while (num--) { list_add_tail(&table->list, &pwm_lookup_list); table++; } - - mutex_unlock(&pwm_lookup_lock); } /** @@ -1383,14 +1393,12 @@ void pwm_add_table(struct pwm_lookup *table, size_t num) */ void pwm_remove_table(struct pwm_lookup *table, size_t num) { - mutex_lock(&pwm_lookup_lock); + guard(mutex)(&pwm_lookup_lock); while (num--) { list_del(&table->list); table++; } - - mutex_unlock(&pwm_lookup_lock); } /** @@ -1451,36 +1459,33 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id) * Then we take the most specific entry - with the following order * of precedence: dev+con > dev only > con only. 
*/ - mutex_lock(&pwm_lookup_lock); - - list_for_each_entry(p, &pwm_lookup_list, list) { - match = 0; + scoped_guard(mutex, &pwm_lookup_lock) + list_for_each_entry(p, &pwm_lookup_list, list) { + match = 0; - if (p->dev_id) { - if (!dev_id || strcmp(p->dev_id, dev_id)) - continue; + if (p->dev_id) { + if (!dev_id || strcmp(p->dev_id, dev_id)) + continue; - match += 2; - } + match += 2; + } - if (p->con_id) { - if (!con_id || strcmp(p->con_id, con_id)) - continue; + if (p->con_id) { + if (!con_id || strcmp(p->con_id, con_id)) + continue; - match += 1; - } + match += 1; + } - if (match > best) { - chosen = p; + if (match > best) { + chosen = p; - if (match != 3) - best = match; - else - break; + if (match != 3) + best = match; + else + break; + } } - } - - mutex_unlock(&pwm_lookup_lock); if (!chosen) return ERR_PTR(-ENODEV); @@ -1532,11 +1537,11 @@ void pwm_put(struct pwm_device *pwm) chip = pwm->chip; - mutex_lock(&pwm_lock); + guard(mutex)(&pwm_lock); if (!test_and_clear_bit(PWMF_REQUESTED, &pwm->flags)) { pr_warn("PWM device already freed\n"); - goto out; + return; } if (chip->ops->free) @@ -1547,8 +1552,6 @@ void pwm_put(struct pwm_device *pwm) put_device(&chip->dev); module_put(chip->owner); -out: - mutex_unlock(&pwm_lock); } EXPORT_SYMBOL_GPL(pwm_put); @@ -1705,9 +1708,17 @@ DEFINE_SEQ_ATTRIBUTE(pwm_debugfs); static int __init pwm_init(void) { + int ret; + + ret = class_register(&pwm_class); + if (ret) { + pr_err("Failed to initialize PWM class (%pe)\n", ERR_PTR(ret)); + return ret; + } + if (IS_ENABLED(CONFIG_DEBUG_FS)) debugfs_create_file("pwm", 0444, NULL, NULL, &pwm_debugfs_fops); - return class_register(&pwm_class); + return 0; } subsys_initcall(pwm_init); diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c index 528e54c5999d..f9a9c12cbcdd 100644 --- a/drivers/pwm/pwm-atmel-tcb.c +++ b/drivers/pwm/pwm-atmel-tcb.c @@ -81,7 +81,8 @@ static int atmel_tcb_pwm_request(struct pwm_chip *chip, tcbpwm->period = 0; tcbpwm->div = 0; - spin_lock(&tcbpwmc->lock); + guard(spinlock)(&tcbpwmc->lock); + regmap_read(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CMR), &cmr); /* * Get init config from Timer Counter registers if @@ -107,7 +108,6 @@ static int atmel_tcb_pwm_request(struct pwm_chip *chip, cmr |= ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO | ATMEL_TC_EEVT_XC0; regmap_write(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CMR), cmr); - spin_unlock(&tcbpwmc->lock); return 0; } @@ -137,7 +137,6 @@ static void atmel_tcb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm, if (tcbpwm->duty == 0) polarity = !polarity; - spin_lock(&tcbpwmc->lock); regmap_read(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CMR), &cmr); /* flush old setting and set the new one */ @@ -172,8 +171,6 @@ static void atmel_tcb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm, ATMEL_TC_SWTRG); tcbpwmc->bkup.enabled = 0; } - - spin_unlock(&tcbpwmc->lock); } static int atmel_tcb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm, @@ -194,7 +191,6 @@ static int atmel_tcb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm, if (tcbpwm->duty == 0) polarity = !polarity; - spin_lock(&tcbpwmc->lock); regmap_read(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CMR), &cmr); /* flush old setting and set the new one */ @@ -256,7 +252,6 @@ static int atmel_tcb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm, regmap_write(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CCR), ATMEL_TC_SWTRG | ATMEL_TC_CLKEN); tcbpwmc->bkup.enabled = 1; - spin_unlock(&tcbpwmc->lock); return 0; } @@ 
-265,7 +260,8 @@ static int atmel_tcb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, { struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip); struct atmel_tcb_pwm_device *tcbpwm = &tcbpwmc->pwms[pwm->hwpwm]; - struct atmel_tcb_pwm_device *atcbpwm = NULL; + /* companion PWM sharing register values period and div */ + struct atmel_tcb_pwm_device *atcbpwm = &tcbpwmc->pwms[pwm->hwpwm ^ 1]; int i = 0; int slowclk = 0; unsigned period; @@ -310,11 +306,6 @@ static int atmel_tcb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, duty = div_u64(duty_ns, min); period = div_u64(period_ns, min); - if (pwm->hwpwm == 0) - atcbpwm = &tcbpwmc->pwms[1]; - else - atcbpwm = &tcbpwmc->pwms[0]; - /* * PWM devices provided by the TCB driver are grouped by 2. * PWM devices in a given group must be configured with the @@ -323,8 +314,7 @@ static int atmel_tcb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, * We're checking the period value of the second PWM device * in this group before applying the new config. */ - if ((atcbpwm && atcbpwm->duty > 0 && - atcbpwm->duty != atcbpwm->period) && + if ((atcbpwm->duty > 0 && atcbpwm->duty != atcbpwm->period) && (atcbpwm->div != i || atcbpwm->period != period)) { dev_err(pwmchip_parent(chip), "failed to configure period_ns: PWM group already configured with a different value\n"); @@ -341,9 +331,12 @@ static int atmel_tcb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, static int atmel_tcb_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, const struct pwm_state *state) { + struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip); int duty_cycle, period; int ret; + guard(spinlock)(&tcbpwmc->lock); + if (!state->enabled) { atmel_tcb_pwm_disable(chip, pwm, state->polarity); return 0; @@ -389,17 +382,17 @@ static int atmel_tcb_pwm_probe(struct platform_device *pdev) { struct pwm_chip *chip; const struct of_device_id *match; - struct atmel_tcb_pwm_chip *tcbpwm; + struct atmel_tcb_pwm_chip *tcbpwmc; const struct atmel_tcb_config *config; struct device_node *np = pdev->dev.of_node; char clk_name[] = "t0_clk"; int err; int channel; - chip = devm_pwmchip_alloc(&pdev->dev, NPWM, sizeof(*tcbpwm)); + chip = devm_pwmchip_alloc(&pdev->dev, NPWM, sizeof(*tcbpwmc)); if (IS_ERR(chip)) return PTR_ERR(chip); - tcbpwm = to_tcb_chip(chip); + tcbpwmc = to_tcb_chip(chip); err = of_property_read_u32(np, "reg", &channel); if (err < 0) { @@ -409,20 +402,20 @@ static int atmel_tcb_pwm_probe(struct platform_device *pdev) return err; } - tcbpwm->regmap = syscon_node_to_regmap(np->parent); - if (IS_ERR(tcbpwm->regmap)) - return PTR_ERR(tcbpwm->regmap); + tcbpwmc->regmap = syscon_node_to_regmap(np->parent); + if (IS_ERR(tcbpwmc->regmap)) + return PTR_ERR(tcbpwmc->regmap); - tcbpwm->slow_clk = of_clk_get_by_name(np->parent, "slow_clk"); - if (IS_ERR(tcbpwm->slow_clk)) - return PTR_ERR(tcbpwm->slow_clk); + tcbpwmc->slow_clk = of_clk_get_by_name(np->parent, "slow_clk"); + if (IS_ERR(tcbpwmc->slow_clk)) + return PTR_ERR(tcbpwmc->slow_clk); clk_name[1] += channel; - tcbpwm->clk = of_clk_get_by_name(np->parent, clk_name); - if (IS_ERR(tcbpwm->clk)) - tcbpwm->clk = of_clk_get_by_name(np->parent, "t0_clk"); - if (IS_ERR(tcbpwm->clk)) { - err = PTR_ERR(tcbpwm->clk); + tcbpwmc->clk = of_clk_get_by_name(np->parent, clk_name); + if (IS_ERR(tcbpwmc->clk)) + tcbpwmc->clk = of_clk_get_by_name(np->parent, "t0_clk"); + if (IS_ERR(tcbpwmc->clk)) { + err = PTR_ERR(tcbpwmc->clk); goto err_slow_clk; } @@ -430,22 +423,22 @@ static int atmel_tcb_pwm_probe(struct platform_device 
*pdev) config = match->data; if (config->has_gclk) { - tcbpwm->gclk = of_clk_get_by_name(np->parent, "gclk"); - if (IS_ERR(tcbpwm->gclk)) { - err = PTR_ERR(tcbpwm->gclk); + tcbpwmc->gclk = of_clk_get_by_name(np->parent, "gclk"); + if (IS_ERR(tcbpwmc->gclk)) { + err = PTR_ERR(tcbpwmc->gclk); goto err_clk; } } chip->ops = &atmel_tcb_pwm_ops; - tcbpwm->channel = channel; - tcbpwm->width = config->counter_width; + tcbpwmc->channel = channel; + tcbpwmc->width = config->counter_width; - err = clk_prepare_enable(tcbpwm->slow_clk); + err = clk_prepare_enable(tcbpwmc->slow_clk); if (err) goto err_gclk; - spin_lock_init(&tcbpwm->lock); + spin_lock_init(&tcbpwmc->lock); err = pwmchip_add(chip); if (err < 0) @@ -456,16 +449,16 @@ static int atmel_tcb_pwm_probe(struct platform_device *pdev) return 0; err_disable_clk: - clk_disable_unprepare(tcbpwm->slow_clk); + clk_disable_unprepare(tcbpwmc->slow_clk); err_gclk: - clk_put(tcbpwm->gclk); + clk_put(tcbpwmc->gclk); err_clk: - clk_put(tcbpwm->clk); + clk_put(tcbpwmc->clk); err_slow_clk: - clk_put(tcbpwm->slow_clk); + clk_put(tcbpwmc->slow_clk); return err; } @@ -473,14 +466,14 @@ err_slow_clk: static void atmel_tcb_pwm_remove(struct platform_device *pdev) { struct pwm_chip *chip = platform_get_drvdata(pdev); - struct atmel_tcb_pwm_chip *tcbpwm = to_tcb_chip(chip); + struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip); pwmchip_remove(chip); - clk_disable_unprepare(tcbpwm->slow_clk); - clk_put(tcbpwm->gclk); - clk_put(tcbpwm->clk); - clk_put(tcbpwm->slow_clk); + clk_disable_unprepare(tcbpwmc->slow_clk); + clk_put(tcbpwmc->gclk); + clk_put(tcbpwmc->clk); + clk_put(tcbpwmc->slow_clk); } static const struct of_device_id atmel_tcb_pwm_dt_ids[] = { @@ -492,14 +485,14 @@ MODULE_DEVICE_TABLE(of, atmel_tcb_pwm_dt_ids); static int atmel_tcb_pwm_suspend(struct device *dev) { struct pwm_chip *chip = dev_get_drvdata(dev); - struct atmel_tcb_pwm_chip *tcbpwm = to_tcb_chip(chip); - struct atmel_tcb_channel *chan = &tcbpwm->bkup; - unsigned int channel = tcbpwm->channel; + struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip); + struct atmel_tcb_channel *chan = &tcbpwmc->bkup; + unsigned int channel = tcbpwmc->channel; - regmap_read(tcbpwm->regmap, ATMEL_TC_REG(channel, CMR), &chan->cmr); - regmap_read(tcbpwm->regmap, ATMEL_TC_REG(channel, RA), &chan->ra); - regmap_read(tcbpwm->regmap, ATMEL_TC_REG(channel, RB), &chan->rb); - regmap_read(tcbpwm->regmap, ATMEL_TC_REG(channel, RC), &chan->rc); + regmap_read(tcbpwmc->regmap, ATMEL_TC_REG(channel, CMR), &chan->cmr); + regmap_read(tcbpwmc->regmap, ATMEL_TC_REG(channel, RA), &chan->ra); + regmap_read(tcbpwmc->regmap, ATMEL_TC_REG(channel, RB), &chan->rb); + regmap_read(tcbpwmc->regmap, ATMEL_TC_REG(channel, RC), &chan->rc); return 0; } @@ -507,17 +500,17 @@ static int atmel_tcb_pwm_suspend(struct device *dev) static int atmel_tcb_pwm_resume(struct device *dev) { struct pwm_chip *chip = dev_get_drvdata(dev); - struct atmel_tcb_pwm_chip *tcbpwm = to_tcb_chip(chip); - struct atmel_tcb_channel *chan = &tcbpwm->bkup; - unsigned int channel = tcbpwm->channel; + struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip); + struct atmel_tcb_channel *chan = &tcbpwmc->bkup; + unsigned int channel = tcbpwmc->channel; - regmap_write(tcbpwm->regmap, ATMEL_TC_REG(channel, CMR), chan->cmr); - regmap_write(tcbpwm->regmap, ATMEL_TC_REG(channel, RA), chan->ra); - regmap_write(tcbpwm->regmap, ATMEL_TC_REG(channel, RB), chan->rb); - regmap_write(tcbpwm->regmap, ATMEL_TC_REG(channel, RC), chan->rc); + regmap_write(tcbpwmc->regmap, 
ATMEL_TC_REG(channel, CMR), chan->cmr); + regmap_write(tcbpwmc->regmap, ATMEL_TC_REG(channel, RA), chan->ra); + regmap_write(tcbpwmc->regmap, ATMEL_TC_REG(channel, RB), chan->rb); + regmap_write(tcbpwmc->regmap, ATMEL_TC_REG(channel, RC), chan->rc); if (chan->enabled) - regmap_write(tcbpwm->regmap, + regmap_write(tcbpwmc->regmap, ATMEL_TC_CLKEN | ATMEL_TC_SWTRG, ATMEL_TC_REG(channel, CCR)); diff --git a/drivers/pwm/pwm-axi-pwmgen.c b/drivers/pwm/pwm-axi-pwmgen.c new file mode 100644 index 000000000000..3ad60edf20a5 --- /dev/null +++ b/drivers/pwm/pwm-axi-pwmgen.c @@ -0,0 +1,242 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Analog Devices AXI PWM generator + * + * Copyright 2024 Analog Devices Inc. + * Copyright 2024 Baylibre SAS + * + * Device docs: https://analogdevicesinc.github.io/hdl/library/axi_pwm_gen/index.html + * + * Limitations: + * - The writes to registers for period and duty are shadowed until + * LOAD_CONFIG is written to AXI_PWMGEN_REG_CONFIG, at which point + * they take effect. + * - Writing LOAD_CONFIG also has the effect of re-synchronizing all + * enabled channels, which could cause glitching on other channels. It + * is therefore expected that channels are assigned harmonic periods + * and all have a single user coordinating this. + * - Supports normal polarity. Does not support changing polarity. + * - On disable, the PWM output becomes low (inactive). + */ +#include <linux/bits.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/fpga/adi-axi-common.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/pwm.h> +#include <linux/regmap.h> +#include <linux/slab.h> + +#define AXI_PWMGEN_REG_CORE_VERSION 0x00 +#define AXI_PWMGEN_REG_ID 0x04 +#define AXI_PWMGEN_REG_SCRATCHPAD 0x08 +#define AXI_PWMGEN_REG_CORE_MAGIC 0x0C +#define AXI_PWMGEN_REG_CONFIG 0x10 +#define AXI_PWMGEN_REG_NPWM 0x14 +#define AXI_PWMGEN_CHX_PERIOD(ch) (0x40 + (4 * (ch))) +#define AXI_PWMGEN_CHX_DUTY(ch) (0x80 + (4 * (ch))) +#define AXI_PWMGEN_CHX_OFFSET(ch) (0xC0 + (4 * (ch))) +#define AXI_PWMGEN_REG_CORE_MAGIC_VAL 0x601A3471 /* Identification number to test during setup */ +#define AXI_PWMGEN_LOAD_CONFIG BIT(1) +#define AXI_PWMGEN_REG_CONFIG_RESET BIT(0) + +struct axi_pwmgen_ddata { + struct regmap *regmap; + unsigned long clk_rate_hz; +}; + +static const struct regmap_config axi_pwmgen_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0xFC, +}; + +static int axi_pwmgen_apply(struct pwm_chip *chip, struct pwm_device *pwm, + const struct pwm_state *state) +{ + struct axi_pwmgen_ddata *ddata = pwmchip_get_drvdata(chip); + unsigned int ch = pwm->hwpwm; + struct regmap *regmap = ddata->regmap; + u64 period_cnt, duty_cnt; + int ret; + + if (state->polarity != PWM_POLARITY_NORMAL) + return -EINVAL; + + if (state->enabled) { + period_cnt = mul_u64_u64_div_u64(state->period, ddata->clk_rate_hz, NSEC_PER_SEC); + if (period_cnt > UINT_MAX) + period_cnt = UINT_MAX; + + if (period_cnt == 0) + return -EINVAL; + + ret = regmap_write(regmap, AXI_PWMGEN_CHX_PERIOD(ch), period_cnt); + if (ret) + return ret; + + duty_cnt = mul_u64_u64_div_u64(state->duty_cycle, ddata->clk_rate_hz, NSEC_PER_SEC); + if (duty_cnt > UINT_MAX) + duty_cnt = UINT_MAX; + + ret = regmap_write(regmap, AXI_PWMGEN_CHX_DUTY(ch), duty_cnt); + if (ret) + return ret; + } else { + ret = regmap_write(regmap, AXI_PWMGEN_CHX_PERIOD(ch), 0); + if (ret) + return ret; + + ret = regmap_write(regmap, AXI_PWMGEN_CHX_DUTY(ch), 0); + if (ret) + return 
ret; + } + + return regmap_write(regmap, AXI_PWMGEN_REG_CONFIG, AXI_PWMGEN_LOAD_CONFIG); +} + +static int axi_pwmgen_get_state(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_state *state) +{ + struct axi_pwmgen_ddata *ddata = pwmchip_get_drvdata(chip); + struct regmap *regmap = ddata->regmap; + unsigned int ch = pwm->hwpwm; + u32 cnt; + int ret; + + ret = regmap_read(regmap, AXI_PWMGEN_CHX_PERIOD(ch), &cnt); + if (ret) + return ret; + + state->enabled = cnt != 0; + + state->period = DIV_ROUND_UP_ULL((u64)cnt * NSEC_PER_SEC, ddata->clk_rate_hz); + + ret = regmap_read(regmap, AXI_PWMGEN_CHX_DUTY(ch), &cnt); + if (ret) + return ret; + + state->duty_cycle = DIV_ROUND_UP_ULL((u64)cnt * NSEC_PER_SEC, ddata->clk_rate_hz); + + state->polarity = PWM_POLARITY_NORMAL; + + return 0; +} + +static const struct pwm_ops axi_pwmgen_pwm_ops = { + .apply = axi_pwmgen_apply, + .get_state = axi_pwmgen_get_state, +}; + +static int axi_pwmgen_setup(struct regmap *regmap, struct device *dev) +{ + int ret; + u32 val; + + ret = regmap_read(regmap, AXI_PWMGEN_REG_CORE_MAGIC, &val); + if (ret) + return ret; + + if (val != AXI_PWMGEN_REG_CORE_MAGIC_VAL) + return dev_err_probe(dev, -ENODEV, + "failed to read expected value from register: got %08x, expected %08x\n", + val, AXI_PWMGEN_REG_CORE_MAGIC_VAL); + + ret = regmap_read(regmap, AXI_PWMGEN_REG_CORE_VERSION, &val); + if (ret) + return ret; + + if (ADI_AXI_PCORE_VER_MAJOR(val) != 2) { + return dev_err_probe(dev, -ENODEV, "Unsupported peripheral version %u.%u.%u\n", + ADI_AXI_PCORE_VER_MAJOR(val), + ADI_AXI_PCORE_VER_MINOR(val), + ADI_AXI_PCORE_VER_PATCH(val)); + } + + /* Enable the core */ + ret = regmap_clear_bits(regmap, AXI_PWMGEN_REG_CONFIG, AXI_PWMGEN_REG_CONFIG_RESET); + if (ret) + return ret; + + ret = regmap_read(regmap, AXI_PWMGEN_REG_NPWM, &val); + if (ret) + return ret; + + /* Return the number of PWMs */ + return val; +} + +static int axi_pwmgen_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct regmap *regmap; + struct pwm_chip *chip; + struct axi_pwmgen_ddata *ddata; + struct clk *clk; + void __iomem *io_base; + int ret; + + io_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(io_base)) + return PTR_ERR(io_base); + + regmap = devm_regmap_init_mmio(dev, io_base, &axi_pwmgen_regmap_config); + if (IS_ERR(regmap)) + return dev_err_probe(dev, PTR_ERR(regmap), + "failed to init register map\n"); + + ret = axi_pwmgen_setup(regmap, dev); + if (ret < 0) + return ret; + + chip = devm_pwmchip_alloc(dev, ret, sizeof(*ddata)); + if (IS_ERR(chip)) + return PTR_ERR(chip); + ddata = pwmchip_get_drvdata(chip); + ddata->regmap = regmap; + + clk = devm_clk_get_enabled(dev, NULL); + if (IS_ERR(clk)) + return dev_err_probe(dev, PTR_ERR(clk), "failed to get clock\n"); + + ret = devm_clk_rate_exclusive_get(dev, clk); + if (ret) + return dev_err_probe(dev, ret, "failed to get exclusive rate\n"); + + ddata->clk_rate_hz = clk_get_rate(clk); + if (!ddata->clk_rate_hz || ddata->clk_rate_hz > NSEC_PER_SEC) + return dev_err_probe(dev, -EINVAL, + "Invalid clock rate: %lu\n", ddata->clk_rate_hz); + + chip->ops = &axi_pwmgen_pwm_ops; + chip->atomic = true; + + ret = devm_pwmchip_add(dev, chip); + if (ret) + return dev_err_probe(dev, ret, "could not add PWM chip\n"); + + return 0; +} + +static const struct of_device_id axi_pwmgen_ids[] = { + { .compatible = "adi,axi-pwmgen-2.00.a" }, + { } +}; +MODULE_DEVICE_TABLE(of, axi_pwmgen_ids); + +static struct platform_driver axi_pwmgen_driver = { + .driver = { + .name = "axi-pwmgen", + 
.of_match_table = axi_pwmgen_ids, + }, + .probe = axi_pwmgen_probe, +}; +module_platform_driver(axi_pwmgen_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Sergiu Cuciurean <sergiu.cuciurean@analog.com>"); +MODULE_AUTHOR("Trevor Gamblin <tgamblin@baylibre.com>"); +MODULE_DESCRIPTION("Driver for the Analog Devices AXI PWM generator"); diff --git a/drivers/pwm/pwm-cros-ec.c b/drivers/pwm/pwm-cros-ec.c index 606ccfdaf4cc..189301dc395e 100644 --- a/drivers/pwm/pwm-cros-ec.c +++ b/drivers/pwm/pwm-cros-ec.c @@ -20,20 +20,10 @@ * * @ec: Pointer to EC device * @use_pwm_type: Use PWM types instead of generic channels - * @channel: array with per-channel data */ struct cros_ec_pwm_device { struct cros_ec_device *ec; bool use_pwm_type; - struct cros_ec_pwm *channel; -}; - -/** - * struct cros_ec_pwm - per-PWM driver data - * @duty_cycle: cached duty cycle - */ -struct cros_ec_pwm { - u16 duty_cycle; }; static inline struct cros_ec_pwm_device *pwm_to_cros_ec_pwm(struct pwm_chip *chip) @@ -135,7 +125,6 @@ static int cros_ec_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, const struct pwm_state *state) { struct cros_ec_pwm_device *ec_pwm = pwm_to_cros_ec_pwm(chip); - struct cros_ec_pwm *channel = &ec_pwm->channel[pwm->hwpwm]; u16 duty_cycle; int ret; @@ -156,8 +145,6 @@ static int cros_ec_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, if (ret < 0) return ret; - channel->duty_cycle = state->duty_cycle; - return 0; } @@ -165,7 +152,6 @@ static int cros_ec_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm, struct pwm_state *state) { struct cros_ec_pwm_device *ec_pwm = pwm_to_cros_ec_pwm(chip); - struct cros_ec_pwm *channel = &ec_pwm->channel[pwm->hwpwm]; int ret; ret = cros_ec_pwm_get_duty(ec_pwm->ec, ec_pwm->use_pwm_type, pwm->hwpwm); @@ -175,44 +161,13 @@ static int cros_ec_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm, } state->enabled = (ret > 0); + state->duty_cycle = ret; state->period = EC_PWM_MAX_DUTY; state->polarity = PWM_POLARITY_NORMAL; - /* - * Note that "disabled" and "duty cycle == 0" are treated the same. If - * the cached duty cycle is not zero, used the cached duty cycle. This - * ensures that the configured duty cycle is kept across a disable and - * enable operation and avoids potentially confusing consumers. - * - * For the case of the initial hardware readout, channel->duty_cycle - * will be 0 and the actual duty cycle read from the EC is used. 
- */ - if (ret == 0 && channel->duty_cycle > 0) - state->duty_cycle = channel->duty_cycle; - else - state->duty_cycle = ret; - return 0; } -static struct pwm_device * -cros_ec_pwm_xlate(struct pwm_chip *chip, const struct of_phandle_args *args) -{ - struct pwm_device *pwm; - - if (args->args[0] >= chip->npwm) - return ERR_PTR(-EINVAL); - - pwm = pwm_request_from_chip(chip, args->args[0], NULL); - if (IS_ERR(pwm)) - return pwm; - - /* The EC won't let us change the period */ - pwm->args.period = EC_PWM_MAX_DUTY; - - return pwm; -} - static const struct pwm_ops cros_ec_pwm_ops = { .get_state = cros_ec_pwm_get_state, .apply = cros_ec_pwm_apply, @@ -263,7 +218,7 @@ static int cros_ec_pwm_probe(struct platform_device *pdev) struct cros_ec_pwm_device *ec_pwm; struct pwm_chip *chip; bool use_pwm_type = false; - unsigned int npwm; + unsigned int i, npwm; int ret; if (!ec) @@ -289,12 +244,17 @@ static int cros_ec_pwm_probe(struct platform_device *pdev) /* PWM chip */ chip->ops = &cros_ec_pwm_ops; - chip->of_xlate = cros_ec_pwm_xlate; - ec_pwm->channel = devm_kcalloc(dev, chip->npwm, sizeof(*ec_pwm->channel), - GFP_KERNEL); - if (!ec_pwm->channel) - return -ENOMEM; + /* + * The device tree binding for this device is special as it only uses a + * single cell (for the hwid) and so doesn't provide a default period. + * This isn't a big problem though as the hardware only supports a + * single period length, it's just a bit ugly to make this fit into the + * pwm core abstractions. So initialize the period here, as + * of_pwm_xlate_with_flags() won't do that for us. + */ + for (i = 0; i < npwm; ++i) + chip->pwms[i].args.period = EC_PWM_MAX_DUTY; dev_dbg(dev, "Probed %u PWMs\n", chip->npwm); diff --git a/drivers/pwm/pwm-gpio.c b/drivers/pwm/pwm-gpio.c new file mode 100644 index 000000000000..9f8884ac7504 --- /dev/null +++ b/drivers/pwm/pwm-gpio.c @@ -0,0 +1,241 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Generic software PWM for modulating GPIOs + * + * Copyright (C) 2020 Axis Communications AB + * Copyright (C) 2020 Nicola Di Lieto + * Copyright (C) 2024 Stefan Wahren + * Copyright (C) 2024 Linus Walleij + */ + +#include <linux/cleanup.h> +#include <linux/container_of.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/gpio/consumer.h> +#include <linux/hrtimer.h> +#include <linux/math.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/platform_device.h> +#include <linux/property.h> +#include <linux/pwm.h> +#include <linux/spinlock.h> +#include <linux/time.h> +#include <linux/types.h> + +struct pwm_gpio { + struct hrtimer gpio_timer; + struct gpio_desc *gpio; + struct pwm_state state; + struct pwm_state next_state; + + /* Protect internal state between pwm_ops and hrtimer */ + spinlock_t lock; + + bool changing; + bool running; + bool level; +}; + +static void pwm_gpio_round(struct pwm_state *dest, const struct pwm_state *src) +{ + u64 dividend; + u32 remainder; + + *dest = *src; + + /* Round down to hrtimer resolution */ + dividend = dest->period; + remainder = do_div(dividend, hrtimer_resolution); + dest->period -= remainder; + + dividend = dest->duty_cycle; + remainder = do_div(dividend, hrtimer_resolution); + dest->duty_cycle -= remainder; +} + +static u64 pwm_gpio_toggle(struct pwm_gpio *gpwm, bool level) +{ + const struct pwm_state *state = &gpwm->state; + bool invert = state->polarity == PWM_POLARITY_INVERSED; + + gpwm->level = level; + gpiod_set_value(gpwm->gpio, gpwm->level ^ invert); + + if (!state->duty_cycle || state->duty_cycle == 
state->period) { + gpwm->running = false; + return 0; + } + + gpwm->running = true; + return level ? state->duty_cycle : state->period - state->duty_cycle; +} + +static enum hrtimer_restart pwm_gpio_timer(struct hrtimer *gpio_timer) +{ + struct pwm_gpio *gpwm = container_of(gpio_timer, struct pwm_gpio, + gpio_timer); + u64 next_toggle; + bool new_level; + + guard(spinlock_irqsave)(&gpwm->lock); + + /* Apply new state at end of current period */ + if (!gpwm->level && gpwm->changing) { + gpwm->changing = false; + gpwm->state = gpwm->next_state; + new_level = !!gpwm->state.duty_cycle; + } else { + new_level = !gpwm->level; + } + + next_toggle = pwm_gpio_toggle(gpwm, new_level); + if (next_toggle) + hrtimer_forward(gpio_timer, hrtimer_get_expires(gpio_timer), + ns_to_ktime(next_toggle)); + + return next_toggle ? HRTIMER_RESTART : HRTIMER_NORESTART; +} + +static int pwm_gpio_apply(struct pwm_chip *chip, struct pwm_device *pwm, + const struct pwm_state *state) +{ + struct pwm_gpio *gpwm = pwmchip_get_drvdata(chip); + bool invert = state->polarity == PWM_POLARITY_INVERSED; + + if (state->duty_cycle && state->duty_cycle < hrtimer_resolution) + return -EINVAL; + + if (state->duty_cycle != state->period && + (state->period - state->duty_cycle < hrtimer_resolution)) + return -EINVAL; + + if (!state->enabled) { + hrtimer_cancel(&gpwm->gpio_timer); + } else if (!gpwm->running) { + int ret; + + /* + * This just enables the output, but pwm_gpio_toggle() + * really starts the duty cycle. + */ + ret = gpiod_direction_output(gpwm->gpio, invert); + if (ret) + return ret; + } + + guard(spinlock_irqsave)(&gpwm->lock); + + if (!state->enabled) { + pwm_gpio_round(&gpwm->state, state); + gpwm->running = false; + gpwm->changing = false; + + gpiod_set_value(gpwm->gpio, invert); + } else if (gpwm->running) { + pwm_gpio_round(&gpwm->next_state, state); + gpwm->changing = true; + } else { + unsigned long next_toggle; + + pwm_gpio_round(&gpwm->state, state); + gpwm->changing = false; + + next_toggle = pwm_gpio_toggle(gpwm, !!state->duty_cycle); + if (next_toggle) + hrtimer_start(&gpwm->gpio_timer, next_toggle, + HRTIMER_MODE_REL); + } + + return 0; +} + +static int pwm_gpio_get_state(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_state *state) +{ + struct pwm_gpio *gpwm = pwmchip_get_drvdata(chip); + + guard(spinlock_irqsave)(&gpwm->lock); + + if (gpwm->changing) + *state = gpwm->next_state; + else + *state = gpwm->state; + + return 0; +} + +static const struct pwm_ops pwm_gpio_ops = { + .apply = pwm_gpio_apply, + .get_state = pwm_gpio_get_state, +}; + +static void pwm_gpio_disable_hrtimer(void *data) +{ + struct pwm_gpio *gpwm = data; + + hrtimer_cancel(&gpwm->gpio_timer); +} + +static int pwm_gpio_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct pwm_chip *chip; + struct pwm_gpio *gpwm; + int ret; + + chip = devm_pwmchip_alloc(dev, 1, sizeof(*gpwm)); + if (IS_ERR(chip)) + return PTR_ERR(chip); + + gpwm = pwmchip_get_drvdata(chip); + + spin_lock_init(&gpwm->lock); + + gpwm->gpio = devm_gpiod_get(dev, NULL, GPIOD_ASIS); + if (IS_ERR(gpwm->gpio)) + return dev_err_probe(dev, PTR_ERR(gpwm->gpio), + "%pfw: could not get gpio\n", + dev_fwnode(dev)); + + if (gpiod_cansleep(gpwm->gpio)) + return dev_err_probe(dev, -EINVAL, + "%pfw: sleeping GPIO not supported\n", + dev_fwnode(dev)); + + chip->ops = &pwm_gpio_ops; + chip->atomic = true; + + hrtimer_init(&gpwm->gpio_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + ret = devm_add_action_or_reset(dev, pwm_gpio_disable_hrtimer, gpwm); + if 
(ret) + return ret; + + gpwm->gpio_timer.function = pwm_gpio_timer; + + ret = pwmchip_add(chip); + if (ret < 0) + return dev_err_probe(dev, ret, "could not add pwmchip\n"); + + return 0; +} + +static const struct of_device_id pwm_gpio_dt_ids[] = { + { .compatible = "pwm-gpio" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, pwm_gpio_dt_ids); + +static struct platform_driver pwm_gpio_driver = { + .driver = { + .name = "pwm-gpio", + .of_match_table = pwm_gpio_dt_ids, + }, + .probe = pwm_gpio_probe, +}; +module_platform_driver(pwm_gpio_driver); + +MODULE_DESCRIPTION("PWM GPIO driver"); +MODULE_AUTHOR("Vincent Whitchurch"); +MODULE_LICENSE("GPL"); diff --git a/drivers/pwm/pwm-imx-tpm.c b/drivers/pwm/pwm-imx-tpm.c index c50ddbac43c8..96ea343856f0 100644 --- a/drivers/pwm/pwm-imx-tpm.c +++ b/drivers/pwm/pwm-imx-tpm.c @@ -20,6 +20,7 @@ #include <linux/io.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> #include <linux/pwm.h> #include <linux/slab.h> @@ -380,6 +381,7 @@ static int pwm_imx_tpm_probe(struct platform_device *pdev) static int pwm_imx_tpm_suspend(struct device *dev) { struct imx_tpm_pwm_chip *tpm = dev_get_drvdata(dev); + int ret; if (tpm->enable_count > 0) return -EBUSY; @@ -393,7 +395,11 @@ static int pwm_imx_tpm_suspend(struct device *dev) clk_disable_unprepare(tpm->clk); - return 0; + ret = pinctrl_pm_select_sleep_state(dev); + if (ret) + clk_prepare_enable(tpm->clk); + + return ret; } static int pwm_imx_tpm_resume(struct device *dev) @@ -401,9 +407,15 @@ static int pwm_imx_tpm_resume(struct device *dev) struct imx_tpm_pwm_chip *tpm = dev_get_drvdata(dev); int ret = 0; - ret = clk_prepare_enable(tpm->clk); + ret = pinctrl_pm_select_default_state(dev); if (ret) + return ret; + + ret = clk_prepare_enable(tpm->clk); + if (ret) { dev_err(dev, "failed to prepare or enable clock: %d\n", ret); + pinctrl_pm_select_sleep_state(dev); + } return ret; } diff --git a/drivers/pwm/pwm-imx1.c b/drivers/pwm/pwm-imx1.c index 1d2aae2d278f..d5535d208005 100644 --- a/drivers/pwm/pwm-imx1.c +++ b/drivers/pwm/pwm-imx1.c @@ -194,5 +194,6 @@ static struct platform_driver pwm_imx1_driver = { }; module_platform_driver(pwm_imx1_driver); +MODULE_DESCRIPTION("i.MX1 and i.MX21 Pulse Width Modulator driver"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>"); diff --git a/drivers/pwm/pwm-imx27.c b/drivers/pwm/pwm-imx27.c index e1412116ef65..9e2bbf5b4a8c 100644 --- a/drivers/pwm/pwm-imx27.c +++ b/drivers/pwm/pwm-imx27.c @@ -352,5 +352,6 @@ static struct platform_driver imx_pwm_driver = { }; module_platform_driver(imx_pwm_driver); +MODULE_DESCRIPTION("i.MX27 and later i.MX SoCs Pulse Width Modulator driver"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>"); diff --git a/drivers/pwm/pwm-intel-lgm.c b/drivers/pwm/pwm-intel-lgm.c index f9cc7c17c8f0..084c71a0a11b 100644 --- a/drivers/pwm/pwm-intel-lgm.c +++ b/drivers/pwm/pwm-intel-lgm.c @@ -230,4 +230,5 @@ static struct platform_driver lgm_pwm_driver = { }; module_platform_driver(lgm_pwm_driver); +MODULE_DESCRIPTION("Intel LGM Pulse Width Modulator driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/pwm/pwm-jz4740.c b/drivers/pwm/pwm-jz4740.c index da4bf543d357..6bdb01619380 100644 --- a/drivers/pwm/pwm-jz4740.c +++ b/drivers/pwm/pwm-jz4740.c @@ -201,12 +201,11 @@ static int jz4740_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, * state instead of its inactive state. 
*/ if ((state->polarity == PWM_POLARITY_NORMAL) ^ state->enabled) - regmap_update_bits(jz->map, TCU_REG_TCSRc(pwm->hwpwm), - TCU_TCSR_PWM_INITL_HIGH, 0); + regmap_clear_bits(jz->map, TCU_REG_TCSRc(pwm->hwpwm), + TCU_TCSR_PWM_INITL_HIGH); else - regmap_update_bits(jz->map, TCU_REG_TCSRc(pwm->hwpwm), - TCU_TCSR_PWM_INITL_HIGH, - TCU_TCSR_PWM_INITL_HIGH); + regmap_set_bits(jz->map, TCU_REG_TCSRc(pwm->hwpwm), + TCU_TCSR_PWM_INITL_HIGH); if (state->enabled) jz4740_pwm_enable(chip, pwm); diff --git a/drivers/pwm/pwm-lpss-pci.c b/drivers/pwm/pwm-lpss-pci.c index 25045c229520..f7ece2809e6b 100644 --- a/drivers/pwm/pwm-lpss-pci.c +++ b/drivers/pwm/pwm-lpss-pci.c @@ -46,25 +46,6 @@ static void pwm_lpss_remove_pci(struct pci_dev *pdev) pm_runtime_get_sync(&pdev->dev); } -static int pwm_lpss_runtime_suspend_pci(struct device *dev) -{ - /* - * The PCI core will handle transition to D3 automatically. We only - * need to provide runtime PM hooks for that to happen. - */ - return 0; -} - -static int pwm_lpss_runtime_resume_pci(struct device *dev) -{ - return 0; -} - -static DEFINE_RUNTIME_DEV_PM_OPS(pwm_lpss_pci_pm, - pwm_lpss_runtime_suspend_pci, - pwm_lpss_runtime_resume_pci, - NULL); - static const struct pci_device_id pwm_lpss_pci_ids[] = { { PCI_VDEVICE(INTEL, 0x0ac8), (unsigned long)&pwm_lpss_bxt_info}, { PCI_VDEVICE(INTEL, 0x0f08), (unsigned long)&pwm_lpss_byt_info}, @@ -84,9 +65,6 @@ static struct pci_driver pwm_lpss_driver_pci = { .id_table = pwm_lpss_pci_ids, .probe = pwm_lpss_probe_pci, .remove = pwm_lpss_remove_pci, - .driver = { - .pm = pm_ptr(&pwm_lpss_pci_pm), - }, }; module_pci_driver(pwm_lpss_driver_pci); diff --git a/drivers/pwm/pwm-lpss-platform.c b/drivers/pwm/pwm-lpss-platform.c index dbc9f5b17bdc..5130238a4567 100644 --- a/drivers/pwm/pwm-lpss-platform.c +++ b/drivers/pwm/pwm-lpss-platform.c @@ -55,14 +55,7 @@ static int pwm_lpss_probe_platform(struct platform_device *pdev) DPM_FLAG_SMART_SUSPEND); pm_runtime_set_active(&pdev->dev); - pm_runtime_enable(&pdev->dev); - - return 0; -} - -static void pwm_lpss_remove_platform(struct platform_device *pdev) -{ - pm_runtime_disable(&pdev->dev); + return devm_pm_runtime_enable(&pdev->dev); } static const struct acpi_device_id pwm_lpss_acpi_match[] = { @@ -80,7 +73,6 @@ static struct platform_driver pwm_lpss_driver_platform = { .acpi_match_table = pwm_lpss_acpi_match, }, .probe = pwm_lpss_probe_platform, - .remove_new = pwm_lpss_remove_platform, }; module_platform_driver(pwm_lpss_driver_platform); diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c index 19a87873ad60..01dfa0fab80a 100644 --- a/drivers/pwm/pwm-mediatek.c +++ b/drivers/pwm/pwm-mediatek.c @@ -395,4 +395,5 @@ static struct platform_driver pwm_mediatek_driver = { module_platform_driver(pwm_mediatek_driver); MODULE_AUTHOR("John Crispin <blogic@openwrt.org>"); +MODULE_DESCRIPTION("MediaTek general purpose Pulse Width Modulator driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c index b2f97dfb01bb..98e6c1533312 100644 --- a/drivers/pwm/pwm-meson.c +++ b/drivers/pwm/pwm-meson.c @@ -460,6 +460,37 @@ static int meson_pwm_init_channels_meson8b_v2(struct pwm_chip *chip) return meson_pwm_init_clocks_meson8b(chip, mux_parent_data); } +static void meson_pwm_s4_put_clk(void *data) +{ + struct clk *clk = data; + + clk_put(clk); +} + +static int meson_pwm_init_channels_s4(struct pwm_chip *chip) +{ + struct device *dev = pwmchip_parent(chip); + struct device_node *np = dev->of_node; + struct meson_pwm *meson = to_meson_pwm(chip); + 
int i, ret; + + for (i = 0; i < MESON_NUM_PWMS; i++) { + meson->channels[i].clk = of_clk_get(np, i); + if (IS_ERR(meson->channels[i].clk)) + return dev_err_probe(dev, + PTR_ERR(meson->channels[i].clk), + "Failed to get clk\n"); + + ret = devm_add_action_or_reset(dev, meson_pwm_s4_put_clk, + meson->channels[i].clk); + if (ret) + return dev_err_probe(dev, ret, + "Failed to add clk_put action\n"); + } + + return 0; +} + static const struct meson_pwm_data pwm_meson8b_data = { .parent_names = { "xtal", NULL, "fclk_div4", "fclk_div3" }, .channels_init = meson_pwm_init_channels_meson8b_legacy, @@ -498,6 +529,10 @@ static const struct meson_pwm_data pwm_meson8_v2_data = { .channels_init = meson_pwm_init_channels_meson8b_v2, }; +static const struct meson_pwm_data pwm_s4_data = { + .channels_init = meson_pwm_init_channels_s4, +}; + static const struct of_device_id meson_pwm_matches[] = { { .compatible = "amlogic,meson8-pwm-v2", @@ -536,6 +571,10 @@ static const struct of_device_id meson_pwm_matches[] = { .compatible = "amlogic,meson-g12a-ao-pwm-cd", .data = &pwm_g12a_ao_cd_data }, + { + .compatible = "amlogic,meson-s4-pwm", + .data = &pwm_s4_data + }, {}, }; MODULE_DEVICE_TABLE(of, meson_pwm_matches); diff --git a/drivers/pwm/pwm-pxa.c b/drivers/pwm/pwm-pxa.c index bb7bb48b2e6d..430bd6a709e9 100644 --- a/drivers/pwm/pwm-pxa.c +++ b/drivers/pwm/pwm-pxa.c @@ -208,4 +208,5 @@ static struct platform_driver pwm_driver = { module_platform_driver(pwm_driver); +MODULE_DESCRIPTION("PXA Pulse Width Modulator driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c index efb60c9f0cb3..7adf4f2b1049 100644 --- a/drivers/pwm/pwm-samsung.c +++ b/drivers/pwm/pwm-samsung.c @@ -644,6 +644,7 @@ static struct platform_driver pwm_samsung_driver = { }; module_platform_driver(pwm_samsung_driver); +MODULE_DESCRIPTION("Samsung Pulse Width Modulator driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Tomasz Figa <tomasz.figa@gmail.com>"); MODULE_ALIAS("platform:samsung-pwm"); diff --git a/drivers/pwm/pwm-spear.c b/drivers/pwm/pwm-spear.c index 6c6f3b38c835..4f372279f313 100644 --- a/drivers/pwm/pwm-spear.c +++ b/drivers/pwm/pwm-spear.c @@ -255,6 +255,7 @@ static struct platform_driver spear_pwm_driver = { module_platform_driver(spear_pwm_driver); +MODULE_DESCRIPTION("ST Microelectronics SPEAr Pulse Width Modulator driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Shiraz Hashim <shiraz.linux.kernel@gmail.com>"); MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.com>"); diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c index 8bae3fd2b330..fd754a99cf2e 100644 --- a/drivers/pwm/pwm-stm32.c +++ b/drivers/pwm/pwm-stm32.c @@ -368,7 +368,7 @@ static int stm32_pwm_config(struct stm32_pwm *priv, unsigned int ch, dty = mul_u64_u64_div_u64(duty_ns, clk_get_rate(priv->clk), (u64)NSEC_PER_SEC * (prescaler + 1)); - regmap_write(priv->regmap, TIM_CCR1 + 4 * ch, dty); + regmap_write(priv->regmap, TIM_CCRx(ch + 1), dty); /* Configure output mode */ shift = (ch & 0x1) * CCMR_CHANNEL_SHIFT; @@ -390,9 +390,9 @@ static int stm32_pwm_set_polarity(struct stm32_pwm *priv, unsigned int ch, { u32 mask; - mask = TIM_CCER_CC1P << (ch * 4); + mask = TIM_CCER_CCxP(ch + 1); if (priv->have_complementary_output) - mask |= TIM_CCER_CC1NP << (ch * 4); + mask |= TIM_CCER_CCxNP(ch + 1); regmap_update_bits(priv->regmap, TIM_CCER, mask, polarity == PWM_POLARITY_NORMAL ? 
0 : mask); @@ -410,9 +410,9 @@ static int stm32_pwm_enable(struct stm32_pwm *priv, unsigned int ch) return ret; /* Enable channel */ - mask = TIM_CCER_CC1E << (ch * 4); + mask = TIM_CCER_CCxE(ch + 1); if (priv->have_complementary_output) - mask |= TIM_CCER_CC1NE << (ch * 4); + mask |= TIM_CCER_CCxNE(ch + 1); regmap_set_bits(priv->regmap, TIM_CCER, mask); @@ -430,9 +430,9 @@ static void stm32_pwm_disable(struct stm32_pwm *priv, unsigned int ch) u32 mask; /* Disable channel */ - mask = TIM_CCER_CC1E << (ch * 4); + mask = TIM_CCER_CCxE(ch + 1); if (priv->have_complementary_output) - mask |= TIM_CCER_CC1NE << (ch * 4); + mask |= TIM_CCER_CCxNE(ch + 1); regmap_clear_bits(priv->regmap, TIM_CCER, mask); @@ -452,8 +452,9 @@ static int stm32_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, enabled = pwm->state.enabled; - if (enabled && !state->enabled) { - stm32_pwm_disable(priv, pwm->hwpwm); + if (!state->enabled) { + if (enabled) + stm32_pwm_disable(priv, pwm->hwpwm); return 0; } @@ -501,8 +502,8 @@ static int stm32_pwm_get_state(struct pwm_chip *chip, if (ret) goto out; - state->enabled = ccer & (TIM_CCER_CC1E << (ch * 4)); - state->polarity = (ccer & (TIM_CCER_CC1P << (ch * 4))) ? + state->enabled = ccer & TIM_CCER_CCxE(ch + 1); + state->polarity = (ccer & TIM_CCER_CCxP(ch + 1)) ? PWM_POLARITY_INVERSED : PWM_POLARITY_NORMAL; ret = regmap_read(priv->regmap, TIM_PSC, &psc); if (ret) @@ -510,7 +511,7 @@ static int stm32_pwm_get_state(struct pwm_chip *chip, ret = regmap_read(priv->regmap, TIM_ARR, &arr); if (ret) goto out; - ret = regmap_read(priv->regmap, TIM_CCR1 + 4 * ch, &ccr); + ret = regmap_read(priv->regmap, TIM_CCRx(ch + 1), &ccr); if (ret) goto out; @@ -711,7 +712,7 @@ static int stm32_pwm_suspend(struct device *dev) ccer = active_channels(priv); for (i = 0; i < chip->npwm; i++) { - mask = TIM_CCER_CC1E << (i * 4); + mask = TIM_CCER_CCxE(i + 1); if (ccer & mask) { dev_err(dev, "PWM %u still in use by consumer %s\n", i, chip->pwms[i].label); diff --git a/drivers/pwm/pwm-visconti.c b/drivers/pwm/pwm-visconti.c index 9e55380957be..28fae4979e3f 100644 --- a/drivers/pwm/pwm-visconti.c +++ b/drivers/pwm/pwm-visconti.c @@ -170,6 +170,7 @@ static struct platform_driver visconti_pwm_driver = { }; module_platform_driver(visconti_pwm_driver); +MODULE_DESCRIPTION("Toshiba Visconti Pulse Width Modulator driver"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>"); MODULE_ALIAS("platform:pwm-visconti"); diff --git a/drivers/pwm/pwm-xilinx.c b/drivers/pwm/pwm-xilinx.c index 3a7deebb0d0c..52c241982807 100644 --- a/drivers/pwm/pwm-xilinx.c +++ b/drivers/pwm/pwm-xilinx.c @@ -224,7 +224,6 @@ static int xilinx_pwm_probe(struct platform_device *pdev) if (IS_ERR(chip)) return PTR_ERR(chip); priv = xilinx_pwm_chip_to_priv(chip); - platform_set_drvdata(pdev, chip); regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(regs)) return PTR_ERR(regs); @@ -263,37 +262,24 @@ static int xilinx_pwm_probe(struct platform_device *pdev) * alas, such properties are not allowed to be used.
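 *
 * (Editor's note, not part of the original patch: devm_clk_get_enabled()
 * used below behaves like devm_clk_get() followed by clk_prepare_enable(),
 * with the matching clk_disable_unprepare() queued as a devres action.
 * Together with devm_clk_rate_exclusive_get() and devm_pwmchip_add(), all
 * cleanup becomes automatic, which is why xilinx_pwm_remove() is deleted
 * at the end of this hunk.)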
*/ - priv->clk = devm_clk_get(dev, "s_axi_aclk"); + priv->clk = devm_clk_get_enabled(dev, "s_axi_aclk"); if (IS_ERR(priv->clk)) return dev_err_probe(dev, PTR_ERR(priv->clk), "Could not get clock\n"); - ret = clk_prepare_enable(priv->clk); + ret = devm_clk_rate_exclusive_get(dev, priv->clk); if (ret) - return dev_err_probe(dev, ret, "Clock enable failed\n"); - clk_rate_exclusive_get(priv->clk); + return dev_err_probe(dev, ret, + "Failed to lock clock rate\n"); chip->ops = &xilinx_pwm_ops; - ret = pwmchip_add(chip); - if (ret) { - clk_rate_exclusive_put(priv->clk); - clk_disable_unprepare(priv->clk); + ret = devm_pwmchip_add(dev, chip); + if (ret) return dev_err_probe(dev, ret, "Could not register PWM chip\n"); - } return 0; } -static void xilinx_pwm_remove(struct platform_device *pdev) -{ - struct pwm_chip *chip = platform_get_drvdata(pdev); - struct xilinx_timer_priv *priv = xilinx_pwm_chip_to_priv(chip); - - pwmchip_remove(chip); - clk_rate_exclusive_put(priv->clk); - clk_disable_unprepare(priv->clk); -} - static const struct of_device_id xilinx_pwm_of_match[] = { { .compatible = "xlnx,xps-timer-1.00.a", }, {}, @@ -302,7 +288,6 @@ MODULE_DEVICE_TABLE(of, xilinx_pwm_of_match); static struct platform_driver xilinx_pwm_driver = { .probe = xilinx_pwm_probe, - .remove_new = xilinx_pwm_remove, .driver = { .name = "xilinx-pwm", .of_match_table = of_match_ptr(xilinx_pwm_of_match), diff --git a/drivers/ras/amd/atl/core.c b/drivers/ras/amd/atl/core.c index 6dc4e06305f7..4197e10993ac 100644 --- a/drivers/ras/amd/atl/core.c +++ b/drivers/ras/amd/atl/core.c @@ -49,26 +49,26 @@ static bool legacy_hole_en(struct addr_ctx *ctx) return FIELD_GET(DF_LEGACY_MMIO_HOLE_EN, reg); } -static int add_legacy_hole(struct addr_ctx *ctx) +static u64 add_legacy_hole(struct addr_ctx *ctx, u64 addr) { - u32 dram_hole_base; - u8 func = 0; - if (!legacy_hole_en(ctx)) - return 0; + return addr; - if (df_cfg.rev >= DF4) - func = 7; + if (addr >= df_cfg.dram_hole_base) + addr += (BIT_ULL(32) - df_cfg.dram_hole_base); - if (df_indirect_read_broadcast(ctx->node_id, func, 0x104, &dram_hole_base)) - return -EINVAL; + return addr; +} - dram_hole_base &= DF_DRAM_HOLE_BASE_MASK; +static u64 remove_legacy_hole(struct addr_ctx *ctx, u64 addr) +{ + if (!legacy_hole_en(ctx)) + return addr; - if (ctx->ret_addr >= dram_hole_base) - ctx->ret_addr += (BIT_ULL(32) - dram_hole_base); + if (addr >= df_cfg.dram_hole_base) + addr -= (BIT_ULL(32) - df_cfg.dram_hole_base); - return 0; + return addr; } static u64 get_base_addr(struct addr_ctx *ctx) @@ -83,14 +83,14 @@ static u64 get_base_addr(struct addr_ctx *ctx) return base_addr << DF_DRAM_BASE_LIMIT_LSB; } -static int add_base_and_hole(struct addr_ctx *ctx) +u64 add_base_and_hole(struct addr_ctx *ctx, u64 addr) { - ctx->ret_addr += get_base_addr(ctx); - - if (add_legacy_hole(ctx)) - return -EINVAL; + return add_legacy_hole(ctx, addr + get_base_addr(ctx)); +} - return 0; +u64 remove_base_and_hole(struct addr_ctx *ctx, u64 addr) +{ + return remove_legacy_hole(ctx, addr) - get_base_addr(ctx); } static bool late_hole_remove(struct addr_ctx *ctx) @@ -125,6 +125,9 @@ unsigned long norm_to_sys_addr(u8 socket_id, u8 die_id, u8 coh_st_inst_id, unsig ctx.inputs.die_id = die_id; ctx.inputs.coh_st_inst_id = coh_st_inst_id; + if (legacy_hole_en(&ctx) && !df_cfg.dram_hole_base) + return -EINVAL; + if (determine_node_id(&ctx, socket_id, die_id)) return -EINVAL; @@ -134,14 +137,14 @@ unsigned long norm_to_sys_addr(u8 socket_id, u8 die_id, u8 coh_st_inst_id, unsig if (denormalize_address(&ctx)) return -EINVAL; 
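/*
 * (Editor's sketch, not part of the original patch: add_base_and_hole() and
 * remove_base_and_hole() are inverses of each other, e.g.
 *
 *	u64 spa  = add_base_and_hole(&ctx, norm);
 *	u64 back = remove_base_and_hole(&ctx, spa);	// back == norm
 *
 * The map base from get_base_addr() is added first; if the legacy hole is
 * enabled and the result lands at or above df_cfg.dram_hole_base, it is
 * shifted up by BIT_ULL(32) - df_cfg.dram_hole_base to hop over the MMIO
 * hole. remove_base_and_hole() undoes both steps in reverse order.)
 */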
- if (!late_hole_remove(&ctx) && add_base_and_hole(&ctx)) - return -EINVAL; + if (!late_hole_remove(&ctx)) + ctx.ret_addr = add_base_and_hole(&ctx, ctx.ret_addr); if (dehash_address(&ctx)) return -EINVAL; - if (late_hole_remove(&ctx) && add_base_and_hole(&ctx)) - return -EINVAL; + if (late_hole_remove(&ctx)) + ctx.ret_addr = add_base_and_hole(&ctx, ctx.ret_addr); if (addr_over_limit(&ctx)) return -EINVAL; @@ -206,7 +209,7 @@ static int __init amd_atl_init(void) __module_get(THIS_MODULE); amd_atl_register_decoder(convert_umc_mca_addr_to_sys_addr); - pr_info("AMD Address Translation Library initialized"); + pr_info("AMD Address Translation Library initialized\n"); return 0; } @@ -222,4 +225,5 @@ static void __exit amd_atl_exit(void) module_init(amd_atl_init); module_exit(amd_atl_exit); +MODULE_DESCRIPTION("AMD Address Translation Library"); MODULE_LICENSE("GPL"); diff --git a/drivers/ras/amd/atl/dehash.c b/drivers/ras/amd/atl/dehash.c index 4ea46262c4f5..d4ee7ecabaee 100644 --- a/drivers/ras/amd/atl/dehash.c +++ b/drivers/ras/amd/atl/dehash.c @@ -12,41 +12,10 @@ #include "internal.h" -/* - * Verify the interleave bits are correct in the different interleaving - * settings. - * - * If @num_intlv_dies and/or @num_intlv_sockets are 1, it means the - * respective interleaving is disabled. - */ -static inline bool map_bits_valid(struct addr_ctx *ctx, u8 bit1, u8 bit2, - u8 num_intlv_dies, u8 num_intlv_sockets) -{ - if (!(ctx->map.intlv_bit_pos == bit1 || ctx->map.intlv_bit_pos == bit2)) { - pr_debug("Invalid interleave bit: %u", ctx->map.intlv_bit_pos); - return false; - } - - if (ctx->map.num_intlv_dies > num_intlv_dies) { - pr_debug("Invalid number of interleave dies: %u", ctx->map.num_intlv_dies); - return false; - } - - if (ctx->map.num_intlv_sockets > num_intlv_sockets) { - pr_debug("Invalid number of interleave sockets: %u", ctx->map.num_intlv_sockets); - return false; - } - - return true; -} - static int df2_dehash_addr(struct addr_ctx *ctx) { u8 hashed_bit, intlv_bit, intlv_bit_pos; - if (!map_bits_valid(ctx, 8, 9, 1, 1)) - return -EINVAL; - intlv_bit_pos = ctx->map.intlv_bit_pos; intlv_bit = !!(BIT_ULL(intlv_bit_pos) & ctx->ret_addr); @@ -67,9 +36,6 @@ static int df3_dehash_addr(struct addr_ctx *ctx) bool hash_ctl_64k, hash_ctl_2M, hash_ctl_1G; u8 hashed_bit, intlv_bit, intlv_bit_pos; - if (!map_bits_valid(ctx, 8, 9, 1, 1)) - return -EINVAL; - hash_ctl_64k = FIELD_GET(DF3_HASH_CTL_64K, ctx->map.ctl); hash_ctl_2M = FIELD_GET(DF3_HASH_CTL_2M, ctx->map.ctl); hash_ctl_1G = FIELD_GET(DF3_HASH_CTL_1G, ctx->map.ctl); @@ -171,9 +137,6 @@ static int df4_dehash_addr(struct addr_ctx *ctx) bool hash_ctl_64k, hash_ctl_2M, hash_ctl_1G; u8 hashed_bit, intlv_bit; - if (!map_bits_valid(ctx, 8, 8, 1, 2)) - return -EINVAL; - hash_ctl_64k = FIELD_GET(DF4_HASH_CTL_64K, ctx->map.ctl); hash_ctl_2M = FIELD_GET(DF4_HASH_CTL_2M, ctx->map.ctl); hash_ctl_1G = FIELD_GET(DF4_HASH_CTL_1G, ctx->map.ctl); @@ -247,9 +210,6 @@ static int df4p5_dehash_addr(struct addr_ctx *ctx) u8 hashed_bit, intlv_bit; u64 rehash_vector; - if (!map_bits_valid(ctx, 8, 8, 1, 2)) - return -EINVAL; - hash_ctl_64k = FIELD_GET(DF4_HASH_CTL_64K, ctx->map.ctl); hash_ctl_2M = FIELD_GET(DF4_HASH_CTL_2M, ctx->map.ctl); hash_ctl_1G = FIELD_GET(DF4_HASH_CTL_1G, ctx->map.ctl); @@ -360,9 +320,6 @@ static int mi300_dehash_addr(struct addr_ctx *ctx) bool hashed_bit, intlv_bit, test_bit; u8 num_intlv_bits, base_bit, i; - if (!map_bits_valid(ctx, 8, 8, 4, 1)) - return -EINVAL; - hash_ctl_4k = FIELD_GET(DF4p5_HASH_CTL_4K, ctx->map.ctl); hash_ctl_64k = 
FIELD_GET(DF4_HASH_CTL_64K, ctx->map.ctl); hash_ctl_2M = FIELD_GET(DF4_HASH_CTL_2M, ctx->map.ctl); diff --git a/drivers/ras/amd/atl/denormalize.c b/drivers/ras/amd/atl/denormalize.c index e279224288d6..1a525cfa983c 100644 --- a/drivers/ras/amd/atl/denormalize.c +++ b/drivers/ras/amd/atl/denormalize.c @@ -448,6 +448,118 @@ static u16 get_logical_coh_st_fabric_id(struct addr_ctx *ctx) return (phys_fabric_id & df_cfg.node_id_mask) | log_fabric_id; } +static u16 get_logical_coh_st_fabric_id_for_current_spa(struct addr_ctx *ctx, + struct df4p5_denorm_ctx *denorm_ctx) +{ + bool hash_ctl_64k, hash_ctl_2M, hash_ctl_1G, hash_ctl_1T; + bool hash_pa8, hash_pa9, hash_pa12, hash_pa13; + u64 cs_id = 0; + + hash_ctl_64k = FIELD_GET(DF4_HASH_CTL_64K, ctx->map.ctl); + hash_ctl_2M = FIELD_GET(DF4_HASH_CTL_2M, ctx->map.ctl); + hash_ctl_1G = FIELD_GET(DF4_HASH_CTL_1G, ctx->map.ctl); + hash_ctl_1T = FIELD_GET(DF4p5_HASH_CTL_1T, ctx->map.ctl); + + hash_pa8 = FIELD_GET(BIT_ULL(8), denorm_ctx->current_spa); + hash_pa8 ^= FIELD_GET(BIT_ULL(14), denorm_ctx->current_spa); + hash_pa8 ^= FIELD_GET(BIT_ULL(16), denorm_ctx->current_spa) & hash_ctl_64k; + hash_pa8 ^= FIELD_GET(BIT_ULL(21), denorm_ctx->current_spa) & hash_ctl_2M; + hash_pa8 ^= FIELD_GET(BIT_ULL(30), denorm_ctx->current_spa) & hash_ctl_1G; + hash_pa8 ^= FIELD_GET(BIT_ULL(40), denorm_ctx->current_spa) & hash_ctl_1T; + + hash_pa9 = FIELD_GET(BIT_ULL(9), denorm_ctx->current_spa); + hash_pa9 ^= FIELD_GET(BIT_ULL(17), denorm_ctx->current_spa) & hash_ctl_64k; + hash_pa9 ^= FIELD_GET(BIT_ULL(22), denorm_ctx->current_spa) & hash_ctl_2M; + hash_pa9 ^= FIELD_GET(BIT_ULL(31), denorm_ctx->current_spa) & hash_ctl_1G; + hash_pa9 ^= FIELD_GET(BIT_ULL(41), denorm_ctx->current_spa) & hash_ctl_1T; + + hash_pa12 = FIELD_GET(BIT_ULL(12), denorm_ctx->current_spa); + hash_pa12 ^= FIELD_GET(BIT_ULL(18), denorm_ctx->current_spa) & hash_ctl_64k; + hash_pa12 ^= FIELD_GET(BIT_ULL(23), denorm_ctx->current_spa) & hash_ctl_2M; + hash_pa12 ^= FIELD_GET(BIT_ULL(32), denorm_ctx->current_spa) & hash_ctl_1G; + hash_pa12 ^= FIELD_GET(BIT_ULL(42), denorm_ctx->current_spa) & hash_ctl_1T; + + hash_pa13 = FIELD_GET(BIT_ULL(13), denorm_ctx->current_spa); + hash_pa13 ^= FIELD_GET(BIT_ULL(19), denorm_ctx->current_spa) & hash_ctl_64k; + hash_pa13 ^= FIELD_GET(BIT_ULL(24), denorm_ctx->current_spa) & hash_ctl_2M; + hash_pa13 ^= FIELD_GET(BIT_ULL(33), denorm_ctx->current_spa) & hash_ctl_1G; + hash_pa13 ^= FIELD_GET(BIT_ULL(43), denorm_ctx->current_spa) & hash_ctl_1T; + + switch (ctx->map.intlv_mode) { + case DF4p5_NPS0_24CHAN_1K_HASH: + cs_id = FIELD_GET(GENMASK_ULL(63, 13), denorm_ctx->current_spa) << 3; + cs_id %= denorm_ctx->mod_value; + cs_id <<= 2; + cs_id |= (hash_pa9 | (hash_pa12 << 1)); + cs_id |= hash_pa8 << df_cfg.socket_id_shift; + break; + + case DF4p5_NPS0_24CHAN_2K_HASH: + cs_id = FIELD_GET(GENMASK_ULL(63, 14), denorm_ctx->current_spa) << 4; + cs_id %= denorm_ctx->mod_value; + cs_id <<= 2; + cs_id |= (hash_pa12 | (hash_pa13 << 1)); + cs_id |= hash_pa8 << df_cfg.socket_id_shift; + break; + + case DF4p5_NPS1_12CHAN_1K_HASH: + cs_id = FIELD_GET(GENMASK_ULL(63, 12), denorm_ctx->current_spa) << 2; + cs_id %= denorm_ctx->mod_value; + cs_id <<= 2; + cs_id |= (hash_pa8 | (hash_pa9 << 1)); + break; + + case DF4p5_NPS1_12CHAN_2K_HASH: + cs_id = FIELD_GET(GENMASK_ULL(63, 13), denorm_ctx->current_spa) << 3; + cs_id %= denorm_ctx->mod_value; + cs_id <<= 2; + cs_id |= (hash_pa8 | (hash_pa12 << 1)); + break; + + case DF4p5_NPS2_6CHAN_1K_HASH: + case DF4p5_NPS1_10CHAN_1K_HASH: + cs_id = 
FIELD_GET(GENMASK_ULL(63, 12), denorm_ctx->current_spa) << 2; + cs_id |= (FIELD_GET(BIT_ULL(9), denorm_ctx->current_spa) << 1); + cs_id %= denorm_ctx->mod_value; + cs_id <<= 1; + cs_id |= hash_pa8; + break; + + case DF4p5_NPS2_6CHAN_2K_HASH: + case DF4p5_NPS1_10CHAN_2K_HASH: + cs_id = FIELD_GET(GENMASK_ULL(63, 12), denorm_ctx->current_spa) << 2; + cs_id %= denorm_ctx->mod_value; + cs_id <<= 1; + cs_id |= hash_pa8; + break; + + case DF4p5_NPS4_3CHAN_1K_HASH: + case DF4p5_NPS2_5CHAN_1K_HASH: + cs_id = FIELD_GET(GENMASK_ULL(63, 12), denorm_ctx->current_spa) << 2; + cs_id |= FIELD_GET(GENMASK_ULL(9, 8), denorm_ctx->current_spa); + cs_id %= denorm_ctx->mod_value; + break; + + case DF4p5_NPS4_3CHAN_2K_HASH: + case DF4p5_NPS2_5CHAN_2K_HASH: + cs_id = FIELD_GET(GENMASK_ULL(63, 12), denorm_ctx->current_spa) << 2; + cs_id |= FIELD_GET(BIT_ULL(8), denorm_ctx->current_spa) << 1; + cs_id %= denorm_ctx->mod_value; + break; + + default: + atl_debug_on_bad_intlv_mode(ctx); + return 0; + } + + if (cs_id > 0xffff) { + atl_debug(ctx, "Translation error: Resulting cs_id larger than u16\n"); + return 0; + } + + return cs_id; +} + static int denorm_addr_common(struct addr_ctx *ctx) { u64 denorm_addr; @@ -699,6 +811,442 @@ static int denorm_addr_df4_np2(struct addr_ctx *ctx) return 0; } +static u64 normalize_addr_df4p5_np2(struct addr_ctx *ctx, struct df4p5_denorm_ctx *denorm_ctx, + u64 addr) +{ + u64 temp_addr_a = 0, temp_addr_b = 0; + + switch (ctx->map.intlv_mode) { + case DF4p5_NPS0_24CHAN_1K_HASH: + case DF4p5_NPS1_12CHAN_1K_HASH: + case DF4p5_NPS2_6CHAN_1K_HASH: + case DF4p5_NPS4_3CHAN_1K_HASH: + case DF4p5_NPS1_10CHAN_1K_HASH: + case DF4p5_NPS2_5CHAN_1K_HASH: + temp_addr_a = FIELD_GET(GENMASK_ULL(11, 10), addr) << 8; + break; + + case DF4p5_NPS0_24CHAN_2K_HASH: + case DF4p5_NPS1_12CHAN_2K_HASH: + case DF4p5_NPS2_6CHAN_2K_HASH: + case DF4p5_NPS4_3CHAN_2K_HASH: + case DF4p5_NPS1_10CHAN_2K_HASH: + case DF4p5_NPS2_5CHAN_2K_HASH: + temp_addr_a = FIELD_GET(GENMASK_ULL(11, 9), addr) << 8; + break; + + default: + atl_debug_on_bad_intlv_mode(ctx); + return 0; + } + + switch (ctx->map.intlv_mode) { + case DF4p5_NPS0_24CHAN_1K_HASH: + temp_addr_b = FIELD_GET(GENMASK_ULL(63, 13), addr) / denorm_ctx->mod_value; + temp_addr_b <<= 10; + break; + + case DF4p5_NPS0_24CHAN_2K_HASH: + temp_addr_b = FIELD_GET(GENMASK_ULL(63, 14), addr) / denorm_ctx->mod_value; + temp_addr_b <<= 11; + break; + + case DF4p5_NPS1_12CHAN_1K_HASH: + temp_addr_b = FIELD_GET(GENMASK_ULL(63, 12), addr) / denorm_ctx->mod_value; + temp_addr_b <<= 10; + break; + + case DF4p5_NPS1_12CHAN_2K_HASH: + temp_addr_b = FIELD_GET(GENMASK_ULL(63, 13), addr) / denorm_ctx->mod_value; + temp_addr_b <<= 11; + break; + + case DF4p5_NPS2_6CHAN_1K_HASH: + case DF4p5_NPS1_10CHAN_1K_HASH: + temp_addr_b = FIELD_GET(GENMASK_ULL(63, 12), addr) << 1; + temp_addr_b |= FIELD_GET(BIT_ULL(9), addr); + temp_addr_b /= denorm_ctx->mod_value; + temp_addr_b <<= 10; + break; + + case DF4p5_NPS2_6CHAN_2K_HASH: + case DF4p5_NPS1_10CHAN_2K_HASH: + temp_addr_b = FIELD_GET(GENMASK_ULL(63, 12), addr) / denorm_ctx->mod_value; + temp_addr_b <<= 11; + break; + + case DF4p5_NPS4_3CHAN_1K_HASH: + case DF4p5_NPS2_5CHAN_1K_HASH: + temp_addr_b = FIELD_GET(GENMASK_ULL(63, 12), addr) << 2; + temp_addr_b |= FIELD_GET(GENMASK_ULL(9, 8), addr); + temp_addr_b /= denorm_ctx->mod_value; + temp_addr_b <<= 10; + break; + + case DF4p5_NPS4_3CHAN_2K_HASH: + case DF4p5_NPS2_5CHAN_2K_HASH: + temp_addr_b = FIELD_GET(GENMASK_ULL(63, 12), addr) << 1; + temp_addr_b |= FIELD_GET(BIT_ULL(8), addr); + temp_addr_b /= 
denorm_ctx->mod_value; + temp_addr_b <<= 11; + break; + + default: + atl_debug_on_bad_intlv_mode(ctx); + return 0; + } + + return denorm_ctx->base_denorm_addr | temp_addr_a | temp_addr_b; +} + +static void recalculate_hashed_bits_df4p5_np2(struct addr_ctx *ctx, + struct df4p5_denorm_ctx *denorm_ctx) +{ + bool hash_ctl_64k, hash_ctl_2M, hash_ctl_1G, hash_ctl_1T, hashed_bit; + + if (!denorm_ctx->rehash_vector) + return; + + hash_ctl_64k = FIELD_GET(DF4_HASH_CTL_64K, ctx->map.ctl); + hash_ctl_2M = FIELD_GET(DF4_HASH_CTL_2M, ctx->map.ctl); + hash_ctl_1G = FIELD_GET(DF4_HASH_CTL_1G, ctx->map.ctl); + hash_ctl_1T = FIELD_GET(DF4p5_HASH_CTL_1T, ctx->map.ctl); + + if (denorm_ctx->rehash_vector & BIT_ULL(8)) { + hashed_bit = FIELD_GET(BIT_ULL(8), denorm_ctx->current_spa); + hashed_bit ^= FIELD_GET(BIT_ULL(14), denorm_ctx->current_spa); + hashed_bit ^= FIELD_GET(BIT_ULL(16), denorm_ctx->current_spa) & hash_ctl_64k; + hashed_bit ^= FIELD_GET(BIT_ULL(21), denorm_ctx->current_spa) & hash_ctl_2M; + hashed_bit ^= FIELD_GET(BIT_ULL(30), denorm_ctx->current_spa) & hash_ctl_1G; + hashed_bit ^= FIELD_GET(BIT_ULL(40), denorm_ctx->current_spa) & hash_ctl_1T; + + if (FIELD_GET(BIT_ULL(8), denorm_ctx->current_spa) != hashed_bit) + denorm_ctx->current_spa ^= BIT_ULL(8); + } + + if (denorm_ctx->rehash_vector & BIT_ULL(9)) { + hashed_bit = FIELD_GET(BIT_ULL(9), denorm_ctx->current_spa); + hashed_bit ^= FIELD_GET(BIT_ULL(17), denorm_ctx->current_spa) & hash_ctl_64k; + hashed_bit ^= FIELD_GET(BIT_ULL(22), denorm_ctx->current_spa) & hash_ctl_2M; + hashed_bit ^= FIELD_GET(BIT_ULL(31), denorm_ctx->current_spa) & hash_ctl_1G; + hashed_bit ^= FIELD_GET(BIT_ULL(41), denorm_ctx->current_spa) & hash_ctl_1T; + + if (FIELD_GET(BIT_ULL(9), denorm_ctx->current_spa) != hashed_bit) + denorm_ctx->current_spa ^= BIT_ULL(9); + } + + if (denorm_ctx->rehash_vector & BIT_ULL(12)) { + hashed_bit = FIELD_GET(BIT_ULL(12), denorm_ctx->current_spa); + hashed_bit ^= FIELD_GET(BIT_ULL(18), denorm_ctx->current_spa) & hash_ctl_64k; + hashed_bit ^= FIELD_GET(BIT_ULL(23), denorm_ctx->current_spa) & hash_ctl_2M; + hashed_bit ^= FIELD_GET(BIT_ULL(32), denorm_ctx->current_spa) & hash_ctl_1G; + hashed_bit ^= FIELD_GET(BIT_ULL(42), denorm_ctx->current_spa) & hash_ctl_1T; + + if (FIELD_GET(BIT_ULL(12), denorm_ctx->current_spa) != hashed_bit) + denorm_ctx->current_spa ^= BIT_ULL(12); + } + + if (denorm_ctx->rehash_vector & BIT_ULL(13)) { + hashed_bit = FIELD_GET(BIT_ULL(13), denorm_ctx->current_spa); + hashed_bit ^= FIELD_GET(BIT_ULL(19), denorm_ctx->current_spa) & hash_ctl_64k; + hashed_bit ^= FIELD_GET(BIT_ULL(24), denorm_ctx->current_spa) & hash_ctl_2M; + hashed_bit ^= FIELD_GET(BIT_ULL(33), denorm_ctx->current_spa) & hash_ctl_1G; + hashed_bit ^= FIELD_GET(BIT_ULL(43), denorm_ctx->current_spa) & hash_ctl_1T; + + if (FIELD_GET(BIT_ULL(13), denorm_ctx->current_spa) != hashed_bit) + denorm_ctx->current_spa ^= BIT_ULL(13); + } +} + +static bool match_logical_coh_st_fabric_id(struct addr_ctx *ctx, + struct df4p5_denorm_ctx *denorm_ctx) +{ + /* + * The logical CS fabric ID of the permutation must be calculated from the + * current SPA with the base and with the MMIO hole. 
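+ * (Editor's note, not part of the original patch: this is because the
+ * hash_pa8..hash_pa13 inputs above are XOR-folded from high bits of the
+ * full SPA, so stripping the base or the hole first would change the
+ * computed fabric ID.)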
+ */ + u16 id = get_logical_coh_st_fabric_id_for_current_spa(ctx, denorm_ctx); + + atl_debug(ctx, "Checking calculated logical coherent station fabric id:\n"); + atl_debug(ctx, " calculated fabric id = 0x%x\n", id); + atl_debug(ctx, " expected fabric id = 0x%x\n", denorm_ctx->coh_st_fabric_id); + + return denorm_ctx->coh_st_fabric_id == id; +} + +static bool match_norm_addr(struct addr_ctx *ctx, struct df4p5_denorm_ctx *denorm_ctx) +{ + u64 addr = remove_base_and_hole(ctx, denorm_ctx->current_spa); + + /* + * The normalized address must be calculated with the current SPA without + * the base and without the MMIO hole. + */ + addr = normalize_addr_df4p5_np2(ctx, denorm_ctx, addr); + + atl_debug(ctx, "Checking calculated normalized address:\n"); + atl_debug(ctx, " calculated normalized addr = 0x%016llx\n", addr); + atl_debug(ctx, " expected normalized addr = 0x%016llx\n", ctx->ret_addr); + + return addr == ctx->ret_addr; +} + +static int check_permutations(struct addr_ctx *ctx, struct df4p5_denorm_ctx *denorm_ctx) +{ + u64 test_perm, temp_addr, denorm_addr, num_perms; + unsigned int dropped_remainder; + + denorm_ctx->div_addr *= denorm_ctx->mod_value; + + /* + * The high order bits of num_permutations represent the permutations + * of the dropped remainder. This will be either 0-3 or 0-5 depending + * on the interleave mode. The low order bits represent the + * permutations of other "lost" bits which will be any combination of + * 1, 2, or 3 bits depending on the interleave mode. + */ + num_perms = denorm_ctx->mod_value << denorm_ctx->perm_shift; + + for (test_perm = 0; test_perm < num_perms; test_perm++) { + denorm_addr = denorm_ctx->base_denorm_addr; + dropped_remainder = test_perm >> denorm_ctx->perm_shift; + temp_addr = denorm_ctx->div_addr + dropped_remainder; + + switch (ctx->map.intlv_mode) { + case DF4p5_NPS0_24CHAN_2K_HASH: + denorm_addr |= temp_addr << 14; + break; + + case DF4p5_NPS0_24CHAN_1K_HASH: + case DF4p5_NPS1_12CHAN_2K_HASH: + denorm_addr |= temp_addr << 13; + break; + + case DF4p5_NPS1_12CHAN_1K_HASH: + case DF4p5_NPS2_6CHAN_2K_HASH: + case DF4p5_NPS1_10CHAN_2K_HASH: + denorm_addr |= temp_addr << 12; + break; + + case DF4p5_NPS2_6CHAN_1K_HASH: + case DF4p5_NPS1_10CHAN_1K_HASH: + denorm_addr |= FIELD_GET(BIT_ULL(0), temp_addr) << 9; + denorm_addr |= FIELD_GET(GENMASK_ULL(63, 1), temp_addr) << 12; + break; + + case DF4p5_NPS4_3CHAN_1K_HASH: + case DF4p5_NPS2_5CHAN_1K_HASH: + denorm_addr |= FIELD_GET(GENMASK_ULL(1, 0), temp_addr) << 8; + denorm_addr |= FIELD_GET(GENMASK_ULL(63, 2), (temp_addr)) << 12; + break; + + case DF4p5_NPS4_3CHAN_2K_HASH: + case DF4p5_NPS2_5CHAN_2K_HASH: + denorm_addr |= FIELD_GET(BIT_ULL(0), temp_addr) << 8; + denorm_addr |= FIELD_GET(GENMASK_ULL(63, 1), temp_addr) << 12; + break; + + default: + atl_debug_on_bad_intlv_mode(ctx); + return -EINVAL; + } + + switch (ctx->map.intlv_mode) { + case DF4p5_NPS0_24CHAN_1K_HASH: + denorm_addr |= FIELD_GET(BIT_ULL(0), test_perm) << 8; + denorm_addr |= FIELD_GET(BIT_ULL(1), test_perm) << 9; + denorm_addr |= FIELD_GET(BIT_ULL(2), test_perm) << 12; + break; + + case DF4p5_NPS0_24CHAN_2K_HASH: + denorm_addr |= FIELD_GET(BIT_ULL(0), test_perm) << 8; + denorm_addr |= FIELD_GET(BIT_ULL(1), test_perm) << 12; + denorm_addr |= FIELD_GET(BIT_ULL(2), test_perm) << 13; + break; + + case DF4p5_NPS1_12CHAN_2K_HASH: + denorm_addr |= FIELD_GET(BIT_ULL(0), test_perm) << 8; + denorm_addr |= FIELD_GET(BIT_ULL(1), test_perm) << 12; + break; + + case DF4p5_NPS1_12CHAN_1K_HASH: + case DF4p5_NPS4_3CHAN_1K_HASH: + case 
DF4p5_NPS2_5CHAN_1K_HASH: + denorm_addr |= FIELD_GET(BIT_ULL(0), test_perm) << 8; + denorm_addr |= FIELD_GET(BIT_ULL(1), test_perm) << 9; + break; + + case DF4p5_NPS2_6CHAN_1K_HASH: + case DF4p5_NPS2_6CHAN_2K_HASH: + case DF4p5_NPS4_3CHAN_2K_HASH: + case DF4p5_NPS1_10CHAN_1K_HASH: + case DF4p5_NPS1_10CHAN_2K_HASH: + case DF4p5_NPS2_5CHAN_2K_HASH: + denorm_addr |= FIELD_GET(BIT_ULL(0), test_perm) << 8; + break; + + default: + atl_debug_on_bad_intlv_mode(ctx); + return -EINVAL; + } + + denorm_ctx->current_spa = add_base_and_hole(ctx, denorm_addr); + recalculate_hashed_bits_df4p5_np2(ctx, denorm_ctx); + + atl_debug(ctx, "Checking potential system physical address 0x%016llx\n", + denorm_ctx->current_spa); + + if (!match_logical_coh_st_fabric_id(ctx, denorm_ctx)) + continue; + + if (!match_norm_addr(ctx, denorm_ctx)) + continue; + + if (denorm_ctx->resolved_spa == INVALID_SPA || + denorm_ctx->current_spa > denorm_ctx->resolved_spa) + denorm_ctx->resolved_spa = denorm_ctx->current_spa; + } + + if (denorm_ctx->resolved_spa == INVALID_SPA) { + atl_debug(ctx, "Failed to find valid SPA for normalized address 0x%016llx\n", + ctx->ret_addr); + return -EINVAL; + } + + /* Return the resolved SPA without the base, without the MMIO hole */ + ctx->ret_addr = remove_base_and_hole(ctx, denorm_ctx->resolved_spa); + + return 0; +} + +static int init_df4p5_denorm_ctx(struct addr_ctx *ctx, struct df4p5_denorm_ctx *denorm_ctx) +{ + denorm_ctx->current_spa = INVALID_SPA; + denorm_ctx->resolved_spa = INVALID_SPA; + + switch (ctx->map.intlv_mode) { + case DF4p5_NPS0_24CHAN_1K_HASH: + denorm_ctx->perm_shift = 3; + denorm_ctx->rehash_vector = BIT(8) | BIT(9) | BIT(12); + break; + + case DF4p5_NPS0_24CHAN_2K_HASH: + denorm_ctx->perm_shift = 3; + denorm_ctx->rehash_vector = BIT(8) | BIT(12) | BIT(13); + break; + + case DF4p5_NPS1_12CHAN_1K_HASH: + denorm_ctx->perm_shift = 2; + denorm_ctx->rehash_vector = BIT(8); + break; + + case DF4p5_NPS1_12CHAN_2K_HASH: + denorm_ctx->perm_shift = 2; + denorm_ctx->rehash_vector = BIT(8) | BIT(12); + break; + + case DF4p5_NPS2_6CHAN_1K_HASH: + case DF4p5_NPS2_6CHAN_2K_HASH: + case DF4p5_NPS1_10CHAN_1K_HASH: + case DF4p5_NPS1_10CHAN_2K_HASH: + denorm_ctx->perm_shift = 1; + denorm_ctx->rehash_vector = BIT(8); + break; + + case DF4p5_NPS4_3CHAN_1K_HASH: + case DF4p5_NPS2_5CHAN_1K_HASH: + denorm_ctx->perm_shift = 2; + denorm_ctx->rehash_vector = 0; + break; + + case DF4p5_NPS4_3CHAN_2K_HASH: + case DF4p5_NPS2_5CHAN_2K_HASH: + denorm_ctx->perm_shift = 1; + denorm_ctx->rehash_vector = 0; + break; + + default: + atl_debug_on_bad_intlv_mode(ctx); + return -EINVAL; + } + + denorm_ctx->base_denorm_addr = FIELD_GET(GENMASK_ULL(7, 0), ctx->ret_addr); + + switch (ctx->map.intlv_mode) { + case DF4p5_NPS0_24CHAN_1K_HASH: + case DF4p5_NPS1_12CHAN_1K_HASH: + case DF4p5_NPS2_6CHAN_1K_HASH: + case DF4p5_NPS4_3CHAN_1K_HASH: + case DF4p5_NPS1_10CHAN_1K_HASH: + case DF4p5_NPS2_5CHAN_1K_HASH: + denorm_ctx->base_denorm_addr |= FIELD_GET(GENMASK_ULL(9, 8), ctx->ret_addr) << 10; + denorm_ctx->div_addr = FIELD_GET(GENMASK_ULL(63, 10), ctx->ret_addr); + break; + + case DF4p5_NPS0_24CHAN_2K_HASH: + case DF4p5_NPS1_12CHAN_2K_HASH: + case DF4p5_NPS2_6CHAN_2K_HASH: + case DF4p5_NPS4_3CHAN_2K_HASH: + case DF4p5_NPS1_10CHAN_2K_HASH: + case DF4p5_NPS2_5CHAN_2K_HASH: + denorm_ctx->base_denorm_addr |= FIELD_GET(GENMASK_ULL(10, 8), ctx->ret_addr) << 9; + denorm_ctx->div_addr = FIELD_GET(GENMASK_ULL(63, 11), ctx->ret_addr); + break; + + default: + atl_debug_on_bad_intlv_mode(ctx); + return -EINVAL; + } + + if 
(ctx->map.num_intlv_chan % 3 == 0) + denorm_ctx->mod_value = 3; + else + denorm_ctx->mod_value = 5; + + denorm_ctx->coh_st_fabric_id = get_logical_coh_st_fabric_id(ctx) - get_dst_fabric_id(ctx); + + atl_debug(ctx, "Initialized df4p5_denorm_ctx:"); + atl_debug(ctx, " mod_value = %d", denorm_ctx->mod_value); + atl_debug(ctx, " perm_shift = %d", denorm_ctx->perm_shift); + atl_debug(ctx, " rehash_vector = 0x%x", denorm_ctx->rehash_vector); + atl_debug(ctx, " base_denorm_addr = 0x%016llx", denorm_ctx->base_denorm_addr); + atl_debug(ctx, " div_addr = 0x%016llx", denorm_ctx->div_addr); + atl_debug(ctx, " coh_st_fabric_id = 0x%x", denorm_ctx->coh_st_fabric_id); + + return 0; +} + +/* + * For DF 4.5, parts of the physical address can be directly pulled from the + * normalized address. The exact bits will differ between interleave modes, but + * using NPS0_24CHAN_1K_HASH as an example, the normalized address consists of + * bits [63:13] (divided by 3), bits [11:10], and bits [7:0] of the system + * physical address. + * + * In this case, there is no way to reconstruct the missing bits (bits 8, 9, + * and 12) from the normalized address. Additionally, when bits [63:13] are + * divided by 3, the remainder is dropped. Determine the proper combination of + * "lost" bits and dropped remainder by iterating through each possible + * permutation of these bits and then normalizing the generated system physical + * addresses. If the normalized address matches the address we are trying to + * translate, then we have found the correct permutation of bits. + */ +static int denorm_addr_df4p5_np2(struct addr_ctx *ctx) +{ + struct df4p5_denorm_ctx denorm_ctx; + int ret = 0; + + memset(&denorm_ctx, 0, sizeof(denorm_ctx)); + + atl_debug(ctx, "Denormalizing DF 4.5 normalized address 0x%016llx", ctx->ret_addr); + + ret = init_df4p5_denorm_ctx(ctx, &denorm_ctx); + if (ret) + return ret; + + return check_permutations(ctx, &denorm_ctx); +} + int denormalize_address(struct addr_ctx *ctx) { switch (ctx->map.intlv_mode) { @@ -710,6 +1258,19 @@ int denormalize_address(struct addr_ctx *ctx) case DF4_NPS2_5CHAN_HASH: case DF4_NPS1_10CHAN_HASH: return denorm_addr_df4_np2(ctx); + case DF4p5_NPS0_24CHAN_1K_HASH: + case DF4p5_NPS4_3CHAN_1K_HASH: + case DF4p5_NPS2_6CHAN_1K_HASH: + case DF4p5_NPS1_12CHAN_1K_HASH: + case DF4p5_NPS2_5CHAN_1K_HASH: + case DF4p5_NPS1_10CHAN_1K_HASH: + case DF4p5_NPS4_3CHAN_2K_HASH: + case DF4p5_NPS2_6CHAN_2K_HASH: + case DF4p5_NPS1_12CHAN_2K_HASH: + case DF4p5_NPS0_24CHAN_2K_HASH: + case DF4p5_NPS2_5CHAN_2K_HASH: + case DF4p5_NPS1_10CHAN_2K_HASH: + return denorm_addr_df4p5_np2(ctx); case DF3_6CHAN: return denorm_addr_df3_6chan(ctx); default: diff --git a/drivers/ras/amd/atl/internal.h b/drivers/ras/amd/atl/internal.h index 196c1c8b578c..9de5d53d0568 100644 --- a/drivers/ras/amd/atl/internal.h +++ b/drivers/ras/amd/atl/internal.h @@ -21,6 +21,9 @@ #include "reg_fields.h" +#undef pr_fmt +#define pr_fmt(fmt) "amd_atl: " fmt + /* Maximum possible number of Coherent Stations within a single Data Fabric. */ #define MAX_COH_ST_CHANNELS 32 @@ -34,6 +37,8 @@ #define DF_DRAM_BASE_LIMIT_LSB 28 #define MI300_DRAM_LIMIT_LSB 20 +#define INVALID_SPA ~0ULL + enum df_revisions { UNKNOWN, DF2, @@ -90,6 +95,44 @@ enum intlv_modes { DF4p5_NPS1_10CHAN_2K_HASH = 0x49, }; +struct df4p5_denorm_ctx { + /* Indicates the number of "lost" bits. This will be 1, 2, or 3. */ + u8 perm_shift; + + /* A mask indicating the bits that need to be rehashed. 
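+ * (Editor's example, not part of the original patch: init_df4p5_denorm_ctx()
+ * below sets this to BIT(8) | BIT(9) | BIT(12) for DF4p5_NPS0_24CHAN_1K_HASH
+ * and to 0 for the 3- and 5-channel modes, which have no bits to rehash.)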
*/ + u16 rehash_vector; + + /* + * Represents the value that the high bits of the normalized address + * are divided by during normalization. This value will be 3 for + * interleave modes with a number of channels divisible by 3 or the + * value will be 5 for interleave modes with a number of channels + * divisible by 5. Power-of-two interleave modes are handled + * separately. + */ + u8 mod_value; + + /* + * Represents the bits that can be directly pulled from the normalized + * address. In each case, pass through bits [7:0] of the normalized + * address. The other bits depend on the interleave bit position which + * will be bit 10 for 1K interleave stripe cases and bit 11 for 2K + * interleave stripe cases. + */ + u64 base_denorm_addr; + + /* + * Represents the high bits of the physical address that have been + * divided by the mod_value. + */ + u64 div_addr; + + u64 current_spa; + u64 resolved_spa; + + u16 coh_st_fabric_id; +}; + struct df_flags { __u8 legacy_ficaa : 1, socket_id_shift_quirk : 1, @@ -132,6 +175,8 @@ struct df_config { /* Number of DRAM Address maps visible in a Coherent Station. */ u8 num_coh_st_maps; + u32 dram_hole_base; + /* Global flags to handle special cases. */ struct df_flags flags; }; @@ -234,6 +279,9 @@ int dehash_address(struct addr_ctx *ctx); unsigned long norm_to_sys_addr(u8 socket_id, u8 die_id, u8 coh_st_inst_id, unsigned long addr); unsigned long convert_umc_mca_addr_to_sys_addr(struct atl_err *err); +u64 add_base_and_hole(struct addr_ctx *ctx, u64 addr); +u64 remove_base_and_hole(struct addr_ctx *ctx, u64 addr); + /* * Make a gap in @data that is @num_bits long starting at @bit_num. * e.g. data = 11111111'b diff --git a/drivers/ras/amd/atl/map.c b/drivers/ras/amd/atl/map.c index 8b908e8d7495..24a05af747d5 100644 --- a/drivers/ras/amd/atl/map.c +++ b/drivers/ras/amd/atl/map.c @@ -642,6 +642,99 @@ static int get_global_map_data(struct addr_ctx *ctx) return 0; } +/* + * Verify the interleave bits are correct in the different interleaving + * settings. + * + * If @num_intlv_dies and/or @num_intlv_sockets are 1, it means the + * respective interleaving is disabled. 
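+ * (Editor's example, not part of the original patch: validate_address_map()
+ * below calls map_bits_valid(ctx, 8, 9, 1, 1) for the DF2/DF3 hash modes,
+ * i.e. the interleave bit must be 8 or 9 and both die and socket
+ * interleaving must be disabled for the map to be considered consistent.)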
+ */ +static inline bool map_bits_valid(struct addr_ctx *ctx, u8 bit1, u8 bit2, + u8 num_intlv_dies, u8 num_intlv_sockets) +{ + if (!(ctx->map.intlv_bit_pos == bit1 || ctx->map.intlv_bit_pos == bit2)) { + pr_debug("Invalid interleave bit: %u", ctx->map.intlv_bit_pos); + return false; + } + + if (ctx->map.num_intlv_dies > num_intlv_dies) { + pr_debug("Invalid number of interleave dies: %u", ctx->map.num_intlv_dies); + return false; + } + + if (ctx->map.num_intlv_sockets > num_intlv_sockets) { + pr_debug("Invalid number of interleave sockets: %u", ctx->map.num_intlv_sockets); + return false; + } + + return true; +} + +static int validate_address_map(struct addr_ctx *ctx) +{ + switch (ctx->map.intlv_mode) { + case DF2_2CHAN_HASH: + case DF3_COD4_2CHAN_HASH: + case DF3_COD2_4CHAN_HASH: + case DF3_COD1_8CHAN_HASH: + if (!map_bits_valid(ctx, 8, 9, 1, 1)) + goto err; + break; + + case DF4_NPS4_2CHAN_HASH: + case DF4_NPS2_4CHAN_HASH: + case DF4_NPS1_8CHAN_HASH: + case DF4p5_NPS4_2CHAN_1K_HASH: + case DF4p5_NPS4_2CHAN_2K_HASH: + case DF4p5_NPS2_4CHAN_1K_HASH: + case DF4p5_NPS2_4CHAN_2K_HASH: + case DF4p5_NPS1_8CHAN_1K_HASH: + case DF4p5_NPS1_8CHAN_2K_HASH: + case DF4p5_NPS1_16CHAN_1K_HASH: + case DF4p5_NPS1_16CHAN_2K_HASH: + if (!map_bits_valid(ctx, 8, 8, 1, 2)) + goto err; + break; + + case DF4p5_NPS4_3CHAN_1K_HASH: + case DF4p5_NPS4_3CHAN_2K_HASH: + case DF4p5_NPS2_5CHAN_1K_HASH: + case DF4p5_NPS2_5CHAN_2K_HASH: + case DF4p5_NPS2_6CHAN_1K_HASH: + case DF4p5_NPS2_6CHAN_2K_HASH: + case DF4p5_NPS1_10CHAN_1K_HASH: + case DF4p5_NPS1_10CHAN_2K_HASH: + case DF4p5_NPS1_12CHAN_1K_HASH: + case DF4p5_NPS1_12CHAN_2K_HASH: + if (ctx->map.num_intlv_sockets != 1 || !map_bits_valid(ctx, 8, 0, 1, 1)) + goto err; + break; + + case DF4p5_NPS0_24CHAN_1K_HASH: + case DF4p5_NPS0_24CHAN_2K_HASH: + if (ctx->map.num_intlv_sockets < 2 || !map_bits_valid(ctx, 8, 0, 1, 2)) + goto err; + break; + + case MI3_HASH_8CHAN: + case MI3_HASH_16CHAN: + case MI3_HASH_32CHAN: + if (!map_bits_valid(ctx, 8, 8, 4, 1)) + goto err; + break; + + /* Nothing to do for modes that don't need special validation checks. 
*/ + default: + break; + } + + return 0; + +err: + atl_debug(ctx, "Inconsistent address map"); + return -EINVAL; +} + static void dump_address_map(struct dram_addr_map *map) { u8 i; @@ -678,5 +771,9 @@ int get_address_map(struct addr_ctx *ctx) dump_address_map(&ctx->map); + ret = validate_address_map(ctx); + if (ret) + return ret; + return ret; } diff --git a/drivers/ras/amd/atl/system.c b/drivers/ras/amd/atl/system.c index 6979fa3d4fe2..e18d916d5e8b 100644 --- a/drivers/ras/amd/atl/system.c +++ b/drivers/ras/amd/atl/system.c @@ -223,6 +223,21 @@ static int determine_df_rev(void) return -EINVAL; } +static int get_dram_hole_base(void) +{ + u8 func = 0; + + if (df_cfg.rev >= DF4) + func = 7; + + if (df_indirect_read_broadcast(0, func, 0x104, &df_cfg.dram_hole_base)) + return -EINVAL; + + df_cfg.dram_hole_base &= DF_DRAM_HOLE_BASE_MASK; + + return 0; +} + static void get_num_maps(void) { switch (df_cfg.rev) { @@ -266,6 +281,7 @@ static void dump_df_cfg(void) pr_debug("num_coh_st_maps=%u", df_cfg.num_coh_st_maps); + pr_debug("dram_hole_base=0x%x", df_cfg.dram_hole_base); pr_debug("flags.legacy_ficaa=%u", df_cfg.flags.legacy_ficaa); pr_debug("flags.socket_id_shift_quirk=%u", df_cfg.flags.socket_id_shift_quirk); } @@ -273,7 +289,7 @@ static void dump_df_cfg(void) int get_df_system_info(void) { if (determine_df_rev()) { - pr_warn("amd_atl: Failed to determine DF Revision"); + pr_warn("Failed to determine DF Revision"); df_cfg.rev = UNKNOWN; return -EINVAL; } @@ -282,6 +298,9 @@ int get_df_system_info(void) get_num_maps(); + if (get_dram_hole_base()) + pr_warn("Failed to read DRAM hole base"); + dump_df_cfg(); return 0; diff --git a/drivers/ras/amd/fmpm.c b/drivers/ras/amd/fmpm.c index 271dfad05d68..90de737fbc90 100644 --- a/drivers/ras/amd/fmpm.c +++ b/drivers/ras/amd/fmpm.c @@ -56,6 +56,8 @@ #include "../debugfs.h" +#include "atl/internal.h" + #define INVALID_CPU UINT_MAX /* Validation Bits */ @@ -116,8 +118,6 @@ static struct fru_rec **fru_records; /* system physical addresses array */ static u64 *spa_entries; -#define INVALID_SPA ~0ULL - static struct dentry *fmpm_dfs_dir; static struct dentry *fmpm_dfs_entries; diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig index d333be2bea3b..0281a9a6f4ce 100644 --- a/drivers/regulator/Kconfig +++ b/drivers/regulator/Kconfig @@ -1634,6 +1634,15 @@ config REGULATOR_UNIPHIER help Support for regulators implemented on Socionext UniPhier SoCs. +config REGULATOR_RZG2L_VBCTRL + tristate "Renesas RZ/G2L USB VBUS regulator driver" + depends on ARCH_RZG2L || COMPILE_TEST + depends on OF + select REGMAP_MMIO + default ARCH_RZG2L + help + Support for VBUS regulators implemented on Renesas RZ/G2L SoCs. 
+ config REGULATOR_VCTRL tristate "Voltage controlled regulators" depends on OF diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile index ba15fa5f30ad..6127ffb4b011 100644 --- a/drivers/regulator/Makefile +++ b/drivers/regulator/Makefile @@ -189,6 +189,7 @@ obj-$(CONFIG_REGULATOR_TPS65132) += tps65132-regulator.o obj-$(CONFIG_REGULATOR_TPS68470) += tps68470-regulator.o obj-$(CONFIG_REGULATOR_TWL4030) += twl-regulator.o twl6030-regulator.o obj-$(CONFIG_REGULATOR_UNIPHIER) += uniphier-regulator.o +obj-$(CONFIG_REGULATOR_RZG2L_VBCTRL) += renesas-usb-vbus-regulator.o obj-$(CONFIG_REGULATOR_VCTRL) += vctrl-regulator.o obj-$(CONFIG_REGULATOR_VEXPRESS) += vexpress-regulator.o obj-$(CONFIG_REGULATOR_VQMMC_IPQ4019) += vqmmc-ipq4019-regulator.o diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 844e9587a880..7674b7f2df14 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -3409,6 +3409,34 @@ int regulator_list_hardware_vsel(struct regulator *regulator, EXPORT_SYMBOL_GPL(regulator_list_hardware_vsel); /** + * regulator_hardware_enable - access the HW for enable/disable regulator + * @regulator: regulator source + * @enable: true for enable, false for disable + * + * Request that the regulator be enabled/disabled with the regulator output at + * the predefined voltage or current value. + * + * On success 0 is returned, otherwise a negative errno is returned. + */ +int regulator_hardware_enable(struct regulator *regulator, bool enable) +{ + struct regulator_dev *rdev = regulator->rdev; + const struct regulator_ops *ops = rdev->desc->ops; + int ret = -EOPNOTSUPP; + + if (!rdev->exclusive || !ops || !ops->enable || !ops->disable) + return ret; + + if (enable) + ret = ops->enable(rdev); + else + ret = ops->disable(rdev); + + return ret; +} +EXPORT_SYMBOL_GPL(regulator_hardware_enable); + +/** * regulator_get_linear_step - return the voltage step size between VSEL values * @regulator: regulator source * diff --git a/drivers/regulator/da9121-regulator.c b/drivers/regulator/da9121-regulator.c index 96257551bb12..d97162f73793 100644 --- a/drivers/regulator/da9121-regulator.c +++ b/drivers/regulator/da9121-regulator.c @@ -865,7 +865,7 @@ static const struct regmap_access_table da9121_volatile_table = { }; /* DA9121 regmap config for 1 channel variants */ -static struct regmap_config da9121_1ch_regmap_config = { +static const struct regmap_config da9121_1ch_regmap_config = { .reg_bits = 8, .val_bits = 8, .max_register = DA9121_REG_OTP_CONFIG_ID, @@ -876,7 +876,7 @@ static struct regmap_config da9121_1ch_regmap_config = { }; /* DA9121 regmap config for 2 channel variants */ -static struct regmap_config da9121_2ch_regmap_config = { +static const struct regmap_config da9121_2ch_regmap_config = { .reg_bits = 8, .val_bits = 8, .max_register = DA9121_REG_OTP_CONFIG_ID, @@ -993,7 +993,7 @@ error: static int da9121_assign_chip_model(struct i2c_client *i2c, struct da9121 *chip) { - struct regmap_config *regmap; + const struct regmap_config *regmap; int ret = 0; chip->dev = &i2c->dev; @@ -1192,4 +1192,5 @@ static struct i2c_driver da9121_regulator_driver = { module_i2c_driver(da9121_regulator_driver); +MODULE_DESCRIPTION("Dialog Semiconductor DA9121/DA9122/DA9220/DA9217/DA9130/DA9131/DA9132 regulator driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/regulator/da9210-regulator.c b/drivers/regulator/da9210-regulator.c index 02b85ca4a6fc..39ade0dba40f 100644 --- a/drivers/regulator/da9210-regulator.c +++ b/drivers/regulator/da9210-regulator.c @@ -202,8 +202,8
@@ static int da9210_i2c_probe(struct i2c_client *i2c) } static const struct i2c_device_id da9210_i2c_id[] = { - {"da9210", 0}, - {}, + { "da9210" }, + {} }; MODULE_DEVICE_TABLE(i2c, da9210_i2c_id); diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c index e1b5c45f97f4..d4dab86fe385 100644 --- a/drivers/regulator/lp3971.c +++ b/drivers/regulator/lp3971.c @@ -439,7 +439,7 @@ static int lp3971_i2c_probe(struct i2c_client *i2c) } static const struct i2c_device_id lp3971_i2c_id[] = { - { "lp3971", 0 }, + { "lp3971" }, { } }; MODULE_DEVICE_TABLE(i2c, lp3971_i2c_id); diff --git a/drivers/regulator/lp3972.c b/drivers/regulator/lp3972.c index 7bd6f05edd8d..1b918fb72134 100644 --- a/drivers/regulator/lp3972.c +++ b/drivers/regulator/lp3972.c @@ -537,7 +537,7 @@ static int lp3972_i2c_probe(struct i2c_client *i2c) } static const struct i2c_device_id lp3972_i2c_id[] = { - { "lp3972", 0 }, + { "lp3972" }, { } }; MODULE_DEVICE_TABLE(i2c, lp3972_i2c_id); diff --git a/drivers/regulator/lp8755.c b/drivers/regulator/lp8755.c index 8d01e18046f3..5509bee49bda 100644 --- a/drivers/regulator/lp8755.c +++ b/drivers/regulator/lp8755.c @@ -430,7 +430,7 @@ static void lp8755_remove(struct i2c_client *client) } static const struct i2c_device_id lp8755_id[] = { - {LP8755_NAME, 0}, + { LP8755_NAME }, {} }; diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c index 0f133129252e..4242fbb7b147 100644 --- a/drivers/regulator/max1586.c +++ b/drivers/regulator/max1586.c @@ -276,7 +276,7 @@ static int max1586_pmic_probe(struct i2c_client *client) } static const struct i2c_device_id max1586_id[] = { - { "max1586", 0 }, + { "max1586" }, { } }; MODULE_DEVICE_TABLE(i2c, max1586_id); diff --git a/drivers/regulator/max20411-regulator.c b/drivers/regulator/max20411-regulator.c index 8c09dc71b16d..02d7009ea0e6 100644 --- a/drivers/regulator/max20411-regulator.c +++ b/drivers/regulator/max20411-regulator.c @@ -145,8 +145,8 @@ static const struct of_device_id of_max20411_match_tbl[] = { MODULE_DEVICE_TABLE(of, of_max20411_match_tbl); static const struct i2c_device_id max20411_id[] = { - { "max20411", 0 }, - { }, + { "max20411" }, + { } }; MODULE_DEVICE_TABLE(i2c, max20411_id); @@ -161,4 +161,5 @@ static struct i2c_driver max20411_i2c_driver = { }; module_i2c_driver(max20411_i2c_driver); +MODULE_DESCRIPTION("Maxim MAX20411 High-Efficiency Single Step-Down Converter driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/regulator/max77503-regulator.c b/drivers/regulator/max77503-regulator.c index 4a6ba4dd2acd..c7c94e868fc1 100644 --- a/drivers/regulator/max77503-regulator.c +++ b/drivers/regulator/max77503-regulator.c @@ -25,14 +25,6 @@ #define MAX77503_AD_ENABLED 0x1 #define MAX77503_AD_DISABLED 0x0 -struct max77503_dev { - struct device *dev; - struct device_node *of_node; - struct regulator_desc desc; - struct regulator_dev *rdev; - struct regmap *regmap; -}; - static const struct regmap_config max77503_regmap_config = { .reg_bits = 8, .val_bits = 8, diff --git a/drivers/regulator/max77857-regulator.c b/drivers/regulator/max77857-regulator.c index 145ad0281857..bc28dc8503a8 100644 --- a/drivers/regulator/max77857-regulator.c +++ b/drivers/regulator/max77857-regulator.c @@ -67,7 +67,7 @@ static bool max77857_volatile_reg(struct device *dev, unsigned int reg) } } -static struct regmap_config max77857_regmap_config = { +static const struct regmap_config max77857_regmap_config = { .reg_bits = 8, .val_bits = 8, .cache_type = REGCACHE_MAPLE, diff --git a/drivers/regulator/max8649.c 
b/drivers/regulator/max8649.c index 24e1dfba78c8..f57c588bcf28 100644 --- a/drivers/regulator/max8649.c +++ b/drivers/regulator/max8649.c @@ -240,7 +240,7 @@ static int max8649_regulator_probe(struct i2c_client *client) } static const struct i2c_device_id max8649_id[] = { - { "max8649", 0 }, + { "max8649" }, { } }; MODULE_DEVICE_TABLE(i2c, max8649_id); diff --git a/drivers/regulator/max8893.c b/drivers/regulator/max8893.c index 30592425e193..5a90633d8536 100644 --- a/drivers/regulator/max8893.c +++ b/drivers/regulator/max8893.c @@ -162,8 +162,8 @@ MODULE_DEVICE_TABLE(of, max8893_dt_match); #endif static const struct i2c_device_id max8893_ids[] = { - { "max8893", 0 }, - { }, + { "max8893" }, + { } }; MODULE_DEVICE_TABLE(i2c, max8893_ids); diff --git a/drivers/regulator/max8952.c b/drivers/regulator/max8952.c index 0b0b841d214a..1f94315bfb02 100644 --- a/drivers/regulator/max8952.c +++ b/drivers/regulator/max8952.c @@ -307,8 +307,8 @@ static int max8952_pmic_probe(struct i2c_client *client) } static const struct i2c_device_id max8952_ids[] = { - { "max8952", 0 }, - { }, + { "max8952" }, + { } }; MODULE_DEVICE_TABLE(i2c, max8952_ids); diff --git a/drivers/regulator/mcp16502.c b/drivers/regulator/mcp16502.c index 0c15a19fe83a..5de9d4fa5113 100644 --- a/drivers/regulator/mcp16502.c +++ b/drivers/regulator/mcp16502.c @@ -577,7 +577,7 @@ static const struct dev_pm_ops mcp16502_pm_ops = { }; #endif static const struct i2c_device_id mcp16502_i2c_id[] = { - { "mcp16502", 0 }, + { "mcp16502" }, { } }; MODULE_DEVICE_TABLE(i2c, mcp16502_i2c_id); diff --git a/drivers/regulator/mt6311-regulator.c b/drivers/regulator/mt6311-regulator.c index c00638cd2d1e..2ebc1c0b5e6f 100644 --- a/drivers/regulator/mt6311-regulator.c +++ b/drivers/regulator/mt6311-regulator.c @@ -133,8 +133,8 @@ static int mt6311_i2c_probe(struct i2c_client *i2c) } static const struct i2c_device_id mt6311_i2c_id[] = { - {"mt6311", 0}, - {}, + { "mt6311" }, + {} }; MODULE_DEVICE_TABLE(i2c, mt6311_i2c_id); diff --git a/drivers/regulator/mtk-dvfsrc-regulator.c b/drivers/regulator/mtk-dvfsrc-regulator.c index f1280d45265d..9bf4163221f1 100644 --- a/drivers/regulator/mtk-dvfsrc-regulator.c +++ b/drivers/regulator/mtk-dvfsrc-regulator.c @@ -1,99 +1,94 @@ // SPDX-License-Identifier: GPL-2.0 -// -// Copyright (c) 2020 MediaTek Inc. +/* + * Copyright (C) 2020 MediaTek Inc. + * Copyright (c) 2024 Collabora Ltd. + * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com> + */ -#include <linux/err.h> -#include <linux/init.h> #include <linux/module.h> #include <linux/platform_device.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/regulator/driver.h> #include <linux/regulator/of_regulator.h> -#include <linux/soc/mediatek/mtk_dvfsrc.h> - -#define DVFSRC_ID_VCORE 0 -#define DVFSRC_ID_VSCP 1 - -#define MT_DVFSRC_REGULAR(match, _name, _volt_table) \ -[DVFSRC_ID_##_name] = { \ - .desc = { \ - .name = match, \ - .of_match = of_match_ptr(match), \ - .ops = &dvfsrc_vcore_ops, \ - .type = REGULATOR_VOLTAGE, \ - .id = DVFSRC_ID_##_name, \ - .owner = THIS_MODULE, \ - .n_voltages = ARRAY_SIZE(_volt_table), \ - .volt_table = _volt_table, \ - }, \ -} - -/* - * DVFSRC regulators' information - * - * @desc: standard fields of regulator description.
- * @voltage_selector: Selector used for get_voltage_sel() and - * set_voltage_sel() callbacks - */ +#include <linux/soc/mediatek/dvfsrc.h> -struct dvfsrc_regulator { - struct regulator_desc desc; +enum dvfsrc_regulator_id { + DVFSRC_ID_VCORE, + DVFSRC_ID_VSCP, + DVFSRC_ID_MAX }; -/* - * MTK DVFSRC regulators' init data - * - * @size: num of regulators - * @regulator_info: regulator info. - */ -struct dvfsrc_regulator_init_data { +struct dvfsrc_regulator_pdata { + struct regulator_desc *descs; u32 size; - struct dvfsrc_regulator *regulator_info; }; -static inline struct device *to_dvfsrc_dev(struct regulator_dev *rdev) +#define MTK_DVFSRC_VREG(match, _name, _volt_table) \ +{ \ + .name = match, \ + .of_match = match, \ + .ops = &dvfsrc_vcore_ops, \ + .type = REGULATOR_VOLTAGE, \ + .id = DVFSRC_ID_##_name, \ + .owner = THIS_MODULE, \ + .n_voltages = ARRAY_SIZE(_volt_table), \ + .volt_table = _volt_table, \ +} + +static inline struct device *to_dvfs_regulator_dev(struct regulator_dev *rdev) { return rdev_get_dev(rdev)->parent; } +static inline struct device *to_dvfsrc_dev(struct regulator_dev *rdev) +{ + return to_dvfs_regulator_dev(rdev)->parent; +} + +static int dvfsrc_get_cmd(int rdev_id, enum mtk_dvfsrc_cmd *cmd) +{ + switch (rdev_id) { + case DVFSRC_ID_VCORE: + *cmd = MTK_DVFSRC_CMD_VCORE_LEVEL; + break; + case DVFSRC_ID_VSCP: + *cmd = MTK_DVFSRC_CMD_VSCP_LEVEL; + break; + default: + return -EINVAL; + } + + return 0; +} + static int dvfsrc_set_voltage_sel(struct regulator_dev *rdev, unsigned int selector) { struct device *dvfsrc_dev = to_dvfsrc_dev(rdev); + enum mtk_dvfsrc_cmd req_cmd; int id = rdev_get_id(rdev); + int ret; - if (id == DVFSRC_ID_VCORE) - mtk_dvfsrc_send_request(dvfsrc_dev, - MTK_DVFSRC_CMD_VCORE_REQUEST, - selector); - else if (id == DVFSRC_ID_VSCP) - mtk_dvfsrc_send_request(dvfsrc_dev, - MTK_DVFSRC_CMD_VSCP_REQUEST, - selector); - else - return -EINVAL; + ret = dvfsrc_get_cmd(id, &req_cmd); + if (ret) + return ret; - return 0; + return mtk_dvfsrc_send_request(dvfsrc_dev, req_cmd, selector); } static int dvfsrc_get_voltage_sel(struct regulator_dev *rdev) { struct device *dvfsrc_dev = to_dvfsrc_dev(rdev); + enum mtk_dvfsrc_cmd query_cmd; int id = rdev_get_id(rdev); int val, ret; - if (id == DVFSRC_ID_VCORE) - ret = mtk_dvfsrc_query_info(dvfsrc_dev, - MTK_DVFSRC_CMD_VCORE_LEVEL_QUERY, - &val); - else if (id == DVFSRC_ID_VSCP) - ret = mtk_dvfsrc_query_info(dvfsrc_dev, - MTK_DVFSRC_CMD_VSCP_LEVEL_QUERY, - &val); - else - return -EINVAL; + ret = dvfsrc_get_cmd(id, &query_cmd); + if (ret) + return ret; - if (ret != 0) + ret = mtk_dvfsrc_query_info(dvfsrc_dev, query_cmd, &val); + if (ret) return ret; return val; @@ -105,110 +100,97 @@ static const struct regulator_ops dvfsrc_vcore_ops = { .set_voltage_sel = dvfsrc_set_voltage_sel, }; +static const unsigned int mt6873_voltages[] = { + 575000, + 600000, + 650000, + 725000, +}; + +static struct regulator_desc mt6873_regulators[] = { + MTK_DVFSRC_VREG("dvfsrc-vcore", VCORE, mt6873_voltages), + MTK_DVFSRC_VREG("dvfsrc-vscp", VSCP, mt6873_voltages), +}; + +static const struct dvfsrc_regulator_pdata mt6873_data = { + .descs = mt6873_regulators, + .size = ARRAY_SIZE(mt6873_regulators), +}; + static const unsigned int mt8183_voltages[] = { 725000, 800000, }; -static struct dvfsrc_regulator mt8183_regulators[] = { - MT_DVFSRC_REGULAR("dvfsrc-vcore", VCORE, - mt8183_voltages), +static struct regulator_desc mt8183_regulators[] = { + MTK_DVFSRC_VREG("dvfsrc-vcore", VCORE, mt8183_voltages), }; -static const struct dvfsrc_regulator_init_data 
regulator_mt8183_data = { +static const struct dvfsrc_regulator_pdata mt8183_data = { + .descs = mt8183_regulators, .size = ARRAY_SIZE(mt8183_regulators), - .regulator_info = &mt8183_regulators[0], }; -static const unsigned int mt6873_voltages[] = { - 575000, +static const unsigned int mt8195_voltages[] = { + 550000, 600000, 650000, - 725000, + 750000, }; -static struct dvfsrc_regulator mt6873_regulators[] = { - MT_DVFSRC_REGULAR("dvfsrc-vcore", VCORE, - mt6873_voltages), - MT_DVFSRC_REGULAR("dvfsrc-vscp", VSCP, - mt6873_voltages), -}; - -static const struct dvfsrc_regulator_init_data regulator_mt6873_data = { - .size = ARRAY_SIZE(mt6873_regulators), - .regulator_info = &mt6873_regulators[0], +static struct regulator_desc mt8195_regulators[] = { + MTK_DVFSRC_VREG("dvfsrc-vcore", VCORE, mt8195_voltages), + MTK_DVFSRC_VREG("dvfsrc-vscp", VSCP, mt8195_voltages), }; -static const struct of_device_id mtk_dvfsrc_regulator_match[] = { - { - .compatible = "mediatek,mt8183-dvfsrc", - .data = ®ulator_mt8183_data, - }, { - .compatible = "mediatek,mt8192-dvfsrc", - .data = ®ulator_mt6873_data, - }, { - .compatible = "mediatek,mt6873-dvfsrc", - .data = ®ulator_mt6873_data, - }, { - /* sentinel */ - }, +static const struct dvfsrc_regulator_pdata mt8195_data = { + .descs = mt8195_regulators, + .size = ARRAY_SIZE(mt8195_regulators), }; -MODULE_DEVICE_TABLE(of, mtk_dvfsrc_regulator_match); static int dvfsrc_vcore_regulator_probe(struct platform_device *pdev) { - const struct of_device_id *match; - struct device *dev = &pdev->dev; - struct regulator_config config = { }; - struct regulator_dev *rdev; - const struct dvfsrc_regulator_init_data *regulator_init_data; - struct dvfsrc_regulator *mt_regulators; + struct regulator_config config = { .dev = &pdev->dev }; + const struct dvfsrc_regulator_pdata *pdata; int i; - match = of_match_node(mtk_dvfsrc_regulator_match, dev->parent->of_node); + pdata = device_get_match_data(&pdev->dev); + if (!pdata) + return -EINVAL; - if (!match) { - dev_err(dev, "invalid compatible string\n"); - return -ENODEV; - } + for (i = 0; i < pdata->size; i++) { + struct regulator_desc *vrdesc = &pdata->descs[i]; + struct regulator_dev *rdev; - regulator_init_data = match->data; - - mt_regulators = regulator_init_data->regulator_info; - for (i = 0; i < regulator_init_data->size; i++) { - config.dev = dev->parent; - config.driver_data = (mt_regulators + i); - rdev = devm_regulator_register(dev, &(mt_regulators + i)->desc, - &config); - if (IS_ERR(rdev)) { - dev_err(dev, "failed to register %s\n", - (mt_regulators + i)->desc.name); - return PTR_ERR(rdev); - } + rdev = devm_regulator_register(&pdev->dev, vrdesc, &config); + if (IS_ERR(rdev)) + return dev_err_probe(&pdev->dev, PTR_ERR(rdev), + "failed to register %s\n", vrdesc->name); } return 0; } +static const struct of_device_id mtk_dvfsrc_regulator_match[] = { + { .compatible = "mediatek,mt6873-dvfsrc-regulator", .data = &mt6873_data }, + { .compatible = "mediatek,mt8183-dvfsrc-regulator", .data = &mt8183_data }, + { .compatible = "mediatek,mt8192-dvfsrc-regulator", .data = &mt6873_data }, + { .compatible = "mediatek,mt8195-dvfsrc-regulator", .data = &mt8195_data }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, mtk_dvfsrc_regulator_match); + static struct platform_driver mtk_dvfsrc_regulator_driver = { .driver = { .name = "mtk-dvfsrc-regulator", + .of_match_table = mtk_dvfsrc_regulator_match, .probe_type = PROBE_PREFER_ASYNCHRONOUS, }, .probe = dvfsrc_vcore_regulator_probe, }; +module_platform_driver(mtk_dvfsrc_regulator_driver); 
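The rework above is table-driven: dvfsrc_get_cmd() maps a regulator ID onto the DVFSRC mailbox command, so set_voltage_sel() and get_voltage_sel() reduce to a single send/query call. A minimal consumer-side sketch of how these regulators get exercised (the supply name and voltage range here are illustrative assumptions, not part of the patch):

/*
 * Sketch: a hypothetical client raising the VCORE floor. The regulator
 * core maps the requested range onto the volt_table above and invokes
 * dvfsrc_set_voltage_sel(), which forwards the selector to the DVFSRC
 * hardware through mtk_dvfsrc_send_request().
 */
static int example_request_vcore(struct device *dev)
{
	struct regulator *vcore;

	vcore = devm_regulator_get(dev, "dvfsrc-vcore");	/* supply name assumed */
	if (IS_ERR(vcore))
		return PTR_ERR(vcore);

	/* Ask for at least 725 mV; the core picks the matching selector. */
	return regulator_set_voltage(vcore, 725000, 800000);
}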
-static int __init mtk_dvfsrc_regulator_init(void) -{ - return platform_driver_register(&mtk_dvfsrc_regulator_driver); -} -subsys_initcall(mtk_dvfsrc_regulator_init); - -static void __exit mtk_dvfsrc_regulator_exit(void) -{ - platform_driver_unregister(&mtk_dvfsrc_regulator_driver); -} -module_exit(mtk_dvfsrc_regulator_exit); - +MODULE_AUTHOR("AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>"); MODULE_AUTHOR("Arvin wang <arvin.wang@mediatek.com>"); -MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("MediaTek DVFS Resource Collector Regulator driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/regulator/pca9450-regulator.c b/drivers/regulator/pca9450-regulator.c index be488c5dff14..9714afe347dc 100644 --- a/drivers/regulator/pca9450-regulator.c +++ b/drivers/regulator/pca9450-regulator.c @@ -891,11 +891,6 @@ static int pca9450_i2c_probe(struct i2c_client *i2c) unsigned int reset_ctrl; int ret; - if (!i2c->irq) { - dev_err(&i2c->dev, "No IRQ configured?\n"); - return -EINVAL; - } - pca9450 = devm_kzalloc(&i2c->dev, sizeof(struct pca9450), GFP_KERNEL); if (!pca9450) return -ENOMEM; @@ -967,23 +962,25 @@ static int pca9450_i2c_probe(struct i2c_client *i2c) } } - ret = devm_request_threaded_irq(pca9450->dev, pca9450->irq, NULL, - pca9450_irq_handler, - (IRQF_TRIGGER_FALLING | IRQF_ONESHOT), - "pca9450-irq", pca9450); - if (ret != 0) { - dev_err(pca9450->dev, "Failed to request IRQ: %d\n", - pca9450->irq); - return ret; - } - /* Unmask all interrupt except PWRON/WDOG/RSVD */ - ret = regmap_update_bits(pca9450->regmap, PCA9450_REG_INT1_MSK, - IRQ_VR_FLT1 | IRQ_VR_FLT2 | IRQ_LOWVSYS | - IRQ_THERM_105 | IRQ_THERM_125, - IRQ_PWRON | IRQ_WDOGB | IRQ_RSVD); - if (ret) { - dev_err(&i2c->dev, "Unmask irq error\n"); - return ret; + if (pca9450->irq) { + ret = devm_request_threaded_irq(pca9450->dev, pca9450->irq, NULL, + pca9450_irq_handler, + (IRQF_TRIGGER_FALLING | IRQF_ONESHOT), + "pca9450-irq", pca9450); + if (ret != 0) { + dev_err(pca9450->dev, "Failed to request IRQ: %d\n", + pca9450->irq); + return ret; + } + /* Unmask all interrupt except PWRON/WDOG/RSVD */ + ret = regmap_update_bits(pca9450->regmap, PCA9450_REG_INT1_MSK, + IRQ_VR_FLT1 | IRQ_VR_FLT2 | IRQ_LOWVSYS | + IRQ_THERM_105 | IRQ_THERM_125, + IRQ_PWRON | IRQ_WDOGB | IRQ_RSVD); + if (ret) { + dev_err(&i2c->dev, "Unmask irq error\n"); + return ret; + } } /* Clear PRESET_EN bit in BUCK123_DVS to use DVS registers */ diff --git a/drivers/regulator/pf8x00-regulator.c b/drivers/regulator/pf8x00-regulator.c index 9fd8e0949b32..ea3611de42b4 100644 --- a/drivers/regulator/pf8x00-regulator.c +++ b/drivers/regulator/pf8x00-regulator.c @@ -596,10 +596,10 @@ static const struct of_device_id pf8x00_dt_ids[] = { MODULE_DEVICE_TABLE(of, pf8x00_dt_ids); static const struct i2c_device_id pf8x00_i2c_id[] = { - { "pf8100", 0 }, - { "pf8121a", 0 }, - { "pf8200", 0 }, - {}, + { "pf8100" }, + { "pf8121a" }, + { "pf8200" }, + {} }; MODULE_DEVICE_TABLE(i2c, pf8x00_i2c_id); diff --git a/drivers/regulator/pv88060-regulator.c b/drivers/regulator/pv88060-regulator.c index aa90360fa046..ae1c4b9daaa1 100644 --- a/drivers/regulator/pv88060-regulator.c +++ b/drivers/regulator/pv88060-regulator.c @@ -360,8 +360,8 @@ static int pv88060_i2c_probe(struct i2c_client *i2c) } static const struct i2c_device_id pv88060_i2c_id[] = { - {"pv88060", 0}, - {}, + { "pv88060" }, + {} }; MODULE_DEVICE_TABLE(i2c, pv88060_i2c_id); diff --git a/drivers/regulator/pv88090-regulator.c b/drivers/regulator/pv88090-regulator.c index f4acde4d56c8..3c48757bbbda 100644 --- 
a/drivers/regulator/pv88090-regulator.c +++ b/drivers/regulator/pv88090-regulator.c @@ -381,8 +381,8 @@ static int pv88090_i2c_probe(struct i2c_client *i2c) } static const struct i2c_device_id pv88090_i2c_id[] = { - {"pv88090", 0}, - {}, + { "pv88090" }, + {} }; MODULE_DEVICE_TABLE(i2c, pv88090_i2c_id); diff --git a/drivers/regulator/renesas-usb-vbus-regulator.c b/drivers/regulator/renesas-usb-vbus-regulator.c new file mode 100644 index 000000000000..4eceb6b54497 --- /dev/null +++ b/drivers/regulator/renesas-usb-vbus-regulator.c @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Renesas USB VBUS output regulator driver +// +// Copyright (C) 2024 Renesas Electronics Corporation +// + +#include <linux/module.h> +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/regulator/driver.h> +#include <linux/regulator/of_regulator.h> + +static const struct regulator_ops rzg2l_usb_vbus_reg_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, +}; + +static const struct regulator_desc rzg2l_usb_vbus_rdesc = { + .name = "vbus", + .of_match = of_match_ptr("regulator-vbus"), + .ops = &rzg2l_usb_vbus_reg_ops, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + .enable_reg = 0, + .enable_mask = BIT(0), + .enable_is_inverted = true, + .fixed_uV = 5000000, + .n_voltages = 1, +}; + +static int rzg2l_usb_vbus_regulator_probe(struct platform_device *pdev) +{ + struct regulator_config config = { }; + struct device *dev = &pdev->dev; + struct regulator_dev *rdev; + + config.regmap = dev_get_regmap(dev->parent, NULL); + if (!config.regmap) + return dev_err_probe(dev, -ENOENT, "Failed to get regmap\n"); + + config.dev = dev; + config.of_node = of_get_child_by_name(dev->parent->of_node, "regulator-vbus"); + if (!config.of_node) + return dev_err_probe(dev, -ENODEV, "regulator node not found\n"); + + rdev = devm_regulator_register(dev, &rzg2l_usb_vbus_rdesc, &config); + if (IS_ERR(rdev)) { + of_node_put(config.of_node); + return dev_err_probe(dev, PTR_ERR(rdev), + "not able to register vbus regulator\n"); + } + + of_node_put(config.of_node); + + return 0; +} + +static struct platform_driver rzg2l_usb_vbus_regulator_driver = { + .probe = rzg2l_usb_vbus_regulator_probe, + .driver = { + .name = "rzg2l-usb-vbus-regulator", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, +}; +module_platform_driver(rzg2l_usb_vbus_regulator_driver); + +MODULE_AUTHOR("Biju Das <biju.das.jz@bp.renesas.com>"); +MODULE_DESCRIPTION("Renesas RZ/G2L USB Vbus Regulator Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/regulator/rt4831-regulator.c b/drivers/regulator/rt4831-regulator.c index 97e6f7e2a0ba..dfc868a24056 100644 --- a/drivers/regulator/rt4831-regulator.c +++ b/drivers/regulator/rt4831-regulator.c @@ -202,4 +202,5 @@ static struct platform_driver rt4831_regulator_driver = { module_platform_driver(rt4831_regulator_driver); MODULE_AUTHOR("ChiYuan Huang <cy_huang@richtek.com>"); +MODULE_DESCRIPTION("Richtek RT4831 DSV Regulators driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/regulator/rtq2208-regulator.c b/drivers/regulator/rtq2208-regulator.c index c31b6dc3229c..a5c126afc648 100644 --- a/drivers/regulator/rtq2208-regulator.c +++ b/drivers/regulator/rtq2208-regulator.c @@ -219,7 +219,7 @@ static const struct regulator_ops rtq2208_regulator_buck_ops = { .set_suspend_mode = rtq2208_set_suspend_mode, }; -static const struct regulator_ops 
rtq2208_regulator_ldo_ops = { +static const struct regulator_ops rtq2208_regulator_ldo_fix_ops = { .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .is_enabled = regulator_is_enabled_regmap, @@ -228,6 +228,23 @@ static const struct regulator_ops rtq2208_regulator_ldo_ops = { .set_suspend_disable = rtq2208_set_suspend_disable, }; +static const struct regulator_ops rtq2208_regulator_ldo_adj_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .list_voltage = regulator_list_voltage_table, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_active_discharge = regulator_set_active_discharge_regmap, + .set_suspend_enable = rtq2208_set_suspend_enable, + .set_suspend_disable = rtq2208_set_suspend_disable, +}; + +static const unsigned int rtq2208_ldo_volt_table[] = { + 1800000, + 3300000, +}; + static struct of_regulator_match rtq2208_ldo_match[] = { {.name = "ldo2", }, {.name = "ldo1", }, @@ -331,8 +348,9 @@ static int rtq2208_of_get_ldo_dvs_ability(struct device *dev) { struct device_node *np; struct of_regulator_match *match; - struct rtq2208_regulator_desc *rdesc; + struct regulator_desc *desc; struct regulator_init_data *init_data; + u32 fixed_uV; int ret, i; if (!dev->of_node) @@ -352,13 +370,26 @@ static int rtq2208_of_get_ldo_dvs_ability(struct device *dev) for (i = 0; i < ARRAY_SIZE(rtq2208_ldo_match); i++) { match = rtq2208_ldo_match + i; init_data = match->init_data; - rdesc = (struct rtq2208_regulator_desc *)match->driver_data; + desc = (struct regulator_desc *)match->desc; - if (!init_data || !rdesc) + if (!init_data || !desc) continue; - if (init_data->constraints.min_uV == init_data->constraints.max_uV) - rdesc->desc.fixed_uV = init_data->constraints.min_uV; + /* specify working fixed voltage if the property exists */ + ret = of_property_read_u32(match->of_node, "richtek,fixed-microvolt", &fixed_uV); + + if (!ret) { + if (fixed_uV != init_data->constraints.min_uV || + fixed_uV != init_data->constraints.max_uV) + return -EINVAL; + desc->n_voltages = 1; + desc->fixed_uV = fixed_uV; + desc->ops = &rtq2208_regulator_ldo_fix_ops; + } else { + desc->n_voltages = ARRAY_SIZE(rtq2208_ldo_volt_table); + desc->volt_table = rtq2208_ldo_volt_table; + desc->ops = &rtq2208_regulator_ldo_adj_ops; + } } return 0; diff --git a/drivers/regulator/slg51000-regulator.c b/drivers/regulator/slg51000-regulator.c index 59aa16825d8a..3bbd4a29e6d3 100644 --- a/drivers/regulator/slg51000-regulator.c +++ b/drivers/regulator/slg51000-regulator.c @@ -497,8 +497,8 @@ static int slg51000_i2c_probe(struct i2c_client *client) } static const struct i2c_device_id slg51000_i2c_id[] = { - {"slg51000", 0}, - {}, + { "slg51000" }, + {} }; MODULE_DEVICE_TABLE(i2c, slg51000_i2c_id); diff --git a/drivers/regulator/stm32-pwr.c b/drivers/regulator/stm32-pwr.c index 85b0102fb9b1..b7aeef6e09e7 100644 --- a/drivers/regulator/stm32-pwr.c +++ b/drivers/regulator/stm32-pwr.c @@ -166,6 +166,7 @@ static int stm32_pwr_regulator_probe(struct platform_device *pdev) static const struct of_device_id __maybe_unused stm32_pwr_of_match[] = { { .compatible = "st,stm32mp1,pwr-reg", }, + { .compatible = "st,stm32mp13-pwr-reg", }, {}, }; MODULE_DEVICE_TABLE(of, stm32_pwr_of_match); diff --git a/drivers/regulator/sy8106a-regulator.c b/drivers/regulator/sy8106a-regulator.c index 1bcfdd6dcfc1..d79a4cc25a0d 100644 ---
a/drivers/regulator/sy8106a-regulator.c +++ b/drivers/regulator/sy8106a-regulator.c @@ -130,8 +130,8 @@ static const struct of_device_id sy8106a_i2c_of_match[] = { MODULE_DEVICE_TABLE(of, sy8106a_i2c_of_match); static const struct i2c_device_id sy8106a_i2c_id[] = { - { "sy8106a", 0 }, - { }, + { "sy8106a" }, + { } }; MODULE_DEVICE_TABLE(i2c, sy8106a_i2c_id); diff --git a/drivers/regulator/tps6286x-regulator.c b/drivers/regulator/tps6286x-regulator.c index 758c70269653..75f441f36de7 100644 --- a/drivers/regulator/tps6286x-regulator.c +++ b/drivers/regulator/tps6286x-regulator.c @@ -136,11 +136,11 @@ static int tps6286x_i2c_probe(struct i2c_client *i2c) } static const struct i2c_device_id tps6286x_i2c_id[] = { - { "tps62864", 0 }, - { "tps62866", 0 }, - { "tps62868", 0 }, - { "tps62869", 0 }, - {}, + { "tps62864" }, + { "tps62866" }, + { "tps62868" }, + { "tps62869" }, + {} }; MODULE_DEVICE_TABLE(i2c, tps6286x_i2c_id); @@ -156,4 +156,5 @@ static struct i2c_driver tps6286x_regulator_driver = { module_i2c_driver(tps6286x_regulator_driver); +MODULE_DESCRIPTION("TI TPS6286x Power Regulator driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/regulator/tps6287x-regulator.c b/drivers/regulator/tps6287x-regulator.c index 3c9d79e003e4..7a0551f0c8c0 100644 --- a/drivers/regulator/tps6287x-regulator.c +++ b/drivers/regulator/tps6287x-regulator.c @@ -165,11 +165,11 @@ static const struct of_device_id tps6287x_dt_ids[] = { MODULE_DEVICE_TABLE(of, tps6287x_dt_ids); static const struct i2c_device_id tps6287x_i2c_id[] = { - { "tps62870", 0 }, - { "tps62871", 0 }, - { "tps62872", 0 }, - { "tps62873", 0 }, - {}, + { "tps62870" }, + { "tps62871" }, + { "tps62872" }, + { "tps62873" }, + {} }; MODULE_DEVICE_TABLE(i2c, tps6287x_i2c_id); diff --git a/drivers/regulator/userspace-consumer.c b/drivers/regulator/userspace-consumer.c index 86a626a4f610..6153d0295b6d 100644 --- a/drivers/regulator/userspace-consumer.c +++ b/drivers/regulator/userspace-consumer.c @@ -158,10 +158,8 @@ static int regulator_userspace_consumer_probe(struct platform_device *pdev) ret = devm_regulator_bulk_get_exclusive(&pdev->dev, drvdata->num_supplies, drvdata->supplies); - if (ret) { - dev_err(&pdev->dev, "Failed to get supplies: %d\n", ret); - return ret; - } + if (ret) + return dev_err_probe(&pdev->dev, ret, "Failed to get supplies\n"); platform_set_drvdata(pdev, drvdata); diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig index 7112f5932609..67bce340a87e 100644 --- a/drivers/reset/Kconfig +++ b/drivers/reset/Kconfig @@ -68,6 +68,7 @@ config RESET_BRCMSTB_RESCAL config RESET_GPIO tristate "GPIO reset controller" + depends on GPIOLIB help This enables a generic reset controller for resets attached via GPIOs. Typically for OF platforms this driver expects "reset-gpios" @@ -91,6 +92,14 @@ config RESET_IMX7 help This enables the reset controller driver for i.MX7 SoCs. +config RESET_IMX8MP_AUDIOMIX + tristate "i.MX8MP AudioMix Reset Driver" + depends on ARCH_MXC || COMPILE_TEST + select AUXILIARY_BUS + default CLK_IMX8MP + help + This enables the reset controller driver for i.MX8MP AudioMix + config RESET_INTEL_GW bool "Intel Reset Controller Driver" depends on X86 || COMPILE_TEST @@ -328,6 +337,12 @@ config RESET_ZYNQ help This enables the reset controller driver for Xilinx Zynq SoCs. +config RESET_ZYNQMP + bool "ZYNQMP Reset Driver" if COMPILE_TEST + default ARCH_ZYNQMP + help + This enables the reset controller driver for Xilinx ZynqMP SoCs. 
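On the userspace-consumer hunk above: dev_err_probe() both logs and returns the error code, and it stays silent for -EPROBE_DEFER while still recording the deferral reason for debugfs, which is why the dev_err() plus return pair collapses into one statement. A minimal sketch of the idiom (driver and supply name are hypothetical):

static int example_probe(struct platform_device *pdev)
{
	struct regulator *vdd;

	vdd = devm_regulator_get(&pdev->dev, "vdd");
	if (IS_ERR(vdd))
		/* Quiet for -EPROBE_DEFER, loud for real failures */
		return dev_err_probe(&pdev->dev, PTR_ERR(vdd),
				     "Failed to get vdd supply\n");

	return 0;
}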
+ source "drivers/reset/starfive/Kconfig" source "drivers/reset/sti/Kconfig" source "drivers/reset/hisilicon/Kconfig" diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile index fd8b49fa46fc..27b0bbdfcc04 100644 --- a/drivers/reset/Makefile +++ b/drivers/reset/Makefile @@ -2,8 +2,8 @@ obj-y += core.o obj-y += hisilicon/ obj-y += starfive/ -obj-$(CONFIG_ARCH_STI) += sti/ -obj-$(CONFIG_ARCH_TEGRA) += tegra/ +obj-y += sti/ +obj-y += tegra/ obj-$(CONFIG_RESET_A10SR) += reset-a10sr.o obj-$(CONFIG_RESET_ATH79) += reset-ath79.o obj-$(CONFIG_RESET_AXS10X) += reset-axs10x.o @@ -14,6 +14,7 @@ obj-$(CONFIG_RESET_BRCMSTB_RESCAL) += reset-brcmstb-rescal.o obj-$(CONFIG_RESET_GPIO) += reset-gpio.o obj-$(CONFIG_RESET_HSDK) += reset-hsdk.o obj-$(CONFIG_RESET_IMX7) += reset-imx7.o +obj-$(CONFIG_RESET_IMX8MP_AUDIOMIX) += reset-imx8mp-audiomix.o obj-$(CONFIG_RESET_INTEL_GW) += reset-intel-gw.o obj-$(CONFIG_RESET_K210) += reset-k210.o obj-$(CONFIG_RESET_LANTIQ) += reset-lantiq.o @@ -41,4 +42,4 @@ obj-$(CONFIG_RESET_TN48M_CPLD) += reset-tn48m.o obj-$(CONFIG_RESET_UNIPHIER) += reset-uniphier.o obj-$(CONFIG_RESET_UNIPHIER_GLUE) += reset-uniphier-glue.o obj-$(CONFIG_RESET_ZYNQ) += reset-zynq.o -obj-$(CONFIG_ARCH_ZYNQMP) += reset-zynqmp.o +obj-$(CONFIG_RESET_ZYNQMP) += reset-zynqmp.o diff --git a/drivers/reset/hisilicon/hi6220_reset.c b/drivers/reset/hisilicon/hi6220_reset.c index 5c3267acd2b1..65aa5ff5ed82 100644 --- a/drivers/reset/hisilicon/hi6220_reset.c +++ b/drivers/reset/hisilicon/hi6220_reset.c @@ -219,4 +219,5 @@ static int __init hi6220_reset_init(void) postcore_initcall(hi6220_reset_init); +MODULE_DESCRIPTION("Hisilicon Hi6220 reset controller driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/reset/reset-imx8mp-audiomix.c b/drivers/reset/reset-imx8mp-audiomix.c new file mode 100644 index 000000000000..6e3f3069f727 --- /dev/null +++ b/drivers/reset/reset-imx8mp-audiomix.c @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright 2024 NXP + */ + +#include <linux/auxiliary_bus.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/reset-controller.h> + +#define EARC 0x200 +#define EARC_RESET_MASK 0x3 + +struct imx8mp_audiomix_reset { + struct reset_controller_dev rcdev; + spinlock_t lock; /* protect register read-modify-write cycle */ + void __iomem *base; +}; + +static struct imx8mp_audiomix_reset *to_imx8mp_audiomix_reset(struct reset_controller_dev *rcdev) +{ + return container_of(rcdev, struct imx8mp_audiomix_reset, rcdev); +} + +static int imx8mp_audiomix_reset_assert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct imx8mp_audiomix_reset *priv = to_imx8mp_audiomix_reset(rcdev); + void __iomem *reg_addr = priv->base; + unsigned int mask, reg; + unsigned long flags; + + mask = BIT(id); + spin_lock_irqsave(&priv->lock, flags); + reg = readl(reg_addr + EARC); + writel(reg & ~mask, reg_addr + EARC); + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +static int imx8mp_audiomix_reset_deassert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct imx8mp_audiomix_reset *priv = to_imx8mp_audiomix_reset(rcdev); + void __iomem *reg_addr = priv->base; + unsigned int mask, reg; + unsigned long flags; + + mask = BIT(id); + spin_lock_irqsave(&priv->lock, flags); + reg = readl(reg_addr + EARC); + writel(reg | mask, reg_addr + EARC); + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +static const struct reset_control_ops 
imx8mp_audiomix_reset_ops = { + .assert = imx8mp_audiomix_reset_assert, + .deassert = imx8mp_audiomix_reset_deassert, +}; + +static int imx8mp_audiomix_reset_probe(struct auxiliary_device *adev, + const struct auxiliary_device_id *id) +{ + struct imx8mp_audiomix_reset *priv; + struct device *dev = &adev->dev; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + spin_lock_init(&priv->lock); + + priv->rcdev.owner = THIS_MODULE; + priv->rcdev.nr_resets = fls(EARC_RESET_MASK); + priv->rcdev.ops = &imx8mp_audiomix_reset_ops; + priv->rcdev.of_node = dev->parent->of_node; + priv->rcdev.dev = dev; + priv->rcdev.of_reset_n_cells = 1; + priv->base = of_iomap(dev->parent->of_node, 0); + if (!priv->base) + return -ENOMEM; + + dev_set_drvdata(dev, priv); + + ret = devm_reset_controller_register(dev, &priv->rcdev); + if (ret) + goto out_unmap; + + return 0; + +out_unmap: + iounmap(priv->base); + return ret; +} + +static void imx8mp_audiomix_reset_remove(struct auxiliary_device *adev) +{ + struct imx8mp_audiomix_reset *priv = dev_get_drvdata(&adev->dev); + + iounmap(priv->base); +} + +static const struct auxiliary_device_id imx8mp_audiomix_reset_ids[] = { + { + .name = "clk_imx8mp_audiomix.reset", + }, + { } +}; +MODULE_DEVICE_TABLE(auxiliary, imx8mp_audiomix_reset_ids); + +static struct auxiliary_driver imx8mp_audiomix_reset_driver = { + .probe = imx8mp_audiomix_reset_probe, + .remove = imx8mp_audiomix_reset_remove, + .id_table = imx8mp_audiomix_reset_ids, +}; + +module_auxiliary_driver(imx8mp_audiomix_reset_driver); + +MODULE_AUTHOR("Shengjiu Wang <shengjiu.wang@nxp.com>"); +MODULE_DESCRIPTION("Freescale i.MX8MP Audio Block Controller reset driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/reset/reset-meson-audio-arb.c b/drivers/reset/reset-meson-audio-arb.c index 7891d52fa899..894ad9d37a66 100644 --- a/drivers/reset/reset-meson-audio-arb.c +++ b/drivers/reset/reset-meson-audio-arb.c @@ -129,8 +129,6 @@ static int meson_audio_arb_remove(struct platform_device *pdev) writel(0, arb->regs); spin_unlock(&arb->lock); - clk_disable_unprepare(arb->clk); - return 0; } @@ -150,7 +148,7 @@ static int meson_audio_arb_probe(struct platform_device *pdev) return -ENOMEM; platform_set_drvdata(pdev, arb); - arb->clk = devm_clk_get(dev, NULL); + arb->clk = devm_clk_get_enabled(dev, NULL); if (IS_ERR(arb->clk)) return dev_err_probe(dev, PTR_ERR(arb->clk), "failed to get clock\n"); @@ -170,11 +168,6 @@ static int meson_audio_arb_probe(struct platform_device *pdev) * In the initial state, all memory interfaces are disabled * and the general bit is on */ - ret = clk_prepare_enable(arb->clk); - if (ret) { - dev_err(dev, "failed to enable arb clock\n"); - return ret; - } writel(BIT(ARB_GENERAL_BIT), arb->regs); /* Register reset controller */ diff --git a/drivers/reset/reset-rzg2l-usbphy-ctrl.c b/drivers/reset/reset-rzg2l-usbphy-ctrl.c index a8dde4606360..255c894a4782 100644 --- a/drivers/reset/reset-rzg2l-usbphy-ctrl.c +++ b/drivers/reset/reset-rzg2l-usbphy-ctrl.c @@ -10,10 +10,12 @@ #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> +#include <linux/regmap.h> #include <linux/reset.h> #include <linux/reset-controller.h> #define RESET 0x000 +#define VBENCTL 0x03c #define RESET_SEL_PLLRESET BIT(12) #define RESET_PLLRESET BIT(8) @@ -32,6 +34,7 @@ struct rzg2l_usbphy_ctrl_priv { struct reset_controller_dev rcdev; struct reset_control *rstc; void __iomem *base; + struct platform_device *vdev; spinlock_t lock; }; @@ -100,10 +103,19 @@ 
static const struct reset_control_ops rzg2l_usbphy_ctrl_reset_ops = { .status = rzg2l_usbphy_ctrl_status, }; +static const struct regmap_config rzg2l_usb_regconf = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .max_register = 1, +}; + static int rzg2l_usbphy_ctrl_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct rzg2l_usbphy_ctrl_priv *priv; + struct platform_device *vdev; + struct regmap *regmap; unsigned long flags; int error; u32 val; @@ -116,6 +128,10 @@ static int rzg2l_usbphy_ctrl_probe(struct platform_device *pdev) if (IS_ERR(priv->base)) return PTR_ERR(priv->base); + regmap = devm_regmap_init_mmio(dev, priv->base + VBENCTL, &rzg2l_usb_regconf); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL); if (IS_ERR(priv->rstc)) return dev_err_probe(dev, PTR_ERR(priv->rstc), @@ -125,25 +141,14 @@ static int rzg2l_usbphy_ctrl_probe(struct platform_device *pdev) if (error) return error; - priv->rcdev.ops = &rzg2l_usbphy_ctrl_reset_ops; - priv->rcdev.of_reset_n_cells = 1; - priv->rcdev.nr_resets = NUM_PORTS; - priv->rcdev.of_node = dev->of_node; - priv->rcdev.dev = dev; - - error = devm_reset_controller_register(dev, &priv->rcdev); - if (error) - return error; - spin_lock_init(&priv->lock); dev_set_drvdata(dev, priv); pm_runtime_enable(&pdev->dev); error = pm_runtime_resume_and_get(&pdev->dev); if (error < 0) { - pm_runtime_disable(&pdev->dev); - reset_control_assert(priv->rstc); - return dev_err_probe(&pdev->dev, error, "pm_runtime_resume_and_get failed"); + dev_err_probe(&pdev->dev, error, "pm_runtime_resume_and_get failed"); + goto err_pm_disable_reset_deassert; } /* put pll and phy into reset state */ @@ -153,13 +158,45 @@ static int rzg2l_usbphy_ctrl_probe(struct platform_device *pdev) writel(val, priv->base + RESET); spin_unlock_irqrestore(&priv->lock, flags); + priv->rcdev.ops = &rzg2l_usbphy_ctrl_reset_ops; + priv->rcdev.of_reset_n_cells = 1; + priv->rcdev.nr_resets = NUM_PORTS; + priv->rcdev.of_node = dev->of_node; + priv->rcdev.dev = dev; + + error = devm_reset_controller_register(dev, &priv->rcdev); + if (error) + goto err_pm_runtime_put; + + vdev = platform_device_alloc("rzg2l-usb-vbus-regulator", pdev->id); + if (!vdev) { + error = -ENOMEM; + goto err_pm_runtime_put; + } + vdev->dev.parent = dev; + priv->vdev = vdev; + + error = platform_device_add(vdev); + if (error) + goto err_device_put; + return 0; + +err_device_put: + platform_device_put(vdev); +err_pm_runtime_put: + pm_runtime_put(&pdev->dev); +err_pm_disable_reset_deassert: + pm_runtime_disable(&pdev->dev); + reset_control_assert(priv->rstc); + return error; } static int rzg2l_usbphy_ctrl_remove(struct platform_device *pdev) { struct rzg2l_usbphy_ctrl_priv *priv = dev_get_drvdata(&pdev->dev); + platform_device_unregister(priv->vdev); pm_runtime_put(&pdev->dev); pm_runtime_disable(&pdev->dev); reset_control_assert(priv->rstc); diff --git a/drivers/reset/sti/Kconfig b/drivers/reset/sti/Kconfig index a2622e146b8b..0b599f7cf6ed 100644 --- a/drivers/reset/sti/Kconfig +++ b/drivers/reset/sti/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only -if ARCH_STI +if ARCH_STI || COMPILE_TEST config STIH407_RESET - bool + bool "STIH407 Reset Driver" if COMPILE_TEST endif diff --git a/drivers/reset/tegra/Kconfig b/drivers/reset/tegra/Kconfig index e4a9a389e98c..4a2d26d1210a 100644 --- a/drivers/reset/tegra/Kconfig +++ b/drivers/reset/tegra/Kconfig @@ -1,3 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only config RESET_TEGRA_BPMP 
- def_bool TEGRA_BPMP + bool "Tegra BPMP Reset Driver" if COMPILE_TEST + default TEGRA_BPMP diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 2f16f543079b..a76c6af9ea63 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -4906,7 +4906,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req) ccw++; if (dst) { if (ccw->flags & CCW_FLAG_IDA) - cda = *((char **)dma32_to_virt(ccw->cda)); + cda = dma64_to_virt(*((dma64_t *)dma32_to_virt(ccw->cda))); else cda = dma32_to_virt(ccw->cda); if (dst != cda) { @@ -5525,7 +5525,7 @@ dasd_eckd_dump_ccw_range(struct dasd_device *device, struct ccw1 *from, /* get pointer to data (consider IDALs) */ if (from->flags & CCW_FLAG_IDA) - datap = (char *)*((addr_t *)dma32_to_virt(from->cda)); + datap = dma64_to_virt(*((dma64_t *)dma32_to_virt(from->cda))); else datap = dma32_to_virt(from->cda); diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c index 361e9bd75257..9f2023a077c2 100644 --- a/drivers/s390/block/dasd_fba.c +++ b/drivers/s390/block/dasd_fba.c @@ -585,7 +585,7 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req) ccw++; if (dst) { if (ccw->flags & CCW_FLAG_IDA) - cda = *((char **)dma32_to_virt(ccw->cda)); + cda = dma64_to_virt(*((dma64_t *)dma32_to_virt(ccw->cda))); else cda = dma32_to_virt(ccw->cda); if (dst != cda) { diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c index 4533dd055ca8..1aa426b1dedd 100644 --- a/drivers/s390/block/dasd_genhd.c +++ b/drivers/s390/block/dasd_genhd.c @@ -68,7 +68,6 @@ int dasd_gendisk_alloc(struct dasd_block *block) blk_mq_free_tag_set(&block->tag_set); return PTR_ERR(gdp); } - blk_queue_flag_set(QUEUE_FLAG_NONROT, gdp->queue); /* Initialize gendisk structure. 
*/ gdp->major = DASD_MAJOR; diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 6d1689a2717e..d5a5d11ae0dc 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -548,6 +548,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char { struct queue_limits lim = { .logical_block_size = 4096, + .features = BLK_FEAT_DAX, }; int rc, i, j, num_of_segments; struct dcssblk_dev_info *dev_info; @@ -643,7 +644,6 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char dev_info->gd->fops = &dcssblk_devops; dev_info->gd->private_data = dev_info; dev_info->gd->flags |= GENHD_FL_NO_PART; - blk_queue_flag_set(QUEUE_FLAG_DAX, dev_info->gd->queue); seg_byte_size = (dev_info->end - dev_info->start + 1); set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index 1d456a5a3bfb..3fcfe029db1b 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c @@ -439,7 +439,6 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) .logical_block_size = 1 << 12, }; unsigned int devindex; - struct request_queue *rq; int len, ret; lim.max_segments = min(scmdev->nr_max_block, @@ -474,10 +473,6 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) ret = PTR_ERR(bdev->gendisk); goto out_tag; } - rq = bdev->rq = bdev->gendisk->queue; - blk_queue_flag_set(QUEUE_FLAG_NONROT, rq); - blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, rq); - bdev->gendisk->private_data = scmdev; bdev->gendisk->fops = &scm_blk_devops; bdev->gendisk->major = scm_major; diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c index 6e5c508b1e07..5f6e10225627 100644 --- a/drivers/s390/cio/vfio_ccw_cp.c +++ b/drivers/s390/cio/vfio_ccw_cp.c @@ -490,13 +490,14 @@ static int ccwchain_fetch_tic(struct ccw1 *ccw, struct channel_program *cp) { struct ccwchain *iter; - u32 cda, ccw_head; + u32 offset, ccw_head; list_for_each_entry(iter, &cp->ccwchain_list, next) { ccw_head = iter->ch_iova; if (is_cpa_within_range(ccw->cda, ccw_head, iter->ch_len)) { - cda = (u64)iter->ch_ccw + dma32_to_u32(ccw->cda) - ccw_head; - ccw->cda = u32_to_dma32(cda); + /* Calculate offset of TIC target */ + offset = dma32_to_u32(ccw->cda) - ccw_head; + ccw->cda = virt_to_dma32((void *)iter->ch_ccw + offset); return 0; } } @@ -914,7 +915,7 @@ void cp_update_scsw(struct channel_program *cp, union scsw *scsw) * in the ioctl directly. Path status changes etc. */ list_for_each_entry(chain, &cp->ccwchain_list, next) { - ccw_head = (u32)(u64)chain->ch_ccw; + ccw_head = dma32_to_u32(virt_to_dma32(chain->ch_ccw)); /* * On successful execution, cpa points just beyond the end * of the chain. 
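The dasd_genhd, dcssblk and scm_blk hunks above are all instances of the same block-layer conversion: queue features are declared in struct queue_limits when the disk is allocated rather than toggled afterwards on a live queue with blk_queue_flag_set(). A minimal sketch of the new-style allocation (tag set and queuedata are placeholders, not taken from these drivers):

static struct gendisk *example_alloc_disk(struct blk_mq_tag_set *set,
					  void *queuedata)
{
	struct queue_limits lim = {
		.logical_block_size	= 4096,
		/* was: blk_queue_flag_set(QUEUE_FLAG_DAX, gd->queue) */
		.features		= BLK_FEAT_DAX,
	};

	/* Limits and features are applied atomically at allocation time */
	return blk_mq_alloc_disk(set, &lim, queuedata);
}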
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 065db86d6021..37c24ffea65c 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -82,7 +82,6 @@ comment "SCSI support type (disk, tape, CD-ROM)" config BLK_DEV_SD tristate "SCSI disk support" depends on SCSI - select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY help If you want to use SCSI hard disks, Fibre Channel disks, Serial ATA (SATA) or Parallel ATA (PATA) hard disks, diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 60688f18fac6..c708e1059638 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -1057,15 +1057,15 @@ static umode_t iscsi_sw_tcp_attr_is_visible(int param_type, int param) return 0; } -static int iscsi_sw_tcp_slave_configure(struct scsi_device *sdev) +static int iscsi_sw_tcp_device_configure(struct scsi_device *sdev, + struct queue_limits *lim) { struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(sdev->host); struct iscsi_session *session = tcp_sw_host->session; struct iscsi_conn *conn = session->leadconn; if (conn->datadgst_en) - blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, - sdev->request_queue); + lim->features |= BLK_FEAT_STABLE_WRITES; return 0; } @@ -1083,7 +1083,7 @@ static const struct scsi_host_template iscsi_sw_tcp_sht = { .eh_device_reset_handler= iscsi_eh_device_reset, .eh_target_reset_handler = iscsi_eh_recover_target, .dma_boundary = PAGE_SIZE - 1, - .slave_configure = iscsi_sw_tcp_slave_configure, + .device_configure = iscsi_sw_tcp_device_configure, .proc_name = "iscsi_tcp", .this_id = -1, .track_queue_depth = 1, diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h index 85948963fb97..03d6ec1eb970 100644 --- a/drivers/scsi/libsas/sas_internal.h +++ b/drivers/scsi/libsas/sas_internal.h @@ -145,6 +145,20 @@ static inline void sas_fail_probe(struct domain_device *dev, const char *func, i func, dev->parent ? 
"exp-attached" : "direct-attached", SAS_ADDR(dev->sas_addr), err); + + /* + * If the device probe failed, the expander phy attached address + * needs to be reset so that the phy will not be treated as flutter + * in the next revalidation + */ + if (dev->parent && !dev_is_expander(dev->dev_type)) { + struct sas_phy *phy = dev->phy; + struct domain_device *parent = dev->parent; + struct ex_phy *ex_phy = &parent->ex_dev.ex_phy[phy->number]; + + memset(ex_phy->attached_sas_addr, 0, SAS_ADDR_SIZE); + } + sas_unregister_dev(dev->port, dev); } diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index 5297cacc8beb..0cef5d089f34 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -1363,6 +1363,16 @@ lpfc_nvmet_ls_abort(struct nvmet_fc_target_port *targetport, atomic_inc(&lpfc_nvmet->xmt_ls_abort); } +static int +lpfc_nvmet_host_traddr(void *hosthandle, u64 *wwnn, u64 *wwpn) +{ + struct lpfc_nodelist *ndlp = hosthandle; + + *wwnn = wwn_to_u64(ndlp->nlp_nodename.u.wwn); + *wwpn = wwn_to_u64(ndlp->nlp_portname.u.wwn); + return 0; +} + static void lpfc_nvmet_host_release(void *hosthandle) { @@ -1413,6 +1423,7 @@ static struct nvmet_fc_target_template lpfc_tgttemplate = { .ls_req = lpfc_nvmet_ls_req, .ls_abort = lpfc_nvmet_ls_abort, .host_release = lpfc_nvmet_host_release, + .host_traddr = lpfc_nvmet_host_traddr, .max_hw_queues = 1, .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS, diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 88acefbf9aea..6c79c350a4d5 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -1981,8 +1981,6 @@ megasas_set_nvme_device_properties(struct scsi_device *sdev, lim->max_hw_sectors = max_io_size / 512; lim->virt_boundary_mask = mr_nvme_pg_size - 1; - - blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue); } /* diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 870ec2cb4af4..97c2472cd434 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -2680,12 +2680,6 @@ scsih_device_configure(struct scsi_device *sdev, struct queue_limits *lim) pcie_device_put(pcie_device); spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); mpt3sas_scsih_change_queue_depth(sdev, qdepth); - /* Enable QUEUE_FLAG_NOMERGES flag, so that IOs won't be - ** merged and can eliminate holes created during merging - ** operation. 
- **/ - blk_queue_flag_set(QUEUE_FLAG_NOMERGES, - sdev->request_queue); lim->virt_boundary_mask = ioc->page_size - 1; return 0; } diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index acf0592d63da..a9d8a9c62663 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -69,6 +69,8 @@ static const char *sdebug_version_date = "20210520"; /* Additional Sense Code (ASC) */ #define NO_ADDITIONAL_SENSE 0x0 +#define OVERLAP_ATOMIC_COMMAND_ASC 0x0 +#define OVERLAP_ATOMIC_COMMAND_ASCQ 0x23 #define LOGICAL_UNIT_NOT_READY 0x4 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8 #define UNRECOVERED_READ_ERR 0x11 @@ -103,6 +105,7 @@ static const char *sdebug_version_date = "20210520"; #define READ_BOUNDARY_ASCQ 0x7 #define ATTEMPT_ACCESS_GAP 0x9 #define INSUFF_ZONE_ASCQ 0xe +/* see drivers/scsi/sense_codes.h */ /* Additional Sense Code Qualifier (ASCQ) */ #define ACK_NAK_TO 0x3 @@ -152,6 +155,12 @@ static const char *sdebug_version_date = "20210520"; #define DEF_VIRTUAL_GB 0 #define DEF_VPD_USE_HOSTNO 1 #define DEF_WRITESAME_LENGTH 0xFFFF +#define DEF_ATOMIC_WR 0 +#define DEF_ATOMIC_WR_MAX_LENGTH 8192 +#define DEF_ATOMIC_WR_ALIGN 2 +#define DEF_ATOMIC_WR_GRAN 2 +#define DEF_ATOMIC_WR_MAX_LENGTH_BNDRY (DEF_ATOMIC_WR_MAX_LENGTH) +#define DEF_ATOMIC_WR_MAX_BNDRY 128 #define DEF_STRICT 0 #define DEF_STATISTICS false #define DEF_SUBMIT_QUEUES 1 @@ -374,7 +383,9 @@ struct sdebug_host_info { /* There is an xarray of pointers to this struct's objects, one per host */ struct sdeb_store_info { - rwlock_t macc_lck; /* for atomic media access on this store */ + rwlock_t macc_data_lck; /* for media data access on this store */ + rwlock_t macc_meta_lck; /* for atomic media meta access on this store */ + rwlock_t macc_sector_lck; /* per-sector media data access on this store */ u8 *storep; /* user data storage (ram) */ struct t10_pi_tuple *dif_storep; /* protection info */ void *map_storep; /* provisioning map */ @@ -398,12 +409,20 @@ struct sdebug_defer { enum sdeb_defer_type defer_t; }; +struct sdebug_device_access_info { + bool atomic_write; + u64 lba; + u32 num; + struct scsi_cmnd *self; +}; + struct sdebug_queued_cmd { /* corresponding bit set in in_use_bm[] in owning struct sdebug_queue * instance indicates this slot is in use. 
*/ struct sdebug_defer sd_dp; struct scsi_cmnd *scmd; + struct sdebug_device_access_info *i; }; struct sdebug_scsi_cmd { @@ -463,7 +482,8 @@ enum sdeb_opcode_index { SDEB_I_PRE_FETCH = 29, /* 10, 16 */ SDEB_I_ZONE_OUT = 30, /* 0x94+SA; includes no data xfer */ SDEB_I_ZONE_IN = 31, /* 0x95+SA; all have data-in */ - SDEB_I_LAST_ELEM_P1 = 32, /* keep this last (previous + 1) */ + SDEB_I_ATOMIC_WRITE_16 = 32, + SDEB_I_LAST_ELEM_P1 = 33, /* keep this last (previous + 1) */ }; @@ -497,7 +517,8 @@ static const unsigned char opcode_ind_arr[256] = { 0, 0, 0, SDEB_I_VERIFY, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME, SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0, - 0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16, + 0, 0, 0, 0, + SDEB_I_ATOMIC_WRITE_16, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16, /* 0xa0; 0xa0->0xbf: 12 byte cdbs */ SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN, SDEB_I_MAINT_OUT, 0, 0, 0, @@ -547,6 +568,7 @@ static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_atomic_write(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *); @@ -788,6 +810,11 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = { resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */ {16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} }, +/* 31 */ + {0, 0x0, 0x0, F_D_OUT | FF_MEDIA_IO, + resp_atomic_write, NULL, /* ATOMIC WRITE 16 */ + {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} }, /* sentinel */ {0xff, 0, 0, 0, NULL, NULL, /* terminating element */ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, @@ -835,6 +862,13 @@ static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY; static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS; static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC; static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH; +static unsigned int sdebug_atomic_wr = DEF_ATOMIC_WR; +static unsigned int sdebug_atomic_wr_max_length = DEF_ATOMIC_WR_MAX_LENGTH; +static unsigned int sdebug_atomic_wr_align = DEF_ATOMIC_WR_ALIGN; +static unsigned int sdebug_atomic_wr_gran = DEF_ATOMIC_WR_GRAN; +static unsigned int sdebug_atomic_wr_max_length_bndry = + DEF_ATOMIC_WR_MAX_LENGTH_BNDRY; +static unsigned int sdebug_atomic_wr_max_bndry = DEF_ATOMIC_WR_MAX_BNDRY; static int sdebug_uuid_ctl = DEF_UUID_CTL; static bool sdebug_random = DEF_RANDOM; static bool sdebug_per_host_store = DEF_PER_HOST_STORE; @@ -926,6 +960,7 @@ static const int device_qfull_result = static const int condition_met_result = SAM_STAT_CONDITION_MET; static struct dentry *sdebug_debugfs_root; +static ASYNC_DOMAIN_EXCLUSIVE(sdebug_async_domain); static void sdebug_err_free(struct rcu_head *head) { @@ -1148,6 +1183,8 @@ static int sdebug_target_alloc(struct scsi_target *starget) if (!targetip) return -ENOMEM; + async_synchronize_full_domain(&sdebug_async_domain); + targetip->debugfs_entry = debugfs_create_dir(dev_name(&starget->dev), sdebug_debugfs_root); @@ 
-1174,7 +1211,8 @@ static void sdebug_target_destroy(struct scsi_target *starget) targetip = (struct sdebug_target_info *)starget->hostdata; if (targetip) { starget->hostdata = NULL; - async_schedule(sdebug_tartget_cleanup_async, targetip); + async_schedule_domain(sdebug_tartget_cleanup_async, targetip, + &sdebug_async_domain); } } @@ -1188,6 +1226,11 @@ static inline bool scsi_debug_lbp(void) (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10); } +static inline bool scsi_debug_atomic_write(void) +{ + return sdebug_fake_rw == 0 && sdebug_atomic_wr; +} + static void *lba2fake_store(struct sdeb_store_info *sip, unsigned long long lba) { @@ -1815,6 +1858,14 @@ static int inquiry_vpd_b0(unsigned char *arr) /* Maximum WRITE SAME Length */ put_unaligned_be64(sdebug_write_same_length, &arr[32]); + if (sdebug_atomic_wr) { + put_unaligned_be32(sdebug_atomic_wr_max_length, &arr[40]); + put_unaligned_be32(sdebug_atomic_wr_align, &arr[44]); + put_unaligned_be32(sdebug_atomic_wr_gran, &arr[48]); + put_unaligned_be32(sdebug_atomic_wr_max_length_bndry, &arr[52]); + put_unaligned_be32(sdebug_atomic_wr_max_bndry, &arr[56]); + } + return 0x3c; /* Mandatory page length for Logical Block Provisioning */ } @@ -3377,16 +3428,238 @@ static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip, return xa_load(per_store_ap, devip->sdbg_host->si_idx); } +static inline void +sdeb_read_lock(rwlock_t *lock) +{ + if (sdebug_no_rwlock) + __acquire(lock); + else + read_lock(lock); +} + +static inline void +sdeb_read_unlock(rwlock_t *lock) +{ + if (sdebug_no_rwlock) + __release(lock); + else + read_unlock(lock); +} + +static inline void +sdeb_write_lock(rwlock_t *lock) +{ + if (sdebug_no_rwlock) + __acquire(lock); + else + write_lock(lock); +} + +static inline void +sdeb_write_unlock(rwlock_t *lock) +{ + if (sdebug_no_rwlock) + __release(lock); + else + write_unlock(lock); +} + +static inline void +sdeb_data_read_lock(struct sdeb_store_info *sip) +{ + BUG_ON(!sip); + + sdeb_read_lock(&sip->macc_data_lck); +} + +static inline void +sdeb_data_read_unlock(struct sdeb_store_info *sip) +{ + BUG_ON(!sip); + + sdeb_read_unlock(&sip->macc_data_lck); +} + +static inline void +sdeb_data_write_lock(struct sdeb_store_info *sip) +{ + BUG_ON(!sip); + + sdeb_write_lock(&sip->macc_data_lck); +} + +static inline void +sdeb_data_write_unlock(struct sdeb_store_info *sip) +{ + BUG_ON(!sip); + + sdeb_write_unlock(&sip->macc_data_lck); +} + +static inline void +sdeb_data_sector_read_lock(struct sdeb_store_info *sip) +{ + BUG_ON(!sip); + + sdeb_read_lock(&sip->macc_sector_lck); +} + +static inline void +sdeb_data_sector_read_unlock(struct sdeb_store_info *sip) +{ + BUG_ON(!sip); + + sdeb_read_unlock(&sip->macc_sector_lck); +} + +static inline void +sdeb_data_sector_write_lock(struct sdeb_store_info *sip) +{ + BUG_ON(!sip); + + sdeb_write_lock(&sip->macc_sector_lck); +} + +static inline void +sdeb_data_sector_write_unlock(struct sdeb_store_info *sip) +{ + BUG_ON(!sip); + + sdeb_write_unlock(&sip->macc_sector_lck); +} + +/* + * Atomic locking: + * We simplify the atomic model to allow only 1x atomic write and many non- + * atomic reads or writes for all LBAs. + * + * A RW lock has a similar behaviour: + * Only 1x writer and many readers. + * + * So use a RW lock for per-device read and write locking: + * An atomic access grabs the lock as a writer and non-atomic grabs the lock + * as a reader.
+ */ + +static inline void +sdeb_data_lock(struct sdeb_store_info *sip, bool atomic) +{ + if (atomic) + sdeb_data_write_lock(sip); + else + sdeb_data_read_lock(sip); +} + +static inline void +sdeb_data_unlock(struct sdeb_store_info *sip, bool atomic) +{ + if (atomic) + sdeb_data_write_unlock(sip); + else + sdeb_data_read_unlock(sip); +} + +/* Allow many reads but only 1x write per sector */ +static inline void +sdeb_data_sector_lock(struct sdeb_store_info *sip, bool do_write) +{ + if (do_write) + sdeb_data_sector_write_lock(sip); + else + sdeb_data_sector_read_lock(sip); +} + +static inline void +sdeb_data_sector_unlock(struct sdeb_store_info *sip, bool do_write) +{ + if (do_write) + sdeb_data_sector_write_unlock(sip); + else + sdeb_data_sector_read_unlock(sip); +} + +static inline void +sdeb_meta_read_lock(struct sdeb_store_info *sip) +{ + if (sdebug_no_rwlock) { + if (sip) + __acquire(&sip->macc_meta_lck); + else + __acquire(&sdeb_fake_rw_lck); + } else { + if (sip) + read_lock(&sip->macc_meta_lck); + else + read_lock(&sdeb_fake_rw_lck); + } +} + +static inline void +sdeb_meta_read_unlock(struct sdeb_store_info *sip) +{ + if (sdebug_no_rwlock) { + if (sip) + __release(&sip->macc_meta_lck); + else + __release(&sdeb_fake_rw_lck); + } else { + if (sip) + read_unlock(&sip->macc_meta_lck); + else + read_unlock(&sdeb_fake_rw_lck); + } +} + +static inline void +sdeb_meta_write_lock(struct sdeb_store_info *sip) +{ + if (sdebug_no_rwlock) { + if (sip) + __acquire(&sip->macc_meta_lck); + else + __acquire(&sdeb_fake_rw_lck); + } else { + if (sip) + write_lock(&sip->macc_meta_lck); + else + write_lock(&sdeb_fake_rw_lck); + } +} + +static inline void +sdeb_meta_write_unlock(struct sdeb_store_info *sip) +{ + if (sdebug_no_rwlock) { + if (sip) + __release(&sip->macc_meta_lck); + else + __release(&sdeb_fake_rw_lck); + } else { + if (sip) + write_unlock(&sip->macc_meta_lck); + else + write_unlock(&sdeb_fake_rw_lck); + } +} + /* Returns number of bytes copied or -1 if error. */ static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp, - u32 sg_skip, u64 lba, u32 num, bool do_write, - u8 group_number) + u32 sg_skip, u64 lba, u32 num, u8 group_number, + bool do_write, bool atomic) { int ret; - u64 block, rest = 0; + u64 block; enum dma_data_direction dir; struct scsi_data_buffer *sdb = &scp->sdb; u8 *fsp; + int i; + + /* + * Even though reads are inherently atomic (in this driver), we expect + * the atomic flag only for writes. 
+ */ + if (!do_write && atomic) + return -1; if (do_write) { dir = DMA_TO_DEVICE; @@ -3406,21 +3679,28 @@ static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp, fsp = sip->storep; block = do_div(lba, sdebug_store_sectors); - if (block + num > sdebug_store_sectors) - rest = block + num - sdebug_store_sectors; - ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents, + /* Only allow 1x atomic write or multiple non-atomic writes at any given time */ + sdeb_data_lock(sip, atomic); + for (i = 0; i < num; i++) { + /* We shouldn't need to lock for atomic writes, but do it anyway */ + sdeb_data_sector_lock(sip, do_write); + ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents, fsp + (block * sdebug_sector_size), - (num - rest) * sdebug_sector_size, sg_skip, do_write); - if (ret != (num - rest) * sdebug_sector_size) - return ret; - - if (rest) { - ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents, - fsp, rest * sdebug_sector_size, - sg_skip + ((num - rest) * sdebug_sector_size), - do_write); + sdebug_sector_size, sg_skip, do_write); + sdeb_data_sector_unlock(sip, do_write); + if (ret != sdebug_sector_size) { + ret += (i * sdebug_sector_size); + break; + } + sg_skip += sdebug_sector_size; + if (++block >= sdebug_store_sectors) + block = 0; } + /* Preserve the partial byte count from a short copy above */ + if (i == num) + ret = num * sdebug_sector_size; + sdeb_data_unlock(sip, atomic); return ret; } @@ -3596,70 +3874,6 @@ static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec, return ret; } -static inline void -sdeb_read_lock(struct sdeb_store_info *sip) -{ - if (sdebug_no_rwlock) { - if (sip) - __acquire(&sip->macc_lck); - else - __acquire(&sdeb_fake_rw_lck); - } else { - if (sip) - read_lock(&sip->macc_lck); - else - read_lock(&sdeb_fake_rw_lck); - } -} - -static inline void -sdeb_read_unlock(struct sdeb_store_info *sip) -{ - if (sdebug_no_rwlock) { - if (sip) - __release(&sip->macc_lck); - else - __release(&sdeb_fake_rw_lck); - } else { - if (sip) - read_unlock(&sip->macc_lck); - else - read_unlock(&sdeb_fake_rw_lck); - } -} - -static inline void -sdeb_write_lock(struct sdeb_store_info *sip) -{ - if (sdebug_no_rwlock) { - if (sip) - __acquire(&sip->macc_lck); - else - __acquire(&sdeb_fake_rw_lck); - } else { - if (sip) - write_lock(&sip->macc_lck); - else - write_lock(&sdeb_fake_rw_lck); - } -} - -static inline void -sdeb_write_unlock(struct sdeb_store_info *sip) -{ - if (sdebug_no_rwlock) { - if (sip) - __release(&sip->macc_lck); - else - __release(&sdeb_fake_rw_lck); - } else { - if (sip) - write_unlock(&sip->macc_lck); - else - write_unlock(&sdeb_fake_rw_lck); - } -} - static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) { bool check_prot; @@ -3669,6 +3883,7 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) u64 lba; struct sdeb_store_info *sip = devip2sip(devip, true); u8 *cmd = scp->cmnd; + bool meta_data_locked = false; switch (cmd[0]) { case READ_16: @@ -3727,6 +3942,10 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) atomic_set(&sdeb_inject_pending, 0); } + /* + * When checking device access params, for reads we only check data + * versus what is set at init time, so no need to lock.
+ */ ret = check_device_access_params(scp, lba, num, false); if (ret) return ret; @@ -3746,29 +3965,33 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) return check_condition_result; } - sdeb_read_lock(sip); + if (sdebug_dev_is_zoned(devip) || + (sdebug_dix && scsi_prot_sg_count(scp))) { + sdeb_meta_read_lock(sip); + meta_data_locked = true; + } /* DIX + T10 DIF */ if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) { switch (prot_verify_read(scp, lba, num, ei_lba)) { case 1: /* Guard tag error */ if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */ - sdeb_read_unlock(sip); + sdeb_meta_read_unlock(sip); mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1); return check_condition_result; } else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) { - sdeb_read_unlock(sip); + sdeb_meta_read_unlock(sip); mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1); return illegal_condition_result; } break; case 3: /* Reference tag error */ if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */ - sdeb_read_unlock(sip); + sdeb_meta_read_unlock(sip); mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3); return check_condition_result; } else if (scp->prot_flags & SCSI_PROT_REF_CHECK) { - sdeb_read_unlock(sip); + sdeb_meta_read_unlock(sip); mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3); return illegal_condition_result; } @@ -3776,8 +3999,9 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) } } - ret = do_device_access(sip, scp, 0, lba, num, false, 0); - sdeb_read_unlock(sip); + ret = do_device_access(sip, scp, 0, lba, num, 0, false, false); + if (meta_data_locked) + sdeb_meta_read_unlock(sip); if (unlikely(ret == -1)) return DID_ERROR << 16; @@ -3967,6 +4191,7 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) u64 lba; struct sdeb_store_info *sip = devip2sip(devip, true); u8 *cmd = scp->cmnd; + bool meta_data_locked = false; switch (cmd[0]) { case WRITE_16: @@ -4025,10 +4250,17 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) "to DIF device\n"); } - sdeb_write_lock(sip); + if (sdebug_dev_is_zoned(devip) || + (sdebug_dix && scsi_prot_sg_count(scp)) || + scsi_debug_lbp()) { + sdeb_meta_write_lock(sip); + meta_data_locked = true; + } + ret = check_device_access_params(scp, lba, num, true); if (ret) { - sdeb_write_unlock(sip); + if (meta_data_locked) + sdeb_meta_write_unlock(sip); return ret; } @@ -4037,22 +4269,22 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) switch (prot_verify_write(scp, lba, num, ei_lba)) { case 1: /* Guard tag error */ if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) { - sdeb_write_unlock(sip); + sdeb_meta_write_unlock(sip); mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1); return illegal_condition_result; } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */ - sdeb_write_unlock(sip); + sdeb_meta_write_unlock(sip); mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1); return check_condition_result; } break; case 3: /* Reference tag error */ if (scp->prot_flags & SCSI_PROT_REF_CHECK) { - sdeb_write_unlock(sip); + sdeb_meta_write_unlock(sip); mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3); return illegal_condition_result; } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */ - sdeb_write_unlock(sip); + sdeb_meta_write_unlock(sip); mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3); return check_condition_result; } @@ -4060,13 +4292,16 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) } } - ret = do_device_access(sip, scp, 0, lba, num, true, 
group); + ret = do_device_access(sip, scp, 0, lba, num, group, true, false); if (unlikely(scsi_debug_lbp())) map_region(sip, lba, num); + /* If ZBC zone then bump its write pointer */ if (sdebug_dev_is_zoned(devip)) zbc_inc_wp(devip, lba, num); - sdeb_write_unlock(sip); + if (meta_data_locked) + sdeb_meta_write_unlock(sip); + if (unlikely(-1 == ret)) return DID_ERROR << 16; else if (unlikely(sdebug_verbose && @@ -4176,7 +4411,8 @@ static int resp_write_scat(struct scsi_cmnd *scp, goto err_out; } - sdeb_write_lock(sip); + /* Just keep it simple and always lock for now */ + sdeb_meta_write_lock(sip); sg_off = lbdof_blen; /* Spec says Buffer xfer Length field in number of LBs in dout */ cum_lb = 0; @@ -4219,7 +4455,11 @@ static int resp_write_scat(struct scsi_cmnd *scp, } } - ret = do_device_access(sip, scp, sg_off, lba, num, true, group); + /* + * Write ranges atomically to keep as close to pre-atomic + * write behaviour as possible. + */ + ret = do_device_access(sip, scp, sg_off, lba, num, group, true, true); /* If ZBC zone then bump its write pointer */ if (sdebug_dev_is_zoned(devip)) zbc_inc_wp(devip, lba, num); @@ -4258,7 +4498,7 @@ static int resp_write_scat(struct scsi_cmnd *scp, } ret = 0; err_out_unlock: - sdeb_write_unlock(sip); + sdeb_meta_write_unlock(sip); err_out: kfree(lrdp); return ret; @@ -4277,14 +4517,16 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, scp->device->hostdata, true); u8 *fs1p; u8 *fsp; + bool meta_data_locked = false; - sdeb_write_lock(sip); + if (sdebug_dev_is_zoned(devip) || scsi_debug_lbp()) { + sdeb_meta_write_lock(sip); + meta_data_locked = true; + } ret = check_device_access_params(scp, lba, num, true); - if (ret) { - sdeb_write_unlock(sip); - return ret; - } + if (ret) + goto out; if (unmap && scsi_debug_lbp()) { unmap_region(sip, lba, num); @@ -4295,6 +4537,7 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, /* if ndob then zero 1 logical block, else fetch 1 logical block */ fsp = sip->storep; fs1p = fsp + (block * lb_size); + sdeb_data_write_lock(sip); if (ndob) { memset(fs1p, 0, lb_size); ret = 0; @@ -4302,8 +4545,9 @@ ret = fetch_to_dev_buffer(scp, fs1p, lb_size); if (-1 == ret) { - sdeb_write_unlock(sip); - return DID_ERROR << 16; + sdeb_data_write_unlock(sip); + ret = DID_ERROR << 16; + goto out; } else if (sdebug_verbose && !ndob && (ret < lb_size)) sdev_printk(KERN_INFO, scp->device, "%s: %s: lb size=%u, IO sent=%d bytes\n", @@ -4320,10 +4563,12 @@ /* If ZBC zone then bump its write pointer */ if (sdebug_dev_is_zoned(devip)) zbc_inc_wp(devip, lba, num); + sdeb_data_write_unlock(sip); + ret = 0; out: - sdeb_write_unlock(sip); - - return 0; + if (meta_data_locked) + sdeb_meta_write_unlock(sip); + return ret; } static int resp_write_same_10(struct scsi_cmnd *scp, @@ -4466,25 +4711,30 @@ static int resp_comp_write(struct scsi_cmnd *scp, return check_condition_result; } - sdeb_write_lock(sip); - ret = do_dout_fetch(scp, dnum, arr); if (ret == -1) { retval = DID_ERROR << 16; - goto cleanup; + goto cleanup_free; } else if (sdebug_verbose && (ret < (dnum * lb_size))) sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb " "indicated=%u, IO sent=%d bytes\n", my_name, dnum * lb_size, ret); + + sdeb_data_write_lock(sip); + sdeb_meta_write_lock(sip); if (!comp_write_worker(sip, lba, num, arr, false)) { mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0); retval = check_condition_result; - goto
+
+	/* Cover sip->map_storep (which map_region() sets) with data lock */
 	if (scsi_debug_lbp())
 		map_region(sip, lba, num);
-cleanup:
-	sdeb_write_unlock(sip);
+cleanup_unlock:
+	sdeb_meta_write_unlock(sip);
+	sdeb_data_write_unlock(sip);
+cleanup_free:
 	kfree(arr);
 	return retval;
 }
@@ -4528,7 +4778,7 @@ static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 
 	desc = (void *)&buf[8];
 
-	sdeb_write_lock(sip);
+	sdeb_meta_write_lock(sip);
 
 	for (i = 0 ; i < descriptors ; i++) {
 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
@@ -4544,7 +4794,7 @@
 	ret = 0;
 
 out:
-	sdeb_write_unlock(sip);
+	sdeb_meta_write_unlock(sip);
 	kfree(buf);
 
 	return ret;
@@ -4702,12 +4952,13 @@ static int resp_pre_fetch(struct scsi_cmnd *scp,
 		rest = block + nblks - sdebug_store_sectors;
 
 	/* Try to bring the PRE-FETCH range into CPU's cache */
-	sdeb_read_lock(sip);
+	sdeb_data_read_lock(sip);
 	prefetch_range(fsp + (sdebug_sector_size * block),
 		       (nblks - rest) * sdebug_sector_size);
 	if (rest)
 		prefetch_range(fsp, rest * sdebug_sector_size);
-	sdeb_read_unlock(sip);
+
+	sdeb_data_read_unlock(sip);
 fini:
 	if (cmd[1] & 0x2)
 		res = SDEG_RES_IMMED_MASK;
@@ -4866,7 +5117,7 @@ static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 		return check_condition_result;
 	}
 	/* Not changing store, so only need read access */
-	sdeb_read_lock(sip);
+	sdeb_data_read_lock(sip);
 
 	ret = do_dout_fetch(scp, a_num, arr);
 	if (ret == -1) {
@@ -4888,7 +5139,7 @@ static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 		goto cleanup;
 	}
 cleanup:
-	sdeb_read_unlock(sip);
+	sdeb_data_read_unlock(sip);
 	kfree(arr);
 	return ret;
 }
@@ -4934,7 +5185,7 @@ static int resp_report_zones(struct scsi_cmnd *scp,
 		return check_condition_result;
 	}
 
-	sdeb_read_lock(sip);
+	sdeb_meta_read_lock(sip);
 
 	desc = arr + 64;
 	for (lba = zs_lba; lba < sdebug_capacity;
@@ -5032,11 +5283,70 @@ static int resp_report_zones(struct scsi_cmnd *scp,
 	ret = fill_from_dev_buffer(scp, arr,
 			   min_t(u32, alloc_len, rep_len));
 fini:
-	sdeb_read_unlock(sip);
+	sdeb_meta_read_unlock(sip);
 	kfree(arr);
 	return ret;
 }
 
+static int resp_atomic_write(struct scsi_cmnd *scp,
+			     struct sdebug_dev_info *devip)
+{
+	struct sdeb_store_info *sip;
+	u8 *cmd = scp->cmnd;
+	u16 boundary, len;
+	u64 lba, lba_tmp;
+	int ret;
+
+	if (!scsi_debug_atomic_write()) {
+		mk_sense_invalid_opcode(scp);
+		return check_condition_result;
+	}
+
+	sip = devip2sip(devip, true);
+
+	lba = get_unaligned_be64(cmd + 2);
+	boundary = get_unaligned_be16(cmd + 10);
+	len = get_unaligned_be16(cmd + 12);
+
+	lba_tmp = lba;
+	if (sdebug_atomic_wr_align &&
+	    do_div(lba_tmp, sdebug_atomic_wr_align)) {
+		/* Does not meet alignment requirement */
+		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+		return check_condition_result;
+	}
+
+	if (sdebug_atomic_wr_gran && len % sdebug_atomic_wr_gran) {
+		/* Does not meet granularity requirement */
+		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+		return check_condition_result;
+	}
+
+	if (boundary > 0) {
+		if (boundary > sdebug_atomic_wr_max_bndry) {
+			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
+			return check_condition_result;
+		}
+
+		if (len > sdebug_atomic_wr_max_length_bndry) {
+			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
+			return check_condition_result;
+		}
+	} else {
+		if (len > sdebug_atomic_wr_max_length) {
+			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
+			return check_condition_result;
+		}
+	}
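The validation above enforces the same limits that the module advertises through its atomic_wr_* parameters: starting-LBA alignment, transfer-length granularity, and a length cap that depends on whether a boundary is requested. A self-contained userspace sketch of those rules, with plain variables standing in for the sdebug_atomic_wr_* module parameters (initialized to the defaults documented further down) and an ordinary 64-bit modulo in place of do_div():

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-ins for the sdebug_atomic_wr_* module parameters (defaults). */
	static uint32_t wr_align = 2, wr_gran = 2;
	static uint32_t wr_max_len = 8192, wr_max_len_bndry = 8192,
			wr_max_bndry = 128;

	/* Mirror of the CDB checks in resp_atomic_write(); 0 means acceptable. */
	static int atomic_cdb_ok(uint64_t lba, uint16_t len, uint16_t boundary)
	{
		if (wr_align && lba % wr_align)
			return -1;	/* starting LBA not aligned */
		if (wr_gran && len % wr_gran)
			return -1;	/* length not a multiple of the granularity */
		if (boundary) {
			if (boundary > wr_max_bndry || len > wr_max_len_bndry)
				return -1;
		} else if (len > wr_max_len) {
			return -1;
		}
		return 0;
	}

	int main(void)
	{
		printf("%d\n", atomic_cdb_ok(4, 8, 0));	/* 0: passes all checks */
		printf("%d\n", atomic_cdb_ok(3, 8, 0));	/* -1: LBA not 2-aligned */
		return 0;
	}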
+ + ret = do_device_access(sip, scp, 0, lba, len, 0, true, true); + if (unlikely(ret == -1)) + return DID_ERROR << 16; + if (unlikely(ret != len * sdebug_sector_size)) + return DID_ERROR << 16; + return 0; +} + /* Logic transplanted from tcmu-runner, file_zbc.c */ static void zbc_open_all(struct sdebug_dev_info *devip) { @@ -5063,8 +5373,7 @@ static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) mk_sense_invalid_opcode(scp); return check_condition_result; } - - sdeb_write_lock(sip); + sdeb_meta_write_lock(sip); if (all) { /* Check if all closed zones can be open */ @@ -5113,7 +5422,7 @@ static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) zbc_open_zone(devip, zsp, true); fini: - sdeb_write_unlock(sip); + sdeb_meta_write_unlock(sip); return res; } @@ -5140,7 +5449,7 @@ static int resp_close_zone(struct scsi_cmnd *scp, return check_condition_result; } - sdeb_write_lock(sip); + sdeb_meta_write_lock(sip); if (all) { zbc_close_all(devip); @@ -5169,7 +5478,7 @@ static int resp_close_zone(struct scsi_cmnd *scp, zbc_close_zone(devip, zsp); fini: - sdeb_write_unlock(sip); + sdeb_meta_write_unlock(sip); return res; } @@ -5212,7 +5521,7 @@ static int resp_finish_zone(struct scsi_cmnd *scp, return check_condition_result; } - sdeb_write_lock(sip); + sdeb_meta_write_lock(sip); if (all) { zbc_finish_all(devip); @@ -5241,7 +5550,7 @@ static int resp_finish_zone(struct scsi_cmnd *scp, zbc_finish_zone(devip, zsp, true); fini: - sdeb_write_unlock(sip); + sdeb_meta_write_unlock(sip); return res; } @@ -5292,7 +5601,7 @@ static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) return check_condition_result; } - sdeb_write_lock(sip); + sdeb_meta_write_lock(sip); if (all) { zbc_rwp_all(devip); @@ -5320,7 +5629,7 @@ static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) zbc_rwp_zone(devip, zsp); fini: - sdeb_write_unlock(sip); + sdeb_meta_write_unlock(sip); return res; } @@ -6284,6 +6593,7 @@ module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO); module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO); module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO); module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO); +module_param_named(atomic_wr, sdebug_atomic_wr, int, S_IRUGO); module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO); module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR); module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR); @@ -6318,6 +6628,11 @@ module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO); module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO); module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO); module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO); +module_param_named(atomic_wr_max_length, sdebug_atomic_wr_max_length, int, S_IRUGO); +module_param_named(atomic_wr_align, sdebug_atomic_wr_align, int, S_IRUGO); +module_param_named(atomic_wr_gran, sdebug_atomic_wr_gran, int, S_IRUGO); +module_param_named(atomic_wr_max_length_bndry, sdebug_atomic_wr_max_length_bndry, int, S_IRUGO); +module_param_named(atomic_wr_max_bndry, sdebug_atomic_wr_max_bndry, int, S_IRUGO); module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO); module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR); module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int, @@ -6361,6 +6676,7 @@ MODULE_PARM_DESC(lbprz, MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command 
(def=0)"); MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)"); MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)"); +MODULE_PARM_DESC(atomic_write, "enable ATOMIC WRITE support, support WRITE ATOMIC(16) (def=0)"); MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)"); MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method"); MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)"); @@ -6392,6 +6708,11 @@ MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)" MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)"); MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)"); MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)"); +MODULE_PARM_DESC(atomic_wr_max_length, "max # of blocks can be atomically written in one cmd (def=8192)"); +MODULE_PARM_DESC(atomic_wr_align, "minimum alignment of atomic write in blocks (def=2)"); +MODULE_PARM_DESC(atomic_wr_gran, "minimum granularity of atomic write in blocks (def=2)"); +MODULE_PARM_DESC(atomic_wr_max_length_bndry, "max # of blocks can be atomically written in one cmd with boundary set (def=8192)"); +MODULE_PARM_DESC(atomic_wr_max_bndry, "max # boundaries per atomic write (def=128)"); MODULE_PARM_DESC(uuid_ctl, "1->use uuid for lu name, 0->don't, 2->all use same (def=0)"); MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)"); @@ -7563,6 +7884,7 @@ static int __init scsi_debug_init(void) return -EINVAL; } } + xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ); if (want_store) { idx = sdebug_add_store(); @@ -7770,7 +8092,9 @@ static int sdebug_add_store(void) map_region(sip, 0, 2); } - rwlock_init(&sip->macc_lck); + rwlock_init(&sip->macc_data_lck); + rwlock_init(&sip->macc_meta_lck); + rwlock_init(&sip->macc_sector_lck); return (int)n_idx; err: sdebug_erase_store((int)n_idx, sip); diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index ec39acc986d6..3958a6d14bf4 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -631,8 +631,7 @@ static bool scsi_end_request(struct request *req, blk_status_t error, if (blk_update_request(req, error, bytes)) return true; - // XXX: - if (blk_queue_add_random(q)) + if (q->limits.features & BLK_FEAT_ADD_RANDOM) add_disk_randomness(req->q->disk); WARN_ON_ONCE(!blk_rq_is_passthrough(req) && @@ -1140,9 +1139,9 @@ blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd) */ count = __blk_rq_map_sg(rq->q, rq, cmd->sdb.table.sgl, &last_sg); - if (blk_rq_bytes(rq) & rq->q->dma_pad_mask) { + if (blk_rq_bytes(rq) & rq->q->limits.dma_pad_mask) { unsigned int pad_len = - (rq->q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1; + (rq->q->limits.dma_pad_mask & ~blk_rq_bytes(rq)) + 1; last_sg->length += pad_len; cmd->extra_len += pad_len; @@ -1987,7 +1986,7 @@ void scsi_init_limits(struct Scsi_Host *shost, struct queue_limits *lim) shost->dma_alignment, dma_get_cache_alignment() - 1); if (shost->no_highmem) - lim->bounce = BLK_BOUNCE_HIGH; + lim->features |= BLK_FEAT_BOUNCE_HIGH; dma_set_seg_boundary(dev, shost->dma_boundary); dma_set_max_seg_size(dev, shost->max_segment_size); diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c index 41a950075913..3e47c4472a80 100644 --- a/drivers/scsi/scsi_trace.c +++ b/drivers/scsi/scsi_trace.c @@ -326,6 +326,26 @@ out: } static const char * 
+scsi_trace_atomic_write16_out(struct trace_seq *p, unsigned char *cdb, int len) +{ + const char *ret = trace_seq_buffer_ptr(p); + unsigned int boundary_size; + unsigned int nr_blocks; + sector_t lba; + + lba = get_unaligned_be64(&cdb[2]); + boundary_size = get_unaligned_be16(&cdb[10]); + nr_blocks = get_unaligned_be16(&cdb[12]); + + trace_seq_printf(p, "lba=%llu txlen=%u boundary_size=%u", + lba, nr_blocks, boundary_size); + + trace_seq_putc(p, 0); + + return ret; +} + +static const char * scsi_trace_varlen(struct trace_seq *p, unsigned char *cdb, int len) { switch (SERVICE_ACTION32(cdb)) { @@ -385,6 +405,8 @@ scsi_trace_parse_cdb(struct trace_seq *p, unsigned char *cdb, int len) return scsi_trace_zbc_in(p, cdb, len); case ZBC_OUT: return scsi_trace_zbc_out(p, cdb, len); + case WRITE_ATOMIC_16: + return scsi_trace_atomic_write16_out(p, cdb, len); default: return scsi_trace_misc(p, cdb, len); } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 6b64af7d4927..2e933fd1de70 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -102,12 +102,13 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_ZBC); #define SD_MINORS 16 -static void sd_config_discard(struct scsi_disk *, unsigned int); -static void sd_config_write_same(struct scsi_disk *); +static void sd_config_discard(struct scsi_disk *sdkp, struct queue_limits *lim, + unsigned int mode); +static void sd_config_write_same(struct scsi_disk *sdkp, + struct queue_limits *lim); static int sd_revalidate_disk(struct gendisk *); static void sd_unlock_native_capacity(struct gendisk *disk); static void sd_shutdown(struct device *); -static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer); static void scsi_disk_release(struct device *cdev); static DEFINE_IDA(sd_index_ida); @@ -120,17 +121,18 @@ static const char *sd_cache_types[] = { "write back, no read (daft)" }; -static void sd_set_flush_flag(struct scsi_disk *sdkp) +static void sd_set_flush_flag(struct scsi_disk *sdkp, + struct queue_limits *lim) { - bool wc = false, fua = false; - if (sdkp->WCE) { - wc = true; + lim->features |= BLK_FEAT_WRITE_CACHE; if (sdkp->DPOFUA) - fua = true; + lim->features |= BLK_FEAT_FUA; + else + lim->features &= ~BLK_FEAT_FUA; + } else { + lim->features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA); } - - blk_queue_write_cache(sdkp->disk->queue, wc, fua); } static ssize_t @@ -168,9 +170,18 @@ cache_type_store(struct device *dev, struct device_attribute *attr, wce = (ct & 0x02) && !sdkp->write_prot ? 
1 : 0; if (sdkp->cache_override) { + struct queue_limits lim; + sdkp->WCE = wce; sdkp->RCD = rcd; - sd_set_flush_flag(sdkp); + + lim = queue_limits_start_update(sdkp->disk->queue); + sd_set_flush_flag(sdkp, &lim); + blk_mq_freeze_queue(sdkp->disk->queue); + ret = queue_limits_commit_update(sdkp->disk->queue, &lim); + blk_mq_unfreeze_queue(sdkp->disk->queue); + if (ret) + return ret; return count; } @@ -457,16 +468,12 @@ provisioning_mode_store(struct device *dev, struct device_attribute *attr, { struct scsi_disk *sdkp = to_scsi_disk(dev); struct scsi_device *sdp = sdkp->device; - int mode; + struct queue_limits lim; + int mode, err; if (!capable(CAP_SYS_ADMIN)) return -EACCES; - if (sd_is_zoned(sdkp)) { - sd_config_discard(sdkp, SD_LBP_DISABLE); - return count; - } - if (sdp->type != TYPE_DISK) return -EINVAL; @@ -474,8 +481,13 @@ provisioning_mode_store(struct device *dev, struct device_attribute *attr, if (mode < 0) return -EINVAL; - sd_config_discard(sdkp, mode); - + lim = queue_limits_start_update(sdkp->disk->queue); + sd_config_discard(sdkp, &lim, mode); + blk_mq_freeze_queue(sdkp->disk->queue); + err = queue_limits_commit_update(sdkp->disk->queue, &lim); + blk_mq_unfreeze_queue(sdkp->disk->queue); + if (err) + return err; return count; } static DEVICE_ATTR_RW(provisioning_mode); @@ -558,6 +570,7 @@ max_write_same_blocks_store(struct device *dev, struct device_attribute *attr, { struct scsi_disk *sdkp = to_scsi_disk(dev); struct scsi_device *sdp = sdkp->device; + struct queue_limits lim; unsigned long max; int err; @@ -579,8 +592,13 @@ max_write_same_blocks_store(struct device *dev, struct device_attribute *attr, sdkp->max_ws_blocks = max; } - sd_config_write_same(sdkp); - + lim = queue_limits_start_update(sdkp->disk->queue); + sd_config_write_same(sdkp, &lim); + blk_mq_freeze_queue(sdkp->disk->queue); + err = queue_limits_commit_update(sdkp->disk->queue, &lim); + blk_mq_unfreeze_queue(sdkp->disk->queue); + if (err) + return err; return count; } static DEVICE_ATTR_RW(max_write_same_blocks); @@ -823,25 +841,28 @@ static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd, return protect; } -static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode) +static void sd_disable_discard(struct scsi_disk *sdkp) +{ + sdkp->provisioning_mode = SD_LBP_DISABLE; + blk_queue_disable_discard(sdkp->disk->queue); +} + +static void sd_config_discard(struct scsi_disk *sdkp, struct queue_limits *lim, + unsigned int mode) { - struct request_queue *q = sdkp->disk->queue; unsigned int logical_block_size = sdkp->device->sector_size; unsigned int max_blocks = 0; - q->limits.discard_alignment = - sdkp->unmap_alignment * logical_block_size; - q->limits.discard_granularity = - max(sdkp->physical_block_size, - sdkp->unmap_granularity * logical_block_size); + lim->discard_alignment = sdkp->unmap_alignment * logical_block_size; + lim->discard_granularity = max(sdkp->physical_block_size, + sdkp->unmap_granularity * logical_block_size); sdkp->provisioning_mode = mode; switch (mode) { case SD_LBP_FULL: case SD_LBP_DISABLE: - blk_queue_max_discard_sectors(q, 0); - return; + break; case SD_LBP_UNMAP: max_blocks = min_not_zero(sdkp->max_unmap_blocks, @@ -872,7 +893,8 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode) break; } - blk_queue_max_discard_sectors(q, max_blocks * (logical_block_size >> 9)); + lim->max_hw_discard_sectors = max_blocks * + (logical_block_size >> SECTOR_SHIFT); } static void *sd_set_special_bvec(struct request *rq, unsigned int data_len) @@ -918,6 
+940,64 @@ static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd) return scsi_alloc_sgtables(cmd); } +static void sd_config_atomic(struct scsi_disk *sdkp, struct queue_limits *lim) +{ + unsigned int logical_block_size = sdkp->device->sector_size, + physical_block_size_sectors, max_atomic, unit_min, unit_max; + + if ((!sdkp->max_atomic && !sdkp->max_atomic_with_boundary) || + sdkp->protection_type == T10_PI_TYPE2_PROTECTION) + return; + + physical_block_size_sectors = sdkp->physical_block_size / + sdkp->device->sector_size; + + unit_min = rounddown_pow_of_two(sdkp->atomic_granularity ? + sdkp->atomic_granularity : + physical_block_size_sectors); + + /* + * Only use atomic boundary when we have the odd scenario of + * sdkp->max_atomic == 0, which the spec does permit. + */ + if (sdkp->max_atomic) { + max_atomic = sdkp->max_atomic; + unit_max = rounddown_pow_of_two(sdkp->max_atomic); + sdkp->use_atomic_write_boundary = 0; + } else { + max_atomic = sdkp->max_atomic_with_boundary; + unit_max = rounddown_pow_of_two(sdkp->max_atomic_boundary); + sdkp->use_atomic_write_boundary = 1; + } + + /* + * Ensure compliance with granularity and alignment. For now, keep it + * simple and just don't support atomic writes for values mismatched + * with max_{boundary}atomic, physical block size, and + * atomic_granularity itself. + * + * We're really being distrustful by checking unit_max also... + */ + if (sdkp->atomic_granularity > 1) { + if (unit_min > 1 && unit_min % sdkp->atomic_granularity) + return; + if (unit_max > 1 && unit_max % sdkp->atomic_granularity) + return; + } + + if (sdkp->atomic_alignment > 1) { + if (unit_min > 1 && unit_min % sdkp->atomic_alignment) + return; + if (unit_max > 1 && unit_max % sdkp->atomic_alignment) + return; + } + + lim->atomic_write_hw_max = max_atomic * logical_block_size; + lim->atomic_write_hw_boundary = 0; + lim->atomic_write_hw_unit_min = unit_min * logical_block_size; + lim->atomic_write_hw_unit_max = unit_max * logical_block_size; +} + static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd, bool unmap) { @@ -1000,9 +1080,16 @@ static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd) return sd_setup_write_same10_cmnd(cmd, false); } -static void sd_config_write_same(struct scsi_disk *sdkp) +static void sd_disable_write_same(struct scsi_disk *sdkp) +{ + sdkp->device->no_write_same = 1; + sdkp->max_ws_blocks = 0; + blk_queue_disable_write_zeroes(sdkp->disk->queue); +} + +static void sd_config_write_same(struct scsi_disk *sdkp, + struct queue_limits *lim) { - struct request_queue *q = sdkp->disk->queue; unsigned int logical_block_size = sdkp->device->sector_size; if (sdkp->device->no_write_same) { @@ -1056,8 +1143,8 @@ static void sd_config_write_same(struct scsi_disk *sdkp) } out: - blk_queue_max_write_zeroes_sectors(q, sdkp->max_ws_blocks * - (logical_block_size >> 9)); + lim->max_write_zeroes_sectors = + sdkp->max_ws_blocks * (logical_block_size >> SECTOR_SHIFT); } static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd) @@ -1209,6 +1296,26 @@ static int sd_cdl_dld(struct scsi_disk *sdkp, struct scsi_cmnd *scmd) return (hint - IOPRIO_HINT_DEV_DURATION_LIMIT_1) + 1; } +static blk_status_t sd_setup_atomic_cmnd(struct scsi_cmnd *cmd, + sector_t lba, unsigned int nr_blocks, + bool boundary, unsigned char flags) +{ + cmd->cmd_len = 16; + cmd->cmnd[0] = WRITE_ATOMIC_16; + cmd->cmnd[1] = flags; + put_unaligned_be64(lba, &cmd->cmnd[2]); + put_unaligned_be16(nr_blocks, &cmd->cmnd[12]); + if (boundary) + put_unaligned_be16(nr_blocks, 
&cmd->cmnd[10]); + else + put_unaligned_be16(0, &cmd->cmnd[10]); + put_unaligned_be16(nr_blocks, &cmd->cmnd[12]); + cmd->cmnd[14] = 0; + cmd->cmnd[15] = 0; + + return BLK_STS_OK; +} + static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd) { struct request *rq = scsi_cmd_to_rq(cmd); @@ -1274,6 +1381,10 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd) if (protect && sdkp->protection_type == T10_PI_TYPE2_PROTECTION) { ret = sd_setup_rw32_cmnd(cmd, write, lba, nr_blocks, protect | fua, dld); + } else if (rq->cmd_flags & REQ_ATOMIC && write) { + ret = sd_setup_atomic_cmnd(cmd, lba, nr_blocks, + sdkp->use_atomic_write_boundary, + protect | fua); } else if (sdp->use_16_for_rw || (nr_blocks > 0xffff)) { ret = sd_setup_rw16_cmnd(cmd, write, lba, nr_blocks, protect | fua, dld); @@ -2247,15 +2358,14 @@ static int sd_done(struct scsi_cmnd *SCpnt) case 0x24: /* INVALID FIELD IN CDB */ switch (SCpnt->cmnd[0]) { case UNMAP: - sd_config_discard(sdkp, SD_LBP_DISABLE); + sd_disable_discard(sdkp); break; case WRITE_SAME_16: case WRITE_SAME: if (SCpnt->cmnd[1] & 8) { /* UNMAP */ - sd_config_discard(sdkp, SD_LBP_DISABLE); + sd_disable_discard(sdkp); } else { - sdkp->device->no_write_same = 1; - sd_config_write_same(sdkp); + sd_disable_write_same(sdkp); req->rq_flags |= RQF_QUIET; } break; @@ -2267,7 +2377,7 @@ static int sd_done(struct scsi_cmnd *SCpnt) } out: - if (sd_is_zoned(sdkp)) + if (sdkp->device->type == TYPE_ZBC) good_bytes = sd_zbc_complete(SCpnt, good_bytes, &sshdr); SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, SCpnt, @@ -2461,11 +2571,13 @@ static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer return 0; } -static void sd_config_protection(struct scsi_disk *sdkp) +static void sd_config_protection(struct scsi_disk *sdkp, + struct queue_limits *lim) { struct scsi_device *sdp = sdkp->device; - sd_dif_config_host(sdkp); + if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) + sd_dif_config_host(sdkp, lim); if (!sdkp->protection_type) return; @@ -2514,7 +2626,7 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp, #define READ_CAPACITY_RETRIES_ON_RESET 10 static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp, - unsigned char *buffer) + struct queue_limits *lim, unsigned char *buffer) { unsigned char cmd[16]; struct scsi_sense_hdr sshdr; @@ -2588,7 +2700,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp, /* Lowest aligned logical block */ alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size; - blk_queue_alignment_offset(sdp->request_queue, alignment); + lim->alignment_offset = alignment; if (alignment && sdkp->first_scan) sd_printk(KERN_NOTICE, sdkp, "physical block alignment offset: %u\n", alignment); @@ -2599,7 +2711,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp, if (buffer[14] & 0x40) /* LBPRZ */ sdkp->lbprz = 1; - sd_config_discard(sdkp, SD_LBP_WS16); + sd_config_discard(sdkp, lim, SD_LBP_WS16); } sdkp->capacity = lba + 1; @@ -2702,13 +2814,14 @@ static int sd_try_rc16_first(struct scsi_device *sdp) * read disk capacity */ static void -sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer) +sd_read_capacity(struct scsi_disk *sdkp, struct queue_limits *lim, + unsigned char *buffer) { int sector_size; struct scsi_device *sdp = sdkp->device; if (sd_try_rc16_first(sdp)) { - sector_size = read_capacity_16(sdkp, sdp, buffer); + sector_size = read_capacity_16(sdkp, sdp, lim, buffer); if (sector_size == -EOVERFLOW) goto 
got_data; if (sector_size == -ENODEV) @@ -2728,7 +2841,7 @@ sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer) int old_sector_size = sector_size; sd_printk(KERN_NOTICE, sdkp, "Very big device. " "Trying to use READ CAPACITY(16).\n"); - sector_size = read_capacity_16(sdkp, sdp, buffer); + sector_size = read_capacity_16(sdkp, sdp, lim, buffer); if (sector_size < 0) { sd_printk(KERN_NOTICE, sdkp, "Using 0xffffffff as device size\n"); @@ -2787,9 +2900,8 @@ got_data: */ sector_size = 512; } - blk_queue_logical_block_size(sdp->request_queue, sector_size); - blk_queue_physical_block_size(sdp->request_queue, - sdkp->physical_block_size); + lim->logical_block_size = sector_size; + lim->physical_block_size = sdkp->physical_block_size; sdkp->device->sector_size = sector_size; if (sdkp->capacity > 0xffffffff) @@ -3195,11 +3307,30 @@ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer) return; } -/** - * sd_read_block_limits - Query disk device for preferred I/O sizes. - * @sdkp: disk to query +static unsigned int sd_discard_mode(struct scsi_disk *sdkp) +{ + if (!sdkp->lbpvpd) { + /* LBP VPD page not provided */ + if (sdkp->max_unmap_blocks) + return SD_LBP_UNMAP; + return SD_LBP_WS16; + } + + /* LBP VPD page tells us what to use */ + if (sdkp->lbpu && sdkp->max_unmap_blocks) + return SD_LBP_UNMAP; + if (sdkp->lbpws) + return SD_LBP_WS16; + if (sdkp->lbpws10) + return SD_LBP_WS10; + return SD_LBP_DISABLE; +} + +/* + * Query disk device for preferred I/O sizes. */ -static void sd_read_block_limits(struct scsi_disk *sdkp) +static void sd_read_block_limits(struct scsi_disk *sdkp, + struct queue_limits *lim) { struct scsi_vpd *vpd; @@ -3219,7 +3350,7 @@ static void sd_read_block_limits(struct scsi_disk *sdkp) sdkp->max_ws_blocks = (u32)get_unaligned_be64(&vpd->data[36]); if (!sdkp->lbpme) - goto out; + goto config_atomic; lba_count = get_unaligned_be32(&vpd->data[20]); desc_count = get_unaligned_be32(&vpd->data[24]); @@ -3233,23 +3364,16 @@ static void sd_read_block_limits(struct scsi_disk *sdkp) sdkp->unmap_alignment = get_unaligned_be32(&vpd->data[32]) & ~(1 << 31); - if (!sdkp->lbpvpd) { /* LBP VPD page not provided */ - - if (sdkp->max_unmap_blocks) - sd_config_discard(sdkp, SD_LBP_UNMAP); - else - sd_config_discard(sdkp, SD_LBP_WS16); - - } else { /* LBP VPD page tells us what to use */ - if (sdkp->lbpu && sdkp->max_unmap_blocks) - sd_config_discard(sdkp, SD_LBP_UNMAP); - else if (sdkp->lbpws) - sd_config_discard(sdkp, SD_LBP_WS16); - else if (sdkp->lbpws10) - sd_config_discard(sdkp, SD_LBP_WS10); - else - sd_config_discard(sdkp, SD_LBP_DISABLE); - } + sd_config_discard(sdkp, lim, sd_discard_mode(sdkp)); + +config_atomic: + sdkp->max_atomic = get_unaligned_be32(&vpd->data[44]); + sdkp->atomic_alignment = get_unaligned_be32(&vpd->data[48]); + sdkp->atomic_granularity = get_unaligned_be32(&vpd->data[52]); + sdkp->max_atomic_with_boundary = get_unaligned_be32(&vpd->data[56]); + sdkp->max_atomic_boundary = get_unaligned_be32(&vpd->data[60]); + + sd_config_atomic(sdkp, lim); } out: @@ -3268,13 +3392,10 @@ static void sd_read_block_limits_ext(struct scsi_disk *sdkp) rcu_read_unlock(); } -/** - * sd_read_block_characteristics - Query block dev. 
characteristics - * @sdkp: disk to query - */ -static void sd_read_block_characteristics(struct scsi_disk *sdkp) +/* Query block device characteristics */ +static void sd_read_block_characteristics(struct scsi_disk *sdkp, + struct queue_limits *lim) { - struct request_queue *q = sdkp->disk->queue; struct scsi_vpd *vpd; u16 rot; @@ -3290,37 +3411,13 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp) sdkp->zoned = (vpd->data[8] >> 4) & 3; rcu_read_unlock(); - if (rot == 1) { - blk_queue_flag_set(QUEUE_FLAG_NONROT, q); - blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); - } - - -#ifdef CONFIG_BLK_DEV_ZONED /* sd_probe rejects ZBD devices early otherwise */ - if (sdkp->device->type == TYPE_ZBC) { - /* - * Host-managed. - */ - disk_set_zoned(sdkp->disk); - - /* - * Per ZBC and ZAC specifications, writes in sequential write - * required zones of host-managed devices must be aligned to - * the device physical block size. - */ - blk_queue_zone_write_granularity(q, sdkp->physical_block_size); - } else { - /* - * Host-aware devices are treated as conventional. - */ - WARN_ON_ONCE(blk_queue_is_zoned(q)); - } -#endif /* CONFIG_BLK_DEV_ZONED */ + if (rot == 1) + lim->features &= ~(BLK_FEAT_ROTATIONAL | BLK_FEAT_ADD_RANDOM); if (!sdkp->first_scan) return; - if (blk_queue_is_zoned(q)) + if (sdkp->device->type == TYPE_ZBC) sd_printk(KERN_NOTICE, sdkp, "Host-managed zoned block device\n"); else if (sdkp->zoned == 1) sd_printk(KERN_NOTICE, sdkp, "Host-aware SMR disk used as regular disk\n"); @@ -3601,10 +3698,11 @@ static int sd_revalidate_disk(struct gendisk *disk) { struct scsi_disk *sdkp = scsi_disk(disk); struct scsi_device *sdp = sdkp->device; - struct request_queue *q = sdkp->disk->queue; sector_t old_capacity = sdkp->capacity; + struct queue_limits lim; unsigned char *buffer; - unsigned int dev_max, rw_max; + unsigned int dev_max; + int err; SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_revalidate_disk\n")); @@ -3625,12 +3723,14 @@ static int sd_revalidate_disk(struct gendisk *disk) sd_spinup_disk(sdkp); + lim = queue_limits_start_update(sdkp->disk->queue); + /* * Without media there is no reason to ask; moreover, some devices * react badly if we do. */ if (sdkp->media_present) { - sd_read_capacity(sdkp, buffer); + sd_read_capacity(sdkp, &lim, buffer); /* * Some USB/UAS devices return generic values for mode pages * until the media has been accessed. Trigger a READ operation @@ -3644,15 +3744,14 @@ static int sd_revalidate_disk(struct gendisk *disk) * cause this to be updated correctly and any device which * doesn't support it should be treated as rotational. */ - blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); - blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q); + lim.features |= (BLK_FEAT_ROTATIONAL | BLK_FEAT_ADD_RANDOM); if (scsi_device_supports_vpd(sdp)) { sd_read_block_provisioning(sdkp); - sd_read_block_limits(sdkp); + sd_read_block_limits(sdkp, &lim); sd_read_block_limits_ext(sdkp); - sd_read_block_characteristics(sdkp); - sd_zbc_read_zones(sdkp, buffer); + sd_read_block_characteristics(sdkp, &lim); + sd_zbc_read_zones(sdkp, &lim, buffer); sd_read_cpr(sdkp); } @@ -3664,64 +3763,50 @@ static int sd_revalidate_disk(struct gendisk *disk) sd_read_app_tag_own(sdkp, buffer); sd_read_write_same(sdkp, buffer); sd_read_security(sdkp, buffer); - sd_config_protection(sdkp); + sd_config_protection(sdkp, &lim); } /* * We now have all cache related info, determine how we deal * with flush requests. 
*/ - sd_set_flush_flag(sdkp); + sd_set_flush_flag(sdkp, &lim); /* Initial block count limit based on CDB TRANSFER LENGTH field size. */ dev_max = sdp->use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS; /* Some devices report a maximum block count for READ/WRITE requests. */ dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks); - q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max); + lim.max_dev_sectors = logical_to_sectors(sdp, dev_max); if (sd_validate_min_xfer_size(sdkp)) - blk_queue_io_min(sdkp->disk->queue, - logical_to_bytes(sdp, sdkp->min_xfer_blocks)); + lim.io_min = logical_to_bytes(sdp, sdkp->min_xfer_blocks); else - blk_queue_io_min(sdkp->disk->queue, 0); - - if (sd_validate_opt_xfer_size(sdkp, dev_max)) { - q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks); - rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks); - } else { - q->limits.io_opt = 0; - rw_max = min_not_zero(logical_to_sectors(sdp, dev_max), - (sector_t)BLK_DEF_MAX_SECTORS_CAP); - } + lim.io_min = 0; /* * Limit default to SCSI host optimal sector limit if set. There may be * an impact on performance for when the size of a request exceeds this * host limit. */ - rw_max = min_not_zero(rw_max, sdp->host->opt_sectors); - - /* Do not exceed controller limit */ - rw_max = min(rw_max, queue_max_hw_sectors(q)); - - /* - * Only update max_sectors if previously unset or if the current value - * exceeds the capabilities of the hardware. - */ - if (sdkp->first_scan || - q->limits.max_sectors > q->limits.max_dev_sectors || - q->limits.max_sectors > q->limits.max_hw_sectors) { - q->limits.max_sectors = rw_max; - q->limits.max_user_sectors = rw_max; + lim.io_opt = sdp->host->opt_sectors << SECTOR_SHIFT; + if (sd_validate_opt_xfer_size(sdkp, dev_max)) { + lim.io_opt = min_not_zero(lim.io_opt, + logical_to_bytes(sdp, sdkp->opt_xfer_blocks)); } sdkp->first_scan = 0; set_capacity_and_notify(disk, logical_to_sectors(sdp, sdkp->capacity)); - sd_config_write_same(sdkp); + sd_config_write_same(sdkp, &lim); kfree(buffer); + blk_mq_freeze_queue(sdkp->disk->queue); + err = queue_limits_commit_update(sdkp->disk->queue, &lim); + blk_mq_unfreeze_queue(sdkp->disk->queue); + if (err) + return err; + /* * For a zoned drive, revalidating the zones can be done only once * the gendisk capacity is set. 
So if this fails, set back the gendisk @@ -4119,8 +4204,6 @@ static int sd_resume(struct device *dev) { struct scsi_disk *sdkp = dev_get_drvdata(dev); - sd_printk(KERN_NOTICE, sdkp, "Starting disk\n"); - if (opal_unlock_from_suspend(sdkp->opal_dev)) { sd_printk(KERN_NOTICE, sdkp, "OPAL unlock failed\n"); return -EIO; @@ -4137,12 +4220,13 @@ static int sd_resume_common(struct device *dev, bool runtime) if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */ return 0; + sd_printk(KERN_NOTICE, sdkp, "Starting disk\n"); + if (!sd_do_start_stop(sdkp->device, runtime)) { sdkp->suspended = false; return 0; } - sd_printk(KERN_NOTICE, sdkp, "Starting disk\n"); ret = sd_start_stop_device(sdkp, 1); if (!ret) { sd_resume(dev); diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index 49dd600bfa48..36382eca941c 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h @@ -115,6 +115,13 @@ struct scsi_disk { u32 max_unmap_blocks; u32 unmap_granularity; u32 unmap_alignment; + + u32 max_atomic; + u32 atomic_alignment; + u32 atomic_granularity; + u32 max_atomic_with_boundary; + u32 max_atomic_boundary; + u32 index; unsigned int physical_block_size; unsigned int max_medium_access_timeouts; @@ -148,6 +155,7 @@ struct scsi_disk { unsigned security : 1; unsigned ignore_medium_access_errors : 1; unsigned rscs : 1; /* reduced stream control support */ + unsigned use_atomic_write_boundary : 1; }; #define to_scsi_disk(obj) container_of(obj, struct scsi_disk, disk_dev) @@ -220,26 +228,12 @@ static inline sector_t sectors_to_logical(struct scsi_device *sdev, sector_t sec return sector >> (ilog2(sdev->sector_size) - 9); } -#ifdef CONFIG_BLK_DEV_INTEGRITY - -extern void sd_dif_config_host(struct scsi_disk *); - -#else /* CONFIG_BLK_DEV_INTEGRITY */ - -static inline void sd_dif_config_host(struct scsi_disk *disk) -{ -} - -#endif /* CONFIG_BLK_DEV_INTEGRITY */ - -static inline int sd_is_zoned(struct scsi_disk *sdkp) -{ - return sdkp->zoned == 1 || sdkp->device->type == TYPE_ZBC; -} +void sd_dif_config_host(struct scsi_disk *sdkp, struct queue_limits *lim); #ifdef CONFIG_BLK_DEV_ZONED -int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE]); +int sd_zbc_read_zones(struct scsi_disk *sdkp, struct queue_limits *lim, + u8 buf[SD_BUF_SIZE]); int sd_zbc_revalidate_zones(struct scsi_disk *sdkp); blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd, unsigned char op, bool all); @@ -250,7 +244,8 @@ int sd_zbc_report_zones(struct gendisk *disk, sector_t sector, #else /* CONFIG_BLK_DEV_ZONED */ -static inline int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE]) +static inline int sd_zbc_read_zones(struct scsi_disk *sdkp, + struct queue_limits *lim, u8 buf[SD_BUF_SIZE]) { return 0; } diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c index 1df847b5f747..ae6ce6f5d622 100644 --- a/drivers/scsi/sd_dif.c +++ b/drivers/scsi/sd_dif.c @@ -24,14 +24,15 @@ /* * Configure exchange of protection information between OS and HBA. 
*/ -void sd_dif_config_host(struct scsi_disk *sdkp) +void sd_dif_config_host(struct scsi_disk *sdkp, struct queue_limits *lim) { struct scsi_device *sdp = sdkp->device; - struct gendisk *disk = sdkp->disk; u8 type = sdkp->protection_type; - struct blk_integrity bi; + struct blk_integrity *bi = &lim->integrity; int dif, dix; + memset(bi, 0, sizeof(*bi)); + dif = scsi_host_dif_capable(sdp->host, type); dix = scsi_host_dix_capable(sdp->host, type); @@ -39,45 +40,33 @@ void sd_dif_config_host(struct scsi_disk *sdkp) dif = 0; dix = 1; } - if (!dix) { - blk_integrity_unregister(disk); + if (!dix) return; - } - - memset(&bi, 0, sizeof(bi)); /* Enable DMA of protection information */ - if (scsi_host_get_guard(sdkp->device->host) & SHOST_DIX_GUARD_IP) { - if (type == T10_PI_TYPE3_PROTECTION) - bi.profile = &t10_pi_type3_ip; - else - bi.profile = &t10_pi_type1_ip; + if (scsi_host_get_guard(sdkp->device->host) & SHOST_DIX_GUARD_IP) + bi->csum_type = BLK_INTEGRITY_CSUM_IP; + else + bi->csum_type = BLK_INTEGRITY_CSUM_CRC; - bi.flags |= BLK_INTEGRITY_IP_CHECKSUM; - } else - if (type == T10_PI_TYPE3_PROTECTION) - bi.profile = &t10_pi_type3_crc; - else - bi.profile = &t10_pi_type1_crc; + if (type != T10_PI_TYPE3_PROTECTION) + bi->flags |= BLK_INTEGRITY_REF_TAG; - bi.tuple_size = sizeof(struct t10_pi_tuple); + bi->tuple_size = sizeof(struct t10_pi_tuple); if (dif && type) { - bi.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; + bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE; if (!sdkp->ATO) - goto out; + return; if (type == T10_PI_TYPE3_PROTECTION) - bi.tag_size = sizeof(u16) + sizeof(u32); + bi->tag_size = sizeof(u16) + sizeof(u32); else - bi.tag_size = sizeof(u16); + bi->tag_size = sizeof(u16); } sd_first_printk(KERN_NOTICE, sdkp, "Enabling DIX %s, application tag size %u bytes\n", - bi.profile->name, bi.tag_size); -out: - blk_integrity_register(disk, &bi); + blk_integrity_profile_name(bi), bi->tag_size); } - diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index 806036e48abe..c8b9654d30f0 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -232,7 +232,7 @@ int sd_zbc_report_zones(struct gendisk *disk, sector_t sector, int zone_idx = 0; int ret; - if (!sd_is_zoned(sdkp)) + if (sdkp->device->type != TYPE_ZBC) /* Not a zoned device */ return -EOPNOTSUPP; @@ -300,7 +300,7 @@ static blk_status_t sd_zbc_cmnd_checks(struct scsi_cmnd *cmd) struct scsi_disk *sdkp = scsi_disk(rq->q->disk); sector_t sector = blk_rq_pos(rq); - if (!sd_is_zoned(sdkp)) + if (sdkp->device->type != TYPE_ZBC) /* Not a zoned device */ return BLK_STS_IOERR; @@ -521,7 +521,7 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf, static void sd_zbc_print_zones(struct scsi_disk *sdkp) { - if (!sd_is_zoned(sdkp) || !sdkp->capacity) + if (sdkp->device->type != TYPE_ZBC || !sdkp->capacity) return; if (sdkp->capacity & (sdkp->zone_info.zone_blocks - 1)) @@ -565,12 +565,6 @@ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp) sdkp->zone_info.zone_blocks = zone_blocks; sdkp->zone_info.nr_zones = nr_zones; - blk_queue_chunk_sectors(q, - logical_to_sectors(sdkp->device, zone_blocks)); - - /* Enable block layer zone append emulation */ - blk_queue_max_zone_append_sectors(q, 0); - flags = memalloc_noio_save(); ret = blk_revalidate_disk_zones(disk); memalloc_noio_restore(flags); @@ -588,27 +582,31 @@ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp) /** * sd_zbc_read_zones - Read zone information and update the request queue * @sdkp: SCSI disk pointer. 
+ * @lim: queue limits to read into * @buf: 512 byte buffer used for storing SCSI command output. * * Read zone information and update the request queue zone characteristics and * also the zoned device information in *sdkp. Called by sd_revalidate_disk() * before the gendisk capacity has been set. */ -int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE]) +int sd_zbc_read_zones(struct scsi_disk *sdkp, struct queue_limits *lim, + u8 buf[SD_BUF_SIZE]) { - struct gendisk *disk = sdkp->disk; - struct request_queue *q = disk->queue; unsigned int nr_zones; u32 zone_blocks = 0; int ret; - if (!sd_is_zoned(sdkp)) { - /* - * Device managed or normal SCSI disk, no special handling - * required. - */ + if (sdkp->device->type != TYPE_ZBC) return 0; - } + + lim->features |= BLK_FEAT_ZONED; + + /* + * Per ZBC and ZAC specifications, writes in sequential write required + * zones of host-managed devices must be aligned to the device physical + * block size. + */ + lim->zone_write_granularity = sdkp->physical_block_size; /* READ16/WRITE16/SYNC16 is mandatory for ZBC devices */ sdkp->device->use_16_for_rw = 1; @@ -625,18 +623,20 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE]) if (ret != 0) goto err; - /* The drive satisfies the kernel restrictions: set it up */ - blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q); - if (sdkp->zones_max_open == U32_MAX) - disk_set_max_open_zones(disk, 0); - else - disk_set_max_open_zones(disk, sdkp->zones_max_open); - disk_set_max_active_zones(disk, 0); nr_zones = round_up(sdkp->capacity, zone_blocks) >> ilog2(zone_blocks); - sdkp->early_zone_info.nr_zones = nr_zones; sdkp->early_zone_info.zone_blocks = zone_blocks; + /* The drive satisfies the kernel restrictions: set it up */ + if (sdkp->zones_max_open == U32_MAX) + lim->max_open_zones = 0; + else + lim->max_open_zones = sdkp->zones_max_open; + lim->max_active_zones = 0; + lim->chunk_sectors = logical_to_sectors(sdkp->device, zone_blocks); + /* Enable block layer zone append emulation */ + lim->max_zone_append_sectors = 0; + return 0; err: diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 7ab000942b97..3f491019103e 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -111,7 +111,7 @@ static struct lock_class_key sr_bio_compl_lkclass; static int sr_open(struct cdrom_device_info *, int); static void sr_release(struct cdrom_device_info *); -static void get_sectorsize(struct scsi_cd *); +static int get_sectorsize(struct scsi_cd *); static int get_capabilities(struct scsi_cd *); static unsigned int sr_check_events(struct cdrom_device_info *cdi, @@ -473,15 +473,15 @@ static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt) return BLK_STS_IOERR; } -static void sr_revalidate_disk(struct scsi_cd *cd) +static int sr_revalidate_disk(struct scsi_cd *cd) { struct scsi_sense_hdr sshdr; /* if the unit is not ready, nothing more to do */ if (scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr)) - return; + return 0; sr_cd_check(&cd->cdi); - get_sectorsize(cd); + return get_sectorsize(cd); } static int sr_block_open(struct gendisk *disk, blk_mode_t mode) @@ -494,13 +494,16 @@ static int sr_block_open(struct gendisk *disk, blk_mode_t mode) return -ENXIO; scsi_autopm_get_device(sdev); - if (disk_check_media_change(disk)) - sr_revalidate_disk(cd); + if (disk_check_media_change(disk)) { + ret = sr_revalidate_disk(cd); + if (ret) + goto out; + } mutex_lock(&cd->lock); ret = cdrom_open(&cd->cdi, mode); mutex_unlock(&cd->lock); - +out: scsi_autopm_put_device(sdev); if (ret) 
scsi_device_put(cd->device); @@ -685,7 +688,9 @@ static int sr_probe(struct device *dev) blk_pm_runtime_init(sdev->request_queue, dev); dev_set_drvdata(dev, cd); - sr_revalidate_disk(cd); + error = sr_revalidate_disk(cd); + if (error) + goto unregister_cdrom; error = device_add_disk(&sdev->sdev_gendev, disk, NULL); if (error) @@ -714,13 +719,14 @@ fail: } -static void get_sectorsize(struct scsi_cd *cd) +static int get_sectorsize(struct scsi_cd *cd) { + struct request_queue *q = cd->device->request_queue; static const u8 cmd[10] = { READ_CAPACITY }; unsigned char buffer[8] = { }; - int the_result; + struct queue_limits lim; + int err; int sector_size; - struct request_queue *queue; struct scsi_failure failure_defs[] = { { .result = SCMD_FAILURE_RESULT_ANY, @@ -736,10 +742,10 @@ static void get_sectorsize(struct scsi_cd *cd) }; /* Do the command and wait.. */ - the_result = scsi_execute_cmd(cd->device, cmd, REQ_OP_DRV_IN, buffer, + err = scsi_execute_cmd(cd->device, cmd, REQ_OP_DRV_IN, buffer, sizeof(buffer), SR_TIMEOUT, MAX_RETRIES, &exec_args); - if (the_result) { + if (err) { cd->capacity = 0x1fffff; sector_size = 2048; /* A guess, just in case */ } else { @@ -789,10 +795,12 @@ static void get_sectorsize(struct scsi_cd *cd) set_capacity(cd->disk, cd->capacity); } - queue = cd->device->request_queue; - blk_queue_logical_block_size(queue, sector_size); - - return; + lim = queue_limits_start_update(q); + lim.logical_block_size = sector_size; + blk_mq_freeze_queue(q); + err = queue_limits_commit_update(q, &lim); + blk_mq_unfreeze_queue(q); + return err; } static int get_capabilities(struct scsi_cd *cd) diff --git a/drivers/soc/amlogic/meson-clk-measure.c b/drivers/soc/amlogic/meson-clk-measure.c index 3f3039600357..a6453ffeb753 100644 --- a/drivers/soc/amlogic/meson-clk-measure.c +++ b/drivers/soc/amlogic/meson-clk-measure.c @@ -688,4 +688,5 @@ static struct platform_driver meson_msr_driver = { }, }; module_platform_driver(meson_msr_driver); +MODULE_DESCRIPTION("Amlogic Meson SoC Clock Measure driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/amlogic/meson-gx-socinfo.c b/drivers/soc/amlogic/meson-gx-socinfo.c index 6abb730344ab..8809a948201a 100644 --- a/drivers/soc/amlogic/meson-gx-socinfo.c +++ b/drivers/soc/amlogic/meson-gx-socinfo.c @@ -63,7 +63,9 @@ static const struct meson_gx_package_id { { "962X", 0x24, 0x10, 0xf0 }, { "962E", 0x24, 0x20, 0xf0 }, { "A113X", 0x25, 0x37, 0xff }, + { "A113X", 0x25, 0x43, 0xff }, { "A113D", 0x25, 0x22, 0xff }, + { "S905L", 0x26, 0, 0x0 }, { "S905D2", 0x28, 0x10, 0xf0 }, { "S905Y2", 0x28, 0x30, 0xf0 }, { "S905X2", 0x28, 0x40, 0xf0 }, diff --git a/drivers/soc/imx/soc-imx8m.c b/drivers/soc/imx/soc-imx8m.c index ec87d9d878f3..fe111bae38c8 100644 --- a/drivers/soc/imx/soc-imx8m.c +++ b/drivers/soc/imx/soc-imx8m.c @@ -252,4 +252,5 @@ free_soc: return ret; } device_initcall(imx8_soc_init); +MODULE_DESCRIPTION("NXP i.MX8M SoC driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/soc/ixp4xx/ixp4xx-npe.c b/drivers/soc/ixp4xx/ixp4xx-npe.c index 35825ee95dff..34a6f187c220 100644 --- a/drivers/soc/ixp4xx/ixp4xx-npe.c +++ b/drivers/soc/ixp4xx/ixp4xx-npe.c @@ -764,6 +764,7 @@ static struct platform_driver ixp4xx_npe_driver = { module_platform_driver(ixp4xx_npe_driver); MODULE_AUTHOR("Krzysztof Halasa"); +MODULE_DESCRIPTION("Intel IXP4xx Network Processor Engine driver"); MODULE_LICENSE("GPL v2"); MODULE_FIRMWARE(NPE_A_FIRMWARE); MODULE_FIRMWARE(NPE_B_FIRMWARE); diff --git a/drivers/soc/ixp4xx/ixp4xx-qmgr.c b/drivers/soc/ixp4xx/ixp4xx-qmgr.c index 
244ad8d7e80b..cb112f3643e9 100644 --- a/drivers/soc/ixp4xx/ixp4xx-qmgr.c +++ b/drivers/soc/ixp4xx/ixp4xx-qmgr.c @@ -465,6 +465,7 @@ static struct platform_driver ixp4xx_qmgr_driver = { }; module_platform_driver(ixp4xx_qmgr_driver); +MODULE_DESCRIPTION("Intel IXP4xx Queue Manager driver"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Krzysztof Halasa"); diff --git a/drivers/soc/litex/Kconfig b/drivers/soc/litex/Kconfig index e6ba3573a772..f3f869639588 100644 --- a/drivers/soc/litex/Kconfig +++ b/drivers/soc/litex/Kconfig @@ -7,7 +7,7 @@ config LITEX config LITEX_SOC_CONTROLLER tristate "Enable LiteX SoC Controller driver" - depends on OF || COMPILE_TEST + depends on OF depends on HAS_IOMEM select LITEX help diff --git a/drivers/soc/litex/litex_soc_ctrl.c b/drivers/soc/litex/litex_soc_ctrl.c index 10813299aa10..72c44119dd54 100644 --- a/drivers/soc/litex/litex_soc_ctrl.c +++ b/drivers/soc/litex/litex_soc_ctrl.c @@ -82,13 +82,11 @@ static int litex_reset_handler(struct notifier_block *this, unsigned long mode, return NOTIFY_DONE; } -#ifdef CONFIG_OF static const struct of_device_id litex_soc_ctrl_of_match[] = { {.compatible = "litex,soc-controller"}, {}, }; MODULE_DEVICE_TABLE(of, litex_soc_ctrl_of_match); -#endif /* CONFIG_OF */ static int litex_soc_ctrl_probe(struct platform_device *pdev) { @@ -130,7 +128,7 @@ static void litex_soc_ctrl_remove(struct platform_device *pdev) static struct platform_driver litex_soc_ctrl_driver = { .driver = { .name = "litex-soc-controller", - .of_match_table = of_match_ptr(litex_soc_ctrl_of_match) + .of_match_table = litex_soc_ctrl_of_match, }, .probe = litex_soc_ctrl_probe, .remove_new = litex_soc_ctrl_remove, diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c index 046522664dc1..a8fccedba83f 100644 --- a/drivers/soc/mediatek/mtk-cmdq-helper.c +++ b/drivers/soc/mediatek/mtk-cmdq-helper.c @@ -15,6 +15,7 @@ /* dedicate the last GPR_R15 to assign the register address to be poll */ #define CMDQ_POLL_ADDR_GPR (15) #define CMDQ_EOC_IRQ_EN BIT(0) +#define CMDQ_IMMEDIATE_VALUE 0 #define CMDQ_REG_TYPE 1 #define CMDQ_JUMP_RELATIVE 0 #define CMDQ_JUMP_ABSOLUTE 1 @@ -45,6 +46,16 @@ struct cmdq_instruction { u8 op; }; +static inline u8 cmdq_operand_get_type(struct cmdq_operand *op) +{ + return op->reg ? CMDQ_REG_TYPE : CMDQ_IMMEDIATE_VALUE; +} + +static inline u16 cmdq_operand_get_idx_value(struct cmdq_operand *op) +{ + return op->reg ? 
op->idx : op->value; +} + int cmdq_dev_get_client_reg(struct device *dev, struct cmdq_client_reg *client_reg, int idx) { @@ -461,6 +472,29 @@ int cmdq_pkt_poll_addr(struct cmdq_pkt *pkt, dma_addr_t addr, u32 value, u32 mas } EXPORT_SYMBOL(cmdq_pkt_poll_addr); +int cmdq_pkt_logic_command(struct cmdq_pkt *pkt, u16 result_reg_idx, + struct cmdq_operand *left_operand, + enum cmdq_logic_op s_op, + struct cmdq_operand *right_operand) +{ + struct cmdq_instruction inst = { {0} }; + + if (!left_operand || !right_operand || s_op >= CMDQ_LOGIC_MAX) + return -EINVAL; + + inst.op = CMDQ_CODE_LOGIC; + inst.dst_t = CMDQ_REG_TYPE; + inst.src_t = cmdq_operand_get_type(left_operand); + inst.arg_c_t = cmdq_operand_get_type(right_operand); + inst.sop = s_op; + inst.reg_dst = result_reg_idx; + inst.src_reg = cmdq_operand_get_idx_value(left_operand); + inst.arg_c = cmdq_operand_get_idx_value(right_operand); + + return cmdq_pkt_append_command(pkt, inst); +} +EXPORT_SYMBOL(cmdq_pkt_logic_command); + int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value) { struct cmdq_instruction inst = {}; @@ -526,4 +560,5 @@ int cmdq_pkt_finalize(struct cmdq_pkt *pkt) } EXPORT_SYMBOL(cmdq_pkt_finalize); +MODULE_DESCRIPTION("MediaTek Command Queue (CMDQ) driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/mediatek/mtk-mmsys.c b/drivers/soc/mediatek/mtk-mmsys.c index f370f4ec4b88..938240714e54 100644 --- a/drivers/soc/mediatek/mtk-mmsys.c +++ b/drivers/soc/mediatek/mtk-mmsys.c @@ -236,6 +236,7 @@ void mtk_mmsys_mixer_in_config(struct device *dev, int idx, bool alpha_sel, u16 mtk_mmsys_update_bits(mmsys, MT8195_VDO1_MIXER_IN1_ALPHA + (idx - 1) * 4, ~0, alpha << 16 | alpha, cmdq_pkt); + mtk_mmsys_update_bits(mmsys, MT8195_VDO1_HDR_TOP_CFG, BIT(15 + idx), 0, cmdq_pkt); mtk_mmsys_update_bits(mmsys, MT8195_VDO1_HDR_TOP_CFG, BIT(19 + idx), alpha_sel << (19 + idx), cmdq_pkt); mtk_mmsys_update_bits(mmsys, MT8195_VDO1_MIXER_IN1_PAD + (idx - 1) * 4, diff --git a/drivers/soc/mediatek/mtk-mutex.c b/drivers/soc/mediatek/mtk-mutex.c index b5af1fb5847e..01b129caf1eb 100644 --- a/drivers/soc/mediatek/mtk-mutex.c +++ b/drivers/soc/mediatek/mtk-mutex.c @@ -524,6 +524,7 @@ static const unsigned int mt8188_mdp_mutex_table_mod[MUTEX_MOD_IDX_MAX] = { [MUTEX_MOD_IDX_MDP_PAD0] = MT8195_MUTEX_MOD_MDP_PAD0, [MUTEX_MOD_IDX_MDP_PAD2] = MT8195_MUTEX_MOD_MDP_PAD2, [MUTEX_MOD_IDX_MDP_PAD3] = MT8195_MUTEX_MOD_MDP_PAD3, + [MUTEX_MOD_IDX_MDP_TCC0] = MT8195_MUTEX_MOD_MDP_TCC0, [MUTEX_MOD_IDX_MDP_WROT0] = MT8195_MUTEX_MOD_MDP_WROT0, [MUTEX_MOD_IDX_MDP_WROT2] = MT8195_MUTEX_MOD_MDP_WROT2, [MUTEX_MOD_IDX_MDP_WROT3] = MT8195_MUTEX_MOD_MDP_WROT3, diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index 5af33b0e3470..7f02f0525933 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -72,11 +72,28 @@ config QCOM_OCMEM requirements. This is typically used by the GPU, camera/video, and audio components on some Snapdragon SoCs. +config QCOM_PD_MAPPER + tristate "Qualcomm Protection Domain Mapper" + select QCOM_QMI_HELPERS + select QCOM_PDR_MSG + select AUXILIARY_BUS + depends on NET && QRTR + default QCOM_RPROC_COMMON + help + The Protection Domain Mapper maps registered services to the domains + and instances handled by the remote DSPs. This is a kernel-space + implementation of the service. It is a simpler alternative to the + userspace daemon. 
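As a usage sketch for the cmdq_pkt_logic_command() helper added to mtk-cmdq-helper.c above: each operand is either a hardware register index or an immediate value, selected by the .reg flag that the helper inspects. The enum value CMDQ_LOGIC_ADD and the surrounding client setup are assumed for illustration, not taken from this diff:

	#include <linux/soc/mediatek/mtk-cmdq.h>

	/* Encode "register 0 = register 1 + 16" into a GCE command packet. */
	static int emit_add(struct cmdq_pkt *pkt)
	{
		struct cmdq_operand lhs = { .reg = true, .idx = 1 };	/* register */
		struct cmdq_operand rhs = { .reg = false, .value = 16 };	/* immediate */

		return cmdq_pkt_logic_command(pkt, 0, &lhs, CMDQ_LOGIC_ADD, &rhs);
	}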
+ config QCOM_PDR_HELPERS tristate select QCOM_QMI_HELPERS + select QCOM_PDR_MSG depends on NET +config QCOM_PDR_MSG + tristate + config QCOM_PMIC_PDCHARGER_ULOG tristate "Qualcomm PMIC PDCharger ULOG driver" depends on RPMSG @@ -194,6 +211,7 @@ config QCOM_SMP2P config QCOM_SMSM tristate "Qualcomm Shared Memory State Machine" + depends on MAILBOX depends on QCOM_SMEM select QCOM_SMEM_STATE select IRQ_DOMAIN diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index ca0bece0dfff..d3560f861085 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile @@ -7,7 +7,9 @@ obj-$(CONFIG_QCOM_COMMAND_DB) += cmd-db.o obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o obj-$(CONFIG_QCOM_MDT_LOADER) += mdt_loader.o obj-$(CONFIG_QCOM_OCMEM) += ocmem.o +obj-$(CONFIG_QCOM_PD_MAPPER) += qcom_pd_mapper.o obj-$(CONFIG_QCOM_PDR_HELPERS) += pdr_interface.o +obj-$(CONFIG_QCOM_PDR_MSG) += qcom_pdr_msg.o obj-$(CONFIG_QCOM_PMIC_GLINK) += pmic_glink.o obj-$(CONFIG_QCOM_PMIC_GLINK) += pmic_glink_altmode.o obj-$(CONFIG_QCOM_PMIC_PDCHARGER_ULOG) += pmic_pdcharger_ulog.o diff --git a/drivers/soc/qcom/icc-bwmon.c b/drivers/soc/qcom/icc-bwmon.c index fb323b3364db..e7851974084b 100644 --- a/drivers/soc/qcom/icc-bwmon.c +++ b/drivers/soc/qcom/icc-bwmon.c @@ -565,7 +565,7 @@ static void bwmon_start(struct icc_bwmon *bwmon) int window; /* No need to check for errors, as this must have succeeded before. */ - dev_pm_opp_find_bw_ceil(bwmon->dev, &bw_low, 0); + dev_pm_opp_put(dev_pm_opp_find_bw_ceil(bwmon->dev, &bw_low, 0)); bwmon_clear_counters(bwmon, true); @@ -772,18 +772,25 @@ static int bwmon_probe(struct platform_device *pdev) opp = dev_pm_opp_find_bw_floor(dev, &bwmon->max_bw_kbps, 0); if (IS_ERR(opp)) return dev_err_probe(dev, PTR_ERR(opp), "failed to find max peak bandwidth\n"); + dev_pm_opp_put(opp); bwmon->min_bw_kbps = 0; opp = dev_pm_opp_find_bw_ceil(dev, &bwmon->min_bw_kbps, 0); if (IS_ERR(opp)) return dev_err_probe(dev, PTR_ERR(opp), "failed to find min peak bandwidth\n"); + dev_pm_opp_put(opp); bwmon->dev = dev; bwmon_disable(bwmon); - ret = devm_request_threaded_irq(dev, bwmon->irq, bwmon_intr, - bwmon_intr_thread, - IRQF_ONESHOT, dev_name(dev), bwmon); + + /* + * SoCs with multiple cpu-bwmon instances can end up using a shared interrupt + * line. 
Using the devm_ variant might result in the IRQ handler being executed + * after bwmon_disable in bwmon_remove() + */ + ret = request_threaded_irq(bwmon->irq, bwmon_intr, bwmon_intr_thread, + IRQF_ONESHOT | IRQF_SHARED, dev_name(dev), bwmon); if (ret) return dev_err_probe(dev, ret, "failed to request IRQ\n"); @@ -798,6 +805,7 @@ static void bwmon_remove(struct platform_device *pdev) struct icc_bwmon *bwmon = platform_get_drvdata(pdev); bwmon_disable(bwmon); + free_irq(bwmon->irq, bwmon); } static const struct icc_bwmon_data msm8998_bwmon_data = { diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c index cbef0dea1d5d..37e11e501728 100644 --- a/drivers/soc/qcom/llcc-qcom.c +++ b/drivers/soc/qcom/llcc-qcom.c @@ -7,6 +7,7 @@ #include <linux/bitfield.h> #include <linux/bitmap.h> #include <linux/bitops.h> +#include <linux/cleanup.h> #include <linux/device.h> #include <linux/io.h> #include <linux/kernel.h> @@ -150,6 +151,25 @@ enum llcc_reg_offset { LLCC_COMMON_STATUS0, }; +static const struct llcc_slice_config sa8775p_data[] = { + {LLCC_CPUSS, 1, 2048, 1, 0, 0x00FF, 0x0, 0, 0, 0, 1, 1, 0, 0}, + {LLCC_VIDSC0, 2, 512, 3, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0}, + {LLCC_CPUSS1, 3, 1024, 1, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0}, + {LLCC_CPUHWT, 5, 512, 1, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0}, + {LLCC_AUDIO, 6, 1024, 1, 1, 0x00FF, 0x0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_CMPT, 10, 4096, 1, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0}, + {LLCC_GPUHTW, 11, 1024, 1, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0}, + {LLCC_GPU, 12, 1024, 1, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 1, 0}, + {LLCC_MMUHWT, 13, 1024, 1, 1, 0x00FF, 0x0, 0, 0, 0, 0, 1, 0, 0}, + {LLCC_CMPTDMA, 15, 1024, 1, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0}, + {LLCC_DISP, 16, 4096, 2, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0}, + {LLCC_VIDFW, 17, 3072, 1, 0, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0}, + {LLCC_AUDHW, 22, 1024, 1, 1, 0x00FF, 0x0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_CVP, 28, 256, 3, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0}, + {LLCC_APTCM, 30, 1024, 3, 1, 0x0, 0xF0, 1, 0, 0, 1, 0, 0, 0}, + {LLCC_WRCACHE, 31, 512, 1, 1, 0x00FF, 0x0, 0, 0, 0, 0, 1, 0, 0}, +}; + static const struct llcc_slice_config sc7180_data[] = { { LLCC_CPUSS, 1, 256, 1, 0, 0xf, 0x0, 0, 0, 0, 1, 1 }, { LLCC_MDM, 8, 128, 1, 0, 0xf, 0x0, 0, 0, 0, 1, 0 }, @@ -552,6 +572,16 @@ static const struct qcom_llcc_config qdu1000_cfg[] = { }, }; +static const struct qcom_llcc_config sa8775p_cfg[] = { + { + .sct_data = sa8775p_data, + .size = ARRAY_SIZE(sa8775p_data), + .need_llcc_cfg = true, + .reg_offset = llcc_v2_1_reg_offset, + .edac_reg_offset = &llcc_v2_1_edac_reg_offset, + }, +}; + static const struct qcom_llcc_config sc7180_cfg[] = { { .sct_data = sc7180_data, @@ -698,6 +728,11 @@ static const struct qcom_sct_config qdu1000_cfgs = { .num_config = ARRAY_SIZE(qdu1000_cfg), }; +static const struct qcom_sct_config sa8775p_cfgs = { + .llcc_config = sa8775p_cfg, + .num_config = ARRAY_SIZE(sa8775p_cfg), +}; + static const struct qcom_sct_config sc7180_cfgs = { .llcc_config = sc7180_cfg, .num_config = ARRAY_SIZE(sc7180_cfg), @@ -821,6 +856,7 @@ EXPORT_SYMBOL_GPL(llcc_slice_putd); static int llcc_update_act_ctrl(u32 sid, u32 act_ctrl_reg_val, u32 status) { + struct regmap *regmap; u32 act_ctrl_reg; u32 act_clear_reg; u32 status_reg; @@ -849,7 +885,8 @@ static int llcc_update_act_ctrl(u32 sid, return ret; if (drv_data->version >= LLCC_VERSION_4_1_0_0) { - ret = regmap_read_poll_timeout(drv_data->bcast_regmap, status_reg, + regmap = drv_data->bcast_and_regmap ?: drv_data->bcast_regmap; + ret = 
regmap_read_poll_timeout(regmap, status_reg, slice_status, (slice_status & ACT_COMPLETE), 0, LLCC_STATUS_READ_DELAY); if (ret) @@ -1258,16 +1295,13 @@ static int qcom_llcc_probe(struct platform_device *pdev) /* Initialize rest of LLCC bank regmaps */ for (i = 1; i < num_banks; i++) { - char *base = kasprintf(GFP_KERNEL, "llcc%d_base", i); + char *base __free(kfree) = kasprintf(GFP_KERNEL, "llcc%d_base", i); drv_data->regmaps[i] = qcom_llcc_init_mmio(pdev, i, base); if (IS_ERR(drv_data->regmaps[i])) { ret = PTR_ERR(drv_data->regmaps[i]); - kfree(base); goto err; } - - kfree(base); } drv_data->bcast_regmap = qcom_llcc_init_mmio(pdev, i, "llcc_broadcast_base"); @@ -1284,6 +1318,18 @@ static int qcom_llcc_probe(struct platform_device *pdev) drv_data->version = version; + /* Applicable only when drv_data->version >= 4.1 */ + if (drv_data->version >= LLCC_VERSION_4_1_0_0) { + drv_data->bcast_and_regmap = qcom_llcc_init_mmio(pdev, i + 1, "llcc_broadcast_and_base"); + if (IS_ERR(drv_data->bcast_and_regmap)) { + ret = PTR_ERR(drv_data->bcast_and_regmap); + if (ret == -EINVAL) + drv_data->bcast_and_regmap = NULL; + else + goto err; + } + } + llcc_cfg = cfg->sct_data; sz = cfg->size; @@ -1332,6 +1378,7 @@ err: static const struct of_device_id qcom_llcc_of_match[] = { { .compatible = "qcom,qdu1000-llcc", .data = &qdu1000_cfgs}, + { .compatible = "qcom,sa8775p-llcc", .data = &sa8775p_cfgs }, { .compatible = "qcom,sc7180-llcc", .data = &sc7180_cfgs }, { .compatible = "qcom,sc7280-llcc", .data = &sc7280_cfgs }, { .compatible = "qcom,sc8180x-llcc", .data = &sc8180x_cfgs }, diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c index 6f177e46fa0f..b2c0fb55d4ae 100644 --- a/drivers/soc/qcom/mdt_loader.c +++ b/drivers/soc/qcom/mdt_loader.c @@ -7,6 +7,7 @@ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. */ +#include <linux/cleanup.h> #include <linux/device.h> #include <linux/elf.h> #include <linux/firmware.h> @@ -37,13 +38,12 @@ static ssize_t mdt_load_split_segment(void *ptr, const struct elf32_phdr *phdrs, { const struct elf32_phdr *phdr = &phdrs[segment]; const struct firmware *seg_fw; - char *seg_name; ssize_t ret; if (strlen(fw_name) < 4) return -EINVAL; - seg_name = kstrdup(fw_name, GFP_KERNEL); + char *seg_name __free(kfree) = kstrdup(fw_name, GFP_KERNEL); if (!seg_name) return -ENOMEM; @@ -52,7 +52,6 @@ static ssize_t mdt_load_split_segment(void *ptr, const struct elf32_phdr *phdrs, ptr, phdr->p_filesz); if (ret) { dev_err(dev, "error %zd loading %s\n", ret, seg_name); - kfree(seg_name); return ret; } @@ -64,7 +63,6 @@ static ssize_t mdt_load_split_segment(void *ptr, const struct elf32_phdr *phdrs, } release_firmware(seg_fw); - kfree(seg_name); return ret; } diff --git a/drivers/soc/qcom/ocmem.c b/drivers/soc/qcom/ocmem.c index e8841d247953..6b6dd80cbc0f 100644 --- a/drivers/soc/qcom/ocmem.c +++ b/drivers/soc/qcom/ocmem.c @@ -10,6 +10,7 @@ */ #include <linux/bitfield.h> +#include <linux/cleanup.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/kernel.h> @@ -216,7 +217,6 @@ EXPORT_SYMBOL_GPL(of_get_ocmem); struct ocmem_buf *ocmem_allocate(struct ocmem *ocmem, enum ocmem_client client, unsigned long size) { - struct ocmem_buf *buf; int ret; /* TODO: add support for other clients... 
*/ @@ -229,7 +229,7 @@ struct ocmem_buf *ocmem_allocate(struct ocmem *ocmem, enum ocmem_client client, if (test_and_set_bit_lock(BIT(client), &ocmem->active_allocations)) return ERR_PTR(-EBUSY); - buf = kzalloc(sizeof(*buf), GFP_KERNEL); + struct ocmem_buf *buf __free(kfree) = kzalloc(sizeof(*buf), GFP_KERNEL); if (!buf) { ret = -ENOMEM; goto err_unlock; @@ -247,7 +247,7 @@ struct ocmem_buf *ocmem_allocate(struct ocmem *ocmem, enum ocmem_client client, if (ret) { dev_err(ocmem->dev, "could not lock: %d\n", ret); ret = -EINVAL; - goto err_kfree; + goto err_unlock; } } else { ocmem_write(ocmem, OCMEM_REG_GFX_MPU_START, buf->offset); @@ -258,10 +258,8 @@ struct ocmem_buf *ocmem_allocate(struct ocmem *ocmem, enum ocmem_client client, dev_dbg(ocmem->dev, "using %ldK of OCMEM at 0x%08lx for client %d\n", size / 1024, buf->addr, client); - return buf; + return_ptr(buf); -err_kfree: - kfree(buf); err_unlock: clear_bit_unlock(BIT(client), &ocmem->active_allocations); diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c index a1b6a4081dea..328b6153b2be 100644 --- a/drivers/soc/qcom/pdr_interface.c +++ b/drivers/soc/qcom/pdr_interface.c @@ -3,6 +3,7 @@ * Copyright (C) 2020 The Linux Foundation. All rights reserved. */ +#include <linux/cleanup.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> @@ -76,12 +77,12 @@ static int pdr_locator_new_server(struct qmi_handle *qmi, locator_hdl); struct pdr_service *pds; + mutex_lock(&pdr->lock); /* Create a local client port for QMI communication */ pdr->locator_addr.sq_family = AF_QIPCRTR; pdr->locator_addr.sq_node = svc->node; pdr->locator_addr.sq_port = svc->port; - mutex_lock(&pdr->lock); pdr->locator_init_complete = true; mutex_unlock(&pdr->lock); @@ -104,10 +105,10 @@ static void pdr_locator_del_server(struct qmi_handle *qmi, mutex_lock(&pdr->lock); pdr->locator_init_complete = false; - mutex_unlock(&pdr->lock); pdr->locator_addr.sq_node = 0; pdr->locator_addr.sq_port = 0; + mutex_unlock(&pdr->lock); } static const struct qmi_ops pdr_locator_ops = { @@ -365,12 +366,14 @@ static int pdr_get_domain_list(struct servreg_get_domain_list_req *req, if (ret < 0) return ret; + mutex_lock(&pdr->lock); ret = qmi_send_request(&pdr->locator_hdl, &pdr->locator_addr, &txn, SERVREG_GET_DOMAIN_LIST_REQ, SERVREG_GET_DOMAIN_LIST_REQ_MAX_LEN, servreg_get_domain_list_req_ei, req); + mutex_unlock(&pdr->lock); if (ret < 0) { qmi_txn_cancel(&txn); return ret; @@ -394,13 +397,13 @@ static int pdr_get_domain_list(struct servreg_get_domain_list_req *req, static int pdr_locate_service(struct pdr_handle *pdr, struct pdr_service *pds) { - struct servreg_get_domain_list_resp *resp; struct servreg_get_domain_list_req req; struct servreg_location_entry *entry; int domains_read = 0; int ret, i; - resp = kzalloc(sizeof(*resp), GFP_KERNEL); + struct servreg_get_domain_list_resp *resp __free(kfree) = kzalloc(sizeof(*resp), + GFP_KERNEL); if (!resp) return -ENOMEM; @@ -413,9 +416,9 @@ static int pdr_locate_service(struct pdr_handle *pdr, struct pdr_service *pds) req.domain_offset = domains_read; ret = pdr_get_domain_list(&req, resp, pdr); if (ret < 0) - goto out; + return ret; - for (i = domains_read; i < resp->domain_list_len; i++) { + for (i = 0; i < resp->domain_list_len; i++) { entry = &resp->domain_list[i]; if (strnlen(entry->name, sizeof(entry->name)) == sizeof(entry->name)) @@ -425,7 +428,7 @@ static int pdr_locate_service(struct pdr_handle *pdr, struct pdr_service *pds) pds->service_data_valid = entry->service_data_valid; 
pds->service_data = entry->service_data; pds->instance = entry->instance; - goto out; + return 0; } } @@ -438,8 +441,7 @@ static int pdr_locate_service(struct pdr_handle *pdr, struct pdr_service *pds) domains_read += resp->domain_list_len; } while (domains_read < resp->total_domains); -out: - kfree(resp); + return ret; } @@ -515,8 +517,7 @@ struct pdr_service *pdr_add_lookup(struct pdr_handle *pdr, const char *service_name, const char *service_path) { - struct pdr_service *pds, *tmp; - int ret; + struct pdr_service *tmp; if (IS_ERR_OR_NULL(pdr)) return ERR_PTR(-EINVAL); @@ -525,7 +526,7 @@ struct pdr_service *pdr_add_lookup(struct pdr_handle *pdr, !service_path || strlen(service_path) > SERVREG_NAME_LENGTH) return ERR_PTR(-EINVAL); - pds = kzalloc(sizeof(*pds), GFP_KERNEL); + struct pdr_service *pds __free(kfree) = kzalloc(sizeof(*pds), GFP_KERNEL); if (!pds) return ERR_PTR(-ENOMEM); @@ -540,8 +541,7 @@ struct pdr_service *pdr_add_lookup(struct pdr_handle *pdr, continue; mutex_unlock(&pdr->list_lock); - ret = -EALREADY; - goto err; + return ERR_PTR(-EALREADY); } list_add(&pds->node, &pdr->lookups); @@ -549,10 +549,7 @@ struct pdr_service *pdr_add_lookup(struct pdr_handle *pdr, schedule_work(&pdr->locator_work); - return pds; -err: - kfree(pds); - return ERR_PTR(ret); + return_ptr(pds); } EXPORT_SYMBOL_GPL(pdr_add_lookup); @@ -649,13 +646,12 @@ struct pdr_handle *pdr_handle_alloc(void (*status)(int state, char *service_path, void *priv), void *priv) { - struct pdr_handle *pdr; int ret; if (!status) return ERR_PTR(-EINVAL); - pdr = kzalloc(sizeof(*pdr), GFP_KERNEL); + struct pdr_handle *pdr __free(kfree) = kzalloc(sizeof(*pdr), GFP_KERNEL); if (!pdr) return ERR_PTR(-ENOMEM); @@ -674,10 +670,8 @@ struct pdr_handle *pdr_handle_alloc(void (*status)(int state, INIT_WORK(&pdr->indack_work, pdr_indack_work); pdr->notifier_wq = create_singlethread_workqueue("pdr_notifier_wq"); - if (!pdr->notifier_wq) { - ret = -ENOMEM; - goto free_pdr_handle; - } + if (!pdr->notifier_wq) + return ERR_PTR(-ENOMEM); pdr->indack_wq = alloc_ordered_workqueue("pdr_indack_wq", WQ_HIGHPRI); if (!pdr->indack_wq) { @@ -702,7 +696,7 @@ struct pdr_handle *pdr_handle_alloc(void (*status)(int state, if (ret < 0) goto release_qmi_handle; - return pdr; + return_ptr(pdr); release_qmi_handle: qmi_handle_release(&pdr->locator_hdl); @@ -710,8 +704,6 @@ destroy_indack: destroy_workqueue(pdr->indack_wq); destroy_notifier: destroy_workqueue(pdr->notifier_wq); -free_pdr_handle: - kfree(pdr); return ERR_PTR(ret); } diff --git a/drivers/soc/qcom/pdr_internal.h b/drivers/soc/qcom/pdr_internal.h index 03c282b7f17e..8d17f7fb79e7 100644 --- a/drivers/soc/qcom/pdr_internal.h +++ b/drivers/soc/qcom/pdr_internal.h @@ -13,6 +13,8 @@ #define SERVREG_SET_ACK_REQ 0x23 #define SERVREG_RESTART_PD_REQ 0x24 +#define SERVREG_LOC_PFR_REQ 0x24 + #define SERVREG_DOMAIN_LIST_LENGTH 32 #define SERVREG_RESTART_PD_REQ_MAX_LEN 67 #define SERVREG_REGISTER_LISTENER_REQ_LEN 71 @@ -20,6 +22,7 @@ #define SERVREG_GET_DOMAIN_LIST_REQ_MAX_LEN 74 #define SERVREG_STATE_UPDATED_IND_MAX_LEN 79 #define SERVREG_GET_DOMAIN_LIST_RESP_MAX_LEN 2389 +#define SERVREG_LOC_PFR_RESP_MAX_LEN 10 struct servreg_location_entry { char name[SERVREG_NAME_LENGTH + 1]; @@ -28,83 +31,12 @@ struct servreg_location_entry { u32 instance; }; -static const struct qmi_elem_info servreg_location_entry_ei[] = { - { - .data_type = QMI_STRING, - .elem_len = SERVREG_NAME_LENGTH + 1, - .elem_size = sizeof(char), - .array_type = NO_ARRAY, - .tlv_type = 0, - .offset = offsetof(struct servreg_location_entry, 
- name), - }, - { - .data_type = QMI_UNSIGNED_4_BYTE, - .elem_len = 1, - .elem_size = sizeof(u32), - .array_type = NO_ARRAY, - .tlv_type = 0, - .offset = offsetof(struct servreg_location_entry, - instance), - }, - { - .data_type = QMI_UNSIGNED_1_BYTE, - .elem_len = 1, - .elem_size = sizeof(u8), - .array_type = NO_ARRAY, - .tlv_type = 0, - .offset = offsetof(struct servreg_location_entry, - service_data_valid), - }, - { - .data_type = QMI_UNSIGNED_4_BYTE, - .elem_len = 1, - .elem_size = sizeof(u32), - .array_type = NO_ARRAY, - .tlv_type = 0, - .offset = offsetof(struct servreg_location_entry, - service_data), - }, - {} -}; - struct servreg_get_domain_list_req { char service_name[SERVREG_NAME_LENGTH + 1]; u8 domain_offset_valid; u32 domain_offset; }; -static const struct qmi_elem_info servreg_get_domain_list_req_ei[] = { - { - .data_type = QMI_STRING, - .elem_len = SERVREG_NAME_LENGTH + 1, - .elem_size = sizeof(char), - .array_type = NO_ARRAY, - .tlv_type = 0x01, - .offset = offsetof(struct servreg_get_domain_list_req, - service_name), - }, - { - .data_type = QMI_OPT_FLAG, - .elem_len = 1, - .elem_size = sizeof(u8), - .array_type = NO_ARRAY, - .tlv_type = 0x10, - .offset = offsetof(struct servreg_get_domain_list_req, - domain_offset_valid), - }, - { - .data_type = QMI_UNSIGNED_4_BYTE, - .elem_len = 1, - .elem_size = sizeof(u32), - .array_type = NO_ARRAY, - .tlv_type = 0x10, - .offset = offsetof(struct servreg_get_domain_list_req, - domain_offset), - }, - {} -}; - struct servreg_get_domain_list_resp { struct qmi_response_type_v01 resp; u8 total_domains_valid; @@ -116,264 +48,60 @@ struct servreg_get_domain_list_resp { struct servreg_location_entry domain_list[SERVREG_DOMAIN_LIST_LENGTH]; }; -static const struct qmi_elem_info servreg_get_domain_list_resp_ei[] = { - { - .data_type = QMI_STRUCT, - .elem_len = 1, - .elem_size = sizeof(struct qmi_response_type_v01), - .array_type = NO_ARRAY, - .tlv_type = 0x02, - .offset = offsetof(struct servreg_get_domain_list_resp, - resp), - .ei_array = qmi_response_type_v01_ei, - }, - { - .data_type = QMI_OPT_FLAG, - .elem_len = 1, - .elem_size = sizeof(u8), - .array_type = NO_ARRAY, - .tlv_type = 0x10, - .offset = offsetof(struct servreg_get_domain_list_resp, - total_domains_valid), - }, - { - .data_type = QMI_UNSIGNED_2_BYTE, - .elem_len = 1, - .elem_size = sizeof(u16), - .array_type = NO_ARRAY, - .tlv_type = 0x10, - .offset = offsetof(struct servreg_get_domain_list_resp, - total_domains), - }, - { - .data_type = QMI_OPT_FLAG, - .elem_len = 1, - .elem_size = sizeof(u8), - .array_type = NO_ARRAY, - .tlv_type = 0x11, - .offset = offsetof(struct servreg_get_domain_list_resp, - db_rev_count_valid), - }, - { - .data_type = QMI_UNSIGNED_2_BYTE, - .elem_len = 1, - .elem_size = sizeof(u16), - .array_type = NO_ARRAY, - .tlv_type = 0x11, - .offset = offsetof(struct servreg_get_domain_list_resp, - db_rev_count), - }, - { - .data_type = QMI_OPT_FLAG, - .elem_len = 1, - .elem_size = sizeof(u8), - .array_type = NO_ARRAY, - .tlv_type = 0x12, - .offset = offsetof(struct servreg_get_domain_list_resp, - domain_list_valid), - }, - { - .data_type = QMI_DATA_LEN, - .elem_len = 1, - .elem_size = sizeof(u8), - .array_type = NO_ARRAY, - .tlv_type = 0x12, - .offset = offsetof(struct servreg_get_domain_list_resp, - domain_list_len), - }, - { - .data_type = QMI_STRUCT, - .elem_len = SERVREG_DOMAIN_LIST_LENGTH, - .elem_size = sizeof(struct servreg_location_entry), - .array_type = VAR_LEN_ARRAY, - .tlv_type = 0x12, - .offset = offsetof(struct servreg_get_domain_list_resp, - 
domain_list), - .ei_array = servreg_location_entry_ei, - }, - {} -}; - struct servreg_register_listener_req { u8 enable; char service_path[SERVREG_NAME_LENGTH + 1]; }; -static const struct qmi_elem_info servreg_register_listener_req_ei[] = { - { - .data_type = QMI_UNSIGNED_1_BYTE, - .elem_len = 1, - .elem_size = sizeof(u8), - .array_type = NO_ARRAY, - .tlv_type = 0x01, - .offset = offsetof(struct servreg_register_listener_req, - enable), - }, - { - .data_type = QMI_STRING, - .elem_len = SERVREG_NAME_LENGTH + 1, - .elem_size = sizeof(char), - .array_type = NO_ARRAY, - .tlv_type = 0x02, - .offset = offsetof(struct servreg_register_listener_req, - service_path), - }, - {} -}; - struct servreg_register_listener_resp { struct qmi_response_type_v01 resp; u8 curr_state_valid; enum servreg_service_state curr_state; }; -static const struct qmi_elem_info servreg_register_listener_resp_ei[] = { - { - .data_type = QMI_STRUCT, - .elem_len = 1, - .elem_size = sizeof(struct qmi_response_type_v01), - .array_type = NO_ARRAY, - .tlv_type = 0x02, - .offset = offsetof(struct servreg_register_listener_resp, - resp), - .ei_array = qmi_response_type_v01_ei, - }, - { - .data_type = QMI_OPT_FLAG, - .elem_len = 1, - .elem_size = sizeof(u8), - .array_type = NO_ARRAY, - .tlv_type = 0x10, - .offset = offsetof(struct servreg_register_listener_resp, - curr_state_valid), - }, - { - .data_type = QMI_SIGNED_4_BYTE_ENUM, - .elem_len = 1, - .elem_size = sizeof(enum servreg_service_state), - .array_type = NO_ARRAY, - .tlv_type = 0x10, - .offset = offsetof(struct servreg_register_listener_resp, - curr_state), - }, - {} -}; - struct servreg_restart_pd_req { char service_path[SERVREG_NAME_LENGTH + 1]; }; -static const struct qmi_elem_info servreg_restart_pd_req_ei[] = { - { - .data_type = QMI_STRING, - .elem_len = SERVREG_NAME_LENGTH + 1, - .elem_size = sizeof(char), - .array_type = NO_ARRAY, - .tlv_type = 0x01, - .offset = offsetof(struct servreg_restart_pd_req, - service_path), - }, - {} -}; - struct servreg_restart_pd_resp { struct qmi_response_type_v01 resp; }; -static const struct qmi_elem_info servreg_restart_pd_resp_ei[] = { - { - .data_type = QMI_STRUCT, - .elem_len = 1, - .elem_size = sizeof(struct qmi_response_type_v01), - .array_type = NO_ARRAY, - .tlv_type = 0x02, - .offset = offsetof(struct servreg_restart_pd_resp, - resp), - .ei_array = qmi_response_type_v01_ei, - }, - {} -}; - struct servreg_state_updated_ind { enum servreg_service_state curr_state; char service_path[SERVREG_NAME_LENGTH + 1]; u16 transaction_id; }; -static const struct qmi_elem_info servreg_state_updated_ind_ei[] = { - { - .data_type = QMI_SIGNED_4_BYTE_ENUM, - .elem_len = 1, - .elem_size = sizeof(u32), - .array_type = NO_ARRAY, - .tlv_type = 0x01, - .offset = offsetof(struct servreg_state_updated_ind, - curr_state), - }, - { - .data_type = QMI_STRING, - .elem_len = SERVREG_NAME_LENGTH + 1, - .elem_size = sizeof(char), - .array_type = NO_ARRAY, - .tlv_type = 0x02, - .offset = offsetof(struct servreg_state_updated_ind, - service_path), - }, - { - .data_type = QMI_UNSIGNED_2_BYTE, - .elem_len = 1, - .elem_size = sizeof(u16), - .array_type = NO_ARRAY, - .tlv_type = 0x03, - .offset = offsetof(struct servreg_state_updated_ind, - transaction_id), - }, - {} -}; - struct servreg_set_ack_req { char service_path[SERVREG_NAME_LENGTH + 1]; u16 transaction_id; }; -static const struct qmi_elem_info servreg_set_ack_req_ei[] = { - { - .data_type = QMI_STRING, - .elem_len = SERVREG_NAME_LENGTH + 1, - .elem_size = sizeof(char), - .array_type = NO_ARRAY, - .tlv_type 
= 0x01, - .offset = offsetof(struct servreg_set_ack_req, - service_path), - }, - { - .data_type = QMI_UNSIGNED_2_BYTE, - .elem_len = 1, - .elem_size = sizeof(u16), - .array_type = NO_ARRAY, - .tlv_type = 0x02, - .offset = offsetof(struct servreg_set_ack_req, - transaction_id), - }, - {} -}; - struct servreg_set_ack_resp { struct qmi_response_type_v01 resp; }; -static const struct qmi_elem_info servreg_set_ack_resp_ei[] = { - { - .data_type = QMI_STRUCT, - .elem_len = 1, - .elem_size = sizeof(struct qmi_response_type_v01), - .array_type = NO_ARRAY, - .tlv_type = 0x02, - .offset = offsetof(struct servreg_set_ack_resp, - resp), - .ei_array = qmi_response_type_v01_ei, - }, - {} +struct servreg_loc_pfr_req { + char service[SERVREG_NAME_LENGTH + 1]; + char reason[257]; }; +struct servreg_loc_pfr_resp { + struct qmi_response_type_v01 rsp; +}; + +extern const struct qmi_elem_info servreg_location_entry_ei[]; +extern const struct qmi_elem_info servreg_get_domain_list_req_ei[]; +extern const struct qmi_elem_info servreg_get_domain_list_resp_ei[]; +extern const struct qmi_elem_info servreg_register_listener_req_ei[]; +extern const struct qmi_elem_info servreg_register_listener_resp_ei[]; +extern const struct qmi_elem_info servreg_restart_pd_req_ei[]; +extern const struct qmi_elem_info servreg_restart_pd_resp_ei[]; +extern const struct qmi_elem_info servreg_state_updated_ind_ei[]; +extern const struct qmi_elem_info servreg_set_ack_req_ei[]; +extern const struct qmi_elem_info servreg_set_ack_resp_ei[]; +extern const struct qmi_elem_info servreg_loc_pfr_req_ei[]; +extern const struct qmi_elem_info servreg_loc_pfr_resp_ei[]; + #endif diff --git a/drivers/soc/qcom/pmic_glink.c b/drivers/soc/qcom/pmic_glink.c index 40fb09d69014..9ebc0ba35947 100644 --- a/drivers/soc/qcom/pmic_glink.c +++ b/drivers/soc/qcom/pmic_glink.c @@ -348,11 +348,15 @@ static void pmic_glink_remove(struct platform_device *pdev) mutex_unlock(&__pmic_glink_lock); } +static const unsigned long pmic_glink_sc8280xp_client_mask = BIT(PMIC_GLINK_CLIENT_BATT) | + BIT(PMIC_GLINK_CLIENT_ALTMODE); + static const unsigned long pmic_glink_sm8450_client_mask = BIT(PMIC_GLINK_CLIENT_BATT) | BIT(PMIC_GLINK_CLIENT_ALTMODE) | BIT(PMIC_GLINK_CLIENT_UCSI); static const struct of_device_id pmic_glink_of_match[] = { + { .compatible = "qcom,sc8280xp-pmic-glink", .data = &pmic_glink_sc8280xp_client_mask }, { .compatible = "qcom,pmic-glink", .data = &pmic_glink_sm8450_client_mask }, {} }; @@ -369,8 +373,17 @@ static struct platform_driver pmic_glink_driver = { static int pmic_glink_init(void) { - platform_driver_register(&pmic_glink_driver); - register_rpmsg_driver(&pmic_glink_rpmsg_driver); + int ret; + + ret = platform_driver_register(&pmic_glink_driver); + if (ret < 0) + return ret; + + ret = register_rpmsg_driver(&pmic_glink_rpmsg_driver); + if (ret < 0) { + platform_driver_unregister(&pmic_glink_driver); + return ret; + } return 0; } diff --git a/drivers/soc/qcom/pmic_glink_altmode.c b/drivers/soc/qcom/pmic_glink_altmode.c index b3808fc24c69..1e0808b3cb93 100644 --- a/drivers/soc/qcom/pmic_glink_altmode.c +++ b/drivers/soc/qcom/pmic_glink_altmode.c @@ -20,7 +20,7 @@ #include <linux/soc/qcom/pmic_glink.h> -#define PMIC_GLINK_MAX_PORTS 2 +#define PMIC_GLINK_MAX_PORTS 3 #define USBC_SC8180X_NOTIFY_IND 0x13 #define USBC_CMD_WRITE_REQ 0x15 diff --git a/drivers/soc/qcom/qcom_pd_mapper.c b/drivers/soc/qcom/qcom_pd_mapper.c new file mode 100644 index 000000000000..a4c007080665 --- /dev/null +++ b/drivers/soc/qcom/qcom_pd_mapper.c @@ -0,0 +1,677 @@ +// 
SPDX-License-Identifier: GPL-2.0-only +/* + * Qualcomm Protection Domain mapper + * + * Copyright (c) 2023 Linaro Ltd. + */ + +#include <linux/auxiliary_bus.h> +#include <linux/kernel.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/refcount.h> +#include <linux/slab.h> +#include <linux/soc/qcom/qmi.h> + +#include "pdr_internal.h" + +#define SERVREG_QMI_VERSION 0x101 +#define SERVREG_QMI_INSTANCE 0 + +#define TMS_SERVREG_SERVICE "tms/servreg" + +struct qcom_pdm_domain_data { + const char *domain; + u32 instance_id; + /* NULL-terminated array */ + const char * services[]; +}; + +struct qcom_pdm_domain { + struct list_head list; + const char *name; + u32 instance_id; +}; + +struct qcom_pdm_service { + struct list_head list; + struct list_head domains; + const char *name; +}; + +struct qcom_pdm_data { + refcount_t refcnt; + struct qmi_handle handle; + struct list_head services; +}; + +static DEFINE_MUTEX(qcom_pdm_mutex); /* protects __qcom_pdm_data */ +static struct qcom_pdm_data *__qcom_pdm_data; + +static struct qcom_pdm_service *qcom_pdm_find(struct qcom_pdm_data *data, + const char *name) +{ + struct qcom_pdm_service *service; + + list_for_each_entry(service, &data->services, list) { + if (!strcmp(service->name, name)) + return service; + } + + return NULL; +} + +static int qcom_pdm_add_service_domain(struct qcom_pdm_data *data, + const char *service_name, + const char *domain_name, + u32 instance_id) +{ + struct qcom_pdm_service *service; + struct qcom_pdm_domain *domain; + + service = qcom_pdm_find(data, service_name); + if (service) { + list_for_each_entry(domain, &service->domains, list) { + if (!strcmp(domain->name, domain_name)) + return -EBUSY; + } + } else { + service = kzalloc(sizeof(*service), GFP_KERNEL); + if (!service) + return -ENOMEM; + + INIT_LIST_HEAD(&service->domains); + service->name = service_name; + + list_add_tail(&service->list, &data->services); + } + + domain = kzalloc(sizeof(*domain), GFP_KERNEL); + if (!domain) { + if (list_empty(&service->domains)) { + list_del(&service->list); + kfree(service); + } + + return -ENOMEM; + } + + domain->name = domain_name; + domain->instance_id = instance_id; + list_add_tail(&domain->list, &service->domains); + + return 0; +} + +static int qcom_pdm_add_domain(struct qcom_pdm_data *data, + const struct qcom_pdm_domain_data *domain) +{ + int ret; + int i; + + ret = qcom_pdm_add_service_domain(data, + TMS_SERVREG_SERVICE, + domain->domain, + domain->instance_id); + if (ret) + return ret; + + for (i = 0; domain->services[i]; i++) { + ret = qcom_pdm_add_service_domain(data, + domain->services[i], + domain->domain, + domain->instance_id); + if (ret) + return ret; + } + + return 0; + +} + +static void qcom_pdm_free_domains(struct qcom_pdm_data *data) +{ + struct qcom_pdm_service *service, *tservice; + struct qcom_pdm_domain *domain, *tdomain; + + list_for_each_entry_safe(service, tservice, &data->services, list) { + list_for_each_entry_safe(domain, tdomain, &service->domains, list) { + list_del(&domain->list); + kfree(domain); + } + + list_del(&service->list); + kfree(service); + } +} + +static void qcom_pdm_get_domain_list(struct qmi_handle *qmi, + struct sockaddr_qrtr *sq, + struct qmi_txn *txn, + const void *decoded) +{ + struct qcom_pdm_data *data = container_of(qmi, struct qcom_pdm_data, handle); + const struct servreg_get_domain_list_req *req = decoded; + struct servreg_get_domain_list_resp *rsp; + struct qcom_pdm_service *service; + u32 offset; + int ret; + + rsp = 
kzalloc(sizeof(*rsp), GFP_KERNEL); + if (!rsp) + return; + + offset = req->domain_offset_valid ? req->domain_offset : 0; + + rsp->resp.result = QMI_RESULT_SUCCESS_V01; + rsp->resp.error = QMI_ERR_NONE_V01; + + rsp->db_rev_count_valid = true; + rsp->db_rev_count = 1; + + rsp->total_domains_valid = true; + rsp->total_domains = 0; + + mutex_lock(&qcom_pdm_mutex); + + service = qcom_pdm_find(data, req->service_name); + if (service) { + struct qcom_pdm_domain *domain; + + rsp->domain_list_valid = true; + rsp->domain_list_len = 0; + + list_for_each_entry(domain, &service->domains, list) { + u32 i = rsp->total_domains++; + + if (i >= offset && i < SERVREG_DOMAIN_LIST_LENGTH) { + u32 j = rsp->domain_list_len++; + + strscpy(rsp->domain_list[j].name, domain->name, + sizeof(rsp->domain_list[i].name)); + rsp->domain_list[j].instance = domain->instance_id; + + pr_debug("PDM: found %s / %d\n", domain->name, + domain->instance_id); + } + } + } + + pr_debug("PDM: service '%s' offset %d returning %d domains (of %d)\n", req->service_name, + req->domain_offset_valid ? req->domain_offset : -1, rsp->domain_list_len, rsp->total_domains); + + ret = qmi_send_response(qmi, sq, txn, SERVREG_GET_DOMAIN_LIST_REQ, + SERVREG_GET_DOMAIN_LIST_RESP_MAX_LEN, + servreg_get_domain_list_resp_ei, rsp); + if (ret) + pr_err("Error sending servreg response: %d\n", ret); + + mutex_unlock(&qcom_pdm_mutex); + + kfree(rsp); +} + +static void qcom_pdm_pfr(struct qmi_handle *qmi, + struct sockaddr_qrtr *sq, + struct qmi_txn *txn, + const void *decoded) +{ + const struct servreg_loc_pfr_req *req = decoded; + struct servreg_loc_pfr_resp rsp = {}; + int ret; + + pr_warn_ratelimited("PDM: service '%s' crash: '%s'\n", req->service, req->reason); + + rsp.rsp.result = QMI_RESULT_SUCCESS_V01; + rsp.rsp.error = QMI_ERR_NONE_V01; + + ret = qmi_send_response(qmi, sq, txn, SERVREG_LOC_PFR_REQ, + SERVREG_LOC_PFR_RESP_MAX_LEN, + servreg_loc_pfr_resp_ei, &rsp); + if (ret) + pr_err("Error sending servreg response: %d\n", ret); +} + +static const struct qmi_msg_handler qcom_pdm_msg_handlers[] = { + { + .type = QMI_REQUEST, + .msg_id = SERVREG_GET_DOMAIN_LIST_REQ, + .ei = servreg_get_domain_list_req_ei, + .decoded_size = sizeof(struct servreg_get_domain_list_req), + .fn = qcom_pdm_get_domain_list, + }, + { + .type = QMI_REQUEST, + .msg_id = SERVREG_LOC_PFR_REQ, + .ei = servreg_loc_pfr_req_ei, + .decoded_size = sizeof(struct servreg_loc_pfr_req), + .fn = qcom_pdm_pfr, + }, + { }, +}; + +static const struct qcom_pdm_domain_data adsp_audio_pd = { + .domain = "msm/adsp/audio_pd", + .instance_id = 74, + .services = { + "avs/audio", + NULL, + }, +}; + +static const struct qcom_pdm_domain_data adsp_charger_pd = { + .domain = "msm/adsp/charger_pd", + .instance_id = 74, + .services = { NULL }, +}; + +static const struct qcom_pdm_domain_data adsp_root_pd = { + .domain = "msm/adsp/root_pd", + .instance_id = 74, + .services = { NULL }, +}; + +static const struct qcom_pdm_domain_data adsp_root_pd_pdr = { + .domain = "msm/adsp/root_pd", + .instance_id = 74, + .services = { + "tms/pdr_enabled", + NULL, + }, +}; + +static const struct qcom_pdm_domain_data adsp_sensor_pd = { + .domain = "msm/adsp/sensor_pd", + .instance_id = 74, + .services = { NULL }, +}; + +static const struct qcom_pdm_domain_data msm8996_adsp_audio_pd = { + .domain = "msm/adsp/audio_pd", + .instance_id = 4, + .services = { NULL }, +}; + +static const struct qcom_pdm_domain_data msm8996_adsp_root_pd = { + .domain = "msm/adsp/root_pd", + .instance_id = 4, + .services = { NULL }, +}; + +static const 
struct qcom_pdm_domain_data cdsp_root_pd = { + .domain = "msm/cdsp/root_pd", + .instance_id = 76, + .services = { NULL }, +}; + +static const struct qcom_pdm_domain_data slpi_root_pd = { + .domain = "msm/slpi/root_pd", + .instance_id = 90, + .services = { NULL }, +}; + +static const struct qcom_pdm_domain_data slpi_sensor_pd = { + .domain = "msm/slpi/sensor_pd", + .instance_id = 90, + .services = { NULL }, +}; + +static const struct qcom_pdm_domain_data mpss_root_pd = { + .domain = "msm/modem/root_pd", + .instance_id = 180, + .services = { + NULL, + }, +}; + +static const struct qcom_pdm_domain_data mpss_root_pd_gps = { + .domain = "msm/modem/root_pd", + .instance_id = 180, + .services = { + "gps/gps_service", + NULL, + }, +}; + +static const struct qcom_pdm_domain_data mpss_root_pd_gps_pdr = { + .domain = "msm/modem/root_pd", + .instance_id = 180, + .services = { + "gps/gps_service", + "tms/pdr_enabled", + NULL, + }, +}; + +static const struct qcom_pdm_domain_data msm8996_mpss_root_pd = { + .domain = "msm/modem/root_pd", + .instance_id = 100, + .services = { NULL }, +}; + +static const struct qcom_pdm_domain_data mpss_wlan_pd = { + .domain = "msm/modem/wlan_pd", + .instance_id = 180, + .services = { + "kernel/elf_loader", + "wlan/fw", + NULL, + }, +}; + +static const struct qcom_pdm_domain_data *msm8996_domains[] = { + &msm8996_adsp_audio_pd, + &msm8996_adsp_root_pd, + &msm8996_mpss_root_pd, + NULL, +}; + +static const struct qcom_pdm_domain_data *msm8998_domains[] = { + &mpss_root_pd, + &mpss_wlan_pd, + NULL, +}; + +static const struct qcom_pdm_domain_data *qcm2290_domains[] = { + &adsp_audio_pd, + &adsp_root_pd, + &adsp_sensor_pd, + &mpss_root_pd_gps, + &mpss_wlan_pd, + NULL, +}; + +static const struct qcom_pdm_domain_data *qcs404_domains[] = { + &adsp_audio_pd, + &adsp_root_pd, + &adsp_sensor_pd, + &cdsp_root_pd, + &mpss_root_pd, + &mpss_wlan_pd, + NULL, +}; + +static const struct qcom_pdm_domain_data *sc7180_domains[] = { + &adsp_audio_pd, + &adsp_root_pd_pdr, + &adsp_sensor_pd, + &mpss_root_pd_gps_pdr, + &mpss_wlan_pd, + NULL, +}; + +static const struct qcom_pdm_domain_data *sc7280_domains[] = { + &adsp_audio_pd, + &adsp_root_pd_pdr, + &adsp_charger_pd, + &adsp_sensor_pd, + &cdsp_root_pd, + &mpss_root_pd_gps_pdr, + NULL, +}; + +static const struct qcom_pdm_domain_data *sc8180x_domains[] = { + &adsp_audio_pd, + &adsp_root_pd, + &adsp_charger_pd, + &cdsp_root_pd, + &mpss_root_pd_gps, + &mpss_wlan_pd, + NULL, +}; + +static const struct qcom_pdm_domain_data *sc8280xp_domains[] = { + &adsp_audio_pd, + &adsp_root_pd_pdr, + &adsp_charger_pd, + &cdsp_root_pd, + NULL, +}; + +static const struct qcom_pdm_domain_data *sdm660_domains[] = { + &adsp_audio_pd, + &adsp_root_pd, + &adsp_sensor_pd, + &cdsp_root_pd, + &mpss_root_pd, + &mpss_wlan_pd, + NULL, +}; + +static const struct qcom_pdm_domain_data *sdm670_domains[] = { + &adsp_audio_pd, + &adsp_root_pd, + &cdsp_root_pd, + &mpss_root_pd, + &mpss_wlan_pd, + NULL, +}; + +static const struct qcom_pdm_domain_data *sdm845_domains[] = { + &adsp_audio_pd, + &adsp_root_pd, + &cdsp_root_pd, + &mpss_root_pd, + &mpss_wlan_pd, + &slpi_root_pd, + &slpi_sensor_pd, + NULL, +}; + +static const struct qcom_pdm_domain_data *sm6115_domains[] = { + &adsp_audio_pd, + &adsp_root_pd, + &adsp_sensor_pd, + &cdsp_root_pd, + &mpss_root_pd_gps, + &mpss_wlan_pd, + NULL, +}; + +static const struct qcom_pdm_domain_data *sm6350_domains[] = { + &adsp_audio_pd, + &adsp_root_pd, + &adsp_sensor_pd, + &cdsp_root_pd, + &mpss_wlan_pd, + NULL, +}; + +static const struct 
qcom_pdm_domain_data *sm8150_domains[] = { + &adsp_audio_pd, + &adsp_root_pd, + &cdsp_root_pd, + &mpss_root_pd_gps, + &mpss_wlan_pd, + NULL, +}; + +static const struct qcom_pdm_domain_data *sm8250_domains[] = { + &adsp_audio_pd, + &adsp_root_pd, + &cdsp_root_pd, + &slpi_root_pd, + &slpi_sensor_pd, + NULL, +}; + +static const struct qcom_pdm_domain_data *sm8350_domains[] = { + &adsp_audio_pd, + &adsp_root_pd_pdr, + &adsp_charger_pd, + &cdsp_root_pd, + &mpss_root_pd_gps, + &slpi_root_pd, + &slpi_sensor_pd, + NULL, +}; + +static const struct qcom_pdm_domain_data *sm8550_domains[] = { + &adsp_audio_pd, + &adsp_root_pd, + &adsp_charger_pd, + &adsp_sensor_pd, + &cdsp_root_pd, + &mpss_root_pd_gps, + NULL, +}; + +static const struct of_device_id qcom_pdm_domains[] = { + { .compatible = "qcom,apq8064", .data = NULL, }, + { .compatible = "qcom,apq8074", .data = NULL, }, + { .compatible = "qcom,apq8084", .data = NULL, }, + { .compatible = "qcom,apq8096", .data = msm8996_domains, }, + { .compatible = "qcom,msm8226", .data = NULL, }, + { .compatible = "qcom,msm8974", .data = NULL, }, + { .compatible = "qcom,msm8996", .data = msm8996_domains, }, + { .compatible = "qcom,msm8998", .data = msm8998_domains, }, + { .compatible = "qcom,qcm2290", .data = qcm2290_domains, }, + { .compatible = "qcom,qcs404", .data = qcs404_domains, }, + { .compatible = "qcom,sc7180", .data = sc7180_domains, }, + { .compatible = "qcom,sc7280", .data = sc7280_domains, }, + { .compatible = "qcom,sc8180x", .data = sc8180x_domains, }, + { .compatible = "qcom,sc8280xp", .data = sc8280xp_domains, }, + { .compatible = "qcom,sda660", .data = sdm660_domains, }, + { .compatible = "qcom,sdm660", .data = sdm660_domains, }, + { .compatible = "qcom,sdm670", .data = sdm670_domains, }, + { .compatible = "qcom,sdm845", .data = sdm845_domains, }, + { .compatible = "qcom,sm4250", .data = sm6115_domains, }, + { .compatible = "qcom,sm6115", .data = sm6115_domains, }, + { .compatible = "qcom,sm6350", .data = sm6350_domains, }, + { .compatible = "qcom,sm8150", .data = sm8150_domains, }, + { .compatible = "qcom,sm8250", .data = sm8250_domains, }, + { .compatible = "qcom,sm8350", .data = sm8350_domains, }, + { .compatible = "qcom,sm8450", .data = sm8350_domains, }, + { .compatible = "qcom,sm8550", .data = sm8550_domains, }, + { .compatible = "qcom,sm8650", .data = sm8550_domains, }, + {}, +}; + +static void qcom_pdm_stop(struct qcom_pdm_data *data) +{ + qcom_pdm_free_domains(data); + + /* The server is removed automatically */ + qmi_handle_release(&data->handle); + + kfree(data); +} + +static struct qcom_pdm_data *qcom_pdm_start(void) +{ + const struct qcom_pdm_domain_data * const *domains; + const struct of_device_id *match; + struct qcom_pdm_data *data; + struct device_node *root; + int ret, i; + + root = of_find_node_by_path("/"); + if (!root) + return ERR_PTR(-ENODEV); + + match = of_match_node(qcom_pdm_domains, root); + of_node_put(root); + if (!match) { + pr_notice("PDM: no support for the platform, userspace daemon might be required.\n"); + return ERR_PTR(-ENODEV); + } + + domains = match->data; + if (!domains) { + pr_debug("PDM: no domains\n"); + return ERR_PTR(-ENODEV); + } + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&data->services); + + ret = qmi_handle_init(&data->handle, SERVREG_GET_DOMAIN_LIST_REQ_MAX_LEN, + NULL, qcom_pdm_msg_handlers); + if (ret) { + kfree(data); + return ERR_PTR(ret); + } + + refcount_set(&data->refcnt, 1); + + for (i = 0; domains[i]; i++) { + ret = 
qcom_pdm_add_domain(data, domains[i]); + if (ret) + goto err_stop; + } + + ret = qmi_add_server(&data->handle, SERVREG_LOCATOR_SERVICE, + SERVREG_QMI_VERSION, SERVREG_QMI_INSTANCE); + if (ret) { + pr_err("PDM: error adding server %d\n", ret); + goto err_stop; + } + + return data; + +err_stop: + qcom_pdm_stop(data); + + return ERR_PTR(ret); +} + +static int qcom_pdm_probe(struct auxiliary_device *auxdev, + const struct auxiliary_device_id *id) +{ + struct qcom_pdm_data *data; + int ret = 0; + + mutex_lock(&qcom_pdm_mutex); + + if (!__qcom_pdm_data) { + data = qcom_pdm_start(); + + if (IS_ERR(data)) + ret = PTR_ERR(data); + else + __qcom_pdm_data = data; + } else { + refcount_inc(&__qcom_pdm_data->refcnt); + } + + auxiliary_set_drvdata(auxdev, __qcom_pdm_data); + + mutex_unlock(&qcom_pdm_mutex); + + return ret; +} + +static void qcom_pdm_remove(struct auxiliary_device *auxdev) +{ + struct qcom_pdm_data *data; + + data = auxiliary_get_drvdata(auxdev); + if (!data) + return; + + if (refcount_dec_and_mutex_lock(&data->refcnt, &qcom_pdm_mutex)) { + __qcom_pdm_data = NULL; + qcom_pdm_stop(data); + mutex_unlock(&qcom_pdm_mutex); + } +} + +static const struct auxiliary_device_id qcom_pdm_table[] = { + { .name = "qcom_common.pd-mapper" }, + {}, +}; +MODULE_DEVICE_TABLE(auxiliary, qcom_pdm_table); + +static struct auxiliary_driver qcom_pdm_drv = { + .name = "qcom-pdm-mapper", + .id_table = qcom_pdm_table, + .probe = qcom_pdm_probe, + .remove = qcom_pdm_remove, +}; +module_auxiliary_driver(qcom_pdm_drv); + +MODULE_DESCRIPTION("Qualcomm Protection Domain Mapper"); +MODULE_LICENSE("GPL"); diff --git a/drivers/soc/qcom/qcom_pdr_msg.c b/drivers/soc/qcom/qcom_pdr_msg.c new file mode 100644 index 000000000000..bf3e4a47165e --- /dev/null +++ b/drivers/soc/qcom/qcom_pdr_msg.c @@ -0,0 +1,353 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020 The Linux Foundation. All rights reserved. 
+ */ + +#include <linux/module.h> +#include <linux/soc/qcom/qmi.h> + +#include "pdr_internal.h" + +const struct qmi_elem_info servreg_location_entry_ei[] = { + { + .data_type = QMI_STRING, + .elem_len = SERVREG_NAME_LENGTH + 1, + .elem_size = sizeof(char), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct servreg_location_entry, + name), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct servreg_location_entry, + instance), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct servreg_location_entry, + service_data_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct servreg_location_entry, + service_data), + }, + {} +}; +EXPORT_SYMBOL_GPL(servreg_location_entry_ei); + +const struct qmi_elem_info servreg_get_domain_list_req_ei[] = { + { + .data_type = QMI_STRING, + .elem_len = SERVREG_NAME_LENGTH + 1, + .elem_size = sizeof(char), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct servreg_get_domain_list_req, + service_name), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct servreg_get_domain_list_req, + domain_offset_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct servreg_get_domain_list_req, + domain_offset), + }, + {} +}; +EXPORT_SYMBOL_GPL(servreg_get_domain_list_req_ei); + +const struct qmi_elem_info servreg_get_domain_list_resp_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct servreg_get_domain_list_resp, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct servreg_get_domain_list_resp, + total_domains_valid), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct servreg_get_domain_list_resp, + total_domains), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct servreg_get_domain_list_resp, + db_rev_count_valid), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct servreg_get_domain_list_resp, + db_rev_count), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct servreg_get_domain_list_resp, + domain_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct servreg_get_domain_list_resp, + domain_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = SERVREG_DOMAIN_LIST_LENGTH, + .elem_size = sizeof(struct servreg_location_entry), + .array_type = VAR_LEN_ARRAY, + .tlv_type 
= 0x12, + .offset = offsetof(struct servreg_get_domain_list_resp, + domain_list), + .ei_array = servreg_location_entry_ei, + }, + {} +}; +EXPORT_SYMBOL_GPL(servreg_get_domain_list_resp_ei); + +const struct qmi_elem_info servreg_register_listener_req_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct servreg_register_listener_req, + enable), + }, + { + .data_type = QMI_STRING, + .elem_len = SERVREG_NAME_LENGTH + 1, + .elem_size = sizeof(char), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct servreg_register_listener_req, + service_path), + }, + {} +}; +EXPORT_SYMBOL_GPL(servreg_register_listener_req_ei); + +const struct qmi_elem_info servreg_register_listener_resp_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct servreg_register_listener_resp, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct servreg_register_listener_resp, + curr_state_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum servreg_service_state), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct servreg_register_listener_resp, + curr_state), + }, + {} +}; +EXPORT_SYMBOL_GPL(servreg_register_listener_resp_ei); + +const struct qmi_elem_info servreg_restart_pd_req_ei[] = { + { + .data_type = QMI_STRING, + .elem_len = SERVREG_NAME_LENGTH + 1, + .elem_size = sizeof(char), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct servreg_restart_pd_req, + service_path), + }, + {} +}; +EXPORT_SYMBOL_GPL(servreg_restart_pd_req_ei); + +const struct qmi_elem_info servreg_restart_pd_resp_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct servreg_restart_pd_resp, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + {} +}; +EXPORT_SYMBOL_GPL(servreg_restart_pd_resp_ei); + +const struct qmi_elem_info servreg_state_updated_ind_ei[] = { + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct servreg_state_updated_ind, + curr_state), + }, + { + .data_type = QMI_STRING, + .elem_len = SERVREG_NAME_LENGTH + 1, + .elem_size = sizeof(char), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct servreg_state_updated_ind, + service_path), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = NO_ARRAY, + .tlv_type = 0x03, + .offset = offsetof(struct servreg_state_updated_ind, + transaction_id), + }, + {} +}; +EXPORT_SYMBOL_GPL(servreg_state_updated_ind_ei); + +const struct qmi_elem_info servreg_set_ack_req_ei[] = { + { + .data_type = QMI_STRING, + .elem_len = SERVREG_NAME_LENGTH + 1, + .elem_size = sizeof(char), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct servreg_set_ack_req, + service_path), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct servreg_set_ack_req, + transaction_id), + }, + {} 
+}; +EXPORT_SYMBOL_GPL(servreg_set_ack_req_ei); + +const struct qmi_elem_info servreg_set_ack_resp_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct servreg_set_ack_resp, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + {} +}; +EXPORT_SYMBOL_GPL(servreg_set_ack_resp_ei); + +const struct qmi_elem_info servreg_loc_pfr_req_ei[] = { + { + .data_type = QMI_STRING, + .elem_len = SERVREG_NAME_LENGTH + 1, + .elem_size = sizeof(char), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct servreg_loc_pfr_req, service) + }, + { + .data_type = QMI_STRING, + .elem_len = SERVREG_NAME_LENGTH + 1, + .elem_size = sizeof(char), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct servreg_loc_pfr_req, reason) + }, + {} +}; +EXPORT_SYMBOL_GPL(servreg_loc_pfr_req_ei); + +const struct qmi_elem_info servreg_loc_pfr_resp_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof_field(struct servreg_loc_pfr_resp, rsp), + .tlv_type = 0x02, + .offset = offsetof(struct servreg_loc_pfr_resp, rsp), + .ei_array = qmi_response_type_v01_ei, + }, + {} +}; +EXPORT_SYMBOL_GPL(servreg_loc_pfr_resp_ei); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Qualcomm Protection Domain messages data"); diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c index 561d8037b50a..de86009ecd91 100644 --- a/drivers/soc/qcom/rpmh-rsc.c +++ b/drivers/soc/qcom/rpmh-rsc.c @@ -646,13 +646,14 @@ int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg) { struct tcs_group *tcs; int tcs_id; - unsigned long flags; + + might_sleep(); tcs = get_tcs_for_msg(drv, msg); if (IS_ERR(tcs)) return PTR_ERR(tcs); - spin_lock_irqsave(&drv->lock, flags); + spin_lock_irq(&drv->lock); /* Wait forever for a free tcs. It better be there eventually! */ wait_event_lock_irq(drv->tcs_wait, @@ -670,7 +671,7 @@ int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg) write_tcs_reg_sync(drv, drv->regs[RSC_DRV_CMD_ENABLE], tcs_id, 0); enable_tcs_irq(drv, tcs_id, true); } - spin_unlock_irqrestore(&drv->lock, flags); + spin_unlock_irq(&drv->lock); /* * These two can be done after the lock is released because: diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c index 9f26d7f9b9dc..8903ed956312 100644 --- a/drivers/soc/qcom/rpmh.c +++ b/drivers/soc/qcom/rpmh.c @@ -183,7 +183,6 @@ static int __rpmh_write(const struct device *dev, enum rpmh_state state, } if (state == RPMH_ACTIVE_ONLY_STATE) { - WARN_ON(irqs_disabled()); ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg); } else { /* Clean up our call by spoofing tx_done */ diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c index 7191fa0c087f..e40aac281b06 100644 --- a/drivers/soc/qcom/smem.c +++ b/drivers/soc/qcom/smem.c @@ -795,6 +795,39 @@ int qcom_smem_get_soc_id(u32 *id) } EXPORT_SYMBOL_GPL(qcom_smem_get_soc_id); +/** + * qcom_smem_get_feature_code() - return the feature code + * @code: On success, return the feature code here. + * + * Look up the feature code identifier from SMEM and return it. + * + * Return: 0 on success, negative errno on failure. 
+ */ +int qcom_smem_get_feature_code(u32 *code) +{ + struct socinfo *info; + u32 raw_code; + + info = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_HW_SW_BUILD_ID, NULL); + if (IS_ERR(info)) + return PTR_ERR(info); + + /* This only makes sense for socinfo >= 16 */ + if (__le32_to_cpu(info->fmt) < SOCINFO_VERSION(0, 16)) + return -EOPNOTSUPP; + + raw_code = __le32_to_cpu(info->feature_code); + + /* Ensure the value makes sense */ + if (raw_code > SOCINFO_FC_INT_MAX) + raw_code = SOCINFO_FC_UNKNOWN; + + *code = raw_code; + + return 0; +} +EXPORT_SYMBOL_GPL(qcom_smem_get_feature_code); + static int qcom_smem_get_sbl_version(struct qcom_smem *smem) { struct smem_header *header; diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c index a21241cbeec7..696c2a8387d0 100644 --- a/drivers/soc/qcom/smp2p.c +++ b/drivers/soc/qcom/smp2p.c @@ -16,6 +16,7 @@ #include <linux/platform_device.h> #include <linux/pm_wakeirq.h> #include <linux/regmap.h> +#include <linux/seq_file.h> #include <linux/soc/qcom/smem.h> #include <linux/soc/qcom/smem_state.h> #include <linux/spinlock.h> @@ -353,11 +354,19 @@ static int smp2p_set_irq_type(struct irq_data *irqd, unsigned int type) return 0; } +static void smp2p_irq_print_chip(struct irq_data *irqd, struct seq_file *p) +{ + struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd); + + seq_printf(p, " %8s", dev_name(entry->smp2p->dev)); +} + static struct irq_chip smp2p_irq_chip = { .name = "smp2p", .irq_mask = smp2p_mask_irq, .irq_unmask = smp2p_unmask_irq, .irq_set_type = smp2p_set_irq_type, + .irq_print_chip = smp2p_irq_print_chip, }; static int smp2p_irq_map(struct irq_domain *d, @@ -617,7 +626,7 @@ static int qcom_smp2p_probe(struct platform_device *pdev) ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, qcom_smp2p_intr, IRQF_ONESHOT, - "smp2p", (void *)smp2p); + NULL, (void *)smp2p); if (ret) { dev_err(&pdev->dev, "failed to request interrupt\n"); goto unwind_interfaces; diff --git a/drivers/soc/qcom/smsm.c b/drivers/soc/qcom/smsm.c index e7c7e9a640a6..ffe78ae34386 100644 --- a/drivers/soc/qcom/smsm.c +++ b/drivers/soc/qcom/smsm.c @@ -5,6 +5,7 @@ */ #include <linux/interrupt.h> +#include <linux/mailbox_client.h> #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of_irq.h> @@ -71,6 +72,7 @@ struct smsm_host; * @lock: spinlock for read-modify-write of the outgoing state * @entries: context for each of the entries * @hosts: context for each of the hosts + * @mbox_client: mailbox client handle */ struct qcom_smsm { struct device *dev; @@ -88,6 +90,8 @@ struct qcom_smsm { struct smsm_entry *entries; struct smsm_host *hosts; + + struct mbox_client mbox_client; }; /** @@ -120,11 +124,14 @@ struct smsm_entry { * @ipc_regmap: regmap for outgoing interrupt * @ipc_offset: offset in @ipc_regmap for outgoing interrupt * @ipc_bit: bit in @ipc_regmap + @ipc_offset for outgoing interrupt + * @mbox_chan: apcs ipc mailbox channel handle */ struct smsm_host { struct regmap *ipc_regmap; int ipc_offset; int ipc_bit; + + struct mbox_chan *mbox_chan; }; /** @@ -172,7 +179,13 @@ static int smsm_update_bits(void *data, u32 mask, u32 value) hostp = &smsm->hosts[host]; val = readl(smsm->subscription + host); - if (val & changes && hostp->ipc_regmap) { + if (!(val & changes)) + continue; + + if (hostp->mbox_chan) { + mbox_send_message(hostp->mbox_chan, NULL); + mbox_client_txdone(hostp->mbox_chan, 0); + } else if (hostp->ipc_regmap) { regmap_write(hostp->ipc_regmap, hostp->ipc_offset, BIT(hostp->ipc_bit)); @@ -353,6 +366,28 @@ static const struct 
irq_domain_ops smsm_irq_ops = { }; /** + * smsm_parse_mbox() - requests an mbox channel + * @smsm: smsm driver context + * @host_id: index of the remote host to be resolved + * + * Requests the desired channel using the mbox interface, which is needed for + * sending the outgoing interrupts to a remote host identified by @host_id. + */ +static int smsm_parse_mbox(struct qcom_smsm *smsm, unsigned int host_id) +{ + struct smsm_host *host = &smsm->hosts[host_id]; + int ret = 0; + + host->mbox_chan = mbox_request_channel(&smsm->mbox_client, host_id); + if (IS_ERR(host->mbox_chan)) { + ret = PTR_ERR(host->mbox_chan); + host->mbox_chan = NULL; + } + + return ret; +} + +/** * smsm_parse_ipc() - parses a qcom,ipc-%d device tree property * @smsm: smsm driver context * @host_id: index of the remote host to be resolved @@ -521,8 +556,16 @@ static int qcom_smsm_probe(struct platform_device *pdev) "qcom,local-host", &smsm->local_host); + smsm->mbox_client.dev = &pdev->dev; + smsm->mbox_client.knows_txdone = true; + /* Parse the host properties */ for (id = 0; id < smsm->num_hosts; id++) { + /* Try using mbox interface first, otherwise fall back to syscon */ + ret = smsm_parse_mbox(smsm, id); + if (!ret) + continue; + ret = smsm_parse_ipc(smsm, id); if (ret < 0) goto out_put; @@ -609,6 +652,9 @@ unwind_interfaces: qcom_smem_state_unregister(smsm->state); out_put: + for (id = 0; id < smsm->num_hosts; id++) + mbox_free_channel(smsm->hosts[id].mbox_chan); + of_node_put(local_node); return ret; } @@ -622,6 +668,9 @@ static void qcom_smsm_remove(struct platform_device *pdev) if (smsm->entries[id].domain) irq_domain_remove(smsm->entries[id].domain); + for (id = 0; id < smsm->num_hosts; id++) + mbox_free_channel(smsm->hosts[id].mbox_chan); + qcom_smem_state_unregister(smsm->state); } diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c index 277c07a6603d..d7359a235e3c 100644 --- a/drivers/soc/qcom/socinfo.c +++ b/drivers/soc/qcom/socinfo.c @@ -21,14 +21,6 @@ #include <dt-bindings/arm/qcom,ids.h> -/* - * SoC version type with major number in the upper 16 bits and minor - * number in the lower 16 bits. 
- */ -#define SOCINFO_MAJOR(ver) (((ver) >> 16) & 0xffff) -#define SOCINFO_MINOR(ver) ((ver) & 0xffff) -#define SOCINFO_VERSION(maj, min) ((((maj) & 0xffff) << 16)|((min) & 0xffff)) - /* Helper macros to create soc_id table */ #define qcom_board_id(id) QCOM_ID_ ## id, __stringify(id) #define qcom_board_id_named(id, name) QCOM_ID_ ## id, (name) @@ -124,6 +116,7 @@ static const char *const pmic_models[] = { [50] = "PM8350B", [51] = "PMR735A", [52] = "PMR735B", + [54] = "PM6350", [55] = "PM4125", [58] = "PM8450", [65] = "PM8010", @@ -133,7 +126,8 @@ static const char *const pmic_models[] = { [72] = "PMR735D", [73] = "PM8550", [74] = "PMK8550", - [82] = "SMB2360", + [82] = "PMC8380", + [83] = "SMB2360", }; struct socinfo_params { @@ -348,6 +342,7 @@ static const struct soc_id soc_id[] = { { qcom_board_id(SDA630) }, { qcom_board_id(MSM8905) }, { qcom_board_id(SDX202) }, + { qcom_board_id(SDM670) }, { qcom_board_id(SDM450) }, { qcom_board_id(SM8150) }, { qcom_board_id(SDA845) }, @@ -445,6 +440,7 @@ static const struct soc_id soc_id[] = { { qcom_board_id(QCS8550) }, { qcom_board_id(QCM8550) }, { qcom_board_id(IPQ5300) }, + { qcom_board_id(IPQ5321) }, }; static const char *socinfo_machine(struct device *dev, unsigned int id) diff --git a/drivers/soc/qcom/spm.c b/drivers/soc/qcom/spm.c index 06e2c4c2a4a8..f75659fff287 100644 --- a/drivers/soc/qcom/spm.c +++ b/drivers/soc/qcom/spm.c @@ -572,4 +572,5 @@ static int __init qcom_spm_init(void) } arch_initcall(qcom_spm_init); +MODULE_DESCRIPTION("Qualcomm Subsystem Power Manager (SPM)"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/qcom/wcnss_ctrl.c b/drivers/soc/qcom/wcnss_ctrl.c index 148bcbac332d..62b424e90d90 100644 --- a/drivers/soc/qcom/wcnss_ctrl.c +++ b/drivers/soc/qcom/wcnss_ctrl.c @@ -3,6 +3,7 @@ * Copyright (c) 2016, Linaro Ltd. * Copyright (c) 2015, Sony Mobile Communications Inc. 
*/ +#include <linux/cleanup.h> #include <linux/firmware.h> #include <linux/module.h> #include <linux/slab.h> @@ -198,7 +199,6 @@ static int wcnss_request_version(struct wcnss_ctrl *wcnss) */ static int wcnss_download_nv(struct wcnss_ctrl *wcnss, bool *expect_cbc) { - struct wcnss_download_nv_req *req; const struct firmware *fw; struct device *dev = wcnss->dev; const char *nvbin = NVBIN_FILE; @@ -206,18 +206,19 @@ static int wcnss_download_nv(struct wcnss_ctrl *wcnss, bool *expect_cbc) ssize_t left; int ret; - req = kzalloc(sizeof(*req) + NV_FRAGMENT_SIZE, GFP_KERNEL); + struct wcnss_download_nv_req *req __free(kfree) = kzalloc(sizeof(*req) + NV_FRAGMENT_SIZE, + GFP_KERNEL); if (!req) return -ENOMEM; ret = of_property_read_string(dev->of_node, "firmware-name", &nvbin); if (ret < 0 && ret != -EINVAL) - goto free_req; + return ret; ret = request_firmware(&fw, nvbin, dev); if (ret < 0) { dev_err(dev, "Failed to load nv file %s: %d\n", nvbin, ret); - goto free_req; + return ret; } data = fw->data; @@ -263,8 +264,6 @@ static int wcnss_download_nv(struct wcnss_ctrl *wcnss, bool *expect_cbc) release_fw: release_firmware(fw); -free_req: - kfree(req); return ret; } diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c index fd8b6ac06656..a0123070a816 100644 --- a/drivers/soc/samsung/exynos-pmu.c +++ b/drivers/soc/samsung/exynos-pmu.c @@ -129,14 +129,30 @@ static int tensor_set_bits_atomic(void *ctx, unsigned int offset, u32 val, return ret; } -static int tensor_sec_update_bits(void *ctx, unsigned int reg, - unsigned int mask, unsigned int val) +static bool tensor_is_atomic(unsigned int reg) { /* * Use atomic operations for PMU_ALIVE registers (offset 0~0x3FFF) - * as the target registers can be accessed by multiple masters. + * as the target registers can be accessed by multiple masters. SFRs + * that don't support atomic are added to the switch statement below. 
*/ if (reg > PMUALIVE_MASK) + return false; + + switch (reg) { + case GS101_SYSIP_DAT0: + case GS101_SYSTEM_CONFIGURATION: + return false; + default: + return true; + } +} + +static int tensor_sec_update_bits(void *ctx, unsigned int reg, + unsigned int mask, unsigned int val) +{ + + if (!tensor_is_atomic(reg)) return tensor_sec_reg_rmw(ctx, reg, mask, val); return tensor_set_bits_atomic(ctx, reg, val, mask); diff --git a/drivers/soc/sunxi/sunxi_sram.c b/drivers/soc/sunxi/sunxi_sram.c index 6eb6cf06278e..2781a091a6a6 100644 --- a/drivers/soc/sunxi/sunxi_sram.c +++ b/drivers/soc/sunxi/sunxi_sram.c @@ -33,7 +33,6 @@ struct sunxi_sram_data { u8 offset; u8 width; struct sunxi_sram_func *func; - struct list_head list; }; struct sunxi_sram_desc { @@ -103,7 +102,6 @@ static const struct of_device_id sunxi_sram_dt_ids[] = { }; static struct device *sram_dev; -static LIST_HEAD(claimed_sram); static DEFINE_SPINLOCK(sram_lock); static void __iomem *base; @@ -346,7 +344,7 @@ static void sunxi_sram_unlock(void *_lock) spin_unlock(lock); } -static struct regmap_config sunxi_sram_regmap_config = { +static const struct regmap_config sunxi_sram_regmap_config = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c index 91d0ad6ddefc..6c37d6eb8b49 100644 --- a/drivers/soc/tegra/pmc.c +++ b/drivers/soc/tegra/pmc.c @@ -2891,15 +2891,11 @@ static int tegra_pmc_probe(struct platform_device *pdev) pmc->aotag = base; pmc->scratch = base; } else { - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "wake"); - pmc->wake = devm_ioremap_resource(&pdev->dev, res); + pmc->wake = devm_platform_ioremap_resource_byname(pdev, "wake"); if (IS_ERR(pmc->wake)) return PTR_ERR(pmc->wake); - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "aotag"); - pmc->aotag = devm_ioremap_resource(&pdev->dev, res); + pmc->aotag = devm_platform_ioremap_resource_byname(pdev, "aotag"); if (IS_ERR(pmc->aotag)) return PTR_ERR(pmc->aotag); diff --git a/drivers/soc/ti/k3-socinfo.c b/drivers/soc/ti/k3-socinfo.c index 59101bf7cf23..4fb0f0a24828 100644 --- a/drivers/soc/ti/k3-socinfo.c +++ b/drivers/soc/ti/k3-socinfo.c @@ -61,7 +61,7 @@ static const struct k3_soc_id { }; static const char * const j721e_rev_string_map[] = { - "1.0", "1.1", + "1.0", "1.1", "2.0", }; static int diff --git a/drivers/soc/ti/knav_qmss.h b/drivers/soc/ti/knav_qmss.h index a01eda720bf6..9325e8ce2e25 100644 --- a/drivers/soc/ti/knav_qmss.h +++ b/drivers/soc/ti/knav_qmss.h @@ -333,7 +333,7 @@ struct knav_range_info { void *queue_base_inst; unsigned flags; struct list_head list; - struct knav_range_ops *ops; + const struct knav_range_ops *ops; struct knav_acc_info acc_info; struct knav_acc_channel *acc; unsigned num_irqs; diff --git a/drivers/soc/ti/knav_qmss_acc.c b/drivers/soc/ti/knav_qmss_acc.c index 3d388646ed43..269b4e75ae40 100644 --- a/drivers/soc/ti/knav_qmss_acc.c +++ b/drivers/soc/ti/knav_qmss_acc.c @@ -450,7 +450,7 @@ static int knav_acc_free_range(struct knav_range_info *range) return 0; } -static struct knav_range_ops knav_acc_range_ops = { +static const struct knav_range_ops knav_acc_range_ops = { .set_notify = knav_acc_set_notify, .init_queue = knav_acc_init_queue, .open_queue = knav_acc_open_queue, diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c index 06fb5505c22c..f2055a76f84c 100644 --- a/drivers/soc/ti/knav_qmss_queue.c +++ b/drivers/soc/ti/knav_qmss_queue.c @@ -411,7 +411,7 @@ static int knav_gp_close_queue(struct knav_range_info *range, return 0; } 
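The knav_qmss changes here and in the following hunk constify the range ops tables. Moving an ops structure into .rodata only links cleanly if every pointer that stores it is const-qualified as well, which is why knav_range_info::ops changes in the same series. A generic sketch of the pattern (all names illustrative, not from the driver):

struct demo_ops {
	int (*open)(void *ctx);
	void (*close)(void *ctx);
};

struct demo_range {
	void *ctx;
	const struct demo_ops *ops;	/* must be const to accept &demo_default_ops */
};

static int demo_open(void *ctx) { return 0; }
static void demo_close(void *ctx) { }

/* Placed in .rodata; the function pointers can no longer be overwritten. */
static const struct demo_ops demo_default_ops = {
	.open = demo_open,
	.close = demo_close,
};

static void demo_range_init(struct demo_range *range, void *ctx)
{
	range->ctx = ctx;
	range->ops = &demo_default_ops;
}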
-static struct knav_range_ops knav_gp_range_ops = { +static const struct knav_range_ops knav_gp_range_ops = { .set_notify = knav_gp_set_notify, .open_queue = knav_gp_open_queue, .close_queue = knav_gp_close_queue, diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c index 8e983c3c4e03..3a56bbf3268a 100644 --- a/drivers/soc/ti/pm33xx.c +++ b/drivers/soc/ti/pm33xx.c @@ -450,14 +450,14 @@ static int am33xx_pm_rtc_setup(void) rtc_base_virt = of_iomap(np, 0); if (!rtc_base_virt) { - pr_warn("PM: could not iomap rtc"); + pr_warn("PM: could not iomap rtc\n"); error = -ENODEV; goto err_clk_put; } omap_rtc = rtc_class_open("rtc0"); if (!omap_rtc) { - pr_warn("PM: rtc0 not available"); + pr_warn("PM: rtc0 not available\n"); error = -EPROBE_DEFER; goto err_iounmap; } diff --git a/drivers/soc/xilinx/xlnx_event_manager.c b/drivers/soc/xilinx/xlnx_event_manager.c index 253299e4214d..f529e1346247 100644 --- a/drivers/soc/xilinx/xlnx_event_manager.c +++ b/drivers/soc/xilinx/xlnx_event_manager.c @@ -3,6 +3,7 @@ * Xilinx Event Management Driver * * Copyright (C) 2021 Xilinx, Inc. + * Copyright (C) 2024 Advanced Micro Devices, Inc. * * Abhyuday Godhasara <abhyuday.godhasara@xilinx.com> */ @@ -19,7 +20,7 @@ #include <linux/platform_device.h> #include <linux/slab.h> -static DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number1); +static DEFINE_PER_CPU_READ_MOSTLY(int, dummy_cpu_number); static int virq_sgi; static int event_manager_availability = -EACCES; @@ -35,7 +36,6 @@ static int event_manager_availability = -EACCES; #define MAX_BITS (32U) /* Number of bits available for error mask */ -#define FIRMWARE_VERSION_MASK (0xFFFFU) #define REGISTER_NOTIFIER_FIRMWARE_VERSION (2U) static DEFINE_HASHTABLE(reg_driver_map, REGISTERED_DRIVER_MAX_ORDER); @@ -570,7 +570,6 @@ static void xlnx_disable_percpu_irq(void *data) static int xlnx_event_init_sgi(struct platform_device *pdev) { int ret = 0; - int cpu; /* * IRQ related structures are used for the following: * for each SGI interrupt ensure its mapped by GIC IRQ domain @@ -607,11 +606,8 @@ static int xlnx_event_init_sgi(struct platform_device *pdev) sgi_fwspec.param[0] = sgi_num; virq_sgi = irq_create_fwspec_mapping(&sgi_fwspec); - cpu = get_cpu(); - per_cpu(cpu_number1, cpu) = cpu; ret = request_percpu_irq(virq_sgi, xlnx_event_handler, "xlnx_event_mgmt", - &cpu_number1); - put_cpu(); + &dummy_cpu_number); WARN_ON(ret); if (ret) { @@ -627,16 +623,12 @@ static int xlnx_event_init_sgi(struct platform_device *pdev) static void xlnx_event_cleanup_sgi(struct platform_device *pdev) { - int cpu = smp_processor_id(); - - per_cpu(cpu_number1, cpu) = cpu; - cpuhp_remove_state(CPUHP_AP_ONLINE_DYN); on_each_cpu(xlnx_disable_percpu_irq, NULL, 1); irq_clear_status_flags(virq_sgi, IRQ_PER_CPU); - free_percpu_irq(virq_sgi, &cpu_number1); + free_percpu_irq(virq_sgi, &dummy_cpu_number); irq_dispose_mapping(virq_sgi); } diff --git a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c index 965b1143936a..411d33f2fb05 100644 --- a/drivers/soc/xilinx/zynqmp_power.c +++ b/drivers/soc/xilinx/zynqmp_power.c @@ -30,9 +30,27 @@ struct zynqmp_pm_work_struct { u32 args[CB_ARG_CNT]; }; -static struct zynqmp_pm_work_struct *zynqmp_pm_init_suspend_work; +/** + * struct zynqmp_pm_event_info - event related information + * @cb_fun: Function pointer to store the callback function. + * @cb_type: Type of callback from pm_api_cb_id, + * PM_NOTIFY_CB - for Error Events, + * PM_INIT_SUSPEND_CB - for suspend callback. + * @node_id: Node-Id related to event. 
+ * @event: Event Mask for the Error Event. + * @wake: Flag specifying whether the subsystem should be woken upon + * event notification. + */ +struct zynqmp_pm_event_info { + event_cb_func_t cb_fun; + enum pm_api_cb_id cb_type; + u32 node_id; + u32 event; + bool wake; +}; + +static struct zynqmp_pm_work_struct *zynqmp_pm_init_suspend_work, *zynqmp_pm_init_restart_work; static struct mbox_chan *rx_chan; -static bool event_registered; enum pm_suspend_mode { PM_SUSPEND_MODE_FIRST = 0, @@ -54,6 +72,19 @@ static void zynqmp_pm_get_callback_data(u32 *buf) zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, buf, 0); } +static void subsystem_restart_event_callback(const u32 *payload, void *data) +{ + /* First element is callback API ID, others are callback arguments */ + if (work_pending(&zynqmp_pm_init_restart_work->callback_work)) + return; + + /* Copy callback arguments into work's structure */ + memcpy(zynqmp_pm_init_restart_work->args, &payload[0], + sizeof(zynqmp_pm_init_restart_work->args)); + + queue_work(system_unbound_wq, &zynqmp_pm_init_restart_work->callback_work); +} + static void suspend_event_callback(const u32 *payload, void *data) { /* First element is callback API ID, others are callback arguments */ @@ -120,6 +151,37 @@ static void ipi_receive_callback(struct mbox_client *cl, void *data) } /** + * zynqmp_pm_subsystem_restart_work_fn - Initiate Subsystem restart + * @work: Pointer to work_struct + * + * Bottom-half of PM callback IRQ handler. + */ +static void zynqmp_pm_subsystem_restart_work_fn(struct work_struct *work) +{ + int ret; + struct zynqmp_pm_work_struct *pm_work = container_of(work, struct zynqmp_pm_work_struct, + callback_work); + + /* First element is callback API ID, others are callback arguments */ + if (pm_work->args[0] == PM_NOTIFY_CB) { + if (pm_work->args[2] == EVENT_SUBSYSTEM_RESTART) { + ret = zynqmp_pm_system_shutdown(ZYNQMP_PM_SHUTDOWN_TYPE_SETSCOPE_ONLY, + ZYNQMP_PM_SHUTDOWN_SUBTYPE_SUBSYSTEM); + if (ret) { + pr_err("unable to set shutdown scope\n"); + return; + } + + kernel_restart(NULL); + } else { + pr_err("%s Unsupported Event - %d\n", __func__, pm_work->args[2]); + } + } else { + pr_err("%s() Unsupported Callback %d\n", __func__, pm_work->args[0]); + } +} + +/** * zynqmp_pm_init_suspend_work_fn - Initialize suspend * @work: Pointer to work_struct * @@ -184,13 +246,51 @@ static ssize_t suspend_mode_store(struct device *dev, static DEVICE_ATTR_RW(suspend_mode); +static void unregister_event(struct device *dev, void *res) +{ + struct zynqmp_pm_event_info *event_info = res; + + xlnx_unregister_event(event_info->cb_type, event_info->node_id, + event_info->event, event_info->cb_fun, NULL); +} + +static int register_event(struct device *dev, const enum pm_api_cb_id cb_type, const u32 node_id, + const u32 event, const bool wake, event_cb_func_t cb_fun) +{ + int ret; + struct zynqmp_pm_event_info *event_info; + + event_info = devres_alloc(unregister_event, sizeof(struct zynqmp_pm_event_info), + GFP_KERNEL); + if (!event_info) + return -ENOMEM; + + event_info->cb_type = cb_type; + event_info->node_id = node_id; + event_info->event = event; + event_info->wake = wake; + event_info->cb_fun = cb_fun; + + ret = xlnx_register_event(event_info->cb_type, event_info->node_id, + event_info->event, event_info->wake, event_info->cb_fun, NULL); + if (ret) { + devres_free(event_info); + return ret; + } + + devres_add(dev, event_info); + return 0; +} + static int zynqmp_pm_probe(struct platform_device *pdev) { int ret, irq; - u32 pm_api_version; + u32 pm_api_version, pm_family_code, 
pm_sub_family_code, node_id; struct mbox_client *client; - zynqmp_pm_get_api_version(&pm_api_version); + ret = zynqmp_pm_get_api_version(&pm_api_version); + if (ret) + return ret; /* Check PM API version number */ if (pm_api_version < ZYNQMP_PM_VERSION) @@ -203,21 +303,43 @@ static int zynqmp_pm_probe(struct platform_device *pdev) * is not available to use) or -ENODEV(Xilinx Event Manager not compiled), * then use ipi-mailbox or interrupt method. */ - ret = xlnx_register_event(PM_INIT_SUSPEND_CB, 0, 0, false, - suspend_event_callback, NULL); + ret = register_event(&pdev->dev, PM_INIT_SUSPEND_CB, 0, 0, false, + suspend_event_callback); if (!ret) { zynqmp_pm_init_suspend_work = devm_kzalloc(&pdev->dev, sizeof(struct zynqmp_pm_work_struct), GFP_KERNEL); - if (!zynqmp_pm_init_suspend_work) { - xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0, - suspend_event_callback, NULL); + if (!zynqmp_pm_init_suspend_work) return -ENOMEM; - } - event_registered = true; INIT_WORK(&zynqmp_pm_init_suspend_work->callback_work, zynqmp_pm_init_suspend_work_fn); + + ret = zynqmp_pm_get_family_info(&pm_family_code, &pm_sub_family_code); + if (ret < 0) + return ret; + + if (pm_sub_family_code == VERSALNET_SUB_FAMILY_CODE) + node_id = PM_DEV_ACPU_0_0; + else + node_id = PM_DEV_ACPU_0; + + ret = register_event(&pdev->dev, PM_NOTIFY_CB, node_id, EVENT_SUBSYSTEM_RESTART, + false, subsystem_restart_event_callback); + if (ret) { + dev_err(&pdev->dev, "Failed to Register with Xilinx Event manager %d\n", + ret); + return ret; + } + + zynqmp_pm_init_restart_work = devm_kzalloc(&pdev->dev, + sizeof(struct zynqmp_pm_work_struct), + GFP_KERNEL); + if (!zynqmp_pm_init_restart_work) + return -ENOMEM; + + INIT_WORK(&zynqmp_pm_init_restart_work->callback_work, + zynqmp_pm_subsystem_restart_work_fn); } else if (ret != -EACCES && ret != -ENODEV) { dev_err(&pdev->dev, "Failed to Register with Xilinx Event manager %d\n", ret); return ret; @@ -264,15 +386,8 @@ static int zynqmp_pm_probe(struct platform_device *pdev) } ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr); - if (ret) { - if (event_registered) { - xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0, suspend_event_callback, - NULL); - event_registered = false; - } - dev_err(&pdev->dev, "unable to create sysfs interface\n"); + if (ret) return ret; - } return 0; } @@ -280,8 +395,6 @@ static int zynqmp_pm_probe(struct platform_device *pdev) static void zynqmp_pm_remove(struct platform_device *pdev) { sysfs_remove_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr); - if (event_registered) - xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0, suspend_event_callback, NULL); if (!rx_chan) mbox_free_channel(rx_chan); diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index a2c99ff33e0a..ec1550c698d5 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -277,6 +277,12 @@ config SPI_CADENCE_XSPI device with a Cadence XSPI controller and want to access the Flash as an MTD device. 
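The zynqmp_power rework above drops the hand-rolled xlnx_unregister_event() error unwinding in favour of a devres-managed registration, so teardown happens automatically when the device is unbound. A reduced sketch of the devres_alloc()/devres_add() pattern it follows; demo_fw_register() and demo_fw_unregister() are stand-ins for the real firmware calls, not the Xilinx event-manager API:

#include <linux/device.h>
#include <linux/slab.h>

typedef void (*demo_event_cb_t)(const u32 *payload, void *data);

struct demo_event_res {
	u32 node_id;
	demo_event_cb_t cb;
};

/* Stand-ins for the firmware registration calls. */
static int demo_fw_register(u32 node_id, demo_event_cb_t cb) { return 0; }
static void demo_fw_unregister(u32 node_id, demo_event_cb_t cb) { }

/* devres release callback: runs automatically when the device is unbound. */
static void demo_event_release(struct device *dev, void *res)
{
	struct demo_event_res *ev = res;

	demo_fw_unregister(ev->node_id, ev->cb);
}

static int demo_register_event(struct device *dev, u32 node_id, demo_event_cb_t cb)
{
	struct demo_event_res *ev;
	int ret;

	ev = devres_alloc(demo_event_release, sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->node_id = node_id;
	ev->cb = cb;

	ret = demo_fw_register(node_id, cb);
	if (ret) {
		devres_free(ev);	/* nothing registered, just drop the resource */
		return ret;
	}

	devres_add(dev, ev);	/* tie the unregister to the device lifetime */
	return 0;
}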
+config SPI_CH341 + tristate "CH341 USB2SPI adapter" + depends on SPI_MASTER && USB + help + Enables the SPI controller on the CH341a USB to serial chip + config SPI_CLPS711X tristate "CLPS711X host SPI controller" depends on ARCH_CLPS711X || COMPILE_TEST diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index e694254dec04..a9b1bc259b68 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -39,6 +39,7 @@ obj-$(CONFIG_SPI_BUTTERFLY) += spi-butterfly.o obj-$(CONFIG_SPI_CADENCE) += spi-cadence.o obj-$(CONFIG_SPI_CADENCE_QUADSPI) += spi-cadence-quadspi.o obj-$(CONFIG_SPI_CADENCE_XSPI) += spi-cadence-xspi.o +obj-$(CONFIG_SPI_CH341) += spi-ch341.o obj-$(CONFIG_SPI_CLPS711X) += spi-clps711x.o obj-$(CONFIG_SPI_COLDFIRE_QSPI) += spi-coldfire-qspi.o obj-$(CONFIG_SPI_CS42L43) += spi-cs42l43.o @@ -107,7 +108,8 @@ obj-$(CONFIG_SPI_PIC32) += spi-pic32.o obj-$(CONFIG_SPI_PIC32_SQI) += spi-pic32-sqi.o obj-$(CONFIG_SPI_PL022) += spi-pl022.o obj-$(CONFIG_SPI_PPC4xx) += spi-ppc4xx.o -spi-pxa2xx-platform-objs := spi-pxa2xx.o spi-pxa2xx-dma.o +obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx-core.o +spi-pxa2xx-core-y := spi-pxa2xx.o spi-pxa2xx-dma.o obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx-platform.o obj-$(CONFIG_SPI_PXA2XX_PCI) += spi-pxa2xx-pci.o obj-$(CONFIG_SPI_QCOM_GENI) += spi-geni-qcom.o diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c index 370c4d1572ed..5aaff3bee1b7 100644 --- a/drivers/spi/atmel-quadspi.c +++ b/drivers/spi/atmel-quadspi.c @@ -756,8 +756,15 @@ static int __maybe_unused atmel_qspi_resume(struct device *dev) struct atmel_qspi *aq = spi_controller_get_devdata(ctrl); int ret; - clk_prepare(aq->pclk); - clk_prepare(aq->qspick); + ret = clk_prepare(aq->pclk); + if (ret) + return ret; + + ret = clk_prepare(aq->qspick); + if (ret) { + clk_unprepare(aq->pclk); + return ret; + } ret = pm_runtime_force_resume(dev); if (ret < 0) diff --git a/drivers/spi/internals.h b/drivers/spi/internals.h index 4a28a8395552..1f459b895891 100644 --- a/drivers/spi/internals.h +++ b/drivers/spi/internals.h @@ -40,4 +40,12 @@ static inline void spi_unmap_buf(struct spi_controller *ctlr, } #endif /* CONFIG_HAS_DMA */ +static inline bool spi_xfer_is_dma_mapped(struct spi_controller *ctlr, + struct spi_device *spi, + struct spi_transfer *xfer) +{ + return ctlr->can_dma && ctlr->can_dma(ctlr, spi, xfer) && + (xfer->tx_sg_mapped || xfer->rx_sg_mapped); +} + #endif /* __LINUX_SPI_INTERNALS_H */ diff --git a/drivers/spi/spi-altera-core.c b/drivers/spi/spi-altera-core.c index 87e37f48f196..7af097929116 100644 --- a/drivers/spi/spi-altera-core.c +++ b/drivers/spi/spi-altera-core.c @@ -219,4 +219,5 @@ void altera_spi_init_host(struct spi_controller *host) } EXPORT_SYMBOL_GPL(altera_spi_init_host); +MODULE_DESCRIPTION("Altera SPI Controller driver core"); MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c index e358ac5b4509..447e5a962dee 100644 --- a/drivers/spi/spi-axi-spi-engine.c +++ b/drivers/spi/spi-axi-spi-engine.c @@ -46,6 +46,7 @@ #define SPI_ENGINE_INST_ASSERT 0x1 #define SPI_ENGINE_INST_WRITE 0x2 #define SPI_ENGINE_INST_MISC 0x3 +#define SPI_ENGINE_INST_CS_INV 0x4 #define SPI_ENGINE_CMD_REG_CLK_DIV 0x0 #define SPI_ENGINE_CMD_REG_CONFIG 0x1 @@ -73,6 +74,8 @@ SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SLEEP, (delay)) #define SPI_ENGINE_CMD_SYNC(id) \ SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SYNC, (id)) +#define SPI_ENGINE_CMD_CS_INV(flags) \ + SPI_ENGINE_CMD(SPI_ENGINE_INST_CS_INV, 0, (flags)) struct 
spi_engine_program { unsigned int length; @@ -111,6 +114,8 @@ struct spi_engine { struct spi_engine_message_state msg_state; struct completion msg_complete; unsigned int int_enable; + /* shadows hardware CS inversion flag state */ + u8 cs_inv; }; static void spi_engine_program_add_cmd(struct spi_engine_program *p, @@ -164,16 +169,20 @@ static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry, } static void spi_engine_gen_sleep(struct spi_engine_program *p, bool dry, - int delay_ns, u32 sclk_hz) + int delay_ns, int inst_ns, u32 sclk_hz) { unsigned int t; - /* negative delay indicates error, e.g. from spi_delay_to_ns() */ - if (delay_ns <= 0) + /* + * Negative delay indicates error, e.g. from spi_delay_to_ns(). And if + * delay is less than the instruction execution time, there is no need + * for an extra sleep instruction since the instruction execution time + * will already cover the required delay. + */ + if (delay_ns < 0 || delay_ns <= inst_ns) return; - /* rounding down since executing the instruction adds a couple of ticks delay */ - t = DIV_ROUND_DOWN_ULL((u64)delay_ns * sclk_hz, NSEC_PER_SEC); + t = DIV_ROUND_UP_ULL((u64)(delay_ns - inst_ns) * sclk_hz, NSEC_PER_SEC); while (t) { unsigned int n = min(t, 256U); @@ -220,10 +229,16 @@ static void spi_engine_compile_message(struct spi_message *msg, bool dry, struct spi_device *spi = msg->spi; struct spi_controller *host = spi->controller; struct spi_transfer *xfer; - int clk_div, new_clk_div; + int clk_div, new_clk_div, inst_ns; bool keep_cs = false; u8 bits_per_word = 0; + /* + * Take into account instruction execution time for more accurate sleep + * times, especially when the delay is small. + */ + inst_ns = DIV_ROUND_UP(NSEC_PER_SEC, host->max_speed_hz); + clk_div = 1; spi_engine_program_add_cmd(p, dry, @@ -252,7 +267,7 @@ static void spi_engine_compile_message(struct spi_message *msg, bool dry, spi_engine_gen_xfer(p, dry, xfer); spi_engine_gen_sleep(p, dry, spi_delay_to_ns(&xfer->delay, xfer), - xfer->effective_speed_hz); + inst_ns, xfer->effective_speed_hz); if (xfer->cs_change) { if (list_is_last(&xfer->transfer_list, &msg->transfers)) { @@ -262,7 +277,7 @@ static void spi_engine_compile_message(struct spi_message *msg, bool dry, spi_engine_gen_cs(p, dry, spi, false); spi_engine_gen_sleep(p, dry, spi_delay_to_ns( - &xfer->cs_change_delay, xfer), + &xfer->cs_change_delay, xfer), inst_ns, xfer->effective_speed_hz); if (!list_next_entry(xfer, transfer_list)->cs_off) @@ -530,6 +545,29 @@ static int spi_engine_unoptimize_message(struct spi_message *msg) return 0; } +static int spi_engine_setup(struct spi_device *device) +{ + struct spi_controller *host = device->controller; + struct spi_engine *spi_engine = spi_controller_get_devdata(host); + + if (device->mode & SPI_CS_HIGH) + spi_engine->cs_inv |= BIT(spi_get_chipselect(device, 0)); + else + spi_engine->cs_inv &= ~BIT(spi_get_chipselect(device, 0)); + + writel_relaxed(SPI_ENGINE_CMD_CS_INV(spi_engine->cs_inv), + spi_engine->base + SPI_ENGINE_REG_CMD_FIFO); + + /* + * In addition to setting the flags, we have to do a CS assert command + * to make the new setting actually take effect. + */
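To make the rounding change in spi_engine_gen_sleep() concrete: at max_speed_hz = 1 MHz one instruction takes inst_ns = 1000 ns, so a requested 3000 ns delay now becomes DIV_ROUND_UP((3000 - 1000) * 1000000, NSEC_PER_SEC) = 2 sleep ticks, where the old code emitted 3 ticks on top of the instruction time. A sketch of the computation (demo_sleep_ticks is illustrative, not driver code):

#include <linux/math.h>
#include <linux/time64.h>
#include <linux/types.h>

static unsigned int demo_sleep_ticks(int delay_ns, int inst_ns, u32 sclk_hz)
{
	/* Instruction time already covers the delay (or the delay is an error). */
	if (delay_ns < 0 || delay_ns <= inst_ns)
		return 0;

	/* Round up: sleeping one tick too long is safe, too short is not. */
	return DIV_ROUND_UP_ULL((u64)(delay_ns - inst_ns) * sclk_hz, NSEC_PER_SEC);
}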
+ writel_relaxed(SPI_ENGINE_CMD_ASSERT(0, 0xff), + spi_engine->base + SPI_ENGINE_REG_CMD_FIFO); + + return 0; +} + static int spi_engine_transfer_one_message(struct spi_controller *host, struct spi_message *msg) { @@ -653,16 +691,16 @@ static int spi_engine_probe(struct platform_device *pdev) host->unoptimize_message = spi_engine_unoptimize_message; host->num_chipselect = 8; + /* Some features depend on the IP core version. */ + if (ADI_AXI_PCORE_VER_MINOR(version) >= 2) { + host->mode_bits |= SPI_CS_HIGH; + host->setup = spi_engine_setup; + } + if (host->max_speed_hz == 0) return dev_err_probe(&pdev->dev, -EINVAL, "spi_clk rate is 0"); - ret = devm_spi_register_controller(&pdev->dev, host); - if (ret) - return ret; - - platform_set_drvdata(pdev, host); - - return 0; + return devm_spi_register_controller(&pdev->dev, host); } static const struct of_device_id spi_engine_match_table[] = { diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c index ca5cc67555c5..afb1b1105ec2 100644 --- a/drivers/spi/spi-bitbang.c +++ b/drivers/spi/spi-bitbang.c @@ -38,33 +38,24 @@ * working quickly, or testing for differences that aren't speed related. */ +typedef u32 (*spi_bb_txrx_word_fn)(struct spi_device *, unsigned int, u32, u8, + unsigned int); +typedef unsigned int (*spi_bb_txrx_bufs_fn)(struct spi_device *, spi_bb_txrx_word_fn, + unsigned int, struct spi_transfer *, + unsigned int); + struct spi_bitbang_cs { - unsigned nsecs; /* (clock cycle time)/2 */ - u32 (*txrx_word)(struct spi_device *spi, unsigned nsecs, - u32 word, u8 bits, unsigned flags); - unsigned (*txrx_bufs)(struct spi_device *, - u32 (*txrx_word)( - struct spi_device *spi, - unsigned nsecs, - u32 word, u8 bits, - unsigned flags), - unsigned, struct spi_transfer *, - unsigned); + unsigned int nsecs; /* (clock cycle time) / 2 */ + spi_bb_txrx_word_fn txrx_word; + spi_bb_txrx_bufs_fn txrx_bufs; }; -static unsigned bitbang_txrx_8( - struct spi_device *spi, - u32 (*txrx_word)(struct spi_device *spi, - unsigned nsecs, - u32 word, u8 bits, - unsigned flags), - unsigned ns, +static unsigned int bitbang_txrx_8(struct spi_device *spi, + spi_bb_txrx_word_fn txrx_word, + unsigned int ns, struct spi_transfer *t, - unsigned flags -) + unsigned int flags) { - unsigned bits = t->bits_per_word; - unsigned count = t->len; + unsigned int bits = t->bits_per_word; + unsigned int count = t->len; const u8 *tx = t->tx_buf; u8 *rx = t->rx_buf; @@ -81,19 +72,14 @@ static unsigned bitbang_txrx_8( return t->len - count; } -static unsigned bitbang_txrx_16( - struct spi_device *spi, - u32 (*txrx_word)(struct spi_device *spi, - unsigned nsecs, - u32 word, u8 bits, - unsigned flags), - unsigned ns, +static unsigned int bitbang_txrx_16(struct spi_device *spi, + spi_bb_txrx_word_fn txrx_word, + unsigned int ns, struct spi_transfer *t, - unsigned flags -) + unsigned int flags) { - unsigned bits = t->bits_per_word; - unsigned count = t->len; + unsigned int bits = t->bits_per_word; + unsigned int count = t->len; const u16 *tx = t->tx_buf; u16 *rx = t->rx_buf; @@ -110,19 +96,14 @@ static unsigned bitbang_txrx_16( return t->len - count; } -static unsigned bitbang_txrx_32( - struct spi_device *spi, - u32 (*txrx_word)(struct spi_device *spi, - unsigned nsecs, - u32 word, u8 bits, - unsigned flags), - unsigned ns, +static unsigned int bitbang_txrx_32(struct spi_device *spi, + spi_bb_txrx_word_fn txrx_word, + unsigned int ns, struct spi_transfer *t, - unsigned flags -) + unsigned int flags) { - unsigned bits = t->bits_per_word; - unsigned count = t->len; + unsigned int bits = t->bits_per_word; + unsigned int count = t->len; const u32 *tx = t->tx_buf; u32 *rx 
= t->rx_buf; @@ -234,7 +215,7 @@ EXPORT_SYMBOL_GPL(spi_bitbang_cleanup); static int spi_bitbang_bufs(struct spi_device *spi, struct spi_transfer *t) { struct spi_bitbang_cs *cs = spi->controller_state; - unsigned nsecs = cs->nsecs; + unsigned int nsecs = cs->nsecs; struct spi_bitbang *bitbang; bitbang = spi_controller_get_devdata(spi->controller); @@ -247,7 +228,7 @@ static int spi_bitbang_bufs(struct spi_device *spi, struct spi_transfer *t) } if (spi->mode & SPI_3WIRE) { - unsigned flags; + unsigned int flags; flags = t->tx_buf ? SPI_CONTROLLER_NO_RX : SPI_CONTROLLER_NO_TX; return cs->txrx_bufs(spi, cs->txrx_word, nsecs, t, flags); diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c index e5140532071d..e07e081de5ea 100644 --- a/drivers/spi/spi-cadence.c +++ b/drivers/spi/spi-cadence.c @@ -18,6 +18,7 @@ #include <linux/of_address.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> +#include <linux/reset.h> #include <linux/spi/spi.h> /* Name of this driver */ @@ -111,6 +112,7 @@ * @dev_busy: Device busy flag * @is_decoded_cs: Flag for decoder property set or not * @tx_fifo_depth: Depth of the TX FIFO + * @rstc: Optional reset control for SPI controller */ struct cdns_spi { void __iomem *regs; @@ -125,6 +127,7 @@ struct cdns_spi { u8 dev_busy; u32 is_decoded_cs; unsigned int tx_fifo_depth; + struct reset_control *rstc; }; /* Macros for the SPI controller read/write */ @@ -588,14 +591,24 @@ static int cdns_spi_probe(struct platform_device *pdev) goto remove_ctlr; } - if (!spi_controller_is_target(ctlr)) { - xspi->ref_clk = devm_clk_get_enabled(&pdev->dev, "ref_clk"); - if (IS_ERR(xspi->ref_clk)) { - dev_err(&pdev->dev, "ref_clk clock not found.\n"); - ret = PTR_ERR(xspi->ref_clk); - goto remove_ctlr; - } + xspi->rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, "spi"); + if (IS_ERR(xspi->rstc)) { + ret = dev_err_probe(&pdev->dev, PTR_ERR(xspi->rstc), + "Cannot get SPI reset.\n"); + goto remove_ctlr; + } + + reset_control_assert(xspi->rstc); + reset_control_deassert(xspi->rstc); + xspi->ref_clk = devm_clk_get_enabled(&pdev->dev, "ref_clk"); + if (IS_ERR(xspi->ref_clk)) { + dev_err(&pdev->dev, "ref_clk clock not found.\n"); + ret = PTR_ERR(xspi->ref_clk); + goto remove_ctlr; + } + + if (!spi_controller_is_target(ctlr)) { pm_runtime_use_autosuspend(&pdev->dev); pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT); pm_runtime_get_noresume(&pdev->dev); diff --git a/drivers/spi/spi-ch341.c b/drivers/spi/spi-ch341.c new file mode 100644 index 000000000000..d2351812d310 --- /dev/null +++ b/drivers/spi/spi-ch341.c @@ -0,0 +1,241 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// QiHeng Electronics ch341a USB-to-SPI adapter driver +// +// Copyright (C) 2024 Johannes Thumshirn <jth@kernel.org> +// +// Based on ch341a_spi.c from the flashrom project. 
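The new spi-ch341 driver that follows frames everything in fixed 32-byte bulk packets: byte 0 selects the command stream (0xa8 for SPI data) and the remaining bytes carry the payload, so one transfer moves at most 31 data bytes and the device echoes len - 1 bytes of MISO data back on the bulk-in pipe. A sketch of the packet layout under those assumptions (the demo_* names are illustrative, not from the driver):

#include <linux/minmax.h>
#include <linux/string.h>
#include <linux/types.h>

#define DEMO_PACKET_LENGTH	32	/* CH341_PACKET_LENGTH in the driver */
#define DEMO_CMD_SPI_STREAM	0xa8	/* CH341A_CMD_SPI_STREAM */

/*
 * Build one bulk-out packet: command byte followed by up to 31 payload
 * bytes. Returns the number of bytes to send over the bulk pipe; the
 * adapter then returns (len - 1) bytes of MISO data on the bulk-in pipe.
 */
static size_t demo_build_spi_packet(u8 *pkt, const u8 *tx, size_t tx_len)
{
	size_t len = min_t(size_t, DEMO_PACKET_LENGTH, tx_len + 1);

	memset(pkt, 0, DEMO_PACKET_LENGTH);
	pkt[0] = DEMO_CMD_SPI_STREAM;
	memcpy(pkt + 1, tx, len - 1);

	return len;
}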
+ +#include <linux/module.h> +#include <linux/usb.h> +#include <linux/spi/spi.h> + +#define CH341_PACKET_LENGTH 32 +#define CH341_DEFAULT_TIMEOUT 1000 + +#define CH341A_CMD_UIO_STREAM 0xab + +#define CH341A_CMD_UIO_STM_END 0x20 +#define CH341A_CMD_UIO_STM_DIR 0x40 +#define CH341A_CMD_UIO_STM_OUT 0x80 + +#define CH341A_CMD_I2C_STREAM 0xaa +#define CH341A_CMD_I2C_STM_SET 0x60 +#define CH341A_CMD_I2C_STM_END 0x00 + +#define CH341A_CMD_SPI_STREAM 0xa8 + +#define CH341A_STM_I2C_100K 0x01 + +struct ch341_spi_dev { + struct spi_controller *ctrl; + struct usb_device *udev; + unsigned int write_pipe; + unsigned int read_pipe; + int rx_len; + void *rx_buf; + u8 *tx_buf; + struct urb *rx_urb; + struct spi_device *spidev; +}; + +static void ch341_set_cs(struct spi_device *spi, bool is_high) +{ + struct ch341_spi_dev *ch341 = + spi_controller_get_devdata(spi->controller); + int err; + + memset(ch341->tx_buf, 0, CH341_PACKET_LENGTH); + ch341->tx_buf[0] = CH341A_CMD_UIO_STREAM; + ch341->tx_buf[1] = CH341A_CMD_UIO_STM_OUT | (is_high ? 0x36 : 0x37); + + if (is_high) { + ch341->tx_buf[2] = CH341A_CMD_UIO_STM_DIR | 0x3f; + ch341->tx_buf[3] = CH341A_CMD_UIO_STM_END; + } else { + ch341->tx_buf[2] = CH341A_CMD_UIO_STM_END; + } + + err = usb_bulk_msg(ch341->udev, ch341->write_pipe, ch341->tx_buf, + (is_high ? 4 : 3), NULL, CH341_DEFAULT_TIMEOUT); + if (err) + dev_err(&spi->dev, + "error sending USB message for setting CS (%d)\n", err); +} + +static int ch341_transfer_one(struct spi_controller *host, + struct spi_device *spi, + struct spi_transfer *trans) +{ + struct ch341_spi_dev *ch341 = + spi_controller_get_devdata(spi->controller); + int len; + int ret; + + len = min(CH341_PACKET_LENGTH, trans->len + 1); + + memset(ch341->tx_buf, 0, CH341_PACKET_LENGTH); + + ch341->tx_buf[0] = CH341A_CMD_SPI_STREAM; + + memcpy(ch341->tx_buf + 1, trans->tx_buf, len); + + ret = usb_bulk_msg(ch341->udev, ch341->write_pipe, ch341->tx_buf, len, + NULL, CH341_DEFAULT_TIMEOUT); + if (ret) + return ret; + + return usb_bulk_msg(ch341->udev, ch341->read_pipe, trans->rx_buf, + len - 1, NULL, CH341_DEFAULT_TIMEOUT); +} + +static void ch341_recv(struct urb *urb) +{ + struct ch341_spi_dev *ch341 = urb->context; + struct usb_device *udev = ch341->udev; + + switch (urb->status) { + case 0: + /* success */ + break; + case -ENOENT: + case -ECONNRESET: + case -EPIPE: + case -ESHUTDOWN: + dev_dbg(&udev->dev, "rx urb terminated with status: %d\n", + urb->status); + return; + default: + dev_dbg(&udev->dev, "rx urb error: %d\n", urb->status); + break; + } +} + +static int ch341_config_stream(struct ch341_spi_dev *ch341) +{ + memset(ch341->tx_buf, 0, CH341_PACKET_LENGTH); + ch341->tx_buf[0] = CH341A_CMD_I2C_STREAM; + ch341->tx_buf[1] = CH341A_CMD_I2C_STM_SET | CH341A_STM_I2C_100K; + ch341->tx_buf[2] = CH341A_CMD_I2C_STM_END; + + return usb_bulk_msg(ch341->udev, ch341->write_pipe, ch341->tx_buf, 3, + NULL, CH341_DEFAULT_TIMEOUT); +} + +static int ch341_enable_pins(struct ch341_spi_dev *ch341, bool enable) +{ + memset(ch341->tx_buf, 0, CH341_PACKET_LENGTH); + ch341->tx_buf[0] = CH341A_CMD_UIO_STREAM; + ch341->tx_buf[1] = CH341A_CMD_UIO_STM_OUT | 0x37; + ch341->tx_buf[2] = CH341A_CMD_UIO_STM_DIR | (enable ? 
0x3f : 0x00); + ch341->tx_buf[3] = CH341A_CMD_UIO_STM_END; + + return usb_bulk_msg(ch341->udev, ch341->write_pipe, ch341->tx_buf, 4, + NULL, CH341_DEFAULT_TIMEOUT); +} + +static struct spi_board_info chip = { + .modalias = "spi-ch341a", +}; + +static int ch341_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + struct usb_device *udev = interface_to_usbdev(intf); + struct usb_endpoint_descriptor *in, *out; + struct ch341_spi_dev *ch341; + struct spi_controller *ctrl; + int ret; + + ret = usb_find_common_endpoints(intf->cur_altsetting, &in, &out, NULL, + NULL); + if (ret) + return ret; + + ctrl = devm_spi_alloc_master(&udev->dev, sizeof(struct ch341_spi_dev)); + if (!ctrl) + return -ENOMEM; + + ch341 = spi_controller_get_devdata(ctrl); + ch341->ctrl = ctrl; + ch341->udev = udev; + ch341->write_pipe = usb_sndbulkpipe(udev, usb_endpoint_num(out)); + ch341->read_pipe = usb_rcvbulkpipe(udev, usb_endpoint_num(in)); + + ch341->rx_len = usb_endpoint_maxp(in); + ch341->rx_buf = devm_kzalloc(&udev->dev, ch341->rx_len, GFP_KERNEL); + if (!ch341->rx_buf) + return -ENOMEM; + + ch341->rx_urb = usb_alloc_urb(0, GFP_KERNEL); + if (!ch341->rx_urb) + return -ENOMEM; + + ch341->tx_buf = + devm_kzalloc(&udev->dev, CH341_PACKET_LENGTH, GFP_KERNEL); + if (!ch341->tx_buf) + return -ENOMEM; + + usb_fill_bulk_urb(ch341->rx_urb, udev, ch341->read_pipe, ch341->rx_buf, + ch341->rx_len, ch341_recv, ch341); + + ret = usb_submit_urb(ch341->rx_urb, GFP_KERNEL); + if (ret) { + usb_free_urb(ch341->rx_urb); + return -ENOMEM; + } + + ctrl->bus_num = -1; + ctrl->mode_bits = SPI_CPHA; + ctrl->transfer_one = ch341_transfer_one; + ctrl->set_cs = ch341_set_cs; + ctrl->auto_runtime_pm = false; + + usb_set_intfdata(intf, ch341); + + ret = ch341_config_stream(ch341); + if (ret) + return ret; + + ret = ch341_enable_pins(ch341, true); + if (ret) + return ret; + + ret = spi_register_controller(ctrl); + if (ret) + return ret; + + ch341->spidev = spi_new_device(ctrl, &chip); + if (!ch341->spidev) + return -ENOMEM; + + return 0; +} + +static void ch341_disconnect(struct usb_interface *intf) +{ + struct ch341_spi_dev *ch341 = usb_get_intfdata(intf); + + spi_unregister_device(ch341->spidev); + spi_unregister_controller(ch341->ctrl); + ch341_enable_pins(ch341, false); + usb_free_urb(ch341->rx_urb); +} + +static const struct usb_device_id ch341_id_table[] = { + { USB_DEVICE(0x1a86, 0x5512) }, + { } +}; +MODULE_DEVICE_TABLE(usb, ch341_id_table); + +static struct usb_driver ch341a_usb_driver = { + .name = "spi-ch341", + .probe = ch341_probe, + .disconnect = ch341_disconnect, + .id_table = ch341_id_table, +}; +module_usb_driver(ch341a_usb_driver); + +MODULE_AUTHOR("Johannes Thumshirn <jth@kernel.org>"); +MODULE_DESCRIPTION("QiHeng Electronics ch341 USB2SPI"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/spi/spi-cs42l43.c b/drivers/spi/spi-cs42l43.c index 8b618ef0f711..5b8ed65f8094 100644 --- a/drivers/spi/spi-cs42l43.c +++ b/drivers/spi/spi-cs42l43.c @@ -9,6 +9,7 @@ #include <linux/array_size.h> #include <linux/bits.h> #include <linux/bitfield.h> +#include <linux/cleanup.h> #include <linux/device.h> #include <linux/errno.h> #include <linux/gpio/machine.h> @@ -44,28 +45,10 @@ static const unsigned int cs42l43_clock_divs[] = { 2, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30 }; -static const struct software_node ampl = { - .name = "cs35l56-left", -}; - -static const struct software_node ampr = { - .name = "cs35l56-right", -}; - -static struct spi_board_info ampl_info = { - .modalias = "cs35l56", - .max_speed_hz = 
11 * HZ_PER_MHZ, - .chip_select = 0, - .mode = SPI_MODE_0, - .swnode = &ampl, -}; - -static struct spi_board_info ampr_info = { +static struct spi_board_info amp_info_template = { .modalias = "cs35l56", .max_speed_hz = 11 * HZ_PER_MHZ, - .chip_select = 1, .mode = SPI_MODE_0, - .swnode = &ampr, }; static const struct software_node cs42l43_gpiochip_swnode = { @@ -246,11 +229,10 @@ static size_t cs42l43_spi_max_length(struct spi_device *spi) return CS42L43_SPI_MAX_LENGTH; } -static bool cs42l43_has_sidecar(struct fwnode_handle *fwnode) +static struct fwnode_handle *cs42l43_find_xu_node(struct fwnode_handle *fwnode) { static const u32 func_smart_amp = 0x1; struct fwnode_handle *child_fwnode, *ext_fwnode; - unsigned int val; u32 function; int ret; @@ -266,21 +248,45 @@ static bool cs42l43_has_sidecar(struct fwnode_handle *fwnode) if (!ext_fwnode) continue; - ret = fwnode_property_read_u32(ext_fwnode, - "01fa-sidecar-instances", - &val); + fwnode_handle_put(child_fwnode); - fwnode_handle_put(ext_fwnode); + return ext_fwnode; + } - if (ret) - continue; + return NULL; +} - fwnode_handle_put(child_fwnode); +static struct spi_board_info *cs42l43_create_bridge_amp(struct cs42l43_spi *priv, + const char * const name, + int cs, int spkid) +{ + struct property_entry *props = NULL; + struct software_node *swnode; + struct spi_board_info *info; - return !!val; + if (spkid >= 0) { + props = devm_kmalloc(priv->dev, sizeof(*props), GFP_KERNEL); + if (!props) + return NULL; + + *props = PROPERTY_ENTRY_U32("cirrus,speaker-id", spkid); } - return false; + swnode = devm_kmalloc(priv->dev, sizeof(*swnode), GFP_KERNEL); + if (!swnode) + return NULL; + + *swnode = SOFTWARE_NODE(name, props, NULL); + + info = devm_kmemdup(priv->dev, &amp_info_template, + sizeof(amp_info_template), GFP_KERNEL); + if (!info) + return NULL; + + info->chip_select = cs; + info->swnode = swnode; + + return info; } static void cs42l43_release_of_node(void *data) @@ -298,7 +304,8 @@ static int cs42l43_spi_probe(struct platform_device *pdev) struct cs42l43 *cs42l43 = dev_get_drvdata(pdev->dev.parent); struct cs42l43_spi *priv; struct fwnode_handle *fwnode = dev_fwnode(cs42l43->dev); - bool has_sidecar = cs42l43_has_sidecar(fwnode); + struct fwnode_handle *xu_fwnode __free(fwnode_handle) = cs42l43_find_xu_node(fwnode); + int nsidecars = 0; int ret; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); @@ -350,7 +357,9 @@ static int cs42l43_spi_probe(struct platform_device *pdev) return ret; } - if (has_sidecar) { + fwnode_property_read_u32(xu_fwnode, "01fa-sidecar-instances", &nsidecars); + + if (nsidecars) { ret = software_node_register(&cs42l43_gpiochip_swnode); if (ret) return dev_err_probe(priv->dev, ret, @@ -373,12 +382,28 @@ static int cs42l43_spi_probe(struct platform_device *pdev) return dev_err_probe(priv->dev, ret, "Failed to register SPI controller\n"); - if (has_sidecar) { - if (!spi_new_device(priv->ctlr, &ampl_info)) + if (nsidecars) { + struct spi_board_info *ampl_info; + struct spi_board_info *ampr_info; + int spkid = -EINVAL; + + fwnode_property_read_u32(xu_fwnode, "01fa-spk-id-val", &spkid); + + dev_dbg(priv->dev, "Found speaker ID %d\n", spkid); + + ampl_info = cs42l43_create_bridge_amp(priv, "cs35l56-left", 0, spkid); + if (!ampl_info) + return -ENOMEM; + + ampr_info = cs42l43_create_bridge_amp(priv, "cs35l56-right", 1, spkid); + if (!ampr_info) + return -ENOMEM; + + if (!spi_new_device(priv->ctlr, ampl_info)) return dev_err_probe(priv->dev, -ENODEV, "Failed to create left amp slave\n"); - if (!spi_new_device(priv->ctlr, 
&ampr_info)) + if (!spi_new_device(priv->ctlr, ampr_info)) return dev_err_probe(priv->dev, -ENODEV, "Failed to create right amp slave\n"); } diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c index be3998104bfb..f7e8b5efa50e 100644 --- a/drivers/spi/spi-davinci.c +++ b/drivers/spi/spi-davinci.c @@ -984,6 +984,9 @@ static int davinci_spi_probe(struct platform_device *pdev) return ret; free_dma: + /* This bit needs to be cleared to disable dspi->clk */ + clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK); + if (dspi->dma_rx) { dma_release_channel(dspi->dma_rx); dma_release_channel(dspi->dma_tx); @@ -1013,6 +1016,9 @@ static void davinci_spi_remove(struct platform_device *pdev) spi_bitbang_stop(&dspi->bitbang); + /* This bit needs to be cleared to disable dspi->clk */ + clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK); + if (dspi->dma_rx) { dma_release_channel(dspi->dma_rx); dma_release_channel(dspi->dma_tx); } diff --git a/drivers/spi/spi-dw-bt1.c b/drivers/spi/spi-dw-bt1.c index 5391bcac305c..4577e8096cd9 100644 --- a/drivers/spi/spi-dw-bt1.c +++ b/drivers/spi/spi-dw-bt1.c @@ -55,13 +55,15 @@ static int dw_spi_bt1_dirmap_create(struct spi_mem_dirmap_desc *desc) !dwsbt1->dws.mem_ops.supports_op(desc->mem, &desc->info.op_tmpl)) return -EOPNOTSUPP; + if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN) + return -EOPNOTSUPP; + /* * Make sure the requested region doesn't go out of the physically - * mapped flash memory bounds and the operation is read-only. + * mapped flash memory bounds. */ - if (desc->info.offset + desc->info.length > dwsbt1->map_len || - desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN) - return -EOPNOTSUPP; + if (desc->info.offset + desc->info.length > dwsbt1->map_len) + return -EINVAL; return 0; } diff --git a/drivers/spi/spi-dw-core.c b/drivers/spi/spi-dw-core.c index ddfdb903047a..431788dd848c 100644 --- a/drivers/spi/spi-dw-core.c +++ b/drivers/spi/spi-dw-core.c @@ -19,6 +19,7 @@ #include <linux/string.h> #include <linux/of.h> +#include "internals.h" #include "spi-dw.h" #ifdef CONFIG_DEBUG_FS @@ -438,8 +439,7 @@ static int dw_spi_transfer_one(struct spi_controller *host, transfer->effective_speed_hz = dws->current_freq; /* Check if current transfer is a DMA transaction */ - if (host->can_dma && host->can_dma(host, spi, transfer)) - dws->dma_mapped = host->cur_msg_mapped; + dws->dma_mapped = spi_xfer_is_dma_mapped(host, spi, transfer); /* For poll mode just disable all interrupts */ dw_spi_mask_intr(dws, 0xff); diff --git a/drivers/spi/spi-fsl-cpm.c b/drivers/spi/spi-fsl-cpm.c index e335132080bf..23ad1249f121 100644 --- a/drivers/spi/spi-fsl-cpm.c +++ b/drivers/spi/spi-fsl-cpm.c @@ -415,4 +415,5 @@ void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi) } EXPORT_SYMBOL_GPL(fsl_spi_cpm_free); +MODULE_DESCRIPTION("Freescale SPI controller driver CPM functions"); MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 0a2730cd07c6..191de1917f83 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -1006,6 +1006,7 @@ static int dspi_setup(struct spi_device *spi) struct chip_data *chip; unsigned long clkrate; bool cs = true; + int val; /* Only alloc on first setup */ chip = spi_get_ctldata(spi); @@ -1018,11 +1019,19 @@ static int dspi_setup(struct spi_device *spi) pdata = dev_get_platdata(&dspi->pdev->dev); if (!pdata) { - of_property_read_u32(spi->dev.of_node, "fsl,spi-cs-sck-delay", - &cs_sck_delay); - - of_property_read_u32(spi->dev.of_node, "fsl,spi-sck-cs-delay", - &sck_cs_delay); + val = 
spi_delay_to_ns(&spi->cs_setup, NULL); + cs_sck_delay = val >= 0 ? val : 0; + if (!cs_sck_delay) + of_property_read_u32(spi->dev.of_node, + "fsl,spi-cs-sck-delay", + &cs_sck_delay); + + val = spi_delay_to_ns(&spi->cs_hold, NULL); + sck_cs_delay = val >= 0 ? val : 0; + if (!sck_cs_delay) + of_property_read_u32(spi->dev.of_node, + "fsl,spi-sck-cs-delay", + &sck_cs_delay); } else { cs_sck_delay = pdata->cs_sck_delay; sck_cs_delay = pdata->sck_cs_delay; diff --git a/drivers/spi/spi-fsl-lib.c b/drivers/spi/spi-fsl-lib.c index 4fc2c56555b5..bb7a625db5b0 100644 --- a/drivers/spi/spi-fsl-lib.c +++ b/drivers/spi/spi-fsl-lib.c @@ -158,4 +158,5 @@ int of_mpc8xxx_spi_probe(struct platform_device *ofdev) } EXPORT_SYMBOL_GPL(of_mpc8xxx_spi_probe); +MODULE_DESCRIPTION("Freescale SPI/eSPI controller driver library"); MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c index aa5ed254be46..32baa14dfd83 100644 --- a/drivers/spi/spi-fsl-lpspi.c +++ b/drivers/spi/spi-fsl-lpspi.c @@ -960,13 +960,13 @@ static void fsl_lpspi_remove(struct platform_device *pdev) pm_runtime_disable(fsl_lpspi->dev); } -static int __maybe_unused fsl_lpspi_suspend(struct device *dev) +static int fsl_lpspi_suspend(struct device *dev) { pinctrl_pm_select_sleep_state(dev); return pm_runtime_force_suspend(dev); } -static int __maybe_unused fsl_lpspi_resume(struct device *dev) +static int fsl_lpspi_resume(struct device *dev) { int ret; @@ -984,14 +984,14 @@ static int __maybe_unused fsl_lpspi_resume(struct device *dev) static const struct dev_pm_ops fsl_lpspi_pm_ops = { SET_RUNTIME_PM_OPS(fsl_lpspi_runtime_suspend, fsl_lpspi_runtime_resume, NULL) - SET_SYSTEM_SLEEP_PM_OPS(fsl_lpspi_suspend, fsl_lpspi_resume) + SYSTEM_SLEEP_PM_OPS(fsl_lpspi_suspend, fsl_lpspi_resume) }; static struct platform_driver fsl_lpspi_driver = { .driver = { .name = DRIVER_NAME, .of_match_table = fsl_lpspi_dt_ids, - .pm = &fsl_lpspi_pm_ops, + .pm = pm_ptr(&fsl_lpspi_pm_ops), }, .probe = fsl_lpspi_probe, .remove_new = fsl_lpspi_remove, diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c index 909cce109bba..36c587be9e28 100644 --- a/drivers/spi/spi-gpio.c +++ b/drivers/spi/spi-gpio.c @@ -5,17 +5,17 @@ * Copyright (C) 2006,2008 David Brownell * Copyright (C) 2017 Linus Walleij */ +#include <linux/gpio/consumer.h> #include <linux/kernel.h> +#include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/platform_device.h> -#include <linux/gpio/consumer.h> -#include <linux/of.h> +#include <linux/property.h> #include <linux/spi/spi.h> #include <linux/spi/spi_bitbang.h> #include <linux/spi/spi_gpio.h> - /* * This bitbanging SPI host driver should help make systems usable * when a native hardware SPI engine is not available, perhaps because @@ -239,8 +239,8 @@ static void spi_gpio_chipselect(struct spi_device *spi, int is_active) static int spi_gpio_setup(struct spi_device *spi) { struct gpio_desc *cs; - int status = 0; struct spi_gpio *spi_gpio = spi_to_spi_gpio(spi); + int ret; /* * The CS GPIOs have already been @@ -248,15 +248,14 @@ static int spi_gpio_setup(struct spi_device *spi) */ if (spi_gpio->cs_gpios) { cs = spi_gpio->cs_gpios[spi_get_chipselect(spi, 0)]; - if (!spi->controller_state && cs) - status = gpiod_direction_output(cs, - !(spi->mode & SPI_CS_HIGH)); + if (!spi->controller_state && cs) { + ret = gpiod_direction_output(cs, !(spi->mode & SPI_CS_HIGH)); + if (ret) + return ret; + } } - if (!status) - status = spi_bitbang_setup(spi); - - return status; + return spi_bitbang_setup(spi); } static int 
spi_gpio_set_direction(struct spi_device *spi, bool output) @@ -326,29 +325,6 @@ static int spi_gpio_request(struct device *dev, struct spi_gpio *spi_gpio) return PTR_ERR_OR_ZERO(spi_gpio->sck); } -#ifdef CONFIG_OF -static const struct of_device_id spi_gpio_dt_ids[] = { - { .compatible = "spi-gpio" }, - {} -}; -MODULE_DEVICE_TABLE(of, spi_gpio_dt_ids); - -static int spi_gpio_probe_dt(struct platform_device *pdev, - struct spi_controller *host) -{ - host->dev.of_node = pdev->dev.of_node; - host->use_gpio_descriptors = true; - - return 0; -} -#else -static inline int spi_gpio_probe_dt(struct platform_device *pdev, - struct spi_controller *host) -{ - return 0; -} -#endif - static int spi_gpio_probe_pdata(struct platform_device *pdev, struct spi_controller *host) { @@ -389,19 +365,21 @@ static int spi_gpio_probe(struct platform_device *pdev) struct spi_controller *host; struct spi_gpio *spi_gpio; struct device *dev = &pdev->dev; + struct fwnode_handle *fwnode = dev_fwnode(dev); struct spi_bitbang *bb; host = devm_spi_alloc_host(dev, sizeof(*spi_gpio)); if (!host) return -ENOMEM; - if (pdev->dev.of_node) - status = spi_gpio_probe_dt(pdev, host); - else + if (fwnode) { + device_set_node(&host->dev, fwnode); + host->use_gpio_descriptors = true; + } else { status = spi_gpio_probe_pdata(pdev, host); - - if (status) - return status; + if (status) + return status; + } spi_gpio = spi_controller_get_devdata(host); @@ -459,10 +437,16 @@ static int spi_gpio_probe(struct platform_device *pdev) MODULE_ALIAS("platform:" DRIVER_NAME); +static const struct of_device_id spi_gpio_dt_ids[] = { + { .compatible = "spi-gpio" }, + {} +}; +MODULE_DEVICE_TABLE(of, spi_gpio_dt_ids); + static struct platform_driver spi_gpio_driver = { .driver = { .name = DRIVER_NAME, - .of_match_table = of_match_ptr(spi_gpio_dt_ids), + .of_match_table = spi_gpio_dt_ids, }, .probe = spi_gpio_probe, }; diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index 33164ebdb583..85bd1a82a34e 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c @@ -1050,7 +1050,7 @@ static struct spi_imx_devtype_data imx35_cspi_devtype_data = { .rx_available = mx31_rx_available, .reset = mx31_reset, .fifo_size = 8, - .has_dmamode = true, + .has_dmamode = false, .dynamic_burst = false, .has_targetmode = false, .devtype = IMX35_CSPI, @@ -1656,10 +1656,6 @@ static int spi_imx_setup(struct spi_device *spi) return 0; } -static void spi_imx_cleanup(struct spi_device *spi) -{ -} - static int spi_imx_prepare_message(struct spi_controller *controller, struct spi_message *msg) { @@ -1756,7 +1752,6 @@ static int spi_imx_probe(struct platform_device *pdev) controller->transfer_one = spi_imx_transfer_one; controller->setup = spi_imx_setup; - controller->cleanup = spi_imx_cleanup; controller->prepare_message = spi_imx_prepare_message; controller->unprepare_message = spi_imx_unprepare_message; controller->target_abort = spi_imx_target_abort; @@ -1903,7 +1898,7 @@ static void spi_imx_remove(struct platform_device *pdev) spi_imx_sdma_exit(spi_imx); } -static int __maybe_unused spi_imx_runtime_resume(struct device *dev) +static int spi_imx_runtime_resume(struct device *dev) { struct spi_controller *controller = dev_get_drvdata(dev); struct spi_imx_data *spi_imx; @@ -1924,7 +1919,7 @@ static int __maybe_unused spi_imx_runtime_resume(struct device *dev) return 0; } -static int __maybe_unused spi_imx_runtime_suspend(struct device *dev) +static int spi_imx_runtime_suspend(struct device *dev) { struct spi_controller *controller = dev_get_drvdata(dev); struct 
spi_imx_data *spi_imx; @@ -1937,29 +1932,28 @@ static int __maybe_unused spi_imx_runtime_suspend(struct device *dev) return 0; } -static int __maybe_unused spi_imx_suspend(struct device *dev) +static int spi_imx_suspend(struct device *dev) { pinctrl_pm_select_sleep_state(dev); return 0; } -static int __maybe_unused spi_imx_resume(struct device *dev) +static int spi_imx_resume(struct device *dev) { pinctrl_pm_select_default_state(dev); return 0; } static const struct dev_pm_ops imx_spi_pm = { - SET_RUNTIME_PM_OPS(spi_imx_runtime_suspend, - spi_imx_runtime_resume, NULL) - SET_SYSTEM_SLEEP_PM_OPS(spi_imx_suspend, spi_imx_resume) + RUNTIME_PM_OPS(spi_imx_runtime_suspend, spi_imx_runtime_resume, NULL) + SYSTEM_SLEEP_PM_OPS(spi_imx_suspend, spi_imx_resume) }; static struct platform_driver spi_imx_driver = { .driver = { .name = DRIVER_NAME, .of_match_table = spi_imx_dt_ids, - .pm = &imx_spi_pm, + .pm = pm_ptr(&imx_spi_pm), }, .probe = spi_imx_probe, .remove_new = spi_imx_remove, diff --git a/drivers/spi/spi-ingenic.c b/drivers/spi/spi-ingenic.c index 003a6d21c4c3..318b0768701e 100644 --- a/drivers/spi/spi-ingenic.c +++ b/drivers/spi/spi-ingenic.c @@ -16,6 +16,7 @@ #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/spi/spi.h> +#include "internals.h" #define REG_SSIDR 0x0 #define REG_SSICR0 0x4 @@ -242,11 +243,10 @@ static int spi_ingenic_transfer_one(struct spi_controller *ctlr, { struct ingenic_spi *priv = spi_controller_get_devdata(ctlr); unsigned int bits = xfer->bits_per_word ?: spi->bits_per_word; - bool can_dma = ctlr->can_dma && ctlr->can_dma(ctlr, spi, xfer); spi_ingenic_prepare_transfer(priv, spi, xfer); - if (ctlr->cur_msg_mapped && can_dma) + if (spi_xfer_is_dma_mapped(ctlr, spi, xfer)) return spi_ingenic_dma_tx(ctlr, xfer, bits); if (bits > 16) diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c index fc75492e50ff..8838a98b04c2 100644 --- a/drivers/spi/spi-meson-spicc.c +++ b/drivers/spi/spi-meson-spicc.c @@ -514,7 +514,9 @@ static int meson_spicc_prepare_message(struct spi_controller *host, /* Setup no wait cycles by default */ writel_relaxed(0, spicc->base + SPICC_PERIODREG); - writel_bits_relaxed(SPICC_LBC_W1, 0, spicc->base + SPICC_TESTREG); + writel_bits_relaxed(SPICC_LBC_W1, + spi->mode & SPI_LOOP ? 
SPICC_LBC_W1 : 0, + spicc->base + SPICC_TESTREG); return 0; } @@ -644,11 +646,13 @@ static int meson_spicc_pow2_clk_init(struct meson_spicc_device *spicc) snprintf(name, sizeof(name), "%s#pow2_fixed_div", dev_name(dev)); init.name = name; init.ops = &clk_fixed_factor_ops; - init.flags = 0; - if (spicc->data->has_pclk) + if (spicc->data->has_pclk) { + init.flags = CLK_SET_RATE_PARENT; parent_data[0].hw = __clk_get_hw(spicc->pclk); - else + } else { + init.flags = 0; parent_data[0].hw = __clk_get_hw(spicc->core); + } init.num_parents = 1; pow2_fixed_div->mult = 1, @@ -708,11 +712,13 @@ static int meson_spicc_enh_clk_init(struct meson_spicc_device *spicc) snprintf(name, sizeof(name), "%s#enh_fixed_div", dev_name(dev)); init.name = name; init.ops = &clk_fixed_factor_ops; - init.flags = 0; - if (spicc->data->has_pclk) + if (spicc->data->has_pclk) { + init.flags = CLK_SET_RATE_PARENT; parent_data[0].hw = __clk_get_hw(spicc->pclk); - else + } else { + init.flags = 0; parent_data[0].hw = __clk_get_hw(spicc->core); + } init.num_parents = 1; enh_fixed_div->mult = 1, @@ -846,7 +852,7 @@ static int meson_spicc_probe(struct platform_device *pdev) host->num_chipselect = 4; host->dev.of_node = pdev->dev.of_node; - host->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH; + host->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LOOP; host->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(24) | SPI_BPW_MASK(16) | diff --git a/drivers/spi/spi-microchip-core.c b/drivers/spi/spi-microchip-core.c index 634364c7cfe6..6246254e1dff 100644 --- a/drivers/spi/spi-microchip-core.c +++ b/drivers/spi/spi-microchip-core.c @@ -21,7 +21,7 @@ #include <linux/spi/spi.h> #define MAX_LEN (0xffff) -#define MAX_CS (8) +#define MAX_CS (1) #define DEFAULT_FRAMESIZE (8) #define FIFO_DEPTH (32) #define CLK_GEN_MODE1_MAX (255) @@ -258,6 +258,9 @@ static int mchp_corespi_setup(struct spi_device *spi) struct mchp_corespi *corespi = spi_controller_get_devdata(spi->controller); u32 reg; + if (spi_is_csgpiod(spi)) + return 0; + /* * Active high targets need to be specifically set to their inactive * states during probe by adding them to the "control group" & thus @@ -516,6 +519,7 @@ static int mchp_corespi_probe(struct platform_device *pdev) host->num_chipselect = num_cs; host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + host->use_gpio_descriptors = true; host->setup = mchp_corespi_setup; host->bits_per_word_mask = SPI_BPW_MASK(8); host->transfer_one = mchp_corespi_transfer_one; diff --git a/drivers/spi/spi-mux.c b/drivers/spi/spi-mux.c index 5d72e3d59df8..c02c4204442f 100644 --- a/drivers/spi/spi-mux.c +++ b/drivers/spi/spi-mux.c @@ -158,12 +158,14 @@ static int spi_mux_probe(struct spi_device *spi) /* supported modes are the same as our parent's */ ctlr->mode_bits = spi->controller->mode_bits; ctlr->flags = spi->controller->flags; + ctlr->bits_per_word_mask = spi->controller->bits_per_word_mask; ctlr->transfer_one_message = spi_mux_transfer_one_message; ctlr->setup = spi_mux_setup; ctlr->num_chipselect = mux_control_states(priv->mux); ctlr->bus_num = -1; ctlr->dev.of_node = spi->dev.of_node; ctlr->must_async = true; + ctlr->defer_optimize_message = true; ret = devm_spi_register_controller(&spi->dev, ctlr); if (ret) diff --git a/drivers/spi/spi-mxic.c b/drivers/spi/spi-mxic.c index 60c9f3048ac9..6156d691630a 100644 --- a/drivers/spi/spi-mxic.c +++ b/drivers/spi/spi-mxic.c @@ -496,7 +496,7 @@ static int mxic_spi_mem_dirmap_create(struct spi_mem_dirmap_desc *desc) struct mxic_spi *mxic = 
spi_controller_get_devdata(desc->mem->spi->controller); if (!mxic->linear.map) - return -EINVAL; + return -EOPNOTSUPP; if (desc->info.offset + desc->info.length > U32_MAX) return -EINVAL; diff --git a/drivers/spi/spi-omap-uwire.c b/drivers/spi/spi-omap-uwire.c index 210a98d903fa..03b820e85651 100644 --- a/drivers/spi/spi-omap-uwire.c +++ b/drivers/spi/spi-omap-uwire.c @@ -541,5 +541,6 @@ static void __exit omap_uwire_exit(void) subsys_initcall(omap_uwire_init); module_exit(omap_uwire_exit); +MODULE_DESCRIPTION("MicroWire interface driver for OMAP"); MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c index 7e3083b83534..2c043817c66a 100644 --- a/drivers/spi/spi-omap2-mcspi.c +++ b/drivers/spi/spi-omap2-mcspi.c @@ -27,6 +27,8 @@ #include <linux/spi/spi.h> +#include "internals.h" + #include <linux/platform_data/spi-omap2-mcspi.h> #define OMAP2_MCSPI_MAX_FREQ 48000000 @@ -1208,8 +1210,7 @@ static int omap2_mcspi_transfer_one(struct spi_controller *ctlr, unsigned count; if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) && - ctlr->cur_msg_mapped && - ctlr->can_dma(ctlr, spi, t)) + spi_xfer_is_dma_mapped(ctlr, spi, t)) omap2_mcspi_set_fifo(spi, t, 1); omap2_mcspi_set_enable(spi, 1); @@ -1220,8 +1221,7 @@ static int omap2_mcspi_transfer_one(struct spi_controller *ctlr, + OMAP2_MCSPI_TX0); if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) && - ctlr->cur_msg_mapped && - ctlr->can_dma(ctlr, spi, t)) + spi_xfer_is_dma_mapped(ctlr, spi, t)) count = omap2_mcspi_txrx_dma(spi, t); else count = omap2_mcspi_txrx_pio(spi, t); @@ -1277,24 +1277,11 @@ static int omap2_mcspi_prepare_message(struct spi_controller *ctlr, /* * Check if this transfer contains only one word; - * OR contains 1 to 4 words, with bits_per_word == 8 and no delay between each word - * OR contains 1 to 2 words, with bits_per_word == 16 and no delay between each word - * - * If one of the two last case is true, this also change the bits_per_word of this - * transfer to make it a bit faster. - * It's not an issue to change the bits_per_word here even if the multi-mode is not - * applicable for this message, the signal on the wire will be the same. 
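The omap2-mcspi hunk here stops rewriting bits_per_word and keeps multi-mode only when a transfer contains exactly one word. The surviving condition reads as the small predicate below (an illustrative paraphrase, not driver code):

#include <linux/types.h>

/* True when the transfer holds exactly one SPI word, making multi-mode safe. */
static bool demo_single_word_xfer(unsigned int bits_per_word, unsigned int len)
{
	if (bits_per_word < 8)
		return len == 1;	/* a 1..7-bit word occupies one byte */

	/* one 8..32-bit word: length must equal the word size in bytes */
	return len == bits_per_word / 8;
}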
*/ if (bits_per_word < 8 && tr->len == 1) { /* multi-mode is applicable, only one word (1..7 bits) */ - } else if (tr->word_delay.value == 0 && bits_per_word == 8 && tr->len <= 4) { - /* multi-mode is applicable, only one "bigger" word (8,16,24,32 bits) */ - tr->bits_per_word = tr->len * bits_per_word; - } else if (tr->word_delay.value == 0 && bits_per_word == 16 && tr->len <= 2) { - /* multi-mode is applicable, only one "bigger" word (16,32 bits) */ - tr->bits_per_word = tr->len * bits_per_word / 2; } else if (bits_per_word >= 8 && tr->len == bits_per_word / 8) { - /* multi-mode is applicable, only one word (9..15,17..32 bits) */ + /* multi-mode is applicable, only one word (8..32 bits) */ } else { /* multi-mode is not applicable: more than one word in the transfer */ mcspi->use_multi_mode = false; @@ -1671,4 +1658,5 @@ static struct platform_driver omap2_mcspi_driver = { }; module_platform_driver(omap2_mcspi_driver); +MODULE_DESCRIPTION("OMAP2 McSPI controller driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-pci1xxxx.c b/drivers/spi/spi-pci1xxxx.c index cc18d320370f..fc98979eba48 100644 --- a/drivers/spi/spi-pci1xxxx.c +++ b/drivers/spi/spi-pci1xxxx.c @@ -6,6 +6,7 @@ #include <linux/bitfield.h> +#include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/iopoll.h> #include <linux/irq.h> @@ -15,7 +16,7 @@ #include <linux/pci.h> #include <linux/spinlock.h> #include <linux/spi/spi.h> -#include <linux/delay.h> +#include "internals.h" #define DRV_NAME "spi-pci1xxxx" @@ -567,7 +568,7 @@ error: static int pci1xxxx_spi_transfer_one(struct spi_controller *spi_ctlr, struct spi_device *spi, struct spi_transfer *xfer) { - if (spi_ctlr->can_dma(spi_ctlr, spi, xfer) && spi_ctlr->cur_msg_mapped) + if (spi_xfer_is_dma_mapped(spi_ctlr, spi, xfer)) return pci1xxxx_spi_transfer_with_dma(spi_ctlr, spi, xfer); else return pci1xxxx_spi_transfer_with_io(spi_ctlr, spi, xfer); diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c index 6d2efdb0e95f..616d032f1a89 100644 --- a/drivers/spi/spi-pxa2xx-pci.c +++ b/drivers/spi/spi-pxa2xx-pci.c @@ -10,8 +10,7 @@ #include <linux/err.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/platform_device.h> -#include <linux/property.h> +#include <linux/pm.h> #include <linux/sprintf.h> #include <linux/string.h> #include <linux/types.h> @@ -265,10 +264,8 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) { const struct pxa_spi_info *info; - struct platform_device_info pi; int ret; - struct platform_device *pdev; - struct pxa2xx_spi_controller spi_pdata; + struct pxa2xx_spi_controller *pdata; struct ssp_device *ssp; ret = pcim_enable_device(dev); @@ -279,15 +276,17 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev, if (ret) return ret; - memset(&spi_pdata, 0, sizeof(spi_pdata)); + pdata = devm_kzalloc(&dev->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return -ENOMEM; - ssp = &spi_pdata.ssp; + ssp = &pdata->ssp; ssp->dev = &dev->dev; ssp->phys_base = pci_resource_start(dev, 0); ssp->mmio_base = pcim_iomap_table(dev)[0]; info = (struct pxa_spi_info *)ent->driver_data; - ret = info->setup(dev, &spi_pdata); + ret = info->setup(dev, pdata); if (ret) return ret; @@ -298,28 +297,12 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev, return ret; ssp->irq = pci_irq_vector(dev, 0); - memset(&pi, 0, sizeof(pi)); - pi.fwnode = dev_fwnode(&dev->dev); - pi.parent = &dev->dev; - pi.name = "pxa2xx-spi"; - pi.id = ssp->port_id; - pi.data = &spi_pdata; - pi.size_data = 
sizeof(spi_pdata); - - pdev = platform_device_register_full(&pi); - if (IS_ERR(pdev)) - return PTR_ERR(pdev); - - pci_set_drvdata(dev, pdev); - - return 0; + return pxa2xx_spi_probe(&dev->dev, ssp); } static void pxa2xx_spi_pci_remove(struct pci_dev *dev) { - struct platform_device *pdev = pci_get_drvdata(dev); - - platform_device_unregister(pdev); + pxa2xx_spi_remove(&dev->dev); } static const struct pci_device_id pxa2xx_spi_pci_devices[] = { @@ -341,6 +324,9 @@ MODULE_DEVICE_TABLE(pci, pxa2xx_spi_pci_devices); static struct pci_driver pxa2xx_spi_pci_driver = { .name = "pxa2xx_spi_pci", .id_table = pxa2xx_spi_pci_devices, + .driver = { + .pm = pm_ptr(&pxa2xx_spi_pm_ops), + }, .probe = pxa2xx_spi_pci_probe, .remove = pxa2xx_spi_pci_remove, }; @@ -349,4 +335,5 @@ module_pci_driver(pxa2xx_spi_pci_driver); MODULE_DESCRIPTION("CE4100/LPSS PCI-SPI glue code for PXA's driver"); MODULE_LICENSE("GPL v2"); +MODULE_IMPORT_NS(SPI_PXA2xx); MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>"); diff --git a/drivers/spi/spi-pxa2xx-platform.c b/drivers/spi/spi-pxa2xx-platform.c new file mode 100644 index 000000000000..98a8ceb7db6f --- /dev/null +++ b/drivers/spi/spi-pxa2xx-platform.c @@ -0,0 +1,214 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include <linux/acpi.h> +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/mod_devicetable.h> +#include <linux/platform_device.h> +#include <linux/property.h> +#include <linux/types.h> + +#include "spi-pxa2xx.h" + +static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param) +{ + return param == chan->device->dev; +} + +static int +pxa2xx_spi_init_ssp(struct platform_device *pdev, struct ssp_device *ssp, enum pxa_ssp_type type) +{ + struct device *dev = &pdev->dev; + struct resource *res; + int status; + u64 uid; + + ssp->mmio_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); + if (IS_ERR(ssp->mmio_base)) + return PTR_ERR(ssp->mmio_base); + + ssp->phys_base = res->start; + + ssp->clk = devm_clk_get(dev, NULL); + if (IS_ERR(ssp->clk)) + return PTR_ERR(ssp->clk); + + ssp->irq = platform_get_irq(pdev, 0); + if (ssp->irq < 0) + return ssp->irq; + + ssp->type = type; + ssp->dev = dev; + + status = acpi_dev_uid_to_integer(ACPI_COMPANION(dev), &uid); + if (status) + ssp->port_id = -1; + else + ssp->port_id = uid; + + return 0; +} + +static void pxa2xx_spi_ssp_release(void *ssp) +{ + pxa_ssp_free(ssp); +} + +static struct ssp_device *pxa2xx_spi_ssp_request(struct platform_device *pdev) +{ + struct ssp_device *ssp; + int status; + + ssp = pxa_ssp_request(pdev->id, pdev->name); + if (!ssp) + return ssp; + + status = devm_add_action_or_reset(&pdev->dev, pxa2xx_spi_ssp_release, ssp); + if (status) + return ERR_PTR(status); + + return ssp; +} + +static struct pxa2xx_spi_controller * +pxa2xx_spi_init_pdata(struct platform_device *pdev) +{ + struct pxa2xx_spi_controller *pdata; + struct device *dev = &pdev->dev; + struct device *parent = dev->parent; + const void *match = device_get_match_data(dev); + enum pxa_ssp_type type = SSP_UNDEFINED; + struct ssp_device *ssp; + bool is_lpss_priv; + u32 num_cs = 1; + int status; + + ssp = pxa2xx_spi_ssp_request(pdev); + if (IS_ERR(ssp)) + return ERR_CAST(ssp); + if (ssp) { + type = ssp->type; + } else if (match) { + type = (enum pxa_ssp_type)(uintptr_t)match; + } else { + u32 value; + + status = device_property_read_u32(dev, "intel,spi-pxa2xx-type", &value); + if (status) + return ERR_PTR(status); + + type = (enum pxa_ssp_type)value; + 
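+		/*
+		 * Neither the SSP library nor the driver match data knew
+		 * this port, so trust the firmware-described type here;
+		 * the range check below rejects anything bogus.
+		 */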
} + + /* Validate the SSP type correctness */ + if (!(type > SSP_UNDEFINED && type < SSP_MAX)) + return ERR_PTR(-EINVAL); + + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return ERR_PTR(-ENOMEM); + + /* Platforms with iDMA 64-bit */ + is_lpss_priv = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lpss_priv"); + if (is_lpss_priv) { + pdata->tx_param = parent; + pdata->rx_param = parent; + pdata->dma_filter = pxa2xx_spi_idma_filter; + } + + /* Read number of chip select pins, if provided */ + device_property_read_u32(dev, "num-cs", &num_cs); + + pdata->num_chipselect = num_cs; + pdata->is_target = device_property_read_bool(dev, "spi-slave"); + pdata->enable_dma = true; + pdata->dma_burst_size = 1; + + /* If SSP has been already enumerated, use it */ + if (ssp) + return pdata; + + status = pxa2xx_spi_init_ssp(pdev, &pdata->ssp, type); + if (status) + return ERR_PTR(status); + + return pdata; +} + +static int pxa2xx_spi_platform_probe(struct platform_device *pdev) +{ + struct pxa2xx_spi_controller *platform_info; + struct device *dev = &pdev->dev; + struct ssp_device *ssp; + + platform_info = dev_get_platdata(dev); + if (!platform_info) { + platform_info = pxa2xx_spi_init_pdata(pdev); + if (IS_ERR(platform_info)) + return dev_err_probe(dev, PTR_ERR(platform_info), "missing platform data\n"); + + dev->platform_data = platform_info; + } + + ssp = pxa2xx_spi_ssp_request(pdev); + if (IS_ERR(ssp)) + return PTR_ERR(ssp); + if (!ssp) + ssp = &platform_info->ssp; + + return pxa2xx_spi_probe(dev, ssp); +} + +static void pxa2xx_spi_platform_remove(struct platform_device *pdev) +{ + pxa2xx_spi_remove(&pdev->dev); +} + +static const struct acpi_device_id pxa2xx_spi_acpi_match[] = { + { "80860F0E" }, + { "8086228E" }, + { "INT33C0" }, + { "INT33C1" }, + { "INT3430" }, + { "INT3431" }, + {} +}; +MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match); + +static const struct of_device_id pxa2xx_spi_of_match[] = { + { .compatible = "marvell,mmp2-ssp", .data = (void *)MMP2_SSP }, + {} +}; +MODULE_DEVICE_TABLE(of, pxa2xx_spi_of_match); + +static struct platform_driver driver = { + .driver = { + .name = "pxa2xx-spi", + .pm = pm_ptr(&pxa2xx_spi_pm_ops), + .acpi_match_table = pxa2xx_spi_acpi_match, + .of_match_table = pxa2xx_spi_of_match, + }, + .probe = pxa2xx_spi_platform_probe, + .remove_new = pxa2xx_spi_platform_remove, +}; + +static int __init pxa2xx_spi_init(void) +{ + return platform_driver_register(&driver); +} +subsys_initcall(pxa2xx_spi_init); + +static void __exit pxa2xx_spi_exit(void) +{ + platform_driver_unregister(&driver); +} +module_exit(pxa2xx_spi_exit); + +MODULE_AUTHOR("Stephen Street"); +MODULE_DESCRIPTION("PXA2xx SSP SPI Controller platform driver"); +MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(SPI_PXA2xx); +MODULE_ALIAS("platform:pxa2xx-spi"); +MODULE_SOFTDEP("pre: dw_dmac"); diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index efe76d0c21bb..16b96eb176cd 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -4,7 +4,6 @@ * Copyright (C) 2013, 2021 Intel Corporation */ -#include <linux/acpi.h> #include <linux/atomic.h> #include <linux/bitops.h> #include <linux/bug.h> @@ -14,15 +13,12 @@ #include <linux/dmaengine.h> #include <linux/err.h> #include <linux/gpio/consumer.h> -#include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/ioport.h> #include <linux/math64.h> #include <linux/minmax.h> -#include <linux/mod_devicetable.h> #include <linux/module.h> -#include <linux/platform_device.h> #include 
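The platform driver above is now pure glue: everything device-specific funnels into pxa2xx_spi_probe()/pxa2xx_spi_remove(), which spi-pxa2xx.c below exports in the SPI_PXA2xx symbol namespace. A hypothetical extra enumerator (the foo_* names are made up) would follow the same pattern as the PCI and platform glue:

	#include "spi-pxa2xx.h"

	static int foo_bus_probe(struct foo_device *fdev)
	{
		struct ssp_device *ssp = foo_device_get_ssp(fdev);	/* illustrative */

		return pxa2xx_spi_probe(&fdev->dev, ssp);
	}

	/* Without this import the namespaced exports fail to resolve at load time */
	MODULE_IMPORT_NS(SPI_PXA2xx);
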
<linux/pm_runtime.h> #include <linux/property.h> #include <linux/slab.h> @@ -30,13 +26,9 @@ #include <linux/spi/spi.h> +#include "internals.h" #include "spi-pxa2xx.h" -MODULE_AUTHOR("Stephen Street"); -MODULE_DESCRIPTION("PXA2xx SSP SPI Controller"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:pxa2xx-spi"); - #define TIMOUT_DFLT 1000 /* @@ -99,7 +91,6 @@ struct lpss_config { /* Chip select control */ unsigned cs_sel_shift; unsigned cs_sel_mask; - unsigned cs_num; /* Quirks */ unsigned cs_clk_stays_gated : 1; }; @@ -137,7 +128,6 @@ static const struct lpss_config lpss_platforms[] = { .tx_threshold_hi = 224, .cs_sel_shift = 2, .cs_sel_mask = 1 << 2, - .cs_num = 2, }, { /* LPSS_SPT_SSP */ .offset = 0x200, @@ -1004,11 +994,8 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller, } dma_thresh = SSCR1_RxTresh(RX_THRESH_DFLT) | SSCR1_TxTresh(TX_THRESH_DFLT); - dma_mapped = controller->can_dma && - controller->can_dma(controller, spi, transfer) && - controller->cur_msg_mapped; + dma_mapped = spi_xfer_is_dma_mapped(controller, spi, transfer); if (dma_mapped) { - /* Ensure we have the correct interrupt handler */ drv_data->transfer_handler = pxa2xx_spi_dma_transfer; @@ -1265,135 +1252,24 @@ static void cleanup(struct spi_device *spi) kfree(chip); } -static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param) -{ - return param == chan->device->dev; -} - -static int -pxa2xx_spi_init_ssp(struct platform_device *pdev, struct ssp_device *ssp, enum pxa_ssp_type type) -{ - struct device *dev = &pdev->dev; - struct resource *res; - int status; - u64 uid; - - ssp->mmio_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); - if (IS_ERR(ssp->mmio_base)) - return PTR_ERR(ssp->mmio_base); - - ssp->phys_base = res->start; - - ssp->clk = devm_clk_get(dev, NULL); - if (IS_ERR(ssp->clk)) - return PTR_ERR(ssp->clk); - - ssp->irq = platform_get_irq(pdev, 0); - if (ssp->irq < 0) - return ssp->irq; - - ssp->type = type; - ssp->dev = dev; - - status = acpi_dev_uid_to_integer(ACPI_COMPANION(dev), &uid); - if (status) - ssp->port_id = -1; - else - ssp->port_id = uid; - - return 0; -} - -static struct pxa2xx_spi_controller * -pxa2xx_spi_init_pdata(struct platform_device *pdev) -{ - struct pxa2xx_spi_controller *pdata; - struct device *dev = &pdev->dev; - struct device *parent = dev->parent; - enum pxa_ssp_type type = SSP_UNDEFINED; - struct ssp_device *ssp = NULL; - const void *match; - bool is_lpss_priv; - u32 num_cs = 1; - int status; - - is_lpss_priv = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lpss_priv"); - - match = device_get_match_data(dev); - if (match) - type = (uintptr_t)match; - else if (is_lpss_priv) { - u32 value; - - status = device_property_read_u32(dev, "intel,spi-pxa2xx-type", &value); - if (status) - return ERR_PTR(status); - - type = (enum pxa_ssp_type)value; - } else { - ssp = pxa_ssp_request(pdev->id, pdev->name); - if (ssp) { - type = ssp->type; - pxa_ssp_free(ssp); - } - } - - /* Validate the SSP type correctness */ - if (!(type > SSP_UNDEFINED && type < SSP_MAX)) - return ERR_PTR(-EINVAL); - - pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); - if (!pdata) - return ERR_PTR(-ENOMEM); - - /* Platforms with iDMA 64-bit */ - if (is_lpss_priv) { - pdata->tx_param = parent; - pdata->rx_param = parent; - pdata->dma_filter = pxa2xx_spi_idma_filter; - } - - /* Read number of chip select pins, if provided */ - device_property_read_u32(dev, "num-cs", &num_cs); - - pdata->num_chipselect = num_cs; - pdata->is_target = device_property_read_bool(dev, 
"spi-slave"); - pdata->enable_dma = true; - pdata->dma_burst_size = 1; - - /* If SSP has been already enumerated, use it */ - if (ssp) - return pdata; - - status = pxa2xx_spi_init_ssp(pdev, &pdata->ssp, type); - if (status) - return ERR_PTR(status); - - return pdata; -} - static int pxa2xx_spi_fw_translate_cs(struct spi_controller *controller, unsigned int cs) { struct driver_data *drv_data = spi_controller_get_devdata(controller); - if (has_acpi_companion(drv_data->ssp->dev)) { - switch (drv_data->ssp_type) { - /* - * For Atoms the ACPI DeviceSelection used by the Windows - * driver starts from 1 instead of 0 so translate it here - * to match what Linux expects. - */ - case LPSS_BYT_SSP: - case LPSS_BSW_SSP: - return cs - 1; + switch (drv_data->ssp_type) { + /* + * For some of Intel Atoms the ACPI DeviceSelection used by the Windows + * driver starts from 1 instead of 0 so translate it here to match what + * Linux expects. + */ + case LPSS_BYT_SSP: + case LPSS_BSW_SSP: + return cs - 1; - default: - break; - } + default: + return cs; } - - return cs; } static size_t pxa2xx_spi_max_dma_transfer_size(struct spi_device *spi) @@ -1401,41 +1277,23 @@ static size_t pxa2xx_spi_max_dma_transfer_size(struct spi_device *spi) return MAX_DMA_LEN; } -static int pxa2xx_spi_probe(struct platform_device *pdev) +int pxa2xx_spi_probe(struct device *dev, struct ssp_device *ssp) { - struct device *dev = &pdev->dev; struct pxa2xx_spi_controller *platform_info; struct spi_controller *controller; struct driver_data *drv_data; - struct ssp_device *ssp; const struct lpss_config *config; int status; u32 tmp; platform_info = dev_get_platdata(dev); - if (!platform_info) { - platform_info = pxa2xx_spi_init_pdata(pdev); - if (IS_ERR(platform_info)) - return dev_err_probe(dev, PTR_ERR(platform_info), "missing platform data\n"); - } - dev_dbg(dev, "DMA burst size set to %u\n", platform_info->dma_burst_size); - - ssp = pxa_ssp_request(pdev->id, pdev->name); - if (!ssp) - ssp = &platform_info->ssp; - - if (!ssp->mmio_base) - return dev_err_probe(dev, -ENODEV, "failed to get SSP\n"); - if (platform_info->is_target) controller = devm_spi_alloc_target(dev, sizeof(*drv_data)); else controller = devm_spi_alloc_host(dev, sizeof(*drv_data)); + if (!controller) + return dev_err_probe(dev, -ENOMEM, "cannot alloc spi_controller\n"); - if (!controller) { - status = dev_err_probe(dev, -ENOMEM, "cannot alloc spi_controller\n"); - goto out_error_controller_alloc; - } drv_data = spi_controller_get_devdata(controller); drv_data->controller = controller; drv_data->controller_info = platform_info; @@ -1486,10 +1344,8 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) status = request_irq(ssp->irq, ssp_int, IRQF_SHARED, dev_name(dev), drv_data); - if (status < 0) { - dev_err_probe(dev, status, "cannot get IRQ %d\n", ssp->irq); - goto out_error_controller_alloc; - } + if (status < 0) + return dev_err_probe(dev, status, "cannot get IRQ %d\n", ssp->irq); /* Setup DMA if requested */ if (platform_info->enable_dma) { @@ -1502,6 +1358,8 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) controller->max_dma_len = MAX_DMA_LEN; controller->max_transfer_size = pxa2xx_spi_max_dma_transfer_size; + + dev_dbg(dev, "DMA burst size set to %u\n", platform_info->dma_burst_size); } } @@ -1578,8 +1436,6 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) tmp &= LPSS_CAPS_CS_EN_MASK; tmp >>= LPSS_CAPS_CS_EN_SHIFT; platform_info->num_chipselect = ffz(tmp); - } else if (config->cs_num) { - platform_info->num_chipselect = 
config->cs_num; } } controller->num_chipselect = platform_info->num_chipselect; @@ -1594,13 +1450,13 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) } } - pm_runtime_set_autosuspend_delay(&pdev->dev, 50); - pm_runtime_use_autosuspend(&pdev->dev); - pm_runtime_set_active(&pdev->dev); - pm_runtime_enable(&pdev->dev); + pm_runtime_set_autosuspend_delay(dev, 50); + pm_runtime_use_autosuspend(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); /* Register with the SPI framework */ - platform_set_drvdata(pdev, drv_data); + dev_set_drvdata(dev, drv_data); status = spi_register_controller(controller); if (status) { dev_err_probe(dev, status, "problem registering SPI controller\n"); @@ -1610,7 +1466,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) return status; out_error_pm_runtime_enabled: - pm_runtime_disable(&pdev->dev); + pm_runtime_disable(dev); out_error_clock_enabled: clk_disable_unprepare(ssp->clk); @@ -1619,17 +1475,16 @@ out_error_dma_irq_alloc: pxa2xx_spi_dma_release(drv_data); free_irq(ssp->irq, drv_data); -out_error_controller_alloc: - pxa_ssp_free(ssp); return status; } +EXPORT_SYMBOL_NS_GPL(pxa2xx_spi_probe, SPI_PXA2xx); -static void pxa2xx_spi_remove(struct platform_device *pdev) +void pxa2xx_spi_remove(struct device *dev) { - struct driver_data *drv_data = platform_get_drvdata(pdev); + struct driver_data *drv_data = dev_get_drvdata(dev); struct ssp_device *ssp = drv_data->ssp; - pm_runtime_get_sync(&pdev->dev); + pm_runtime_get_sync(dev); spi_unregister_controller(drv_data->controller); @@ -1641,15 +1496,13 @@ static void pxa2xx_spi_remove(struct platform_device *pdev) if (drv_data->controller_info->enable_dma) pxa2xx_spi_dma_release(drv_data); - pm_runtime_put_noidle(&pdev->dev); - pm_runtime_disable(&pdev->dev); + pm_runtime_put_noidle(dev); + pm_runtime_disable(dev); /* Release IRQ */ free_irq(ssp->irq, drv_data); - - /* Release SSP */ - pxa_ssp_free(ssp); } +EXPORT_SYMBOL_NS_GPL(pxa2xx_spi_remove, SPI_PXA2xx); static int pxa2xx_spi_suspend(struct device *dev) { @@ -1701,49 +1554,11 @@ static int pxa2xx_spi_runtime_resume(struct device *dev) return clk_prepare_enable(drv_data->ssp->clk); } -static const struct dev_pm_ops pxa2xx_spi_pm_ops = { +EXPORT_NS_GPL_DEV_PM_OPS(pxa2xx_spi_pm_ops, SPI_PXA2xx) = { SYSTEM_SLEEP_PM_OPS(pxa2xx_spi_suspend, pxa2xx_spi_resume) RUNTIME_PM_OPS(pxa2xx_spi_runtime_suspend, pxa2xx_spi_runtime_resume, NULL) }; -static const struct acpi_device_id pxa2xx_spi_acpi_match[] = { - { "80860F0E", LPSS_BYT_SSP }, - { "8086228E", LPSS_BSW_SSP }, - { "INT33C0", LPSS_LPT_SSP }, - { "INT33C1", LPSS_LPT_SSP }, - { "INT3430", LPSS_LPT_SSP }, - { "INT3431", LPSS_LPT_SSP }, - {} -}; -MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match); - -static const struct of_device_id pxa2xx_spi_of_match[] = { - { .compatible = "marvell,mmp2-ssp", .data = (void *)MMP2_SSP }, - {} -}; -MODULE_DEVICE_TABLE(of, pxa2xx_spi_of_match); - -static struct platform_driver driver = { - .driver = { - .name = "pxa2xx-spi", - .pm = pm_ptr(&pxa2xx_spi_pm_ops), - .acpi_match_table = pxa2xx_spi_acpi_match, - .of_match_table = pxa2xx_spi_of_match, - }, - .probe = pxa2xx_spi_probe, - .remove_new = pxa2xx_spi_remove, -}; - -static int __init pxa2xx_spi_init(void) -{ - return platform_driver_register(&driver); -} -subsys_initcall(pxa2xx_spi_init); - -static void __exit pxa2xx_spi_exit(void) -{ - platform_driver_unregister(&driver); -} -module_exit(pxa2xx_spi_exit); - -MODULE_SOFTDEP("pre: dw_dmac"); +MODULE_AUTHOR("Stephen Street"); +MODULE_DESCRIPTION("PXA2xx SSP SPI 
Controller core driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h index 93e1e471e1c6..a470d3d634d3 100644 --- a/drivers/spi/spi-pxa2xx.h +++ b/drivers/spi/spi-pxa2xx.h @@ -14,6 +14,7 @@ #include <linux/pxa2xx_ssp.h> +struct device; struct gpio_desc; /* @@ -131,4 +132,9 @@ extern void pxa2xx_spi_dma_stop(struct driver_data *drv_data); extern int pxa2xx_spi_dma_setup(struct driver_data *drv_data); extern void pxa2xx_spi_dma_release(struct driver_data *drv_data); +int pxa2xx_spi_probe(struct device *dev, struct ssp_device *ssp); +void pxa2xx_spi_remove(struct device *dev); + +extern const struct dev_pm_ops pxa2xx_spi_pm_ops; + #endif /* SPI_PXA2XX_H */ diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c index 2af63040ac6e..1a2f9cd92b3c 100644 --- a/drivers/spi/spi-qup.c +++ b/drivers/spi/spi-qup.c @@ -5,6 +5,8 @@ #include <linux/clk.h> #include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/dmaengine.h> #include <linux/err.h> #include <linux/interconnect.h> #include <linux/interrupt.h> @@ -16,8 +18,7 @@ #include <linux/pm_opp.h> #include <linux/pm_runtime.h> #include <linux/spi/spi.h> -#include <linux/dmaengine.h> -#include <linux/dma-mapping.h> +#include "internals.h" #define QUP_CONFIG 0x0000 #define QUP_STATE 0x0004 @@ -709,9 +710,7 @@ static int spi_qup_io_prep(struct spi_device *spi, struct spi_transfer *xfer) if (controller->n_words <= (controller->in_fifo_sz / sizeof(u32))) controller->mode = QUP_IO_M_MODE_FIFO; - else if (spi->controller->can_dma && - spi->controller->can_dma(spi->controller, spi, xfer) && - spi->controller->cur_msg_mapped) + else if (spi_xfer_is_dma_mapped(spi->controller, spi, xfer)) controller->mode = QUP_IO_M_MODE_BAM; else controller->mode = QUP_IO_M_MODE_BLOCK; @@ -1369,5 +1368,6 @@ static struct platform_driver spi_qup_driver = { }; module_platform_driver(spi_qup_driver); +MODULE_DESCRIPTION("Qualcomm SPI controller with QUP interface"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:spi_qup"); diff --git a/drivers/spi/spi-rpc-if.c b/drivers/spi/spi-rpc-if.c index e11146932828..d3f07fd719bd 100644 --- a/drivers/spi/spi-rpc-if.c +++ b/drivers/spi/spi-rpc-if.c @@ -95,16 +95,16 @@ static int rpcif_spi_mem_dirmap_create(struct spi_mem_dirmap_desc *desc) spi_controller_get_devdata(desc->mem->spi->controller); if (desc->info.offset + desc->info.length > U32_MAX) - return -ENOTSUPP; + return -EINVAL; if (!rpcif_spi_mem_supports_op(desc->mem, &desc->info.op_tmpl)) - return -ENOTSUPP; + return -EOPNOTSUPP; - if (!rpc->dirmap && desc->info.op_tmpl.data.dir == SPI_MEM_DATA_IN) - return -ENOTSUPP; + if (!rpc->dirmap) + return -EOPNOTSUPP; - if (desc->info.op_tmpl.data.dir == SPI_MEM_DATA_OUT) - return -ENOTSUPP; + if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN) + return -EOPNOTSUPP; return 0; } diff --git a/drivers/spi/spi-wpcm-fiu.c b/drivers/spi/spi-wpcm-fiu.c index 6b16a22cc3a4..886d6d7771d4 100644 --- a/drivers/spi/spi-wpcm-fiu.c +++ b/drivers/spi/spi-wpcm-fiu.c @@ -378,7 +378,7 @@ static int wpcm_fiu_dirmap_create(struct spi_mem_dirmap_desc *desc) int cs = spi_get_chipselect(desc->mem->spi, 0); if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN) - return -ENOTSUPP; + return -EOPNOTSUPP; /* * Unfortunately, FIU only supports a 16 MiB direct mapping window (per @@ -387,11 +387,11 @@ static int wpcm_fiu_dirmap_create(struct spi_mem_dirmap_desc *desc) * flashes that are bigger than 16 MiB. 
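 * As reworked below, a window exceeding these limits is now reported as
 * -EINVAL (a caller error), while the non-read direction check above
 * keeps an "unsupported" code (-EOPNOTSUPP) so the spi-mem core can
 * fall back to regular operations.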
*/ if (desc->info.offset + desc->info.length > MAX_MEMORY_SIZE_PER_CS) - return -ENOTSUPP; + return -EINVAL; /* Don't read past the memory window */ if (cs * MAX_MEMORY_SIZE_PER_CS + desc->info.offset + desc->info.length > fiu->memory_size) - return -ENOTSUPP; + return -EINVAL; return 0; } diff --git a/drivers/spi/spi-xcomm.c b/drivers/spi/spi-xcomm.c index 63354dd3110f..846f00e23b71 100644 --- a/drivers/spi/spi-xcomm.c +++ b/drivers/spi/spi-xcomm.c @@ -10,6 +10,7 @@ #include <linux/module.h> #include <linux/delay.h> #include <linux/i2c.h> +#include <linux/gpio/driver.h> #include <linux/spi/spi.h> #include <asm/unaligned.h> @@ -26,24 +27,63 @@ #define SPI_XCOMM_CMD_UPDATE_CONFIG 0x03 #define SPI_XCOMM_CMD_WRITE 0x04 +#define SPI_XCOMM_CMD_GPIO_SET 0x05 #define SPI_XCOMM_CLOCK 48000000 struct spi_xcomm { struct i2c_client *i2c; - uint16_t settings; - uint16_t chipselect; + struct gpio_chip gc; + + u16 settings; + u16 chipselect; unsigned int current_speed; - uint8_t buf[63]; + u8 buf[63]; }; +static void spi_xcomm_gpio_set_value(struct gpio_chip *chip, + unsigned int offset, int val) +{ + struct spi_xcomm *spi_xcomm = gpiochip_get_data(chip); + unsigned char buf[2]; + + buf[0] = SPI_XCOMM_CMD_GPIO_SET; + buf[1] = !!val; + + i2c_master_send(spi_xcomm->i2c, buf, 2); +} + +static int spi_xcomm_gpio_get_direction(struct gpio_chip *chip, + unsigned int offset) +{ + return GPIO_LINE_DIRECTION_OUT; +} + +static int spi_xcomm_gpio_add(struct spi_xcomm *spi_xcomm) +{ + struct device *dev = &spi_xcomm->i2c->dev; + + if (!IS_ENABLED(CONFIG_GPIOLIB)) + return 0; + + spi_xcomm->gc.get_direction = spi_xcomm_gpio_get_direction; + spi_xcomm->gc.set = spi_xcomm_gpio_set_value; + spi_xcomm->gc.can_sleep = 1; + spi_xcomm->gc.base = -1; + spi_xcomm->gc.ngpio = 1; + spi_xcomm->gc.label = spi_xcomm->i2c->name; + spi_xcomm->gc.owner = THIS_MODULE; + + return devm_gpiochip_add_data(dev, &spi_xcomm->gc, spi_xcomm); +} + static int spi_xcomm_sync_config(struct spi_xcomm *spi_xcomm, unsigned int len) { - uint16_t settings; - uint8_t *buf = spi_xcomm->buf; + u16 settings; + u8 *buf = spi_xcomm->buf; settings = spi_xcomm->settings; settings |= len << SPI_XCOMM_SETTINGS_LEN_OFFSET; @@ -56,10 +96,10 @@ static int spi_xcomm_sync_config(struct spi_xcomm *spi_xcomm, unsigned int len) } static void spi_xcomm_chipselect(struct spi_xcomm *spi_xcomm, - struct spi_device *spi, int is_active) + struct spi_device *spi, int is_active) { unsigned long cs = spi_get_chipselect(spi, 0); - uint16_t chipselect = spi_xcomm->chipselect; + u16 chipselect = spi_xcomm->chipselect; if (is_active) chipselect |= BIT(cs); @@ -70,7 +110,8 @@ static void spi_xcomm_chipselect(struct spi_xcomm *spi_xcomm, } static int spi_xcomm_setup_transfer(struct spi_xcomm *spi_xcomm, - struct spi_device *spi, struct spi_transfer *t, unsigned int *settings) + struct spi_device *spi, struct spi_transfer *t, + unsigned int *settings) { if (t->len > 62) return -EINVAL; @@ -108,7 +149,7 @@ static int spi_xcomm_setup_transfer(struct spi_xcomm *spi_xcomm, } static int spi_xcomm_txrx_bufs(struct spi_xcomm *spi_xcomm, - struct spi_device *spi, struct spi_transfer *t) + struct spi_device *spi, struct spi_transfer *t) { int ret; @@ -119,13 +160,13 @@ static int spi_xcomm_txrx_bufs(struct spi_xcomm *spi_xcomm, ret = i2c_master_send(spi_xcomm->i2c, spi_xcomm->buf, t->len + 1); if (ret < 0) return ret; - else if (ret != t->len + 1) + if (ret != t->len + 1) return -EIO; } else if (t->rx_buf) { ret = i2c_master_recv(spi_xcomm->i2c, t->rx_buf, t->len); if (ret < 0) return ret; - 
else if (ret != t->len) + if (ret != t->len) return -EIO; } @@ -133,12 +174,12 @@ static int spi_xcomm_txrx_bufs(struct spi_xcomm *spi_xcomm, } static int spi_xcomm_transfer_one(struct spi_controller *host, - struct spi_message *msg) + struct spi_message *msg) { struct spi_xcomm *spi_xcomm = spi_controller_get_devdata(host); unsigned int settings = spi_xcomm->settings; struct spi_device *spi = msg->spi; - unsigned cs_change = 0; + unsigned int cs_change = 0; struct spi_transfer *t; bool is_first = true; int status = 0; @@ -147,7 +188,6 @@ static int spi_xcomm_transfer_one(struct spi_controller *host, spi_xcomm_chipselect(spi_xcomm, spi, true); list_for_each_entry(t, &msg->transfers, transfer_list) { - if (!t->tx_buf && !t->rx_buf && t->len) { status = -EINVAL; break; @@ -208,7 +248,7 @@ static int spi_xcomm_probe(struct i2c_client *i2c) struct spi_controller *host; int ret; - host = spi_alloc_host(&i2c->dev, sizeof(*spi_xcomm)); + host = devm_spi_alloc_host(&i2c->dev, sizeof(*spi_xcomm)); if (!host) return -ENOMEM; @@ -221,13 +261,12 @@ static int spi_xcomm_probe(struct i2c_client *i2c) host->flags = SPI_CONTROLLER_HALF_DUPLEX; host->transfer_one_message = spi_xcomm_transfer_one; host->dev.of_node = i2c->dev.of_node; - i2c_set_clientdata(i2c, host); ret = devm_spi_register_controller(&i2c->dev, host); if (ret < 0) - spi_controller_put(host); + return ret; - return ret; + return spi_xcomm_gpio_add(spi_xcomm); } static const struct i2c_device_id spi_xcomm_ids[] = { diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index fc13fa192189..d4da5464dbd0 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -1222,11 +1222,6 @@ void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev, spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0); } -/* Dummy SG for unidirect transfers */ -static struct scatterlist dummy_sg = { - .page_link = SG_END, -}; - static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg) { struct device *tx_dev, *rx_dev; @@ -1265,8 +1260,8 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg) attrs); if (ret != 0) return ret; - } else { - xfer->tx_sg.sgl = &dummy_sg; + + xfer->tx_sg_mapped = true; } if (xfer->rx_buf != NULL) { @@ -1280,8 +1275,8 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg) return ret; } - } else { - xfer->rx_sg.sgl = &dummy_sg; + + xfer->rx_sg_mapped = true; } } /* No transfer has been mapped, bail out with success */ @@ -1290,7 +1285,6 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg) ctlr->cur_rx_dma_dev = rx_dev; ctlr->cur_tx_dma_dev = tx_dev; - ctlr->cur_msg_mapped = true; return 0; } @@ -1301,57 +1295,46 @@ static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg) struct device *tx_dev = ctlr->cur_tx_dma_dev; struct spi_transfer *xfer; - if (!ctlr->cur_msg_mapped || !ctlr->can_dma) - return 0; - list_for_each_entry(xfer, &msg->transfers, transfer_list) { /* The sync has already been done after each transfer. 
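 * (spi_dma_sync_for_cpu() ran when each transfer finished, so the
 * unmap below can safely pass DMA_ATTR_SKIP_CPU_SYNC.)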
*/ unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC; - if (!ctlr->can_dma(ctlr, msg->spi, xfer)) - continue; + if (xfer->rx_sg_mapped) + spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg, + DMA_FROM_DEVICE, attrs); + xfer->rx_sg_mapped = false; - spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg, - DMA_FROM_DEVICE, attrs); - spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg, - DMA_TO_DEVICE, attrs); + if (xfer->tx_sg_mapped) + spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg, + DMA_TO_DEVICE, attrs); + xfer->tx_sg_mapped = false; } - ctlr->cur_msg_mapped = false; - return 0; } -static void spi_dma_sync_for_device(struct spi_controller *ctlr, struct spi_message *msg, +static void spi_dma_sync_for_device(struct spi_controller *ctlr, struct spi_transfer *xfer) { struct device *rx_dev = ctlr->cur_rx_dma_dev; struct device *tx_dev = ctlr->cur_tx_dma_dev; - if (!ctlr->cur_msg_mapped) - return; - - if (!ctlr->can_dma(ctlr, msg->spi, xfer)) - return; - - dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE); - dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE); + if (xfer->tx_sg_mapped) + dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE); + if (xfer->rx_sg_mapped) + dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE); } -static void spi_dma_sync_for_cpu(struct spi_controller *ctlr, struct spi_message *msg, +static void spi_dma_sync_for_cpu(struct spi_controller *ctlr, struct spi_transfer *xfer) { struct device *rx_dev = ctlr->cur_rx_dma_dev; struct device *tx_dev = ctlr->cur_tx_dma_dev; - if (!ctlr->cur_msg_mapped) - return; - - if (!ctlr->can_dma(ctlr, msg->spi, xfer)) - return; - - dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE); - dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE); + if (xfer->rx_sg_mapped) + dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE); + if (xfer->tx_sg_mapped) + dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE); } #else /* !CONFIG_HAS_DMA */ static inline int __spi_map_msg(struct spi_controller *ctlr, @@ -1367,13 +1350,11 @@ static inline int __spi_unmap_msg(struct spi_controller *ctlr, } static void spi_dma_sync_for_device(struct spi_controller *ctrl, - struct spi_message *msg, struct spi_transfer *xfer) { } static void spi_dma_sync_for_cpu(struct spi_controller *ctrl, - struct spi_message *msg, struct spi_transfer *xfer) { } @@ -1645,13 +1626,13 @@ static int spi_transfer_one_message(struct spi_controller *ctlr, reinit_completion(&ctlr->xfer_completion); fallback_pio: - spi_dma_sync_for_device(ctlr, msg, xfer); + spi_dma_sync_for_device(ctlr, xfer); ret = ctlr->transfer_one(ctlr, msg->spi, xfer); if (ret < 0) { - spi_dma_sync_for_cpu(ctlr, msg, xfer); + spi_dma_sync_for_cpu(ctlr, xfer); - if (ctlr->cur_msg_mapped && - (xfer->error & SPI_TRANS_FAIL_NO_START)) { + if ((xfer->tx_sg_mapped || xfer->rx_sg_mapped) && + (xfer->error & SPI_TRANS_FAIL_NO_START)) { __spi_unmap_msg(ctlr, msg); ctlr->fallback = true; xfer->error &= ~SPI_TRANS_FAIL_NO_START; @@ -1673,7 +1654,7 @@ fallback_pio: msg->status = ret; } - spi_dma_sync_for_cpu(ctlr, msg, xfer); + spi_dma_sync_for_cpu(ctlr, xfer); } else { if (xfer->len) dev_err(&msg->spi->dev, @@ -2151,7 +2132,8 @@ static void __spi_unoptimize_message(struct spi_message *msg) */ static void spi_maybe_unoptimize_message(struct spi_message *msg) { - if (!msg->pre_optimized && msg->optimized) + if (!msg->pre_optimized && msg->optimized && + !msg->spi->controller->defer_optimize_message) __spi_unoptimize_message(msg); } @@ -2230,11 +2212,8 @@ 
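The two flags driving the sync helpers above replace the controller-wide cur_msg_mapped state; they are presumably new members of struct spi_transfer in include/linux/spi/spi.h (not shown in this diff), roughly:

	struct spi_transfer {
		...
		struct sg_table	tx_sg;
		struct sg_table	rx_sg;
		/* set in __spi_map_msg(), cleared in __spi_unmap_msg() */
		bool		tx_sg_mapped;
		bool		rx_sg_mapped;
		...
	};

Tracking this per transfer also removes the need for the dummy_sg placeholder deleted above, since an unmapped direction simply leaves its flag clear.
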
static int spi_start_queue(struct spi_controller *ctlr) static int spi_stop_queue(struct spi_controller *ctlr) { + unsigned int limit = 500; unsigned long flags; - unsigned limit = 500; - int ret = 0; - - spin_lock_irqsave(&ctlr->queue_lock, flags); /* * This is a bit lame, but is optimized for the common execution path. @@ -2242,20 +2221,18 @@ static int spi_stop_queue(struct spi_controller *ctlr) * execution path (pump_messages) would be required to call wake_up or * friends on every SPI message. Do this instead. */ - while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) { + do { + spin_lock_irqsave(&ctlr->queue_lock, flags); + if (list_empty(&ctlr->queue) && !ctlr->busy) { + ctlr->running = false; + spin_unlock_irqrestore(&ctlr->queue_lock, flags); + return 0; + } spin_unlock_irqrestore(&ctlr->queue_lock, flags); usleep_range(10000, 11000); - spin_lock_irqsave(&ctlr->queue_lock, flags); - } - - if (!list_empty(&ctlr->queue) || ctlr->busy) - ret = -EBUSY; - else - ctlr->running = false; + } while (--limit); - spin_unlock_irqrestore(&ctlr->queue_lock, flags); - - return ret; + return -EBUSY; } static int spi_destroy_queue(struct spi_controller *ctlr) @@ -2594,7 +2571,7 @@ struct spi_device *spi_new_ancillary_device(struct spi_device *spi, { struct spi_controller *ctlr = spi->controller; struct spi_device *ancillary; - int rc = 0; + int rc; /* Alloc an spi_device */ ancillary = spi_alloc_device(ctlr); @@ -2740,7 +2717,7 @@ static int acpi_spi_add_resource(struct acpi_resource *ares, void *data) return -ENODEV; if (ctlr) { - if (ACPI_HANDLE(ctlr->dev.parent) != parent_handle) + if (!device_match_acpi_handle(ctlr->dev.parent, parent_handle)) return -ENODEV; } else { struct acpi_device *adev; @@ -2839,7 +2816,7 @@ struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr, if (!lookup.max_speed_hz && ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) && - ACPI_HANDLE(lookup.ctlr->dev.parent) == parent_handle) { + device_match_acpi_handle(lookup.ctlr->dev.parent, parent_handle)) { /* Apple does not use _CRS but nested devices for SPI slaves */ acpi_spi_parse_apple_properties(adev, &lookup); } @@ -3925,7 +3902,7 @@ static int spi_set_cs_timing(struct spi_device *spi) int spi_setup(struct spi_device *spi) { unsigned bad_bits, ugly_bits; - int status = 0; + int status; /* * Check mode to prevent that any two of DUAL, QUAD and NO_MOSI/MISO @@ -4294,6 +4271,11 @@ static int __spi_optimize_message(struct spi_device *spi, static int spi_maybe_optimize_message(struct spi_device *spi, struct spi_message *msg) { + if (spi->controller->defer_optimize_message) { + msg->spi = spi; + return 0; + } + if (msg->pre_optimized) return 0; @@ -4324,6 +4306,13 @@ int spi_optimize_message(struct spi_device *spi, struct spi_message *msg) { int ret; + /* + * Pre-optimization is not supported and optimization is deferred e.g. + * when using spi-mux. 
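+ * (A mux re-submits each message on its parent controller, so optimizing
+ * here would happen twice: once against the mux and once against the
+ * parent.)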
+ */ + if (spi->controller->defer_optimize_message) + return 0; + ret = __spi_optimize_message(spi, msg); if (ret) return ret; @@ -4350,6 +4339,9 @@ EXPORT_SYMBOL_GPL(spi_optimize_message); */ void spi_unoptimize_message(struct spi_message *msg) { + if (msg->spi->controller->defer_optimize_message) + return; + __spi_unoptimize_message(msg); msg->pre_optimized = false; } @@ -4382,6 +4374,34 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message) return ctlr->transfer(spi, message); } +static void devm_spi_unoptimize_message(void *msg) +{ + spi_unoptimize_message(msg); +} + +/** + * devm_spi_optimize_message - managed version of spi_optimize_message() + * @dev: the device that manages @msg (usually @spi->dev) + * @spi: the device that will be used for the message + * @msg: the message to optimize + * Return: zero on success, else a negative error code + * + * spi_unoptimize_message() will automatically be called when the device is + * removed. + */ +int devm_spi_optimize_message(struct device *dev, struct spi_device *spi, + struct spi_message *msg) +{ + int ret; + + ret = spi_optimize_message(spi, msg); + if (ret) + return ret; + + return devm_add_action_or_reset(dev, devm_spi_unoptimize_message, msg); +} +EXPORT_SYMBOL_GPL(devm_spi_optimize_message); + /** * spi_async - asynchronous SPI transfer * @spi: device with which data will be exchanged @@ -4432,8 +4452,6 @@ int spi_async(struct spi_device *spi, struct spi_message *message) spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); - spi_maybe_unoptimize_message(message); - return ret; } EXPORT_SYMBOL_GPL(spi_async); diff --git a/drivers/staging/greybus/gpio.c b/drivers/staging/greybus/gpio.c index 2a115a8fc263..5217aacfcf54 100644 --- a/drivers/staging/greybus/gpio.c +++ b/drivers/staging/greybus/gpio.c @@ -579,7 +579,7 @@ static int gb_gpio_probe(struct gbphy_device *gbphy_dev, if (ret) goto exit_line_free; - ret = gpiochip_add(gpio); + ret = gpiochip_add_data(gpio, NULL); if (ret) { dev_err(&gbphy_dev->dev, "failed to add gpio chip: %d\n", ret); goto exit_line_free; diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 7f6ca8177845..a3e09adc4e76 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -148,35 +148,38 @@ static int iblock_configure_device(struct se_device *dev) dev->dev_attrib.is_nonrot = 1; bi = bdev_get_integrity(bd); - if (bi) { - struct bio_set *bs = &ib_dev->ibd_bio_set; - - if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-IP") || - !strcmp(bi->profile->name, "T10-DIF-TYPE1-IP")) { - pr_err("IBLOCK export of blk_integrity: %s not" - " supported\n", bi->profile->name); - ret = -ENOSYS; - goto out_blkdev_put; - } + if (!bi) + return 0; - if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-CRC")) { - dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT; - } else if (!strcmp(bi->profile->name, "T10-DIF-TYPE1-CRC")) { + switch (bi->csum_type) { + case BLK_INTEGRITY_CSUM_IP: + pr_err("IBLOCK export of blk_integrity: %s not supported\n", + blk_integrity_profile_name(bi)); + ret = -ENOSYS; + goto out_blkdev_put; + case BLK_INTEGRITY_CSUM_CRC: + if (bi->flags & BLK_INTEGRITY_REF_TAG) dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT; - } + else + dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT; + break; + default: + break; + } - if (dev->dev_attrib.pi_prot_type) { - if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) { - pr_err("Unable to allocate bioset for PI\n"); - ret = -ENOMEM; - goto out_blkdev_put; - } - 
pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n", - &bs->bio_integrity_pool); + if (dev->dev_attrib.pi_prot_type) { + struct bio_set *bs = &ib_dev->ibd_bio_set; + + if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) { + pr_err("Unable to allocate bioset for PI\n"); + ret = -ENOMEM; + goto out_blkdev_put; } - dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type; + pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n", + &bs->bio_integrity_pool); } + dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type; return 0; out_blkdev_put: diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c index 3235e1c719e8..3e73efa51bba 100644 --- a/drivers/tee/optee/ffa_abi.c +++ b/drivers/tee/optee/ffa_abi.c @@ -660,7 +660,9 @@ static bool optee_ffa_api_is_compatbile(struct ffa_device *ffa_dev, const struct ffa_ops *ops) { const struct ffa_msg_ops *msg_ops = ops->msg_ops; - struct ffa_send_direct_data data = { OPTEE_FFA_GET_API_VERSION }; + struct ffa_send_direct_data data = { + .data0 = OPTEE_FFA_GET_API_VERSION, + }; int rc; msg_ops->mode_32bit_set(ffa_dev); @@ -677,7 +679,9 @@ static bool optee_ffa_api_is_compatbile(struct ffa_device *ffa_dev, return false; } - data = (struct ffa_send_direct_data){ OPTEE_FFA_GET_OS_VERSION }; + data = (struct ffa_send_direct_data){ + .data0 = OPTEE_FFA_GET_OS_VERSION, + }; rc = msg_ops->sync_send_receive(ffa_dev, &data); if (rc) { pr_err("Unexpected error %d\n", rc); @@ -698,7 +702,9 @@ static bool optee_ffa_exchange_caps(struct ffa_device *ffa_dev, unsigned int *rpc_param_count, unsigned int *max_notif_value) { - struct ffa_send_direct_data data = { OPTEE_FFA_EXCHANGE_CAPABILITIES }; + struct ffa_send_direct_data data = { + .data0 = OPTEE_FFA_EXCHANGE_CAPABILITIES, + }; int rc; rc = ops->msg_ops->sync_send_receive(ffa_dev, &data); diff --git a/drivers/tee/optee/notif.c b/drivers/tee/optee/notif.c index 0d7878e770cd..1970880c796f 100644 --- a/drivers/tee/optee/notif.c +++ b/drivers/tee/optee/notif.c @@ -29,7 +29,7 @@ static bool have_key(struct optee *optee, u_int key) return false; } -int optee_notif_wait(struct optee *optee, u_int key) +int optee_notif_wait(struct optee *optee, u_int key, u32 timeout) { unsigned long flags; struct notif_entry *entry; @@ -70,7 +70,12 @@ int optee_notif_wait(struct optee *optee, u_int key) * Unlock temporarily and wait for completion. 
*/ spin_unlock_irqrestore(&optee->notif.lock, flags); - wait_for_completion(&entry->c); + if (timeout != 0) { + if (!wait_for_completion_timeout(&entry->c, timeout)) + rc = -ETIMEDOUT; + } else { + wait_for_completion(&entry->c); + } spin_lock_irqsave(&optee->notif.lock, flags); list_del(&entry->link); diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h index 429cc20be5cc..424898cdc4e9 100644 --- a/drivers/tee/optee/optee_private.h +++ b/drivers/tee/optee/optee_private.h @@ -26,6 +26,9 @@ #define TEEC_ERROR_BUSY 0xFFFF000D #define TEEC_ERROR_SHORT_BUFFER 0xFFFF0010 +/* API Return Codes are from the GP TEE Internal Core API Specification */ +#define TEE_ERROR_TIMEOUT 0xFFFF3001 + #define TEEC_ORIGIN_COMMS 0x00000002 /* @@ -252,7 +255,7 @@ struct optee_call_ctx { int optee_notif_init(struct optee *optee, u_int max_key); void optee_notif_uninit(struct optee *optee); -int optee_notif_wait(struct optee *optee, u_int key); +int optee_notif_wait(struct optee *optee, u_int key, u32 timeout); int optee_notif_send(struct optee *optee, u_int key); u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params, diff --git a/drivers/tee/optee/optee_rpc_cmd.h b/drivers/tee/optee/optee_rpc_cmd.h index f3f06e0994a7..4576751b490c 100644 --- a/drivers/tee/optee/optee_rpc_cmd.h +++ b/drivers/tee/optee/optee_rpc_cmd.h @@ -41,6 +41,7 @@ * Waiting on notification * [in] value[0].a OPTEE_RPC_NOTIFICATION_WAIT * [in] value[0].b notification value + * [in] value[0].c timeout in milliseconds or 0 if no timeout * * Sending a synchronous notification * [in] value[0].a OPTEE_RPC_NOTIFICATION_SEND diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c index f086812f1179..5de4504665be 100644 --- a/drivers/tee/optee/rpc.c +++ b/drivers/tee/optee/rpc.c @@ -130,6 +130,8 @@ static void handle_rpc_func_cmd_i2c_transfer(struct tee_context *ctx, static void handle_rpc_func_cmd_wq(struct optee *optee, struct optee_msg_arg *arg) { + int rc = 0; + if (arg->num_params != 1) goto bad; @@ -139,7 +141,8 @@ static void handle_rpc_func_cmd_wq(struct optee *optee, switch (arg->params[0].u.value.a) { case OPTEE_RPC_NOTIFICATION_WAIT: - if (optee_notif_wait(optee, arg->params[0].u.value.b)) + rc = optee_notif_wait(optee, arg->params[0].u.value.b, arg->params[0].u.value.c); + if (rc) goto bad; break; case OPTEE_RPC_NOTIFICATION_SEND: @@ -153,7 +156,10 @@ static void handle_rpc_func_cmd_wq(struct optee *optee, arg->ret = TEEC_SUCCESS; return; bad: - arg->ret = TEEC_ERROR_BAD_PARAMETERS; + if (rc == -ETIMEDOUT) + arg->ret = TEE_ERROR_TIMEOUT; + else + arg->ret = TEEC_ERROR_BAD_PARAMETERS; } static void handle_rpc_func_cmd_wait(struct optee_msg_arg *arg) diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index 204ed89a3ec9..ed16897584b4 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig @@ -343,32 +343,6 @@ config ROCKCHIP_THERMAL trip point. Cpufreq is used as the cooling device and will throttle CPUs when the Temperature crosses the passive trip point. -config RCAR_THERMAL - tristate "Renesas R-Car thermal driver" - depends on ARCH_RENESAS || COMPILE_TEST - depends on HAS_IOMEM - help - Enable this to plug the R-Car thermal sensor driver into the Linux - thermal framework. - -config RCAR_GEN3_THERMAL - tristate "Renesas R-Car Gen3 and RZ/G2 thermal driver" - depends on ARCH_RENESAS || COMPILE_TEST - depends on HAS_IOMEM - depends on OF - help - Enable this to plug the R-Car Gen3 or RZ/G2 thermal sensor driver into - the Linux thermal framework. 
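The Renesas entries removed here are moved rather than dropped: the new drivers/thermal/renesas/Kconfig sourced further down (paired with the obj-y += renesas/ Makefile hook) presumably carries them over essentially verbatim, e.g.:

config RCAR_GEN3_THERMAL
	tristate "Renesas R-Car Gen3 and RZ/G2 thermal driver"
	depends on ARCH_RENESAS || COMPILE_TEST
	depends on HAS_IOMEM
	depends on OF
	help
	  Enable this to plug the R-Car Gen3 or RZ/G2 thermal sensor driver into
	  the Linux thermal framework.
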
- -config RZG2L_THERMAL - tristate "Renesas RZ/G2L thermal driver" - depends on ARCH_RENESAS || COMPILE_TEST - depends on HAS_IOMEM - depends on OF - help - Enable this to plug the RZ/G2L thermal sensor driver into the Linux - thermal framework. - config KIRKWOOD_THERMAL tristate "Temperature sensor on Marvell Kirkwood SoCs" depends on MACH_KIRKWOOD || COMPILE_TEST @@ -459,6 +433,8 @@ depends on (ARCH_STI || ARCH_STM32) && OF source "drivers/thermal/st/Kconfig" endmenu +source "drivers/thermal/renesas/Kconfig" + source "drivers/thermal/tegra/Kconfig" config GENERIC_ADC_THERMAL diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile index 5cdf7d68687f..ce7a4752ef52 100644 --- a/drivers/thermal/Makefile +++ b/drivers/thermal/Makefile @@ -38,9 +38,7 @@ obj-$(CONFIG_THERMAL_MMIO) += thermal_mmio.o obj-$(CONFIG_SPEAR_THERMAL) += spear_thermal.o obj-$(CONFIG_SUN8I_THERMAL) += sun8i_thermal.o obj-$(CONFIG_ROCKCHIP_THERMAL) += rockchip_thermal.o -obj-$(CONFIG_RCAR_THERMAL) += rcar_thermal.o -obj-$(CONFIG_RCAR_GEN3_THERMAL) += rcar_gen3_thermal.o -obj-$(CONFIG_RZG2L_THERMAL) += rzg2l_thermal.o +obj-y += renesas/ obj-$(CONFIG_KIRKWOOD_THERMAL) += kirkwood_thermal.o obj-y += samsung/ obj-$(CONFIG_DOVE_THERMAL) += dove_thermal.o diff --git a/drivers/thermal/broadcom/bcm2835_thermal.c b/drivers/thermal/broadcom/bcm2835_thermal.c index 5c1cebe07580..5ad87eb3f578 100644 --- a/drivers/thermal/broadcom/bcm2835_thermal.c +++ b/drivers/thermal/broadcom/bcm2835_thermal.c @@ -163,6 +163,7 @@ MODULE_DEVICE_TABLE(of, bcm2835_thermal_of_match_table); static int bcm2835_thermal_probe(struct platform_device *pdev) { + struct device *dev = &pdev->dev; const struct of_device_id *match; struct thermal_zone_device *tz; struct bcm2835_thermal_data *data; @@ -170,12 +171,11 @@ static int bcm2835_thermal_probe(struct platform_device *pdev) u32 val; unsigned long rate; - data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; - match = of_match_device(bcm2835_thermal_of_match_table, - &pdev->dev); + match = of_match_device(bcm2835_thermal_of_match_table, dev); if (!match) return -EINVAL; @@ -185,34 +185,20 @@ static int bcm2835_thermal_probe(struct platform_device *pdev) return err; } - data->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(data->clk)) { - err = PTR_ERR(data->clk); - if (err != -EPROBE_DEFER) - dev_err(&pdev->dev, "Could not get clk: %d\n", err); - return err; - } - - err = clk_prepare_enable(data->clk); - if (err) - return err; + data->clk = devm_clk_get_enabled(dev, NULL); + if (IS_ERR(data->clk)) + return dev_err_probe(dev, PTR_ERR(data->clk), "Could not get clk\n"); rate = clk_get_rate(data->clk); if ((rate < 1920000) || (rate > 5000000)) - dev_warn(&pdev->dev, + dev_warn(dev, "Clock %pCn running at %lu Hz is outside of the recommended range: 1.92 to 5MHz\n", data->clk, rate); /* register of thermal sensor and get info from DT */ - tz = devm_thermal_of_zone_register(&pdev->dev, 0, data, - &bcm2835_thermal_ops); - if (IS_ERR(tz)) { - err = PTR_ERR(tz); - dev_err(&pdev->dev, - "Failed to register the thermal device: %d\n", - err); - goto err_clk; - } + tz = devm_thermal_of_zone_register(dev, 0, data, &bcm2835_thermal_ops); + if (IS_ERR(tz)) + return dev_err_probe(dev, PTR_ERR(tz), "Failed to register the thermal device\n"); /* * right now the FW does set up the HW-block, so we are not @@ -233,10 +219,8 @@ static int bcm2835_thermal_probe(struct platform_device *pdev) */ err = thermal_zone_get_trip(tz, 0, 
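The bcm2835 conversion above (and the hisi and imx ones below) lean on two idioms that shrink probe error paths: devm_clk_get_enabled(), which ties the clock's prepare/enable to the device lifetime, and dev_err_probe(), which logs and returns in one step while demoting -EPROBE_DEFER to a debug message. The resulting shape, sketched for a made-up driver:

	static int foo_probe(struct platform_device *pdev)
	{
		struct device *dev = &pdev->dev;
		struct clk *clk;

		/* no matching clk_disable_unprepare() needed anywhere */
		clk = devm_clk_get_enabled(dev, NULL);
		if (IS_ERR(clk))
			return dev_err_probe(dev, PTR_ERR(clk), "Could not get clk\n");

		return 0;
	}
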
&trip); if (err < 0) { - dev_err(&pdev->dev, - "Not able to read trip_temp: %d\n", - err); - goto err_tz; + dev_err(dev, "Not able to read trip_temp: %d\n", err); + return err; } /* set bandgap reference voltage and enable voltage regulator */ @@ -269,17 +253,11 @@ static int bcm2835_thermal_probe(struct platform_device *pdev) */ err = thermal_add_hwmon_sysfs(tz); if (err) - goto err_tz; + return err; bcm2835_thermal_debugfs(pdev); return 0; -err_tz: - devm_thermal_of_zone_unregister(&pdev->dev, tz); -err_clk: - clk_disable_unprepare(data->clk); - - return err; } static void bcm2835_thermal_remove(struct platform_device *pdev) @@ -287,7 +265,6 @@ static void bcm2835_thermal_remove(struct platform_device *pdev) struct bcm2835_thermal_data *data = platform_get_drvdata(pdev); debugfs_remove_recursive(data->debugfsdir); - clk_disable_unprepare(data->clk); } static struct platform_driver bcm2835_thermal_driver = { diff --git a/drivers/thermal/gov_bang_bang.c b/drivers/thermal/gov_bang_bang.c index acb52c9ee10f..4a2e869b9538 100644 --- a/drivers/thermal/gov_bang_bang.c +++ b/drivers/thermal/gov_bang_bang.c @@ -57,24 +57,16 @@ static void bang_bang_control(struct thermal_zone_device *tz, if (instance->trip != trip) continue; - if (instance->target == THERMAL_NO_TARGET) - instance->target = 0; - - if (instance->target != 0 && instance->target != 1) { + if (instance->target != 0 && instance->target != 1 && + instance->target != THERMAL_NO_TARGET) pr_debug("Unexpected state %ld of thermal instance %s in bang-bang\n", instance->target, instance->name); - instance->target = 1; - } - /* * Enable the fan when the trip is crossed on the way up and * disable it when the trip is crossed on the way down. */ - if (instance->target == 0 && crossed_up) - instance->target = 1; - else if (instance->target == 1 && !crossed_up) - instance->target = 0; + instance->target = crossed_up; dev_dbg(&instance->cdev->device, "target=%ld\n", instance->target); diff --git a/drivers/thermal/gov_power_allocator.c b/drivers/thermal/gov_power_allocator.c index 45f04a25255a..1b2345a697c5 100644 --- a/drivers/thermal/gov_power_allocator.c +++ b/drivers/thermal/gov_power_allocator.c @@ -759,6 +759,9 @@ static void power_allocator_manage(struct thermal_zone_device *tz) return; } + if (!params->trip_max) + return; + allocate_power(tz, params->trip_max->temperature); params->update_cdevs = true; } diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c index dd751ae63608..0eb657db62e4 100644 --- a/drivers/thermal/hisi_thermal.c +++ b/drivers/thermal/hisi_thermal.c @@ -388,15 +388,10 @@ static int hi6220_thermal_probe(struct hisi_thermal_data *data) { struct platform_device *pdev = data->pdev; struct device *dev = &pdev->dev; - int ret; data->clk = devm_clk_get(dev, "thermal_clk"); - if (IS_ERR(data->clk)) { - ret = PTR_ERR(data->clk); - if (ret != -EPROBE_DEFER) - dev_err(dev, "failed to get thermal clk: %d\n", ret); - return ret; - } + if (IS_ERR(data->clk)) + return dev_err_probe(dev, PTR_ERR(data->clk), "failed to get thermal clk\n"); data->sensor = devm_kzalloc(dev, sizeof(*data->sensor), GFP_KERNEL); if (!data->sensor) diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c index 83eaae5ca3b8..091fb30dedf3 100644 --- a/drivers/thermal/imx_thermal.c +++ b/drivers/thermal/imx_thermal.c @@ -331,25 +331,16 @@ static int imx_change_mode(struct thermal_zone_device *tz, return 0; } -static int imx_set_trip_temp(struct thermal_zone_device *tz, int trip_id, - int temp) +static int 
imx_set_trip_temp(struct thermal_zone_device *tz, + const struct thermal_trip *trip, int temp) { struct imx_thermal_data *data = thermal_zone_device_priv(tz); - struct thermal_trip trip; int ret; ret = pm_runtime_resume_and_get(data->dev); if (ret < 0) return ret; - ret = __thermal_zone_get_trip(tz, trip_id, &trip); - if (ret) - return ret; - - /* do not allow changing critical threshold */ - if (trip.type == THERMAL_TRIP_CRITICAL) - return -EPERM; - /* do not allow passive to be set higher than critical */ if (temp < 0 || temp > trips[IMX_TRIP_CRITICAL].temperature) return -EINVAL; @@ -601,28 +592,29 @@ static inline void imx_thermal_unregister_legacy_cooling(struct imx_thermal_data static int imx_thermal_probe(struct platform_device *pdev) { + struct device *dev = &pdev->dev; struct imx_thermal_data *data; struct regmap *map; int measure_freq; int ret; - data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; - data->dev = &pdev->dev; + data->dev = dev; - map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "fsl,tempmon"); + map = syscon_regmap_lookup_by_phandle(dev->of_node, "fsl,tempmon"); if (IS_ERR(map)) { ret = PTR_ERR(map); - dev_err(&pdev->dev, "failed to get tempmon regmap: %d\n", ret); + dev_err(dev, "failed to get tempmon regmap: %d\n", ret); return ret; } data->tempmon = map; - data->socdata = of_device_get_match_data(&pdev->dev); + data->socdata = of_device_get_match_data(dev); if (!data->socdata) { - dev_err(&pdev->dev, "no device match found\n"); + dev_err(dev, "no device match found\n"); return -ENODEV; } @@ -645,15 +637,15 @@ static int imx_thermal_probe(struct platform_device *pdev) platform_set_drvdata(pdev, data); - if (of_property_present(pdev->dev.of_node, "nvmem-cells")) { + if (of_property_present(dev->of_node, "nvmem-cells")) { ret = imx_init_from_nvmem_cells(pdev); if (ret) - return dev_err_probe(&pdev->dev, ret, + return dev_err_probe(dev, ret, "failed to init from nvmem\n"); } else { ret = imx_init_from_tempmon_data(pdev); if (ret) { - dev_err(&pdev->dev, "failed to init from fsl,tempmon-data\n"); + dev_err(dev, "failed to init from fsl,tempmon-data\n"); return ret; } } @@ -673,15 +665,12 @@ static int imx_thermal_probe(struct platform_device *pdev) ret = imx_thermal_register_legacy_cooling(data); if (ret) - return dev_err_probe(&pdev->dev, ret, + return dev_err_probe(dev, ret, "failed to register cpufreq cooling device\n"); - data->thermal_clk = devm_clk_get(&pdev->dev, NULL); + data->thermal_clk = devm_clk_get(dev, NULL); if (IS_ERR(data->thermal_clk)) { - ret = PTR_ERR(data->thermal_clk); - if (ret != -EPROBE_DEFER) - dev_err(&pdev->dev, - "failed to get thermal clk: %d\n", ret); + ret = dev_err_probe(dev, PTR_ERR(data->thermal_clk), "failed to get thermal clk\n"); goto legacy_cleanup; } @@ -694,7 +683,7 @@ static int imx_thermal_probe(struct platform_device *pdev) */ ret = clk_prepare_enable(data->thermal_clk); if (ret) { - dev_err(&pdev->dev, "failed to enable thermal clk: %d\n", ret); + dev_err(dev, "failed to enable thermal clk: %d\n", ret); goto legacy_cleanup; } @@ -707,12 +696,12 @@ static int imx_thermal_probe(struct platform_device *pdev) IMX_POLLING_DELAY); if (IS_ERR(data->tz)) { ret = PTR_ERR(data->tz); - dev_err(&pdev->dev, - "failed to register thermal zone device %d\n", ret); + dev_err(dev, "failed to register thermal zone device %d\n", + ret); goto clk_disable; } - dev_info(&pdev->dev, "%s CPU temperature grade - max:%dC" + dev_info(dev, "%s CPU 
temperature grade - max:%dC" " critical:%dC passive:%dC\n", data->temp_grade, data->temp_max / 1000, trips[IMX_TRIP_CRITICAL].temperature / 1000, trips[IMX_TRIP_PASSIVE].temperature / 1000); @@ -736,7 +725,7 @@ static int imx_thermal_probe(struct platform_device *pdev) usleep_range(20, 50); /* the core was configured and enabled just before */ - pm_runtime_set_active(&pdev->dev); + pm_runtime_set_active(dev); pm_runtime_enable(data->dev); ret = pm_runtime_resume_and_get(data->dev); @@ -748,11 +737,11 @@ static int imx_thermal_probe(struct platform_device *pdev) if (ret) goto thermal_zone_unregister; - ret = devm_request_threaded_irq(&pdev->dev, data->irq, + ret = devm_request_threaded_irq(dev, data->irq, imx_thermal_alarm_irq, imx_thermal_alarm_irq_thread, 0, "imx_thermal", data); if (ret < 0) { - dev_err(&pdev->dev, "failed to request alarm irq: %d\n", ret); + dev_err(dev, "failed to request alarm irq: %d\n", ret); goto thermal_zone_unregister; } diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c index fa96972266e4..b0c0f0ffdcb0 100644 --- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c @@ -571,7 +571,7 @@ static int int3400_thermal_probe(struct platform_device *pdev) if (!adev) return -ENODEV; - priv = kzalloc(sizeof(struct int3400_thermal_priv), GFP_KERNEL); + priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; diff --git a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c index 86901f9f54d8..c094a422ded3 100644 --- a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c @@ -25,17 +25,6 @@ struct int3403_sensor { struct int34x_thermal_zone *int340x_zone; }; -struct int3403_performance_state { - u64 performance; - u64 power; - u64 latency; - u64 linear; - u64 control; - u64 raw_performace; - char *raw_unit; - int reserved; -}; - struct int3403_cdev { struct thermal_cooling_device *cdev; unsigned long max_state; diff --git a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c index 400fde7cb3b1..31ed338eb83c 100644 --- a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c +++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c @@ -39,13 +39,14 @@ static int int340x_thermal_get_zone_temp(struct thermal_zone_device *zone, } static int int340x_thermal_set_trip_temp(struct thermal_zone_device *zone, - int trip, int temp) + const struct thermal_trip *trip, int temp) { struct int34x_thermal_zone *d = thermal_zone_device_priv(zone); - char name[] = {'P', 'A', 'T', '0' + trip, '\0'}; + unsigned int trip_index = THERMAL_TRIP_PRIV_TO_INT(trip->priv); + char name[] = {'P', 'A', 'T', '0' + trip_index, '\0'}; acpi_status status; - if (trip > 9) + if (trip_index > 9) return -EINVAL; status = acpi_execute_simple_method(d->adev->handle, name, @@ -62,16 +63,6 @@ static void int340x_thermal_critical(struct thermal_zone_device *zone) thermal_zone_device_type(zone)); } -static inline void *int_to_trip_priv(int i) -{ - return (void *)(long)i; -} - -static inline int trip_priv_to_int(const struct thermal_trip *trip) -{ - return (long)trip->priv; -} - static int int340x_thermal_read_trips(struct acpi_device *zone_adev, struct thermal_trip *zone_trips, int trip_cnt) @@ -106,7 +97,7 @@ static int int340x_thermal_read_trips(struct 
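The open-coded int_to_trip_priv()/trip_priv_to_int() helpers removed above give way to common macros; judging by the call sites here, the shared definitions are presumably equivalent to:

	#define THERMAL_INT_TO_TRIP_PRIV(_val_)	((void *)(uintptr_t)(_val_))
	#define THERMAL_TRIP_PRIV_TO_INT(_val_)	((uintptr_t)(_val_))
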
acpi_device *zone_adev, break; zone_trips[trip_cnt].type = THERMAL_TRIP_ACTIVE; - zone_trips[trip_cnt].priv = int_to_trip_priv(i); + zone_trips[trip_cnt].priv = THERMAL_INT_TO_TRIP_PRIV(i); trip_cnt++; } @@ -154,6 +145,7 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev, zone_trips[i].type = THERMAL_TRIP_PASSIVE; zone_trips[i].temperature = THERMAL_TEMP_INVALID; zone_trips[i].flags |= THERMAL_TRIP_FLAG_RW_TEMP; + zone_trips[i].priv = THERMAL_INT_TO_TRIP_PRIV(i); } trip_cnt = int340x_thermal_read_trips(adev, zone_trips, trip_cnt); @@ -224,7 +216,7 @@ static int int340x_update_one_trip(struct thermal_trip *trip, void *arg) break; case THERMAL_TRIP_ACTIVE: err = thermal_acpi_active_trip_temp(zone_adev, - trip_priv_to_int(trip), + THERMAL_TRIP_PRIV_TO_INT(trip->priv), &temp); break; default: diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c index d75fae7b7ed2..7c46dd6bee73 100644 --- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c +++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c @@ -440,7 +440,8 @@ void proc_thermal_mmio_remove(struct pci_dev *pdev, struct proc_thermal_device * proc_thermal_rapl_remove(); if (proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_FIVR || - proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_DVFS) + proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_DVFS || + proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_DLVR) proc_thermal_rfim_remove(pdev); if (proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_POWER_FLOOR) diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h index 674f3c85dfbc..d5eca6db2c00 100644 --- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h +++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h @@ -65,6 +65,7 @@ struct rapl_mmio_regs { #define PROC_THERMAL_FEATURE_DLVR 0x10 #define PROC_THERMAL_FEATURE_WT_HINT 0x20 #define PROC_THERMAL_FEATURE_POWER_FLOOR 0x40 +#define PROC_THERMAL_FEATURE_MSI_SUPPORT 0x80 #if IS_ENABLED(CONFIG_PROC_THERMAL_MMIO_RAPL) int proc_thermal_rapl_add(struct pci_dev *pdev, struct proc_thermal_device *proc_priv); diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c index 4a1bfebb1b8e..114136893a59 100644 --- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c +++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c @@ -63,6 +63,18 @@ static struct proc_thermal_mmio_info proc_thermal_mmio_info[] = { { PROC_THERMAL_MMIO_INT_STATUS_1, 0x7200, 8, 0x01 }, }; +/* List of supported MSI IDs (sources) */ +enum proc_thermal_msi_ids { + PKG_THERMAL, + DDR_THERMAL, + THERM_POWER_FLOOR, + WORKLOAD_CHANGE, + MSI_THERMAL_MAX +}; + +/* Stores IRQ associated with a MSI ID */ +static int proc_thermal_msi_map[MSI_THERMAL_MAX]; + #define B0D4_THERMAL_NOTIFY_DELAY 1000 static int notify_delay_ms = B0D4_THERMAL_NOTIFY_DELAY; @@ -146,22 +158,41 @@ static irqreturn_t proc_thermal_irq_thread_handler(int irq, void *devid) return IRQ_HANDLED; } +static int proc_thermal_match_msi_irq(int irq) +{ + int i; + + if (!use_msi) + goto msi_fail; + + for (i = 0; i < MSI_THERMAL_MAX; i++) { + if (proc_thermal_msi_map[i] == irq) + return i; + } + +msi_fail: + return -EOPNOTSUPP; +} + static irqreturn_t proc_thermal_irq_handler(int irq, 
void *devid) { struct proc_thermal_pci *pci_info = devid; struct proc_thermal_device *proc_priv; - int ret = IRQ_NONE; + int ret = IRQ_NONE, msi_id; u32 status; proc_priv = pci_info->proc_priv; + msi_id = proc_thermal_match_msi_irq(irq); + if (proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_WT_HINT) { - if (proc_thermal_check_wt_intr(pci_info->proc_priv)) + if (msi_id == WORKLOAD_CHANGE || proc_thermal_check_wt_intr(pci_info->proc_priv)) ret = IRQ_WAKE_THREAD; } if (proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_POWER_FLOOR) { - if (proc_thermal_check_power_floor_intr(pci_info->proc_priv)) + if (msi_id == THERM_POWER_FLOOR || + proc_thermal_check_power_floor_intr(pci_info->proc_priv)) ret = IRQ_WAKE_THREAD; } @@ -171,7 +202,7 @@ static irqreturn_t proc_thermal_irq_handler(int irq, void *devid) * interrupt before scheduling work function for thermal threshold. */ proc_thermal_mmio_read(pci_info, PROC_THERMAL_MMIO_INT_STATUS_0, &status); - if (status) { + if (msi_id == PKG_THERMAL || status) { /* Disable enable interrupt flag */ proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 0); pkg_thermal_schedule_work(&pci_info->work); @@ -194,7 +225,8 @@ static int sys_get_curr_temp(struct thermal_zone_device *tzd, int *temp) return 0; } -static int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, int temp) +static int sys_set_trip_temp(struct thermal_zone_device *tzd, + const struct thermal_trip *trip, int temp) { struct proc_thermal_pci *pci_info = thermal_zone_device_priv(tzd); int tjmax, _temp; @@ -244,6 +276,45 @@ static struct thermal_zone_params tzone_params = { .no_hwmon = true, }; +static bool msi_irq; + +static int proc_thermal_setup_msi(struct pci_dev *pdev, struct proc_thermal_pci *pci_info) +{ + int ret, i, irq; + + ret = pci_alloc_irq_vectors(pdev, 1, MSI_THERMAL_MAX, PCI_IRQ_MSI | PCI_IRQ_MSIX); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to allocate vectors!\n"); + return ret; + } + + dev_info(&pdev->dev, "msi enabled:%d msix enabled:%d\n", pdev->msi_enabled, + pdev->msix_enabled); + + for (i = 0; i < MSI_THERMAL_MAX; i++) { + irq = pci_irq_vector(pdev, i); + + ret = devm_request_threaded_irq(&pdev->dev, irq, proc_thermal_irq_handler, + proc_thermal_irq_thread_handler, + 0, KBUILD_MODNAME, pci_info); + if (ret) { + dev_err(&pdev->dev, "Request IRQ %d failed\n", irq); + goto err_free_msi_vectors; + } + + proc_thermal_msi_map[i] = irq; + } + + msi_irq = true; + + return 0; + +err_free_msi_vectors: + pci_free_irq_vectors(pdev); + + return ret; +} + static int proc_thermal_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct proc_thermal_device *proc_priv; @@ -253,7 +324,6 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev, const struct pci_device_ .flags = THERMAL_TRIP_FLAG_RW_TEMP, }; int irq_flag = 0, irq, ret; - bool msi_irq = false; proc_priv = devm_kzalloc(&pdev->dev, sizeof(*proc_priv), GFP_KERNEL); if (!proc_priv) @@ -299,27 +369,24 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev, const struct pci_device_ goto err_del_legacy; } - if (use_msi && (pdev->msi_enabled || pdev->msix_enabled)) { - /* request and enable interrupt */ - ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); - if (ret < 0) { - dev_err(&pdev->dev, "Failed to allocate vectors!\n"); - goto err_ret_tzone; - } + if (proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_MSI_SUPPORT) + use_msi = true; - irq = pci_irq_vector(pdev, 0); - msi_irq = true; + if (use_msi) { + ret = proc_thermal_setup_msi(pdev, pci_info); + if (ret) + goto 
err_ret_tzone; } else { irq_flag = IRQF_SHARED; irq = pdev->irq; - } - ret = devm_request_threaded_irq(&pdev->dev, irq, - proc_thermal_irq_handler, proc_thermal_irq_thread_handler, - irq_flag, KBUILD_MODNAME, pci_info); - if (ret) { - dev_err(&pdev->dev, "Request IRQ %d failed\n", pdev->irq); - goto err_free_vectors; + ret = devm_request_threaded_irq(&pdev->dev, irq, proc_thermal_irq_handler, + proc_thermal_irq_thread_handler, irq_flag, + KBUILD_MODNAME, pci_info); + if (ret) { + dev_err(&pdev->dev, "Request IRQ %d failed\n", pdev->irq); + goto err_ret_tzone; + } } ret = thermal_zone_device_enable(pci_info->tzone); @@ -352,9 +419,6 @@ static void proc_thermal_pci_remove(struct pci_dev *pdev) proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_THRES_0, 0); proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 0); - devm_free_irq(&pdev->dev, pdev->irq, pci_info); - pci_free_irq_vectors(pdev); - thermal_zone_device_unregister(pci_info->tzone); proc_thermal_mmio_remove(pdev, pci_info->proc_priv); if (!pci_info->no_legacy) @@ -408,7 +472,9 @@ static SIMPLE_DEV_PM_OPS(proc_thermal_pci_pm, proc_thermal_pci_suspend, static const struct pci_device_id proc_thermal_pci_ids[] = { { PCI_DEVICE_DATA(INTEL, ADL_THERMAL, PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_WT_REQ) }, - { PCI_DEVICE_DATA(INTEL, LNLM_THERMAL, PROC_THERMAL_FEATURE_RAPL) }, + { PCI_DEVICE_DATA(INTEL, LNLM_THERMAL, PROC_THERMAL_FEATURE_MSI_SUPPORT | + PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_DLVR | + PROC_THERMAL_FEATURE_WT_HINT | PROC_THERMAL_FEATURE_POWER_FLOOR) }, { PCI_DEVICE_DATA(INTEL, MTLP_THERMAL, PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_DLVR | PROC_THERMAL_FEATURE_WT_HINT | PROC_THERMAL_FEATURE_POWER_FLOOR) }, diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c index e56db75a94fb..0e2dc1426282 100644 --- a/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c +++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c @@ -19,6 +19,12 @@ struct mmio_reg { u16 shift; }; +struct mapping_table { + const char *attr_name; + const u32 value; + const char *mapped_str; +}; + /* These will represent sysfs attribute names */ static const char * const fivr_strings[] = { "vco_ref_code_lo", @@ -62,6 +68,78 @@ static const struct mmio_reg dlvr_mmio_regs[] = { { 1, 0x15A10, 1, 0x1, 16}, /* dlvr_pll_busy */ }; +static const struct mmio_reg lnl_dlvr_mmio_regs[] = { + { 0, 0x5A08, 5, 0x1F, 0}, /* dlvr_spread_spectrum_pct */ + { 0, 0x5A08, 1, 0x1, 5}, /* dlvr_control_mode */ + { 0, 0x5A08, 1, 0x1, 6}, /* dlvr_control_lock */ + { 0, 0x5A08, 1, 0x1, 7}, /* dlvr_rfim_enable */ + { 0, 0x5A08, 2, 0x3, 8}, /* dlvr_freq_select */ + { 1, 0x5A10, 2, 0x3, 30}, /* dlvr_hardware_rev */ + { 1, 0x5A10, 2, 0x3, 0}, /* dlvr_freq_mhz */ + { 1, 0x5A10, 1, 0x1, 23}, /* dlvr_pll_busy */ +}; + +static const struct mapping_table lnl_dlvr_mapping[] = { + {"dlvr_freq_select", 0, "2227.2"}, + {"dlvr_freq_select", 1, "2140"}, + {"dlvr_freq_mhz", 0, "2227.2"}, + {"dlvr_freq_mhz", 1, "2140"}, + {NULL, 0, NULL}, +}; + +static int match_mapping_table(const struct mapping_table *table, const char *attr_name, + bool match_int_value, const u32 value, const char *value_str, + char **result_str, u32 *result_int) +{ + bool attr_matched = false; + int i = 0; + + if (!table) + return -EOPNOTSUPP; + + while (table[i].attr_name) { + if 
(strncmp(table[i].attr_name, attr_name, strlen(attr_name))) + goto match_next; + + attr_matched = true; + + if (match_int_value) { + if (table[i].value != value) + goto match_next; + + *result_str = (char *)table[i].mapped_str; + return 0; + } + + if (strncmp(table[i].mapped_str, value_str, strlen(table[i].mapped_str))) + goto match_next; + + *result_int = table[i].value; + + return 0; +match_next: + i++; + } + + /* If attribute name is matched, then the user space value is invalid */ + if (attr_matched) + return -EINVAL; + + return -EOPNOTSUPP; +} + +static int get_mapped_string(const struct mapping_table *table, const char *attr_name, + u32 value, char **result) +{ + return match_mapping_table(table, attr_name, true, value, NULL, result, NULL); +} + +static int get_mapped_value(const struct mapping_table *table, const char *attr_name, + const char *value, unsigned int *result) +{ + return match_mapping_table(table, attr_name, false, 0, value, NULL, result); +} + /* These will represent sysfs attribute names */ static const char * const dvfs_strings[] = { "rfi_restriction_run_busy", @@ -93,12 +171,14 @@ static ssize_t suffix##_show(struct device *dev,\ struct device_attribute *attr,\ char *buf)\ {\ + const struct mapping_table *mapping = NULL;\ struct proc_thermal_device *proc_priv;\ struct pci_dev *pdev = to_pci_dev(dev);\ const struct mmio_reg *mmio_regs;\ const char **match_strs;\ + int ret, err;\ u32 reg_val;\ - int ret;\ + char *str;\ \ proc_priv = pci_get_drvdata(pdev);\ if (table == 1) {\ @@ -106,7 +186,12 @@ static ssize_t suffix##_show(struct device *dev,\ mmio_regs = adl_dvfs_mmio_regs;\ } else if (table == 2) { \ match_strs = (const char **)dlvr_strings;\ - mmio_regs = dlvr_mmio_regs;\ + if (pdev->device == PCI_DEVICE_ID_INTEL_LNLM_THERMAL) {\ + mmio_regs = lnl_dlvr_mmio_regs;\ + mapping = lnl_dlvr_mapping;\ + } else {\ + mmio_regs = dlvr_mmio_regs;\ + } \ } else {\ match_strs = (const char **)fivr_strings;\ mmio_regs = tgl_fivr_mmio_regs;\ @@ -116,7 +201,12 @@ static ssize_t suffix##_show(struct device *dev,\ return ret;\ reg_val = readl((void __iomem *) (proc_priv->mmio_base + mmio_regs[ret].offset));\ ret = (reg_val >> mmio_regs[ret].shift) & mmio_regs[ret].mask;\ - return sprintf(buf, "%u\n", ret);\ + err = get_mapped_string(mapping, attr->attr.name, ret, &str);\ + if (!err)\ + return sprintf(buf, "%s\n", str);\ + if (err == -EOPNOTSUPP)\ + return sprintf(buf, "%u\n", ret);\ + return err;\ } #define RFIM_STORE(suffix, table)\ @@ -124,6 +214,7 @@ static ssize_t suffix##_store(struct device *dev,\ struct device_attribute *attr,\ const char *buf, size_t count)\ {\ + const struct mapping_table *mapping = NULL;\ struct proc_thermal_device *proc_priv;\ struct pci_dev *pdev = to_pci_dev(dev);\ unsigned int input;\ @@ -139,7 +230,12 @@ static ssize_t suffix##_store(struct device *dev,\ mmio_regs = adl_dvfs_mmio_regs;\ } else if (table == 2) { \ match_strs = (const char **)dlvr_strings;\ - mmio_regs = dlvr_mmio_regs;\ + if (pdev->device == PCI_DEVICE_ID_INTEL_LNLM_THERMAL) {\ + mmio_regs = lnl_dlvr_mmio_regs;\ + mapping = lnl_dlvr_mapping;\ + } else {\ + mmio_regs = dlvr_mmio_regs;\ + } \ } else {\ match_strs = (const char **)fivr_strings;\ mmio_regs = tgl_fivr_mmio_regs;\ @@ -150,9 +246,14 @@ static ssize_t suffix##_store(struct device *dev,\ return ret;\ if (mmio_regs[ret].read_only)\ return -EPERM;\ - err = kstrtouint(buf, 10, &input);\ - if (err)\ + err = get_mapped_value(mapping, attr->attr.name, buf, &input);\ + if (err == -EINVAL)\ return err;\ + if (err == -EOPNOTSUPP) {\ + 
err = kstrtouint(buf, 10, &input);\ + if (err)\ + return err;\ + } \ mask = GENMASK(mmio_regs[ret].shift + mmio_regs[ret].bits - 1, mmio_regs[ret].shift);\ reg_val = readl((void __iomem *) (proc_priv->mmio_base + mmio_regs[ret].offset));\ reg_val &= ~mask;\ diff --git a/drivers/thermal/intel/intel_hfi.c b/drivers/thermal/intel/intel_hfi.c index a180a98bb9f1..5b18a46a10b0 100644 --- a/drivers/thermal/intel/intel_hfi.c +++ b/drivers/thermal/intel/intel_hfi.c @@ -401,10 +401,10 @@ static void hfi_disable(void) * intel_hfi_online() - Enable HFI on @cpu * @cpu: CPU in which the HFI will be enabled * - * Enable the HFI to be used in @cpu. The HFI is enabled at the die/package - * level. The first CPU in the die/package to come online does the full HFI + * Enable the HFI to be used in @cpu. The HFI is enabled at the package + * level. The first CPU in the package to come online does the full HFI * initialization. Subsequent CPUs will just link themselves to the HFI - * instance of their die/package. + * instance of their package. * * This function is called before enabling the thermal vector in the local APIC * in order to ensure that @cpu has an associated HFI instance when it receives @@ -414,31 +414,31 @@ void intel_hfi_online(unsigned int cpu) { struct hfi_instance *hfi_instance; struct hfi_cpu_info *info; - u16 die_id; + u16 pkg_id; /* Nothing to do if hfi_instances are missing. */ if (!hfi_instances) return; /* - * Link @cpu to the HFI instance of its package/die. It does not + * Link @cpu to the HFI instance of its package. It does not * matter whether the instance has been initialized. */ info = &per_cpu(hfi_cpu_info, cpu); - die_id = topology_logical_die_id(cpu); + pkg_id = topology_logical_package_id(cpu); hfi_instance = info->hfi_instance; if (!hfi_instance) { - if (die_id >= max_hfi_instances) + if (pkg_id >= max_hfi_instances) return; - hfi_instance = &hfi_instances[die_id]; + hfi_instance = &hfi_instances[pkg_id]; info->hfi_instance = hfi_instance; } init_hfi_cpu_index(info); /* - * Now check if the HFI instance of the package/die of @cpu has been + * Now check if the HFI instance of the package of @cpu has been * initialized (by checking its header). In such case, all we have to * do is to add @cpu to this instance's cpumask and enable the instance * if needed. @@ -504,7 +504,7 @@ free_hw_table: * * On some processors, hardware remembers previous programming settings even * after being reprogrammed. Thus, keep HFI enabled even if all CPUs in the - * die/package of @cpu are offline. See note in intel_hfi_online(). + * package of @cpu are offline. See note in intel_hfi_online(). */ void intel_hfi_offline(unsigned int cpu) { @@ -674,9 +674,13 @@ void __init intel_hfi_init(void) if (hfi_parse_features()) return; - /* There is one HFI instance per die/package. */ - max_hfi_instances = topology_max_packages() * - topology_max_dies_per_package(); + /* + * Note: HFI resources are managed at the physical package scope. + * There could be platforms that enumerate packages as Linux dies. + * Special handling would be needed if this happens on an HFI-capable + * platform. + */ + max_hfi_instances = topology_max_packages(); /* * This allocation may fail. 
CPU hotplug callbacks must check diff --git a/drivers/thermal/intel/intel_pch_thermal.c b/drivers/thermal/intel/intel_pch_thermal.c index f5be2c389351..fc326985796c 100644 --- a/drivers/thermal/intel/intel_pch_thermal.c +++ b/drivers/thermal/intel/intel_pch_thermal.c @@ -298,6 +298,11 @@ static int intel_pch_thermal_suspend_noirq(struct device *device) /* Get the PCH current temperature value */ pch_cur_temp = GET_PCH_TEMP(WPT_TEMP_TSR & readw(ptd->hw_base + WPT_TEMP)); + if (pch_cur_temp >= pch_thr_temp) + dev_warn(&ptd->pdev->dev, + "CPU-PCH current temp [%dC] higher than the threshold temp [%dC], S0ix might fail. Start cooling...\n", + pch_cur_temp, pch_thr_temp); + /* * If current PCH temperature is higher than configured PCH threshold * value, run some delay loop with sleep to let the current temperature diff --git a/drivers/thermal/intel/intel_quark_dts_thermal.c b/drivers/thermal/intel/intel_quark_dts_thermal.c index ec6ad26027bc..47296a14db3c 100644 --- a/drivers/thermal/intel/intel_quark_dts_thermal.c +++ b/drivers/thermal/intel/intel_quark_dts_thermal.c @@ -195,7 +195,7 @@ static int get_trip_temp(int trip) } static int update_trip_temp(struct soc_sensor_entry *aux_entry, - int trip, int temp) + int trip_index, int temp) { u32 out; u32 temp_out; @@ -230,9 +230,9 @@ static int update_trip_temp(struct soc_sensor_entry *aux_entry, */ temp_out = temp + QRK_DTS_TEMP_BASE; out = (store_ptps & ~(QRK_DTS_MASK_TP_THRES << - (trip * QRK_DTS_SHIFT_TP))); + (trip_index * QRK_DTS_SHIFT_TP))); out |= (temp_out & QRK_DTS_MASK_TP_THRES) << - (trip * QRK_DTS_SHIFT_TP); + (trip_index * QRK_DTS_SHIFT_TP); ret = iosf_mbi_write(QRK_MBI_UNIT_RMU, MBI_REG_WRITE, QRK_DTS_REG_OFFSET_PTPS, out); @@ -242,10 +242,26 @@ failed: return ret; } -static inline int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, - int temp) +static inline int sys_set_trip_temp(struct thermal_zone_device *tzd, + const struct thermal_trip *trip, + int temp) { - return update_trip_temp(thermal_zone_device_priv(tzd), trip, temp); + unsigned int trip_index; + + switch (trip->type) { + case THERMAL_TRIP_HOT: + trip_index = QRK_DTS_ID_TP_HOT; + break; + + case THERMAL_TRIP_CRITICAL: + trip_index = QRK_DTS_ID_TP_CRITICAL; + break; + + default: + return -EINVAL; + } + + return update_trip_temp(thermal_zone_device_priv(tzd), trip_index, temp); } static int sys_get_curr_temp(struct thermal_zone_device *tzd, diff --git a/drivers/thermal/intel/intel_soc_dts_iosf.c b/drivers/thermal/intel/intel_soc_dts_iosf.c index 7adf942665d4..43a29551ba17 100644 --- a/drivers/thermal/intel/intel_soc_dts_iosf.c +++ b/drivers/thermal/intel/intel_soc_dts_iosf.c @@ -129,18 +129,20 @@ err_restore_ptps: return status; } -static int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, +static int sys_set_trip_temp(struct thermal_zone_device *tzd, + const struct thermal_trip *trip, int temp) { struct intel_soc_dts_sensor_entry *dts = thermal_zone_device_priv(tzd); struct intel_soc_dts_sensors *sensors = dts->sensors; + unsigned int trip_index = THERMAL_TRIP_PRIV_TO_INT(trip->priv); int status; if (temp > sensors->tj_max) return -EINVAL; mutex_lock(&sensors->dts_update_lock); - status = update_trip_temp(sensors, trip, temp); + status = update_trip_temp(sensors, trip_index, temp); mutex_unlock(&sensors->dts_update_lock); return status; @@ -293,11 +295,12 @@ static void dts_trips_reset(struct intel_soc_dts_sensors *sensors, int dts_index } static void set_trip(struct thermal_trip *trip, enum thermal_trip_type type, - u8 flags, int temp) + u8 
flags, int temp, unsigned int index) { trip->type = type; trip->flags = flags; trip->temperature = temp; + trip->priv = THERMAL_INT_TO_TRIP_PRIV(index); } struct intel_soc_dts_sensors * @@ -332,7 +335,7 @@ intel_soc_dts_iosf_init(enum intel_soc_dts_interrupt_type intr_type, sensors->soc_dts[i].sensors = sensors; set_trip(&trips[i][0], THERMAL_TRIP_PASSIVE, - THERMAL_TRIP_FLAG_RW_TEMP, 0); + THERMAL_TRIP_FLAG_RW_TEMP, 0, 0); ret = update_trip_temp(sensors, 0, 0); if (ret) @@ -340,10 +343,10 @@ intel_soc_dts_iosf_init(enum intel_soc_dts_interrupt_type intr_type, if (critical_trip) { temp = sensors->tj_max - crit_offset; - set_trip(&trips[i][1], THERMAL_TRIP_CRITICAL, 0, temp); + set_trip(&trips[i][1], THERMAL_TRIP_CRITICAL, 0, temp, 1); } else { set_trip(&trips[i][1], THERMAL_TRIP_PASSIVE, - THERMAL_TRIP_FLAG_RW_TEMP, 0); + THERMAL_TRIP_FLAG_RW_TEMP, 0, 1); temp = 0; } diff --git a/drivers/thermal/intel/intel_soc_dts_thermal.c b/drivers/thermal/intel/intel_soc_dts_thermal.c index 9c825c6e1f38..718c6326eaf4 100644 --- a/drivers/thermal/intel/intel_soc_dts_thermal.c +++ b/drivers/thermal/intel/intel_soc_dts_thermal.c @@ -36,7 +36,7 @@ static irqreturn_t soc_irq_thread_fn(int irq, void *dev_data) } static const struct x86_cpu_id soc_thermal_ids[] = { - X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, BYT_SOC_DTS_APIC_IRQ), + X86_MATCH_VFM(INTEL_ATOM_SILVERMONT, BYT_SOC_DTS_APIC_IRQ), {} }; MODULE_DEVICE_TABLE(x86cpu, soc_thermal_ids); diff --git a/drivers/thermal/intel/intel_tcc.c b/drivers/thermal/intel/intel_tcc.c index 5e8b7f34b395..c86654f28aa5 100644 --- a/drivers/thermal/intel/intel_tcc.c +++ b/drivers/thermal/intel/intel_tcc.c @@ -6,9 +6,171 @@ #include <linux/errno.h> #include <linux/intel_tcc.h> +#include <asm/cpu_device_id.h> +#include <asm/intel-family.h> #include <asm/msr.h> /** + * struct temp_masks - Bitmasks for temperature readings + * @tcc_offset: TCC offset in MSR_TEMPERATURE_TARGET + * @digital_readout: Digital readout in MSR_IA32_THERM_STATUS + * @pkg_digital_readout: Digital readout in MSR_IA32_PACKAGE_THERM_STATUS + * + * Bitmasks to extract the fields of the MSR_TEMPERATURE and IA32_[PACKAGE]_ + * THERM_STATUS registers for different processor models. + * + * The bitmask of TjMax is not included in this structure. It is always 0xff. + */ +struct temp_masks { + u32 tcc_offset; + u32 digital_readout; + u32 pkg_digital_readout; +}; + +#define TCC_MODEL_TEMP_MASKS(model, _tcc_offset, _digital_readout, \ + _pkg_digital_readout) \ + static const struct temp_masks temp_##model __initconst = { \ + .tcc_offset = _tcc_offset, \ + .digital_readout = _digital_readout, \ + .pkg_digital_readout = _pkg_digital_readout \ + } + +TCC_MODEL_TEMP_MASKS(nehalem, 0, 0x7f, 0x7f); +TCC_MODEL_TEMP_MASKS(haswell_x, 0xf, 0x7f, 0x7f); +TCC_MODEL_TEMP_MASKS(broadwell, 0x3f, 0x7f, 0x7f); +TCC_MODEL_TEMP_MASKS(goldmont, 0x7f, 0x7f, 0x7f); +TCC_MODEL_TEMP_MASKS(tigerlake, 0x3f, 0xff, 0xff); +TCC_MODEL_TEMP_MASKS(sapphirerapids, 0x3f, 0x7f, 0xff); + +/* Use these masks for processors not included in @tcc_cpu_ids. 
*/ +static struct temp_masks intel_tcc_temp_masks __ro_after_init = { + .tcc_offset = 0x7f, + .digital_readout = 0xff, + .pkg_digital_readout = 0xff, +}; + +static const struct x86_cpu_id intel_tcc_cpu_ids[] __initconst = { + X86_MATCH_VFM(INTEL_CORE_YONAH, &temp_nehalem), + X86_MATCH_VFM(INTEL_CORE2_MEROM, &temp_nehalem), + X86_MATCH_VFM(INTEL_CORE2_MEROM_L, &temp_nehalem), + X86_MATCH_VFM(INTEL_CORE2_PENRYN, &temp_nehalem), + X86_MATCH_VFM(INTEL_CORE2_DUNNINGTON, &temp_nehalem), + X86_MATCH_VFM(INTEL_NEHALEM, &temp_nehalem), + X86_MATCH_VFM(INTEL_NEHALEM_G, &temp_nehalem), + X86_MATCH_VFM(INTEL_NEHALEM_EP, &temp_nehalem), + X86_MATCH_VFM(INTEL_NEHALEM_EX, &temp_nehalem), + X86_MATCH_VFM(INTEL_WESTMERE, &temp_nehalem), + X86_MATCH_VFM(INTEL_WESTMERE_EP, &temp_nehalem), + X86_MATCH_VFM(INTEL_WESTMERE_EX, &temp_nehalem), + X86_MATCH_VFM(INTEL_SANDYBRIDGE, &temp_nehalem), + X86_MATCH_VFM(INTEL_SANDYBRIDGE_X, &temp_nehalem), + X86_MATCH_VFM(INTEL_IVYBRIDGE, &temp_nehalem), + X86_MATCH_VFM(INTEL_IVYBRIDGE_X, &temp_haswell_x), + X86_MATCH_VFM(INTEL_HASWELL, &temp_nehalem), + X86_MATCH_VFM(INTEL_HASWELL_X, &temp_haswell_x), + X86_MATCH_VFM(INTEL_HASWELL_L, &temp_nehalem), + X86_MATCH_VFM(INTEL_HASWELL_G, &temp_nehalem), + X86_MATCH_VFM(INTEL_BROADWELL, &temp_broadwell), + X86_MATCH_VFM(INTEL_BROADWELL_G, &temp_broadwell), + X86_MATCH_VFM(INTEL_BROADWELL_X, &temp_haswell_x), + X86_MATCH_VFM(INTEL_BROADWELL_D, &temp_haswell_x), + X86_MATCH_VFM(INTEL_SKYLAKE_L, &temp_broadwell), + X86_MATCH_VFM(INTEL_SKYLAKE, &temp_broadwell), + X86_MATCH_VFM(INTEL_SKYLAKE_X, &temp_haswell_x), + X86_MATCH_VFM(INTEL_KABYLAKE_L, &temp_broadwell), + X86_MATCH_VFM(INTEL_KABYLAKE, &temp_broadwell), + X86_MATCH_VFM(INTEL_COMETLAKE, &temp_broadwell), + X86_MATCH_VFM(INTEL_COMETLAKE_L, &temp_broadwell), + X86_MATCH_VFM(INTEL_CANNONLAKE_L, &temp_broadwell), + X86_MATCH_VFM(INTEL_ICELAKE_X, &temp_broadwell), + X86_MATCH_VFM(INTEL_ICELAKE_D, &temp_broadwell), + X86_MATCH_VFM(INTEL_ICELAKE, &temp_broadwell), + X86_MATCH_VFM(INTEL_ICELAKE_L, &temp_broadwell), + X86_MATCH_VFM(INTEL_ICELAKE_NNPI, &temp_broadwell), + X86_MATCH_VFM(INTEL_ROCKETLAKE, &temp_broadwell), + X86_MATCH_VFM(INTEL_TIGERLAKE_L, &temp_tigerlake), + X86_MATCH_VFM(INTEL_TIGERLAKE, &temp_tigerlake), + X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &temp_sapphirerapids), + X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &temp_sapphirerapids), + X86_MATCH_VFM(INTEL_LAKEFIELD, &temp_broadwell), + X86_MATCH_VFM(INTEL_ALDERLAKE, &temp_tigerlake), + X86_MATCH_VFM(INTEL_ALDERLAKE_L, &temp_tigerlake), + X86_MATCH_VFM(INTEL_RAPTORLAKE, &temp_tigerlake), + X86_MATCH_VFM(INTEL_RAPTORLAKE_P, &temp_tigerlake), + X86_MATCH_VFM(INTEL_RAPTORLAKE_S, &temp_tigerlake), + X86_MATCH_VFM(INTEL_ATOM_BONNELL, &temp_nehalem), + X86_MATCH_VFM(INTEL_ATOM_BONNELL_MID, &temp_nehalem), + X86_MATCH_VFM(INTEL_ATOM_SALTWELL, &temp_nehalem), + X86_MATCH_VFM(INTEL_ATOM_SALTWELL_MID, &temp_nehalem), + X86_MATCH_VFM(INTEL_ATOM_SILVERMONT, &temp_broadwell), + X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_D, &temp_broadwell), + X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_MID, &temp_broadwell), + X86_MATCH_VFM(INTEL_ATOM_AIRMONT, &temp_broadwell), + X86_MATCH_VFM(INTEL_ATOM_AIRMONT_MID, &temp_broadwell), + X86_MATCH_VFM(INTEL_ATOM_AIRMONT_NP, &temp_broadwell), + X86_MATCH_VFM(INTEL_ATOM_GOLDMONT, &temp_goldmont), + X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_D, &temp_goldmont), + X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_PLUS, &temp_goldmont), + X86_MATCH_VFM(INTEL_ATOM_TREMONT_D, &temp_broadwell), + X86_MATCH_VFM(INTEL_ATOM_TREMONT, &temp_broadwell), + 
X86_MATCH_VFM(INTEL_ATOM_TREMONT_L, &temp_broadwell), + X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, &temp_tigerlake), + X86_MATCH_VFM(INTEL_XEON_PHI_KNL, &temp_broadwell), + X86_MATCH_VFM(INTEL_XEON_PHI_KNM, &temp_broadwell), + {} +}; + +static int __init intel_tcc_init(void) +{ + const struct x86_cpu_id *id; + + id = x86_match_cpu(intel_tcc_cpu_ids); + if (id) + memcpy(&intel_tcc_temp_masks, (const void *)id->driver_data, + sizeof(intel_tcc_temp_masks)); + + return 0; +} +/* + * Use subsys_initcall to ensure temperature bitmasks are initialized before + * the drivers that use this library. + */ +subsys_initcall(intel_tcc_init); + +/** + * intel_tcc_get_offset_mask() - Returns the bitmask to read TCC offset + * + * Get the model-specific bitmask to extract TCC_OFFSET from the MSR + * TEMPERATURE_TARGET register. If the mask is 0, it means the processor does + * not support TCC offset. + * + * Return: The model-specific bitmask for TCC offset. + */ +u32 intel_tcc_get_offset_mask(void) +{ + return intel_tcc_temp_masks.tcc_offset; +} +EXPORT_SYMBOL_NS(intel_tcc_get_offset_mask, INTEL_TCC); + +/** + * get_temp_mask() - Returns the model-specific bitmask for temperature + * + * @pkg: true: Package Thermal Sensor. false: Core Thermal Sensor. + * + * Get the model-specific bitmask to extract the temperature reading from the + * MSR_IA32_[PACKAGE]_THERM_STATUS register. + * + * Callers must check if the thermal status registers are supported. + * + * Return: The model-specific bitmask for temperature reading + */ +static u32 get_temp_mask(bool pkg) +{ + return pkg ? intel_tcc_temp_masks.pkg_digital_readout : + intel_tcc_temp_masks.digital_readout; +} + +/** * intel_tcc_get_tjmax() - returns the default TCC activation Temperature * @cpu: cpu that the MSR should be run on, negative value means any cpu. * @@ -56,7 +218,7 @@ int intel_tcc_get_offset(int cpu) if (err) return err; - return (low >> 24) & 0x3f; + return (low >> 24) & intel_tcc_temp_masks.tcc_offset; } EXPORT_SYMBOL_NS_GPL(intel_tcc_get_offset, INTEL_TCC); @@ -76,7 +238,10 @@ int intel_tcc_set_offset(int cpu, int offset) u32 low, high; int err; - if (offset < 0 || offset > 0x3f) + if (!intel_tcc_temp_masks.tcc_offset) + return -ENODEV; + + if (offset < 0 || offset > intel_tcc_temp_masks.tcc_offset) return -EINVAL; if (cpu < 0) @@ -90,7 +255,7 @@ int intel_tcc_set_offset(int cpu, int offset) if (low & BIT(31)) return -EPERM; - low &= ~(0x3f << 24); + low &= ~(intel_tcc_temp_masks.tcc_offset << 24); low |= offset << 24; if (cpu < 0) @@ -113,8 +278,8 @@ EXPORT_SYMBOL_NS_GPL(intel_tcc_set_offset, INTEL_TCC); */ int intel_tcc_get_temp(int cpu, int *temp, bool pkg) { - u32 low, high; u32 msr = pkg ? 
MSR_IA32_PACKAGE_THERM_STATUS : MSR_IA32_THERM_STATUS; + u32 low, high, mask; int tjmax, err; tjmax = intel_tcc_get_tjmax(cpu); @@ -132,7 +297,9 @@ int intel_tcc_get_temp(int cpu, int *temp, bool pkg) if (!(low & BIT(31))) return -ENODATA; - *temp = tjmax - ((low >> 16) & 0x7f); + mask = get_temp_mask(pkg); + + *temp = tjmax - ((low >> 16) & mask); return 0; } diff --git a/drivers/thermal/intel/intel_tcc_cooling.c b/drivers/thermal/intel/intel_tcc_cooling.c index 6c392147e6d1..17110ffa80bb 100644 --- a/drivers/thermal/intel/intel_tcc_cooling.c +++ b/drivers/thermal/intel/intel_tcc_cooling.c @@ -20,7 +20,7 @@ static struct thermal_cooling_device *tcc_cdev; static int tcc_get_max_state(struct thermal_cooling_device *cdev, unsigned long *state) { - *state = 0x3f; + *state = intel_tcc_get_offset_mask(); return 0; } @@ -49,21 +49,21 @@ static const struct thermal_cooling_device_ops tcc_cooling_ops = { }; static const struct x86_cpu_id tcc_ids[] __initconst = { - X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, NULL), - X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, NULL), - X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, NULL), - X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, NULL), - X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, NULL), - X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, NULL), - X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, NULL), - X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, NULL), - X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, NULL), - X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, NULL), - X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, NULL), - X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, NULL), - X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, NULL), - X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, NULL), - X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, NULL), + X86_MATCH_VFM(INTEL_SKYLAKE, NULL), + X86_MATCH_VFM(INTEL_SKYLAKE_L, NULL), + X86_MATCH_VFM(INTEL_KABYLAKE, NULL), + X86_MATCH_VFM(INTEL_KABYLAKE_L, NULL), + X86_MATCH_VFM(INTEL_ICELAKE, NULL), + X86_MATCH_VFM(INTEL_ICELAKE_L, NULL), + X86_MATCH_VFM(INTEL_TIGERLAKE, NULL), + X86_MATCH_VFM(INTEL_TIGERLAKE_L, NULL), + X86_MATCH_VFM(INTEL_COMETLAKE, NULL), + X86_MATCH_VFM(INTEL_ALDERLAKE, NULL), + X86_MATCH_VFM(INTEL_ALDERLAKE_L, NULL), + X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, NULL), + X86_MATCH_VFM(INTEL_RAPTORLAKE, NULL), + X86_MATCH_VFM(INTEL_RAPTORLAKE_P, NULL), + X86_MATCH_VFM(INTEL_RAPTORLAKE_S, NULL), {} }; diff --git a/drivers/thermal/intel/x86_pkg_temp_thermal.c b/drivers/thermal/intel/x86_pkg_temp_thermal.c index c0ca8e3ff2e7..65b33b56a9be 100644 --- a/drivers/thermal/intel/x86_pkg_temp_thermal.c +++ b/drivers/thermal/intel/x86_pkg_temp_thermal.c @@ -119,9 +119,11 @@ static int sys_get_curr_temp(struct thermal_zone_device *tzd, int *temp) } static int -sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, int temp) +sys_set_trip_temp(struct thermal_zone_device *tzd, + const struct thermal_trip *trip, int temp) { struct zone_device *zonedev = thermal_zone_device_priv(tzd); + unsigned int trip_index = THERMAL_TRIP_PRIV_TO_INT(trip->priv); u32 l, h, mask, shift, intr; int tj_max, val, ret; @@ -132,7 +134,7 @@ sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, int temp) val = (tj_max - temp)/1000; - if (trip >= MAX_NUMBER_OF_TRIPS || val < 0 || val > 0x7f) + if (trip_index >= MAX_NUMBER_OF_TRIPS || val < 0 || val > 0x7f) return -EINVAL; ret = rdmsr_on_cpu(zonedev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, @@ -140,7 +142,7 @@ sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, int temp) if (ret < 0) return ret; - if (trip) { + if (trip_index) { mask = THERM_MASK_THRESHOLD1; shift = THERM_SHIFT_THRESHOLD1; intr = 
THERM_INT_THRESHOLD1_ENABLE; @@ -296,6 +298,7 @@ static int pkg_temp_thermal_trips_init(int cpu, int tj_max, trips[i].type = THERMAL_TRIP_PASSIVE; trips[i].flags |= THERMAL_TRIP_FLAG_RW_TEMP; + trips[i].priv = THERMAL_INT_TO_TRIP_PRIV(i); pr_debug("%s: cpu=%d, trip=%d, temp=%d\n", __func__, cpu, i, trips[i].temperature); diff --git a/drivers/thermal/k3_j72xx_bandgap.c b/drivers/thermal/k3_j72xx_bandgap.c index c74094a86982..9bc279ac131a 100644 --- a/drivers/thermal/k3_j72xx_bandgap.c +++ b/drivers/thermal/k3_j72xx_bandgap.c @@ -178,6 +178,7 @@ struct k3_j72xx_bandgap { void __iomem *base; void __iomem *cfg2_base; struct k3_thermal_data *ts_data[K3_VTM_MAX_NUM_TS]; + int cnt; }; /* common data structures */ @@ -338,24 +339,52 @@ static void print_look_up_table(struct device *dev, int *ref_table) dev_dbg(dev, "%d %d %d\n", i, derived_table[i], ref_table[i]); } +static void k3_j72xx_bandgap_init_hw(struct k3_j72xx_bandgap *bgp) +{ + struct k3_thermal_data *data; + int id, high_max, low_temp; + u32 val; + + for (id = 0; id < bgp->cnt; id++) { + data = bgp->ts_data[id]; + val = readl(bgp->cfg2_base + data->ctrl_offset); + val |= (K3_VTM_TMPSENS_CTRL_MAXT_OUTRG_EN | + K3_VTM_TMPSENS_CTRL_SOC | + K3_VTM_TMPSENS_CTRL_CLRZ | BIT(4)); + writel(val, bgp->cfg2_base + data->ctrl_offset); + } + + /* + * Program TSHUT thresholds + * Step 1: set the thresholds to ~123C and 105C WKUP_VTM_MISC_CTRL2 + * Step 2: WKUP_VTM_TMPSENS_CTRL_j set the MAXT_OUTRG_EN bit + * This is already taken care of as part of init + * Step 3: WKUP_VTM_MISC_CTRL set the ANYMAXT_OUTRG_ALERT_EN bit + */ + high_max = k3_j72xx_bandgap_temp_to_adc_code(MAX_TEMP); + low_temp = k3_j72xx_bandgap_temp_to_adc_code(COOL_DOWN_TEMP); + + writel((low_temp << 16) | high_max, bgp->cfg2_base + K3_VTM_MISC_CTRL2_OFFSET); + writel(K3_VTM_ANYMAXT_OUTRG_ALERT_EN, bgp->cfg2_base + K3_VTM_MISC_CTRL_OFFSET); +} + struct k3_j72xx_bandgap_data { const bool has_errata_i2128; }; static int k3_j72xx_bandgap_probe(struct platform_device *pdev) { - int ret = 0, cnt, val, id; - int high_max, low_temp; - struct resource *res; + const struct k3_j72xx_bandgap_data *driver_data; + struct thermal_zone_device *ti_thermal; struct device *dev = &pdev->dev; + bool workaround_needed = false; struct k3_j72xx_bandgap *bgp; struct k3_thermal_data *data; - bool workaround_needed = false; - const struct k3_j72xx_bandgap_data *driver_data; - struct thermal_zone_device *ti_thermal; - int *ref_table; struct err_values err_vals; void __iomem *fuse_base; + int ret = 0, val, id; + struct resource *res; + int *ref_table; const s64 golden_factors[] = { -490019999999999936, @@ -422,10 +451,10 @@ static int k3_j72xx_bandgap_probe(struct platform_device *pdev) /* Get the sensor count in the VTM */ val = readl(bgp->base + K3_VTM_DEVINFO_PWR0_OFFSET); - cnt = val & K3_VTM_DEVINFO_PWR0_TEMPSENS_CT_MASK; - cnt >>= __ffs(K3_VTM_DEVINFO_PWR0_TEMPSENS_CT_MASK); + bgp->cnt = val & K3_VTM_DEVINFO_PWR0_TEMPSENS_CT_MASK; + bgp->cnt >>= __ffs(K3_VTM_DEVINFO_PWR0_TEMPSENS_CT_MASK); - data = devm_kcalloc(bgp->dev, cnt, sizeof(*data), GFP_KERNEL); + data = devm_kcalloc(bgp->dev, bgp->cnt, sizeof(*data), GFP_KERNEL); if (!data) { ret = -ENOMEM; goto err_alloc; } @@ -449,8 +478,8 @@ static int k3_j72xx_bandgap_probe(struct platform_device *pdev) else init_table(3, ref_table, pvt_wa_factors); - /* Register the thermal sensors */ - for (id = 0; id < cnt; id++) { + /* Precompute the derived table & fill each thermal sensor struct */ + for (id = 0; id < bgp->cnt; id++) { data[id].bgp = bgp; data[id].ctrl_offset = 
K3_VTM_TMPSENS0_CTRL_OFFSET + id * 0x20; data[id].stat_offset = data[id].ctrl_offset + @@ -470,13 +499,13 @@ static int k3_j72xx_bandgap_probe(struct platform_device *pdev) else if (id == 0 && !workaround_needed) memcpy(derived_table, ref_table, TABLE_SIZE * 4); - val = readl(data[id].bgp->cfg2_base + data[id].ctrl_offset); - val |= (K3_VTM_TMPSENS_CTRL_MAXT_OUTRG_EN | - K3_VTM_TMPSENS_CTRL_SOC | - K3_VTM_TMPSENS_CTRL_CLRZ | BIT(4)); - writel(val, data[id].bgp->cfg2_base + data[id].ctrl_offset); - bgp->ts_data[id] = &data[id]; + } + + k3_j72xx_bandgap_init_hw(bgp); + + /* Register the thermal sensors */ + for (id = 0; id < bgp->cnt; id++) { ti_thermal = devm_thermal_of_zone_register(bgp->dev, id, &data[id], &k3_of_thermal_ops); if (IS_ERR(ti_thermal)) { @@ -486,21 +515,7 @@ static int k3_j72xx_bandgap_probe(struct platform_device *pdev) } } - /* - * Program TSHUT thresholds - * Step 1: set the thresholds to ~123C and 105C WKUP_VTM_MISC_CTRL2 - * Step 2: WKUP_VTM_TMPSENS_CTRL_j set the MAXT_OUTRG_EN bit - * This is already taken care as per of init - * Step 3: WKUP_VTM_MISC_CTRL set the ANYMAXT_OUTRG_ALERT_EN bit - */ - high_max = k3_j72xx_bandgap_temp_to_adc_code(MAX_TEMP); - low_temp = k3_j72xx_bandgap_temp_to_adc_code(COOL_DOWN_TEMP); - - writel((low_temp << 16) | high_max, data[0].bgp->cfg2_base + - K3_VTM_MISC_CTRL2_OFFSET); - mdelay(100); - writel(K3_VTM_ANYMAXT_OUTRG_ALERT_EN, data[0].bgp->cfg2_base + - K3_VTM_MISC_CTRL_OFFSET); + platform_set_drvdata(pdev, bgp); print_look_up_table(dev, ref_table); /* @@ -527,6 +542,35 @@ static void k3_j72xx_bandgap_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); } +static int k3_j72xx_bandgap_suspend(struct device *dev) +{ + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); + return 0; +} + +static int k3_j72xx_bandgap_resume(struct device *dev) +{ + struct k3_j72xx_bandgap *bgp = dev_get_drvdata(dev); + int ret; + + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + pm_runtime_put_noidle(dev); + pm_runtime_disable(dev); + return ret; + } + + k3_j72xx_bandgap_init_hw(bgp); + + return 0; +} + +static DEFINE_SIMPLE_DEV_PM_OPS(k3_j72xx_bandgap_pm_ops, + k3_j72xx_bandgap_suspend, + k3_j72xx_bandgap_resume); + static const struct k3_j72xx_bandgap_data k3_j72xx_bandgap_j721e_data = { .has_errata_i2128 = true, }; @@ -554,6 +598,7 @@ static struct platform_driver k3_j72xx_bandgap_sensor_driver = { .driver = { .name = "k3-j72xx-soc-thermal", .of_match_table = of_k3_j72xx_bandgap_match, + .pm = pm_sleep_ptr(&k3_j72xx_bandgap_pm_ops), }, }; diff --git a/drivers/thermal/mediatek/lvts_thermal.c b/drivers/thermal/mediatek/lvts_thermal.c index 819ed0110f3e..1997e91bb3be 100644 --- a/drivers/thermal/mediatek/lvts_thermal.c +++ b/drivers/thermal/mediatek/lvts_thermal.c @@ -128,6 +128,7 @@ struct lvts_data { int temp_factor; int temp_offset; int gt_calib_bit_offset; + unsigned int def_calibration; }; struct lvts_sensor { @@ -689,6 +690,10 @@ static int lvts_calibration_init(struct device *dev, struct lvts_ctrl *lvts_ctrl size_t calib_len) { int i; + u32 gt; + + /* A zero value for gt means that device has invalid efuse data */ + gt = (((u32 *)efuse_calibration)[0] >> lvts_ctrl->lvts_data->gt_calib_bit_offset) & 0xff; lvts_for_each_valid_sensor(i, lvts_ctrl_data) { const struct lvts_sensor_data *sensor = @@ -699,10 +704,17 @@ static int lvts_calibration_init(struct device *dev, struct lvts_ctrl *lvts_ctrl sensor->cal_offsets[2] >= calib_len) return -EINVAL; - lvts_ctrl->calibration[i] = - 
(efuse_calibration[sensor->cal_offsets[0]] << 0) + - (efuse_calibration[sensor->cal_offsets[1]] << 8) + - (efuse_calibration[sensor->cal_offsets[2]] << 16); + if (gt) { + lvts_ctrl->calibration[i] = + (efuse_calibration[sensor->cal_offsets[0]] << 0) + + (efuse_calibration[sensor->cal_offsets[1]] << 8) + + (efuse_calibration[sensor->cal_offsets[2]] << 16); + } else if (lvts_ctrl->lvts_data->def_calibration) { + lvts_ctrl->calibration[i] = lvts_ctrl->lvts_data->def_calibration; + } else { + dev_err(dev, "efuse contains invalid calibration data and no default given.\n"); + return -ENODATA; + } } return 0; @@ -770,14 +782,13 @@ static int lvts_golden_temp_init(struct device *dev, u8 *calib, gt = (((u32 *)calib)[0] >> lvts_data->gt_calib_bit_offset) & 0xff; /* A zero value for gt means that device has invalid efuse data */ - if (!gt) - return -ENODATA; - - if (gt < LVTS_GOLDEN_TEMP_MAX) + if (gt && gt < LVTS_GOLDEN_TEMP_MAX) golden_temp = gt; golden_temp_offset = golden_temp * 500 + lvts_data->temp_offset; + dev_info(dev, "%sgolden temp=%d\n", gt ? "" : "fake ", golden_temp); + return 0; } @@ -1440,7 +1451,7 @@ static const struct lvts_ctrl_data mt8186_lvts_data_ctrl[] = { .cal_offsets = { 29, 30, 31 } }, { .dt_id = MT8186_ADSP, .cal_offsets = { 34, 35, 28 } }, - { .dt_id = MT8186_MFG, + { .dt_id = MT8186_GPU, .cal_offsets = { 39, 32, 33 } } }, VALID_SENSOR_MAP(1, 1, 1, 0), @@ -1488,11 +1499,11 @@ static const struct lvts_ctrl_data mt8188_lvts_ap_data_ctrl[] = { }, { .lvts_sensor = { - { .dt_id = MT8188_AP_GPU1, + { .dt_id = MT8188_AP_GPU0, .cal_offsets = { 43, 44, 45 } }, - { .dt_id = MT8188_AP_GPU2, + { .dt_id = MT8188_AP_GPU1, .cal_offsets = { 46, 47, 48 } }, - { .dt_id = MT8188_AP_SOC1, + { .dt_id = MT8188_AP_ADSP, .cal_offsets = { 49, 50, 51 } }, }, VALID_SENSOR_MAP(1, 1, 1, 0), @@ -1500,9 +1511,9 @@ static const struct lvts_ctrl_data mt8188_lvts_ap_data_ctrl[] = { }, { .lvts_sensor = { - { .dt_id = MT8188_AP_SOC2, + { .dt_id = MT8188_AP_VDO, .cal_offsets = { 52, 53, 54 } }, - { .dt_id = MT8188_AP_SOC3, + { .dt_id = MT8188_AP_INFRA, .cal_offsets = { 55, 56, 57 } }, }, VALID_SENSOR_MAP(1, 1, 0, 0), @@ -1701,6 +1712,7 @@ static const struct lvts_data mt8186_lvts_data = { .temp_factor = LVTS_COEFF_A_MT7988, .temp_offset = LVTS_COEFF_B_MT7988, .gt_calib_bit_offset = 24, + .def_calibration = 19000, }; static const struct lvts_data mt8188_lvts_mcu_data = { @@ -1709,6 +1721,7 @@ static const struct lvts_data mt8188_lvts_mcu_data = { .temp_factor = LVTS_COEFF_A_MT8195, .temp_offset = LVTS_COEFF_B_MT8195, .gt_calib_bit_offset = 20, + .def_calibration = 35000, }; static const struct lvts_data mt8188_lvts_ap_data = { @@ -1717,6 +1730,7 @@ static const struct lvts_data mt8188_lvts_ap_data = { .temp_factor = LVTS_COEFF_A_MT8195, .temp_offset = LVTS_COEFF_B_MT8195, .gt_calib_bit_offset = 20, + .def_calibration = 35000, }; static const struct lvts_data mt8192_lvts_mcu_data = { @@ -1725,6 +1739,7 @@ static const struct lvts_data mt8192_lvts_mcu_data = { .temp_factor = LVTS_COEFF_A_MT8195, .temp_offset = LVTS_COEFF_B_MT8195, .gt_calib_bit_offset = 24, + .def_calibration = 35000, }; static const struct lvts_data mt8192_lvts_ap_data = { @@ -1733,6 +1748,7 @@ static const struct lvts_data mt8192_lvts_ap_data = { .temp_factor = LVTS_COEFF_A_MT8195, .temp_offset = LVTS_COEFF_B_MT8195, .gt_calib_bit_offset = 24, + .def_calibration = 35000, }; static const struct lvts_data mt8195_lvts_mcu_data = { @@ -1741,6 +1757,7 @@ static const struct lvts_data mt8195_lvts_mcu_data = { .temp_factor = LVTS_COEFF_A_MT8195, 
.temp_offset = LVTS_COEFF_B_MT8195, .gt_calib_bit_offset = 24, + .def_calibration = 35000, }; static const struct lvts_data mt8195_lvts_ap_data = { @@ -1749,6 +1766,7 @@ static const struct lvts_data mt8195_lvts_ap_data = { .temp_factor = LVTS_COEFF_A_MT8195, .temp_offset = LVTS_COEFF_B_MT8195, .gt_calib_bit_offset = 24, + .def_calibration = 35000, }; static const struct of_device_id lvts_of_match[] = { diff --git a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c index 756ac6842ff9..7c9f4023babc 100644 --- a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c +++ b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c @@ -829,12 +829,9 @@ static int adc_tm5_get_dt_channel_data(struct adc_tm5_chip *adc_tm, channel->iio = devm_fwnode_iio_channel_get_by_name(adc_tm->dev, of_fwnode_handle(node), NULL); - if (IS_ERR(channel->iio)) { - ret = PTR_ERR(channel->iio); - if (ret != -EPROBE_DEFER) - dev_err(dev, "%s: error getting channel: %d\n", name, ret); - return ret; - } + if (IS_ERR(channel->iio)) + return dev_err_probe(dev, PTR_ERR(channel->iio), "%s: error getting channel\n", + name); ret = of_property_read_u32_array(node, "qcom,pre-scaling", varr, 2); if (!ret) { diff --git a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c index 3cd74f6cac8f..96daad28b0c0 100644 --- a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c +++ b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c @@ -261,17 +261,13 @@ skip: return qpnp_tm_write(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, reg); } -static int qpnp_tm_set_trip_temp(struct thermal_zone_device *tz, int trip_id, int temp) +static int qpnp_tm_set_trip_temp(struct thermal_zone_device *tz, + const struct thermal_trip *trip, int temp) { struct qpnp_tm_chip *chip = thermal_zone_device_priv(tz); - struct thermal_trip trip; int ret; - ret = __thermal_zone_get_trip(chip->tz_dev, trip_id, &trip); - if (ret) - return ret; - - if (trip.type != THERMAL_TRIP_CRITICAL) + if (trip->type != THERMAL_TRIP_CRITICAL) return 0; mutex_lock(&chip->lock); diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c index e76e23026dc8..0b4421bf4785 100644 --- a/drivers/thermal/qcom/tsens.c +++ b/drivers/thermal/qcom/tsens.c @@ -1336,11 +1336,9 @@ static int tsens_probe(struct platform_device *pdev) if (priv->ops->calibrate) { ret = priv->ops->calibrate(priv); - if (ret < 0) { - if (ret != -EPROBE_DEFER) - dev_err(dev, "%s: calibration failed\n", __func__); - return ret; - } + if (ret < 0) + return dev_err_probe(dev, ret, "%s: calibration failed\n", + __func__); } ret = tsens_register(priv); diff --git a/drivers/thermal/renesas/Kconfig b/drivers/thermal/renesas/Kconfig new file mode 100644 index 000000000000..dcf5fc5ae08e --- /dev/null +++ b/drivers/thermal/renesas/Kconfig @@ -0,0 +1,28 @@ +# SPDX-License-Identifier: GPL-2.0-only + +config RCAR_THERMAL + tristate "Renesas R-Car thermal driver" + depends on ARCH_RENESAS || COMPILE_TEST + depends on HAS_IOMEM + depends on OF + help + Enable this to plug the R-Car thermal sensor driver into the Linux + thermal framework. + +config RCAR_GEN3_THERMAL + tristate "Renesas R-Car Gen3 and RZ/G2 thermal driver" + depends on ARCH_RENESAS || COMPILE_TEST + depends on HAS_IOMEM + depends on OF + help + Enable this to plug the R-Car Gen3 or RZ/G2 thermal sensor driver into + the Linux thermal framework. 
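These entries reproduce the Renesas options previously declared in drivers/thermal/Kconfig. For the new subdirectory to be descended into at all, the parent Kconfig and Makefile presumably gain matching hooks; the hunks for those two files are not quoted here, so the exact lines below are an assumption:

# drivers/thermal/Kconfig (assumed hook)
source "drivers/thermal/renesas/Kconfig"

# drivers/thermal/Makefile (assumed hook)
obj-y += renesas/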
+ +config RZG2L_THERMAL + tristate "Renesas RZ/G2L thermal driver" + depends on ARCH_RENESAS || COMPILE_TEST + depends on HAS_IOMEM + depends on OF + help + Enable this to plug the RZ/G2L thermal sensor driver into the Linux + thermal framework. diff --git a/drivers/thermal/renesas/Makefile b/drivers/thermal/renesas/Makefile new file mode 100644 index 000000000000..bf9cb3cb94d6 --- /dev/null +++ b/drivers/thermal/renesas/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0-only + +obj-$(CONFIG_RCAR_GEN3_THERMAL) += rcar_gen3_thermal.o +obj-$(CONFIG_RCAR_THERMAL) += rcar_thermal.o +obj-$(CONFIG_RZG2L_THERMAL) += rzg2l_thermal.o diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/renesas/rcar_gen3_thermal.c index 02494fa142c3..5c769871753a 100644 --- a/drivers/thermal/rcar_gen3_thermal.c +++ b/drivers/thermal/renesas/rcar_gen3_thermal.c @@ -16,7 +16,7 @@ #include <linux/pm_runtime.h> #include <linux/thermal.h> -#include "thermal_hwmon.h" +#include "../thermal_hwmon.h" /* Register offsets */ #define REG_GEN3_IRQSTR 0x04 diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/renesas/rcar_thermal.c index 925183753fcb..1e93f60b6d74 100644 --- a/drivers/thermal/rcar_thermal.c +++ b/drivers/thermal/renesas/rcar_thermal.c @@ -19,7 +19,7 @@ #include <linux/spinlock.h> #include <linux/thermal.h> -#include "thermal_hwmon.h" +#include "../thermal_hwmon.h" #define IDLE_INTERVAL 5000 diff --git a/drivers/thermal/rzg2l_thermal.c b/drivers/thermal/renesas/rzg2l_thermal.c index 04efd824ac4c..0e1cb9045ee6 100644 --- a/drivers/thermal/rzg2l_thermal.c +++ b/drivers/thermal/renesas/rzg2l_thermal.c @@ -17,7 +17,7 @@ #include <linux/thermal.h> #include <linux/units.h> -#include "thermal_hwmon.h" +#include "../thermal_hwmon.h" #define CTEMP_MASK 0xFFF diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c index 6482513bfe66..96cffb2c44ba 100644 --- a/drivers/thermal/samsung/exynos_tmu.c +++ b/drivers/thermal/samsung/exynos_tmu.c @@ -1004,11 +1004,11 @@ static const struct thermal_zone_device_ops exynos_sensor_ops = { static int exynos_tmu_probe(struct platform_device *pdev) { + struct device *dev = &pdev->dev; struct exynos_tmu_data *data; int ret; - data = devm_kzalloc(&pdev->dev, sizeof(struct exynos_tmu_data), - GFP_KERNEL); + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; @@ -1020,7 +1020,7 @@ static int exynos_tmu_probe(struct platform_device *pdev) * TODO: Add regulator as an SOC feature, so that regulator enable * is a compulsory call. 
*/ - ret = devm_regulator_get_enable_optional(&pdev->dev, "vtmu"); + ret = devm_regulator_get_enable_optional(dev, "vtmu"); switch (ret) { case 0: case -ENODEV: @@ -1028,8 +1028,7 @@ static int exynos_tmu_probe(struct platform_device *pdev) case -EPROBE_DEFER: return -EPROBE_DEFER; default: - dev_err(&pdev->dev, "Failed to get enabled regulator: %d\n", - ret); + dev_err(dev, "Failed to get enabled regulator: %d\n", ret); return ret; } @@ -1037,44 +1036,40 @@ static int exynos_tmu_probe(struct platform_device *pdev) if (ret) return ret; - data->clk = devm_clk_get(&pdev->dev, "tmu_apbif"); - if (IS_ERR(data->clk)) { - dev_err(&pdev->dev, "Failed to get clock\n"); - return PTR_ERR(data->clk); - } + data->clk = devm_clk_get(dev, "tmu_apbif"); + if (IS_ERR(data->clk)) + return dev_err_probe(dev, PTR_ERR(data->clk), "Failed to get clock\n"); - data->clk_sec = devm_clk_get(&pdev->dev, "tmu_triminfo_apbif"); + data->clk_sec = devm_clk_get(dev, "tmu_triminfo_apbif"); if (IS_ERR(data->clk_sec)) { - if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO) { - dev_err(&pdev->dev, "Failed to get triminfo clock\n"); - return PTR_ERR(data->clk_sec); - } + if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO) + return dev_err_probe(dev, PTR_ERR(data->clk_sec), + "Failed to get triminfo clock\n"); } else { ret = clk_prepare(data->clk_sec); if (ret) { - dev_err(&pdev->dev, "Failed to get clock\n"); + dev_err(dev, "Failed to get clock\n"); return ret; } } ret = clk_prepare(data->clk); if (ret) { - dev_err(&pdev->dev, "Failed to get clock\n"); + dev_err(dev, "Failed to get clock\n"); goto err_clk_sec; } switch (data->soc) { case SOC_ARCH_EXYNOS5433: case SOC_ARCH_EXYNOS7: - data->sclk = devm_clk_get(&pdev->dev, "tmu_sclk"); + data->sclk = devm_clk_get(dev, "tmu_sclk"); if (IS_ERR(data->sclk)) { - dev_err(&pdev->dev, "Failed to get sclk\n"); - ret = PTR_ERR(data->sclk); + ret = dev_err_probe(dev, PTR_ERR(data->sclk), "Failed to get sclk\n"); goto err_clk; } else { ret = clk_prepare_enable(data->sclk); if (ret) { - dev_err(&pdev->dev, "Failed to enable sclk\n"); + dev_err(dev, "Failed to enable sclk\n"); goto err_clk; } } @@ -1085,33 +1080,30 @@ static int exynos_tmu_probe(struct platform_device *pdev) ret = exynos_tmu_initialize(pdev); if (ret) { - dev_err(&pdev->dev, "Failed to initialize TMU\n"); + dev_err(dev, "Failed to initialize TMU\n"); goto err_sclk; } - data->tzd = devm_thermal_of_zone_register(&pdev->dev, 0, data, + data->tzd = devm_thermal_of_zone_register(dev, 0, data, &exynos_sensor_ops); if (IS_ERR(data->tzd)) { - ret = PTR_ERR(data->tzd); - if (ret != -EPROBE_DEFER) - dev_err(&pdev->dev, "Failed to register sensor: %d\n", - ret); + ret = dev_err_probe(dev, PTR_ERR(data->tzd), "Failed to register sensor\n"); goto err_sclk; } ret = exynos_thermal_zone_configure(pdev); if (ret) { - dev_err(&pdev->dev, "Failed to configure the thermal zone\n"); + dev_err(dev, "Failed to configure the thermal zone\n"); goto err_sclk; } - ret = devm_request_threaded_irq(&pdev->dev, data->irq, NULL, + ret = devm_request_threaded_irq(dev, data->irq, NULL, exynos_tmu_threaded_irq, IRQF_TRIGGER_RISING | IRQF_SHARED | IRQF_ONESHOT, - dev_name(&pdev->dev), data); + dev_name(dev), data); if (ret) { - dev_err(&pdev->dev, "Failed to request irq: %d\n", data->irq); + dev_err(dev, "Failed to request irq: %d\n", data->irq); goto err_sclk; } diff --git a/drivers/thermal/st/st_thermal_memmap.c b/drivers/thermal/st/st_thermal_memmap.c index 29c2269b0fb3..e427117381a4 100644 --- a/drivers/thermal/st/st_thermal_memmap.c +++ 
b/drivers/thermal/st/st_thermal_memmap.c @@ -142,15 +142,6 @@ static const struct st_thermal_sensor_ops st_mmap_sensor_ops = { .enable_irq = st_mmap_enable_irq, }; -/* Compatible device data stih416 mpe thermal sensor */ -static const struct st_thermal_compat_data st_416mpe_cdata = { - .reg_fields = st_mmap_thermal_regfields, - .ops = &st_mmap_sensor_ops, - .calibration_val = 14, - .temp_adjust_val = -95, - .crit_temp = 120, -}; - /* Compatible device data stih407 thermal sensor */ static const struct st_thermal_compat_data st_407_cdata = { .reg_fields = st_mmap_thermal_regfields, @@ -161,7 +152,6 @@ static const struct st_thermal_compat_data st_407_cdata = { }; static const struct of_device_id st_mmap_thermal_of_match[] = { - { .compatible = "st,stih416-mpe-thermal", .data = &st_416mpe_cdata }, { .compatible = "st,stih407-thermal", .data = &st_407_cdata }, { /* sentinel */ } }; diff --git a/drivers/thermal/tegra/soctherm.c b/drivers/thermal/tegra/soctherm.c index e7fe8683bfc5..d3dfc34c62c6 100644 --- a/drivers/thermal/tegra/soctherm.c +++ b/drivers/thermal/tegra/soctherm.c @@ -582,23 +582,18 @@ static int tsensor_group_thermtrip_get(struct tegra_soctherm *ts, int id) return temp; } -static int tegra_thermctl_set_trip_temp(struct thermal_zone_device *tz, int trip_id, int temp) +static int tegra_thermctl_set_trip_temp(struct thermal_zone_device *tz, + const struct thermal_trip *trip, int temp) { struct tegra_thermctl_zone *zone = thermal_zone_device_priv(tz); struct tegra_soctherm *ts = zone->ts; - struct thermal_trip trip; const struct tegra_tsensor_group *sg = zone->sg; struct device *dev = zone->dev; - int ret; if (!tz) return -EINVAL; - ret = __thermal_zone_get_trip(tz, trip_id, &trip); - if (ret) - return ret; - - if (trip.type == THERMAL_TRIP_CRITICAL) { + if (trip->type == THERMAL_TRIP_CRITICAL) { /* * If thermtrips property is set in DT, * doesn't need to program critical type trip to HW, @@ -609,7 +604,7 @@ static int tegra_thermctl_set_trip_temp(struct thermal_zone_device *tz, int trip else return 0; - } else if (trip.type == THERMAL_TRIP_HOT) { + } else if (trip->type == THERMAL_TRIP_HOT) { int i; for (i = 0; i < THROTTLE_SIZE; i++) { @@ -620,7 +615,7 @@ static int tegra_thermctl_set_trip_temp(struct thermal_zone_device *tz, int trip continue; cdev = ts->throt_cfgs[i].cdev; - if (get_thermal_instance(tz, cdev, trip_id)) + if (thermal_trip_is_bound_to_cdev(tz, trip, cdev)) stc = find_throttle_cfg_by_name(ts, cdev->type); else continue; diff --git a/drivers/thermal/thermal-generic-adc.c b/drivers/thermal/thermal-generic-adc.c index 1717e4a19dcb..ee3d0aa31406 100644 --- a/drivers/thermal/thermal-generic-adc.c +++ b/drivers/thermal/thermal-generic-adc.c @@ -117,44 +117,41 @@ static int gadc_thermal_read_linear_lookup_table(struct device *dev, static int gadc_thermal_probe(struct platform_device *pdev) { + struct device *dev = &pdev->dev; struct gadc_thermal_info *gti; int ret; - if (!pdev->dev.of_node) { - dev_err(&pdev->dev, "Only DT based supported\n"); + if (!dev->of_node) { + dev_err(dev, "Only DT based supported\n"); return -ENODEV; } - gti = devm_kzalloc(&pdev->dev, sizeof(*gti), GFP_KERNEL); + gti = devm_kzalloc(dev, sizeof(*gti), GFP_KERNEL); if (!gti) return -ENOMEM; - gti->channel = devm_iio_channel_get(&pdev->dev, "sensor-channel"); - if (IS_ERR(gti->channel)) { - ret = PTR_ERR(gti->channel); - if (ret != -EPROBE_DEFER) - dev_err(&pdev->dev, "IIO channel not found: %d\n", ret); - return ret; - } + gti->channel = devm_iio_channel_get(dev, "sensor-channel"); + if 
(IS_ERR(gti->channel)) + return dev_err_probe(dev, PTR_ERR(gti->channel), "IIO channel not found\n"); - ret = gadc_thermal_read_linear_lookup_table(&pdev->dev, gti); + ret = gadc_thermal_read_linear_lookup_table(dev, gti); if (ret < 0) return ret; - gti->dev = &pdev->dev; + gti->dev = dev; - gti->tz_dev = devm_thermal_of_zone_register(&pdev->dev, 0, gti, + gti->tz_dev = devm_thermal_of_zone_register(dev, 0, gti, &gadc_thermal_ops); if (IS_ERR(gti->tz_dev)) { ret = PTR_ERR(gti->tz_dev); if (ret != -EPROBE_DEFER) - dev_err(&pdev->dev, + dev_err(dev, "Thermal zone sensor register failed: %d\n", ret); return ret; } - devm_thermal_add_hwmon_sysfs(&pdev->dev, gti->tz_dev); + devm_thermal_add_hwmon_sysfs(dev, gti->tz_dev); return 0; } diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 1b0ab2790860..8795187fbc52 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -300,6 +300,8 @@ static void monitor_thermal_zone(struct thermal_zone_device *tz) thermal_zone_device_set_polling(tz, tz->passive_delay_jiffies); else if (tz->polling_delay_jiffies) thermal_zone_device_set_polling(tz, tz->polling_delay_jiffies); + else if (tz->temperature == THERMAL_TEMP_INVALID) + thermal_zone_device_set_polling(tz, msecs_to_jiffies(THERMAL_RECHECK_DELAY_MS)); } static struct thermal_governor *thermal_get_tz_governor(struct thermal_zone_device *tz) @@ -463,6 +465,9 @@ static void thermal_governor_trip_crossed(struct thermal_governor *governor, const struct thermal_trip *trip, bool crossed_up) { + if (trip->type == THERMAL_TRIP_HOT || trip->type == THERMAL_TRIP_CRITICAL) + return; + if (governor->trip_crossed) governor->trip_crossed(tz, trip, crossed_up); } @@ -482,16 +487,14 @@ static void thermal_trip_crossed(struct thermal_zone_device *tz, thermal_governor_trip_crossed(governor, tz, trip, crossed_up); } -static int thermal_trip_notify_cmp(void *ascending, const struct list_head *a, +static int thermal_trip_notify_cmp(void *not_used, const struct list_head *a, const struct list_head *b) { struct thermal_trip_desc *tda = container_of(a, struct thermal_trip_desc, notify_list_node); struct thermal_trip_desc *tdb = container_of(b, struct thermal_trip_desc, notify_list_node); - int ret = tdb->notify_temp - tda->notify_temp; - - return ascending ? 
ret : -ret; + return tda->notify_temp - tdb->notify_temp; } void __thermal_zone_device_update(struct thermal_zone_device *tz, @@ -511,21 +514,21 @@ void __thermal_zone_device_update(struct thermal_zone_device *tz, update_temperature(tz); if (tz->temperature == THERMAL_TEMP_INVALID) - return; - - __thermal_zone_set_trips(tz); + goto monitor; tz->notify_event = event; for_each_trip_desc(tz, td) handle_thermal_trip(tz, td, &way_up_list, &way_down_list); - list_sort(&way_up_list, &way_up_list, thermal_trip_notify_cmp); + thermal_zone_set_trips(tz); + + list_sort(NULL, &way_up_list, thermal_trip_notify_cmp); list_for_each_entry(td, &way_up_list, notify_list_node) thermal_trip_crossed(tz, &td->trip, governor, true); list_sort(NULL, &way_down_list, thermal_trip_notify_cmp); - list_for_each_entry(td, &way_down_list, notify_list_node) + list_for_each_entry_reverse(td, &way_down_list, notify_list_node) thermal_trip_crossed(tz, &td->trip, governor, false); if (governor->manage) @@ -533,6 +536,7 @@ void __thermal_zone_device_update(struct thermal_zone_device *tz, thermal_debug_update_trip_stats(tz); +monitor: monitor_thermal_zone(tz); } @@ -1126,7 +1130,7 @@ static void thermal_cooling_device_release(struct device *dev, void *res) struct thermal_cooling_device * devm_thermal_of_cooling_device_register(struct device *dev, struct device_node *np, - char *type, void *devdata, + const char *type, void *devdata, const struct thermal_cooling_device_ops *ops) { struct thermal_cooling_device **ptr, *tcd; @@ -1353,7 +1357,8 @@ thermal_zone_device_register_with_trips(const char *type, int num_trips, void *devdata, const struct thermal_zone_device_ops *ops, const struct thermal_zone_params *tzp, - int passive_delay, int polling_delay) + unsigned int passive_delay, + unsigned int polling_delay) { const struct thermal_trip *trip = trips; struct thermal_zone_device *tz; @@ -1386,6 +1391,14 @@ thermal_zone_device_register_with_trips(const char *type, if (num_trips > 0 && !trips) return ERR_PTR(-EINVAL); + if (polling_delay) { + if (passive_delay > polling_delay) + return ERR_PTR(-EINVAL); + + if (!passive_delay) + passive_delay = polling_delay; + } + if (!thermal_class) return ERR_PTR(-ENODEV); @@ -1649,6 +1662,7 @@ static void thermal_zone_device_resume(struct work_struct *work) tz->suspended = false; + thermal_debug_tz_resume(tz); thermal_zone_device_init(tz); __thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED); diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h index 66f67e54e0c8..30c0e78859a7 100644 --- a/drivers/thermal/thermal_core.h +++ b/drivers/thermal/thermal_core.h @@ -133,6 +133,12 @@ struct thermal_zone_device { struct thermal_trip_desc trips[] __counted_by(num_trips); }; +/* + * Default delay after a failing thermal zone temperature check before + * attempting to check it again. 
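+ * A failed temperature check leaves tz->temperature at THERMAL_TEMP_INVALID; + * monitor_thermal_zone() (see the thermal_core.c hunk above) then re-arms + * polling with this delay instead of leaving the zone unmonitored.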
+ */ +#define THERMAL_RECHECK_DELAY_MS 250 + /* Default Thermal Governor */ #if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE) #define DEFAULT_THERMAL_GOVERNOR "step_wise" @@ -244,7 +250,9 @@ void thermal_governor_update_tz(struct thermal_zone_device *tz, #define trip_to_trip_desc(__trip) \ container_of(__trip, struct thermal_trip_desc, trip) -void __thermal_zone_set_trips(struct thermal_zone_device *tz); +const char *thermal_trip_type_name(enum thermal_trip_type trip_type); + +void thermal_zone_set_trips(struct thermal_zone_device *tz); int thermal_zone_trip_id(const struct thermal_zone_device *tz, const struct thermal_trip *trip); void thermal_zone_trip_updated(struct thermal_zone_device *tz, diff --git a/drivers/thermal/thermal_debugfs.c b/drivers/thermal/thermal_debugfs.c index 942447229157..7dd67bf48571 100644 --- a/drivers/thermal/thermal_debugfs.c +++ b/drivers/thermal/thermal_debugfs.c @@ -94,7 +94,6 @@ struct cdev_record { * @trip_temp: trip temperature at mitigation start * @trip_hyst: trip hysteresis at mitigation start * @count: the number of times the zone temperature was above the trip point - * @max: maximum recorded temperature above the trip point * @min: minimum recorded temperature above the trip point * @avg: average temperature above the trip point */ @@ -104,7 +103,6 @@ struct trip_stats { int trip_temp; int trip_hyst; int count; - int max; int min; int avg; }; @@ -122,12 +120,14 @@ struct trip_stats { * @timestamp: first trip point crossed the way up * @duration: total duration of the mitigation episode * @node: a list element to be added to the list of tz events + * @max_temp: maximum zone temperature during this episode * @trip_stats: per trip point statistics, flexible array */ struct tz_episode { ktime_t timestamp; ktime_t duration; struct list_head node; + int max_temp; struct trip_stats trip_stats[]; }; @@ -561,10 +561,11 @@ static struct tz_episode *thermal_debugfs_tz_event_alloc(struct thermal_zone_dev INIT_LIST_HEAD(&tze->node); tze->timestamp = now; tze->duration = KTIME_MIN; + tze->max_temp = INT_MIN; for (i = 0; i < tz->num_trips; i++) { + tze->trip_stats[i].trip_temp = THERMAL_TEMP_INVALID; tze->trip_stats[i].min = INT_MAX; - tze->trip_stats[i].max = INT_MIN; } return tze; @@ -573,20 +574,20 @@ static struct tz_episode *thermal_debugfs_tz_event_alloc(struct thermal_zone_dev void thermal_debug_tz_trip_up(struct thermal_zone_device *tz, const struct thermal_trip *trip) { - struct tz_episode *tze; - struct tz_debugfs *tz_dbg; struct thermal_debugfs *thermal_dbg = tz->debugfs; int trip_id = thermal_zone_trip_id(tz, trip); ktime_t now = ktime_get(); struct trip_stats *trip_stats; + struct tz_debugfs *tz_dbg; + struct tz_episode *tze; if (!thermal_dbg) return; - mutex_lock(&thermal_dbg->lock); - tz_dbg = &thermal_dbg->tz_dbg; + mutex_lock(&thermal_dbg->lock); + /* * The mitigation is starting. A mitigation can contain * several episodes where each of them is related to a @@ -653,23 +654,33 @@ unlock: mutex_unlock(&thermal_dbg->lock); } +static void tz_episode_close_trip(struct tz_episode *tze, int trip_id, ktime_t now) +{ + struct trip_stats *trip_stats = &tze->trip_stats[trip_id]; + ktime_t delta = ktime_sub(now, trip_stats->timestamp); + + trip_stats->duration = ktime_add(delta, trip_stats->duration); + /* Mark the end of mitigation for this trip point. 
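+ A KTIME_MAX timestamp is the "no mitigation in progress" sentinel + tested in tze_seq_show().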
*/ + trip_stats->timestamp = KTIME_MAX; +} + void thermal_debug_tz_trip_down(struct thermal_zone_device *tz, const struct thermal_trip *trip) { struct thermal_debugfs *thermal_dbg = tz->debugfs; + int trip_id = thermal_zone_trip_id(tz, trip); + ktime_t now = ktime_get(); struct tz_episode *tze; struct tz_debugfs *tz_dbg; - ktime_t delta, now = ktime_get(); - int trip_id = thermal_zone_trip_id(tz, trip); int i; if (!thermal_dbg) return; - mutex_lock(&thermal_dbg->lock); - tz_dbg = &thermal_dbg->tz_dbg; + mutex_lock(&thermal_dbg->lock); + /* * The temperature crosses the way down but there was not * mitigation detected before. That may happen when the @@ -695,13 +706,7 @@ void thermal_debug_tz_trip_down(struct thermal_zone_device *tz, tze = list_first_entry(&tz_dbg->tz_episodes, struct tz_episode, node); - delta = ktime_sub(now, tze->trip_stats[trip_id].timestamp); - - tze->trip_stats[trip_id].duration = - ktime_add(delta, tze->trip_stats[trip_id].duration); - - /* Mark the end of mitigation for this trip point. */ - tze->trip_stats[trip_id].timestamp = KTIME_MAX; + tz_episode_close_trip(tze, trip_id, now); /* * This event closes the mitigation as we are crossing the @@ -724,20 +729,22 @@ void thermal_debug_update_trip_stats(struct thermal_zone_device *tz) if (!thermal_dbg) return; - mutex_lock(&thermal_dbg->lock); - tz_dbg = &thermal_dbg->tz_dbg; + mutex_lock(&thermal_dbg->lock); + if (!tz_dbg->nr_trips) goto out; tze = list_first_entry(&tz_dbg->tz_episodes, struct tz_episode, node); + if (tz->temperature > tze->max_temp) + tze->max_temp = tz->temperature; + for (i = 0; i < tz_dbg->nr_trips; i++) { int trip_id = tz_dbg->trips_crossed[i]; struct trip_stats *trip_stats = &tze->trip_stats[trip_id]; - trip_stats->max = max(trip_stats->max, tz->temperature); trip_stats->min = min(trip_stats->min, tz->temperature); trip_stats->avg += (tz->temperature - trip_stats->avg) / ++trip_stats->count; @@ -777,7 +784,6 @@ static int tze_seq_show(struct seq_file *s, void *v) struct thermal_zone_device *tz = thermal_dbg->tz_dbg.tz; struct thermal_trip_desc *td; struct tz_episode *tze; - const char *type; u64 duration_ms; int trip_id; char c; @@ -793,10 +799,10 @@ static int tze_seq_show(struct seq_file *s, void *v) c = '='; } - seq_printf(s, ",-Mitigation at %lluus, duration%c%llums\n", - ktime_to_us(tze->timestamp), c, duration_ms); + seq_printf(s, ",-Mitigation at %llums, duration%c%llums, max. temp=%dm°C\n", + ktime_to_ms(tze->timestamp), c, duration_ms, tze->max_temp); - seq_printf(s, "| trip | type | temp(°mC) | hyst(°mC) | duration | avg(°mC) | min(°mC) | max(°mC) |\n"); + seq_printf(s, "| trip | type | temp(m°C) | hyst(m°C) | duration(ms) | avg(m°C) | min(m°C) |\n"); for_each_trip_desc(tz, td) { const struct thermal_trip *trip = &td->trip; @@ -814,16 +820,9 @@ static int tze_seq_show(struct seq_file *s, void *v) trip_stats = &tze->trip_stats[trip_id]; /* Skip trips without any stats. */ - if (trip_stats->min > trip_stats->max) + if (trip_stats->trip_temp == THERMAL_TEMP_INVALID) continue; - if (trip->type == THERMAL_TRIP_PASSIVE) - type = "passive"; - else if (trip->type == THERMAL_TRIP_ACTIVE) - type = "active"; - else - type = "hot"; - if (trip_stats->timestamp != KTIME_MAX) { /* Mitigation in progress. 
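The interval from that timestamp to now is added to the duration reported below.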
*/ ktime_t delta = ktime_sub(ktime_get(), @@ -837,15 +836,14 @@ static int tze_seq_show(struct seq_file *s, void *v) c = ' '; } - seq_printf(s, "| %*d | %*s | %*d | %*d | %c%*lld | %*d | %*d | %*d |\n", + seq_printf(s, "| %*d | %*s | %*d | %*d | %c%*lld | %*d | %*d |\n", 4 , trip_id, - 8, type, + 8, thermal_trip_type_name(trip->type), 9, trip_stats->trip_temp, 9, trip_stats->trip_hyst, - c, 10, duration_ms, + c, 11, duration_ms, 9, trip_stats->avg, - 9, trip_stats->min, - 9, trip_stats->max); + 9, trip_stats->min); } return 0; @@ -922,3 +920,39 @@ void thermal_debug_tz_remove(struct thermal_zone_device *tz) thermal_debugfs_remove_id(thermal_dbg); kfree(trips_crossed); } + +void thermal_debug_tz_resume(struct thermal_zone_device *tz) +{ + struct thermal_debugfs *thermal_dbg = tz->debugfs; + ktime_t now = ktime_get(); + struct tz_debugfs *tz_dbg; + struct tz_episode *tze; + int i; + + if (!thermal_dbg) + return; + + mutex_lock(&thermal_dbg->lock); + + tz_dbg = &thermal_dbg->tz_dbg; + + if (!tz_dbg->nr_trips) + goto out; + + /* + * A mitigation episode was in progress before the preceding system + * suspend transition, so close it because the zone handling is starting + * over from scratch. + */ + tze = list_first_entry(&tz_dbg->tz_episodes, struct tz_episode, node); + + for (i = 0; i < tz_dbg->nr_trips; i++) + tz_episode_close_trip(tze, tz_dbg->trips_crossed[i], now); + + tze->duration = ktime_sub(now, tze->timestamp); + + tz_dbg->nr_trips = 0; + +out: + mutex_unlock(&thermal_dbg->lock); +} diff --git a/drivers/thermal/thermal_debugfs.h b/drivers/thermal/thermal_debugfs.h index 74ee65ee82ff..1c957ce2ec8f 100644 --- a/drivers/thermal/thermal_debugfs.h +++ b/drivers/thermal/thermal_debugfs.h @@ -7,6 +7,7 @@ void thermal_debug_cdev_remove(struct thermal_cooling_device *cdev); void thermal_debug_cdev_state_update(const struct thermal_cooling_device *cdev, int state); void thermal_debug_tz_add(struct thermal_zone_device *tz); void thermal_debug_tz_remove(struct thermal_zone_device *tz); +void thermal_debug_tz_resume(struct thermal_zone_device *tz); void thermal_debug_tz_trip_up(struct thermal_zone_device *tz, const struct thermal_trip *trip); void thermal_debug_tz_trip_down(struct thermal_zone_device *tz, @@ -20,6 +21,7 @@ static inline void thermal_debug_cdev_state_update(const struct thermal_cooling_ int state) {} static inline void thermal_debug_tz_add(struct thermal_zone_device *tz) {} static inline void thermal_debug_tz_remove(struct thermal_zone_device *tz) {} +static inline void thermal_debug_tz_resume(struct thermal_zone_device *tz) {} static inline void thermal_debug_tz_trip_up(struct thermal_zone_device *tz, const struct thermal_trip *trip) {}; static inline void thermal_debug_tz_trip_down(struct thermal_zone_device *tz, diff --git a/drivers/thermal/thermal_helpers.c b/drivers/thermal/thermal_helpers.c index d9f4e26ec125..81e019493557 100644 --- a/drivers/thermal/thermal_helpers.c +++ b/drivers/thermal/thermal_helpers.c @@ -39,30 +39,53 @@ int get_tz_trend(struct thermal_zone_device *tz, const struct thermal_trip *trip return trend; } +static struct thermal_instance *get_instance(struct thermal_zone_device *tz, + struct thermal_cooling_device *cdev, + const struct thermal_trip *trip) +{ + struct thermal_instance *ti; + + list_for_each_entry(ti, &tz->thermal_instances, tz_node) { + if (ti->trip == trip && ti->cdev == cdev) + return ti; + } + + return NULL; +} + +bool thermal_trip_is_bound_to_cdev(struct thermal_zone_device *tz, + const struct thermal_trip *trip, + struct 
thermal_cooling_device *cdev) +{ + bool ret; + + mutex_lock(&tz->lock); + mutex_lock(&cdev->lock); + + ret = !!get_instance(tz, cdev, trip); + + mutex_unlock(&cdev->lock); + mutex_unlock(&tz->lock); + + return ret; +} +EXPORT_SYMBOL_GPL(thermal_trip_is_bound_to_cdev); + struct thermal_instance * get_thermal_instance(struct thermal_zone_device *tz, struct thermal_cooling_device *cdev, int trip_index) { - struct thermal_instance *pos = NULL; - struct thermal_instance *target_instance = NULL; - const struct thermal_trip *trip; + struct thermal_instance *ti; mutex_lock(&tz->lock); mutex_lock(&cdev->lock); - trip = &tz->trips[trip_index].trip; - - list_for_each_entry(pos, &tz->thermal_instances, tz_node) { - if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) { - target_instance = pos; - break; - } - } + ti = get_instance(tz, cdev, &tz->trips[trip_index].trip); mutex_unlock(&cdev->lock); mutex_unlock(&tz->lock); - return target_instance; + return ti; } EXPORT_SYMBOL(get_thermal_instance); diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c index 88211ccdfbd6..72b302bf914e 100644 --- a/drivers/thermal/thermal_sysfs.c +++ b/drivers/thermal/thermal_sysfs.c @@ -88,18 +88,7 @@ trip_point_type_show(struct device *dev, struct device_attribute *attr, if (sscanf(attr->attr.name, "trip_point_%d_type", &trip_id) != 1) return -EINVAL; - switch (tz->trips[trip_id].trip.type) { - case THERMAL_TRIP_CRITICAL: - return sprintf(buf, "critical\n"); - case THERMAL_TRIP_HOT: - return sprintf(buf, "hot\n"); - case THERMAL_TRIP_PASSIVE: - return sprintf(buf, "passive\n"); - case THERMAL_TRIP_ACTIVE: - return sprintf(buf, "active\n"); - default: - return sprintf(buf, "unknown\n"); - } + return sprintf(buf, "%s\n", thermal_trip_type_name(tz->trips[trip_id].trip.type)); } static ssize_t @@ -124,7 +113,7 @@ trip_point_temp_store(struct device *dev, struct device_attribute *attr, if (temp != trip->temperature) { if (tz->ops.set_trip_temp) { - ret = tz->ops.set_trip_temp(tz, trip_id, temp); + ret = tz->ops.set_trip_temp(tz, trip, temp); if (ret) goto unlock; } @@ -150,7 +139,7 @@ trip_point_temp_show(struct device *dev, struct device_attribute *attr, if (sscanf(attr->attr.name, "trip_point_%d_temp", &trip_id) != 1) return -EINVAL; - return sprintf(buf, "%d\n", tz->trips[trip_id].trip.temperature); + return sprintf(buf, "%d\n", READ_ONCE(tz->trips[trip_id].trip.temperature)); } static ssize_t @@ -174,7 +163,7 @@ trip_point_hyst_store(struct device *dev, struct device_attribute *attr, trip = &tz->trips[trip_id].trip; if (hyst != trip->hysteresis) { - trip->hysteresis = hyst; + WRITE_ONCE(trip->hysteresis, hyst); thermal_zone_trip_updated(tz, trip); } @@ -194,7 +183,7 @@ trip_point_hyst_show(struct device *dev, struct device_attribute *attr, if (sscanf(attr->attr.name, "trip_point_%d_hyst", &trip_id) != 1) return -EINVAL; - return sprintf(buf, "%d\n", tz->trips[trip_id].trip.hysteresis); + return sprintf(buf, "%d\n", READ_ONCE(tz->trips[trip_id].trip.hysteresis)); } static ssize_t diff --git a/drivers/thermal/thermal_trip.c b/drivers/thermal/thermal_trip.c index 49e63db68517..c0b679b846b3 100644 --- a/drivers/thermal/thermal_trip.c +++ b/drivers/thermal/thermal_trip.c @@ -9,6 +9,21 @@ */ #include "thermal_core.h" +static const char *trip_type_names[] = { + [THERMAL_TRIP_ACTIVE] = "active", + [THERMAL_TRIP_PASSIVE] = "passive", + [THERMAL_TRIP_HOT] = "hot", + [THERMAL_TRIP_CRITICAL] = "critical", +}; + +const char *thermal_trip_type_name(enum thermal_trip_type trip_type) +{ + if (trip_type < 
THERMAL_TRIP_ACTIVE || trip_type > THERMAL_TRIP_CRITICAL) + return "unknown"; + + return trip_type_names[trip_type]; +} + int for_each_thermal_trip(struct thermal_zone_device *tz, int (*cb)(struct thermal_trip *, void *), void *data) @@ -47,7 +62,7 @@ int thermal_zone_get_num_trips(struct thermal_zone_device *tz) EXPORT_SYMBOL_GPL(thermal_zone_get_num_trips); /** - * __thermal_zone_set_trips - Computes the next trip points for the driver + * thermal_zone_set_trips - Computes the next trip points for the driver * @tz: a pointer to a thermal zone device structure * * The function computes the next temperature boundaries by browsing @@ -61,7 +76,7 @@ EXPORT_SYMBOL_GPL(thermal_zone_get_num_trips); * * It does not return a value */ -void __thermal_zone_set_trips(struct thermal_zone_device *tz) +void thermal_zone_set_trips(struct thermal_zone_device *tz) { const struct thermal_trip_desc *td; int low = -INT_MAX, high = INT_MAX; @@ -73,17 +88,11 @@ void __thermal_zone_set_trips(struct thermal_zone_device *tz) return; for_each_trip_desc(tz, td) { - const struct thermal_trip *trip = &td->trip; - int trip_low; - - trip_low = trip->temperature - trip->hysteresis; + if (td->threshold < tz->temperature && td->threshold > low) + low = td->threshold; - if (trip_low < tz->temperature && trip_low > low) - low = trip_low; - - if (trip->temperature > tz->temperature && - trip->temperature < high) - high = trip->temperature; + if (td->threshold > tz->temperature && td->threshold < high) + high = td->threshold; } /* No need to change trip points */ @@ -105,27 +114,17 @@ void __thermal_zone_set_trips(struct thermal_zone_device *tz) dev_err(&tz->device, "Failed to set trips: %d\n", ret); } -int __thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id, - struct thermal_trip *trip) -{ - if (!tz || trip_id < 0 || trip_id >= tz->num_trips || !trip) - return -EINVAL; - - *trip = tz->trips[trip_id].trip; - return 0; -} -EXPORT_SYMBOL_GPL(__thermal_zone_get_trip); - int thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id, struct thermal_trip *trip) { - int ret; + if (!tz || !trip || trip_id < 0 || trip_id >= tz->num_trips) + return -EINVAL; mutex_lock(&tz->lock); - ret = __thermal_zone_get_trip(tz, trip_id, trip); + *trip = tz->trips[trip_id].trip; mutex_unlock(&tz->lock); - return ret; + return 0; } EXPORT_SYMBOL_GPL(thermal_zone_get_trip); @@ -152,7 +151,7 @@ void thermal_zone_set_trip_temp(struct thermal_zone_device *tz, if (trip->temperature == temp) return; - trip->temperature = temp; + WRITE_ONCE(trip->temperature, temp); thermal_notify_tz_trip_change(tz, trip); if (temp == THERMAL_TEMP_INVALID) { diff --git a/drivers/thermal/uniphier_thermal.c b/drivers/thermal/uniphier_thermal.c index 274f36358b21..0325b7195136 100644 --- a/drivers/thermal/uniphier_thermal.c +++ b/drivers/thermal/uniphier_thermal.c @@ -239,13 +239,34 @@ static irqreturn_t uniphier_tm_alarm_irq_thread(int irq, void *_tdev) return IRQ_HANDLED; } +struct trip_walk_data { + struct uniphier_tm_dev *tdev; + int crit_temp; + int index; +}; + +static int uniphier_tm_trip_walk_cb(struct thermal_trip *trip, void *arg) +{ + struct trip_walk_data *twd = arg; + + if (trip->type == THERMAL_TRIP_CRITICAL && + trip->temperature < twd->crit_temp) + twd->crit_temp = trip->temperature; + + uniphier_tm_set_alert(twd->tdev, twd->index, trip->temperature); + twd->tdev->alert_en[twd->index++] = true; + + return 0; +} + static int uniphier_tm_probe(struct platform_device *pdev) { + struct trip_walk_data twd = { .crit_temp = INT_MAX, .index = 0 
}; struct device *dev = &pdev->dev; struct regmap *regmap; struct device_node *parent; struct uniphier_tm_dev *tdev; - int i, ret, irq, crit_temp = INT_MAX; + int ret, irq; tdev = devm_kzalloc(dev, sizeof(*tdev), GFP_KERNEL); if (!tdev) @@ -293,20 +314,10 @@ static int uniphier_tm_probe(struct platform_device *pdev) } /* set alert temperatures */ - for (i = 0; i < thermal_zone_get_num_trips(tdev->tz_dev); i++) { - struct thermal_trip trip; + twd.tdev = tdev; + thermal_zone_for_each_trip(tdev->tz_dev, uniphier_tm_trip_walk_cb, &twd); - ret = thermal_zone_get_trip(tdev->tz_dev, i, &trip); - if (ret) - return ret; - - if (trip.type == THERMAL_TRIP_CRITICAL && - trip.temperature < crit_temp) - crit_temp = trip.temperature; - uniphier_tm_set_alert(tdev, i, trip.temperature); - tdev->alert_en[i] = true; - } - if (crit_temp > CRITICAL_TEMP_LIMIT) { + if (twd.crit_temp > CRITICAL_TEMP_LIMIT) { dev_err(dev, "critical trip is over limit(>%d), or not set\n", CRITICAL_TEMP_LIMIT); return -EINVAL; diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c index ddac0a13cf84..1af9aed99c65 100644 --- a/drivers/tty/serial/8250/8250_omap.c +++ b/drivers/tty/serial/8250/8250_omap.c @@ -672,7 +672,8 @@ static irqreturn_t omap8250_irq(int irq, void *dev_id) * https://www.ti.com/lit/pdf/sprz536 */ if (priv->habit & UART_RX_TIMEOUT_QUIRK && - (iir & UART_IIR_RX_TIMEOUT) == UART_IIR_RX_TIMEOUT) { + (iir & UART_IIR_RX_TIMEOUT) == UART_IIR_RX_TIMEOUT && + serial_port_in(port, UART_OMAP_RX_LVL) == 0) { unsigned char efr2, timeout_h, timeout_l; efr2 = serial_in(up, UART_OMAP_EFR2); diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index f4f40c9373c2..ff32cd2d2863 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -120,6 +120,7 @@ #define UCR4_OREN (1<<1) /* Receiver overrun interrupt enable */ #define UCR4_DREN (1<<0) /* Recv data ready interrupt enable */ #define UFCR_RXTL_SHF 0 /* Receiver trigger level shift */ +#define UFCR_RXTL_MASK 0x3F /* Receiver trigger 6 bits wide */ #define UFCR_DCEDTE (1<<6) /* DCE/DTE mode select */ #define UFCR_RFDIV (7<<7) /* Reference freq divider mask */ #define UFCR_RFDIV_REG(x) (((x) < 7 ? 6 - (x) : 6) << 7) @@ -1551,6 +1552,7 @@ static void imx_uart_shutdown(struct uart_port *port) struct imx_port *sport = (struct imx_port *)port; unsigned long flags; u32 ucr1, ucr2, ucr4, uts; + int loops; if (sport->dma_is_enabled) { dmaengine_terminate_sync(sport->dma_chan_tx); @@ -1613,6 +1615,56 @@ static void imx_uart_shutdown(struct uart_port *port) ucr4 &= ~UCR4_TCEN; imx_uart_writel(sport, ucr4, UCR4); + /* + * We have to ensure the tx state machine ends up in OFF. This + * is especially important for rs485 where we must not leave + * the RTS signal high, blocking the bus indefinitely. + * + * All interrupts are now disabled, so imx_uart_stop_tx() will + * no longer be called from imx_uart_transmit_buffer(). It may + * still be called via the hrtimers, and if those are in play, + * we have to honour the delays. + */ + if (sport->tx_state == WAIT_AFTER_RTS || sport->tx_state == SEND) + imx_uart_stop_tx(port); + + /* + * In many cases (rs232 mode, or if tx_state was + * WAIT_AFTER_RTS, or if tx_state was SEND and there is no + * delay_rts_after_send), this will have moved directly to + * OFF. In rs485 mode, tx_state might already have been + * WAIT_AFTER_SEND and the hrtimer thus already started, or + * the above imx_uart_stop_tx() call could have started it. 
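+ * (The hrtimer handlers themselves take the port lock, which is why the + * wait loop below drops it between polls.)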
In + * those cases, we have to wait for the hrtimer to fire and + * complete the transition to OFF. + */ + loops = port->rs485.flags & SER_RS485_ENABLED ? + port->rs485.delay_rts_after_send : 0; + while (sport->tx_state != OFF && loops--) { + uart_port_unlock_irqrestore(&sport->port, flags); + msleep(1); + uart_port_lock_irqsave(&sport->port, &flags); + } + + if (sport->tx_state != OFF) { + dev_warn(sport->port.dev, "unexpected tx_state %d\n", + sport->tx_state); + /* + * This machine may be busted, but ensure the RTS + * signal is inactive in order not to block other + * devices. + */ + if (port->rs485.flags & SER_RS485_ENABLED) { + ucr2 = imx_uart_readl(sport, UCR2); + if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND) + imx_uart_rts_active(sport, &ucr2); + else + imx_uart_rts_inactive(sport, &ucr2); + imx_uart_writel(sport, ucr2, UCR2); + } + sport->tx_state = OFF; + } + uart_port_unlock_irqrestore(&sport->port, flags); clk_disable_unprepare(sport->clk_per); @@ -1933,7 +1985,7 @@ static int imx_uart_rs485_config(struct uart_port *port, struct ktermios *termio struct serial_rs485 *rs485conf) { struct imx_port *sport = (struct imx_port *)port; - u32 ucr2; + u32 ucr2, ufcr; if (rs485conf->flags & SER_RS485_ENABLED) { /* Enable receiver if low-active RTS signal is requested */ @@ -1953,7 +2005,10 @@ static int imx_uart_rs485_config(struct uart_port *port, struct ktermios *termio /* Make sure Rx is enabled in case Tx is active with Rx disabled */ if (!(rs485conf->flags & SER_RS485_ENABLED) || rs485conf->flags & SER_RS485_RX_DURING_TX) { - imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT); + /* If the receiver trigger is 0, set it to a default value */ + ufcr = imx_uart_readl(sport, UFCR); + if ((ufcr & UFCR_RXTL_MASK) == 0) + imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT); imx_uart_start_rx(port); } diff --git a/drivers/tty/serial/ma35d1_serial.c b/drivers/tty/serial/ma35d1_serial.c index 19f0a305cc43..3b4206e815fe 100644 --- a/drivers/tty/serial/ma35d1_serial.c +++ b/drivers/tty/serial/ma35d1_serial.c @@ -688,12 +688,13 @@ static int ma35d1serial_probe(struct platform_device *pdev) struct uart_ma35d1_port *up; int ret = 0; - if (pdev->dev.of_node) { - ret = of_alias_get_id(pdev->dev.of_node, "serial"); - if (ret < 0) { - dev_err(&pdev->dev, "failed to get alias/pdev id, errno %d\n", ret); - return ret; - } + if (!pdev->dev.of_node) + return -ENODEV; + + ret = of_alias_get_id(pdev->dev.of_node, "serial"); + if (ret < 0) { + dev_err(&pdev->dev, "failed to get alias/pdev id, errno %d\n", ret); + return ret; } up = &ma35d1serial_ports[ret]; up->port.line = ret; diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c index 2bd25afe0d92..69a632fefc41 100644 --- a/drivers/tty/serial/qcom_geni_serial.c +++ b/drivers/tty/serial/qcom_geni_serial.c @@ -649,15 +649,25 @@ static void qcom_geni_serial_start_tx_dma(struct uart_port *uport) static void qcom_geni_serial_start_tx_fifo(struct uart_port *uport) { + unsigned char c; u32 irq_en; - if (qcom_geni_serial_main_active(uport) || - !qcom_geni_serial_tx_empty(uport)) - return; + /* + * Start a new transfer in case the previous command was cancelled and + * left data in the FIFO which may prevent the watermark interrupt + * from triggering. Note that the stale data is discarded. 
+ */ + if (!qcom_geni_serial_main_active(uport) && + !qcom_geni_serial_tx_empty(uport)) { + if (uart_fifo_out(uport, &c, 1) == 1) { + writel(M_CMD_DONE_EN, uport->membase + SE_GENI_M_IRQ_CLEAR); + qcom_geni_serial_setup_tx(uport, 1); + writel(c, uport->membase + SE_GENI_TX_FIFOn); + } + } irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN); irq_en |= M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN; - writel(DEF_TX_WM, uport->membase + SE_GENI_TX_WATERMARK_REG); writel(irq_en, uport->membase + SE_GENI_M_IRQ_EN); } @@ -665,13 +675,17 @@ static void qcom_geni_serial_start_tx_fifo(struct uart_port *uport) static void qcom_geni_serial_stop_tx_fifo(struct uart_port *uport) { u32 irq_en; - struct qcom_geni_serial_port *port = to_dev_port(uport); irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN); irq_en &= ~(M_CMD_DONE_EN | M_TX_FIFO_WATERMARK_EN); writel(0, uport->membase + SE_GENI_TX_WATERMARK_REG); writel(irq_en, uport->membase + SE_GENI_M_IRQ_EN); - /* Possible stop tx is called multiple times. */ +} + +static void qcom_geni_serial_cancel_tx_cmd(struct uart_port *uport) +{ + struct qcom_geni_serial_port *port = to_dev_port(uport); + if (!qcom_geni_serial_main_active(uport)) return; @@ -684,6 +698,8 @@ static void qcom_geni_serial_stop_tx_fifo(struct uart_port *uport) writel(M_CMD_ABORT_EN, uport->membase + SE_GENI_M_IRQ_CLEAR); } writel(M_CMD_CANCEL_EN, uport->membase + SE_GENI_M_IRQ_CLEAR); + + port->tx_remaining = 0; } static void qcom_geni_serial_handle_rx_fifo(struct uart_port *uport, bool drop) @@ -862,7 +878,7 @@ static void qcom_geni_serial_send_chunk_fifo(struct uart_port *uport, memset(buf, 0, sizeof(buf)); tx_bytes = min(remaining, BYTES_PER_FIFO_WORD); - tx_bytes = uart_fifo_out(uport, buf, tx_bytes); + uart_fifo_out(uport, buf, tx_bytes); iowrite32_rep(uport->membase + SE_GENI_TX_FIFOn, buf, 1); @@ -890,13 +906,17 @@ static void qcom_geni_serial_handle_tx_fifo(struct uart_port *uport, else pending = kfifo_len(&tport->xmit_fifo); - /* All data has been transmitted and acknowledged as received */ - if (!pending && !status && done) { + /* All data has been transmitted or command has been cancelled */ + if (!pending && done) { qcom_geni_serial_stop_tx_fifo(uport); goto out_write_wakeup; } - avail = port->tx_fifo_depth - (status & TX_FIFO_WC); + if (active) + avail = port->tx_fifo_depth - (status & TX_FIFO_WC); + else + avail = port->tx_fifo_depth; + avail *= BYTES_PER_FIFO_WORD; chunk = min(avail, pending); @@ -1069,11 +1089,15 @@ static void qcom_geni_serial_shutdown(struct uart_port *uport) { disable_irq(uport->irq); - if (uart_console(uport)) - return; - qcom_geni_serial_stop_tx(uport); qcom_geni_serial_stop_rx(uport); + + qcom_geni_serial_cancel_tx_cmd(uport); +} + +static void qcom_geni_serial_flush_buffer(struct uart_port *uport) +{ + qcom_geni_serial_cancel_tx_cmd(uport); } static int qcom_geni_serial_port_setup(struct uart_port *uport) @@ -1532,6 +1556,7 @@ static const struct uart_ops qcom_geni_console_pops = { .request_port = qcom_geni_serial_request_port, .config_port = qcom_geni_serial_config_port, .shutdown = qcom_geni_serial_shutdown, + .flush_buffer = qcom_geni_serial_flush_buffer, .type = qcom_geni_serial_get_type, .set_mctrl = qcom_geni_serial_set_mctrl, .get_mctrl = qcom_geni_serial_get_mctrl, diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c index 8944548c30fa..c532416aec22 100644 --- a/drivers/ufs/core/ufs-mcq.c +++ b/drivers/ufs/core/ufs-mcq.c @@ -105,16 +105,15 @@ EXPORT_SYMBOL_GPL(ufshcd_mcq_config_mac); * @hba: per adapter instance * @req: pointer to the 
request to be issued * - * Return: the hardware queue instance on which the request would - * be queued. + * Return: the hardware queue instance on which the request will be or has + * been queued. %NULL if the request has already been freed. */ struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba, struct request *req) { - u32 utag = blk_mq_unique_tag(req); - u32 hwq = blk_mq_unique_tag_to_hwq(utag); + struct blk_mq_hw_ctx *hctx = READ_ONCE(req->mq_hctx); - return &hba->uhq[hwq]; + return hctx ? &hba->uhq[hctx->queue_num] : NULL; } /** @@ -515,6 +514,8 @@ int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag) if (!cmd) return -EINVAL; hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd)); + if (!hwq) + return 0; } else { hwq = hba->dev_cmd_queue; } diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 1b65e6ae4137..702fa1996992 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -5193,17 +5193,19 @@ static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth) } /** - * ufshcd_slave_configure - adjust SCSI device configurations + * ufshcd_device_configure - adjust SCSI device configurations * @sdev: pointer to SCSI device + * @lim: queue limits * * Return: 0 (success). */ -static int ufshcd_slave_configure(struct scsi_device *sdev) +static int ufshcd_device_configure(struct scsi_device *sdev, + struct queue_limits *lim) { struct ufs_hba *hba = shost_priv(sdev->host); struct request_queue *q = sdev->request_queue; - blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1); + lim->dma_pad_mask = PRDT_DATA_BYTE_COUNT_PAD - 1; /* * Block runtime-pm until all consumers are added. @@ -6456,6 +6458,8 @@ static bool ufshcd_abort_one(struct request *rq, void *priv) /* Release cmd in MCQ mode if abort succeeds */ if (is_mcq_enabled(hba) && (*ret == 0)) { hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd)); + if (!hwq) + return 0; spin_lock_irqsave(&hwq->cq_lock, flags); if (ufshcd_cmd_inflight(lrbp->cmd)) ufshcd_release_scsi_cmd(hba, lrbp); @@ -8908,7 +8912,7 @@ static const struct scsi_host_template ufshcd_driver_template = { .queuecommand = ufshcd_queuecommand, .mq_poll = ufshcd_poll, .slave_alloc = ufshcd_slave_alloc, - .slave_configure = ufshcd_slave_configure, + .device_configure = ufshcd_device_configure, .slave_destroy = ufshcd_slave_destroy, .change_queue_depth = ufshcd_change_queue_depth, .eh_abort_handler = ufshcd_abort, diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 3362af165ef5..880d52c0949d 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -291,6 +291,20 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, if (ifp->desc.bNumEndpoints >= num_ep) goto skip_to_next_endpoint_or_interface_descriptor; + /* Save a copy of the descriptor and use it instead of the original */ + endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints]; + memcpy(&endpoint->desc, d, n); + d = &endpoint->desc; + + /* Clear the reserved bits in bEndpointAddress */ + i = d->bEndpointAddress & + (USB_ENDPOINT_DIR_MASK | USB_ENDPOINT_NUMBER_MASK); + if (i != d->bEndpointAddress) { + dev_notice(ddev, "config %d interface %d altsetting %d has an endpoint descriptor with address 0x%X, changing to 0x%X\n", + cfgno, inum, asnum, d->bEndpointAddress, i); + endpoint->desc.bEndpointAddress = i; + } + /* Check for duplicate endpoint addresses */ if (config_endpoint_is_duplicate(config, inum, asnum, d)) { dev_notice(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 
0x%X, skipping\n", @@ -308,10 +322,8 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, } } - endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints]; + /* Accept this endpoint */ ++ifp->desc.bNumEndpoints; - - memcpy(&endpoint->desc, d, n); INIT_LIST_HEAD(&endpoint->urb_list); /* diff --git a/drivers/usb/core/of.c b/drivers/usb/core/of.c index f1a499ee482c..763e4122ed5b 100644 --- a/drivers/usb/core/of.c +++ b/drivers/usb/core/of.c @@ -84,9 +84,12 @@ static bool usb_of_has_devices_or_graph(const struct usb_device *hub) if (of_graph_is_present(np)) return true; - for_each_child_of_node(np, child) - if (of_property_present(child, "reg")) + for_each_child_of_node(np, child) { + if (of_property_present(child, "reg")) { + of_node_put(child); return true; + } + } return false; } diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index b4783574b8e6..13171454f959 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -506,6 +506,9 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x1b1c, 0x1b38), .driver_info = USB_QUIRK_DELAY_INIT | USB_QUIRK_DELAY_CTRL_MSG }, + /* START BP-850k Printer */ + { USB_DEVICE(0x1bc3, 0x0003), .driver_info = USB_QUIRK_NO_SET_INTF }, + /* MIDI keyboard WORLDE MINI */ { USB_DEVICE(0x1c75, 0x0204), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 9ef821ca2fc7..052852f80146 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -54,6 +54,10 @@ #define PCI_DEVICE_ID_INTEL_MTL 0x7e7e #define PCI_DEVICE_ID_INTEL_ARLH_PCH 0x777e #define PCI_DEVICE_ID_INTEL_TGL 0x9a15 +#define PCI_DEVICE_ID_INTEL_PTLH 0xe332 +#define PCI_DEVICE_ID_INTEL_PTLH_PCH 0xe37e +#define PCI_DEVICE_ID_INTEL_PTLU 0xe432 +#define PCI_DEVICE_ID_INTEL_PTLU_PCH 0xe47e #define PCI_DEVICE_ID_AMD_MR 0x163a #define PCI_INTEL_BXT_DSM_GUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511" @@ -430,6 +434,10 @@ static const struct pci_device_id dwc3_pci_id_table[] = { { PCI_DEVICE_DATA(INTEL, MTLS, &dwc3_pci_intel_swnode) }, { PCI_DEVICE_DATA(INTEL, ARLH_PCH, &dwc3_pci_intel_swnode) }, { PCI_DEVICE_DATA(INTEL, TGL, &dwc3_pci_intel_swnode) }, + { PCI_DEVICE_DATA(INTEL, PTLH, &dwc3_pci_intel_swnode) }, + { PCI_DEVICE_DATA(INTEL, PTLH_PCH, &dwc3_pci_intel_swnode) }, + { PCI_DEVICE_DATA(INTEL, PTLU, &dwc3_pci_intel_swnode) }, + { PCI_DEVICE_DATA(INTEL, PTLU_PCH, &dwc3_pci_intel_swnode) }, { PCI_DEVICE_DATA(AMD, NL_USB, &dwc3_pci_amd_swnode) }, { PCI_DEVICE_DATA(AMD, MR, &dwc3_pci_amd_mr_swnode) }, diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index ce3cfa1f36f5..0e7c1e947c0a 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -115,9 +115,12 @@ static int usb_string_copy(const char *s, char **s_copy) int ret; char *str; char *copy = *s_copy; + ret = strlen(s); if (ret > USB_MAX_STRING_LEN) return -EOVERFLOW; + if (ret < 1) + return -EINVAL; if (copy) { str = copy; diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 37eb37b0affa..0a8cf6c17f82 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -1125,10 +1125,20 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg) xhci_dbg(xhci, "Start the secondary HCD\n"); retval = xhci_run(xhci->shared_hcd); } - + if (retval) + return retval; + /* + * Resume roothubs unconditionally as PORTSC change bits are not + * immediately visible after xHC reset + */ hcd->state = HC_STATE_SUSPENDED; - if (xhci->shared_hcd) + + if (xhci->shared_hcd) 
{ xhci->shared_hcd->state = HC_STATE_SUSPENDED; + usb_hcd_resume_root_hub(xhci->shared_hcd); + } + usb_hcd_resume_root_hub(hcd); + goto done; } @@ -1152,7 +1162,6 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg) xhci_dbc_resume(xhci); - done: if (retval == 0) { /* * Resume roothubs only if there are pending events. @@ -1178,6 +1187,7 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg) usb_hcd_resume_root_hub(hcd); } } +done: /* * If system is subject to the Quirk, Compliance Mode Timer needs to * be re-initialized Always after a system resume. Ports are subject diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index 8b0308d84270..85697466b147 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c @@ -1737,6 +1737,49 @@ static void mos7840_port_remove(struct usb_serial_port *port) kfree(mos7840_port); } +static int mos7840_suspend(struct usb_serial *serial, pm_message_t message) +{ + struct moschip_port *mos7840_port; + struct usb_serial_port *port; + int i; + + for (i = 0; i < serial->num_ports; ++i) { + port = serial->port[i]; + if (!tty_port_initialized(&port->port)) + continue; + + mos7840_port = usb_get_serial_port_data(port); + + usb_kill_urb(mos7840_port->read_urb); + mos7840_port->read_urb_busy = false; + } + + return 0; +} + +static int mos7840_resume(struct usb_serial *serial) +{ + struct moschip_port *mos7840_port; + struct usb_serial_port *port; + int res; + int i; + + for (i = 0; i < serial->num_ports; ++i) { + port = serial->port[i]; + if (!tty_port_initialized(&port->port)) + continue; + + mos7840_port = usb_get_serial_port_data(port); + + mos7840_port->read_urb_busy = true; + res = usb_submit_urb(mos7840_port->read_urb, GFP_NOIO); + if (res) + mos7840_port->read_urb_busy = false; + } + + return 0; +} + static struct usb_serial_driver moschip7840_4port_device = { .driver = { .owner = THIS_MODULE, @@ -1764,6 +1807,8 @@ static struct usb_serial_driver moschip7840_4port_device = { .port_probe = mos7840_port_probe, .port_remove = mos7840_port_remove, .read_bulk_callback = mos7840_bulk_in_callback, + .suspend = mos7840_suspend, + .resume = mos7840_resume, }; static struct usb_serial_driver * const serial_drivers[] = { diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 8a5846d4adf6..311040f9b935 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -1425,6 +1425,10 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(0) | RSVD(1) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff), /* Telit LN940 (MBIM) */ .driver_info = NCTRL(0) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x3000, 0xff), /* Telit FN912 */ + .driver_info = RSVD(0) | NCTRL(3) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x3001, 0xff), /* Telit FN912 */ + .driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7010, 0xff), /* Telit LE910-S1 (RNDIS) */ .driver_info = NCTRL(2) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7011, 0xff), /* Telit LE910-S1 (ECM) */ @@ -1433,6 +1437,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(2) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x701b, 0xff), /* Telit LE910R1 (ECM) */ .driver_info = NCTRL(2) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x9000, 0xff), /* Telit generic core-dump device */ + .driver_info = NCTRL(0) }, { USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */ .driver_info = NCTRL(0) | ZLP }, { 
USB_DEVICE(TELIT_VENDOR_ID, 0x9200), /* Telit LE910S1 flashing device */ @@ -2224,6 +2230,10 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7106_2COM, 0x02, 0x02, 0x01) }, { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) }, { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) }, + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x7126, 0xff, 0x00, 0x00), + .driver_info = NCTRL(2) }, + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x7127, 0xff, 0x00, 0x00), + .driver_info = NCTRL(2) | NCTRL(3) | NCTRL(4) }, { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) }, { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MPL200), .driver_info = RSVD(1) | RSVD(4) }, @@ -2284,6 +2294,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(3) }, { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0f0, 0xff), /* Foxconn T99W373 MBIM */ .driver_info = RSVD(3) }, + { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe145, 0xff), /* Foxconn T99W651 RNDIS */ + .driver_info = RSVD(5) | RSVD(6) }, { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 (IOT version) */ .driver_info = RSVD(4) | RSVD(5) | RSVD(6) }, { USB_DEVICE(0x1782, 0x4d10) }, /* Fibocom L610 (AT mode) */ @@ -2321,6 +2333,32 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(4) }, { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x0115, 0xff), /* Rolling RW135-GL (laptop MBIM) */ .driver_info = RSVD(5) }, + { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x0802, 0xff), /* Rolling RW350-GL (laptop MBIM) */ + .driver_info = RSVD(5) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0100, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WWD for Global */ + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0100, 0xff, 0x00, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0100, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0101, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WRD for Global SKU */ + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0101, 0xff, 0x00, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0101, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0106, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WRD for China SKU */ + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0106, 0xff, 0x00, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0106, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0111, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WWD for SA */ + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0111, 0xff, 0x00, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0111, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0112, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WWD for EU */ + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0112, 0xff, 0x00, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0112, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0113, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WWD for NA */ + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0113, 0xff, 0x00, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0113, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0115, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WWD for China EDU */ + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0115, 0xff, 0x00, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0115, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0116, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WWD for Global EDU */ + { 
USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0116, 0xff, 0x00, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0116, 0xff, 0xff, 0x40) }, { USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) }, { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) }, { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) }, diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c index 987c7921affa..ba0ce0075b2f 100644 --- a/drivers/vfio/pci/vfio_pci_core.c +++ b/drivers/vfio/pci/vfio_pci_core.c @@ -1260,7 +1260,7 @@ static int vfio_pci_ioctl_get_pci_hot_reset_info( struct vfio_pci_hot_reset_info hdr; struct vfio_pci_fill_info fill = {}; bool slot = false; - int ret, count; + int ret, count = 0; if (copy_from_user(&hdr, arg, minsz)) return -EFAULT; diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c index 654290a8e1ba..f714009b9ff7 100644 --- a/drivers/virt/coco/sev-guest/sev-guest.c +++ b/drivers/virt/coco/sev-guest/sev-guest.c @@ -2,7 +2,7 @@ /* * AMD Secure Encrypted Virtualization (SEV) guest driver interface * - * Copyright (C) 2021 Advanced Micro Devices, Inc. + * Copyright (C) 2021-2024 Advanced Micro Devices, Inc. * * Author: Brijesh Singh <brijesh.singh@amd.com> */ @@ -23,6 +23,7 @@ #include <linux/sockptr.h> #include <linux/cleanup.h> #include <linux/uuid.h> +#include <linux/configfs.h> #include <uapi/linux/sev-guest.h> #include <uapi/linux/psp-sev.h> @@ -38,6 +39,8 @@ #define SNP_REQ_MAX_RETRY_DURATION (60*HZ) #define SNP_REQ_RETRY_DELAY (2*HZ) +#define SVSM_MAX_RETRIES 3 + struct snp_guest_crypto { struct crypto_aead *tfm; u8 *iv, *authtag; @@ -70,8 +73,15 @@ struct snp_guest_dev { u8 *vmpck; }; -static u32 vmpck_id; -module_param(vmpck_id, uint, 0444); +/* + * The VMPCK ID represents the key used by the SNP guest to communicate with the + * SEV firmware in the AMD Secure Processor (ASP, aka PSP). By default, the key + * used will be the key associated with the VMPL at which the guest is running. + * Should the default key be wiped (see snp_disable_vmpck()), this parameter + * allows for using one of the remaining VMPCKs. + */ +static int vmpck_id = -1; +module_param(vmpck_id, int, 0444); MODULE_PARM_DESC(vmpck_id, "The VMPCK ID to use when communicating with the PSP."); /* Mutex to serialize the shared buffer access and command handling. 
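 * The SVSM attestation path added below takes it via * guard(mutex)(&snp_cmd_mutex), so the lock is released automatically on * every return path.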
*/ @@ -783,6 +793,143 @@ struct snp_msg_cert_entry { u32 length; }; +static int sev_svsm_report_new(struct tsm_report *report, void *data) +{ + unsigned int rep_len, man_len, certs_len; + struct tsm_desc *desc = &report->desc; + struct svsm_attest_call ac = {}; + unsigned int retry_count; + void *rep, *man, *certs; + struct svsm_call call; + unsigned int size; + bool try_again; + void *buffer; + u64 call_id; + int ret; + + /* + * Allocate pages for the request: + * - Report blob (4K) + * - Manifest blob (4K) + * - Certificate blob (16K) + * + * Above addresses must be 4K aligned + */ + rep_len = SZ_4K; + man_len = SZ_4K; + certs_len = SEV_FW_BLOB_MAX_SIZE; + + guard(mutex)(&snp_cmd_mutex); + + if (guid_is_null(&desc->service_guid)) { + call_id = SVSM_ATTEST_CALL(SVSM_ATTEST_SERVICES); + } else { + export_guid(ac.service_guid, &desc->service_guid); + ac.service_manifest_ver = desc->service_manifest_version; + + call_id = SVSM_ATTEST_CALL(SVSM_ATTEST_SINGLE_SERVICE); + } + + retry_count = 0; + +retry: + memset(&call, 0, sizeof(call)); + + size = rep_len + man_len + certs_len; + buffer = alloc_pages_exact(size, __GFP_ZERO); + if (!buffer) + return -ENOMEM; + + rep = buffer; + ac.report_buf.pa = __pa(rep); + ac.report_buf.len = rep_len; + + man = rep + rep_len; + ac.manifest_buf.pa = __pa(man); + ac.manifest_buf.len = man_len; + + certs = man + man_len; + ac.certificates_buf.pa = __pa(certs); + ac.certificates_buf.len = certs_len; + + ac.nonce.pa = __pa(desc->inblob); + ac.nonce.len = desc->inblob_len; + + ret = snp_issue_svsm_attest_req(call_id, &call, &ac); + if (ret) { + free_pages_exact(buffer, size); + + switch (call.rax_out) { + case SVSM_ERR_INVALID_PARAMETER: + try_again = false; + + if (ac.report_buf.len > rep_len) { + rep_len = PAGE_ALIGN(ac.report_buf.len); + try_again = true; + } + + if (ac.manifest_buf.len > man_len) { + man_len = PAGE_ALIGN(ac.manifest_buf.len); + try_again = true; + } + + if (ac.certificates_buf.len > certs_len) { + certs_len = PAGE_ALIGN(ac.certificates_buf.len); + try_again = true; + } + + /* If one of the buffers wasn't large enough, retry the request */ + if (try_again && retry_count < SVSM_MAX_RETRIES) { + retry_count++; + goto retry; + } + + return -EINVAL; + default: + pr_err_ratelimited("SVSM attestation request failed (%d / 0x%llx)\n", + ret, call.rax_out); + return -EINVAL; + } + } + + /* + * Allocate all the blob memory buffers at once so that the cleanup is + * done for errors that occur after the first allocation (i.e. before + * using no_free_ptr()). + */ + rep_len = ac.report_buf.len; + void *rbuf __free(kvfree) = kvzalloc(rep_len, GFP_KERNEL); + + man_len = ac.manifest_buf.len; + void *mbuf __free(kvfree) = kvzalloc(man_len, GFP_KERNEL); + + certs_len = ac.certificates_buf.len; + void *cbuf __free(kvfree) = certs_len ? 
kvzalloc(certs_len, GFP_KERNEL) : NULL; + + if (!rbuf || !mbuf || (certs_len && !cbuf)) { + free_pages_exact(buffer, size); + return -ENOMEM; + } + + memcpy(rbuf, rep, rep_len); + report->outblob = no_free_ptr(rbuf); + report->outblob_len = rep_len; + + memcpy(mbuf, man, man_len); + report->manifestblob = no_free_ptr(mbuf); + report->manifestblob_len = man_len; + + if (certs_len) { + memcpy(cbuf, certs, certs_len); + report->auxblob = no_free_ptr(cbuf); + report->auxblob_len = certs_len; + } + + free_pages_exact(buffer, size); + + return 0; +} + static int sev_report_new(struct tsm_report *report, void *data) { struct snp_msg_cert_entry *cert_table; @@ -797,6 +944,13 @@ static int sev_report_new(struct tsm_report *report, void *data) if (desc->inblob_len != SNP_REPORT_USER_DATA_SIZE) return -EINVAL; + if (desc->service_provider) { + if (strcmp(desc->service_provider, "svsm")) + return -EINVAL; + + return sev_svsm_report_new(report, data); + } + void *buf __free(kvfree) = kvzalloc(size, GFP_KERNEL); if (!buf) return -ENOMEM; @@ -885,9 +1039,42 @@ static int sev_report_new(struct tsm_report *report, void *data) return 0; } -static const struct tsm_ops sev_tsm_ops = { +static bool sev_report_attr_visible(int n) +{ + switch (n) { + case TSM_REPORT_GENERATION: + case TSM_REPORT_PROVIDER: + case TSM_REPORT_PRIVLEVEL: + case TSM_REPORT_PRIVLEVEL_FLOOR: + return true; + case TSM_REPORT_SERVICE_PROVIDER: + case TSM_REPORT_SERVICE_GUID: + case TSM_REPORT_SERVICE_MANIFEST_VER: + return snp_vmpl; + } + + return false; +} + +static bool sev_report_bin_attr_visible(int n) +{ + switch (n) { + case TSM_REPORT_INBLOB: + case TSM_REPORT_OUTBLOB: + case TSM_REPORT_AUXBLOB: + return true; + case TSM_REPORT_MANIFESTBLOB: + return snp_vmpl; + } + + return false; +} + +static struct tsm_ops sev_tsm_ops = { .name = KBUILD_MODNAME, .report_new = sev_report_new, + .report_attr_visible = sev_report_attr_visible, + .report_bin_attr_visible = sev_report_bin_attr_visible, }; static void unregister_sev_tsm(void *data) @@ -923,6 +1110,10 @@ static int __init sev_guest_probe(struct platform_device *pdev) if (!snp_dev) goto e_unmap; + /* Adjust the default VMPCK key based on the executing VMPL level */ + if (vmpck_id == -1) + vmpck_id = snp_vmpl; + ret = -EINVAL; snp_dev->vmpck = get_vmpck(vmpck_id, secrets, &snp_dev->os_area_msg_seqno); if (!snp_dev->vmpck) { @@ -968,7 +1159,10 @@ static int __init sev_guest_probe(struct platform_device *pdev) snp_dev->input.resp_gpa = __pa(snp_dev->response); snp_dev->input.data_gpa = __pa(snp_dev->certs_data); - ret = tsm_register(&sev_tsm_ops, snp_dev, &tsm_report_extra_type); + /* Set the privlevel_floor attribute based on the vmpck_id */ + sev_tsm_ops.privlevel_floor = vmpck_id; + + ret = tsm_register(&sev_tsm_ops, snp_dev); if (ret) goto e_free_cert_data; @@ -1009,8 +1203,13 @@ static void __exit sev_guest_remove(struct platform_device *pdev) * This driver is meant to be a common SEV guest interface driver and to * support any SEV guest API. As such, even though it has been introduced * with the SEV-SNP support, it is named "sev-guest". + * + * sev_guest_remove() lives in .exit.text. For drivers registered via + * module_platform_driver_probe() this is ok because they cannot get unbound + * at runtime. So mark the driver struct with __refdata to prevent modpost + * triggering a section mismatch warning. 
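+ * (platform_driver_probe() also sets suppress_bind_attrs, so the device + * cannot be unbound via sysfs.)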
*/ -static struct platform_driver sev_guest_driver = { +static struct platform_driver sev_guest_driver __refdata = { .remove_new = __exit_p(sev_guest_remove), .driver = { .name = "sev-guest", diff --git a/drivers/virt/coco/tdx-guest/tdx-guest.c b/drivers/virt/coco/tdx-guest/tdx-guest.c index 1253bf76b570..2acba56ad42e 100644 --- a/drivers/virt/coco/tdx-guest/tdx-guest.c +++ b/drivers/virt/coco/tdx-guest/tdx-guest.c @@ -249,6 +249,28 @@ done: return ret; } +static bool tdx_report_attr_visible(int n) +{ + switch (n) { + case TSM_REPORT_GENERATION: + case TSM_REPORT_PROVIDER: + return true; + } + + return false; +} + +static bool tdx_report_bin_attr_visible(int n) +{ + switch (n) { + case TSM_REPORT_INBLOB: + case TSM_REPORT_OUTBLOB: + return true; + } + + return false; +} + static long tdx_guest_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { @@ -281,6 +303,8 @@ MODULE_DEVICE_TABLE(x86cpu, tdx_guest_ids); static const struct tsm_ops tdx_tsm_ops = { .name = KBUILD_MODNAME, .report_new = tdx_report_new, + .report_attr_visible = tdx_report_attr_visible, + .report_bin_attr_visible = tdx_report_bin_attr_visible, }; static int __init tdx_guest_init(void) @@ -301,7 +325,7 @@ static int __init tdx_guest_init(void) goto free_misc; } - ret = tsm_register(&tdx_tsm_ops, NULL, NULL); + ret = tsm_register(&tdx_tsm_ops, NULL); if (ret) goto free_quote; diff --git a/drivers/virt/coco/tsm.c b/drivers/virt/coco/tsm.c index d1c2db83a8ca..9432d4e303f1 100644 --- a/drivers/virt/coco/tsm.c +++ b/drivers/virt/coco/tsm.c @@ -14,7 +14,6 @@ static struct tsm_provider { const struct tsm_ops *ops; - const struct config_item_type *type; void *data; } provider; static DECLARE_RWSEM(tsm_rwsem); @@ -35,7 +34,7 @@ static DECLARE_RWSEM(tsm_rwsem); * The attestation report format is TSM provider specific, when / if a standard * materializes that can be published instead of the vendor layout. Until then * the 'provider' attribute indicates the format of 'outblob', and optionally - * 'auxblob'. + * 'auxblob' and 'manifestblob'. */ struct tsm_report_state { @@ -48,6 +47,7 @@ struct tsm_report_state { enum tsm_data_select { TSM_REPORT, TSM_CERTS, + TSM_MANIFEST, }; static struct tsm_report *to_tsm_report(struct config_item *cfg) @@ -119,6 +119,74 @@ static ssize_t tsm_report_privlevel_floor_show(struct config_item *cfg, } CONFIGFS_ATTR_RO(tsm_report_, privlevel_floor); +static ssize_t tsm_report_service_provider_store(struct config_item *cfg, + const char *buf, size_t len) +{ + struct tsm_report *report = to_tsm_report(cfg); + size_t sp_len; + char *sp; + int rc; + + guard(rwsem_write)(&tsm_rwsem); + rc = try_advance_write_generation(report); + if (rc) + return rc; + + sp_len = (buf[len - 1] != '\n') ? 
diff --git a/drivers/virt/coco/tsm.c b/drivers/virt/coco/tsm.c
index d1c2db83a8ca..9432d4e303f1 100644
--- a/drivers/virt/coco/tsm.c
+++ b/drivers/virt/coco/tsm.c
@@ -14,7 +14,6 @@
 static struct tsm_provider {
 	const struct tsm_ops *ops;
-	const struct config_item_type *type;
 	void *data;
 } provider;
 static DECLARE_RWSEM(tsm_rwsem);
@@ -35,7 +34,7 @@ static DECLARE_RWSEM(tsm_rwsem);
  * The attestation report format is TSM provider specific, when / if a standard
  * materializes that can be published instead of the vendor layout. Until then
  * the 'provider' attribute indicates the format of 'outblob', and optionally
- * 'auxblob'.
+ * 'auxblob' and 'manifestblob'.
  */
 
 struct tsm_report_state {
@@ -48,6 +47,7 @@ struct tsm_report_state {
 enum tsm_data_select {
 	TSM_REPORT,
 	TSM_CERTS,
+	TSM_MANIFEST,
 };
 
 static struct tsm_report *to_tsm_report(struct config_item *cfg)
@@ -119,6 +119,74 @@ static ssize_t tsm_report_privlevel_floor_show(struct config_item *cfg,
 }
 CONFIGFS_ATTR_RO(tsm_report_, privlevel_floor);
 
+static ssize_t tsm_report_service_provider_store(struct config_item *cfg,
+						 const char *buf, size_t len)
+{
+	struct tsm_report *report = to_tsm_report(cfg);
+	size_t sp_len;
+	char *sp;
+	int rc;
+
+	guard(rwsem_write)(&tsm_rwsem);
+	rc = try_advance_write_generation(report);
+	if (rc)
+		return rc;
+
+	sp_len = (buf[len - 1] != '\n') ? len : len - 1;
+
+	sp = kstrndup(buf, sp_len, GFP_KERNEL);
+	if (!sp)
+		return -ENOMEM;
+	kfree(report->desc.service_provider);
+
+	report->desc.service_provider = sp;
+
+	return len;
+}
+CONFIGFS_ATTR_WO(tsm_report_, service_provider);
+
+static ssize_t tsm_report_service_guid_store(struct config_item *cfg,
+					     const char *buf, size_t len)
+{
+	struct tsm_report *report = to_tsm_report(cfg);
+	int rc;
+
+	guard(rwsem_write)(&tsm_rwsem);
+	rc = try_advance_write_generation(report);
+	if (rc)
+		return rc;
+
+	report->desc.service_guid = guid_null;
+
+	rc = guid_parse(buf, &report->desc.service_guid);
+	if (rc)
+		return rc;
+
+	return len;
+}
+CONFIGFS_ATTR_WO(tsm_report_, service_guid);
+
+static ssize_t tsm_report_service_manifest_version_store(struct config_item *cfg,
+							 const char *buf, size_t len)
+{
+	struct tsm_report *report = to_tsm_report(cfg);
+	unsigned int val;
+	int rc;
+
+	rc = kstrtouint(buf, 0, &val);
+	if (rc)
+		return rc;
+
+	guard(rwsem_write)(&tsm_rwsem);
+	rc = try_advance_write_generation(report);
+	if (rc)
+		return rc;
+	report->desc.service_manifest_version = val;
+
+	return len;
+}
+CONFIGFS_ATTR_WO(tsm_report_, service_manifest_version);
+
 static ssize_t tsm_report_inblob_write(struct config_item *cfg,
 				       const void *buf, size_t count)
 {
@@ -163,6 +231,9 @@ static ssize_t __read_report(struct tsm_report *report, void *buf, size_t count,
 	if (select == TSM_REPORT) {
 		out = report->outblob;
 		len = report->outblob_len;
+	} else if (select == TSM_MANIFEST) {
+		out = report->manifestblob;
+		len = report->manifestblob_len;
 	} else {
 		out = report->auxblob;
 		len = report->auxblob_len;
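All three stores above funnel through try_advance_write_generation() before touching the descriptor, which is what lets the read side below hand back a cached report safely. Reduced to its core, the staleness check in the next hunk works roughly like this (hypothetical names; the real fields live in struct tsm_report_state):

/* Illustrative sketch only; the real state lives in tsm.c. */
#include <linux/types.h>

struct cached_report {
	unsigned long write_generation;	/* bumped by every attribute store */
	unsigned long read_generation;	/* generation the blob was built at */
	void *outblob;			/* NULL until report_new() succeeds */
};

/* Any store since the last report_new() invalidates the cache. */
static bool cache_is_stale(const struct cached_report *r)
{
	return !r->outblob || r->read_generation != r->write_generation;
}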
@@ -188,7 +259,7 @@ static ssize_t read_cached_report(struct tsm_report *report, void *buf,
 
 	/*
 	 * A given TSM backend always fills in ->outblob regardless of
-	 * whether the report includes an auxblob or not.
+	 * whether the report includes an auxblob/manifestblob or not.
 	 */
 	if (!report->outblob ||
 	    state->read_generation != state->write_generation)
@@ -224,8 +295,10 @@ static ssize_t tsm_report_read(struct tsm_report *report, void *buf,
 
 	kvfree(report->outblob);
 	kvfree(report->auxblob);
+	kvfree(report->manifestblob);
 	report->outblob = NULL;
 	report->auxblob = NULL;
+	report->manifestblob = NULL;
 	rc = ops->report_new(report, provider.data);
 	if (rc < 0)
 		return rc;
@@ -252,34 +325,31 @@ static ssize_t tsm_report_auxblob_read(struct config_item *cfg, void *buf,
 }
 CONFIGFS_BIN_ATTR_RO(tsm_report_, auxblob, NULL, TSM_OUTBLOB_MAX);
 
-#define TSM_DEFAULT_ATTRS() \
-	&tsm_report_attr_generation, \
-	&tsm_report_attr_provider
+static ssize_t tsm_report_manifestblob_read(struct config_item *cfg, void *buf,
+					    size_t count)
+{
+	struct tsm_report *report = to_tsm_report(cfg);
 
-static struct configfs_attribute *tsm_report_attrs[] = {
-	TSM_DEFAULT_ATTRS(),
-	NULL,
-};
+	return tsm_report_read(report, buf, count, TSM_MANIFEST);
+}
+CONFIGFS_BIN_ATTR_RO(tsm_report_, manifestblob, NULL, TSM_OUTBLOB_MAX);
 
-static struct configfs_attribute *tsm_report_extra_attrs[] = {
-	TSM_DEFAULT_ATTRS(),
-	&tsm_report_attr_privlevel,
-	&tsm_report_attr_privlevel_floor,
+static struct configfs_attribute *tsm_report_attrs[] = {
+	[TSM_REPORT_GENERATION] = &tsm_report_attr_generation,
+	[TSM_REPORT_PROVIDER] = &tsm_report_attr_provider,
+	[TSM_REPORT_PRIVLEVEL] = &tsm_report_attr_privlevel,
+	[TSM_REPORT_PRIVLEVEL_FLOOR] = &tsm_report_attr_privlevel_floor,
+	[TSM_REPORT_SERVICE_PROVIDER] = &tsm_report_attr_service_provider,
+	[TSM_REPORT_SERVICE_GUID] = &tsm_report_attr_service_guid,
+	[TSM_REPORT_SERVICE_MANIFEST_VER] = &tsm_report_attr_service_manifest_version,
 	NULL,
 };
 
-#define TSM_DEFAULT_BIN_ATTRS() \
-	&tsm_report_attr_inblob, \
-	&tsm_report_attr_outblob
-
 static struct configfs_bin_attribute *tsm_report_bin_attrs[] = {
-	TSM_DEFAULT_BIN_ATTRS(),
-	NULL,
-};
-
-static struct configfs_bin_attribute *tsm_report_bin_extra_attrs[] = {
-	TSM_DEFAULT_BIN_ATTRS(),
-	&tsm_report_attr_auxblob,
+	[TSM_REPORT_INBLOB] = &tsm_report_attr_inblob,
+	[TSM_REPORT_OUTBLOB] = &tsm_report_attr_outblob,
+	[TSM_REPORT_AUXBLOB] = &tsm_report_attr_auxblob,
+	[TSM_REPORT_MANIFESTBLOB] = &tsm_report_attr_manifestblob,
 	NULL,
 };
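Note the switch from appended macros to designated initializers in the two arrays above: the array index is the same 'n' that configfs later hands to the is_visible() callbacks, so it has to line up with the TSM_REPORT_* enum regardless of the order the entries are written in. Schematically (invented two-entry example, not the real attributes):

/* Illustrative sketch only. */
enum { EXAMPLE_ATTR_FOO, EXAMPLE_ATTR_BAR };

static const char *example_attrs[] = {
	/* index == the 'n' handed to is_visible(), so key it by the enum */
	[EXAMPLE_ATTR_FOO] = "foo",
	[EXAMPLE_ATTR_BAR] = "bar",
	NULL,	/* configfs-style NULL terminator */
};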
@@ -288,8 +358,10 @@ static void tsm_report_item_release(struct config_item *cfg)
 	struct tsm_report *report = to_tsm_report(cfg);
 	struct tsm_report_state *state = to_state(report);
 
+	kvfree(report->manifestblob);
 	kvfree(report->auxblob);
 	kvfree(report->outblob);
+	kfree(report->desc.service_provider);
 	kfree(state);
 }
 
@@ -297,21 +369,44 @@ static struct configfs_item_operations tsm_report_item_ops = {
 	.release = tsm_report_item_release,
 };
 
-const struct config_item_type tsm_report_default_type = {
-	.ct_owner = THIS_MODULE,
-	.ct_bin_attrs = tsm_report_bin_attrs,
-	.ct_attrs = tsm_report_attrs,
-	.ct_item_ops = &tsm_report_item_ops,
+static bool tsm_report_is_visible(struct config_item *item,
+				  struct configfs_attribute *attr, int n)
+{
+	guard(rwsem_read)(&tsm_rwsem);
+	if (!provider.ops)
+		return false;
+
+	if (!provider.ops->report_attr_visible)
+		return true;
+
+	return provider.ops->report_attr_visible(n);
+}
+
+static bool tsm_report_is_bin_visible(struct config_item *item,
+				      struct configfs_bin_attribute *attr, int n)
+{
+	guard(rwsem_read)(&tsm_rwsem);
+	if (!provider.ops)
+		return false;
+
+	if (!provider.ops->report_bin_attr_visible)
+		return true;
+
+	return provider.ops->report_bin_attr_visible(n);
+}
+
+static struct configfs_group_operations tsm_report_attr_group_ops = {
+	.is_visible = tsm_report_is_visible,
+	.is_bin_visible = tsm_report_is_bin_visible,
 };
-EXPORT_SYMBOL_GPL(tsm_report_default_type);
 
-const struct config_item_type tsm_report_extra_type = {
+static const struct config_item_type tsm_report_type = {
 	.ct_owner = THIS_MODULE,
-	.ct_bin_attrs = tsm_report_bin_extra_attrs,
-	.ct_attrs = tsm_report_extra_attrs,
+	.ct_bin_attrs = tsm_report_bin_attrs,
+	.ct_attrs = tsm_report_attrs,
 	.ct_item_ops = &tsm_report_item_ops,
+	.ct_group_ops = &tsm_report_attr_group_ops,
 };
-EXPORT_SYMBOL_GPL(tsm_report_extra_type);
 
 static struct config_item *tsm_report_make_item(struct config_group *group,
 						const char *name)
@@ -326,7 +421,7 @@ static struct config_item *tsm_report_make_item(struct config_group *group,
 	if (!state)
 		return ERR_PTR(-ENOMEM);
 
-	config_item_init_type_name(&state->cfg, name, provider.type);
+	config_item_init_type_name(&state->cfg, name, &tsm_report_type);
 	return &state->cfg;
 }
 
@@ -353,16 +448,10 @@ static struct configfs_subsystem tsm_configfs = {
 	.su_mutex = __MUTEX_INITIALIZER(tsm_configfs.su_mutex),
 };
 
-int tsm_register(const struct tsm_ops *ops, void *priv,
-		 const struct config_item_type *type)
+int tsm_register(const struct tsm_ops *ops, void *priv)
 {
 	const struct tsm_ops *conflict;
 
-	if (!type)
-		type = &tsm_report_default_type;
-	if (!(type == &tsm_report_default_type || type == &tsm_report_extra_type))
-		return -EINVAL;
-
 	guard(rwsem_write)(&tsm_rwsem);
 	conflict = provider.ops;
 	if (conflict) {
@@ -372,7 +461,6 @@ int tsm_register(const struct tsm_ops *ops, void *priv,
 
 	provider.ops = ops;
 	provider.data = priv;
-	provider.type = type;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(tsm_register);
@@ -384,7 +472,6 @@ int tsm_unregister(const struct tsm_ops *ops)
 		return -EBUSY;
 	provider.ops = NULL;
 	provider.data = NULL;
-	provider.type = NULL;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(tsm_unregister);
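The visibility hooks and tsm_register()/tsm_unregister() above all use scope-based locking from <linux/cleanup.h> rather than explicit unlock calls, which is why the early returns need no cleanup. A small self-contained sketch of the guard() idiom, with invented names:

/* Illustrative sketch only; invented names throughout. */
#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/rwsem.h>

static DECLARE_RWSEM(example_rwsem);
static int example_state;

static int example_read_state(void)
{
	/* Lock taken here, dropped automatically when the scope ends. */
	guard(rwsem_read)(&example_rwsem);

	if (!example_state)
		return -ENODEV;		/* no explicit up_read() needed */

	return example_state;		/* nor here */
}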
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index f6a2216c2c87..9b7fcc7dbb38 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -729,4 +729,5 @@ static void __exit evtchn_cleanup(void)
 
 module_init(evtchn_init);
 module_exit(evtchn_cleanup);
+MODULE_DESCRIPTION("Xen /dev/xen/evtchn device driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index c16df629907e..b4b4ebed68da 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -208,7 +208,7 @@ static void do_reboot(void)
 	orderly_reboot();
 }
 
-static struct shutdown_handler shutdown_handlers[] = {
+static const struct shutdown_handler shutdown_handlers[] = {
 	{ "poweroff",	true,	do_poweroff },
 	{ "halt",	false,	do_poweroff },
 	{ "reboot",	true,	do_reboot },
diff --git a/drivers/xen/privcmd-buf.c b/drivers/xen/privcmd-buf.c
index 2fa10ca5be14..0f0dad427d7e 100644
--- a/drivers/xen/privcmd-buf.c
+++ b/drivers/xen/privcmd-buf.c
@@ -19,6 +19,7 @@
 
 #include "privcmd.h"
 
+MODULE_DESCRIPTION("Xen Mmap of hypercall buffers");
 MODULE_LICENSE("GPL");
 
 struct privcmd_buf_private {
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 67dfa4778864..9563650dfbaf 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -17,6 +17,7 @@
 #include <linux/poll.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/srcu.h>
 #include <linux/string.h>
 #include <linux/workqueue.h>
 #include <linux/errno.h>
@@ -48,6 +49,7 @@
 
 #include "privcmd.h"
 
+MODULE_DESCRIPTION("Xen hypercall passthrough driver");
 MODULE_LICENSE("GPL");
 
 #define PRIV_VMA_LOCKED ((void *)1)
@@ -845,7 +847,8 @@ out:
 
 #ifdef CONFIG_XEN_PRIVCMD_EVENTFD
 /* Irqfd support */
 static struct workqueue_struct *irqfd_cleanup_wq;
-static DEFINE_MUTEX(irqfds_lock);
+static DEFINE_SPINLOCK(irqfds_lock);
+DEFINE_STATIC_SRCU(irqfds_srcu);
 static LIST_HEAD(irqfds_list);
 
 struct privcmd_kernel_irqfd {
@@ -873,6 +876,9 @@ static void irqfd_shutdown(struct work_struct *work)
 		container_of(work, struct privcmd_kernel_irqfd, shutdown);
 	u64 cnt;
 
+	/* Make sure irqfd has been initialized in assign path */
+	synchronize_srcu(&irqfds_srcu);
+
 	eventfd_ctx_remove_wait_queue(kirqfd->eventfd, &kirqfd->wait, &cnt);
 	eventfd_ctx_put(kirqfd->eventfd);
 	kfree(kirqfd);
@@ -909,9 +915,11 @@ irqfd_wakeup(wait_queue_entry_t *wait, unsigned int mode, int sync, void *key)
 		irqfd_inject(kirqfd);
 
 	if (flags & EPOLLHUP) {
-		mutex_lock(&irqfds_lock);
+		unsigned long flags;
+
+		spin_lock_irqsave(&irqfds_lock, flags);
 		irqfd_deactivate(kirqfd);
-		mutex_unlock(&irqfds_lock);
+		spin_unlock_irqrestore(&irqfds_lock, flags);
 	}
 
 	return 0;
@@ -929,10 +937,11 @@ irqfd_poll_func(struct file *file, wait_queue_head_t *wqh, poll_table *pt)
 static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
 {
 	struct privcmd_kernel_irqfd *kirqfd, *tmp;
+	unsigned long flags;
 	__poll_t events;
 	struct fd f;
 	void *dm_op;
-	int ret;
+	int ret, idx;
 
 	kirqfd = kzalloc(sizeof(*kirqfd) + irqfd->size, GFP_KERNEL);
 	if (!kirqfd)
@@ -968,18 +977,19 @@ static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
 	init_waitqueue_func_entry(&kirqfd->wait, irqfd_wakeup);
 	init_poll_funcptr(&kirqfd->pt, irqfd_poll_func);
 
-	mutex_lock(&irqfds_lock);
+	spin_lock_irqsave(&irqfds_lock, flags);
 
 	list_for_each_entry(tmp, &irqfds_list, list) {
 		if (kirqfd->eventfd == tmp->eventfd) {
 			ret = -EBUSY;
-			mutex_unlock(&irqfds_lock);
+			spin_unlock_irqrestore(&irqfds_lock, flags);
 			goto error_eventfd;
 		}
 	}
 
+	idx = srcu_read_lock(&irqfds_srcu);
 	list_add_tail(&kirqfd->list, &irqfds_list);
-	mutex_unlock(&irqfds_lock);
+	spin_unlock_irqrestore(&irqfds_lock, flags);
 
 	/*
 	 * Check if there was an event already pending on the eventfd before we
@@ -989,6 +999,8 @@ static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
 	if (events & EPOLLIN)
 		irqfd_inject(kirqfd);
 
+	srcu_read_unlock(&irqfds_srcu, idx);
+
 	/*
 	 * Do not drop the file until the kirqfd is fully initialized, otherwise
 	 * we might race against the EPOLLHUP.
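The mutex becomes a spinlock because irqfd_deactivate() is reachable from irqfd_wakeup(), a wait-queue callback that can run with interrupts disabled; the new SRCU section is what the "Make sure irqfd has been initialized in assign path" comment in irqfd_shutdown() is waiting on. Stripped to its two halves, the handshake looks roughly like this (hypothetical names):

/* Illustrative sketch only; invented names. */
#include <linux/srcu.h>

DEFINE_STATIC_SRCU(example_srcu);

/* Assign side: publish and finish initializing inside the read section. */
static void example_assign(void)
{
	int idx;

	idx = srcu_read_lock(&example_srcu);
	/* add the object to the list, deliver any already-pending event */
	srcu_read_unlock(&example_srcu, idx);
}

/* Shutdown side: cannot proceed until every read section has exited. */
static void example_shutdown(void)
{
	synchronize_srcu(&example_srcu);
	/* the object is fully initialized; safe to tear it down */
}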
@@ -1011,12 +1023,13 @@ static int privcmd_irqfd_deassign(struct privcmd_irqfd *irqfd)
 {
 	struct privcmd_kernel_irqfd *kirqfd;
 	struct eventfd_ctx *eventfd;
+	unsigned long flags;
 
 	eventfd = eventfd_ctx_fdget(irqfd->fd);
 	if (IS_ERR(eventfd))
 		return PTR_ERR(eventfd);
 
-	mutex_lock(&irqfds_lock);
+	spin_lock_irqsave(&irqfds_lock, flags);
 
 	list_for_each_entry(kirqfd, &irqfds_list, list) {
 		if (kirqfd->eventfd == eventfd) {
@@ -1025,7 +1038,7 @@ static int privcmd_irqfd_deassign(struct privcmd_irqfd *irqfd)
 		}
 	}
 
-	mutex_unlock(&irqfds_lock);
+	spin_unlock_irqrestore(&irqfds_lock, flags);
 
 	eventfd_ctx_put(eventfd);
 
@@ -1073,13 +1086,14 @@ static int privcmd_irqfd_init(void)
 static void privcmd_irqfd_exit(void)
 {
 	struct privcmd_kernel_irqfd *kirqfd, *tmp;
+	unsigned long flags;
 
-	mutex_lock(&irqfds_lock);
+	spin_lock_irqsave(&irqfds_lock, flags);
 
 	list_for_each_entry_safe(kirqfd, tmp, &irqfds_list, list)
 		irqfd_deactivate(kirqfd);
 
-	mutex_unlock(&irqfds_lock);
+	spin_unlock_irqrestore(&irqfds_lock, flags);
 
 	destroy_workqueue(irqfd_cleanup_wq);
 }
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index e34b623e4b41..4faebbb84999 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -1708,5 +1708,6 @@ static void __exit xen_pcibk_cleanup(void)
 
 module_init(xen_pcibk_init);
 module_exit(xen_pcibk_cleanup);
+MODULE_DESCRIPTION("Xen PCI-device stub driver");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_ALIAS("xen-backend:pci");
diff --git a/drivers/zorro/zorro.c b/drivers/zorro/zorro.c
index 2196474ce6ef..4e23d53d269e 100644
--- a/drivers/zorro/zorro.c
+++ b/drivers/zorro/zorro.c
@@ -18,6 +18,7 @@
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
+#include <linux/string_choices.h>
 #include <asm/byteorder.h>
 #include <asm/setup.h>
@@ -152,7 +153,7 @@ static int __init amiga_zorro_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, bus);
 
 	pr_info("Zorro: Probing AutoConfig expansion devices: %u device%s\n",
-		zorro_num_autocon, zorro_num_autocon == 1 ? "" : "s");
+		zorro_num_autocon, str_plural(zorro_num_autocon));
 
 	/* First identify all devices ... */
 	for (i = 0; i < zorro_num_autocon; i++) {
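The zorro.c hunk is a straight substitution: str_plural() is, to a close approximation, the same ternary it replaces, with a name that states the intent:

/* Close approximation of the helper in <linux/string_choices.h>. */
#include <linux/types.h>

static inline const char *str_plural(size_t num)
{
	return num == 1 ? "" : "s";
}

/* e.g.: pr_info("%u device%s\n", n, str_plural(n)); */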