Diffstat (limited to 'drivers'): 88 files changed, 5108 insertions, 805 deletions
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c index 1e29ba76615d..ac6c7e4900f4 100644 --- a/drivers/bus/sunxi-rsb.c +++ b/drivers/bus/sunxi-rsb.c @@ -457,7 +457,7 @@ static void regmap_sunxi_rsb_free_ctx(void *context) kfree(ctx); } -static struct regmap_bus regmap_sunxi_rsb = { +static const struct regmap_bus regmap_sunxi_rsb = { .reg_write = regmap_sunxi_rsb_reg_write, .reg_read = regmap_sunxi_rsb_reg_read, .free_context = regmap_sunxi_rsb_free_ctx, diff --git a/drivers/cache/Kconfig b/drivers/cache/Kconfig index 9345ce4976d7..94abd8f632a7 100644 --- a/drivers/cache/Kconfig +++ b/drivers/cache/Kconfig @@ -14,4 +14,13 @@ config SIFIVE_CCACHE help Support for the composable cache controller on SiFive platforms. +config STARFIVE_STARLINK_CACHE + bool "StarFive StarLink Cache controller" + depends on RISCV + depends on ARCH_STARFIVE + select RISCV_DMA_NONCOHERENT + select RISCV_NONSTANDARD_CACHE_OPS + help + Support for the StarLink cache controller IP from StarFive. + endmenu diff --git a/drivers/cache/Makefile b/drivers/cache/Makefile index 7657cff3bd6c..55c5e851034d 100644 --- a/drivers/cache/Makefile +++ b/drivers/cache/Makefile @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_AX45MP_L2_CACHE) += ax45mp_cache.o -obj-$(CONFIG_SIFIVE_CCACHE) += sifive_ccache.o +obj-$(CONFIG_AX45MP_L2_CACHE) += ax45mp_cache.o +obj-$(CONFIG_SIFIVE_CCACHE) += sifive_ccache.o +obj-$(CONFIG_STARFIVE_STARLINK_CACHE) += starfive_starlink_cache.o diff --git a/drivers/cache/starfive_starlink_cache.c b/drivers/cache/starfive_starlink_cache.c new file mode 100644 index 000000000000..24c7d078ca22 --- /dev/null +++ b/drivers/cache/starfive_starlink_cache.c @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Cache Management Operations for StarFive's Starlink cache controller + * + * Copyright (C) 2024 Shanghai StarFive Technology Co., Ltd. 
+ * + * Author: Joshua Yeong <joshua.yeong@starfivetech.com> + */ + +#include <linux/bitfield.h> +#include <linux/cacheflush.h> +#include <linux/iopoll.h> +#include <linux/of_address.h> + +#include <asm/dma-noncoherent.h> + +#define STARLINK_CACHE_FLUSH_START_ADDR 0x0 +#define STARLINK_CACHE_FLUSH_END_ADDR 0x8 +#define STARLINK_CACHE_FLUSH_CTL 0x10 +#define STARLINK_CACHE_ALIGN 0x40 + +#define STARLINK_CACHE_ADDRESS_RANGE_MASK GENMASK(39, 0) +#define STARLINK_CACHE_FLUSH_CTL_MODE_MASK GENMASK(2, 1) +#define STARLINK_CACHE_FLUSH_CTL_ENABLE_MASK BIT(0) + +#define STARLINK_CACHE_FLUSH_CTL_CLEAN_INVALIDATE 0 +#define STARLINK_CACHE_FLUSH_CTL_MAKE_INVALIDATE 1 +#define STARLINK_CACHE_FLUSH_CTL_CLEAN_SHARED 2 +#define STARLINK_CACHE_FLUSH_POLL_DELAY_US 1 +#define STARLINK_CACHE_FLUSH_TIMEOUT_US 5000000 + +static void __iomem *starlink_cache_base; + +static void starlink_cache_flush_complete(void) +{ + volatile void __iomem *ctl = starlink_cache_base + STARLINK_CACHE_FLUSH_CTL; + u64 v; + int ret; + + ret = readq_poll_timeout_atomic(ctl, v, !(v & STARLINK_CACHE_FLUSH_CTL_ENABLE_MASK), + STARLINK_CACHE_FLUSH_POLL_DELAY_US, + STARLINK_CACHE_FLUSH_TIMEOUT_US); + if (ret) + WARN(1, "StarFive Starlink cache flush operation timeout\n"); +} + +static void starlink_cache_dma_cache_wback(phys_addr_t paddr, unsigned long size) +{ + writeq(FIELD_PREP(STARLINK_CACHE_ADDRESS_RANGE_MASK, paddr), + starlink_cache_base + STARLINK_CACHE_FLUSH_START_ADDR); + writeq(FIELD_PREP(STARLINK_CACHE_ADDRESS_RANGE_MASK, paddr + size), + starlink_cache_base + STARLINK_CACHE_FLUSH_END_ADDR); + + mb(); + writeq(FIELD_PREP(STARLINK_CACHE_FLUSH_CTL_MODE_MASK, + STARLINK_CACHE_FLUSH_CTL_CLEAN_SHARED), + starlink_cache_base + STARLINK_CACHE_FLUSH_CTL); + + starlink_cache_flush_complete(); +} + +static void starlink_cache_dma_cache_invalidate(phys_addr_t paddr, unsigned long size) +{ + writeq(FIELD_PREP(STARLINK_CACHE_ADDRESS_RANGE_MASK, paddr), + starlink_cache_base + STARLINK_CACHE_FLUSH_START_ADDR); + writeq(FIELD_PREP(STARLINK_CACHE_ADDRESS_RANGE_MASK, paddr + size), + starlink_cache_base + STARLINK_CACHE_FLUSH_END_ADDR); + + mb(); + writeq(FIELD_PREP(STARLINK_CACHE_FLUSH_CTL_MODE_MASK, + STARLINK_CACHE_FLUSH_CTL_MAKE_INVALIDATE), + starlink_cache_base + STARLINK_CACHE_FLUSH_CTL); + + starlink_cache_flush_complete(); +} + +static void starlink_cache_dma_cache_wback_inv(phys_addr_t paddr, unsigned long size) +{ + writeq(FIELD_PREP(STARLINK_CACHE_ADDRESS_RANGE_MASK, paddr), + starlink_cache_base + STARLINK_CACHE_FLUSH_START_ADDR); + writeq(FIELD_PREP(STARLINK_CACHE_ADDRESS_RANGE_MASK, paddr + size), + starlink_cache_base + STARLINK_CACHE_FLUSH_END_ADDR); + + mb(); + writeq(FIELD_PREP(STARLINK_CACHE_FLUSH_CTL_MODE_MASK, + STARLINK_CACHE_FLUSH_CTL_CLEAN_INVALIDATE), + starlink_cache_base + STARLINK_CACHE_FLUSH_CTL); + + starlink_cache_flush_complete(); +} + +static const struct riscv_nonstd_cache_ops starlink_cache_ops = { + .wback = &starlink_cache_dma_cache_wback, + .inv = &starlink_cache_dma_cache_invalidate, + .wback_inv = &starlink_cache_dma_cache_wback_inv, +}; + +static const struct of_device_id starlink_cache_ids[] = { + { .compatible = "starfive,jh8100-starlink-cache" }, + { /* sentinel */ } +}; + +static int __init starlink_cache_init(void) +{ + struct device_node *np; + u32 block_size; + int ret; + + np = of_find_matching_node(NULL, starlink_cache_ids); + if (!of_device_is_available(np)) + return -ENODEV; + + ret = of_property_read_u32(np, "cache-block-size", &block_size); + if (ret) + return ret; + + if (block_size % 
STARLINK_CACHE_ALIGN) + return -EINVAL; + + starlink_cache_base = of_iomap(np, 0); + if (!starlink_cache_base) + return -ENOMEM; + + riscv_cbom_block_size = block_size; + riscv_noncoherent_supported(); + riscv_noncoherent_register_cache_ops(&starlink_cache_ops); + + return 0; +} +arch_initcall(starlink_cache_init); diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c index ea05d9d67490..0a46b5d49d32 100644 --- a/drivers/cpufreq/qcom-cpufreq-nvmem.c +++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c @@ -191,6 +191,7 @@ static int qcom_cpufreq_kryo_name_version(struct device *cpu_dev, case QCOM_ID_IPQ5312: case QCOM_ID_IPQ5302: case QCOM_ID_IPQ5300: + case QCOM_ID_IPQ5321: case QCOM_ID_IPQ9514: case QCOM_ID_IPQ9550: case QCOM_ID_IPQ9554: diff --git a/drivers/firmware/arm_ffa/Makefile b/drivers/firmware/arm_ffa/Makefile index 9d9f37523200..168990a7e792 100644 --- a/drivers/firmware/arm_ffa/Makefile +++ b/drivers/firmware/arm_ffa/Makefile @@ -2,5 +2,7 @@ ffa-bus-y = bus.o ffa-driver-y = driver.o ffa-transport-$(CONFIG_ARM_FFA_SMCCC) += smccc.o -ffa-module-objs := $(ffa-bus-y) $(ffa-driver-y) $(ffa-transport-y) -obj-$(CONFIG_ARM_FFA_TRANSPORT) = ffa-module.o +ffa-core-objs := $(ffa-bus-y) +ffa-module-objs := $(ffa-driver-y) $(ffa-transport-y) +obj-$(CONFIG_ARM_FFA_TRANSPORT) = ffa-core.o +obj-$(CONFIG_ARM_FFA_TRANSPORT) += ffa-module.o diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c index 2f557e90f2eb..0c83931485f6 100644 --- a/drivers/firmware/arm_ffa/bus.c +++ b/drivers/firmware/arm_ffa/bus.c @@ -30,12 +30,11 @@ static int ffa_device_match(struct device *dev, struct device_driver *drv) while (!uuid_is_null(&id_table->uuid)) { /* * FF-A v1.0 doesn't provide discovery of UUIDs, just the - * partition IDs, so fetch the partitions IDs for this - * id_table UUID and assign the UUID to the device if the - * partition ID matches + * partition IDs, so match it unconditionally here and handle + * it via the installed bus notifier during driver binding. 
*/ if (uuid_is_null(&ffa_dev->uuid)) - ffa_device_match_uuid(ffa_dev, &id_table->uuid); + return 1; if (uuid_equal(&ffa_dev->uuid, &id_table->uuid)) return 1; @@ -50,6 +49,10 @@ static int ffa_device_probe(struct device *dev) struct ffa_driver *ffa_drv = to_ffa_driver(dev->driver); struct ffa_device *ffa_dev = to_ffa_dev(dev); + /* UUID can be still NULL with FF-A v1.0, so just skip probing them */ + if (uuid_is_null(&ffa_dev->uuid)) + return -ENODEV; + return ffa_drv->probe(ffa_dev); } @@ -232,14 +235,21 @@ void ffa_device_unregister(struct ffa_device *ffa_dev) } EXPORT_SYMBOL_GPL(ffa_device_unregister); -int arm_ffa_bus_init(void) +static int __init arm_ffa_bus_init(void) { return bus_register(&ffa_bus_type); } +subsys_initcall(arm_ffa_bus_init); -void arm_ffa_bus_exit(void) +static void __exit arm_ffa_bus_exit(void) { ffa_devices_unregister(); bus_unregister(&ffa_bus_type); ida_destroy(&ffa_bus_id); } +module_exit(arm_ffa_bus_exit); + +MODULE_ALIAS("ffa-core"); +MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>"); +MODULE_DESCRIPTION("ARM FF-A bus"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/arm_ffa/common.h b/drivers/firmware/arm_ffa/common.h index d6eccf1fd3f6..9c6425a81d0d 100644 --- a/drivers/firmware/arm_ffa/common.h +++ b/drivers/firmware/arm_ffa/common.h @@ -14,8 +14,6 @@ typedef struct arm_smccc_1_2_regs ffa_value_t; typedef void (ffa_fn)(ffa_value_t, ffa_value_t *); -int arm_ffa_bus_init(void); -void arm_ffa_bus_exit(void); bool ffa_device_is_valid(struct ffa_device *ffa_dev); void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid); diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c index 1609247cfafc..7ba98c7af2e9 100644 --- a/drivers/firmware/arm_ffa/driver.c +++ b/drivers/firmware/arm_ffa/driver.c @@ -1224,14 +1224,6 @@ void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid) int count, idx; struct ffa_partition_info *pbuf, *tpbuf; - /* - * FF-A v1.1 provides UUID for each partition as part of the discovery - * API, the discovered UUID must be populated in the device's UUID and - * there is no need to copy the same from the driver table. - */ - if (drv_info->version > FFA_VERSION_1_0) - return; - count = ffa_partition_probe(uuid, &pbuf); if (count <= 0) return; @@ -1242,6 +1234,35 @@ void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid) kfree(pbuf); } +static int +ffa_bus_notifier(struct notifier_block *nb, unsigned long action, void *data) +{ + struct device *dev = data; + struct ffa_device *fdev = to_ffa_dev(dev); + + if (action == BUS_NOTIFY_BIND_DRIVER) { + struct ffa_driver *ffa_drv = to_ffa_driver(dev->driver); + const struct ffa_device_id *id_table= ffa_drv->id_table; + + /* + * FF-A v1.1 provides UUID for each partition as part of the + * discovery API, the discovered UUID must be populated in the + * device's UUID and there is no need to workaround by copying + * the same from the driver table. 
+ */ + if (uuid_is_null(&fdev->uuid)) + ffa_device_match_uuid(fdev, &id_table->uuid); + + return NOTIFY_OK; + } + + return NOTIFY_DONE; +} + +static struct notifier_block ffa_bus_nb = { + .notifier_call = ffa_bus_notifier, +}; + static int ffa_setup_partitions(void) { int count, idx, ret; @@ -1250,6 +1271,12 @@ static int ffa_setup_partitions(void) struct ffa_dev_part_info *info; struct ffa_partition_info *pbuf, *tpbuf; + if (drv_info->version == FFA_VERSION_1_0) { + ret = bus_register_notifier(&ffa_bus_type, &ffa_bus_nb); + if (ret) + pr_err("Failed to register FF-A bus notifiers\n"); + } + count = ffa_partition_probe(&uuid_null, &pbuf); if (count <= 0) { pr_info("%s: No partitions found, error %d\n", __func__, count); @@ -1261,7 +1288,7 @@ static int ffa_setup_partitions(void) import_uuid(&uuid, (u8 *)tpbuf->uuid); /* Note that if the UUID will be uuid_null, that will require - * ffa_device_match() to find the UUID of this partition id + * ffa_bus_notifier() to find the UUID of this partition id * with help of ffa_device_match_uuid(). FF-A v1.1 and above * provides UUID here for each partition as part of the * discovery API and the same is passed. @@ -1581,14 +1608,9 @@ static int __init ffa_init(void) if (ret) return ret; - ret = arm_ffa_bus_init(); - if (ret) - return ret; - drv_info = kzalloc(sizeof(*drv_info), GFP_KERNEL); if (!drv_info) { - ret = -ENOMEM; - goto ffa_bus_exit; + return -ENOMEM; } ret = ffa_version_check(&drv_info->version); @@ -1649,11 +1671,9 @@ free_pages: free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE); free_drv_info: kfree(drv_info); -ffa_bus_exit: - arm_ffa_bus_exit(); return ret; } -subsys_initcall(ffa_init); +module_init(ffa_init); static void __exit ffa_exit(void) { @@ -1663,7 +1683,6 @@ static void __exit ffa_exit(void) free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE); free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE); kfree(drv_info); - arm_ffa_bus_exit(); } module_exit(ffa_exit); diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h index b5ac25dbc1ca..4b8c5250cdb5 100644 --- a/drivers/firmware/arm_scmi/common.h +++ b/drivers/firmware/arm_scmi/common.h @@ -326,6 +326,7 @@ void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem); bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem, struct scmi_xfer *xfer); bool shmem_channel_free(struct scmi_shared_mem __iomem *shmem); +bool shmem_channel_intr_enabled(struct scmi_shared_mem __iomem *shmem); /* declarations for message passing transports */ struct scmi_msg_payld; diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c index 615a3b2ad83d..0219a12e3209 100644 --- a/drivers/firmware/arm_scmi/mailbox.c +++ b/drivers/firmware/arm_scmi/mailbox.c @@ -21,6 +21,7 @@ * @cl: Mailbox Client * @chan: Transmit/Receive mailbox uni/bi-directional channel * @chan_receiver: Optional Receiver mailbox unidirectional channel + * @chan_platform_receiver: Optional Platform Receiver mailbox unidirectional channel * @cinfo: SCMI channel info * @shmem: Transmit/Receive shared memory area */ @@ -28,6 +29,7 @@ struct scmi_mailbox { struct mbox_client cl; struct mbox_chan *chan; struct mbox_chan *chan_receiver; + struct mbox_chan *chan_platform_receiver; struct scmi_chan_info *cinfo; struct scmi_shared_mem __iomem *shmem; }; @@ -91,6 +93,8 @@ static bool mailbox_chan_available(struct device_node *of_node, int idx) * for replies on the a2p channel. Set as zero if not present. * @p2a_chan: A reference to the optional p2a channel. 
* Set as zero if not present. + * @p2a_rx_chan: A reference to the optional p2a completion channel. + * Set as zero if not present. * * At first, validate the transport configuration as described in terms of * 'mboxes' and 'shmem', then determin which mailbox channel indexes are @@ -98,8 +102,8 @@ static bool mailbox_chan_available(struct device_node *of_node, int idx) * * Return: 0 on Success or error */ -static int mailbox_chan_validate(struct device *cdev, - int *a2p_rx_chan, int *p2a_chan) +static int mailbox_chan_validate(struct device *cdev, int *a2p_rx_chan, + int *p2a_chan, int *p2a_rx_chan) { int num_mb, num_sh, ret = 0; struct device_node *np = cdev->of_node; @@ -109,8 +113,9 @@ static int mailbox_chan_validate(struct device *cdev, dev_dbg(cdev, "Found %d mboxes and %d shmems !\n", num_mb, num_sh); /* Bail out if mboxes and shmem descriptors are inconsistent */ - if (num_mb <= 0 || num_sh <= 0 || num_sh > 2 || num_mb > 3 || - (num_mb == 1 && num_sh != 1) || (num_mb == 3 && num_sh != 2)) { + if (num_mb <= 0 || num_sh <= 0 || num_sh > 2 || num_mb > 4 || + (num_mb == 1 && num_sh != 1) || (num_mb == 3 && num_sh != 2) || + (num_mb == 4 && num_sh != 2)) { dev_warn(cdev, "Invalid channel descriptor for '%s' - mbs:%d shm:%d\n", of_node_full_name(np), num_mb, num_sh); @@ -139,6 +144,7 @@ static int mailbox_chan_validate(struct device *cdev, case 1: *a2p_rx_chan = 0; *p2a_chan = 0; + *p2a_rx_chan = 0; break; case 2: if (num_sh == 2) { @@ -148,10 +154,17 @@ static int mailbox_chan_validate(struct device *cdev, *a2p_rx_chan = 1; *p2a_chan = 0; } + *p2a_rx_chan = 0; break; case 3: *a2p_rx_chan = 1; *p2a_chan = 2; + *p2a_rx_chan = 0; + break; + case 4: + *a2p_rx_chan = 1; + *p2a_chan = 2; + *p2a_rx_chan = 3; break; } } @@ -166,12 +179,12 @@ static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, struct device *cdev = cinfo->dev; struct scmi_mailbox *smbox; struct device_node *shmem; - int ret, a2p_rx_chan, p2a_chan, idx = tx ? 0 : 1; + int ret, a2p_rx_chan, p2a_chan, p2a_rx_chan, idx = tx ? 
0 : 1; struct mbox_client *cl; resource_size_t size; struct resource res; - ret = mailbox_chan_validate(cdev, &a2p_rx_chan, &p2a_chan); + ret = mailbox_chan_validate(cdev, &a2p_rx_chan, &p2a_chan, &p2a_rx_chan); if (ret) return ret; @@ -229,6 +242,17 @@ static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, } } + if (!tx && p2a_rx_chan) { + smbox->chan_platform_receiver = mbox_request_channel(cl, p2a_rx_chan); + if (IS_ERR(smbox->chan_platform_receiver)) { + ret = PTR_ERR(smbox->chan_platform_receiver); + if (ret != -EPROBE_DEFER) + dev_err(cdev, "failed to request SCMI P2A Receiver mailbox\n"); + return ret; + } + } + + cinfo->transport_info = smbox; smbox->cinfo = cinfo; @@ -243,9 +267,11 @@ static int mailbox_chan_free(int id, void *p, void *data) if (smbox && !IS_ERR(smbox->chan)) { mbox_free_channel(smbox->chan); mbox_free_channel(smbox->chan_receiver); + mbox_free_channel(smbox->chan_platform_receiver); cinfo->transport_info = NULL; smbox->chan = NULL; smbox->chan_receiver = NULL; + smbox->chan_platform_receiver = NULL; smbox->cinfo = NULL; } @@ -300,8 +326,27 @@ static void mailbox_fetch_notification(struct scmi_chan_info *cinfo, static void mailbox_clear_channel(struct scmi_chan_info *cinfo) { struct scmi_mailbox *smbox = cinfo->transport_info; + struct mbox_chan *intr_chan; + int ret; shmem_clear_channel(smbox->shmem); + + if (!shmem_channel_intr_enabled(smbox->shmem)) + return; + + if (smbox->chan_platform_receiver) + intr_chan = smbox->chan_platform_receiver; + else if (smbox->chan) + intr_chan = smbox->chan; + else + return; + + ret = mbox_send_message(intr_chan, NULL); + /* mbox_send_message returns non-negative value on success, so reset */ + if (ret > 0) + ret = 0; + + mbox_client_txdone(intr_chan, ret); } static bool diff --git a/drivers/firmware/arm_scmi/scmi_power_control.c b/drivers/firmware/arm_scmi/scmi_power_control.c index 6eb7d2a4b6b1..21f467a92942 100644 --- a/drivers/firmware/arm_scmi/scmi_power_control.c +++ b/drivers/firmware/arm_scmi/scmi_power_control.c @@ -50,6 +50,7 @@ #include <linux/reboot.h> #include <linux/scmi_protocol.h> #include <linux/slab.h> +#include <linux/suspend.h> #include <linux/time64.h> #include <linux/timer.h> #include <linux/types.h> @@ -78,6 +79,7 @@ enum scmi_syspower_state { * @reboot_nb: A notifier_block optionally used to track reboot progress * @forceful_work: A worker used to trigger a forceful transition once a * graceful has timed out. 
+ * @suspend_work: A worker used to trigger system suspend */ struct scmi_syspower_conf { struct device *dev; @@ -90,6 +92,7 @@ struct scmi_syspower_conf { struct notifier_block reboot_nb; struct delayed_work forceful_work; + struct work_struct suspend_work; }; #define userspace_nb_to_sconf(x) \ @@ -249,6 +252,9 @@ static void scmi_request_graceful_transition(struct scmi_syspower_conf *sc, case SCMI_SYSTEM_WARMRESET: orderly_reboot(); break; + case SCMI_SYSTEM_SUSPEND: + schedule_work(&sc->suspend_work); + break; default: break; } @@ -277,7 +283,8 @@ static int scmi_userspace_notifier(struct notifier_block *nb, struct scmi_system_power_state_notifier_report *er = data; struct scmi_syspower_conf *sc = userspace_nb_to_sconf(nb); - if (er->system_state >= SCMI_SYSTEM_POWERUP) { + if (er->system_state >= SCMI_SYSTEM_MAX || + er->system_state == SCMI_SYSTEM_POWERUP) { dev_err(sc->dev, "Ignoring unsupported system_state: 0x%X\n", er->system_state); return NOTIFY_DONE; @@ -315,6 +322,16 @@ static int scmi_userspace_notifier(struct notifier_block *nb, return NOTIFY_OK; } +static void scmi_suspend_work_func(struct work_struct *work) +{ + struct scmi_syspower_conf *sc = + container_of(work, struct scmi_syspower_conf, suspend_work); + + pm_suspend(PM_SUSPEND_MEM); + + sc->state = SCMI_SYSPOWER_IDLE; +} + static int scmi_syspower_probe(struct scmi_device *sdev) { int ret; @@ -338,6 +355,8 @@ static int scmi_syspower_probe(struct scmi_device *sdev) sc->userspace_nb.notifier_call = &scmi_userspace_notifier; sc->dev = &sdev->dev; + INIT_WORK(&sc->suspend_work, scmi_suspend_work_func); + return handle->notify_ops->devm_event_notifier_register(sdev, SCMI_PROTOCOL_SYSTEM, SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER, diff --git a/drivers/firmware/arm_scmi/shmem.c b/drivers/firmware/arm_scmi/shmem.c index 8bf495bcad09..b74e5a740f2c 100644 --- a/drivers/firmware/arm_scmi/shmem.c +++ b/drivers/firmware/arm_scmi/shmem.c @@ -128,3 +128,8 @@ bool shmem_channel_free(struct scmi_shared_mem __iomem *shmem) return (ioread32(&shmem->channel_status) & SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE); } + +bool shmem_channel_intr_enabled(struct scmi_shared_mem __iomem *shmem) +{ + return ioread32(&shmem->flags) & SCMI_SHMEM_FLAG_INTR_ENABLED; +} diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c index 5d7f62fe1d5f..f25a9746249b 100644 --- a/drivers/firmware/meson/meson_sm.c +++ b/drivers/firmware/meson/meson_sm.c @@ -340,4 +340,5 @@ static struct platform_driver meson_sm_driver = { }, }; module_platform_driver_probe(meson_sm_driver, meson_sm_probe); +MODULE_DESCRIPTION("Amlogic Secure Monitor driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/firmware/microchip/mpfs-auto-update.c b/drivers/firmware/microchip/mpfs-auto-update.c index 835a19a7a3a0..30de47895b1c 100644 --- a/drivers/firmware/microchip/mpfs-auto-update.c +++ b/drivers/firmware/microchip/mpfs-auto-update.c @@ -9,6 +9,7 @@ * * Author: Conor Dooley <conor.dooley@microchip.com> */ +#include <linux/cleanup.h> #include <linux/debugfs.h> #include <linux/firmware.h> #include <linux/math.h> @@ -71,8 +72,9 @@ #define AUTO_UPDATE_UPGRADE_DIRECTORY (AUTO_UPDATE_DIRECTORY_WIDTH * AUTO_UPDATE_UPGRADE_INDEX) #define AUTO_UPDATE_BLANK_DIRECTORY (AUTO_UPDATE_DIRECTORY_WIDTH * AUTO_UPDATE_BLANK_INDEX) #define AUTO_UPDATE_DIRECTORY_SIZE SZ_1K -#define AUTO_UPDATE_RESERVED_SIZE SZ_1M -#define AUTO_UPDATE_BITSTREAM_BASE (AUTO_UPDATE_DIRECTORY_SIZE + AUTO_UPDATE_RESERVED_SIZE) +#define AUTO_UPDATE_INFO_BASE AUTO_UPDATE_DIRECTORY_SIZE +#define 
AUTO_UPDATE_INFO_SIZE SZ_1M +#define AUTO_UPDATE_BITSTREAM_BASE (AUTO_UPDATE_DIRECTORY_SIZE + AUTO_UPDATE_INFO_SIZE) #define AUTO_UPDATE_TIMEOUT_MS 60000 @@ -86,6 +88,17 @@ struct mpfs_auto_update_priv { bool cancel_request; }; +static bool mpfs_auto_update_is_bitstream_info(const u8 *data, u32 size) +{ + if (size < 4) + return false; + + if (data[0] == 0x4d && data[1] == 0x43 && data[2] == 0x48 && data[3] == 0x50) + return true; + + return false; +} + static enum fw_upload_err mpfs_auto_update_prepare(struct fw_upload *fw_uploader, const u8 *data, u32 size) { @@ -162,28 +175,17 @@ static enum fw_upload_err mpfs_auto_update_poll_complete(struct fw_upload *fw_up static int mpfs_auto_update_verify_image(struct fw_upload *fw_uploader) { struct mpfs_auto_update_priv *priv = fw_uploader->dd_handle; - struct mpfs_mss_response *response; - struct mpfs_mss_msg *message; - u32 *response_msg; + u32 *response_msg __free(kfree) = + kzalloc(AUTO_UPDATE_FEATURE_RESP_SIZE * sizeof(*response_msg), GFP_KERNEL); + struct mpfs_mss_response *response __free(kfree) = + kzalloc(sizeof(struct mpfs_mss_response), GFP_KERNEL); + struct mpfs_mss_msg *message __free(kfree) = + kzalloc(sizeof(struct mpfs_mss_msg), GFP_KERNEL); int ret; - response_msg = devm_kzalloc(priv->dev, AUTO_UPDATE_FEATURE_RESP_SIZE * sizeof(*response_msg), - GFP_KERNEL); - if (!response_msg) + if (!response_msg || !response || !message) return -ENOMEM; - response = devm_kzalloc(priv->dev, sizeof(struct mpfs_mss_response), GFP_KERNEL); - if (!response) { - ret = -ENOMEM; - goto free_response_msg; - } - - message = devm_kzalloc(priv->dev, sizeof(struct mpfs_mss_msg), GFP_KERNEL); - if (!message) { - ret = -ENOMEM; - goto free_response; - } - /* * The system controller can verify that an image in the flash is valid. * Rather than duplicate the check in this driver, call the relevant @@ -205,31 +207,25 @@ static int mpfs_auto_update_verify_image(struct fw_upload *fw_uploader) ret = mpfs_blocking_transaction(priv->sys_controller, message); if (ret | response->resp_status) { dev_warn(priv->dev, "Verification of Upgrade Image failed!\n"); - ret = ret ? ret : -EBADMSG; - goto free_message; + return ret ? 
ret : -EBADMSG; } dev_info(priv->dev, "Verification of Upgrade Image passed!\n"); -free_message: - devm_kfree(priv->dev, message); -free_response: - devm_kfree(priv->dev, response); -free_response_msg: - devm_kfree(priv->dev, response_msg); - - return ret; + return 0; } -static int mpfs_auto_update_set_image_address(struct mpfs_auto_update_priv *priv, char *buffer, +static int mpfs_auto_update_set_image_address(struct mpfs_auto_update_priv *priv, u32 image_address, loff_t directory_address) { struct erase_info erase; - size_t erase_size = AUTO_UPDATE_DIRECTORY_SIZE; + size_t erase_size = round_up(AUTO_UPDATE_DIRECTORY_SIZE, (u64)priv->flash->erasesize); size_t bytes_written = 0, bytes_read = 0; + char *buffer __free(kfree) = kzalloc(erase_size, GFP_KERNEL); int ret; - erase_size = round_up(erase_size, (u64)priv->flash->erasesize); + if (!buffer) + return -ENOMEM; erase.addr = AUTO_UPDATE_DIRECTORY_BASE; erase.len = erase_size; @@ -275,7 +271,7 @@ static int mpfs_auto_update_set_image_address(struct mpfs_auto_update_priv *priv return ret; if (bytes_written != erase_size) - return ret; + return -EIO; return 0; } @@ -285,26 +281,36 @@ static int mpfs_auto_update_write_bitstream(struct fw_upload *fw_uploader, const { struct mpfs_auto_update_priv *priv = fw_uploader->dd_handle; struct erase_info erase; - char *buffer; loff_t directory_address = AUTO_UPDATE_UPGRADE_DIRECTORY; size_t erase_size = AUTO_UPDATE_DIRECTORY_SIZE; size_t bytes_written = 0; + bool is_info = mpfs_auto_update_is_bitstream_info(data, size); u32 image_address; int ret; erase_size = round_up(erase_size, (u64)priv->flash->erasesize); - image_address = AUTO_UPDATE_BITSTREAM_BASE + - AUTO_UPDATE_UPGRADE_INDEX * priv->size_per_bitstream; - - buffer = devm_kzalloc(priv->dev, erase_size, GFP_KERNEL); - if (!buffer) - return -ENOMEM; + if (is_info) + image_address = AUTO_UPDATE_INFO_BASE; + else + image_address = AUTO_UPDATE_BITSTREAM_BASE + + AUTO_UPDATE_UPGRADE_INDEX * priv->size_per_bitstream; - ret = mpfs_auto_update_set_image_address(priv, buffer, image_address, directory_address); - if (ret) { - dev_err(priv->dev, "failed to set image address in the SPI directory: %d\n", ret); - goto out; + /* + * For bitstream info, the descriptor is written to a fixed offset, + * so there is no need to set the image address. + */ + if (!is_info) { + ret = mpfs_auto_update_set_image_address(priv, image_address, directory_address); + if (ret) { + dev_err(priv->dev, "failed to set image address in the SPI directory: %d\n", ret); + return ret; + } + } else { + if (size > AUTO_UPDATE_INFO_SIZE) { + dev_err(priv->dev, "bitstream info exceeds permitted size\n"); + return -ENOSPC; + } } /* @@ -318,7 +324,7 @@ static int mpfs_auto_update_write_bitstream(struct fw_upload *fw_uploader, const dev_info(priv->dev, "Erasing the flash at address (0x%x)\n", image_address); ret = mtd_erase(priv->flash, &erase); if (ret) - goto out; + return ret; /* * No parsing etc of the bitstream is required. 
The system controller @@ -328,18 +334,15 @@ static int mpfs_auto_update_write_bitstream(struct fw_upload *fw_uploader, const dev_info(priv->dev, "Writing the image to the flash at address (0x%x)\n", image_address); ret = mtd_write(priv->flash, (loff_t)image_address, size, &bytes_written, data); if (ret) - goto out; + return ret; - if (bytes_written != size) { - ret = -EIO; - goto out; - } + if (bytes_written != size) + return -EIO; *written = bytes_written; + dev_info(priv->dev, "Wrote 0x%zx bytes to the flash\n", bytes_written); -out: - devm_kfree(priv->dev, buffer); - return ret; + return 0; } static enum fw_upload_err mpfs_auto_update_write(struct fw_upload *fw_uploader, const u8 *data, @@ -362,6 +365,9 @@ static enum fw_upload_err mpfs_auto_update_write(struct fw_upload *fw_uploader, goto out; } + if (mpfs_auto_update_is_bitstream_info(data, size)) + goto out; + ret = mpfs_auto_update_verify_image(fw_uploader); if (ret) err = FW_UPLOAD_ERR_FW_INVALID; @@ -381,23 +387,15 @@ static const struct fw_upload_ops mpfs_auto_update_ops = { static int mpfs_auto_update_available(struct mpfs_auto_update_priv *priv) { - struct mpfs_mss_response *response; - struct mpfs_mss_msg *message; - u32 *response_msg; + u32 *response_msg __free(kfree) = + kzalloc(AUTO_UPDATE_FEATURE_RESP_SIZE * sizeof(*response_msg), GFP_KERNEL); + struct mpfs_mss_response *response __free(kfree) = + kzalloc(sizeof(struct mpfs_mss_response), GFP_KERNEL); + struct mpfs_mss_msg *message __free(kfree) = + kzalloc(sizeof(struct mpfs_mss_msg), GFP_KERNEL); int ret; - response_msg = devm_kzalloc(priv->dev, - AUTO_UPDATE_FEATURE_RESP_SIZE * sizeof(*response_msg), - GFP_KERNEL); - if (!response_msg) - return -ENOMEM; - - response = devm_kzalloc(priv->dev, sizeof(struct mpfs_mss_response), GFP_KERNEL); - if (!response) - return -ENOMEM; - - message = devm_kzalloc(priv->dev, sizeof(struct mpfs_mss_msg), GFP_KERNEL); - if (!message) + if (!response_msg || !response || !message) return -ENOMEM; /* diff --git a/drivers/firmware/qcom/Kconfig b/drivers/firmware/qcom/Kconfig index 3f05d9854ddf..7f6eb4174734 100644 --- a/drivers/firmware/qcom/Kconfig +++ b/drivers/firmware/qcom/Kconfig @@ -7,8 +7,39 @@ menu "Qualcomm firmware drivers" config QCOM_SCM + select QCOM_TZMEM tristate +config QCOM_TZMEM + tristate + select GENERIC_ALLOCATOR + +choice + prompt "TrustZone interface memory allocator mode" + default QCOM_TZMEM_MODE_GENERIC + help + Selects the mode of the memory allocator providing memory buffers of + suitable format for sharing with the TrustZone. If in doubt, select + 'Generic'. + +config QCOM_TZMEM_MODE_GENERIC + bool "Generic" + help + Use the generic allocator mode. The memory is page-aligned, non-cachable + and physically contiguous. + +config QCOM_TZMEM_MODE_SHMBRIDGE + bool "SHM Bridge" + help + Use Qualcomm Shared Memory Bridge. The memory has the same alignment as + in the 'Generic' allocator but is also explicitly marked as an SHM Bridge + buffer. + + With this selected, all buffers passed to the TrustZone must be allocated + using the TZMem allocator or else the TrustZone will refuse to use them. 
+ +endchoice + config QCOM_SCM_DOWNLOAD_MODE_DEFAULT bool "Qualcomm download mode enabled by default" depends on QCOM_SCM diff --git a/drivers/firmware/qcom/Makefile b/drivers/firmware/qcom/Makefile index c9f12ee8224a..0be40a1abc13 100644 --- a/drivers/firmware/qcom/Makefile +++ b/drivers/firmware/qcom/Makefile @@ -5,5 +5,6 @@ obj-$(CONFIG_QCOM_SCM) += qcom-scm.o qcom-scm-objs += qcom_scm.o qcom_scm-smc.o qcom_scm-legacy.o +obj-$(CONFIG_QCOM_TZMEM) += qcom_tzmem.o obj-$(CONFIG_QCOM_QSEECOM) += qcom_qseecom.o obj-$(CONFIG_QCOM_QSEECOM_UEFISECAPP) += qcom_qseecom_uefisecapp.o diff --git a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c index bc550ad0dbe0..6fefa4fe80e8 100644 --- a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c +++ b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c @@ -13,11 +13,14 @@ #include <linux/mutex.h> #include <linux/of.h> #include <linux/platform_device.h> +#include <linux/sizes.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/ucs2_string.h> #include <linux/firmware/qcom/qcom_qseecom.h> +#include <linux/firmware/qcom/qcom_scm.h> +#include <linux/firmware/qcom/qcom_tzmem.h> /* -- Qualcomm "uefisecapp" interface definitions. -------------------------- */ @@ -272,6 +275,7 @@ struct qsee_rsp_uefi_query_variable_info { struct qcuefi_client { struct qseecom_client *client; struct efivars efivars; + struct qcom_tzmem_pool *mempool; }; static struct device *qcuefi_dev(struct qcuefi_client *qcuefi) @@ -293,12 +297,11 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e { struct qsee_req_uefi_get_variable *req_data; struct qsee_rsp_uefi_get_variable *rsp_data; + void *cmd_buf __free(qcom_tzmem) = NULL; unsigned long buffer_size = *data_size; - efi_status_t efi_status = EFI_SUCCESS; unsigned long name_length; - dma_addr_t cmd_buf_dma; + efi_status_t efi_status; size_t cmd_buf_size; - void *cmd_buf; size_t guid_offs; size_t name_offs; size_t req_size; @@ -333,11 +336,9 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e __reqdata_offs(rsp_size, &rsp_offs) ); - cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL); - if (!cmd_buf) { - efi_status = EFI_OUT_OF_RESOURCES; - goto out; - } + cmd_buf = qcom_tzmem_alloc(qcuefi->mempool, cmd_buf_size, GFP_KERNEL); + if (!cmd_buf) + return EFI_OUT_OF_RESOURCES; req_data = cmd_buf + req_offs; rsp_data = cmd_buf + rsp_offs; @@ -351,30 +352,22 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e req_data->length = req_size; status = ucs2_strscpy(((void *)req_data) + req_data->name_offset, name, name_length); - if (status < 0) { - efi_status = EFI_INVALID_PARAMETER; - goto out_free; - } + if (status < 0) + return EFI_INVALID_PARAMETER; memcpy(((void *)req_data) + req_data->guid_offset, guid, req_data->guid_size); status = qcom_qseecom_app_send(qcuefi->client, - cmd_buf_dma + req_offs, req_size, - cmd_buf_dma + rsp_offs, rsp_size); - if (status) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + cmd_buf + req_offs, req_size, + cmd_buf + rsp_offs, rsp_size); + if (status) + return EFI_DEVICE_ERROR; - if (rsp_data->command_id != QSEE_CMD_UEFI_GET_VARIABLE) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + if (rsp_data->command_id != QSEE_CMD_UEFI_GET_VARIABLE) + return EFI_DEVICE_ERROR; - if (rsp_data->length < sizeof(*rsp_data)) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + if (rsp_data->length < sizeof(*rsp_data)) + return 
EFI_DEVICE_ERROR; if (rsp_data->status) { dev_dbg(qcuefi_dev(qcuefi), "%s: uefisecapp error: 0x%x\n", @@ -388,18 +381,14 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e *attributes = rsp_data->attributes; } - goto out_free; + return qsee_uefi_status_to_efi(rsp_data->status); } - if (rsp_data->length > rsp_size) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + if (rsp_data->length > rsp_size) + return EFI_DEVICE_ERROR; - if (rsp_data->data_offset + rsp_data->data_size > rsp_data->length) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + if (rsp_data->data_offset + rsp_data->data_size > rsp_data->length) + return EFI_DEVICE_ERROR; /* * Note: We need to set attributes and data size even if the buffer is @@ -422,22 +411,15 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e if (attributes) *attributes = rsp_data->attributes; - if (buffer_size == 0 && !data) { - efi_status = EFI_SUCCESS; - goto out_free; - } + if (buffer_size == 0 && !data) + return EFI_SUCCESS; - if (buffer_size < rsp_data->data_size) { - efi_status = EFI_BUFFER_TOO_SMALL; - goto out_free; - } + if (buffer_size < rsp_data->data_size) + return EFI_BUFFER_TOO_SMALL; memcpy(data, ((void *)rsp_data) + rsp_data->data_offset, rsp_data->data_size); -out_free: - qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma); -out: - return efi_status; + return EFI_SUCCESS; } static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const efi_char16_t *name, @@ -446,11 +428,9 @@ static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const e { struct qsee_req_uefi_set_variable *req_data; struct qsee_rsp_uefi_set_variable *rsp_data; - efi_status_t efi_status = EFI_SUCCESS; + void *cmd_buf __free(qcom_tzmem) = NULL; unsigned long name_length; - dma_addr_t cmd_buf_dma; size_t cmd_buf_size; - void *cmd_buf; size_t name_offs; size_t guid_offs; size_t data_offs; @@ -486,11 +466,9 @@ static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const e __reqdata_offs(sizeof(*rsp_data), &rsp_offs) ); - cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL); - if (!cmd_buf) { - efi_status = EFI_OUT_OF_RESOURCES; - goto out; - } + cmd_buf = qcom_tzmem_alloc(qcuefi->mempool, cmd_buf_size, GFP_KERNEL); + if (!cmd_buf) + return EFI_OUT_OF_RESOURCES; req_data = cmd_buf + req_offs; rsp_data = cmd_buf + rsp_offs; @@ -506,10 +484,8 @@ static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const e req_data->length = req_size; status = ucs2_strscpy(((void *)req_data) + req_data->name_offset, name, name_length); - if (status < 0) { - efi_status = EFI_INVALID_PARAMETER; - goto out_free; - } + if (status < 0) + return EFI_INVALID_PARAMETER; memcpy(((void *)req_data) + req_data->guid_offset, guid, req_data->guid_size); @@ -517,33 +493,24 @@ static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const e memcpy(((void *)req_data) + req_data->data_offset, data, req_data->data_size); status = qcom_qseecom_app_send(qcuefi->client, - cmd_buf_dma + req_offs, req_size, - cmd_buf_dma + rsp_offs, sizeof(*rsp_data)); - if (status) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + cmd_buf + req_offs, req_size, + cmd_buf + rsp_offs, sizeof(*rsp_data)); + if (status) + return EFI_DEVICE_ERROR; - if (rsp_data->command_id != QSEE_CMD_UEFI_SET_VARIABLE) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + if (rsp_data->command_id != QSEE_CMD_UEFI_SET_VARIABLE) + 
return EFI_DEVICE_ERROR; - if (rsp_data->length != sizeof(*rsp_data)) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + if (rsp_data->length != sizeof(*rsp_data)) + return EFI_DEVICE_ERROR; if (rsp_data->status) { dev_dbg(qcuefi_dev(qcuefi), "%s: uefisecapp error: 0x%x\n", __func__, rsp_data->status); - efi_status = qsee_uefi_status_to_efi(rsp_data->status); + return qsee_uefi_status_to_efi(rsp_data->status); } -out_free: - qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma); -out: - return efi_status; + return EFI_SUCCESS; } static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi, @@ -552,10 +519,9 @@ static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi, { struct qsee_req_uefi_get_next_variable *req_data; struct qsee_rsp_uefi_get_next_variable *rsp_data; - efi_status_t efi_status = EFI_SUCCESS; - dma_addr_t cmd_buf_dma; + void *cmd_buf __free(qcom_tzmem) = NULL; + efi_status_t efi_status; size_t cmd_buf_size; - void *cmd_buf; size_t guid_offs; size_t name_offs; size_t req_size; @@ -587,11 +553,9 @@ static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi, __reqdata_offs(rsp_size, &rsp_offs) ); - cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL); - if (!cmd_buf) { - efi_status = EFI_OUT_OF_RESOURCES; - goto out; - } + cmd_buf = qcom_tzmem_alloc(qcuefi->mempool, cmd_buf_size, GFP_KERNEL); + if (!cmd_buf) + return EFI_OUT_OF_RESOURCES; req_data = cmd_buf + req_offs; rsp_data = cmd_buf + rsp_offs; @@ -606,28 +570,20 @@ static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi, memcpy(((void *)req_data) + req_data->guid_offset, guid, req_data->guid_size); status = ucs2_strscpy(((void *)req_data) + req_data->name_offset, name, *name_size / sizeof(*name)); - if (status < 0) { - efi_status = EFI_INVALID_PARAMETER; - goto out_free; - } + if (status < 0) + return EFI_INVALID_PARAMETER; status = qcom_qseecom_app_send(qcuefi->client, - cmd_buf_dma + req_offs, req_size, - cmd_buf_dma + rsp_offs, rsp_size); - if (status) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + cmd_buf + req_offs, req_size, + cmd_buf + rsp_offs, rsp_size); + if (status) + return EFI_DEVICE_ERROR; - if (rsp_data->command_id != QSEE_CMD_UEFI_GET_NEXT_VARIABLE) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + if (rsp_data->command_id != QSEE_CMD_UEFI_GET_NEXT_VARIABLE) + return EFI_DEVICE_ERROR; - if (rsp_data->length < sizeof(*rsp_data)) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + if (rsp_data->length < sizeof(*rsp_data)) + return EFI_DEVICE_ERROR; if (rsp_data->status) { dev_dbg(qcuefi_dev(qcuefi), "%s: uefisecapp error: 0x%x\n", @@ -642,53 +598,40 @@ static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi, if (efi_status == EFI_BUFFER_TOO_SMALL) *name_size = rsp_data->name_size; - goto out_free; + return efi_status; } - if (rsp_data->length > rsp_size) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + if (rsp_data->length > rsp_size) + return EFI_DEVICE_ERROR; - if (rsp_data->name_offset + rsp_data->name_size > rsp_data->length) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + if (rsp_data->name_offset + rsp_data->name_size > rsp_data->length) + return EFI_DEVICE_ERROR; - if (rsp_data->guid_offset + rsp_data->guid_size > rsp_data->length) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + if (rsp_data->guid_offset + rsp_data->guid_size > rsp_data->length) + return EFI_DEVICE_ERROR; if (rsp_data->name_size > 
*name_size) { *name_size = rsp_data->name_size; - efi_status = EFI_BUFFER_TOO_SMALL; - goto out_free; + return EFI_BUFFER_TOO_SMALL; } - if (rsp_data->guid_size != sizeof(*guid)) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + if (rsp_data->guid_size != sizeof(*guid)) + return EFI_DEVICE_ERROR; memcpy(guid, ((void *)rsp_data) + rsp_data->guid_offset, rsp_data->guid_size); status = ucs2_strscpy(name, ((void *)rsp_data) + rsp_data->name_offset, rsp_data->name_size / sizeof(*name)); *name_size = rsp_data->name_size; - if (status < 0) { + if (status < 0) /* * Return EFI_DEVICE_ERROR here because the buffer size should * have already been validated above, causing this function to * bail with EFI_BUFFER_TOO_SMALL. */ - efi_status = EFI_DEVICE_ERROR; - } + return EFI_DEVICE_ERROR; -out_free: - qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma); -out: - return efi_status; + return EFI_SUCCESS; } static efi_status_t qsee_uefi_query_variable_info(struct qcuefi_client *qcuefi, u32 attr, @@ -697,10 +640,8 @@ static efi_status_t qsee_uefi_query_variable_info(struct qcuefi_client *qcuefi, { struct qsee_req_uefi_query_variable_info *req_data; struct qsee_rsp_uefi_query_variable_info *rsp_data; - efi_status_t efi_status = EFI_SUCCESS; - dma_addr_t cmd_buf_dma; + void *cmd_buf __free(qcom_tzmem) = NULL; size_t cmd_buf_size; - void *cmd_buf; size_t req_offs; size_t rsp_offs; int status; @@ -710,11 +651,9 @@ static efi_status_t qsee_uefi_query_variable_info(struct qcuefi_client *qcuefi, __reqdata_offs(sizeof(*rsp_data), &rsp_offs) ); - cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL); - if (!cmd_buf) { - efi_status = EFI_OUT_OF_RESOURCES; - goto out; - } + cmd_buf = qcom_tzmem_alloc(qcuefi->mempool, cmd_buf_size, GFP_KERNEL); + if (!cmd_buf) + return EFI_OUT_OF_RESOURCES; req_data = cmd_buf + req_offs; rsp_data = cmd_buf + rsp_offs; @@ -724,28 +663,21 @@ static efi_status_t qsee_uefi_query_variable_info(struct qcuefi_client *qcuefi, req_data->length = sizeof(*req_data); status = qcom_qseecom_app_send(qcuefi->client, - cmd_buf_dma + req_offs, sizeof(*req_data), - cmd_buf_dma + rsp_offs, sizeof(*rsp_data)); - if (status) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + cmd_buf + req_offs, sizeof(*req_data), + cmd_buf + rsp_offs, sizeof(*rsp_data)); + if (status) + return EFI_DEVICE_ERROR; - if (rsp_data->command_id != QSEE_CMD_UEFI_QUERY_VARIABLE_INFO) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + if (rsp_data->command_id != QSEE_CMD_UEFI_QUERY_VARIABLE_INFO) + return EFI_DEVICE_ERROR; - if (rsp_data->length != sizeof(*rsp_data)) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + if (rsp_data->length != sizeof(*rsp_data)) + return EFI_DEVICE_ERROR; if (rsp_data->status) { dev_dbg(qcuefi_dev(qcuefi), "%s: uefisecapp error: 0x%x\n", __func__, rsp_data->status); - efi_status = qsee_uefi_status_to_efi(rsp_data->status); - goto out_free; + return qsee_uefi_status_to_efi(rsp_data->status); } if (storage_space) @@ -757,10 +689,7 @@ static efi_status_t qsee_uefi_query_variable_info(struct qcuefi_client *qcuefi, if (max_variable_size) *max_variable_size = rsp_data->max_variable_size; -out_free: - qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma); -out: - return efi_status; + return EFI_SUCCESS; } /* -- Global efivar interface. 
---------------------------------------------- */ @@ -871,6 +800,7 @@ static const struct efivar_operations qcom_efivar_ops = { static int qcom_uefisecapp_probe(struct auxiliary_device *aux_dev, const struct auxiliary_device_id *aux_dev_id) { + struct qcom_tzmem_pool_config pool_config; struct qcuefi_client *qcuefi; int status; @@ -889,6 +819,16 @@ static int qcom_uefisecapp_probe(struct auxiliary_device *aux_dev, if (status) qcuefi_set_reference(NULL); + memset(&pool_config, 0, sizeof(pool_config)); + pool_config.initial_size = SZ_4K; + pool_config.policy = QCOM_TZMEM_POLICY_MULTIPLIER; + pool_config.increment = 2; + pool_config.max_size = SZ_256K; + + qcuefi->mempool = devm_qcom_tzmem_pool_new(&aux_dev->dev, &pool_config); + if (IS_ERR(qcuefi->mempool)) + return PTR_ERR(qcuefi->mempool); + return status; } diff --git a/drivers/firmware/qcom/qcom_scm-smc.c b/drivers/firmware/qcom/qcom_scm-smc.c index 16cf88acfa8e..dca5f3f1883b 100644 --- a/drivers/firmware/qcom/qcom_scm-smc.c +++ b/drivers/firmware/qcom/qcom_scm-smc.c @@ -2,6 +2,7 @@ /* Copyright (c) 2015,2019 The Linux Foundation. All rights reserved. */ +#include <linux/cleanup.h> #include <linux/io.h> #include <linux/errno.h> #include <linux/delay.h> @@ -9,6 +10,7 @@ #include <linux/slab.h> #include <linux/types.h> #include <linux/firmware/qcom/qcom_scm.h> +#include <linux/firmware/qcom/qcom_tzmem.h> #include <linux/arm-smccc.h> #include <linux/dma-mapping.h> @@ -150,11 +152,10 @@ int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc, enum qcom_scm_convention qcom_convention, struct qcom_scm_res *res, bool atomic) { + struct qcom_tzmem_pool *mempool = qcom_scm_get_tzmem_pool(); int arglen = desc->arginfo & 0xf; int i, ret; - dma_addr_t args_phys = 0; - void *args_virt = NULL; - size_t alloc_len; + void *args_virt __free(qcom_tzmem) = NULL; gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL; u32 smccc_call_type = atomic ? ARM_SMCCC_FAST_CALL : ARM_SMCCC_STD_CALL; u32 qcom_smccc_convention = (qcom_convention == SMC_CONVENTION_ARM_32) ? 
@@ -172,9 +173,9 @@ int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc, smc.args[i + SCM_SMC_FIRST_REG_IDX] = desc->args[i]; if (unlikely(arglen > SCM_SMC_N_REG_ARGS)) { - alloc_len = SCM_SMC_N_EXT_ARGS * sizeof(u64); - args_virt = kzalloc(PAGE_ALIGN(alloc_len), flag); - + args_virt = qcom_tzmem_alloc(mempool, + SCM_SMC_N_EXT_ARGS * sizeof(u64), + flag); if (!args_virt) return -ENOMEM; @@ -192,25 +193,10 @@ int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc, SCM_SMC_FIRST_EXT_IDX]); } - args_phys = dma_map_single(dev, args_virt, alloc_len, - DMA_TO_DEVICE); - - if (dma_mapping_error(dev, args_phys)) { - kfree(args_virt); - return -ENOMEM; - } - - smc.args[SCM_SMC_LAST_REG_IDX] = args_phys; + smc.args[SCM_SMC_LAST_REG_IDX] = qcom_tzmem_to_phys(args_virt); } - /* ret error check follows after args_virt cleanup*/ ret = __scm_smc_do(dev, &smc, &smc_res, atomic); - - if (args_virt) { - dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE); - kfree(args_virt); - } - if (ret) return ret; diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c index 68f4df7e6c3c..00c379a3cceb 100644 --- a/drivers/firmware/qcom/qcom_scm.c +++ b/drivers/firmware/qcom/qcom_scm.c @@ -6,12 +6,15 @@ #include <linux/arm-smccc.h> #include <linux/bitfield.h> #include <linux/bits.h> +#include <linux/cleanup.h> #include <linux/clk.h> #include <linux/completion.h> #include <linux/cpumask.h> #include <linux/dma-mapping.h> +#include <linux/err.h> #include <linux/export.h> #include <linux/firmware/qcom/qcom_scm.h> +#include <linux/firmware/qcom/qcom_tzmem.h> #include <linux/init.h> #include <linux/interconnect.h> #include <linux/interrupt.h> @@ -20,11 +23,14 @@ #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_platform.h> +#include <linux/of_reserved_mem.h> #include <linux/platform_device.h> #include <linux/reset-controller.h> +#include <linux/sizes.h> #include <linux/types.h> #include "qcom_scm.h" +#include "qcom_tzmem.h" static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT); module_param(download_mode, bool, 0); @@ -43,6 +49,8 @@ struct qcom_scm { int scm_vote_count; u64 dload_mode_addr; + + struct qcom_tzmem_pool *mempool; }; struct qcom_scm_current_perm_info { @@ -114,7 +122,6 @@ static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = { }; #define QCOM_SMC_WAITQ_FLAG_WAKE_ONE BIT(0) -#define QCOM_SMC_WAITQ_FLAG_WAKE_ALL BIT(1) #define QCOM_DLOAD_MASK GENMASK(5, 4) #define QCOM_DLOAD_NODUMP 0 @@ -198,6 +205,11 @@ static void qcom_scm_bw_disable(void) enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN; static DEFINE_SPINLOCK(scm_query_lock); +struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void) +{ + return __scm->mempool; +} + static enum qcom_scm_convention __get_convention(void) { unsigned long flags; @@ -570,6 +582,13 @@ int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size, * During the scm call memory protection will be enabled for the meta * data blob, so make sure it's physically contiguous, 4K aligned and * non-cachable to avoid XPU violations. + * + * For PIL calls the hypervisor creates SHM Bridges for the blob + * buffers on behalf of Linux so we must not do it ourselves hence + * not using the TZMem allocator here. + * + * If we pass a buffer that is already part of an SHM Bridge to this + * call, it will fail. 
*/ mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys, GFP_KERNEL); @@ -1008,14 +1027,13 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, struct qcom_scm_mem_map_info *mem_to_map; phys_addr_t mem_to_map_phys; phys_addr_t dest_phys; - dma_addr_t ptr_phys; + phys_addr_t ptr_phys; size_t mem_to_map_sz; size_t dest_sz; size_t src_sz; size_t ptr_sz; int next_vm; __le32 *src; - void *ptr; int ret, i, b; u64 srcvm_bits = *srcvm; @@ -1025,10 +1043,13 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(dest_sz, SZ_64); - ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL); + void *ptr __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, + ptr_sz, GFP_KERNEL); if (!ptr) return -ENOMEM; + ptr_phys = qcom_tzmem_to_phys(ptr); + /* Fill source vmid detail */ src = ptr; i = 0; @@ -1057,7 +1078,6 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz, ptr_phys, src_sz, dest_phys, dest_sz); - dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys); if (ret) { dev_err(__scm->dev, "Assign memory protection call failed %d\n", ret); @@ -1205,32 +1225,21 @@ int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size, .args[4] = data_unit_size, .owner = ARM_SMCCC_OWNER_SIP, }; - void *keybuf; - dma_addr_t key_phys; - int ret; - /* - * 'key' may point to vmalloc()'ed memory, but we need to pass a - * physical address that's been properly flushed. The sanctioned way to - * do this is by using the DMA API. But as is best practice for crypto - * keys, we also must wipe the key after use. This makes kmemdup() + - * dma_map_single() not clearly correct, since the DMA API can use - * bounce buffers. Instead, just use dma_alloc_coherent(). Programming - * keys is normally rare and thus not performance-critical. 
- */ + int ret; - keybuf = dma_alloc_coherent(__scm->dev, key_size, &key_phys, - GFP_KERNEL); + void *keybuf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, + key_size, + GFP_KERNEL); if (!keybuf) return -ENOMEM; memcpy(keybuf, key, key_size); - desc.args[1] = key_phys; + desc.args[1] = qcom_tzmem_to_phys(keybuf); ret = qcom_scm_call(__scm->dev, &desc, NULL); memzero_explicit(keybuf, key_size); - dma_free_coherent(__scm->dev, key_size, keybuf, key_phys); return ret; } EXPORT_SYMBOL_GPL(qcom_scm_ice_set_key); @@ -1342,6 +1351,66 @@ bool qcom_scm_lmh_dcvsh_available(void) } EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh_available); +int qcom_scm_shm_bridge_enable(void) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_MP, + .cmd = QCOM_SCM_MP_SHM_BRIDGE_ENABLE, + .owner = ARM_SMCCC_OWNER_SIP + }; + + struct qcom_scm_res res; + + if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP, + QCOM_SCM_MP_SHM_BRIDGE_ENABLE)) + return -EOPNOTSUPP; + + return qcom_scm_call(__scm->dev, &desc, &res) ?: res.result[0]; +} +EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_enable); + +int qcom_scm_shm_bridge_create(struct device *dev, u64 pfn_and_ns_perm_flags, + u64 ipfn_and_s_perm_flags, u64 size_and_flags, + u64 ns_vmids, u64 *handle) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_MP, + .cmd = QCOM_SCM_MP_SHM_BRIDGE_CREATE, + .owner = ARM_SMCCC_OWNER_SIP, + .args[0] = pfn_and_ns_perm_flags, + .args[1] = ipfn_and_s_perm_flags, + .args[2] = size_and_flags, + .args[3] = ns_vmids, + .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL, + QCOM_SCM_VAL, QCOM_SCM_VAL), + }; + + struct qcom_scm_res res; + int ret; + + ret = qcom_scm_call(__scm->dev, &desc, &res); + + if (handle && !ret) + *handle = res.result[1]; + + return ret ?: res.result[0]; +} +EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_create); + +int qcom_scm_shm_bridge_delete(struct device *dev, u64 handle) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_MP, + .cmd = QCOM_SCM_MP_SHM_BRIDGE_DELETE, + .owner = ARM_SMCCC_OWNER_SIP, + .args[0] = handle, + .arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL), + }; + + return qcom_scm_call(__scm->dev, &desc, NULL); +} +EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_delete); + int qcom_scm_lmh_profile_change(u32 profile_id) { struct qcom_scm_desc desc = { @@ -1359,8 +1428,6 @@ EXPORT_SYMBOL_GPL(qcom_scm_lmh_profile_change); int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val, u64 limit_node, u32 node_id, u64 version) { - dma_addr_t payload_phys; - u32 *payload_buf; int ret, payload_size = 5 * sizeof(u32); struct qcom_scm_desc desc = { @@ -1375,7 +1442,9 @@ int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val, .owner = ARM_SMCCC_OWNER_SIP, }; - payload_buf = dma_alloc_coherent(__scm->dev, payload_size, &payload_phys, GFP_KERNEL); + u32 *payload_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, + payload_size, + GFP_KERNEL); if (!payload_buf) return -ENOMEM; @@ -1385,15 +1454,28 @@ int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val, payload_buf[3] = 1; payload_buf[4] = payload_val; - desc.args[0] = payload_phys; + desc.args[0] = qcom_tzmem_to_phys(payload_buf); ret = qcom_scm_call(__scm->dev, &desc, NULL); - dma_free_coherent(__scm->dev, payload_size, payload_buf, payload_phys); return ret; } EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh); +int qcom_scm_gpu_init_regs(u32 gpu_req) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_GPU, + .cmd = QCOM_SCM_SVC_GPU_INIT_REGS, + .arginfo = QCOM_SCM_ARGS(1), + .args[0] = gpu_req, + .owner = ARM_SMCCC_OWNER_SIP, 
+ }; + + return qcom_scm_call(__scm->dev, &desc, NULL); +} +EXPORT_SYMBOL_GPL(qcom_scm_gpu_init_regs); + static int qcom_scm_find_dload_address(struct device *dev, u64 *addr) { struct device_node *tcsr; @@ -1545,37 +1627,27 @@ int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id) unsigned long app_name_len = strlen(app_name); struct qcom_scm_desc desc = {}; struct qcom_scm_qseecom_resp res = {}; - dma_addr_t name_buf_phys; - char *name_buf; int status; if (app_name_len >= name_buf_size) return -EINVAL; - name_buf = kzalloc(name_buf_size, GFP_KERNEL); + char *name_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, + name_buf_size, + GFP_KERNEL); if (!name_buf) return -ENOMEM; memcpy(name_buf, app_name, app_name_len); - name_buf_phys = dma_map_single(__scm->dev, name_buf, name_buf_size, DMA_TO_DEVICE); - status = dma_mapping_error(__scm->dev, name_buf_phys); - if (status) { - kfree(name_buf); - dev_err(__scm->dev, "qseecom: failed to map dma address\n"); - return status; - } - desc.owner = QSEECOM_TZ_OWNER_QSEE_OS; desc.svc = QSEECOM_TZ_SVC_APP_MGR; desc.cmd = QSEECOM_TZ_CMD_APP_LOOKUP; desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL); - desc.args[0] = name_buf_phys; + desc.args[0] = qcom_tzmem_to_phys(name_buf); desc.args[1] = app_name_len; status = qcom_scm_qseecom_call(&desc, &res); - dma_unmap_single(__scm->dev, name_buf_phys, name_buf_size, DMA_TO_DEVICE); - kfree(name_buf); if (status) return status; @@ -1597,9 +1669,9 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id); /** * qcom_scm_qseecom_app_send() - Send to and receive data from a given QSEE app. * @app_id: The ID of the target app. - * @req: DMA address of the request buffer sent to the app. + * @req: Request buffer sent to the app (must be TZ memory) * @req_size: Size of the request buffer. - * @rsp: DMA address of the response buffer, written to by the app. + * @rsp: Response buffer, written to by the app (must be TZ memory) * @rsp_size: Size of the response buffer. * * Sends a request to the QSEE app associated with the given ID and read back @@ -1610,13 +1682,18 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id); * * Return: Zero on success, nonzero on failure. 
*/ -int qcom_scm_qseecom_app_send(u32 app_id, dma_addr_t req, size_t req_size, - dma_addr_t rsp, size_t rsp_size) +int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size, + void *rsp, size_t rsp_size) { struct qcom_scm_qseecom_resp res = {}; struct qcom_scm_desc desc = {}; + phys_addr_t req_phys; + phys_addr_t rsp_phys; int status; + req_phys = qcom_tzmem_to_phys(req); + rsp_phys = qcom_tzmem_to_phys(rsp); + desc.owner = QSEECOM_TZ_OWNER_TZ_APPS; desc.svc = QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER; desc.cmd = QSEECOM_TZ_CMD_APP_SEND; @@ -1624,9 +1701,9 @@ int qcom_scm_qseecom_app_send(u32 app_id, dma_addr_t req, size_t req_size, QCOM_SCM_RW, QCOM_SCM_VAL, QCOM_SCM_RW, QCOM_SCM_VAL); desc.args[0] = app_id; - desc.args[1] = req; + desc.args[1] = req_phys; desc.args[2] = req_size; - desc.args[3] = rsp; + desc.args[3] = rsp_phys; desc.args[4] = rsp_size; status = qcom_scm_qseecom_call(&desc, &res); @@ -1649,6 +1726,8 @@ static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = { { .compatible = "lenovo,flex-5g" }, { .compatible = "lenovo,thinkpad-x13s", }, { .compatible = "qcom,sc8180x-primus" }, + { .compatible = "qcom,x1e80100-crd" }, + { .compatible = "qcom,x1e80100-qcp" }, { } }; @@ -1793,9 +1872,8 @@ static irqreturn_t qcom_scm_irq_handler(int irq, void *data) goto out; } - if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE && - flags != QCOM_SMC_WAITQ_FLAG_WAKE_ALL) { - dev_err(scm->dev, "Invalid flags found for wq_ctx: %u\n", flags); + if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE) { + dev_err(scm->dev, "Invalid flags received for wq_ctx: %u\n", flags); goto out; } @@ -1810,6 +1888,7 @@ out: static int qcom_scm_probe(struct platform_device *pdev) { + struct qcom_tzmem_pool_config pool_config; struct qcom_scm *scm; int irq, ret; @@ -1885,6 +1964,26 @@ static int qcom_scm_probe(struct platform_device *pdev) if (of_property_read_bool(pdev->dev.of_node, "qcom,sdi-enabled")) qcom_scm_disable_sdi(); + ret = of_reserved_mem_device_init(__scm->dev); + if (ret && ret != -ENODEV) + return dev_err_probe(__scm->dev, ret, + "Failed to setup the reserved memory region for TZ mem\n"); + + ret = qcom_tzmem_enable(__scm->dev); + if (ret) + return dev_err_probe(__scm->dev, ret, + "Failed to enable the TrustZone memory allocator\n"); + + memset(&pool_config, 0, sizeof(pool_config)); + pool_config.initial_size = 0; + pool_config.policy = QCOM_TZMEM_POLICY_ON_DEMAND; + pool_config.max_size = SZ_256K; + + __scm->mempool = devm_qcom_tzmem_pool_new(__scm->dev, &pool_config); + if (IS_ERR(__scm->mempool)) + return dev_err_probe(__scm->dev, PTR_ERR(__scm->mempool), + "Failed to create the SCM memory pool\n"); + /* * Initialize the QSEECOM interface. 
* diff --git a/drivers/firmware/qcom/qcom_scm.h b/drivers/firmware/qcom/qcom_scm.h index 4532907e8489..685b8f59e7a6 100644 --- a/drivers/firmware/qcom/qcom_scm.h +++ b/drivers/firmware/qcom/qcom_scm.h @@ -5,6 +5,7 @@ #define __QCOM_SCM_INT_H struct device; +struct qcom_tzmem_pool; enum qcom_scm_convention { SMC_CONVENTION_UNKNOWN, @@ -78,6 +79,8 @@ int scm_legacy_call_atomic(struct device *dev, const struct qcom_scm_desc *desc, int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc, struct qcom_scm_res *res); +struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void); + #define QCOM_SCM_SVC_BOOT 0x01 #define QCOM_SCM_BOOT_SET_ADDR 0x01 #define QCOM_SCM_BOOT_TERMINATE_PC 0x02 @@ -113,6 +116,9 @@ int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc, #define QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE 0x05 #define QCOM_SCM_MP_VIDEO_VAR 0x08 #define QCOM_SCM_MP_ASSIGN 0x16 +#define QCOM_SCM_MP_SHM_BRIDGE_ENABLE 0x1c +#define QCOM_SCM_MP_SHM_BRIDGE_DELETE 0x1d +#define QCOM_SCM_MP_SHM_BRIDGE_CREATE 0x1e #define QCOM_SCM_SVC_OCMEM 0x0f #define QCOM_SCM_OCMEM_LOCK_CMD 0x01 @@ -138,6 +144,9 @@ int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc, #define QCOM_SCM_WAITQ_RESUME 0x02 #define QCOM_SCM_WAITQ_GET_WQ_CTX 0x03 +#define QCOM_SCM_SVC_GPU 0x28 +#define QCOM_SCM_SVC_GPU_INIT_REGS 0x01 + /* common error codes */ #define QCOM_SCM_V2_EBUSY -12 #define QCOM_SCM_ENOMEM -5 diff --git a/drivers/firmware/qcom/qcom_tzmem.c b/drivers/firmware/qcom/qcom_tzmem.c new file mode 100644 index 000000000000..17948cfc82e7 --- /dev/null +++ b/drivers/firmware/qcom/qcom_tzmem.c @@ -0,0 +1,469 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Memory allocator for buffers shared with the TrustZone. + * + * Copyright (C) 2023-2024 Linaro Ltd. + */ + +#include <linux/bug.h> +#include <linux/cleanup.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/firmware/qcom/qcom_tzmem.h> +#include <linux/genalloc.h> +#include <linux/gfp.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/mm.h> +#include <linux/radix-tree.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/types.h> + +#include "qcom_tzmem.h" + +struct qcom_tzmem_area { + struct list_head list; + void *vaddr; + dma_addr_t paddr; + size_t size; + void *priv; +}; + +struct qcom_tzmem_pool { + struct gen_pool *genpool; + struct list_head areas; + enum qcom_tzmem_policy policy; + size_t increment; + size_t max_size; + spinlock_t lock; +}; + +struct qcom_tzmem_chunk { + phys_addr_t paddr; + size_t size; + struct qcom_tzmem_pool *owner; +}; + +static struct device *qcom_tzmem_dev; +static RADIX_TREE(qcom_tzmem_chunks, GFP_ATOMIC); +static DEFINE_SPINLOCK(qcom_tzmem_chunks_lock); + +#if IS_ENABLED(CONFIG_QCOM_TZMEM_MODE_GENERIC) + +static int qcom_tzmem_init(void) +{ + return 0; +} + +static int qcom_tzmem_init_area(struct qcom_tzmem_area *area) +{ + return 0; +} + +static void qcom_tzmem_cleanup_area(struct qcom_tzmem_area *area) +{ + +} + +#elif IS_ENABLED(CONFIG_QCOM_TZMEM_MODE_SHMBRIDGE) + +#include <linux/firmware/qcom/qcom_scm.h> +#include <linux/of.h> + +#define QCOM_SHM_BRIDGE_NUM_VM_SHIFT 9 + +static bool qcom_tzmem_using_shm_bridge; + +/* List of machines that are known to not support SHM bridge correctly. 
*/ +static const char *const qcom_tzmem_blacklist[] = { + "qcom,sc8180x", + "qcom,sdm845", /* reset in rmtfs memory assignment */ + "qcom,sm8150", /* reset in rmtfs memory assignment */ + NULL +}; + +static int qcom_tzmem_init(void) +{ + const char *const *platform; + int ret; + + for (platform = qcom_tzmem_blacklist; *platform; platform++) { + if (of_machine_is_compatible(*platform)) + goto notsupp; + } + + ret = qcom_scm_shm_bridge_enable(); + if (ret == -EOPNOTSUPP) + goto notsupp; + + if (!ret) + qcom_tzmem_using_shm_bridge = true; + + return ret; + +notsupp: + dev_info(qcom_tzmem_dev, "SHM Bridge not supported\n"); + return 0; +} + +static int qcom_tzmem_init_area(struct qcom_tzmem_area *area) +{ + u64 pfn_and_ns_perm, ipfn_and_s_perm, size_and_flags; + int ret; + + if (!qcom_tzmem_using_shm_bridge) + return 0; + + pfn_and_ns_perm = (u64)area->paddr | QCOM_SCM_PERM_RW; + ipfn_and_s_perm = (u64)area->paddr | QCOM_SCM_PERM_RW; + size_and_flags = area->size | (1 << QCOM_SHM_BRIDGE_NUM_VM_SHIFT); + + u64 *handle __free(kfree) = kzalloc(sizeof(*handle), GFP_KERNEL); + if (!handle) + return -ENOMEM; + + ret = qcom_scm_shm_bridge_create(qcom_tzmem_dev, pfn_and_ns_perm, + ipfn_and_s_perm, size_and_flags, + QCOM_SCM_VMID_HLOS, handle); + if (ret) + return ret; + + area->priv = no_free_ptr(handle); + + return 0; +} + +static void qcom_tzmem_cleanup_area(struct qcom_tzmem_area *area) +{ + u64 *handle = area->priv; + + if (!qcom_tzmem_using_shm_bridge) + return; + + qcom_scm_shm_bridge_delete(qcom_tzmem_dev, *handle); + kfree(handle); +} + +#endif /* CONFIG_QCOM_TZMEM_MODE_SHMBRIDGE */ + +static int qcom_tzmem_pool_add_memory(struct qcom_tzmem_pool *pool, + size_t size, gfp_t gfp) +{ + int ret; + + struct qcom_tzmem_area *area __free(kfree) = kzalloc(sizeof(*area), + gfp); + if (!area) + return -ENOMEM; + + area->size = PAGE_ALIGN(size); + + area->vaddr = dma_alloc_coherent(qcom_tzmem_dev, area->size, + &area->paddr, gfp); + if (!area->vaddr) + return -ENOMEM; + + ret = qcom_tzmem_init_area(area); + if (ret) { + dma_free_coherent(qcom_tzmem_dev, area->size, + area->vaddr, area->paddr); + return ret; + } + + ret = gen_pool_add_virt(pool->genpool, (unsigned long)area->vaddr, + (phys_addr_t)area->paddr, size, -1); + if (ret) { + dma_free_coherent(qcom_tzmem_dev, area->size, + area->vaddr, area->paddr); + return ret; + } + + scoped_guard(spinlock_irqsave, &pool->lock) + list_add_tail(&area->list, &pool->areas); + + area = NULL; + return 0; +} + +/** + * qcom_tzmem_pool_new() - Create a new TZ memory pool. + * @config: Pool configuration. + * + * Create a new pool of memory suitable for sharing with the TrustZone. + * + * Must not be used in atomic context. + * + * Return: New memory pool address or ERR_PTR() on error. 
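A pool's behaviour is governed entirely by struct qcom_tzmem_pool_config, which the function below validates. A rough configuration sketch (field and policy names are taken from this patch; the sizes are illustrative only):

	struct qcom_tzmem_pool_config config = { };

	/*
	 * Grow by the requested, page-aligned amount whenever an allocation
	 * does not fit, up to an optional max_size cap.
	 */
	config.policy = QCOM_TZMEM_POLICY_ON_DEMAND;
	config.initial_size = 0;
	config.max_size = SZ_256K;

	/*
	 * Alternatives validated below:
	 *   QCOM_TZMEM_POLICY_STATIC     - one area of initial_size, never grown
	 *   QCOM_TZMEM_POLICY_MULTIPLIER - grow by current size * increment
	 */

	struct qcom_tzmem_pool *pool = qcom_tzmem_pool_new(&config);
	if (IS_ERR(pool))
		return PTR_ERR(pool);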
+ */ +struct qcom_tzmem_pool * +qcom_tzmem_pool_new(const struct qcom_tzmem_pool_config *config) +{ + int ret = -ENOMEM; + + might_sleep(); + + switch (config->policy) { + case QCOM_TZMEM_POLICY_STATIC: + if (!config->initial_size) + return ERR_PTR(-EINVAL); + break; + case QCOM_TZMEM_POLICY_MULTIPLIER: + if (!config->increment) + return ERR_PTR(-EINVAL); + break; + case QCOM_TZMEM_POLICY_ON_DEMAND: + break; + default: + return ERR_PTR(-EINVAL); + } + + struct qcom_tzmem_pool *pool __free(kfree) = kzalloc(sizeof(*pool), + GFP_KERNEL); + if (!pool) + return ERR_PTR(-ENOMEM); + + pool->genpool = gen_pool_create(PAGE_SHIFT, -1); + if (!pool->genpool) + return ERR_PTR(-ENOMEM); + + gen_pool_set_algo(pool->genpool, gen_pool_best_fit, NULL); + + pool->policy = config->policy; + pool->increment = config->increment; + pool->max_size = config->max_size; + INIT_LIST_HEAD(&pool->areas); + spin_lock_init(&pool->lock); + + if (config->initial_size) { + ret = qcom_tzmem_pool_add_memory(pool, config->initial_size, + GFP_KERNEL); + if (ret) { + gen_pool_destroy(pool->genpool); + return ERR_PTR(ret); + } + } + + return_ptr(pool); +} +EXPORT_SYMBOL_GPL(qcom_tzmem_pool_new); + +/** + * qcom_tzmem_pool_free() - Destroy a TZ memory pool and free all resources. + * @pool: Memory pool to free. + * + * Must not be called if any of the allocated chunks has not been freed. + * Must not be used in atomic context. + */ +void qcom_tzmem_pool_free(struct qcom_tzmem_pool *pool) +{ + struct qcom_tzmem_area *area, *next; + struct qcom_tzmem_chunk *chunk; + struct radix_tree_iter iter; + bool non_empty = false; + void __rcu **slot; + + might_sleep(); + + if (!pool) + return; + + scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock) { + radix_tree_for_each_slot(slot, &qcom_tzmem_chunks, &iter, 0) { + chunk = radix_tree_deref_slot_protected(slot, + &qcom_tzmem_chunks_lock); + + if (chunk->owner == pool) + non_empty = true; + } + } + + WARN(non_empty, "Freeing TZ memory pool with memory still allocated"); + + list_for_each_entry_safe(area, next, &pool->areas, list) { + list_del(&area->list); + qcom_tzmem_cleanup_area(area); + dma_free_coherent(qcom_tzmem_dev, area->size, + area->vaddr, area->paddr); + kfree(area); + } + + gen_pool_destroy(pool->genpool); + kfree(pool); +} +EXPORT_SYMBOL_GPL(qcom_tzmem_pool_free); + +static void devm_qcom_tzmem_pool_free(void *data) +{ + struct qcom_tzmem_pool *pool = data; + + qcom_tzmem_pool_free(pool); +} + +/** + * devm_qcom_tzmem_pool_new() - Managed variant of qcom_tzmem_pool_new(). + * @dev: Device managing this resource. + * @config: Pool configuration. + * + * Must not be used in atomic context. + * + * Return: Address of the managed pool or ERR_PTR() on failure. 
+ */ +struct qcom_tzmem_pool * +devm_qcom_tzmem_pool_new(struct device *dev, + const struct qcom_tzmem_pool_config *config) +{ + struct qcom_tzmem_pool *pool; + int ret; + + pool = qcom_tzmem_pool_new(config); + if (IS_ERR(pool)) + return pool; + + ret = devm_add_action_or_reset(dev, devm_qcom_tzmem_pool_free, pool); + if (ret) + return ERR_PTR(ret); + + return pool; +} +EXPORT_SYMBOL_GPL(devm_qcom_tzmem_pool_new); + +static bool qcom_tzmem_try_grow_pool(struct qcom_tzmem_pool *pool, + size_t requested, gfp_t gfp) +{ + size_t current_size = gen_pool_size(pool->genpool); + + if (pool->max_size && (current_size + requested) > pool->max_size) + return false; + + switch (pool->policy) { + case QCOM_TZMEM_POLICY_STATIC: + return false; + case QCOM_TZMEM_POLICY_MULTIPLIER: + requested = current_size * pool->increment; + break; + case QCOM_TZMEM_POLICY_ON_DEMAND: + break; + } + + return !qcom_tzmem_pool_add_memory(pool, requested, gfp); +} + +/** + * qcom_tzmem_alloc() - Allocate a memory chunk suitable for sharing with TZ. + * @pool: TZ memory pool from which to allocate memory. + * @size: Number of bytes to allocate. + * @gfp: GFP flags. + * + * Can be used in any context. + * + * Return: + * Address of the allocated buffer or NULL if no more memory can be allocated. + * The buffer must be released using qcom_tzmem_free(). + */ +void *qcom_tzmem_alloc(struct qcom_tzmem_pool *pool, size_t size, gfp_t gfp) +{ + unsigned long vaddr; + int ret; + + if (!size) + return NULL; + + size = PAGE_ALIGN(size); + + struct qcom_tzmem_chunk *chunk __free(kfree) = kzalloc(sizeof(*chunk), + gfp); + if (!chunk) + return NULL; + +again: + vaddr = gen_pool_alloc(pool->genpool, size); + if (!vaddr) { + if (qcom_tzmem_try_grow_pool(pool, size, gfp)) + goto again; + + return NULL; + } + + chunk->paddr = gen_pool_virt_to_phys(pool->genpool, vaddr); + chunk->size = size; + chunk->owner = pool; + + scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock) { + ret = radix_tree_insert(&qcom_tzmem_chunks, vaddr, chunk); + if (ret) { + gen_pool_free(pool->genpool, vaddr, size); + return NULL; + } + + chunk = NULL; + } + + return (void *)vaddr; +} +EXPORT_SYMBOL_GPL(qcom_tzmem_alloc); + +/** + * qcom_tzmem_free() - Release a buffer allocated from a TZ memory pool. + * @vaddr: Virtual address of the buffer. + * + * Can be used in any context. + */ +void qcom_tzmem_free(void *vaddr) +{ + struct qcom_tzmem_chunk *chunk; + + scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock) + chunk = radix_tree_delete_item(&qcom_tzmem_chunks, + (unsigned long)vaddr, NULL); + + if (!chunk) { + WARN(1, "Virtual address %p not owned by TZ memory allocator", + vaddr); + return; + } + + scoped_guard(spinlock_irqsave, &chunk->owner->lock) + gen_pool_free(chunk->owner->genpool, (unsigned long)vaddr, + chunk->size); + kfree(chunk); +} +EXPORT_SYMBOL_GPL(qcom_tzmem_free); + +/** + * qcom_tzmem_to_phys() - Map the virtual address of a TZ buffer to physical. + * @vaddr: Virtual address of the buffer allocated from a TZ memory pool. + * + * Can be used in any context. The address must have been returned by a call + * to qcom_tzmem_alloc(). + * + * Returns: Physical address of the buffer. 
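Tying the allocator API together, the converted SCM call sites earlier in this patch follow roughly this pattern (a minimal sketch; the payload, size and descriptor setup are illustrative):

	void *buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, size,
							GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* fill the buffer, then hand its physical address to the firmware */
	memcpy(buf, payload, size);
	desc.args[0] = qcom_tzmem_to_phys(buf);

	ret = qcom_scm_call(__scm->dev, &desc, NULL);
	/* qcom_tzmem_free() runs automatically when buf goes out of scope */
	return ret;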
+ */ +phys_addr_t qcom_tzmem_to_phys(void *vaddr) +{ + struct qcom_tzmem_chunk *chunk; + + guard(spinlock_irqsave)(&qcom_tzmem_chunks_lock); + + chunk = radix_tree_lookup(&qcom_tzmem_chunks, (unsigned long)vaddr); + if (!chunk) + return 0; + + return chunk->paddr; +} +EXPORT_SYMBOL_GPL(qcom_tzmem_to_phys); + +int qcom_tzmem_enable(struct device *dev) +{ + if (qcom_tzmem_dev) + return -EBUSY; + + qcom_tzmem_dev = dev; + + return qcom_tzmem_init(); +} +EXPORT_SYMBOL_GPL(qcom_tzmem_enable); + +MODULE_DESCRIPTION("TrustZone memory allocator for Qualcomm firmware drivers"); +MODULE_AUTHOR("Bartosz Golaszewski <bartosz.golaszewski@linaro.org>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/qcom/qcom_tzmem.h b/drivers/firmware/qcom/qcom_tzmem.h new file mode 100644 index 000000000000..8fa8a3eb940e --- /dev/null +++ b/drivers/firmware/qcom/qcom_tzmem.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2023-2024 Linaro Ltd. + */ + +#ifndef __QCOM_TZMEM_PRIV_H +#define __QCOM_TZMEM_PRIV_H + +struct device; + +int qcom_tzmem_enable(struct device *dev); + +#endif /* __QCOM_TZMEM_PRIV_H */ diff --git a/drivers/firmware/ti_sci.h b/drivers/firmware/ti_sci.h index ef3a8214d002..5846c60220f5 100644 --- a/drivers/firmware/ti_sci.h +++ b/drivers/firmware/ti_sci.h @@ -4,7 +4,7 @@ * * Communication protocol with TI SCI hardware * The system works in a message response protocol - * See: http://processors.wiki.ti.com/index.php/TISCI for details + * See: https://software-dl.ti.com/tisci/esd/latest/index.html for details * * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/ */ diff --git a/drivers/firmware/turris-mox-rwtm.c b/drivers/firmware/turris-mox-rwtm.c index 31d962cdd6eb..3e7f186d239a 100644 --- a/drivers/firmware/turris-mox-rwtm.c +++ b/drivers/firmware/turris-mox-rwtm.c @@ -2,7 +2,7 @@ /* * Turris Mox rWTM firmware driver * - * Copyright (C) 2019 Marek Behún <kabel@kernel.org> + * Copyright (C) 2019, 2024 Marek Behún <kabel@kernel.org> */ #include <linux/armada-37xx-rwtm-mailbox.h> @@ -174,6 +174,9 @@ static void mox_rwtm_rx_callback(struct mbox_client *cl, void *data) struct mox_rwtm *rwtm = dev_get_drvdata(cl->dev); struct armada_37xx_rwtm_rx_msg *msg = data; + if (completion_done(&rwtm->cmd_done)) + return; + rwtm->reply = *msg; complete(&rwtm->cmd_done); } @@ -199,9 +202,8 @@ static int mox_get_board_info(struct mox_rwtm *rwtm) if (ret < 0) return ret; - ret = wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2); - if (ret < 0) - return ret; + if (!wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2)) + return -ETIMEDOUT; ret = mox_get_status(MBOX_CMD_BOARD_INFO, reply->retval); if (ret == -ENODATA) { @@ -235,9 +237,8 @@ static int mox_get_board_info(struct mox_rwtm *rwtm) if (ret < 0) return ret; - ret = wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2); - if (ret < 0) - return ret; + if (!wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2)) + return -ETIMEDOUT; ret = mox_get_status(MBOX_CMD_ECDSA_PUB_KEY, reply->retval); if (ret == -ENODATA) { @@ -274,9 +275,8 @@ static int check_get_random_support(struct mox_rwtm *rwtm) if (ret < 0) return ret; - ret = wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2); - if (ret < 0) - return ret; + if (!wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2)) + return -ETIMEDOUT; return mox_get_status(MBOX_CMD_GET_RANDOM, rwtm->reply.retval); } @@ -499,6 +499,7 @@ static int turris_mox_rwtm_probe(struct platform_device *pdev) platform_set_drvdata(pdev, rwtm); mutex_init(&rwtm->busy); + 
init_completion(&rwtm->cmd_done); rwtm->mbox_client.dev = dev; rwtm->mbox_client.rx_callback = mox_rwtm_rx_callback; @@ -512,8 +513,6 @@ static int turris_mox_rwtm_probe(struct platform_device *pdev) goto remove_files; } - init_completion(&rwtm->cmd_done); - ret = mox_get_board_info(rwtm); if (ret < 0) dev_warn(dev, "Cannot read board information: %i\n", ret); diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c index 9bc45357e1a8..add8acf66a9c 100644 --- a/drivers/firmware/xilinx/zynqmp.c +++ b/drivers/firmware/xilinx/zynqmp.c @@ -41,9 +41,6 @@ /* IOCTL/QUERY feature payload size */ #define FEATURE_PAYLOAD_SIZE 2 -/* Firmware feature check version mask */ -#define FIRMWARE_VERSION_MASK GENMASK(15, 0) - static bool feature_check_enabled; static DEFINE_HASHTABLE(pm_api_features_map, PM_API_FEATURE_CHECK_MAX_ORDER); static u32 ioctl_features[FEATURE_PAYLOAD_SIZE]; diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig index 8efdd1f97139..c82d8d8a16ea 100644 --- a/drivers/memory/Kconfig +++ b/drivers/memory/Kconfig @@ -167,7 +167,7 @@ config FSL_CORENET_CF represents a coherency violation. config FSL_IFC - bool "Freescale IFC driver" if COMPILE_TEST + bool "Freescale IFC driver" depends on FSL_SOC || ARCH_LAYERSCAPE || SOC_LS1021A || COMPILE_TEST depends on HAS_IOMEM diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig index cbf8ae85e1ae..614257308516 100644 --- a/drivers/mtd/nand/raw/Kconfig +++ b/drivers/mtd/nand/raw/Kconfig @@ -234,8 +234,7 @@ config MTD_NAND_FSL_IFC tristate "Freescale IFC NAND controller" depends on FSL_SOC || ARCH_LAYERSCAPE || SOC_LS1021A || COMPILE_TEST depends on HAS_IOMEM - select FSL_IFC - select MEMORY + depends on FSL_IFC help Various Freescale chips e.g P1010, include a NAND Flash machine with built-in hardware ECC capabilities. 
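The turris-mox-rwtm hunks above hinge on the return convention of wait_for_completion_timeout(): it returns 0 when the timeout expires and the number of remaining jiffies (always greater than zero) on completion, never a negative errno, so the old "ret < 0" test could never detect a timeout. The corrected pattern used in all three call sites is:

	if (!wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2))
		return -ETIMEDOUT;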
diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c index fbab6ac0f0d1..7594f64eb737 100644 --- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c +++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c @@ -188,6 +188,9 @@ static void rcar_gen3_enable_vbus_ctrl(struct rcar_gen3_chan *ch, int vbus) dev_vdbg(ch->dev, "%s: %08x, %d\n", __func__, val, vbus); if (ch->soc_no_adp_ctrl) { + if (ch->vbus) + regulator_hardware_enable(ch->vbus, vbus); + vbus_ctrl_reg = USB2_VBCTRL; vbus_ctrl_val = USB2_VBCTRL_VBOUT; } @@ -718,7 +721,10 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev) phy_set_drvdata(channel->rphys[i].phy, &channel->rphys[i]); } - channel->vbus = devm_regulator_get_optional(dev, "vbus"); + if (channel->soc_no_adp_ctrl && channel->is_otg_channel) + channel->vbus = devm_regulator_get_exclusive(dev, "vbus"); + else + channel->vbus = devm_regulator_get_optional(dev, "vbus"); if (IS_ERR(channel->vbus)) { if (PTR_ERR(channel->vbus) == -EPROBE_DEFER) { ret = PTR_ERR(channel->vbus); diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig index 81a298517df2..960fd6a82450 100644 --- a/drivers/platform/Kconfig +++ b/drivers/platform/Kconfig @@ -7,6 +7,8 @@ source "drivers/platform/goldfish/Kconfig" source "drivers/platform/chrome/Kconfig" +source "drivers/platform/cznic/Kconfig" + source "drivers/platform/mellanox/Kconfig" source "drivers/platform/olpc/Kconfig" diff --git a/drivers/platform/Makefile b/drivers/platform/Makefile index fbbe4f77aa5d..bf69cc8d7429 100644 --- a/drivers/platform/Makefile +++ b/drivers/platform/Makefile @@ -10,5 +10,6 @@ obj-$(CONFIG_MIPS) += mips/ obj-$(CONFIG_OLPC_EC) += olpc/ obj-$(CONFIG_GOLDFISH) += goldfish/ obj-$(CONFIG_CHROME_PLATFORMS) += chrome/ +obj-$(CONFIG_CZNIC_PLATFORMS) += cznic/ obj-$(CONFIG_SURFACE_PLATFORMS) += surface/ obj-$(CONFIG_ARM64) += arm64/ diff --git a/drivers/platform/cznic/Kconfig b/drivers/platform/cznic/Kconfig new file mode 100644 index 000000000000..cb0d4d686d8a --- /dev/null +++ b/drivers/platform/cznic/Kconfig @@ -0,0 +1,50 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# For a description of the syntax of this configuration file, +# see Documentation/kbuild/kconfig-language.rst. +# + +menuconfig CZNIC_PLATFORMS + bool "Platform support for CZ.NIC's Turris hardware" + help + Say Y here to be able to choose driver support for CZ.NIC's Turris + devices. This option alone does not add any kernel code. + +if CZNIC_PLATFORMS + +config TURRIS_OMNIA_MCU + tristate "Turris Omnia MCU driver" + depends on MACH_ARMADA_38X || COMPILE_TEST + depends on I2C + depends on OF + depends on WATCHDOG + depends on GPIOLIB + depends on HW_RANDOM + depends on RTC_CLASS + depends on WATCHDOG_CORE + select GPIOLIB_IRQCHIP + help + Say Y here to add support for the features implemented by the + microcontroller on the CZ.NIC's Turris Omnia SOHO router. 
+ The features include: + - board poweroff into true low power mode (with voltage regulators + disabled) and the ability to configure wake up from this mode (via + rtcwake) + - true random number generator (if available on the MCU) + - MCU watchdog + - GPIO pins + - to get front button press events (the front button can be + configured either to generate press events to the CPU or to change + front LEDs panel brightness) + - to enable / disable USB port voltage regulators and to detect + USB overcurrent + - to detect MiniPCIe / mSATA card presence in MiniPCIe port 0 + - to configure resets of various peripherals on board revisions 32+ + - to enable / disable the VHV voltage regulator to the SOC in order + to be able to program SOC's OTP on board revisions 32+ + - to get input from the LED output pins of the WAN ethernet PHY, LAN + switch and MiniPCIe ports + To compile this driver as a module, choose M here; the module will be + called turris-omnia-mcu. + +endif # CZNIC_PLATFORMS diff --git a/drivers/platform/cznic/Makefile b/drivers/platform/cznic/Makefile new file mode 100644 index 000000000000..eae4c6b341ff --- /dev/null +++ b/drivers/platform/cznic/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0-only + +obj-$(CONFIG_TURRIS_OMNIA_MCU) += turris-omnia-mcu.o +turris-omnia-mcu-y := turris-omnia-mcu-base.o +turris-omnia-mcu-y += turris-omnia-mcu-gpio.o +turris-omnia-mcu-y += turris-omnia-mcu-sys-off-wakeup.o +turris-omnia-mcu-y += turris-omnia-mcu-trng.o +turris-omnia-mcu-y += turris-omnia-mcu-watchdog.o diff --git a/drivers/platform/cznic/turris-omnia-mcu-base.c b/drivers/platform/cznic/turris-omnia-mcu-base.c new file mode 100644 index 000000000000..c68a7a84a951 --- /dev/null +++ b/drivers/platform/cznic/turris-omnia-mcu-base.c @@ -0,0 +1,408 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * CZ.NIC's Turris Omnia MCU driver + * + * 2024 by Marek Behún <kabel@kernel.org> + */ + +#include <linux/array_size.h> +#include <linux/bits.h> +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/hex.h> +#include <linux/i2c.h> +#include <linux/module.h> +#include <linux/string.h> +#include <linux/sysfs.h> +#include <linux/types.h> + +#include <linux/turris-omnia-mcu-interface.h> +#include "turris-omnia-mcu.h" + +#define OMNIA_FW_VERSION_LEN 20 +#define OMNIA_FW_VERSION_HEX_LEN (2 * OMNIA_FW_VERSION_LEN + 1) +#define OMNIA_BOARD_INFO_LEN 16 + +int omnia_cmd_write_read(const struct i2c_client *client, + void *cmd, unsigned int cmd_len, + void *reply, unsigned int reply_len) +{ + struct i2c_msg msgs[2]; + int ret, num; + + msgs[0].addr = client->addr; + msgs[0].flags = 0; + msgs[0].len = cmd_len; + msgs[0].buf = cmd; + num = 1; + + if (reply_len) { + msgs[1].addr = client->addr; + msgs[1].flags = I2C_M_RD; + msgs[1].len = reply_len; + msgs[1].buf = reply; + num++; + } + + ret = i2c_transfer(client->adapter, msgs, num); + if (ret < 0) + return ret; + if (ret != num) + return -EIO; + + return 0; +} + +static int omnia_get_version_hash(struct omnia_mcu *mcu, bool bootloader, + char version[static OMNIA_FW_VERSION_HEX_LEN]) +{ + u8 reply[OMNIA_FW_VERSION_LEN]; + char *p; + int err; + + err = omnia_cmd_read(mcu->client, + bootloader ? 
OMNIA_CMD_GET_FW_VERSION_BOOT + : OMNIA_CMD_GET_FW_VERSION_APP, + reply, sizeof(reply)); + if (err) + return err; + + p = bin2hex(version, reply, OMNIA_FW_VERSION_LEN); + *p = '\0'; + + return 0; +} + +static ssize_t fw_version_hash_show(struct device *dev, char *buf, + bool bootloader) +{ + struct omnia_mcu *mcu = dev_get_drvdata(dev); + char version[OMNIA_FW_VERSION_HEX_LEN]; + int err; + + err = omnia_get_version_hash(mcu, bootloader, version); + if (err) + return err; + + return sysfs_emit(buf, "%s\n", version); +} + +static ssize_t fw_version_hash_application_show(struct device *dev, + struct device_attribute *a, + char *buf) +{ + return fw_version_hash_show(dev, buf, false); +} +static DEVICE_ATTR_RO(fw_version_hash_application); + +static ssize_t fw_version_hash_bootloader_show(struct device *dev, + struct device_attribute *a, + char *buf) +{ + return fw_version_hash_show(dev, buf, true); +} +static DEVICE_ATTR_RO(fw_version_hash_bootloader); + +static ssize_t fw_features_show(struct device *dev, struct device_attribute *a, + char *buf) +{ + struct omnia_mcu *mcu = dev_get_drvdata(dev); + + return sysfs_emit(buf, "0x%x\n", mcu->features); +} +static DEVICE_ATTR_RO(fw_features); + +static ssize_t mcu_type_show(struct device *dev, struct device_attribute *a, + char *buf) +{ + struct omnia_mcu *mcu = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%s\n", mcu->type); +} +static DEVICE_ATTR_RO(mcu_type); + +static ssize_t reset_selector_show(struct device *dev, + struct device_attribute *a, char *buf) +{ + u8 reply; + int err; + + err = omnia_cmd_read_u8(to_i2c_client(dev), OMNIA_CMD_GET_RESET, + &reply); + if (err) + return err; + + return sysfs_emit(buf, "%d\n", reply); +} +static DEVICE_ATTR_RO(reset_selector); + +static ssize_t serial_number_show(struct device *dev, + struct device_attribute *a, char *buf) +{ + struct omnia_mcu *mcu = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%016llX\n", mcu->board_serial_number); +} +static DEVICE_ATTR_RO(serial_number); + +static ssize_t first_mac_address_show(struct device *dev, + struct device_attribute *a, char *buf) +{ + struct omnia_mcu *mcu = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%pM\n", mcu->board_first_mac); +} +static DEVICE_ATTR_RO(first_mac_address); + +static ssize_t board_revision_show(struct device *dev, + struct device_attribute *a, char *buf) +{ + struct omnia_mcu *mcu = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%u\n", mcu->board_revision); +} +static DEVICE_ATTR_RO(board_revision); + +static struct attribute *omnia_mcu_base_attrs[] = { + &dev_attr_fw_version_hash_application.attr, + &dev_attr_fw_version_hash_bootloader.attr, + &dev_attr_fw_features.attr, + &dev_attr_mcu_type.attr, + &dev_attr_reset_selector.attr, + &dev_attr_serial_number.attr, + &dev_attr_first_mac_address.attr, + &dev_attr_board_revision.attr, + NULL +}; + +static umode_t omnia_mcu_base_attrs_visible(struct kobject *kobj, + struct attribute *a, int n) +{ + struct device *dev = kobj_to_dev(kobj); + struct omnia_mcu *mcu = dev_get_drvdata(dev); + + if ((a == &dev_attr_serial_number.attr || + a == &dev_attr_first_mac_address.attr || + a == &dev_attr_board_revision.attr) && + !(mcu->features & OMNIA_FEAT_BOARD_INFO)) + return 0; + + return a->mode; +} + +static const struct attribute_group omnia_mcu_base_group = { + .attrs = omnia_mcu_base_attrs, + .is_visible = omnia_mcu_base_attrs_visible, +}; + +static const struct attribute_group *omnia_mcu_groups[] = { + &omnia_mcu_base_group, + &omnia_mcu_gpio_group, + &omnia_mcu_poweroff_group, + 
NULL +}; + +static void omnia_mcu_print_version_hash(struct omnia_mcu *mcu, bool bootloader) +{ + const char *type = bootloader ? "bootloader" : "application"; + struct device *dev = &mcu->client->dev; + char version[OMNIA_FW_VERSION_HEX_LEN]; + int err; + + err = omnia_get_version_hash(mcu, bootloader, version); + if (err) { + dev_err(dev, "Cannot read MCU %s firmware version: %d\n", + type, err); + return; + } + + dev_info(dev, "MCU %s firmware version hash: %s\n", type, version); +} + +static const char *omnia_status_to_mcu_type(u16 status) +{ + switch (status & OMNIA_STS_MCU_TYPE_MASK) { + case OMNIA_STS_MCU_TYPE_STM32: + return "STM32"; + case OMNIA_STS_MCU_TYPE_GD32: + return "GD32"; + case OMNIA_STS_MCU_TYPE_MKL: + return "MKL"; + default: + return "unknown"; + } +} + +static void omnia_info_missing_feature(struct device *dev, const char *feature) +{ + dev_info(dev, + "Your board's MCU firmware does not support the %s feature.\n", + feature); +} + +static int omnia_mcu_read_features(struct omnia_mcu *mcu) +{ + static const struct { + u16 mask; + const char *name; + } features[] = { +#define _DEF_FEAT(_n, _m) { OMNIA_FEAT_ ## _n, _m } + _DEF_FEAT(EXT_CMDS, "extended control and status"), + _DEF_FEAT(WDT_PING, "watchdog pinging"), + _DEF_FEAT(LED_STATE_EXT_MASK, "peripheral LED pins reading"), + _DEF_FEAT(NEW_INT_API, "new interrupt API"), + _DEF_FEAT(POWEROFF_WAKEUP, "poweroff and wakeup"), + _DEF_FEAT(TRNG, "true random number generator"), +#undef _DEF_FEAT + }; + struct i2c_client *client = mcu->client; + struct device *dev = &client->dev; + bool suggest_fw_upgrade = false; + u16 status; + int err; + + /* status word holds MCU type, which we need below */ + err = omnia_cmd_read_u16(client, OMNIA_CMD_GET_STATUS_WORD, &status); + if (err) + return err; + + /* + * Check whether MCU firmware supports the OMNIA_CMD_GET_FEATURES + * command. + */ + if (status & OMNIA_STS_FEATURES_SUPPORTED) { + /* try read 32-bit features */ + err = omnia_cmd_read_u32(client, OMNIA_CMD_GET_FEATURES, + &mcu->features); + if (err) { + /* try read 16-bit features */ + u16 features16; + + err = omnia_cmd_read_u16(client, OMNIA_CMD_GET_FEATURES, + &features16); + if (err) + return err; + + mcu->features = features16; + } else { + if (mcu->features & OMNIA_FEAT_FROM_BIT_16_INVALID) + mcu->features &= GENMASK(15, 0); + } + } else { + dev_info(dev, + "Your board's MCU firmware does not support feature reading.\n"); + suggest_fw_upgrade = true; + } + + mcu->type = omnia_status_to_mcu_type(status); + dev_info(dev, "MCU type %s%s\n", mcu->type, + (mcu->features & OMNIA_FEAT_PERIPH_MCU) ? + ", with peripheral resets wired" : ""); + + omnia_mcu_print_version_hash(mcu, true); + + if (mcu->features & OMNIA_FEAT_BOOTLOADER) + dev_warn(dev, + "MCU is running bootloader firmware. 
Was firmware upgrade interrupted?\n"); + else + omnia_mcu_print_version_hash(mcu, false); + + for (unsigned int i = 0; i < ARRAY_SIZE(features); i++) { + if (mcu->features & features[i].mask) + continue; + + omnia_info_missing_feature(dev, features[i].name); + suggest_fw_upgrade = true; + } + + if (suggest_fw_upgrade) + dev_info(dev, + "Consider upgrading MCU firmware with the omnia-mcutool utility.\n"); + + return 0; +} + +static int omnia_mcu_read_board_info(struct omnia_mcu *mcu) +{ + u8 reply[1 + OMNIA_BOARD_INFO_LEN]; + int err; + + err = omnia_cmd_read(mcu->client, OMNIA_CMD_BOARD_INFO_GET, reply, + sizeof(reply)); + if (err) + return err; + + if (reply[0] != OMNIA_BOARD_INFO_LEN) + return -EIO; + + mcu->board_serial_number = get_unaligned_le64(&reply[1]); + + /* we can't use ether_addr_copy() because reply is not u16-aligned */ + memcpy(mcu->board_first_mac, &reply[9], sizeof(mcu->board_first_mac)); + + mcu->board_revision = reply[15]; + + return 0; +} + +static int omnia_mcu_probe(struct i2c_client *client) +{ + struct device *dev = &client->dev; + struct omnia_mcu *mcu; + int err; + + if (!client->irq) + return dev_err_probe(dev, -EINVAL, "IRQ resource not found\n"); + + mcu = devm_kzalloc(dev, sizeof(*mcu), GFP_KERNEL); + if (!mcu) + return -ENOMEM; + + mcu->client = client; + i2c_set_clientdata(client, mcu); + + err = omnia_mcu_read_features(mcu); + if (err) + return dev_err_probe(dev, err, + "Cannot determine MCU supported features\n"); + + if (mcu->features & OMNIA_FEAT_BOARD_INFO) { + err = omnia_mcu_read_board_info(mcu); + if (err) + return dev_err_probe(dev, err, + "Cannot read board info\n"); + } + + err = omnia_mcu_register_sys_off_and_wakeup(mcu); + if (err) + return err; + + err = omnia_mcu_register_watchdog(mcu); + if (err) + return err; + + err = omnia_mcu_register_gpiochip(mcu); + if (err) + return err; + + return omnia_mcu_register_trng(mcu); +} + +static const struct of_device_id of_omnia_mcu_match[] = { + { .compatible = "cznic,turris-omnia-mcu" }, + {} +}; + +static struct i2c_driver omnia_mcu_driver = { + .probe = omnia_mcu_probe, + .driver = { + .name = "turris-omnia-mcu", + .of_match_table = of_omnia_mcu_match, + .dev_groups = omnia_mcu_groups, + }, +}; +module_i2c_driver(omnia_mcu_driver); + +MODULE_AUTHOR("Marek Behun <kabel@kernel.org>"); +MODULE_DESCRIPTION("CZ.NIC's Turris Omnia MCU"); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/cznic/turris-omnia-mcu-gpio.c b/drivers/platform/cznic/turris-omnia-mcu-gpio.c new file mode 100644 index 000000000000..91da56a704c7 --- /dev/null +++ b/drivers/platform/cznic/turris-omnia-mcu-gpio.c @@ -0,0 +1,1095 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * CZ.NIC's Turris Omnia MCU GPIO and IRQ driver + * + * 2024 by Marek Behún <kabel@kernel.org> + */ + +#include <linux/array_size.h> +#include <linux/bitfield.h> +#include <linux/bitops.h> +#include <linux/bug.h> +#include <linux/cleanup.h> +#include <linux/device.h> +#include <linux/devm-helpers.h> +#include <linux/errno.h> +#include <linux/gpio/driver.h> +#include <linux/i2c.h> +#include <linux/interrupt.h> +#include <linux/mutex.h> +#include <linux/sysfs.h> +#include <linux/types.h> +#include <linux/workqueue.h> +#include <asm/unaligned.h> + +#include <linux/turris-omnia-mcu-interface.h> +#include "turris-omnia-mcu.h" + +#define OMNIA_CMD_INT_ARG_LEN 8 +#define FRONT_BUTTON_RELEASE_DELAY_MS 50 + +static const char * const omnia_mcu_gpio_templates[64] = { + /* GPIOs with value read from the 16-bit wide status */ + [4] = "MiniPCIe0 Card Detect", + [5] = 
"MiniPCIe0 mSATA Indicator", + [6] = "Front USB3 port over-current", + [7] = "Rear USB3 port over-current", + [8] = "Front USB3 port power", + [9] = "Rear USB3 port power", + [12] = "Front Button", + + /* GPIOs with value read from the 32-bit wide extended status */ + [16] = "SFP nDET", + [28] = "MiniPCIe0 LED", + [29] = "MiniPCIe1 LED", + [30] = "MiniPCIe2 LED", + [31] = "MiniPCIe0 PAN LED", + [32] = "MiniPCIe1 PAN LED", + [33] = "MiniPCIe2 PAN LED", + [34] = "WAN PHY LED0", + [35] = "WAN PHY LED1", + [36] = "LAN switch p0 LED0", + [37] = "LAN switch p0 LED1", + [38] = "LAN switch p1 LED0", + [39] = "LAN switch p1 LED1", + [40] = "LAN switch p2 LED0", + [41] = "LAN switch p2 LED1", + [42] = "LAN switch p3 LED0", + [43] = "LAN switch p3 LED1", + [44] = "LAN switch p4 LED0", + [45] = "LAN switch p4 LED1", + [46] = "LAN switch p5 LED0", + [47] = "LAN switch p5 LED1", + + /* GPIOs with value read from the 16-bit wide extended control status */ + [48] = "eMMC nRESET", + [49] = "LAN switch nRESET", + [50] = "WAN PHY nRESET", + [51] = "MiniPCIe0 nPERST", + [52] = "MiniPCIe1 nPERST", + [53] = "MiniPCIe2 nPERST", + [54] = "WAN PHY SFP mux", + [56] = "VHV power disable", +}; + +struct omnia_gpio { + u8 cmd; + u8 ctl_cmd; + u8 bit; + u8 ctl_bit; + u8 int_bit; + u16 feat; + u16 feat_mask; +}; + +#define OMNIA_GPIO_INVALID_INT_BIT 0xff + +#define _DEF_GPIO(_cmd, _ctl_cmd, _bit, _ctl_bit, _int_bit, _feat, _feat_mask) \ + { \ + .cmd = _cmd, \ + .ctl_cmd = _ctl_cmd, \ + .bit = _bit, \ + .ctl_bit = _ctl_bit, \ + .int_bit = (_int_bit) < 0 ? OMNIA_GPIO_INVALID_INT_BIT \ + : (_int_bit), \ + .feat = _feat, \ + .feat_mask = _feat_mask, \ + } + +#define _DEF_GPIO_STS(_name) \ + _DEF_GPIO(OMNIA_CMD_GET_STATUS_WORD, 0, __bf_shf(OMNIA_STS_ ## _name), \ + 0, __bf_shf(OMNIA_INT_ ## _name), 0, 0) + +#define _DEF_GPIO_CTL(_name) \ + _DEF_GPIO(OMNIA_CMD_GET_STATUS_WORD, OMNIA_CMD_GENERAL_CONTROL, \ + __bf_shf(OMNIA_STS_ ## _name), __bf_shf(OMNIA_CTL_ ## _name), \ + -1, 0, 0) + +#define _DEF_GPIO_EXT_STS(_name, _feat) \ + _DEF_GPIO(OMNIA_CMD_GET_EXT_STATUS_DWORD, 0, \ + __bf_shf(OMNIA_EXT_STS_ ## _name), 0, \ + __bf_shf(OMNIA_INT_ ## _name), \ + OMNIA_FEAT_ ## _feat | OMNIA_FEAT_EXT_CMDS, \ + OMNIA_FEAT_ ## _feat | OMNIA_FEAT_EXT_CMDS) + +#define _DEF_GPIO_EXT_STS_LED(_name, _ledext) \ + _DEF_GPIO(OMNIA_CMD_GET_EXT_STATUS_DWORD, 0, \ + __bf_shf(OMNIA_EXT_STS_ ## _name), 0, \ + __bf_shf(OMNIA_INT_ ## _name), \ + OMNIA_FEAT_LED_STATE_ ## _ledext, \ + OMNIA_FEAT_LED_STATE_EXT_MASK) + +#define _DEF_GPIO_EXT_STS_LEDALL(_name) \ + _DEF_GPIO(OMNIA_CMD_GET_EXT_STATUS_DWORD, 0, \ + __bf_shf(OMNIA_EXT_STS_ ## _name), 0, \ + __bf_shf(OMNIA_INT_ ## _name), \ + OMNIA_FEAT_LED_STATE_EXT_MASK, 0) + +#define _DEF_GPIO_EXT_CTL(_name, _feat) \ + _DEF_GPIO(OMNIA_CMD_GET_EXT_CONTROL_STATUS, OMNIA_CMD_EXT_CONTROL, \ + __bf_shf(OMNIA_EXT_CTL_ ## _name), \ + __bf_shf(OMNIA_EXT_CTL_ ## _name), -1, \ + OMNIA_FEAT_ ## _feat | OMNIA_FEAT_EXT_CMDS, \ + OMNIA_FEAT_ ## _feat | OMNIA_FEAT_EXT_CMDS) + +#define _DEF_INT(_name) \ + _DEF_GPIO(0, 0, 0, 0, __bf_shf(OMNIA_INT_ ## _name), 0, 0) + +static inline bool is_int_bit_valid(const struct omnia_gpio *gpio) +{ + return gpio->int_bit != OMNIA_GPIO_INVALID_INT_BIT; +} + +static const struct omnia_gpio omnia_gpios[64] = { + /* GPIOs with value read from the 16-bit wide status */ + [4] = _DEF_GPIO_STS(CARD_DET), + [5] = _DEF_GPIO_STS(MSATA_IND), + [6] = _DEF_GPIO_STS(USB30_OVC), + [7] = _DEF_GPIO_STS(USB31_OVC), + [8] = _DEF_GPIO_CTL(USB30_PWRON), + [9] = _DEF_GPIO_CTL(USB31_PWRON), + + /* brightness 
changed interrupt, no GPIO */ + [11] = _DEF_INT(BRIGHTNESS_CHANGED), + + [12] = _DEF_GPIO_STS(BUTTON_PRESSED), + + /* TRNG interrupt, no GPIO */ + [13] = _DEF_INT(TRNG), + + /* MESSAGE_SIGNED interrupt, no GPIO */ + [14] = _DEF_INT(MESSAGE_SIGNED), + + /* GPIOs with value read from the 32-bit wide extended status */ + [16] = _DEF_GPIO_EXT_STS(SFP_nDET, PERIPH_MCU), + [28] = _DEF_GPIO_EXT_STS_LEDALL(WLAN0_MSATA_LED), + [29] = _DEF_GPIO_EXT_STS_LEDALL(WLAN1_LED), + [30] = _DEF_GPIO_EXT_STS_LEDALL(WLAN2_LED), + [31] = _DEF_GPIO_EXT_STS_LED(WPAN0_LED, EXT), + [32] = _DEF_GPIO_EXT_STS_LED(WPAN1_LED, EXT), + [33] = _DEF_GPIO_EXT_STS_LED(WPAN2_LED, EXT), + [34] = _DEF_GPIO_EXT_STS_LEDALL(WAN_LED0), + [35] = _DEF_GPIO_EXT_STS_LED(WAN_LED1, EXT_V32), + [36] = _DEF_GPIO_EXT_STS_LEDALL(LAN0_LED0), + [37] = _DEF_GPIO_EXT_STS_LEDALL(LAN0_LED1), + [38] = _DEF_GPIO_EXT_STS_LEDALL(LAN1_LED0), + [39] = _DEF_GPIO_EXT_STS_LEDALL(LAN1_LED1), + [40] = _DEF_GPIO_EXT_STS_LEDALL(LAN2_LED0), + [41] = _DEF_GPIO_EXT_STS_LEDALL(LAN2_LED1), + [42] = _DEF_GPIO_EXT_STS_LEDALL(LAN3_LED0), + [43] = _DEF_GPIO_EXT_STS_LEDALL(LAN3_LED1), + [44] = _DEF_GPIO_EXT_STS_LEDALL(LAN4_LED0), + [45] = _DEF_GPIO_EXT_STS_LEDALL(LAN4_LED1), + [46] = _DEF_GPIO_EXT_STS_LEDALL(LAN5_LED0), + [47] = _DEF_GPIO_EXT_STS_LEDALL(LAN5_LED1), + + /* GPIOs with value read from the 16-bit wide extended control status */ + [48] = _DEF_GPIO_EXT_CTL(nRES_MMC, PERIPH_MCU), + [49] = _DEF_GPIO_EXT_CTL(nRES_LAN, PERIPH_MCU), + [50] = _DEF_GPIO_EXT_CTL(nRES_PHY, PERIPH_MCU), + [51] = _DEF_GPIO_EXT_CTL(nPERST0, PERIPH_MCU), + [52] = _DEF_GPIO_EXT_CTL(nPERST1, PERIPH_MCU), + [53] = _DEF_GPIO_EXT_CTL(nPERST2, PERIPH_MCU), + [54] = _DEF_GPIO_EXT_CTL(PHY_SFP, PERIPH_MCU), + [56] = _DEF_GPIO_EXT_CTL(nVHV_CTRL, PERIPH_MCU), +}; + +/* mapping from interrupts to indexes of GPIOs in the omnia_gpios array */ +const u8 omnia_int_to_gpio_idx[32] = { + [__bf_shf(OMNIA_INT_CARD_DET)] = 4, + [__bf_shf(OMNIA_INT_MSATA_IND)] = 5, + [__bf_shf(OMNIA_INT_USB30_OVC)] = 6, + [__bf_shf(OMNIA_INT_USB31_OVC)] = 7, + [__bf_shf(OMNIA_INT_BUTTON_PRESSED)] = 12, + [__bf_shf(OMNIA_INT_TRNG)] = 13, + [__bf_shf(OMNIA_INT_MESSAGE_SIGNED)] = 14, + [__bf_shf(OMNIA_INT_SFP_nDET)] = 16, + [__bf_shf(OMNIA_INT_BRIGHTNESS_CHANGED)] = 11, + [__bf_shf(OMNIA_INT_WLAN0_MSATA_LED)] = 28, + [__bf_shf(OMNIA_INT_WLAN1_LED)] = 29, + [__bf_shf(OMNIA_INT_WLAN2_LED)] = 30, + [__bf_shf(OMNIA_INT_WPAN0_LED)] = 31, + [__bf_shf(OMNIA_INT_WPAN1_LED)] = 32, + [__bf_shf(OMNIA_INT_WPAN2_LED)] = 33, + [__bf_shf(OMNIA_INT_WAN_LED0)] = 34, + [__bf_shf(OMNIA_INT_WAN_LED1)] = 35, + [__bf_shf(OMNIA_INT_LAN0_LED0)] = 36, + [__bf_shf(OMNIA_INT_LAN0_LED1)] = 37, + [__bf_shf(OMNIA_INT_LAN1_LED0)] = 38, + [__bf_shf(OMNIA_INT_LAN1_LED1)] = 39, + [__bf_shf(OMNIA_INT_LAN2_LED0)] = 40, + [__bf_shf(OMNIA_INT_LAN2_LED1)] = 41, + [__bf_shf(OMNIA_INT_LAN3_LED0)] = 42, + [__bf_shf(OMNIA_INT_LAN3_LED1)] = 43, + [__bf_shf(OMNIA_INT_LAN4_LED0)] = 44, + [__bf_shf(OMNIA_INT_LAN4_LED1)] = 45, + [__bf_shf(OMNIA_INT_LAN5_LED0)] = 46, + [__bf_shf(OMNIA_INT_LAN5_LED1)] = 47, +}; + +/* index of PHY_SFP GPIO in the omnia_gpios array */ +#define OMNIA_GPIO_PHY_SFP_OFFSET 54 + +static int omnia_ctl_cmd_locked(struct omnia_mcu *mcu, u8 cmd, u16 val, u16 mask) +{ + unsigned int len; + u8 buf[5]; + + buf[0] = cmd; + + switch (cmd) { + case OMNIA_CMD_GENERAL_CONTROL: + buf[1] = val; + buf[2] = mask; + len = 3; + break; + + case OMNIA_CMD_EXT_CONTROL: + put_unaligned_le16(val, &buf[1]); + put_unaligned_le16(mask, &buf[3]); + len = 5; + break; + + default: + 
BUG(); + } + + return omnia_cmd_write(mcu->client, buf, len); +} + +static int omnia_ctl_cmd(struct omnia_mcu *mcu, u8 cmd, u16 val, u16 mask) +{ + guard(mutex)(&mcu->lock); + + return omnia_ctl_cmd_locked(mcu, cmd, val, mask); +} + +static int omnia_gpio_request(struct gpio_chip *gc, unsigned int offset) +{ + if (!omnia_gpios[offset].cmd) + return -EINVAL; + + return 0; +} + +static int omnia_gpio_get_direction(struct gpio_chip *gc, unsigned int offset) +{ + struct omnia_mcu *mcu = gpiochip_get_data(gc); + + if (offset == OMNIA_GPIO_PHY_SFP_OFFSET) { + int val; + + scoped_guard(mutex, &mcu->lock) { + val = omnia_cmd_read_bit(mcu->client, + OMNIA_CMD_GET_EXT_CONTROL_STATUS, + OMNIA_EXT_CTL_PHY_SFP_AUTO); + if (val < 0) + return val; + } + + if (val) + return GPIO_LINE_DIRECTION_IN; + + return GPIO_LINE_DIRECTION_OUT; + } + + if (omnia_gpios[offset].ctl_cmd) + return GPIO_LINE_DIRECTION_OUT; + + return GPIO_LINE_DIRECTION_IN; +} + +static int omnia_gpio_direction_input(struct gpio_chip *gc, unsigned int offset) +{ + const struct omnia_gpio *gpio = &omnia_gpios[offset]; + struct omnia_mcu *mcu = gpiochip_get_data(gc); + + if (offset == OMNIA_GPIO_PHY_SFP_OFFSET) + return omnia_ctl_cmd(mcu, OMNIA_CMD_EXT_CONTROL, + OMNIA_EXT_CTL_PHY_SFP_AUTO, + OMNIA_EXT_CTL_PHY_SFP_AUTO); + + if (gpio->ctl_cmd) + return -ENOTSUPP; + + return 0; +} + +static int omnia_gpio_direction_output(struct gpio_chip *gc, + unsigned int offset, int value) +{ + const struct omnia_gpio *gpio = &omnia_gpios[offset]; + struct omnia_mcu *mcu = gpiochip_get_data(gc); + u16 val, mask; + + if (!gpio->ctl_cmd) + return -ENOTSUPP; + + mask = BIT(gpio->ctl_bit); + val = value ? mask : 0; + + if (offset == OMNIA_GPIO_PHY_SFP_OFFSET) + mask |= OMNIA_EXT_CTL_PHY_SFP_AUTO; + + return omnia_ctl_cmd(mcu, gpio->ctl_cmd, val, mask); +} + +static int omnia_gpio_get(struct gpio_chip *gc, unsigned int offset) +{ + const struct omnia_gpio *gpio = &omnia_gpios[offset]; + struct omnia_mcu *mcu = gpiochip_get_data(gc); + + /* + * If firmware does not support the new interrupt API, we are informed + * of every change of the status word by an interrupt from MCU and save + * its value in the interrupt service routine. Simply return the saved + * value. + */ + if (gpio->cmd == OMNIA_CMD_GET_STATUS_WORD && + !(mcu->features & OMNIA_FEAT_NEW_INT_API)) + return test_bit(gpio->bit, &mcu->last_status); + + guard(mutex)(&mcu->lock); + + /* + * If firmware does support the new interrupt API, we may have cached + * the value of a GPIO in the interrupt service routine. If not, read + * the relevant bit now. 
+ */ + if (is_int_bit_valid(gpio) && test_bit(gpio->int_bit, &mcu->is_cached)) + return test_bit(gpio->int_bit, &mcu->cached); + + return omnia_cmd_read_bit(mcu->client, gpio->cmd, BIT(gpio->bit)); +} + +static unsigned long * +_relevant_field_for_sts_cmd(u8 cmd, unsigned long *sts, unsigned long *ext_sts, + unsigned long *ext_ctl) +{ + switch (cmd) { + case OMNIA_CMD_GET_STATUS_WORD: + return sts; + case OMNIA_CMD_GET_EXT_STATUS_DWORD: + return ext_sts; + case OMNIA_CMD_GET_EXT_CONTROL_STATUS: + return ext_ctl; + default: + return NULL; + } +} + +static int omnia_gpio_get_multiple(struct gpio_chip *gc, unsigned long *mask, + unsigned long *bits) +{ + unsigned long sts = 0, ext_sts = 0, ext_ctl = 0, *field; + struct omnia_mcu *mcu = gpiochip_get_data(gc); + struct i2c_client *client = mcu->client; + unsigned int i; + int err; + + /* determine which bits to read from the 3 possible commands */ + for_each_set_bit(i, mask, ARRAY_SIZE(omnia_gpios)) { + field = _relevant_field_for_sts_cmd(omnia_gpios[i].cmd, + &sts, &ext_sts, &ext_ctl); + if (!field) + continue; + + __set_bit(omnia_gpios[i].bit, field); + } + + guard(mutex)(&mcu->lock); + + if (mcu->features & OMNIA_FEAT_NEW_INT_API) { + /* read relevant bits from status */ + err = omnia_cmd_read_bits(client, OMNIA_CMD_GET_STATUS_WORD, + sts, &sts); + if (err) + return err; + } else { + /* + * Use status word value cached in the interrupt service routine + * if firmware does not support the new interrupt API. + */ + sts = mcu->last_status; + } + + /* read relevant bits from extended status */ + err = omnia_cmd_read_bits(client, OMNIA_CMD_GET_EXT_STATUS_DWORD, + ext_sts, &ext_sts); + if (err) + return err; + + /* read relevant bits from extended control */ + err = omnia_cmd_read_bits(client, OMNIA_CMD_GET_EXT_CONTROL_STATUS, + ext_ctl, &ext_ctl); + if (err) + return err; + + /* assign relevant bits in result */ + for_each_set_bit(i, mask, ARRAY_SIZE(omnia_gpios)) { + field = _relevant_field_for_sts_cmd(omnia_gpios[i].cmd, + &sts, &ext_sts, &ext_ctl); + if (!field) + continue; + + __assign_bit(i, bits, test_bit(omnia_gpios[i].bit, field)); + } + + return 0; +} + +static void omnia_gpio_set(struct gpio_chip *gc, unsigned int offset, int value) +{ + const struct omnia_gpio *gpio = &omnia_gpios[offset]; + struct omnia_mcu *mcu = gpiochip_get_data(gc); + u16 val, mask; + + if (!gpio->ctl_cmd) + return; + + mask = BIT(gpio->ctl_bit); + val = value ? 
mask : 0; + + omnia_ctl_cmd(mcu, gpio->ctl_cmd, val, mask); +} + +static void omnia_gpio_set_multiple(struct gpio_chip *gc, unsigned long *mask, + unsigned long *bits) +{ + unsigned long ctl = 0, ctl_mask = 0, ext_ctl = 0, ext_ctl_mask = 0; + struct omnia_mcu *mcu = gpiochip_get_data(gc); + unsigned int i; + + for_each_set_bit(i, mask, ARRAY_SIZE(omnia_gpios)) { + unsigned long *field, *field_mask; + u8 bit = omnia_gpios[i].ctl_bit; + + switch (omnia_gpios[i].ctl_cmd) { + case OMNIA_CMD_GENERAL_CONTROL: + field = &ctl; + field_mask = &ctl_mask; + break; + case OMNIA_CMD_EXT_CONTROL: + field = &ext_ctl; + field_mask = &ext_ctl_mask; + break; + default: + field = field_mask = NULL; + break; + } + + if (!field) + continue; + + __set_bit(bit, field_mask); + __assign_bit(bit, field, test_bit(i, bits)); + } + + guard(mutex)(&mcu->lock); + + if (ctl_mask) + omnia_ctl_cmd_locked(mcu, OMNIA_CMD_GENERAL_CONTROL, + ctl, ctl_mask); + + if (ext_ctl_mask) + omnia_ctl_cmd_locked(mcu, OMNIA_CMD_EXT_CONTROL, + ext_ctl, ext_ctl_mask); +} + +static bool omnia_gpio_available(struct omnia_mcu *mcu, + const struct omnia_gpio *gpio) +{ + if (gpio->feat_mask) + return (mcu->features & gpio->feat_mask) == gpio->feat; + + if (gpio->feat) + return mcu->features & gpio->feat; + + return true; +} + +static int omnia_gpio_init_valid_mask(struct gpio_chip *gc, + unsigned long *valid_mask, + unsigned int ngpios) +{ + struct omnia_mcu *mcu = gpiochip_get_data(gc); + + for (unsigned int i = 0; i < ngpios; i++) { + const struct omnia_gpio *gpio = &omnia_gpios[i]; + + if (gpio->cmd || is_int_bit_valid(gpio)) + __assign_bit(i, valid_mask, + omnia_gpio_available(mcu, gpio)); + else + __clear_bit(i, valid_mask); + } + + return 0; +} + +static int omnia_gpio_of_xlate(struct gpio_chip *gc, + const struct of_phandle_args *gpiospec, + u32 *flags) +{ + u32 bank, gpio; + + if (WARN_ON(gpiospec->args_count != 3)) + return -EINVAL; + + if (flags) + *flags = gpiospec->args[2]; + + bank = gpiospec->args[0]; + gpio = gpiospec->args[1]; + + switch (bank) { + case 0: + return gpio < 16 ? gpio : -EINVAL; + case 1: + return gpio < 32 ? 16 + gpio : -EINVAL; + case 2: + return gpio < 16 ? 
48 + gpio : -EINVAL; + default: + return -EINVAL; + } +} + +static void omnia_irq_shutdown(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct omnia_mcu *mcu = gpiochip_get_data(gc); + irq_hw_number_t hwirq = irqd_to_hwirq(d); + u8 bit = omnia_gpios[hwirq].int_bit; + + __clear_bit(bit, &mcu->rising); + __clear_bit(bit, &mcu->falling); +} + +static void omnia_irq_mask(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct omnia_mcu *mcu = gpiochip_get_data(gc); + irq_hw_number_t hwirq = irqd_to_hwirq(d); + u8 bit = omnia_gpios[hwirq].int_bit; + + if (!omnia_gpios[hwirq].cmd) + __clear_bit(bit, &mcu->rising); + __clear_bit(bit, &mcu->mask); + gpiochip_disable_irq(gc, hwirq); +} + +static void omnia_irq_unmask(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct omnia_mcu *mcu = gpiochip_get_data(gc); + irq_hw_number_t hwirq = irqd_to_hwirq(d); + u8 bit = omnia_gpios[hwirq].int_bit; + + gpiochip_enable_irq(gc, hwirq); + __set_bit(bit, &mcu->mask); + if (!omnia_gpios[hwirq].cmd) + __set_bit(bit, &mcu->rising); +} + +static int omnia_irq_set_type(struct irq_data *d, unsigned int type) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct omnia_mcu *mcu = gpiochip_get_data(gc); + irq_hw_number_t hwirq = irqd_to_hwirq(d); + struct device *dev = &mcu->client->dev; + u8 bit = omnia_gpios[hwirq].int_bit; + + if (!(type & IRQ_TYPE_EDGE_BOTH)) { + dev_err(dev, "irq %u: unsupported type %u\n", d->irq, type); + return -EINVAL; + } + + __assign_bit(bit, &mcu->rising, type & IRQ_TYPE_EDGE_RISING); + __assign_bit(bit, &mcu->falling, type & IRQ_TYPE_EDGE_FALLING); + + return 0; +} + +static void omnia_irq_bus_lock(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct omnia_mcu *mcu = gpiochip_get_data(gc); + + /* nothing to do if MCU firmware does not support new interrupt API */ + if (!(mcu->features & OMNIA_FEAT_NEW_INT_API)) + return; + + mutex_lock(&mcu->lock); +} + +/** + * omnia_mask_interleave - Interleaves the bytes from @rising and @falling + * @dst: the destination u8 array of interleaved bytes + * @rising: rising mask + * @falling: falling mask + * + * Interleaves the little-endian bytes from @rising and @falling words. + * + * If @rising = (r0, r1, r2, r3) and @falling = (f0, f1, f2, f3), the result is + * @dst = (r0, f0, r1, f1, r2, f2, r3, f3). + * + * The MCU receives an interrupt mask and reports a pending interrupt bitmap in + * this interleaved format. The rationale behind this is that the low-indexed + * bits are more important - in many cases, the user will be interested only in + * interrupts with indexes 0 to 7, and so the system can stop reading after + * first 2 bytes (r0, f0), to save time on the slow I2C bus. + * + * Feel free to remove this function and its inverse, omnia_mask_deinterleave, + * and use an appropriate bitmap_*() function once such a function exists. 
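A small worked example of the layout described above, with arbitrary values: rising = 0x00000102 and falling = 0x00000304 interleave to { 0x02, 0x04, 0x01, 0x03, 0x00, 0x00, 0x00, 0x00 }, i.e. byte i of the rising mask lands at dst[2 * i] and byte i of the falling mask at dst[2 * i + 1]:

	u8 dst[OMNIA_CMD_INT_ARG_LEN];

	omnia_mask_interleave(dst, 0x00000102, 0x00000304);
	/* dst == { 0x02, 0x04, 0x01, 0x03, 0x00, 0x00, 0x00, 0x00 } */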
+ */ +static void +omnia_mask_interleave(u8 *dst, unsigned long rising, unsigned long falling) +{ + for (unsigned int i = 0; i < sizeof(u32); i++) { + dst[2 * i] = rising >> (8 * i); + dst[2 * i + 1] = falling >> (8 * i); + } +} + +/** + * omnia_mask_deinterleave - Deinterleaves the bytes into @rising and @falling + * @src: the source u8 array containing the interleaved bytes + * @rising: pointer where to store the rising mask gathered from @src + * @falling: pointer where to store the falling mask gathered from @src + * + * This is the inverse function to omnia_mask_interleave. + */ +static void omnia_mask_deinterleave(const u8 *src, unsigned long *rising, + unsigned long *falling) +{ + *rising = *falling = 0; + + for (unsigned int i = 0; i < sizeof(u32); i++) { + *rising |= src[2 * i] << (8 * i); + *falling |= src[2 * i + 1] << (8 * i); + } +} + +static void omnia_irq_bus_sync_unlock(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct omnia_mcu *mcu = gpiochip_get_data(gc); + struct device *dev = &mcu->client->dev; + u8 cmd[1 + OMNIA_CMD_INT_ARG_LEN]; + unsigned long rising, falling; + int err; + + /* nothing to do if MCU firmware does not support new interrupt API */ + if (!(mcu->features & OMNIA_FEAT_NEW_INT_API)) + return; + + cmd[0] = OMNIA_CMD_SET_INT_MASK; + + rising = mcu->rising & mcu->mask; + falling = mcu->falling & mcu->mask; + + /* interleave the rising and falling bytes into the command arguments */ + omnia_mask_interleave(&cmd[1], rising, falling); + + dev_dbg(dev, "set int mask %8ph\n", &cmd[1]); + + err = omnia_cmd_write(mcu->client, cmd, sizeof(cmd)); + if (err) { + dev_err(dev, "Cannot set mask: %d\n", err); + goto unlock; + } + + /* + * Remember which GPIOs have both rising and falling interrupts enabled. + * For those we will cache their value so that .get() method is faster. + * We also need to forget cached values of GPIOs that aren't cached + * anymore. + */ + mcu->both = rising & falling; + mcu->is_cached &= mcu->both; + +unlock: + mutex_unlock(&mcu->lock); +} + +static const struct irq_chip omnia_mcu_irq_chip = { + .name = "Turris Omnia MCU interrupts", + .irq_shutdown = omnia_irq_shutdown, + .irq_mask = omnia_irq_mask, + .irq_unmask = omnia_irq_unmask, + .irq_set_type = omnia_irq_set_type, + .irq_bus_lock = omnia_irq_bus_lock, + .irq_bus_sync_unlock = omnia_irq_bus_sync_unlock, + .flags = IRQCHIP_IMMUTABLE, + GPIOCHIP_IRQ_RESOURCE_HELPERS, +}; + +static void omnia_irq_init_valid_mask(struct gpio_chip *gc, + unsigned long *valid_mask, + unsigned int ngpios) +{ + struct omnia_mcu *mcu = gpiochip_get_data(gc); + + for (unsigned int i = 0; i < ngpios; i++) { + const struct omnia_gpio *gpio = &omnia_gpios[i]; + + if (is_int_bit_valid(gpio)) + __assign_bit(i, valid_mask, + omnia_gpio_available(mcu, gpio)); + else + __clear_bit(i, valid_mask); + } +} + +static int omnia_irq_init_hw(struct gpio_chip *gc) +{ + struct omnia_mcu *mcu = gpiochip_get_data(gc); + u8 cmd[1 + OMNIA_CMD_INT_ARG_LEN] = {}; + + cmd[0] = OMNIA_CMD_SET_INT_MASK; + + return omnia_cmd_write(mcu->client, cmd, sizeof(cmd)); +} + +/* + * Determine how many bytes we need to read from the reply to the + * OMNIA_CMD_GET_INT_AND_CLEAR command in order to retrieve all unmasked + * interrupts. 
+ */ +static unsigned int +omnia_irq_compute_pending_length(unsigned long rising, unsigned long falling) +{ + return max(omnia_compute_reply_length(rising, true, 0), + omnia_compute_reply_length(falling, true, 1)); +} + +static bool omnia_irq_read_pending_new(struct omnia_mcu *mcu, + unsigned long *pending) +{ + struct device *dev = &mcu->client->dev; + u8 reply[OMNIA_CMD_INT_ARG_LEN] = {}; + unsigned long rising, falling; + unsigned int len; + int err; + + len = omnia_irq_compute_pending_length(mcu->rising & mcu->mask, + mcu->falling & mcu->mask); + if (!len) + return false; + + guard(mutex)(&mcu->lock); + + err = omnia_cmd_read(mcu->client, OMNIA_CMD_GET_INT_AND_CLEAR, reply, + len); + if (err) { + dev_err(dev, "Cannot read pending IRQs: %d\n", err); + return false; + } + + /* deinterleave the reply bytes into rising and falling */ + omnia_mask_deinterleave(reply, &rising, &falling); + + rising &= mcu->mask; + falling &= mcu->mask; + *pending = rising | falling; + + /* cache values for GPIOs that have both edges enabled */ + mcu->is_cached &= ~(rising & falling); + mcu->is_cached |= mcu->both & (rising ^ falling); + mcu->cached = (mcu->cached | rising) & ~falling; + + return true; +} + +static int omnia_read_status_word_old_fw(struct omnia_mcu *mcu, + unsigned long *status) +{ + u16 raw_status; + int err; + + err = omnia_cmd_read_u16(mcu->client, OMNIA_CMD_GET_STATUS_WORD, + &raw_status); + if (err) + return err; + + /* + * Old firmware has a bug wherein it never resets the USB port + * overcurrent bits back to zero. Ignore them. + */ + *status = raw_status & ~(OMNIA_STS_USB30_OVC | OMNIA_STS_USB31_OVC); + + return 0; +} + +static void button_release_emul_fn(struct work_struct *work) +{ + struct omnia_mcu *mcu = container_of(to_delayed_work(work), + struct omnia_mcu, + button_release_emul_work); + + mcu->button_pressed_emul = false; + generic_handle_irq_safe(mcu->client->irq); +} + +static void +fill_int_from_sts(unsigned long *rising, unsigned long *falling, + unsigned long rising_sts, unsigned long falling_sts, + unsigned long sts_bit, unsigned long int_bit) +{ + if (rising_sts & sts_bit) + *rising |= int_bit; + if (falling_sts & sts_bit) + *falling |= int_bit; +} + +static bool omnia_irq_read_pending_old(struct omnia_mcu *mcu, + unsigned long *pending) +{ + unsigned long status, rising_sts, falling_sts, rising, falling; + struct device *dev = &mcu->client->dev; + int err; + + guard(mutex)(&mcu->lock); + + err = omnia_read_status_word_old_fw(mcu, &status); + if (err) { + dev_err(dev, "Cannot read pending IRQs: %d\n", err); + return false; + } + + /* + * The old firmware triggers an interrupt whenever status word changes, + * but does not inform about which bits rose or fell. We need to compute + * this here by comparing with the last status word value. + * + * The OMNIA_STS_BUTTON_PRESSED bit needs special handling, because the + * old firmware clears the OMNIA_STS_BUTTON_PRESSED bit on successful + * completion of the OMNIA_CMD_GET_STATUS_WORD command, resulting in + * another interrupt: + * - first we get an interrupt, we read the status word where + * OMNIA_STS_BUTTON_PRESSED is present, + * - MCU clears the OMNIA_STS_BUTTON_PRESSED bit because we read the + * status word, + * - we get another interrupt because the status word changed again + * (the OMNIA_STS_BUTTON_PRESSED bit was cleared). + * + * The gpiolib-cdev, gpiolib-sysfs and gpio-keys input driver all call + * the gpiochip's .get() method after an edge event on a requested GPIO + * occurs. 
+ * + * We ensure that the .get() method reads 1 for the button GPIO for some + * time. + */ + + if (status & OMNIA_STS_BUTTON_PRESSED) { + mcu->button_pressed_emul = true; + mod_delayed_work(system_wq, &mcu->button_release_emul_work, + msecs_to_jiffies(FRONT_BUTTON_RELEASE_DELAY_MS)); + } else if (mcu->button_pressed_emul) { + status |= OMNIA_STS_BUTTON_PRESSED; + } + + rising_sts = ~mcu->last_status & status; + falling_sts = mcu->last_status & ~status; + + mcu->last_status = status; + + /* + * Fill in the relevant interrupt bits from status bits for CARD_DET, + * MSATA_IND and BUTTON_PRESSED. + */ + rising = 0; + falling = 0; + fill_int_from_sts(&rising, &falling, rising_sts, falling_sts, + OMNIA_STS_CARD_DET, OMNIA_INT_CARD_DET); + fill_int_from_sts(&rising, &falling, rising_sts, falling_sts, + OMNIA_STS_MSATA_IND, OMNIA_INT_MSATA_IND); + fill_int_from_sts(&rising, &falling, rising_sts, falling_sts, + OMNIA_STS_BUTTON_PRESSED, OMNIA_INT_BUTTON_PRESSED); + + /* Use only bits that are enabled */ + rising &= mcu->rising & mcu->mask; + falling &= mcu->falling & mcu->mask; + *pending = rising | falling; + + return true; +} + +static bool omnia_irq_read_pending(struct omnia_mcu *mcu, + unsigned long *pending) +{ + if (mcu->features & OMNIA_FEAT_NEW_INT_API) + return omnia_irq_read_pending_new(mcu, pending); + else + return omnia_irq_read_pending_old(mcu, pending); +} + +static irqreturn_t omnia_irq_thread_handler(int irq, void *dev_id) +{ + struct omnia_mcu *mcu = dev_id; + struct irq_domain *domain; + unsigned long pending; + unsigned int i; + + if (!omnia_irq_read_pending(mcu, &pending)) + return IRQ_NONE; + + domain = mcu->gc.irq.domain; + + for_each_set_bit(i, &pending, 32) { + unsigned int nested_irq; + + nested_irq = irq_find_mapping(domain, omnia_int_to_gpio_idx[i]); + + handle_nested_irq(nested_irq); + } + + return IRQ_RETVAL(pending); +} + +static const char * const front_button_modes[] = { "mcu", "cpu" }; + +static ssize_t front_button_mode_show(struct device *dev, + struct device_attribute *a, char *buf) +{ + struct omnia_mcu *mcu = dev_get_drvdata(dev); + int val; + + if (mcu->features & OMNIA_FEAT_NEW_INT_API) { + val = omnia_cmd_read_bit(mcu->client, OMNIA_CMD_GET_STATUS_WORD, + OMNIA_STS_BUTTON_MODE); + if (val < 0) + return val; + } else { + val = !!(mcu->last_status & OMNIA_STS_BUTTON_MODE); + } + + return sysfs_emit(buf, "%s\n", front_button_modes[val]); +} + +static ssize_t front_button_mode_store(struct device *dev, + struct device_attribute *a, + const char *buf, size_t count) +{ + struct omnia_mcu *mcu = dev_get_drvdata(dev); + int err, i; + + i = sysfs_match_string(front_button_modes, buf); + if (i < 0) + return i; + + err = omnia_ctl_cmd_locked(mcu, OMNIA_CMD_GENERAL_CONTROL, + i ? 
OMNIA_CTL_BUTTON_MODE : 0, + OMNIA_CTL_BUTTON_MODE); + if (err) + return err; + + return count; +} +static DEVICE_ATTR_RW(front_button_mode); + +static struct attribute *omnia_mcu_gpio_attrs[] = { + &dev_attr_front_button_mode.attr, + NULL +}; + +const struct attribute_group omnia_mcu_gpio_group = { + .attrs = omnia_mcu_gpio_attrs, +}; + +int omnia_mcu_register_gpiochip(struct omnia_mcu *mcu) +{ + bool new_api = mcu->features & OMNIA_FEAT_NEW_INT_API; + struct device *dev = &mcu->client->dev; + unsigned long irqflags; + int err; + + err = devm_mutex_init(dev, &mcu->lock); + if (err) + return err; + + mcu->gc.request = omnia_gpio_request; + mcu->gc.get_direction = omnia_gpio_get_direction; + mcu->gc.direction_input = omnia_gpio_direction_input; + mcu->gc.direction_output = omnia_gpio_direction_output; + mcu->gc.get = omnia_gpio_get; + mcu->gc.get_multiple = omnia_gpio_get_multiple; + mcu->gc.set = omnia_gpio_set; + mcu->gc.set_multiple = omnia_gpio_set_multiple; + mcu->gc.init_valid_mask = omnia_gpio_init_valid_mask; + mcu->gc.can_sleep = true; + mcu->gc.names = omnia_mcu_gpio_templates; + mcu->gc.base = -1; + mcu->gc.ngpio = ARRAY_SIZE(omnia_gpios); + mcu->gc.label = "Turris Omnia MCU GPIOs"; + mcu->gc.parent = dev; + mcu->gc.owner = THIS_MODULE; + mcu->gc.of_gpio_n_cells = 3; + mcu->gc.of_xlate = omnia_gpio_of_xlate; + + gpio_irq_chip_set_chip(&mcu->gc.irq, &omnia_mcu_irq_chip); + /* This will let us handle the parent IRQ in the driver */ + mcu->gc.irq.parent_handler = NULL; + mcu->gc.irq.num_parents = 0; + mcu->gc.irq.parents = NULL; + mcu->gc.irq.default_type = IRQ_TYPE_NONE; + mcu->gc.irq.handler = handle_bad_irq; + mcu->gc.irq.threaded = true; + if (new_api) + mcu->gc.irq.init_hw = omnia_irq_init_hw; + mcu->gc.irq.init_valid_mask = omnia_irq_init_valid_mask; + + err = devm_gpiochip_add_data(dev, &mcu->gc, mcu); + if (err) + return dev_err_probe(dev, err, "Cannot add GPIO chip\n"); + + /* + * Before requesting the interrupt, if firmware does not support the new + * interrupt API, we need to cache the value of the status word, so that + * when it changes, we may compare the new value with the cached one in + * the interrupt handler. + */ + if (!new_api) { + err = omnia_read_status_word_old_fw(mcu, &mcu->last_status); + if (err) + return dev_err_probe(dev, err, + "Cannot read status word\n"); + + INIT_DELAYED_WORK(&mcu->button_release_emul_work, + button_release_emul_fn); + } + + irqflags = IRQF_ONESHOT; + if (new_api) + irqflags |= IRQF_TRIGGER_LOW; + else + irqflags |= IRQF_TRIGGER_FALLING; + + err = devm_request_threaded_irq(dev, mcu->client->irq, NULL, + omnia_irq_thread_handler, irqflags, + "turris-omnia-mcu", mcu); + if (err) + return dev_err_probe(dev, err, "Cannot request IRQ\n"); + + if (!new_api) { + /* + * The button_release_emul_work has to be initialized before the + * thread is requested, and on driver remove it needs to be + * canceled before the thread is freed. Therefore we can't use + * devm_delayed_work_autocancel() directly, because the order + * devm_delayed_work_autocancel(); + * devm_request_threaded_irq(); + * would cause improper release order: + * free_irq(); + * cancel_delayed_work_sync(); + * Instead we first initialize the work above, and only now + * after IRQ is requested we add the work devm action. 
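+ * Since devm actions are released in reverse order of registration, adding
+ * the action only after the IRQ is requested gives the proper release order:
+ *   cancel_delayed_work_sync();
+ *   free_irq();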
+ */ + err = devm_add_action(dev, devm_delayed_work_drop, + &mcu->button_release_emul_work); + if (err) + return err; + } + + return 0; +} diff --git a/drivers/platform/cznic/turris-omnia-mcu-sys-off-wakeup.c b/drivers/platform/cznic/turris-omnia-mcu-sys-off-wakeup.c new file mode 100644 index 000000000000..0e8ab15b6037 --- /dev/null +++ b/drivers/platform/cznic/turris-omnia-mcu-sys-off-wakeup.c @@ -0,0 +1,260 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * CZ.NIC's Turris Omnia MCU system off and RTC wakeup driver + * + * This is not a true RTC driver (in the sense that it does not provide a + * real-time clock), rather the MCU implements a wakeup from powered off state + * at a specified time relative to MCU boot, and we expose this feature via RTC + * alarm, so that it can be used via the rtcwake command, which is the standard + * Linux command for this. + * + * 2024 by Marek Behún <kabel@kernel.org> + */ + +#include <linux/crc32.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/kstrtox.h> +#include <linux/reboot.h> +#include <linux/rtc.h> +#include <linux/sysfs.h> +#include <linux/types.h> + +#include <linux/turris-omnia-mcu-interface.h> +#include "turris-omnia-mcu.h" + +static int omnia_get_uptime_wakeup(const struct i2c_client *client, u32 *uptime, + u32 *wakeup) +{ + __le32 reply[2]; + int err; + + err = omnia_cmd_read(client, OMNIA_CMD_GET_UPTIME_AND_WAKEUP, reply, + sizeof(reply)); + if (err) + return err; + + if (uptime) + *uptime = le32_to_cpu(reply[0]); + + if (wakeup) + *wakeup = le32_to_cpu(reply[1]); + + return 0; +} + +static int omnia_read_time(struct device *dev, struct rtc_time *tm) +{ + u32 uptime; + int err; + + err = omnia_get_uptime_wakeup(to_i2c_client(dev), &uptime, NULL); + if (err) + return err; + + rtc_time64_to_tm(uptime, tm); + + return 0; +} + +static int omnia_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + struct i2c_client *client = to_i2c_client(dev); + struct omnia_mcu *mcu = i2c_get_clientdata(client); + u32 wakeup; + int err; + + err = omnia_get_uptime_wakeup(client, NULL, &wakeup); + if (err) + return err; + + alrm->enabled = !!wakeup; + rtc_time64_to_tm(wakeup ?: mcu->rtc_alarm, &alrm->time); + + return 0; +} + +static int omnia_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + struct i2c_client *client = to_i2c_client(dev); + struct omnia_mcu *mcu = i2c_get_clientdata(client); + + mcu->rtc_alarm = rtc_tm_to_time64(&alrm->time); + + if (alrm->enabled) + return omnia_cmd_write_u32(client, OMNIA_CMD_SET_WAKEUP, + mcu->rtc_alarm); + + return 0; +} + +static int omnia_alarm_irq_enable(struct device *dev, unsigned int enabled) +{ + struct i2c_client *client = to_i2c_client(dev); + struct omnia_mcu *mcu = i2c_get_clientdata(client); + + return omnia_cmd_write_u32(client, OMNIA_CMD_SET_WAKEUP, + enabled ? 
mcu->rtc_alarm : 0); +} + +static const struct rtc_class_ops omnia_rtc_ops = { + .read_time = omnia_read_time, + .read_alarm = omnia_read_alarm, + .set_alarm = omnia_set_alarm, + .alarm_irq_enable = omnia_alarm_irq_enable, +}; + +static int omnia_power_off(struct sys_off_data *data) +{ + struct omnia_mcu *mcu = data->cb_data; + __be32 tmp; + u8 cmd[9]; + u16 arg; + int err; + + if (mcu->front_button_poweron) + arg = OMNIA_CMD_POWER_OFF_POWERON_BUTTON; + else + arg = 0; + + cmd[0] = OMNIA_CMD_POWER_OFF; + put_unaligned_le16(OMNIA_CMD_POWER_OFF_MAGIC, &cmd[1]); + put_unaligned_le16(arg, &cmd[3]); + + /* + * Although all values from and to MCU are passed in little-endian, the + * MCU's CRC unit uses big-endian CRC32 polynomial (0x04c11db7), so we + * need to use crc32_be() here. + */ + tmp = cpu_to_be32(get_unaligned_le32(&cmd[1])); + put_unaligned_le32(crc32_be(~0, (void *)&tmp, sizeof(tmp)), &cmd[5]); + + err = omnia_cmd_write(mcu->client, cmd, sizeof(cmd)); + if (err) + dev_err(&mcu->client->dev, + "Unable to send the poweroff command: %d\n", err); + + return NOTIFY_DONE; +} + +static int omnia_restart(struct sys_off_data *data) +{ + struct omnia_mcu *mcu = data->cb_data; + u8 cmd[3]; + int err; + + cmd[0] = OMNIA_CMD_GENERAL_CONTROL; + + if (reboot_mode == REBOOT_HARD) + cmd[1] = cmd[2] = OMNIA_CTL_HARD_RST; + else + cmd[1] = cmd[2] = OMNIA_CTL_LIGHT_RST; + + err = omnia_cmd_write(mcu->client, cmd, sizeof(cmd)); + if (err) + dev_err(&mcu->client->dev, + "Unable to send the restart command: %d\n", err); + + /* + * MCU needs a little bit to process the I2C command, otherwise it will + * do a light reset based on SOC SYSRES_OUT pin. + */ + mdelay(1); + + return NOTIFY_DONE; +} + +static ssize_t front_button_poweron_show(struct device *dev, + struct device_attribute *a, char *buf) +{ + struct omnia_mcu *mcu = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%d\n", mcu->front_button_poweron); +} + +static ssize_t front_button_poweron_store(struct device *dev, + struct device_attribute *a, + const char *buf, size_t count) +{ + struct omnia_mcu *mcu = dev_get_drvdata(dev); + bool val; + int err; + + err = kstrtobool(buf, &val); + if (err) + return err; + + mcu->front_button_poweron = val; + + return count; +} +static DEVICE_ATTR_RW(front_button_poweron); + +static struct attribute *omnia_mcu_poweroff_attrs[] = { + &dev_attr_front_button_poweron.attr, + NULL +}; + +static umode_t poweroff_attrs_visible(struct kobject *kobj, struct attribute *a, + int n) +{ + struct device *dev = kobj_to_dev(kobj); + struct omnia_mcu *mcu = dev_get_drvdata(dev); + + if (mcu->features & OMNIA_FEAT_POWEROFF_WAKEUP) + return a->mode; + + return 0; +} + +const struct attribute_group omnia_mcu_poweroff_group = { + .attrs = omnia_mcu_poweroff_attrs, + .is_visible = poweroff_attrs_visible, +}; + +int omnia_mcu_register_sys_off_and_wakeup(struct omnia_mcu *mcu) +{ + struct device *dev = &mcu->client->dev; + int err; + + /* MCU restart is always available */ + err = devm_register_sys_off_handler(dev, SYS_OFF_MODE_RESTART, + SYS_OFF_PRIO_FIRMWARE, + omnia_restart, mcu); + if (err) + return dev_err_probe(dev, err, + "Cannot register system restart handler\n"); + + /* + * Poweroff and wakeup are available only if POWEROFF_WAKEUP feature is + * present. 
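+ * Without the feature we only register the restart handler above and skip
+ * both the power off handler and the RTC wakeup device below.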
+ */ + if (!(mcu->features & OMNIA_FEAT_POWEROFF_WAKEUP)) + return 0; + + err = devm_register_sys_off_handler(dev, SYS_OFF_MODE_POWER_OFF, + SYS_OFF_PRIO_FIRMWARE, + omnia_power_off, mcu); + if (err) + return dev_err_probe(dev, err, + "Cannot register system power off handler\n"); + + mcu->rtcdev = devm_rtc_allocate_device(dev); + if (IS_ERR(mcu->rtcdev)) + return dev_err_probe(dev, PTR_ERR(mcu->rtcdev), + "Cannot allocate RTC device\n"); + + mcu->rtcdev->ops = &omnia_rtc_ops; + mcu->rtcdev->range_max = U32_MAX; + set_bit(RTC_FEATURE_ALARM_WAKEUP_ONLY, mcu->rtcdev->features); + + err = devm_rtc_register_device(mcu->rtcdev); + if (err) + return dev_err_probe(dev, err, "Cannot register RTC device\n"); + + mcu->front_button_poweron = true; + + return 0; +} diff --git a/drivers/platform/cznic/turris-omnia-mcu-trng.c b/drivers/platform/cznic/turris-omnia-mcu-trng.c new file mode 100644 index 000000000000..ad953fb3c37a --- /dev/null +++ b/drivers/platform/cznic/turris-omnia-mcu-trng.c @@ -0,0 +1,103 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * CZ.NIC's Turris Omnia MCU TRNG driver + * + * 2024 by Marek Behún <kabel@kernel.org> + */ + +#include <linux/bitfield.h> +#include <linux/completion.h> +#include <linux/container_of.h> +#include <linux/errno.h> +#include <linux/gpio/consumer.h> +#include <linux/gpio/driver.h> +#include <linux/hw_random.h> +#include <linux/i2c.h> +#include <linux/interrupt.h> +#include <linux/minmax.h> +#include <linux/string.h> +#include <linux/types.h> + +#include <linux/turris-omnia-mcu-interface.h> +#include "turris-omnia-mcu.h" + +#define OMNIA_CMD_TRNG_MAX_ENTROPY_LEN 64 + +static irqreturn_t omnia_trng_irq_handler(int irq, void *dev_id) +{ + struct omnia_mcu *mcu = dev_id; + + complete(&mcu->trng_entropy_ready); + + return IRQ_HANDLED; +} + +static int omnia_trng_read(struct hwrng *rng, void *data, size_t max, bool wait) +{ + struct omnia_mcu *mcu = container_of(rng, struct omnia_mcu, trng); + u8 reply[1 + OMNIA_CMD_TRNG_MAX_ENTROPY_LEN]; + int err, bytes; + + if (!wait && !completion_done(&mcu->trng_entropy_ready)) + return 0; + + do { + if (wait_for_completion_interruptible(&mcu->trng_entropy_ready)) + return -ERESTARTSYS; + + err = omnia_cmd_read(mcu->client, + OMNIA_CMD_TRNG_COLLECT_ENTROPY, + reply, sizeof(reply)); + if (err) + return err; + + bytes = min3(reply[0], max, OMNIA_CMD_TRNG_MAX_ENTROPY_LEN); + } while (wait && !bytes); + + memcpy(data, &reply[1], bytes); + + return bytes; +} + +int omnia_mcu_register_trng(struct omnia_mcu *mcu) +{ + struct device *dev = &mcu->client->dev; + u8 irq_idx, dummy; + int irq, err; + + if (!(mcu->features & OMNIA_FEAT_TRNG)) + return 0; + + irq_idx = omnia_int_to_gpio_idx[__bf_shf(OMNIA_INT_TRNG)]; + irq = gpiod_to_irq(gpio_device_get_desc(mcu->gc.gpiodev, irq_idx)); + if (!irq) + return dev_err_probe(dev, -ENXIO, "Cannot get TRNG IRQ\n"); + + /* + * If someone else cleared the TRNG interrupt but did not read the + * entropy, a new interrupt won't be generated, and entropy collection + * will be stuck. Ensure an interrupt will be generated by executing + * the collect entropy command (and discarding the result). 
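+ * Reading (and discarding) one batch of entropy here rearms the MCU, so
+ * the interrupt fires again once fresh entropy is available.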
+ */ + err = omnia_cmd_read(mcu->client, OMNIA_CMD_TRNG_COLLECT_ENTROPY, + &dummy, 1); + if (err) + return err; + + init_completion(&mcu->trng_entropy_ready); + + err = devm_request_threaded_irq(dev, irq, NULL, omnia_trng_irq_handler, + IRQF_ONESHOT, "turris-omnia-mcu-trng", + mcu); + if (err) + return dev_err_probe(dev, err, "Cannot request TRNG IRQ\n"); + + mcu->trng.name = "turris-omnia-mcu-trng"; + mcu->trng.read = omnia_trng_read; + + err = devm_hwrng_register(dev, &mcu->trng); + if (err) + return dev_err_probe(dev, err, "Cannot register TRNG\n"); + + return 0; +} diff --git a/drivers/platform/cznic/turris-omnia-mcu-watchdog.c b/drivers/platform/cznic/turris-omnia-mcu-watchdog.c new file mode 100644 index 000000000000..3ad146ec1d80 --- /dev/null +++ b/drivers/platform/cznic/turris-omnia-mcu-watchdog.c @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * CZ.NIC's Turris Omnia MCU watchdog driver + * + * 2024 by Marek Behún <kabel@kernel.org> + */ + +#include <linux/bitops.h> +#include <linux/device.h> +#include <linux/i2c.h> +#include <linux/moduleparam.h> +#include <linux/types.h> +#include <linux/units.h> +#include <linux/watchdog.h> + +#include <linux/turris-omnia-mcu-interface.h> +#include "turris-omnia-mcu.h" + +#define WATCHDOG_TIMEOUT 120 + +static unsigned int timeout; +module_param(timeout, int, 0); +MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds"); + +static bool nowayout = WATCHDOG_NOWAYOUT; +module_param(nowayout, bool, 0); +MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" + __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); + +static int omnia_wdt_start(struct watchdog_device *wdt) +{ + struct omnia_mcu *mcu = watchdog_get_drvdata(wdt); + + return omnia_cmd_write_u8(mcu->client, OMNIA_CMD_SET_WATCHDOG_STATE, 1); +} + +static int omnia_wdt_stop(struct watchdog_device *wdt) +{ + struct omnia_mcu *mcu = watchdog_get_drvdata(wdt); + + return omnia_cmd_write_u8(mcu->client, OMNIA_CMD_SET_WATCHDOG_STATE, 0); +} + +static int omnia_wdt_ping(struct watchdog_device *wdt) +{ + struct omnia_mcu *mcu = watchdog_get_drvdata(wdt); + + return omnia_cmd_write_u8(mcu->client, OMNIA_CMD_SET_WATCHDOG_STATE, 1); +} + +static int omnia_wdt_set_timeout(struct watchdog_device *wdt, + unsigned int timeout) +{ + struct omnia_mcu *mcu = watchdog_get_drvdata(wdt); + + return omnia_cmd_write_u16(mcu->client, OMNIA_CMD_SET_WDT_TIMEOUT, + timeout * DECI); +} + +static unsigned int omnia_wdt_get_timeleft(struct watchdog_device *wdt) +{ + struct omnia_mcu *mcu = watchdog_get_drvdata(wdt); + u16 timeleft; + int err; + + err = omnia_cmd_read_u16(mcu->client, OMNIA_CMD_GET_WDT_TIMELEFT, + &timeleft); + if (err) { + dev_err(&mcu->client->dev, "Cannot get watchdog timeleft: %d\n", + err); + return 0; + } + + return timeleft / DECI; +} + +static const struct watchdog_info omnia_wdt_info = { + .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, + .identity = "Turris Omnia MCU Watchdog", +}; + +static const struct watchdog_ops omnia_wdt_ops = { + .owner = THIS_MODULE, + .start = omnia_wdt_start, + .stop = omnia_wdt_stop, + .ping = omnia_wdt_ping, + .set_timeout = omnia_wdt_set_timeout, + .get_timeleft = omnia_wdt_get_timeleft, +}; + +int omnia_mcu_register_watchdog(struct omnia_mcu *mcu) +{ + struct device *dev = &mcu->client->dev; + u8 state; + int err; + + if (!(mcu->features & OMNIA_FEAT_WDT_PING)) + return 0; + + mcu->wdt.info = &omnia_wdt_info; + mcu->wdt.ops = &omnia_wdt_ops; + mcu->wdt.parent = dev; + mcu->wdt.min_timeout = 1; + mcu->wdt.max_timeout 
= 65535 / DECI; + + mcu->wdt.timeout = WATCHDOG_TIMEOUT; + watchdog_init_timeout(&mcu->wdt, timeout, dev); + + watchdog_set_drvdata(&mcu->wdt, mcu); + + omnia_wdt_set_timeout(&mcu->wdt, mcu->wdt.timeout); + + err = omnia_cmd_read_u8(mcu->client, OMNIA_CMD_GET_WATCHDOG_STATE, + &state); + if (err) + return dev_err_probe(dev, err, + "Cannot get MCU watchdog state\n"); + + if (state) + set_bit(WDOG_HW_RUNNING, &mcu->wdt.status); + + watchdog_set_nowayout(&mcu->wdt, nowayout); + watchdog_stop_on_reboot(&mcu->wdt); + err = devm_watchdog_register_device(dev, &mcu->wdt); + if (err) + return dev_err_probe(dev, err, + "Cannot register MCU watchdog\n"); + + return 0; +} diff --git a/drivers/platform/cznic/turris-omnia-mcu.h b/drivers/platform/cznic/turris-omnia-mcu.h new file mode 100644 index 000000000000..2ca56ae13aa9 --- /dev/null +++ b/drivers/platform/cznic/turris-omnia-mcu.h @@ -0,0 +1,194 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * CZ.NIC's Turris Omnia MCU driver + * + * 2024 by Marek Behún <kabel@kernel.org> + */ + +#ifndef __TURRIS_OMNIA_MCU_H +#define __TURRIS_OMNIA_MCU_H + +#include <linux/bitops.h> +#include <linux/completion.h> +#include <linux/gpio/driver.h> +#include <linux/hw_random.h> +#include <linux/if_ether.h> +#include <linux/mutex.h> +#include <linux/types.h> +#include <linux/watchdog.h> +#include <linux/workqueue.h> +#include <asm/byteorder.h> +#include <asm/unaligned.h> + +struct i2c_client; +struct rtc_device; + +struct omnia_mcu { + struct i2c_client *client; + const char *type; + u32 features; + + /* board information */ + u64 board_serial_number; + u8 board_first_mac[ETH_ALEN]; + u8 board_revision; + + /* GPIO chip */ + struct gpio_chip gc; + struct mutex lock; + unsigned long mask, rising, falling, both, cached, is_cached; + /* Old MCU firmware handling needs the following */ + struct delayed_work button_release_emul_work; + unsigned long last_status; + bool button_pressed_emul; + + /* RTC device for configuring wake-up */ + struct rtc_device *rtcdev; + u32 rtc_alarm; + bool front_button_poweron; + + /* MCU watchdog */ + struct watchdog_device wdt; + + /* true random number generator */ + struct hwrng trng; + struct completion trng_entropy_ready; +}; + +int omnia_cmd_write_read(const struct i2c_client *client, + void *cmd, unsigned int cmd_len, + void *reply, unsigned int reply_len); + +static inline int omnia_cmd_write(const struct i2c_client *client, void *cmd, + unsigned int len) +{ + return omnia_cmd_write_read(client, cmd, len, NULL, 0); +} + +static inline int omnia_cmd_write_u8(const struct i2c_client *client, u8 cmd, + u8 val) +{ + u8 buf[2] = { cmd, val }; + + return omnia_cmd_write(client, buf, sizeof(buf)); +} + +static inline int omnia_cmd_write_u16(const struct i2c_client *client, u8 cmd, + u16 val) +{ + u8 buf[3]; + + buf[0] = cmd; + put_unaligned_le16(val, &buf[1]); + + return omnia_cmd_write(client, buf, sizeof(buf)); +} + +static inline int omnia_cmd_write_u32(const struct i2c_client *client, u8 cmd, + u32 val) +{ + u8 buf[5]; + + buf[0] = cmd; + put_unaligned_le32(val, &buf[1]); + + return omnia_cmd_write(client, buf, sizeof(buf)); +} + +static inline int omnia_cmd_read(const struct i2c_client *client, u8 cmd, + void *reply, unsigned int len) +{ + return omnia_cmd_write_read(client, &cmd, 1, reply, len); +} + +static inline unsigned int +omnia_compute_reply_length(unsigned long mask, bool interleaved, + unsigned int offset) +{ + if (!mask) + return 0; + + return ((__fls(mask) >> 3) << interleaved) + 1 + offset; +} + +/* Returns 0 on success */ 
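+/* Only reads as many reply bytes as needed to cover the highest bit in @bits */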
+static inline int omnia_cmd_read_bits(const struct i2c_client *client, u8 cmd, + unsigned long bits, unsigned long *dst) +{ + __le32 reply; + int err; + + if (!bits) { + *dst = 0; + return 0; + } + + err = omnia_cmd_read(client, cmd, &reply, + omnia_compute_reply_length(bits, false, 0)); + if (err) + return err; + + *dst = le32_to_cpu(reply) & bits; + + return 0; +} + +static inline int omnia_cmd_read_bit(const struct i2c_client *client, u8 cmd, + unsigned long bit) +{ + unsigned long reply; + int err; + + err = omnia_cmd_read_bits(client, cmd, bit, &reply); + if (err) + return err; + + return !!reply; +} + +static inline int omnia_cmd_read_u32(const struct i2c_client *client, u8 cmd, + u32 *dst) +{ + __le32 reply; + int err; + + err = omnia_cmd_read(client, cmd, &reply, sizeof(reply)); + if (err) + return err; + + *dst = le32_to_cpu(reply); + + return 0; +} + +static inline int omnia_cmd_read_u16(const struct i2c_client *client, u8 cmd, + u16 *dst) +{ + __le16 reply; + int err; + + err = omnia_cmd_read(client, cmd, &reply, sizeof(reply)); + if (err) + return err; + + *dst = le16_to_cpu(reply); + + return 0; +} + +static inline int omnia_cmd_read_u8(const struct i2c_client *client, u8 cmd, + u8 *reply) +{ + return omnia_cmd_read(client, cmd, reply, sizeof(*reply)); +} + +extern const u8 omnia_int_to_gpio_idx[32]; +extern const struct attribute_group omnia_mcu_gpio_group; +extern const struct attribute_group omnia_mcu_poweroff_group; + +int omnia_mcu_register_gpiochip(struct omnia_mcu *mcu); +int omnia_mcu_register_sys_off_and_wakeup(struct omnia_mcu *mcu); +int omnia_mcu_register_trng(struct omnia_mcu *mcu); +int omnia_mcu_register_watchdog(struct omnia_mcu *mcu); + +#endif /* __TURRIS_OMNIA_MCU_H */ diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig index 6bb5d9e372e4..67bce340a87e 100644 --- a/drivers/reset/Kconfig +++ b/drivers/reset/Kconfig @@ -92,6 +92,14 @@ config RESET_IMX7 help This enables the reset controller driver for i.MX7 SoCs. +config RESET_IMX8MP_AUDIOMIX + tristate "i.MX8MP AudioMix Reset Driver" + depends on ARCH_MXC || COMPILE_TEST + select AUXILIARY_BUS + default CLK_IMX8MP + help + This enables the reset controller driver for i.MX8MP AudioMix + config RESET_INTEL_GW bool "Intel Reset Controller Driver" depends on X86 || COMPILE_TEST @@ -329,6 +337,12 @@ config RESET_ZYNQ help This enables the reset controller driver for Xilinx Zynq SoCs. +config RESET_ZYNQMP + bool "ZYNQMP Reset Driver" if COMPILE_TEST + default ARCH_ZYNQMP + help + This enables the reset controller driver for Xilinx ZynqMP SoCs. 
+ source "drivers/reset/starfive/Kconfig" source "drivers/reset/sti/Kconfig" source "drivers/reset/hisilicon/Kconfig" diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile index fd8b49fa46fc..27b0bbdfcc04 100644 --- a/drivers/reset/Makefile +++ b/drivers/reset/Makefile @@ -2,8 +2,8 @@ obj-y += core.o obj-y += hisilicon/ obj-y += starfive/ -obj-$(CONFIG_ARCH_STI) += sti/ -obj-$(CONFIG_ARCH_TEGRA) += tegra/ +obj-y += sti/ +obj-y += tegra/ obj-$(CONFIG_RESET_A10SR) += reset-a10sr.o obj-$(CONFIG_RESET_ATH79) += reset-ath79.o obj-$(CONFIG_RESET_AXS10X) += reset-axs10x.o @@ -14,6 +14,7 @@ obj-$(CONFIG_RESET_BRCMSTB_RESCAL) += reset-brcmstb-rescal.o obj-$(CONFIG_RESET_GPIO) += reset-gpio.o obj-$(CONFIG_RESET_HSDK) += reset-hsdk.o obj-$(CONFIG_RESET_IMX7) += reset-imx7.o +obj-$(CONFIG_RESET_IMX8MP_AUDIOMIX) += reset-imx8mp-audiomix.o obj-$(CONFIG_RESET_INTEL_GW) += reset-intel-gw.o obj-$(CONFIG_RESET_K210) += reset-k210.o obj-$(CONFIG_RESET_LANTIQ) += reset-lantiq.o @@ -41,4 +42,4 @@ obj-$(CONFIG_RESET_TN48M_CPLD) += reset-tn48m.o obj-$(CONFIG_RESET_UNIPHIER) += reset-uniphier.o obj-$(CONFIG_RESET_UNIPHIER_GLUE) += reset-uniphier-glue.o obj-$(CONFIG_RESET_ZYNQ) += reset-zynq.o -obj-$(CONFIG_ARCH_ZYNQMP) += reset-zynqmp.o +obj-$(CONFIG_RESET_ZYNQMP) += reset-zynqmp.o diff --git a/drivers/reset/reset-imx8mp-audiomix.c b/drivers/reset/reset-imx8mp-audiomix.c new file mode 100644 index 000000000000..6e3f3069f727 --- /dev/null +++ b/drivers/reset/reset-imx8mp-audiomix.c @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright 2024 NXP + */ + +#include <linux/auxiliary_bus.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/reset-controller.h> + +#define EARC 0x200 +#define EARC_RESET_MASK 0x3 + +struct imx8mp_audiomix_reset { + struct reset_controller_dev rcdev; + spinlock_t lock; /* protect register read-modify-write cycle */ + void __iomem *base; +}; + +static struct imx8mp_audiomix_reset *to_imx8mp_audiomix_reset(struct reset_controller_dev *rcdev) +{ + return container_of(rcdev, struct imx8mp_audiomix_reset, rcdev); +} + +static int imx8mp_audiomix_reset_assert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct imx8mp_audiomix_reset *priv = to_imx8mp_audiomix_reset(rcdev); + void __iomem *reg_addr = priv->base; + unsigned int mask, reg; + unsigned long flags; + + mask = BIT(id); + spin_lock_irqsave(&priv->lock, flags); + reg = readl(reg_addr + EARC); + writel(reg & ~mask, reg_addr + EARC); + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +static int imx8mp_audiomix_reset_deassert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct imx8mp_audiomix_reset *priv = to_imx8mp_audiomix_reset(rcdev); + void __iomem *reg_addr = priv->base; + unsigned int mask, reg; + unsigned long flags; + + mask = BIT(id); + spin_lock_irqsave(&priv->lock, flags); + reg = readl(reg_addr + EARC); + writel(reg | mask, reg_addr + EARC); + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +static const struct reset_control_ops imx8mp_audiomix_reset_ops = { + .assert = imx8mp_audiomix_reset_assert, + .deassert = imx8mp_audiomix_reset_deassert, +}; + +static int imx8mp_audiomix_reset_probe(struct auxiliary_device *adev, + const struct auxiliary_device_id *id) +{ + struct imx8mp_audiomix_reset *priv; + struct device *dev = &adev->dev; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + 
spin_lock_init(&priv->lock); + + priv->rcdev.owner = THIS_MODULE; + priv->rcdev.nr_resets = fls(EARC_RESET_MASK); + priv->rcdev.ops = &imx8mp_audiomix_reset_ops; + priv->rcdev.of_node = dev->parent->of_node; + priv->rcdev.dev = dev; + priv->rcdev.of_reset_n_cells = 1; + priv->base = of_iomap(dev->parent->of_node, 0); + if (!priv->base) + return -ENOMEM; + + dev_set_drvdata(dev, priv); + + ret = devm_reset_controller_register(dev, &priv->rcdev); + if (ret) + goto out_unmap; + + return 0; + +out_unmap: + iounmap(priv->base); + return ret; +} + +static void imx8mp_audiomix_reset_remove(struct auxiliary_device *adev) +{ + struct imx8mp_audiomix_reset *priv = dev_get_drvdata(&adev->dev); + + iounmap(priv->base); +} + +static const struct auxiliary_device_id imx8mp_audiomix_reset_ids[] = { + { + .name = "clk_imx8mp_audiomix.reset", + }, + { } +}; +MODULE_DEVICE_TABLE(auxiliary, imx8mp_audiomix_reset_ids); + +static struct auxiliary_driver imx8mp_audiomix_reset_driver = { + .probe = imx8mp_audiomix_reset_probe, + .remove = imx8mp_audiomix_reset_remove, + .id_table = imx8mp_audiomix_reset_ids, +}; + +module_auxiliary_driver(imx8mp_audiomix_reset_driver); + +MODULE_AUTHOR("Shengjiu Wang <shengjiu.wang@nxp.com>"); +MODULE_DESCRIPTION("Freescale i.MX8MP Audio Block Controller reset driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/reset/reset-meson-audio-arb.c b/drivers/reset/reset-meson-audio-arb.c index 7891d52fa899..894ad9d37a66 100644 --- a/drivers/reset/reset-meson-audio-arb.c +++ b/drivers/reset/reset-meson-audio-arb.c @@ -129,8 +129,6 @@ static int meson_audio_arb_remove(struct platform_device *pdev) writel(0, arb->regs); spin_unlock(&arb->lock); - clk_disable_unprepare(arb->clk); - return 0; } @@ -150,7 +148,7 @@ static int meson_audio_arb_probe(struct platform_device *pdev) return -ENOMEM; platform_set_drvdata(pdev, arb); - arb->clk = devm_clk_get(dev, NULL); + arb->clk = devm_clk_get_enabled(dev, NULL); if (IS_ERR(arb->clk)) return dev_err_probe(dev, PTR_ERR(arb->clk), "failed to get clock\n"); @@ -170,11 +168,6 @@ static int meson_audio_arb_probe(struct platform_device *pdev) * In the initial state, all memory interfaces are disabled * and the general bit is on */ - ret = clk_prepare_enable(arb->clk); - if (ret) { - dev_err(dev, "failed to enable arb clock\n"); - return ret; - } writel(BIT(ARB_GENERAL_BIT), arb->regs); /* Register reset controller */ diff --git a/drivers/reset/reset-rzg2l-usbphy-ctrl.c b/drivers/reset/reset-rzg2l-usbphy-ctrl.c index a8dde4606360..255c894a4782 100644 --- a/drivers/reset/reset-rzg2l-usbphy-ctrl.c +++ b/drivers/reset/reset-rzg2l-usbphy-ctrl.c @@ -10,10 +10,12 @@ #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> +#include <linux/regmap.h> #include <linux/reset.h> #include <linux/reset-controller.h> #define RESET 0x000 +#define VBENCTL 0x03c #define RESET_SEL_PLLRESET BIT(12) #define RESET_PLLRESET BIT(8) @@ -32,6 +34,7 @@ struct rzg2l_usbphy_ctrl_priv { struct reset_controller_dev rcdev; struct reset_control *rstc; void __iomem *base; + struct platform_device *vdev; spinlock_t lock; }; @@ -100,10 +103,19 @@ static const struct reset_control_ops rzg2l_usbphy_ctrl_reset_ops = { .status = rzg2l_usbphy_ctrl_status, }; +static const struct regmap_config rzg2l_usb_regconf = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .max_register = 1, +}; + static int rzg2l_usbphy_ctrl_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct rzg2l_usbphy_ctrl_priv *priv; + struct platform_device *vdev; + 
struct regmap *regmap; unsigned long flags; int error; u32 val; @@ -116,6 +128,10 @@ static int rzg2l_usbphy_ctrl_probe(struct platform_device *pdev) if (IS_ERR(priv->base)) return PTR_ERR(priv->base); + regmap = devm_regmap_init_mmio(dev, priv->base + VBENCTL, &rzg2l_usb_regconf); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL); if (IS_ERR(priv->rstc)) return dev_err_probe(dev, PTR_ERR(priv->rstc), @@ -125,25 +141,14 @@ static int rzg2l_usbphy_ctrl_probe(struct platform_device *pdev) if (error) return error; - priv->rcdev.ops = &rzg2l_usbphy_ctrl_reset_ops; - priv->rcdev.of_reset_n_cells = 1; - priv->rcdev.nr_resets = NUM_PORTS; - priv->rcdev.of_node = dev->of_node; - priv->rcdev.dev = dev; - - error = devm_reset_controller_register(dev, &priv->rcdev); - if (error) - return error; - spin_lock_init(&priv->lock); dev_set_drvdata(dev, priv); pm_runtime_enable(&pdev->dev); error = pm_runtime_resume_and_get(&pdev->dev); if (error < 0) { - pm_runtime_disable(&pdev->dev); - reset_control_assert(priv->rstc); - return dev_err_probe(&pdev->dev, error, "pm_runtime_resume_and_get failed"); + dev_err_probe(&pdev->dev, error, "pm_runtime_resume_and_get failed"); + goto err_pm_disable_reset_deassert; } /* put pll and phy into reset state */ @@ -153,13 +158,45 @@ static int rzg2l_usbphy_ctrl_probe(struct platform_device *pdev) writel(val, priv->base + RESET); spin_unlock_irqrestore(&priv->lock, flags); + priv->rcdev.ops = &rzg2l_usbphy_ctrl_reset_ops; + priv->rcdev.of_reset_n_cells = 1; + priv->rcdev.nr_resets = NUM_PORTS; + priv->rcdev.of_node = dev->of_node; + priv->rcdev.dev = dev; + + error = devm_reset_controller_register(dev, &priv->rcdev); + if (error) + goto err_pm_runtime_put; + + vdev = platform_device_alloc("rzg2l-usb-vbus-regulator", pdev->id); + if (!vdev) { + error = -ENOMEM; + goto err_pm_runtime_put; + } + vdev->dev.parent = dev; + priv->vdev = vdev; + + error = platform_device_add(vdev); + if (error) + goto err_device_put; + return 0; + +err_device_put: + platform_device_put(vdev); +err_pm_runtime_put: + pm_runtime_put(&pdev->dev); +err_pm_disable_reset_deassert: + pm_runtime_disable(&pdev->dev); + reset_control_assert(priv->rstc); + return error; } static int rzg2l_usbphy_ctrl_remove(struct platform_device *pdev) { struct rzg2l_usbphy_ctrl_priv *priv = dev_get_drvdata(&pdev->dev); + platform_device_unregister(priv->vdev); pm_runtime_put(&pdev->dev); pm_runtime_disable(&pdev->dev); reset_control_assert(priv->rstc); diff --git a/drivers/reset/sti/Kconfig b/drivers/reset/sti/Kconfig index a2622e146b8b..0b599f7cf6ed 100644 --- a/drivers/reset/sti/Kconfig +++ b/drivers/reset/sti/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only -if ARCH_STI +if ARCH_STI || COMPILE_TEST config STIH407_RESET - bool + bool "STIH407 Reset Driver" if COMPILE_TEST endif diff --git a/drivers/reset/tegra/Kconfig b/drivers/reset/tegra/Kconfig index e4a9a389e98c..4a2d26d1210a 100644 --- a/drivers/reset/tegra/Kconfig +++ b/drivers/reset/tegra/Kconfig @@ -1,3 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only config RESET_TEGRA_BPMP - def_bool TEGRA_BPMP + bool "Tegra BPMP Reset Driver" if COMPILE_TEST + default TEGRA_BPMP diff --git a/drivers/soc/amlogic/meson-clk-measure.c b/drivers/soc/amlogic/meson-clk-measure.c index 3f3039600357..a6453ffeb753 100644 --- a/drivers/soc/amlogic/meson-clk-measure.c +++ b/drivers/soc/amlogic/meson-clk-measure.c @@ -688,4 +688,5 @@ static struct platform_driver meson_msr_driver = { }, }; 
module_platform_driver(meson_msr_driver); +MODULE_DESCRIPTION("Amlogic Meson SoC Clock Measure driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/amlogic/meson-gx-socinfo.c b/drivers/soc/amlogic/meson-gx-socinfo.c index 6abb730344ab..8809a948201a 100644 --- a/drivers/soc/amlogic/meson-gx-socinfo.c +++ b/drivers/soc/amlogic/meson-gx-socinfo.c @@ -63,7 +63,9 @@ static const struct meson_gx_package_id { { "962X", 0x24, 0x10, 0xf0 }, { "962E", 0x24, 0x20, 0xf0 }, { "A113X", 0x25, 0x37, 0xff }, + { "A113X", 0x25, 0x43, 0xff }, { "A113D", 0x25, 0x22, 0xff }, + { "S905L", 0x26, 0, 0x0 }, { "S905D2", 0x28, 0x10, 0xf0 }, { "S905Y2", 0x28, 0x30, 0xf0 }, { "S905X2", 0x28, 0x40, 0xf0 }, diff --git a/drivers/soc/imx/soc-imx8m.c b/drivers/soc/imx/soc-imx8m.c index ec87d9d878f3..fe111bae38c8 100644 --- a/drivers/soc/imx/soc-imx8m.c +++ b/drivers/soc/imx/soc-imx8m.c @@ -252,4 +252,5 @@ free_soc: return ret; } device_initcall(imx8_soc_init); +MODULE_DESCRIPTION("NXP i.MX8M SoC driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/soc/ixp4xx/ixp4xx-npe.c b/drivers/soc/ixp4xx/ixp4xx-npe.c index 35825ee95dff..34a6f187c220 100644 --- a/drivers/soc/ixp4xx/ixp4xx-npe.c +++ b/drivers/soc/ixp4xx/ixp4xx-npe.c @@ -764,6 +764,7 @@ static struct platform_driver ixp4xx_npe_driver = { module_platform_driver(ixp4xx_npe_driver); MODULE_AUTHOR("Krzysztof Halasa"); +MODULE_DESCRIPTION("Intel IXP4xx Network Processor Engine driver"); MODULE_LICENSE("GPL v2"); MODULE_FIRMWARE(NPE_A_FIRMWARE); MODULE_FIRMWARE(NPE_B_FIRMWARE); diff --git a/drivers/soc/ixp4xx/ixp4xx-qmgr.c b/drivers/soc/ixp4xx/ixp4xx-qmgr.c index 244ad8d7e80b..cb112f3643e9 100644 --- a/drivers/soc/ixp4xx/ixp4xx-qmgr.c +++ b/drivers/soc/ixp4xx/ixp4xx-qmgr.c @@ -465,6 +465,7 @@ static struct platform_driver ixp4xx_qmgr_driver = { }; module_platform_driver(ixp4xx_qmgr_driver); +MODULE_DESCRIPTION("Intel IXP4xx Queue Manager driver"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Krzysztof Halasa"); diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c index 046522664dc1..a8fccedba83f 100644 --- a/drivers/soc/mediatek/mtk-cmdq-helper.c +++ b/drivers/soc/mediatek/mtk-cmdq-helper.c @@ -15,6 +15,7 @@ /* dedicate the last GPR_R15 to assign the register address to be poll */ #define CMDQ_POLL_ADDR_GPR (15) #define CMDQ_EOC_IRQ_EN BIT(0) +#define CMDQ_IMMEDIATE_VALUE 0 #define CMDQ_REG_TYPE 1 #define CMDQ_JUMP_RELATIVE 0 #define CMDQ_JUMP_ABSOLUTE 1 @@ -45,6 +46,16 @@ struct cmdq_instruction { u8 op; }; +static inline u8 cmdq_operand_get_type(struct cmdq_operand *op) +{ + return op->reg ? CMDQ_REG_TYPE : CMDQ_IMMEDIATE_VALUE; +} + +static inline u16 cmdq_operand_get_idx_value(struct cmdq_operand *op) +{ + return op->reg ? 
op->idx : op->value; +} + int cmdq_dev_get_client_reg(struct device *dev, struct cmdq_client_reg *client_reg, int idx) { @@ -461,6 +472,29 @@ int cmdq_pkt_poll_addr(struct cmdq_pkt *pkt, dma_addr_t addr, u32 value, u32 mas } EXPORT_SYMBOL(cmdq_pkt_poll_addr); +int cmdq_pkt_logic_command(struct cmdq_pkt *pkt, u16 result_reg_idx, + struct cmdq_operand *left_operand, + enum cmdq_logic_op s_op, + struct cmdq_operand *right_operand) +{ + struct cmdq_instruction inst = { {0} }; + + if (!left_operand || !right_operand || s_op >= CMDQ_LOGIC_MAX) + return -EINVAL; + + inst.op = CMDQ_CODE_LOGIC; + inst.dst_t = CMDQ_REG_TYPE; + inst.src_t = cmdq_operand_get_type(left_operand); + inst.arg_c_t = cmdq_operand_get_type(right_operand); + inst.sop = s_op; + inst.reg_dst = result_reg_idx; + inst.src_reg = cmdq_operand_get_idx_value(left_operand); + inst.arg_c = cmdq_operand_get_idx_value(right_operand); + + return cmdq_pkt_append_command(pkt, inst); +} +EXPORT_SYMBOL(cmdq_pkt_logic_command); + int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value) { struct cmdq_instruction inst = {}; @@ -526,4 +560,5 @@ int cmdq_pkt_finalize(struct cmdq_pkt *pkt) } EXPORT_SYMBOL(cmdq_pkt_finalize); +MODULE_DESCRIPTION("MediaTek Command Queue (CMDQ) driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/mediatek/mtk-mmsys.c b/drivers/soc/mediatek/mtk-mmsys.c index f370f4ec4b88..938240714e54 100644 --- a/drivers/soc/mediatek/mtk-mmsys.c +++ b/drivers/soc/mediatek/mtk-mmsys.c @@ -236,6 +236,7 @@ void mtk_mmsys_mixer_in_config(struct device *dev, int idx, bool alpha_sel, u16 mtk_mmsys_update_bits(mmsys, MT8195_VDO1_MIXER_IN1_ALPHA + (idx - 1) * 4, ~0, alpha << 16 | alpha, cmdq_pkt); + mtk_mmsys_update_bits(mmsys, MT8195_VDO1_HDR_TOP_CFG, BIT(15 + idx), 0, cmdq_pkt); mtk_mmsys_update_bits(mmsys, MT8195_VDO1_HDR_TOP_CFG, BIT(19 + idx), alpha_sel << (19 + idx), cmdq_pkt); mtk_mmsys_update_bits(mmsys, MT8195_VDO1_MIXER_IN1_PAD + (idx - 1) * 4, diff --git a/drivers/soc/mediatek/mtk-mutex.c b/drivers/soc/mediatek/mtk-mutex.c index b5af1fb5847e..01b129caf1eb 100644 --- a/drivers/soc/mediatek/mtk-mutex.c +++ b/drivers/soc/mediatek/mtk-mutex.c @@ -524,6 +524,7 @@ static const unsigned int mt8188_mdp_mutex_table_mod[MUTEX_MOD_IDX_MAX] = { [MUTEX_MOD_IDX_MDP_PAD0] = MT8195_MUTEX_MOD_MDP_PAD0, [MUTEX_MOD_IDX_MDP_PAD2] = MT8195_MUTEX_MOD_MDP_PAD2, [MUTEX_MOD_IDX_MDP_PAD3] = MT8195_MUTEX_MOD_MDP_PAD3, + [MUTEX_MOD_IDX_MDP_TCC0] = MT8195_MUTEX_MOD_MDP_TCC0, [MUTEX_MOD_IDX_MDP_WROT0] = MT8195_MUTEX_MOD_MDP_WROT0, [MUTEX_MOD_IDX_MDP_WROT2] = MT8195_MUTEX_MOD_MDP_WROT2, [MUTEX_MOD_IDX_MDP_WROT3] = MT8195_MUTEX_MOD_MDP_WROT3, diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index 5af33b0e3470..7f02f0525933 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -72,11 +72,28 @@ config QCOM_OCMEM requirements. This is typically used by the GPU, camera/video, and audio components on some Snapdragon SoCs. +config QCOM_PD_MAPPER + tristate "Qualcomm Protection Domain Mapper" + select QCOM_QMI_HELPERS + select QCOM_PDR_MSG + select AUXILIARY_BUS + depends on NET && QRTR + default QCOM_RPROC_COMMON + help + The Protection Domain Mapper maps registered services to the domains + and instances handled by the remote DSPs. This is a kernel-space + implementation of the service. It is a simpler alternative to the + userspace daemon. 
+ config QCOM_PDR_HELPERS tristate select QCOM_QMI_HELPERS + select QCOM_PDR_MSG depends on NET +config QCOM_PDR_MSG + tristate + config QCOM_PMIC_PDCHARGER_ULOG tristate "Qualcomm PMIC PDCharger ULOG driver" depends on RPMSG @@ -194,6 +211,7 @@ config QCOM_SMP2P config QCOM_SMSM tristate "Qualcomm Shared Memory State Machine" + depends on MAILBOX depends on QCOM_SMEM select QCOM_SMEM_STATE select IRQ_DOMAIN diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index ca0bece0dfff..d3560f861085 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile @@ -7,7 +7,9 @@ obj-$(CONFIG_QCOM_COMMAND_DB) += cmd-db.o obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o obj-$(CONFIG_QCOM_MDT_LOADER) += mdt_loader.o obj-$(CONFIG_QCOM_OCMEM) += ocmem.o +obj-$(CONFIG_QCOM_PD_MAPPER) += qcom_pd_mapper.o obj-$(CONFIG_QCOM_PDR_HELPERS) += pdr_interface.o +obj-$(CONFIG_QCOM_PDR_MSG) += qcom_pdr_msg.o obj-$(CONFIG_QCOM_PMIC_GLINK) += pmic_glink.o obj-$(CONFIG_QCOM_PMIC_GLINK) += pmic_glink_altmode.o obj-$(CONFIG_QCOM_PMIC_PDCHARGER_ULOG) += pmic_pdcharger_ulog.o diff --git a/drivers/soc/qcom/icc-bwmon.c b/drivers/soc/qcom/icc-bwmon.c index fb323b3364db..e7851974084b 100644 --- a/drivers/soc/qcom/icc-bwmon.c +++ b/drivers/soc/qcom/icc-bwmon.c @@ -565,7 +565,7 @@ static void bwmon_start(struct icc_bwmon *bwmon) int window; /* No need to check for errors, as this must have succeeded before. */ - dev_pm_opp_find_bw_ceil(bwmon->dev, &bw_low, 0); + dev_pm_opp_put(dev_pm_opp_find_bw_ceil(bwmon->dev, &bw_low, 0)); bwmon_clear_counters(bwmon, true); @@ -772,18 +772,25 @@ static int bwmon_probe(struct platform_device *pdev) opp = dev_pm_opp_find_bw_floor(dev, &bwmon->max_bw_kbps, 0); if (IS_ERR(opp)) return dev_err_probe(dev, PTR_ERR(opp), "failed to find max peak bandwidth\n"); + dev_pm_opp_put(opp); bwmon->min_bw_kbps = 0; opp = dev_pm_opp_find_bw_ceil(dev, &bwmon->min_bw_kbps, 0); if (IS_ERR(opp)) return dev_err_probe(dev, PTR_ERR(opp), "failed to find min peak bandwidth\n"); + dev_pm_opp_put(opp); bwmon->dev = dev; bwmon_disable(bwmon); - ret = devm_request_threaded_irq(dev, bwmon->irq, bwmon_intr, - bwmon_intr_thread, - IRQF_ONESHOT, dev_name(dev), bwmon); + + /* + * SoCs with multiple cpu-bwmon instances can end up using a shared interrupt + * line. 
Using the devm_ variant might result in the IRQ handler being executed + * after bwmon_disable in bwmon_remove() + */ + ret = request_threaded_irq(bwmon->irq, bwmon_intr, bwmon_intr_thread, + IRQF_ONESHOT | IRQF_SHARED, dev_name(dev), bwmon); if (ret) return dev_err_probe(dev, ret, "failed to request IRQ\n"); @@ -798,6 +805,7 @@ static void bwmon_remove(struct platform_device *pdev) struct icc_bwmon *bwmon = platform_get_drvdata(pdev); bwmon_disable(bwmon); + free_irq(bwmon->irq, bwmon); } static const struct icc_bwmon_data msm8998_bwmon_data = { diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c index cbef0dea1d5d..37e11e501728 100644 --- a/drivers/soc/qcom/llcc-qcom.c +++ b/drivers/soc/qcom/llcc-qcom.c @@ -7,6 +7,7 @@ #include <linux/bitfield.h> #include <linux/bitmap.h> #include <linux/bitops.h> +#include <linux/cleanup.h> #include <linux/device.h> #include <linux/io.h> #include <linux/kernel.h> @@ -150,6 +151,25 @@ enum llcc_reg_offset { LLCC_COMMON_STATUS0, }; +static const struct llcc_slice_config sa8775p_data[] = { + {LLCC_CPUSS, 1, 2048, 1, 0, 0x00FF, 0x0, 0, 0, 0, 1, 1, 0, 0}, + {LLCC_VIDSC0, 2, 512, 3, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0}, + {LLCC_CPUSS1, 3, 1024, 1, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0}, + {LLCC_CPUHWT, 5, 512, 1, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0}, + {LLCC_AUDIO, 6, 1024, 1, 1, 0x00FF, 0x0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_CMPT, 10, 4096, 1, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0}, + {LLCC_GPUHTW, 11, 1024, 1, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0}, + {LLCC_GPU, 12, 1024, 1, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 1, 0}, + {LLCC_MMUHWT, 13, 1024, 1, 1, 0x00FF, 0x0, 0, 0, 0, 0, 1, 0, 0}, + {LLCC_CMPTDMA, 15, 1024, 1, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0}, + {LLCC_DISP, 16, 4096, 2, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0}, + {LLCC_VIDFW, 17, 3072, 1, 0, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0}, + {LLCC_AUDHW, 22, 1024, 1, 1, 0x00FF, 0x0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_CVP, 28, 256, 3, 1, 0x00FF, 0x0, 0, 0, 0, 1, 0, 0, 0}, + {LLCC_APTCM, 30, 1024, 3, 1, 0x0, 0xF0, 1, 0, 0, 1, 0, 0, 0}, + {LLCC_WRCACHE, 31, 512, 1, 1, 0x00FF, 0x0, 0, 0, 0, 0, 1, 0, 0}, +}; + static const struct llcc_slice_config sc7180_data[] = { { LLCC_CPUSS, 1, 256, 1, 0, 0xf, 0x0, 0, 0, 0, 1, 1 }, { LLCC_MDM, 8, 128, 1, 0, 0xf, 0x0, 0, 0, 0, 1, 0 }, @@ -552,6 +572,16 @@ static const struct qcom_llcc_config qdu1000_cfg[] = { }, }; +static const struct qcom_llcc_config sa8775p_cfg[] = { + { + .sct_data = sa8775p_data, + .size = ARRAY_SIZE(sa8775p_data), + .need_llcc_cfg = true, + .reg_offset = llcc_v2_1_reg_offset, + .edac_reg_offset = &llcc_v2_1_edac_reg_offset, + }, +}; + static const struct qcom_llcc_config sc7180_cfg[] = { { .sct_data = sc7180_data, @@ -698,6 +728,11 @@ static const struct qcom_sct_config qdu1000_cfgs = { .num_config = ARRAY_SIZE(qdu1000_cfg), }; +static const struct qcom_sct_config sa8775p_cfgs = { + .llcc_config = sa8775p_cfg, + .num_config = ARRAY_SIZE(sa8775p_cfg), +}; + static const struct qcom_sct_config sc7180_cfgs = { .llcc_config = sc7180_cfg, .num_config = ARRAY_SIZE(sc7180_cfg), @@ -821,6 +856,7 @@ EXPORT_SYMBOL_GPL(llcc_slice_putd); static int llcc_update_act_ctrl(u32 sid, u32 act_ctrl_reg_val, u32 status) { + struct regmap *regmap; u32 act_ctrl_reg; u32 act_clear_reg; u32 status_reg; @@ -849,7 +885,8 @@ static int llcc_update_act_ctrl(u32 sid, return ret; if (drv_data->version >= LLCC_VERSION_4_1_0_0) { - ret = regmap_read_poll_timeout(drv_data->bcast_regmap, status_reg, + regmap = drv_data->bcast_and_regmap ?: drv_data->bcast_regmap; + ret = 
regmap_read_poll_timeout(regmap, status_reg, slice_status, (slice_status & ACT_COMPLETE), 0, LLCC_STATUS_READ_DELAY); if (ret) @@ -1258,16 +1295,13 @@ static int qcom_llcc_probe(struct platform_device *pdev) /* Initialize rest of LLCC bank regmaps */ for (i = 1; i < num_banks; i++) { - char *base = kasprintf(GFP_KERNEL, "llcc%d_base", i); + char *base __free(kfree) = kasprintf(GFP_KERNEL, "llcc%d_base", i); drv_data->regmaps[i] = qcom_llcc_init_mmio(pdev, i, base); if (IS_ERR(drv_data->regmaps[i])) { ret = PTR_ERR(drv_data->regmaps[i]); - kfree(base); goto err; } - - kfree(base); } drv_data->bcast_regmap = qcom_llcc_init_mmio(pdev, i, "llcc_broadcast_base"); @@ -1284,6 +1318,18 @@ static int qcom_llcc_probe(struct platform_device *pdev) drv_data->version = version; + /* Applicable only when drv_data->version >= 4.1 */ + if (drv_data->version >= LLCC_VERSION_4_1_0_0) { + drv_data->bcast_and_regmap = qcom_llcc_init_mmio(pdev, i + 1, "llcc_broadcast_and_base"); + if (IS_ERR(drv_data->bcast_and_regmap)) { + ret = PTR_ERR(drv_data->bcast_and_regmap); + if (ret == -EINVAL) + drv_data->bcast_and_regmap = NULL; + else + goto err; + } + } + llcc_cfg = cfg->sct_data; sz = cfg->size; @@ -1332,6 +1378,7 @@ err: static const struct of_device_id qcom_llcc_of_match[] = { { .compatible = "qcom,qdu1000-llcc", .data = &qdu1000_cfgs}, + { .compatible = "qcom,sa8775p-llcc", .data = &sa8775p_cfgs }, { .compatible = "qcom,sc7180-llcc", .data = &sc7180_cfgs }, { .compatible = "qcom,sc7280-llcc", .data = &sc7280_cfgs }, { .compatible = "qcom,sc8180x-llcc", .data = &sc8180x_cfgs }, diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c index 6f177e46fa0f..b2c0fb55d4ae 100644 --- a/drivers/soc/qcom/mdt_loader.c +++ b/drivers/soc/qcom/mdt_loader.c @@ -7,6 +7,7 @@ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. */ +#include <linux/cleanup.h> #include <linux/device.h> #include <linux/elf.h> #include <linux/firmware.h> @@ -37,13 +38,12 @@ static ssize_t mdt_load_split_segment(void *ptr, const struct elf32_phdr *phdrs, { const struct elf32_phdr *phdr = &phdrs[segment]; const struct firmware *seg_fw; - char *seg_name; ssize_t ret; if (strlen(fw_name) < 4) return -EINVAL; - seg_name = kstrdup(fw_name, GFP_KERNEL); + char *seg_name __free(kfree) = kstrdup(fw_name, GFP_KERNEL); if (!seg_name) return -ENOMEM; @@ -52,7 +52,6 @@ static ssize_t mdt_load_split_segment(void *ptr, const struct elf32_phdr *phdrs, ptr, phdr->p_filesz); if (ret) { dev_err(dev, "error %zd loading %s\n", ret, seg_name); - kfree(seg_name); return ret; } @@ -64,7 +63,6 @@ static ssize_t mdt_load_split_segment(void *ptr, const struct elf32_phdr *phdrs, } release_firmware(seg_fw); - kfree(seg_name); return ret; } diff --git a/drivers/soc/qcom/ocmem.c b/drivers/soc/qcom/ocmem.c index e8841d247953..6b6dd80cbc0f 100644 --- a/drivers/soc/qcom/ocmem.c +++ b/drivers/soc/qcom/ocmem.c @@ -10,6 +10,7 @@ */ #include <linux/bitfield.h> +#include <linux/cleanup.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/kernel.h> @@ -216,7 +217,6 @@ EXPORT_SYMBOL_GPL(of_get_ocmem); struct ocmem_buf *ocmem_allocate(struct ocmem *ocmem, enum ocmem_client client, unsigned long size) { - struct ocmem_buf *buf; int ret; /* TODO: add support for other clients... 
*/ @@ -229,7 +229,7 @@ struct ocmem_buf *ocmem_allocate(struct ocmem *ocmem, enum ocmem_client client, if (test_and_set_bit_lock(BIT(client), &ocmem->active_allocations)) return ERR_PTR(-EBUSY); - buf = kzalloc(sizeof(*buf), GFP_KERNEL); + struct ocmem_buf *buf __free(kfree) = kzalloc(sizeof(*buf), GFP_KERNEL); if (!buf) { ret = -ENOMEM; goto err_unlock; @@ -247,7 +247,7 @@ struct ocmem_buf *ocmem_allocate(struct ocmem *ocmem, enum ocmem_client client, if (ret) { dev_err(ocmem->dev, "could not lock: %d\n", ret); ret = -EINVAL; - goto err_kfree; + goto err_unlock; } } else { ocmem_write(ocmem, OCMEM_REG_GFX_MPU_START, buf->offset); @@ -258,10 +258,8 @@ struct ocmem_buf *ocmem_allocate(struct ocmem *ocmem, enum ocmem_client client, dev_dbg(ocmem->dev, "using %ldK of OCMEM at 0x%08lx for client %d\n", size / 1024, buf->addr, client); - return buf; + return_ptr(buf); -err_kfree: - kfree(buf); err_unlock: clear_bit_unlock(BIT(client), &ocmem->active_allocations); diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c index a1b6a4081dea..328b6153b2be 100644 --- a/drivers/soc/qcom/pdr_interface.c +++ b/drivers/soc/qcom/pdr_interface.c @@ -3,6 +3,7 @@ * Copyright (C) 2020 The Linux Foundation. All rights reserved. */ +#include <linux/cleanup.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> @@ -76,12 +77,12 @@ static int pdr_locator_new_server(struct qmi_handle *qmi, locator_hdl); struct pdr_service *pds; + mutex_lock(&pdr->lock); /* Create a local client port for QMI communication */ pdr->locator_addr.sq_family = AF_QIPCRTR; pdr->locator_addr.sq_node = svc->node; pdr->locator_addr.sq_port = svc->port; - mutex_lock(&pdr->lock); pdr->locator_init_complete = true; mutex_unlock(&pdr->lock); @@ -104,10 +105,10 @@ static void pdr_locator_del_server(struct qmi_handle *qmi, mutex_lock(&pdr->lock); pdr->locator_init_complete = false; - mutex_unlock(&pdr->lock); pdr->locator_addr.sq_node = 0; pdr->locator_addr.sq_port = 0; + mutex_unlock(&pdr->lock); } static const struct qmi_ops pdr_locator_ops = { @@ -365,12 +366,14 @@ static int pdr_get_domain_list(struct servreg_get_domain_list_req *req, if (ret < 0) return ret; + mutex_lock(&pdr->lock); ret = qmi_send_request(&pdr->locator_hdl, &pdr->locator_addr, &txn, SERVREG_GET_DOMAIN_LIST_REQ, SERVREG_GET_DOMAIN_LIST_REQ_MAX_LEN, servreg_get_domain_list_req_ei, req); + mutex_unlock(&pdr->lock); if (ret < 0) { qmi_txn_cancel(&txn); return ret; @@ -394,13 +397,13 @@ static int pdr_get_domain_list(struct servreg_get_domain_list_req *req, static int pdr_locate_service(struct pdr_handle *pdr, struct pdr_service *pds) { - struct servreg_get_domain_list_resp *resp; struct servreg_get_domain_list_req req; struct servreg_location_entry *entry; int domains_read = 0; int ret, i; - resp = kzalloc(sizeof(*resp), GFP_KERNEL); + struct servreg_get_domain_list_resp *resp __free(kfree) = kzalloc(sizeof(*resp), + GFP_KERNEL); if (!resp) return -ENOMEM; @@ -413,9 +416,9 @@ static int pdr_locate_service(struct pdr_handle *pdr, struct pdr_service *pds) req.domain_offset = domains_read; ret = pdr_get_domain_list(&req, resp, pdr); if (ret < 0) - goto out; + return ret; - for (i = domains_read; i < resp->domain_list_len; i++) { + for (i = 0; i < resp->domain_list_len; i++) { entry = &resp->domain_list[i]; if (strnlen(entry->name, sizeof(entry->name)) == sizeof(entry->name)) @@ -425,7 +428,7 @@ static int pdr_locate_service(struct pdr_handle *pdr, struct pdr_service *pds) pds->service_data_valid = entry->service_data_valid; 
pds->service_data = entry->service_data; pds->instance = entry->instance; - goto out; + return 0; } } @@ -438,8 +441,7 @@ static int pdr_locate_service(struct pdr_handle *pdr, struct pdr_service *pds) domains_read += resp->domain_list_len; } while (domains_read < resp->total_domains); -out: - kfree(resp); + return ret; } @@ -515,8 +517,7 @@ struct pdr_service *pdr_add_lookup(struct pdr_handle *pdr, const char *service_name, const char *service_path) { - struct pdr_service *pds, *tmp; - int ret; + struct pdr_service *tmp; if (IS_ERR_OR_NULL(pdr)) return ERR_PTR(-EINVAL); @@ -525,7 +526,7 @@ struct pdr_service *pdr_add_lookup(struct pdr_handle *pdr, !service_path || strlen(service_path) > SERVREG_NAME_LENGTH) return ERR_PTR(-EINVAL); - pds = kzalloc(sizeof(*pds), GFP_KERNEL); + struct pdr_service *pds __free(kfree) = kzalloc(sizeof(*pds), GFP_KERNEL); if (!pds) return ERR_PTR(-ENOMEM); @@ -540,8 +541,7 @@ struct pdr_service *pdr_add_lookup(struct pdr_handle *pdr, continue; mutex_unlock(&pdr->list_lock); - ret = -EALREADY; - goto err; + return ERR_PTR(-EALREADY); } list_add(&pds->node, &pdr->lookups); @@ -549,10 +549,7 @@ struct pdr_service *pdr_add_lookup(struct pdr_handle *pdr, schedule_work(&pdr->locator_work); - return pds; -err: - kfree(pds); - return ERR_PTR(ret); + return_ptr(pds); } EXPORT_SYMBOL_GPL(pdr_add_lookup); @@ -649,13 +646,12 @@ struct pdr_handle *pdr_handle_alloc(void (*status)(int state, char *service_path, void *priv), void *priv) { - struct pdr_handle *pdr; int ret; if (!status) return ERR_PTR(-EINVAL); - pdr = kzalloc(sizeof(*pdr), GFP_KERNEL); + struct pdr_handle *pdr __free(kfree) = kzalloc(sizeof(*pdr), GFP_KERNEL); if (!pdr) return ERR_PTR(-ENOMEM); @@ -674,10 +670,8 @@ struct pdr_handle *pdr_handle_alloc(void (*status)(int state, INIT_WORK(&pdr->indack_work, pdr_indack_work); pdr->notifier_wq = create_singlethread_workqueue("pdr_notifier_wq"); - if (!pdr->notifier_wq) { - ret = -ENOMEM; - goto free_pdr_handle; - } + if (!pdr->notifier_wq) + return ERR_PTR(-ENOMEM); pdr->indack_wq = alloc_ordered_workqueue("pdr_indack_wq", WQ_HIGHPRI); if (!pdr->indack_wq) { @@ -702,7 +696,7 @@ struct pdr_handle *pdr_handle_alloc(void (*status)(int state, if (ret < 0) goto release_qmi_handle; - return pdr; + return_ptr(pdr); release_qmi_handle: qmi_handle_release(&pdr->locator_hdl); @@ -710,8 +704,6 @@ destroy_indack: destroy_workqueue(pdr->indack_wq); destroy_notifier: destroy_workqueue(pdr->notifier_wq); -free_pdr_handle: - kfree(pdr); return ERR_PTR(ret); } diff --git a/drivers/soc/qcom/pdr_internal.h b/drivers/soc/qcom/pdr_internal.h index 03c282b7f17e..8d17f7fb79e7 100644 --- a/drivers/soc/qcom/pdr_internal.h +++ b/drivers/soc/qcom/pdr_internal.h @@ -13,6 +13,8 @@ #define SERVREG_SET_ACK_REQ 0x23 #define SERVREG_RESTART_PD_REQ 0x24 +#define SERVREG_LOC_PFR_REQ 0x24 + #define SERVREG_DOMAIN_LIST_LENGTH 32 #define SERVREG_RESTART_PD_REQ_MAX_LEN 67 #define SERVREG_REGISTER_LISTENER_REQ_LEN 71 @@ -20,6 +22,7 @@ #define SERVREG_GET_DOMAIN_LIST_REQ_MAX_LEN 74 #define SERVREG_STATE_UPDATED_IND_MAX_LEN 79 #define SERVREG_GET_DOMAIN_LIST_RESP_MAX_LEN 2389 +#define SERVREG_LOC_PFR_RESP_MAX_LEN 10 struct servreg_location_entry { char name[SERVREG_NAME_LENGTH + 1]; @@ -28,83 +31,12 @@ struct servreg_location_entry { u32 instance; }; -static const struct qmi_elem_info servreg_location_entry_ei[] = { - { - .data_type = QMI_STRING, - .elem_len = SERVREG_NAME_LENGTH + 1, - .elem_size = sizeof(char), - .array_type = NO_ARRAY, - .tlv_type = 0, - .offset = offsetof(struct servreg_location_entry, 
- name), - }, - { - .data_type = QMI_UNSIGNED_4_BYTE, - .elem_len = 1, - .elem_size = sizeof(u32), - .array_type = NO_ARRAY, - .tlv_type = 0, - .offset = offsetof(struct servreg_location_entry, - instance), - }, - { - .data_type = QMI_UNSIGNED_1_BYTE, - .elem_len = 1, - .elem_size = sizeof(u8), - .array_type = NO_ARRAY, - .tlv_type = 0, - .offset = offsetof(struct servreg_location_entry, - service_data_valid), - }, - { - .data_type = QMI_UNSIGNED_4_BYTE, - .elem_len = 1, - .elem_size = sizeof(u32), - .array_type = NO_ARRAY, - .tlv_type = 0, - .offset = offsetof(struct servreg_location_entry, - service_data), - }, - {} -}; - struct servreg_get_domain_list_req { char service_name[SERVREG_NAME_LENGTH + 1]; u8 domain_offset_valid; u32 domain_offset; }; -static const struct qmi_elem_info servreg_get_domain_list_req_ei[] = { - { - .data_type = QMI_STRING, - .elem_len = SERVREG_NAME_LENGTH + 1, - .elem_size = sizeof(char), - .array_type = NO_ARRAY, - .tlv_type = 0x01, - .offset = offsetof(struct servreg_get_domain_list_req, - service_name), - }, - { - .data_type = QMI_OPT_FLAG, - .elem_len = 1, - .elem_size = sizeof(u8), - .array_type = NO_ARRAY, - .tlv_type = 0x10, - .offset = offsetof(struct servreg_get_domain_list_req, - domain_offset_valid), - }, - { - .data_type = QMI_UNSIGNED_4_BYTE, - .elem_len = 1, - .elem_size = sizeof(u32), - .array_type = NO_ARRAY, - .tlv_type = 0x10, - .offset = offsetof(struct servreg_get_domain_list_req, - domain_offset), - }, - {} -}; - struct servreg_get_domain_list_resp { struct qmi_response_type_v01 resp; u8 total_domains_valid; @@ -116,264 +48,60 @@ struct servreg_get_domain_list_resp { struct servreg_location_entry domain_list[SERVREG_DOMAIN_LIST_LENGTH]; }; -static const struct qmi_elem_info servreg_get_domain_list_resp_ei[] = { - { - .data_type = QMI_STRUCT, - .elem_len = 1, - .elem_size = sizeof(struct qmi_response_type_v01), - .array_type = NO_ARRAY, - .tlv_type = 0x02, - .offset = offsetof(struct servreg_get_domain_list_resp, - resp), - .ei_array = qmi_response_type_v01_ei, - }, - { - .data_type = QMI_OPT_FLAG, - .elem_len = 1, - .elem_size = sizeof(u8), - .array_type = NO_ARRAY, - .tlv_type = 0x10, - .offset = offsetof(struct servreg_get_domain_list_resp, - total_domains_valid), - }, - { - .data_type = QMI_UNSIGNED_2_BYTE, - .elem_len = 1, - .elem_size = sizeof(u16), - .array_type = NO_ARRAY, - .tlv_type = 0x10, - .offset = offsetof(struct servreg_get_domain_list_resp, - total_domains), - }, - { - .data_type = QMI_OPT_FLAG, - .elem_len = 1, - .elem_size = sizeof(u8), - .array_type = NO_ARRAY, - .tlv_type = 0x11, - .offset = offsetof(struct servreg_get_domain_list_resp, - db_rev_count_valid), - }, - { - .data_type = QMI_UNSIGNED_2_BYTE, - .elem_len = 1, - .elem_size = sizeof(u16), - .array_type = NO_ARRAY, - .tlv_type = 0x11, - .offset = offsetof(struct servreg_get_domain_list_resp, - db_rev_count), - }, - { - .data_type = QMI_OPT_FLAG, - .elem_len = 1, - .elem_size = sizeof(u8), - .array_type = NO_ARRAY, - .tlv_type = 0x12, - .offset = offsetof(struct servreg_get_domain_list_resp, - domain_list_valid), - }, - { - .data_type = QMI_DATA_LEN, - .elem_len = 1, - .elem_size = sizeof(u8), - .array_type = NO_ARRAY, - .tlv_type = 0x12, - .offset = offsetof(struct servreg_get_domain_list_resp, - domain_list_len), - }, - { - .data_type = QMI_STRUCT, - .elem_len = SERVREG_DOMAIN_LIST_LENGTH, - .elem_size = sizeof(struct servreg_location_entry), - .array_type = VAR_LEN_ARRAY, - .tlv_type = 0x12, - .offset = offsetof(struct servreg_get_domain_list_resp, - 
domain_list), - .ei_array = servreg_location_entry_ei, - }, - {} -}; - struct servreg_register_listener_req { u8 enable; char service_path[SERVREG_NAME_LENGTH + 1]; }; -static const struct qmi_elem_info servreg_register_listener_req_ei[] = { - { - .data_type = QMI_UNSIGNED_1_BYTE, - .elem_len = 1, - .elem_size = sizeof(u8), - .array_type = NO_ARRAY, - .tlv_type = 0x01, - .offset = offsetof(struct servreg_register_listener_req, - enable), - }, - { - .data_type = QMI_STRING, - .elem_len = SERVREG_NAME_LENGTH + 1, - .elem_size = sizeof(char), - .array_type = NO_ARRAY, - .tlv_type = 0x02, - .offset = offsetof(struct servreg_register_listener_req, - service_path), - }, - {} -}; - struct servreg_register_listener_resp { struct qmi_response_type_v01 resp; u8 curr_state_valid; enum servreg_service_state curr_state; }; -static const struct qmi_elem_info servreg_register_listener_resp_ei[] = { - { - .data_type = QMI_STRUCT, - .elem_len = 1, - .elem_size = sizeof(struct qmi_response_type_v01), - .array_type = NO_ARRAY, - .tlv_type = 0x02, - .offset = offsetof(struct servreg_register_listener_resp, - resp), - .ei_array = qmi_response_type_v01_ei, - }, - { - .data_type = QMI_OPT_FLAG, - .elem_len = 1, - .elem_size = sizeof(u8), - .array_type = NO_ARRAY, - .tlv_type = 0x10, - .offset = offsetof(struct servreg_register_listener_resp, - curr_state_valid), - }, - { - .data_type = QMI_SIGNED_4_BYTE_ENUM, - .elem_len = 1, - .elem_size = sizeof(enum servreg_service_state), - .array_type = NO_ARRAY, - .tlv_type = 0x10, - .offset = offsetof(struct servreg_register_listener_resp, - curr_state), - }, - {} -}; - struct servreg_restart_pd_req { char service_path[SERVREG_NAME_LENGTH + 1]; }; -static const struct qmi_elem_info servreg_restart_pd_req_ei[] = { - { - .data_type = QMI_STRING, - .elem_len = SERVREG_NAME_LENGTH + 1, - .elem_size = sizeof(char), - .array_type = NO_ARRAY, - .tlv_type = 0x01, - .offset = offsetof(struct servreg_restart_pd_req, - service_path), - }, - {} -}; - struct servreg_restart_pd_resp { struct qmi_response_type_v01 resp; }; -static const struct qmi_elem_info servreg_restart_pd_resp_ei[] = { - { - .data_type = QMI_STRUCT, - .elem_len = 1, - .elem_size = sizeof(struct qmi_response_type_v01), - .array_type = NO_ARRAY, - .tlv_type = 0x02, - .offset = offsetof(struct servreg_restart_pd_resp, - resp), - .ei_array = qmi_response_type_v01_ei, - }, - {} -}; - struct servreg_state_updated_ind { enum servreg_service_state curr_state; char service_path[SERVREG_NAME_LENGTH + 1]; u16 transaction_id; }; -static const struct qmi_elem_info servreg_state_updated_ind_ei[] = { - { - .data_type = QMI_SIGNED_4_BYTE_ENUM, - .elem_len = 1, - .elem_size = sizeof(u32), - .array_type = NO_ARRAY, - .tlv_type = 0x01, - .offset = offsetof(struct servreg_state_updated_ind, - curr_state), - }, - { - .data_type = QMI_STRING, - .elem_len = SERVREG_NAME_LENGTH + 1, - .elem_size = sizeof(char), - .array_type = NO_ARRAY, - .tlv_type = 0x02, - .offset = offsetof(struct servreg_state_updated_ind, - service_path), - }, - { - .data_type = QMI_UNSIGNED_2_BYTE, - .elem_len = 1, - .elem_size = sizeof(u16), - .array_type = NO_ARRAY, - .tlv_type = 0x03, - .offset = offsetof(struct servreg_state_updated_ind, - transaction_id), - }, - {} -}; - struct servreg_set_ack_req { char service_path[SERVREG_NAME_LENGTH + 1]; u16 transaction_id; }; -static const struct qmi_elem_info servreg_set_ack_req_ei[] = { - { - .data_type = QMI_STRING, - .elem_len = SERVREG_NAME_LENGTH + 1, - .elem_size = sizeof(char), - .array_type = NO_ARRAY, - .tlv_type 
= 0x01, - .offset = offsetof(struct servreg_set_ack_req, - service_path), - }, - { - .data_type = QMI_UNSIGNED_2_BYTE, - .elem_len = 1, - .elem_size = sizeof(u16), - .array_type = NO_ARRAY, - .tlv_type = 0x02, - .offset = offsetof(struct servreg_set_ack_req, - transaction_id), - }, - {} -}; - struct servreg_set_ack_resp { struct qmi_response_type_v01 resp; }; -static const struct qmi_elem_info servreg_set_ack_resp_ei[] = { - { - .data_type = QMI_STRUCT, - .elem_len = 1, - .elem_size = sizeof(struct qmi_response_type_v01), - .array_type = NO_ARRAY, - .tlv_type = 0x02, - .offset = offsetof(struct servreg_set_ack_resp, - resp), - .ei_array = qmi_response_type_v01_ei, - }, - {} +struct servreg_loc_pfr_req { + char service[SERVREG_NAME_LENGTH + 1]; + char reason[257]; }; +struct servreg_loc_pfr_resp { + struct qmi_response_type_v01 rsp; +}; + +extern const struct qmi_elem_info servreg_location_entry_ei[]; +extern const struct qmi_elem_info servreg_get_domain_list_req_ei[]; +extern const struct qmi_elem_info servreg_get_domain_list_resp_ei[]; +extern const struct qmi_elem_info servreg_register_listener_req_ei[]; +extern const struct qmi_elem_info servreg_register_listener_resp_ei[]; +extern const struct qmi_elem_info servreg_restart_pd_req_ei[]; +extern const struct qmi_elem_info servreg_restart_pd_resp_ei[]; +extern const struct qmi_elem_info servreg_state_updated_ind_ei[]; +extern const struct qmi_elem_info servreg_set_ack_req_ei[]; +extern const struct qmi_elem_info servreg_set_ack_resp_ei[]; +extern const struct qmi_elem_info servreg_loc_pfr_req_ei[]; +extern const struct qmi_elem_info servreg_loc_pfr_resp_ei[]; + #endif diff --git a/drivers/soc/qcom/pmic_glink.c b/drivers/soc/qcom/pmic_glink.c index 65279243072c..9ebc0ba35947 100644 --- a/drivers/soc/qcom/pmic_glink.c +++ b/drivers/soc/qcom/pmic_glink.c @@ -373,8 +373,17 @@ static struct platform_driver pmic_glink_driver = { static int pmic_glink_init(void) { - platform_driver_register(&pmic_glink_driver); - register_rpmsg_driver(&pmic_glink_rpmsg_driver); + int ret; + + ret = platform_driver_register(&pmic_glink_driver); + if (ret < 0) + return ret; + + ret = register_rpmsg_driver(&pmic_glink_rpmsg_driver); + if (ret < 0) { + platform_driver_unregister(&pmic_glink_driver); + return ret; + } return 0; } diff --git a/drivers/soc/qcom/pmic_glink_altmode.c b/drivers/soc/qcom/pmic_glink_altmode.c index b3808fc24c69..1e0808b3cb93 100644 --- a/drivers/soc/qcom/pmic_glink_altmode.c +++ b/drivers/soc/qcom/pmic_glink_altmode.c @@ -20,7 +20,7 @@ #include <linux/soc/qcom/pmic_glink.h> -#define PMIC_GLINK_MAX_PORTS 2 +#define PMIC_GLINK_MAX_PORTS 3 #define USBC_SC8180X_NOTIFY_IND 0x13 #define USBC_CMD_WRITE_REQ 0x15 diff --git a/drivers/soc/qcom/qcom_pd_mapper.c b/drivers/soc/qcom/qcom_pd_mapper.c new file mode 100644 index 000000000000..a4c007080665 --- /dev/null +++ b/drivers/soc/qcom/qcom_pd_mapper.c @@ -0,0 +1,677 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Qualcomm Protection Domain mapper + * + * Copyright (c) 2023 Linaro Ltd. 
+ */ + +#include <linux/auxiliary_bus.h> +#include <linux/kernel.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/refcount.h> +#include <linux/slab.h> +#include <linux/soc/qcom/qmi.h> + +#include "pdr_internal.h" + +#define SERVREG_QMI_VERSION 0x101 +#define SERVREG_QMI_INSTANCE 0 + +#define TMS_SERVREG_SERVICE "tms/servreg" + +struct qcom_pdm_domain_data { + const char *domain; + u32 instance_id; + /* NULL-terminated array */ + const char * services[]; +}; + +struct qcom_pdm_domain { + struct list_head list; + const char *name; + u32 instance_id; +}; + +struct qcom_pdm_service { + struct list_head list; + struct list_head domains; + const char *name; +}; + +struct qcom_pdm_data { + refcount_t refcnt; + struct qmi_handle handle; + struct list_head services; +}; + +static DEFINE_MUTEX(qcom_pdm_mutex); /* protects __qcom_pdm_data */ +static struct qcom_pdm_data *__qcom_pdm_data; + +static struct qcom_pdm_service *qcom_pdm_find(struct qcom_pdm_data *data, + const char *name) +{ + struct qcom_pdm_service *service; + + list_for_each_entry(service, &data->services, list) { + if (!strcmp(service->name, name)) + return service; + } + + return NULL; +} + +static int qcom_pdm_add_service_domain(struct qcom_pdm_data *data, + const char *service_name, + const char *domain_name, + u32 instance_id) +{ + struct qcom_pdm_service *service; + struct qcom_pdm_domain *domain; + + service = qcom_pdm_find(data, service_name); + if (service) { + list_for_each_entry(domain, &service->domains, list) { + if (!strcmp(domain->name, domain_name)) + return -EBUSY; + } + } else { + service = kzalloc(sizeof(*service), GFP_KERNEL); + if (!service) + return -ENOMEM; + + INIT_LIST_HEAD(&service->domains); + service->name = service_name; + + list_add_tail(&service->list, &data->services); + } + + domain = kzalloc(sizeof(*domain), GFP_KERNEL); + if (!domain) { + if (list_empty(&service->domains)) { + list_del(&service->list); + kfree(service); + } + + return -ENOMEM; + } + + domain->name = domain_name; + domain->instance_id = instance_id; + list_add_tail(&domain->list, &service->domains); + + return 0; +} + +static int qcom_pdm_add_domain(struct qcom_pdm_data *data, + const struct qcom_pdm_domain_data *domain) +{ + int ret; + int i; + + ret = qcom_pdm_add_service_domain(data, + TMS_SERVREG_SERVICE, + domain->domain, + domain->instance_id); + if (ret) + return ret; + + for (i = 0; domain->services[i]; i++) { + ret = qcom_pdm_add_service_domain(data, + domain->services[i], + domain->domain, + domain->instance_id); + if (ret) + return ret; + } + + return 0; + +} + +static void qcom_pdm_free_domains(struct qcom_pdm_data *data) +{ + struct qcom_pdm_service *service, *tservice; + struct qcom_pdm_domain *domain, *tdomain; + + list_for_each_entry_safe(service, tservice, &data->services, list) { + list_for_each_entry_safe(domain, tdomain, &service->domains, list) { + list_del(&domain->list); + kfree(domain); + } + + list_del(&service->list); + kfree(service); + } +} + +static void qcom_pdm_get_domain_list(struct qmi_handle *qmi, + struct sockaddr_qrtr *sq, + struct qmi_txn *txn, + const void *decoded) +{ + struct qcom_pdm_data *data = container_of(qmi, struct qcom_pdm_data, handle); + const struct servreg_get_domain_list_req *req = decoded; + struct servreg_get_domain_list_resp *rsp; + struct qcom_pdm_service *service; + u32 offset; + int ret; + + rsp = kzalloc(sizeof(*rsp), GFP_KERNEL); + if (!rsp) + return; + + offset = req->domain_offset_valid ? 
req->domain_offset : 0; + + rsp->resp.result = QMI_RESULT_SUCCESS_V01; + rsp->resp.error = QMI_ERR_NONE_V01; + + rsp->db_rev_count_valid = true; + rsp->db_rev_count = 1; + + rsp->total_domains_valid = true; + rsp->total_domains = 0; + + mutex_lock(&qcom_pdm_mutex); + + service = qcom_pdm_find(data, req->service_name); + if (service) { + struct qcom_pdm_domain *domain; + + rsp->domain_list_valid = true; + rsp->domain_list_len = 0; + + list_for_each_entry(domain, &service->domains, list) { + u32 i = rsp->total_domains++; + + if (i >= offset && i < SERVREG_DOMAIN_LIST_LENGTH) { + u32 j = rsp->domain_list_len++; + + strscpy(rsp->domain_list[j].name, domain->name, + sizeof(rsp->domain_list[i].name)); + rsp->domain_list[j].instance = domain->instance_id; + + pr_debug("PDM: found %s / %d\n", domain->name, + domain->instance_id); + } + } + } + + pr_debug("PDM: service '%s' offset %d returning %d domains (of %d)\n", req->service_name, + req->domain_offset_valid ? req->domain_offset : -1, rsp->domain_list_len, rsp->total_domains); + + ret = qmi_send_response(qmi, sq, txn, SERVREG_GET_DOMAIN_LIST_REQ, + SERVREG_GET_DOMAIN_LIST_RESP_MAX_LEN, + servreg_get_domain_list_resp_ei, rsp); + if (ret) + pr_err("Error sending servreg response: %d\n", ret); + + mutex_unlock(&qcom_pdm_mutex); + + kfree(rsp); +} + +static void qcom_pdm_pfr(struct qmi_handle *qmi, + struct sockaddr_qrtr *sq, + struct qmi_txn *txn, + const void *decoded) +{ + const struct servreg_loc_pfr_req *req = decoded; + struct servreg_loc_pfr_resp rsp = {}; + int ret; + + pr_warn_ratelimited("PDM: service '%s' crash: '%s'\n", req->service, req->reason); + + rsp.rsp.result = QMI_RESULT_SUCCESS_V01; + rsp.rsp.error = QMI_ERR_NONE_V01; + + ret = qmi_send_response(qmi, sq, txn, SERVREG_LOC_PFR_REQ, + SERVREG_LOC_PFR_RESP_MAX_LEN, + servreg_loc_pfr_resp_ei, &rsp); + if (ret) + pr_err("Error sending servreg response: %d\n", ret); +} + +static const struct qmi_msg_handler qcom_pdm_msg_handlers[] = { + { + .type = QMI_REQUEST, + .msg_id = SERVREG_GET_DOMAIN_LIST_REQ, + .ei = servreg_get_domain_list_req_ei, + .decoded_size = sizeof(struct servreg_get_domain_list_req), + .fn = qcom_pdm_get_domain_list, + }, + { + .type = QMI_REQUEST, + .msg_id = SERVREG_LOC_PFR_REQ, + .ei = servreg_loc_pfr_req_ei, + .decoded_size = sizeof(struct servreg_loc_pfr_req), + .fn = qcom_pdm_pfr, + }, + { }, +}; + +static const struct qcom_pdm_domain_data adsp_audio_pd = { + .domain = "msm/adsp/audio_pd", + .instance_id = 74, + .services = { + "avs/audio", + NULL, + }, +}; + +static const struct qcom_pdm_domain_data adsp_charger_pd = { + .domain = "msm/adsp/charger_pd", + .instance_id = 74, + .services = { NULL }, +}; + +static const struct qcom_pdm_domain_data adsp_root_pd = { + .domain = "msm/adsp/root_pd", + .instance_id = 74, + .services = { NULL }, +}; + +static const struct qcom_pdm_domain_data adsp_root_pd_pdr = { + .domain = "msm/adsp/root_pd", + .instance_id = 74, + .services = { + "tms/pdr_enabled", + NULL, + }, +}; + +static const struct qcom_pdm_domain_data adsp_sensor_pd = { + .domain = "msm/adsp/sensor_pd", + .instance_id = 74, + .services = { NULL }, +}; + +static const struct qcom_pdm_domain_data msm8996_adsp_audio_pd = { + .domain = "msm/adsp/audio_pd", + .instance_id = 4, + .services = { NULL }, +}; + +static const struct qcom_pdm_domain_data msm8996_adsp_root_pd = { + .domain = "msm/adsp/root_pd", + .instance_id = 4, + .services = { NULL }, +}; + +static const struct qcom_pdm_domain_data cdsp_root_pd = { + .domain = "msm/cdsp/root_pd", + .instance_id = 76, + 
.services = { NULL }, +}; + +static const struct qcom_pdm_domain_data slpi_root_pd = { + .domain = "msm/slpi/root_pd", + .instance_id = 90, + .services = { NULL }, +}; + +static const struct qcom_pdm_domain_data slpi_sensor_pd = { + .domain = "msm/slpi/sensor_pd", + .instance_id = 90, + .services = { NULL }, +}; + +static const struct qcom_pdm_domain_data mpss_root_pd = { + .domain = "msm/modem/root_pd", + .instance_id = 180, + .services = { + NULL, + }, +}; + +static const struct qcom_pdm_domain_data mpss_root_pd_gps = { + .domain = "msm/modem/root_pd", + .instance_id = 180, + .services = { + "gps/gps_service", + NULL, + }, +}; + +static const struct qcom_pdm_domain_data mpss_root_pd_gps_pdr = { + .domain = "msm/modem/root_pd", + .instance_id = 180, + .services = { + "gps/gps_service", + "tms/pdr_enabled", + NULL, + }, +}; + +static const struct qcom_pdm_domain_data msm8996_mpss_root_pd = { + .domain = "msm/modem/root_pd", + .instance_id = 100, + .services = { NULL }, +}; + +static const struct qcom_pdm_domain_data mpss_wlan_pd = { + .domain = "msm/modem/wlan_pd", + .instance_id = 180, + .services = { + "kernel/elf_loader", + "wlan/fw", + NULL, + }, +}; + +static const struct qcom_pdm_domain_data *msm8996_domains[] = { + &msm8996_adsp_audio_pd, + &msm8996_adsp_root_pd, + &msm8996_mpss_root_pd, + NULL, +}; + +static const struct qcom_pdm_domain_data *msm8998_domains[] = { + &mpss_root_pd, + &mpss_wlan_pd, + NULL, +}; + +static const struct qcom_pdm_domain_data *qcm2290_domains[] = { + &adsp_audio_pd, + &adsp_root_pd, + &adsp_sensor_pd, + &mpss_root_pd_gps, + &mpss_wlan_pd, + NULL, +}; + +static const struct qcom_pdm_domain_data *qcs404_domains[] = { + &adsp_audio_pd, + &adsp_root_pd, + &adsp_sensor_pd, + &cdsp_root_pd, + &mpss_root_pd, + &mpss_wlan_pd, + NULL, +}; + +static const struct qcom_pdm_domain_data *sc7180_domains[] = { + &adsp_audio_pd, + &adsp_root_pd_pdr, + &adsp_sensor_pd, + &mpss_root_pd_gps_pdr, + &mpss_wlan_pd, + NULL, +}; + +static const struct qcom_pdm_domain_data *sc7280_domains[] = { + &adsp_audio_pd, + &adsp_root_pd_pdr, + &adsp_charger_pd, + &adsp_sensor_pd, + &cdsp_root_pd, + &mpss_root_pd_gps_pdr, + NULL, +}; + +static const struct qcom_pdm_domain_data *sc8180x_domains[] = { + &adsp_audio_pd, + &adsp_root_pd, + &adsp_charger_pd, + &cdsp_root_pd, + &mpss_root_pd_gps, + &mpss_wlan_pd, + NULL, +}; + +static const struct qcom_pdm_domain_data *sc8280xp_domains[] = { + &adsp_audio_pd, + &adsp_root_pd_pdr, + &adsp_charger_pd, + &cdsp_root_pd, + NULL, +}; + +static const struct qcom_pdm_domain_data *sdm660_domains[] = { + &adsp_audio_pd, + &adsp_root_pd, + &adsp_sensor_pd, + &cdsp_root_pd, + &mpss_root_pd, + &mpss_wlan_pd, + NULL, +}; + +static const struct qcom_pdm_domain_data *sdm670_domains[] = { + &adsp_audio_pd, + &adsp_root_pd, + &cdsp_root_pd, + &mpss_root_pd, + &mpss_wlan_pd, + NULL, +}; + +static const struct qcom_pdm_domain_data *sdm845_domains[] = { + &adsp_audio_pd, + &adsp_root_pd, + &cdsp_root_pd, + &mpss_root_pd, + &mpss_wlan_pd, + &slpi_root_pd, + &slpi_sensor_pd, + NULL, +}; + +static const struct qcom_pdm_domain_data *sm6115_domains[] = { + &adsp_audio_pd, + &adsp_root_pd, + &adsp_sensor_pd, + &cdsp_root_pd, + &mpss_root_pd_gps, + &mpss_wlan_pd, + NULL, +}; + +static const struct qcom_pdm_domain_data *sm6350_domains[] = { + &adsp_audio_pd, + &adsp_root_pd, + &adsp_sensor_pd, + &cdsp_root_pd, + &mpss_wlan_pd, + NULL, +}; + +static const struct qcom_pdm_domain_data *sm8150_domains[] = { + &adsp_audio_pd, + &adsp_root_pd, + &cdsp_root_pd, + &mpss_root_pd_gps, 
+ &mpss_wlan_pd, + NULL, +}; + +static const struct qcom_pdm_domain_data *sm8250_domains[] = { + &adsp_audio_pd, + &adsp_root_pd, + &cdsp_root_pd, + &slpi_root_pd, + &slpi_sensor_pd, + NULL, +}; + +static const struct qcom_pdm_domain_data *sm8350_domains[] = { + &adsp_audio_pd, + &adsp_root_pd_pdr, + &adsp_charger_pd, + &cdsp_root_pd, + &mpss_root_pd_gps, + &slpi_root_pd, + &slpi_sensor_pd, + NULL, +}; + +static const struct qcom_pdm_domain_data *sm8550_domains[] = { + &adsp_audio_pd, + &adsp_root_pd, + &adsp_charger_pd, + &adsp_sensor_pd, + &cdsp_root_pd, + &mpss_root_pd_gps, + NULL, +}; + +static const struct of_device_id qcom_pdm_domains[] = { + { .compatible = "qcom,apq8064", .data = NULL, }, + { .compatible = "qcom,apq8074", .data = NULL, }, + { .compatible = "qcom,apq8084", .data = NULL, }, + { .compatible = "qcom,apq8096", .data = msm8996_domains, }, + { .compatible = "qcom,msm8226", .data = NULL, }, + { .compatible = "qcom,msm8974", .data = NULL, }, + { .compatible = "qcom,msm8996", .data = msm8996_domains, }, + { .compatible = "qcom,msm8998", .data = msm8998_domains, }, + { .compatible = "qcom,qcm2290", .data = qcm2290_domains, }, + { .compatible = "qcom,qcs404", .data = qcs404_domains, }, + { .compatible = "qcom,sc7180", .data = sc7180_domains, }, + { .compatible = "qcom,sc7280", .data = sc7280_domains, }, + { .compatible = "qcom,sc8180x", .data = sc8180x_domains, }, + { .compatible = "qcom,sc8280xp", .data = sc8280xp_domains, }, + { .compatible = "qcom,sda660", .data = sdm660_domains, }, + { .compatible = "qcom,sdm660", .data = sdm660_domains, }, + { .compatible = "qcom,sdm670", .data = sdm670_domains, }, + { .compatible = "qcom,sdm845", .data = sdm845_domains, }, + { .compatible = "qcom,sm4250", .data = sm6115_domains, }, + { .compatible = "qcom,sm6115", .data = sm6115_domains, }, + { .compatible = "qcom,sm6350", .data = sm6350_domains, }, + { .compatible = "qcom,sm8150", .data = sm8150_domains, }, + { .compatible = "qcom,sm8250", .data = sm8250_domains, }, + { .compatible = "qcom,sm8350", .data = sm8350_domains, }, + { .compatible = "qcom,sm8450", .data = sm8350_domains, }, + { .compatible = "qcom,sm8550", .data = sm8550_domains, }, + { .compatible = "qcom,sm8650", .data = sm8550_domains, }, + {}, +}; + +static void qcom_pdm_stop(struct qcom_pdm_data *data) +{ + qcom_pdm_free_domains(data); + + /* The server is removed automatically */ + qmi_handle_release(&data->handle); + + kfree(data); +} + +static struct qcom_pdm_data *qcom_pdm_start(void) +{ + const struct qcom_pdm_domain_data * const *domains; + const struct of_device_id *match; + struct qcom_pdm_data *data; + struct device_node *root; + int ret, i; + + root = of_find_node_by_path("/"); + if (!root) + return ERR_PTR(-ENODEV); + + match = of_match_node(qcom_pdm_domains, root); + of_node_put(root); + if (!match) { + pr_notice("PDM: no support for the platform, userspace daemon might be required.\n"); + return ERR_PTR(-ENODEV); + } + + domains = match->data; + if (!domains) { + pr_debug("PDM: no domains\n"); + return ERR_PTR(-ENODEV); + } + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&data->services); + + ret = qmi_handle_init(&data->handle, SERVREG_GET_DOMAIN_LIST_REQ_MAX_LEN, + NULL, qcom_pdm_msg_handlers); + if (ret) { + kfree(data); + return ERR_PTR(ret); + } + + refcount_set(&data->refcnt, 1); + + for (i = 0; domains[i]; i++) { + ret = qcom_pdm_add_domain(data, domains[i]); + if (ret) + goto err_stop; + } + + ret = qmi_add_server(&data->handle, 
SERVREG_LOCATOR_SERVICE, + SERVREG_QMI_VERSION, SERVREG_QMI_INSTANCE); + if (ret) { + pr_err("PDM: error adding server %d\n", ret); + goto err_stop; + } + + return data; + +err_stop: + qcom_pdm_stop(data); + + return ERR_PTR(ret); +} + +static int qcom_pdm_probe(struct auxiliary_device *auxdev, + const struct auxiliary_device_id *id) + +{ + struct qcom_pdm_data *data; + int ret = 0; + + mutex_lock(&qcom_pdm_mutex); + + if (!__qcom_pdm_data) { + data = qcom_pdm_start(); + + if (IS_ERR(data)) + ret = PTR_ERR(data); + else + __qcom_pdm_data = data; + } + + auxiliary_set_drvdata(auxdev, __qcom_pdm_data); + + mutex_unlock(&qcom_pdm_mutex); + + return ret; +} + +static void qcom_pdm_remove(struct auxiliary_device *auxdev) +{ + struct qcom_pdm_data *data; + + data = auxiliary_get_drvdata(auxdev); + if (!data) + return; + + if (refcount_dec_and_mutex_lock(&data->refcnt, &qcom_pdm_mutex)) { + __qcom_pdm_data = NULL; + qcom_pdm_stop(data); + mutex_unlock(&qcom_pdm_mutex); + } +} + +static const struct auxiliary_device_id qcom_pdm_table[] = { + { .name = "qcom_common.pd-mapper" }, + {}, +}; +MODULE_DEVICE_TABLE(auxiliary, qcom_pdm_table); + +static struct auxiliary_driver qcom_pdm_drv = { + .name = "qcom-pdm-mapper", + .id_table = qcom_pdm_table, + .probe = qcom_pdm_probe, + .remove = qcom_pdm_remove, +}; +module_auxiliary_driver(qcom_pdm_drv); + +MODULE_DESCRIPTION("Qualcomm Protection Domain Mapper"); +MODULE_LICENSE("GPL"); diff --git a/drivers/soc/qcom/qcom_pdr_msg.c b/drivers/soc/qcom/qcom_pdr_msg.c new file mode 100644 index 000000000000..bf3e4a47165e --- /dev/null +++ b/drivers/soc/qcom/qcom_pdr_msg.c @@ -0,0 +1,353 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020 The Linux Foundation. All rights reserved. + */ + +#include <linux/module.h> +#include <linux/soc/qcom/qmi.h> + +#include "pdr_internal.h" + +const struct qmi_elem_info servreg_location_entry_ei[] = { + { + .data_type = QMI_STRING, + .elem_len = SERVREG_NAME_LENGTH + 1, + .elem_size = sizeof(char), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct servreg_location_entry, + name), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct servreg_location_entry, + instance), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct servreg_location_entry, + service_data_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct servreg_location_entry, + service_data), + }, + {} +}; +EXPORT_SYMBOL_GPL(servreg_location_entry_ei); + +const struct qmi_elem_info servreg_get_domain_list_req_ei[] = { + { + .data_type = QMI_STRING, + .elem_len = SERVREG_NAME_LENGTH + 1, + .elem_size = sizeof(char), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct servreg_get_domain_list_req, + service_name), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct servreg_get_domain_list_req, + domain_offset_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct servreg_get_domain_list_req, + domain_offset), + }, + {} +}; +EXPORT_SYMBOL_GPL(servreg_get_domain_list_req_ei); + 
+const struct qmi_elem_info servreg_get_domain_list_resp_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct servreg_get_domain_list_resp, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct servreg_get_domain_list_resp, + total_domains_valid), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct servreg_get_domain_list_resp, + total_domains), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct servreg_get_domain_list_resp, + db_rev_count_valid), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct servreg_get_domain_list_resp, + db_rev_count), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct servreg_get_domain_list_resp, + domain_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct servreg_get_domain_list_resp, + domain_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = SERVREG_DOMAIN_LIST_LENGTH, + .elem_size = sizeof(struct servreg_location_entry), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct servreg_get_domain_list_resp, + domain_list), + .ei_array = servreg_location_entry_ei, + }, + {} +}; +EXPORT_SYMBOL_GPL(servreg_get_domain_list_resp_ei); + +const struct qmi_elem_info servreg_register_listener_req_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct servreg_register_listener_req, + enable), + }, + { + .data_type = QMI_STRING, + .elem_len = SERVREG_NAME_LENGTH + 1, + .elem_size = sizeof(char), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct servreg_register_listener_req, + service_path), + }, + {} +}; +EXPORT_SYMBOL_GPL(servreg_register_listener_req_ei); + +const struct qmi_elem_info servreg_register_listener_resp_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct servreg_register_listener_resp, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct servreg_register_listener_resp, + curr_state_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum servreg_service_state), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct servreg_register_listener_resp, + curr_state), + }, + {} +}; +EXPORT_SYMBOL_GPL(servreg_register_listener_resp_ei); + +const struct qmi_elem_info servreg_restart_pd_req_ei[] = { + { + .data_type = QMI_STRING, + .elem_len = SERVREG_NAME_LENGTH + 1, + .elem_size = sizeof(char), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + 
.offset = offsetof(struct servreg_restart_pd_req, + service_path), + }, + {} +}; +EXPORT_SYMBOL_GPL(servreg_restart_pd_req_ei); + +const struct qmi_elem_info servreg_restart_pd_resp_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct servreg_restart_pd_resp, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + {} +}; +EXPORT_SYMBOL_GPL(servreg_restart_pd_resp_ei); + +const struct qmi_elem_info servreg_state_updated_ind_ei[] = { + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct servreg_state_updated_ind, + curr_state), + }, + { + .data_type = QMI_STRING, + .elem_len = SERVREG_NAME_LENGTH + 1, + .elem_size = sizeof(char), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct servreg_state_updated_ind, + service_path), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = NO_ARRAY, + .tlv_type = 0x03, + .offset = offsetof(struct servreg_state_updated_ind, + transaction_id), + }, + {} +}; +EXPORT_SYMBOL_GPL(servreg_state_updated_ind_ei); + +const struct qmi_elem_info servreg_set_ack_req_ei[] = { + { + .data_type = QMI_STRING, + .elem_len = SERVREG_NAME_LENGTH + 1, + .elem_size = sizeof(char), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct servreg_set_ack_req, + service_path), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct servreg_set_ack_req, + transaction_id), + }, + {} +}; +EXPORT_SYMBOL_GPL(servreg_set_ack_req_ei); + +const struct qmi_elem_info servreg_set_ack_resp_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct servreg_set_ack_resp, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + {} +}; +EXPORT_SYMBOL_GPL(servreg_set_ack_resp_ei); + +const struct qmi_elem_info servreg_loc_pfr_req_ei[] = { + { + .data_type = QMI_STRING, + .elem_len = SERVREG_NAME_LENGTH + 1, + .elem_size = sizeof(char), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct servreg_loc_pfr_req, service) + }, + { + .data_type = QMI_STRING, + .elem_len = SERVREG_NAME_LENGTH + 1, + .elem_size = sizeof(char), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct servreg_loc_pfr_req, reason) + }, + {} +}; +EXPORT_SYMBOL_GPL(servreg_loc_pfr_req_ei); + +const struct qmi_elem_info servreg_loc_pfr_resp_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof_field(struct servreg_loc_pfr_resp, rsp), + .tlv_type = 0x02, + .offset = offsetof(struct servreg_loc_pfr_resp, rsp), + .ei_array = qmi_response_type_v01_ei, + }, + {} +}; +EXPORT_SYMBOL_GPL(servreg_loc_pfr_resp_ei); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Qualcomm Protection Domain messages data"); diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c index 561d8037b50a..de86009ecd91 100644 --- a/drivers/soc/qcom/rpmh-rsc.c +++ b/drivers/soc/qcom/rpmh-rsc.c @@ -646,13 +646,14 @@ int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg) { struct tcs_group *tcs; int tcs_id; - unsigned long flags; + + might_sleep(); tcs = get_tcs_for_msg(drv, msg); if (IS_ERR(tcs)) return 
PTR_ERR(tcs); - spin_lock_irqsave(&drv->lock, flags); + spin_lock_irq(&drv->lock); /* Wait forever for a free tcs. It better be there eventually! */ wait_event_lock_irq(drv->tcs_wait, @@ -670,7 +671,7 @@ int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg) write_tcs_reg_sync(drv, drv->regs[RSC_DRV_CMD_ENABLE], tcs_id, 0); enable_tcs_irq(drv, tcs_id, true); } - spin_unlock_irqrestore(&drv->lock, flags); + spin_unlock_irq(&drv->lock); /* * These two can be done after the lock is released because: diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c index 9f26d7f9b9dc..8903ed956312 100644 --- a/drivers/soc/qcom/rpmh.c +++ b/drivers/soc/qcom/rpmh.c @@ -183,7 +183,6 @@ static int __rpmh_write(const struct device *dev, enum rpmh_state state, } if (state == RPMH_ACTIVE_ONLY_STATE) { - WARN_ON(irqs_disabled()); ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg); } else { /* Clean up our call by spoofing tx_done */ diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c index 7191fa0c087f..e40aac281b06 100644 --- a/drivers/soc/qcom/smem.c +++ b/drivers/soc/qcom/smem.c @@ -795,6 +795,39 @@ int qcom_smem_get_soc_id(u32 *id) } EXPORT_SYMBOL_GPL(qcom_smem_get_soc_id); +/** + * qcom_smem_get_feature_code() - return the feature code + * @code: On success, return the feature code here. + * + * Look up the feature code identifier from SMEM and return it. + * + * Return: 0 on success, negative errno on failure. + */ +int qcom_smem_get_feature_code(u32 *code) +{ + struct socinfo *info; + u32 raw_code; + + info = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_HW_SW_BUILD_ID, NULL); + if (IS_ERR(info)) + return PTR_ERR(info); + + /* This only makes sense for socinfo >= 16 */ + if (__le32_to_cpu(info->fmt) < SOCINFO_VERSION(0, 16)) + return -EOPNOTSUPP; + + raw_code = __le32_to_cpu(info->feature_code); + + /* Ensure the value makes sense */ + if (raw_code > SOCINFO_FC_INT_MAX) + raw_code = SOCINFO_FC_UNKNOWN; + + *code = raw_code; + + return 0; +} +EXPORT_SYMBOL_GPL(qcom_smem_get_feature_code); + static int qcom_smem_get_sbl_version(struct qcom_smem *smem) { struct smem_header *header; diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c index a21241cbeec7..696c2a8387d0 100644 --- a/drivers/soc/qcom/smp2p.c +++ b/drivers/soc/qcom/smp2p.c @@ -16,6 +16,7 @@ #include <linux/platform_device.h> #include <linux/pm_wakeirq.h> #include <linux/regmap.h> +#include <linux/seq_file.h> #include <linux/soc/qcom/smem.h> #include <linux/soc/qcom/smem_state.h> #include <linux/spinlock.h> @@ -353,11 +354,19 @@ static int smp2p_set_irq_type(struct irq_data *irqd, unsigned int type) return 0; } +static void smp2p_irq_print_chip(struct irq_data *irqd, struct seq_file *p) +{ + struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd); + + seq_printf(p, " %8s", dev_name(entry->smp2p->dev)); +} + static struct irq_chip smp2p_irq_chip = { .name = "smp2p", .irq_mask = smp2p_mask_irq, .irq_unmask = smp2p_unmask_irq, .irq_set_type = smp2p_set_irq_type, + .irq_print_chip = smp2p_irq_print_chip, }; static int smp2p_irq_map(struct irq_domain *d, @@ -617,7 +626,7 @@ static int qcom_smp2p_probe(struct platform_device *pdev) ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, qcom_smp2p_intr, IRQF_ONESHOT, - "smp2p", (void *)smp2p); + NULL, (void *)smp2p); if (ret) { dev_err(&pdev->dev, "failed to request interrupt\n"); goto unwind_interfaces; diff --git a/drivers/soc/qcom/smsm.c b/drivers/soc/qcom/smsm.c index e7c7e9a640a6..ffe78ae34386 100644 --- a/drivers/soc/qcom/smsm.c +++ 
b/drivers/soc/qcom/smsm.c @@ -5,6 +5,7 @@ */ #include <linux/interrupt.h> +#include <linux/mailbox_client.h> #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of_irq.h> @@ -71,6 +72,7 @@ struct smsm_host; * @lock: spinlock for read-modify-write of the outgoing state * @entries: context for each of the entries * @hosts: context for each of the hosts + * @mbox_client: mailbox client handle */ struct qcom_smsm { struct device *dev; @@ -88,6 +90,8 @@ struct qcom_smsm { struct smsm_entry *entries; struct smsm_host *hosts; + + struct mbox_client mbox_client; }; /** @@ -120,11 +124,14 @@ struct smsm_entry { * @ipc_regmap: regmap for outgoing interrupt * @ipc_offset: offset in @ipc_regmap for outgoing interrupt * @ipc_bit: bit in @ipc_regmap + @ipc_offset for outgoing interrupt + * @mbox_chan: apcs ipc mailbox channel handle */ struct smsm_host { struct regmap *ipc_regmap; int ipc_offset; int ipc_bit; + + struct mbox_chan *mbox_chan; }; /** @@ -172,7 +179,13 @@ static int smsm_update_bits(void *data, u32 mask, u32 value) hostp = &smsm->hosts[host]; val = readl(smsm->subscription + host); - if (val & changes && hostp->ipc_regmap) { + if (!(val & changes)) + continue; + + if (hostp->mbox_chan) { + mbox_send_message(hostp->mbox_chan, NULL); + mbox_client_txdone(hostp->mbox_chan, 0); + } else if (hostp->ipc_regmap) { regmap_write(hostp->ipc_regmap, hostp->ipc_offset, BIT(hostp->ipc_bit)); @@ -353,6 +366,28 @@ static const struct irq_domain_ops smsm_irq_ops = { }; /** + * smsm_parse_mbox() - requests an mbox channel + * @smsm: smsm driver context + * @host_id: index of the remote host to be resolved + * + * Requests the desired channel using the mbox interface which is needed for + * sending the outgoing interrupts to a remove hosts - identified by @host_id. 
+ */ +static int smsm_parse_mbox(struct qcom_smsm *smsm, unsigned int host_id) +{ + struct smsm_host *host = &smsm->hosts[host_id]; + int ret = 0; + + host->mbox_chan = mbox_request_channel(&smsm->mbox_client, host_id); + if (IS_ERR(host->mbox_chan)) { + ret = PTR_ERR(host->mbox_chan); + host->mbox_chan = NULL; + } + + return ret; +} + +/** * smsm_parse_ipc() - parses a qcom,ipc-%d device tree property * @smsm: smsm driver context * @host_id: index of the remote host to be resolved @@ -521,8 +556,16 @@ static int qcom_smsm_probe(struct platform_device *pdev) "qcom,local-host", &smsm->local_host); + smsm->mbox_client.dev = &pdev->dev; + smsm->mbox_client.knows_txdone = true; + /* Parse the host properties */ for (id = 0; id < smsm->num_hosts; id++) { + /* Try using mbox interface first, otherwise fall back to syscon */ + ret = smsm_parse_mbox(smsm, id); + if (!ret) + continue; + ret = smsm_parse_ipc(smsm, id); if (ret < 0) goto out_put; @@ -609,6 +652,9 @@ unwind_interfaces: qcom_smem_state_unregister(smsm->state); out_put: + for (id = 0; id < smsm->num_hosts; id++) + mbox_free_channel(smsm->hosts[id].mbox_chan); + of_node_put(local_node); return ret; } @@ -622,6 +668,9 @@ static void qcom_smsm_remove(struct platform_device *pdev) if (smsm->entries[id].domain) irq_domain_remove(smsm->entries[id].domain); + for (id = 0; id < smsm->num_hosts; id++) + mbox_free_channel(smsm->hosts[id].mbox_chan); + qcom_smem_state_unregister(smsm->state); } diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c index 277c07a6603d..d7359a235e3c 100644 --- a/drivers/soc/qcom/socinfo.c +++ b/drivers/soc/qcom/socinfo.c @@ -21,14 +21,6 @@ #include <dt-bindings/arm/qcom,ids.h> -/* - * SoC version type with major number in the upper 16 bits and minor - * number in the lower 16 bits. 
- */ -#define SOCINFO_MAJOR(ver) (((ver) >> 16) & 0xffff) -#define SOCINFO_MINOR(ver) ((ver) & 0xffff) -#define SOCINFO_VERSION(maj, min) ((((maj) & 0xffff) << 16)|((min) & 0xffff)) - /* Helper macros to create soc_id table */ #define qcom_board_id(id) QCOM_ID_ ## id, __stringify(id) #define qcom_board_id_named(id, name) QCOM_ID_ ## id, (name) @@ -124,6 +116,7 @@ static const char *const pmic_models[] = { [50] = "PM8350B", [51] = "PMR735A", [52] = "PMR735B", + [54] = "PM6350", [55] = "PM4125", [58] = "PM8450", [65] = "PM8010", @@ -133,7 +126,8 @@ static const char *const pmic_models[] = { [72] = "PMR735D", [73] = "PM8550", [74] = "PMK8550", - [82] = "SMB2360", + [82] = "PMC8380", + [83] = "SMB2360", }; struct socinfo_params { @@ -348,6 +342,7 @@ static const struct soc_id soc_id[] = { { qcom_board_id(SDA630) }, { qcom_board_id(MSM8905) }, { qcom_board_id(SDX202) }, + { qcom_board_id(SDM670) }, { qcom_board_id(SDM450) }, { qcom_board_id(SM8150) }, { qcom_board_id(SDA845) }, @@ -445,6 +440,7 @@ static const struct soc_id soc_id[] = { { qcom_board_id(QCS8550) }, { qcom_board_id(QCM8550) }, { qcom_board_id(IPQ5300) }, + { qcom_board_id(IPQ5321) }, }; static const char *socinfo_machine(struct device *dev, unsigned int id) diff --git a/drivers/soc/qcom/spm.c b/drivers/soc/qcom/spm.c index 06e2c4c2a4a8..f75659fff287 100644 --- a/drivers/soc/qcom/spm.c +++ b/drivers/soc/qcom/spm.c @@ -572,4 +572,5 @@ static int __init qcom_spm_init(void) } arch_initcall(qcom_spm_init); +MODULE_DESCRIPTION("Qualcomm Subsystem Power Manager (SPM)"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/qcom/wcnss_ctrl.c b/drivers/soc/qcom/wcnss_ctrl.c index 148bcbac332d..62b424e90d90 100644 --- a/drivers/soc/qcom/wcnss_ctrl.c +++ b/drivers/soc/qcom/wcnss_ctrl.c @@ -3,6 +3,7 @@ * Copyright (c) 2016, Linaro Ltd. * Copyright (c) 2015, Sony Mobile Communications Inc. 
*/ +#include <linux/cleanup.h> #include <linux/firmware.h> #include <linux/module.h> #include <linux/slab.h> @@ -198,7 +199,6 @@ static int wcnss_request_version(struct wcnss_ctrl *wcnss) */ static int wcnss_download_nv(struct wcnss_ctrl *wcnss, bool *expect_cbc) { - struct wcnss_download_nv_req *req; const struct firmware *fw; struct device *dev = wcnss->dev; const char *nvbin = NVBIN_FILE; @@ -206,18 +206,19 @@ static int wcnss_download_nv(struct wcnss_ctrl *wcnss, bool *expect_cbc) ssize_t left; int ret; - req = kzalloc(sizeof(*req) + NV_FRAGMENT_SIZE, GFP_KERNEL); + struct wcnss_download_nv_req *req __free(kfree) = kzalloc(sizeof(*req) + NV_FRAGMENT_SIZE, + GFP_KERNEL); if (!req) return -ENOMEM; ret = of_property_read_string(dev->of_node, "firmware-name", &nvbin); if (ret < 0 && ret != -EINVAL) - goto free_req; + return ret; ret = request_firmware(&fw, nvbin, dev); if (ret < 0) { dev_err(dev, "Failed to load nv file %s: %d\n", nvbin, ret); - goto free_req; + return ret; } data = fw->data; @@ -263,8 +264,6 @@ static int wcnss_download_nv(struct wcnss_ctrl *wcnss, bool *expect_cbc) release_fw: release_firmware(fw); -free_req: - kfree(req); return ret; } diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c index fd8b6ac06656..a0123070a816 100644 --- a/drivers/soc/samsung/exynos-pmu.c +++ b/drivers/soc/samsung/exynos-pmu.c @@ -129,14 +129,30 @@ static int tensor_set_bits_atomic(void *ctx, unsigned int offset, u32 val, return ret; } -static int tensor_sec_update_bits(void *ctx, unsigned int reg, - unsigned int mask, unsigned int val) +static bool tensor_is_atomic(unsigned int reg) { /* * Use atomic operations for PMU_ALIVE registers (offset 0~0x3FFF) - * as the target registers can be accessed by multiple masters. + * as the target registers can be accessed by multiple masters. SFRs + * that don't support atomic are added to the switch statement below. 
*/ if (reg > PMUALIVE_MASK) + return false; + + switch (reg) { + case GS101_SYSIP_DAT0: + case GS101_SYSTEM_CONFIGURATION: + return false; + default: + return true; + } +} + +static int tensor_sec_update_bits(void *ctx, unsigned int reg, + unsigned int mask, unsigned int val) +{ + + if (!tensor_is_atomic(reg)) return tensor_sec_reg_rmw(ctx, reg, mask, val); return tensor_set_bits_atomic(ctx, reg, val, mask); diff --git a/drivers/soc/sunxi/sunxi_sram.c b/drivers/soc/sunxi/sunxi_sram.c index 6eb6cf06278e..2781a091a6a6 100644 --- a/drivers/soc/sunxi/sunxi_sram.c +++ b/drivers/soc/sunxi/sunxi_sram.c @@ -33,7 +33,6 @@ struct sunxi_sram_data { u8 offset; u8 width; struct sunxi_sram_func *func; - struct list_head list; }; struct sunxi_sram_desc { @@ -103,7 +102,6 @@ static const struct of_device_id sunxi_sram_dt_ids[] = { }; static struct device *sram_dev; -static LIST_HEAD(claimed_sram); static DEFINE_SPINLOCK(sram_lock); static void __iomem *base; @@ -346,7 +344,7 @@ static void sunxi_sram_unlock(void *_lock) spin_unlock(lock); } -static struct regmap_config sunxi_sram_regmap_config = { +static const struct regmap_config sunxi_sram_regmap_config = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c index 91d0ad6ddefc..6c37d6eb8b49 100644 --- a/drivers/soc/tegra/pmc.c +++ b/drivers/soc/tegra/pmc.c @@ -2891,15 +2891,11 @@ static int tegra_pmc_probe(struct platform_device *pdev) pmc->aotag = base; pmc->scratch = base; } else { - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "wake"); - pmc->wake = devm_ioremap_resource(&pdev->dev, res); + pmc->wake = devm_platform_ioremap_resource_byname(pdev, "wake"); if (IS_ERR(pmc->wake)) return PTR_ERR(pmc->wake); - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "aotag"); - pmc->aotag = devm_ioremap_resource(&pdev->dev, res); + pmc->aotag = devm_platform_ioremap_resource_byname(pdev, "aotag"); if (IS_ERR(pmc->aotag)) return PTR_ERR(pmc->aotag); diff --git a/drivers/soc/ti/k3-socinfo.c b/drivers/soc/ti/k3-socinfo.c index 59101bf7cf23..4fb0f0a24828 100644 --- a/drivers/soc/ti/k3-socinfo.c +++ b/drivers/soc/ti/k3-socinfo.c @@ -61,7 +61,7 @@ static const struct k3_soc_id { }; static const char * const j721e_rev_string_map[] = { - "1.0", "1.1", + "1.0", "1.1", "2.0", }; static int diff --git a/drivers/soc/ti/knav_qmss.h b/drivers/soc/ti/knav_qmss.h index a01eda720bf6..9325e8ce2e25 100644 --- a/drivers/soc/ti/knav_qmss.h +++ b/drivers/soc/ti/knav_qmss.h @@ -333,7 +333,7 @@ struct knav_range_info { void *queue_base_inst; unsigned flags; struct list_head list; - struct knav_range_ops *ops; + const struct knav_range_ops *ops; struct knav_acc_info acc_info; struct knav_acc_channel *acc; unsigned num_irqs; diff --git a/drivers/soc/ti/knav_qmss_acc.c b/drivers/soc/ti/knav_qmss_acc.c index 3d388646ed43..269b4e75ae40 100644 --- a/drivers/soc/ti/knav_qmss_acc.c +++ b/drivers/soc/ti/knav_qmss_acc.c @@ -450,7 +450,7 @@ static int knav_acc_free_range(struct knav_range_info *range) return 0; } -static struct knav_range_ops knav_acc_range_ops = { +static const struct knav_range_ops knav_acc_range_ops = { .set_notify = knav_acc_set_notify, .init_queue = knav_acc_init_queue, .open_queue = knav_acc_open_queue, diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c index 06fb5505c22c..f2055a76f84c 100644 --- a/drivers/soc/ti/knav_qmss_queue.c +++ b/drivers/soc/ti/knav_qmss_queue.c @@ -411,7 +411,7 @@ static int knav_gp_close_queue(struct knav_range_info *range, return 0; } 
-static struct knav_range_ops knav_gp_range_ops = { +static const struct knav_range_ops knav_gp_range_ops = { .set_notify = knav_gp_set_notify, .open_queue = knav_gp_open_queue, .close_queue = knav_gp_close_queue, diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c index 8e983c3c4e03..3a56bbf3268a 100644 --- a/drivers/soc/ti/pm33xx.c +++ b/drivers/soc/ti/pm33xx.c @@ -450,14 +450,14 @@ static int am33xx_pm_rtc_setup(void) rtc_base_virt = of_iomap(np, 0); if (!rtc_base_virt) { - pr_warn("PM: could not iomap rtc"); + pr_warn("PM: could not iomap rtc\n"); error = -ENODEV; goto err_clk_put; } omap_rtc = rtc_class_open("rtc0"); if (!omap_rtc) { - pr_warn("PM: rtc0 not available"); + pr_warn("PM: rtc0 not available\n"); error = -EPROBE_DEFER; goto err_iounmap; } diff --git a/drivers/soc/xilinx/xlnx_event_manager.c b/drivers/soc/xilinx/xlnx_event_manager.c index 253299e4214d..f529e1346247 100644 --- a/drivers/soc/xilinx/xlnx_event_manager.c +++ b/drivers/soc/xilinx/xlnx_event_manager.c @@ -3,6 +3,7 @@ * Xilinx Event Management Driver * * Copyright (C) 2021 Xilinx, Inc. + * Copyright (C) 2024 Advanced Micro Devices, Inc. * * Abhyuday Godhasara <abhyuday.godhasara@xilinx.com> */ @@ -19,7 +20,7 @@ #include <linux/platform_device.h> #include <linux/slab.h> -static DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number1); +static DEFINE_PER_CPU_READ_MOSTLY(int, dummy_cpu_number); static int virq_sgi; static int event_manager_availability = -EACCES; @@ -35,7 +36,6 @@ static int event_manager_availability = -EACCES; #define MAX_BITS (32U) /* Number of bits available for error mask */ -#define FIRMWARE_VERSION_MASK (0xFFFFU) #define REGISTER_NOTIFIER_FIRMWARE_VERSION (2U) static DEFINE_HASHTABLE(reg_driver_map, REGISTERED_DRIVER_MAX_ORDER); @@ -570,7 +570,6 @@ static void xlnx_disable_percpu_irq(void *data) static int xlnx_event_init_sgi(struct platform_device *pdev) { int ret = 0; - int cpu; /* * IRQ related structures are used for the following: * for each SGI interrupt ensure its mapped by GIC IRQ domain @@ -607,11 +606,8 @@ static int xlnx_event_init_sgi(struct platform_device *pdev) sgi_fwspec.param[0] = sgi_num; virq_sgi = irq_create_fwspec_mapping(&sgi_fwspec); - cpu = get_cpu(); - per_cpu(cpu_number1, cpu) = cpu; ret = request_percpu_irq(virq_sgi, xlnx_event_handler, "xlnx_event_mgmt", - &cpu_number1); - put_cpu(); + &dummy_cpu_number); WARN_ON(ret); if (ret) { @@ -627,16 +623,12 @@ static int xlnx_event_init_sgi(struct platform_device *pdev) static void xlnx_event_cleanup_sgi(struct platform_device *pdev) { - int cpu = smp_processor_id(); - - per_cpu(cpu_number1, cpu) = cpu; - cpuhp_remove_state(CPUHP_AP_ONLINE_DYN); on_each_cpu(xlnx_disable_percpu_irq, NULL, 1); irq_clear_status_flags(virq_sgi, IRQ_PER_CPU); - free_percpu_irq(virq_sgi, &cpu_number1); + free_percpu_irq(virq_sgi, &dummy_cpu_number); irq_dispose_mapping(virq_sgi); } diff --git a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c index 965b1143936a..411d33f2fb05 100644 --- a/drivers/soc/xilinx/zynqmp_power.c +++ b/drivers/soc/xilinx/zynqmp_power.c @@ -30,9 +30,27 @@ struct zynqmp_pm_work_struct { u32 args[CB_ARG_CNT]; }; -static struct zynqmp_pm_work_struct *zynqmp_pm_init_suspend_work; +/** + * struct zynqmp_pm_event_info - event related information + * @cb_fun: Function pointer to store the callback function. + * @cb_type: Type of callback from pm_api_cb_id, + * PM_NOTIFY_CB - for Error Events, + * PM_INIT_SUSPEND_CB - for suspend callback. + * @node_id: Node-Id related to event. 
+ * @event: Event Mask for the Error Event. + * @wake: Flag specifying whether the subsystem should be woken upon + * event notification. + */ +struct zynqmp_pm_event_info { + event_cb_func_t cb_fun; + enum pm_api_cb_id cb_type; + u32 node_id; + u32 event; + bool wake; +}; + +static struct zynqmp_pm_work_struct *zynqmp_pm_init_suspend_work, *zynqmp_pm_init_restart_work; static struct mbox_chan *rx_chan; -static bool event_registered; enum pm_suspend_mode { PM_SUSPEND_MODE_FIRST = 0, @@ -54,6 +72,19 @@ static void zynqmp_pm_get_callback_data(u32 *buf) zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, buf, 0); } +static void subsystem_restart_event_callback(const u32 *payload, void *data) +{ + /* First element is callback API ID, others are callback arguments */ + if (work_pending(&zynqmp_pm_init_restart_work->callback_work)) + return; + + /* Copy callback arguments into work's structure */ + memcpy(zynqmp_pm_init_restart_work->args, &payload[0], + sizeof(zynqmp_pm_init_restart_work->args)); + + queue_work(system_unbound_wq, &zynqmp_pm_init_restart_work->callback_work); +} + static void suspend_event_callback(const u32 *payload, void *data) { /* First element is callback API ID, others are callback arguments */ @@ -120,6 +151,37 @@ static void ipi_receive_callback(struct mbox_client *cl, void *data) } /** + * zynqmp_pm_subsystem_restart_work_fn - Initiate Subsystem restart + * @work: Pointer to work_struct + * + * Bottom-half of PM callback IRQ handler. + */ +static void zynqmp_pm_subsystem_restart_work_fn(struct work_struct *work) +{ + int ret; + struct zynqmp_pm_work_struct *pm_work = container_of(work, struct zynqmp_pm_work_struct, + callback_work); + + /* First element is callback API ID, others are callback arguments */ + if (pm_work->args[0] == PM_NOTIFY_CB) { + if (pm_work->args[2] == EVENT_SUBSYSTEM_RESTART) { + ret = zynqmp_pm_system_shutdown(ZYNQMP_PM_SHUTDOWN_TYPE_SETSCOPE_ONLY, + ZYNQMP_PM_SHUTDOWN_SUBTYPE_SUBSYSTEM); + if (ret) { + pr_err("unable to set shutdown scope\n"); + return; + } + + kernel_restart(NULL); + } else { + pr_err("%s Unsupported Event - %d\n", __func__, pm_work->args[2]); + } + } else { + pr_err("%s() Unsupported Callback %d\n", __func__, pm_work->args[0]); + } +} + +/** * zynqmp_pm_init_suspend_work_fn - Initialize suspend * @work: Pointer to work_struct * @@ -184,13 +246,51 @@ static ssize_t suspend_mode_store(struct device *dev, static DEVICE_ATTR_RW(suspend_mode); +static void unregister_event(struct device *dev, void *res) +{ + struct zynqmp_pm_event_info *event_info = res; + + xlnx_unregister_event(event_info->cb_type, event_info->node_id, + event_info->event, event_info->cb_fun, NULL); +} + +static int register_event(struct device *dev, const enum pm_api_cb_id cb_type, const u32 node_id, + const u32 event, const bool wake, event_cb_func_t cb_fun) +{ + int ret; + struct zynqmp_pm_event_info *event_info; + + event_info = devres_alloc(unregister_event, sizeof(struct zynqmp_pm_event_info), + GFP_KERNEL); + if (!event_info) + return -ENOMEM; + + event_info->cb_type = cb_type; + event_info->node_id = node_id; + event_info->event = event; + event_info->wake = wake; + event_info->cb_fun = cb_fun; + + ret = xlnx_register_event(event_info->cb_type, event_info->node_id, + event_info->event, event_info->wake, event_info->cb_fun, NULL); + if (ret) { + devres_free(event_info); + return ret; + } + + devres_add(dev, event_info); + return 0; +} + static int zynqmp_pm_probe(struct platform_device *pdev) { int ret, irq; - u32 pm_api_version; + u32 pm_api_version, pm_family_code, 
pm_sub_family_code, node_id; struct mbox_client *client; - zynqmp_pm_get_api_version(&pm_api_version); + ret = zynqmp_pm_get_api_version(&pm_api_version); + if (ret) + return ret; /* Check PM API version number */ if (pm_api_version < ZYNQMP_PM_VERSION) @@ -203,21 +303,43 @@ static int zynqmp_pm_probe(struct platform_device *pdev) * is not available to use) or -ENODEV(Xilinx Event Manager not compiled), * then use ipi-mailbox or interrupt method. */ - ret = xlnx_register_event(PM_INIT_SUSPEND_CB, 0, 0, false, - suspend_event_callback, NULL); + ret = register_event(&pdev->dev, PM_INIT_SUSPEND_CB, 0, 0, false, + suspend_event_callback); if (!ret) { zynqmp_pm_init_suspend_work = devm_kzalloc(&pdev->dev, sizeof(struct zynqmp_pm_work_struct), GFP_KERNEL); - if (!zynqmp_pm_init_suspend_work) { - xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0, - suspend_event_callback, NULL); + if (!zynqmp_pm_init_suspend_work) return -ENOMEM; - } - event_registered = true; INIT_WORK(&zynqmp_pm_init_suspend_work->callback_work, zynqmp_pm_init_suspend_work_fn); + + ret = zynqmp_pm_get_family_info(&pm_family_code, &pm_sub_family_code); + if (ret < 0) + return ret; + + if (pm_sub_family_code == VERSALNET_SUB_FAMILY_CODE) + node_id = PM_DEV_ACPU_0_0; + else + node_id = PM_DEV_ACPU_0; + + ret = register_event(&pdev->dev, PM_NOTIFY_CB, node_id, EVENT_SUBSYSTEM_RESTART, + false, subsystem_restart_event_callback); + if (ret) { + dev_err(&pdev->dev, "Failed to Register with Xilinx Event manager %d\n", + ret); + return ret; + } + + zynqmp_pm_init_restart_work = devm_kzalloc(&pdev->dev, + sizeof(struct zynqmp_pm_work_struct), + GFP_KERNEL); + if (!zynqmp_pm_init_restart_work) + return -ENOMEM; + + INIT_WORK(&zynqmp_pm_init_restart_work->callback_work, + zynqmp_pm_subsystem_restart_work_fn); } else if (ret != -EACCES && ret != -ENODEV) { dev_err(&pdev->dev, "Failed to Register with Xilinx Event manager %d\n", ret); return ret; @@ -264,15 +386,8 @@ static int zynqmp_pm_probe(struct platform_device *pdev) } ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr); - if (ret) { - if (event_registered) { - xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0, suspend_event_callback, - NULL); - event_registered = false; - } - dev_err(&pdev->dev, "unable to create sysfs interface\n"); + if (ret) return ret; - } return 0; } @@ -280,8 +395,6 @@ static int zynqmp_pm_probe(struct platform_device *pdev) static void zynqmp_pm_remove(struct platform_device *pdev) { sysfs_remove_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr); - if (event_registered) - xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0, suspend_event_callback, NULL); if (!rx_chan) mbox_free_channel(rx_chan); diff --git a/drivers/tee/optee/notif.c b/drivers/tee/optee/notif.c index 0d7878e770cd..1970880c796f 100644 --- a/drivers/tee/optee/notif.c +++ b/drivers/tee/optee/notif.c @@ -29,7 +29,7 @@ static bool have_key(struct optee *optee, u_int key) return false; } -int optee_notif_wait(struct optee *optee, u_int key) +int optee_notif_wait(struct optee *optee, u_int key, u32 timeout) { unsigned long flags; struct notif_entry *entry; @@ -70,7 +70,12 @@ int optee_notif_wait(struct optee *optee, u_int key) * Unlock temporarily and wait for completion. 
*/ spin_unlock_irqrestore(&optee->notif.lock, flags); - wait_for_completion(&entry->c); + if (timeout != 0) { + if (!wait_for_completion_timeout(&entry->c, timeout)) + rc = -ETIMEDOUT; + } else { + wait_for_completion(&entry->c); + } spin_lock_irqsave(&optee->notif.lock, flags); list_del(&entry->link); diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h index 429cc20be5cc..424898cdc4e9 100644 --- a/drivers/tee/optee/optee_private.h +++ b/drivers/tee/optee/optee_private.h @@ -26,6 +26,9 @@ #define TEEC_ERROR_BUSY 0xFFFF000D #define TEEC_ERROR_SHORT_BUFFER 0xFFFF0010 +/* API Return Codes are from the GP TEE Internal Core API Specification */ +#define TEE_ERROR_TIMEOUT 0xFFFF3001 + #define TEEC_ORIGIN_COMMS 0x00000002 /* @@ -252,7 +255,7 @@ struct optee_call_ctx { int optee_notif_init(struct optee *optee, u_int max_key); void optee_notif_uninit(struct optee *optee); -int optee_notif_wait(struct optee *optee, u_int key); +int optee_notif_wait(struct optee *optee, u_int key, u32 timeout); int optee_notif_send(struct optee *optee, u_int key); u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params, diff --git a/drivers/tee/optee/optee_rpc_cmd.h b/drivers/tee/optee/optee_rpc_cmd.h index f3f06e0994a7..4576751b490c 100644 --- a/drivers/tee/optee/optee_rpc_cmd.h +++ b/drivers/tee/optee/optee_rpc_cmd.h @@ -41,6 +41,7 @@ * Waiting on notification * [in] value[0].a OPTEE_RPC_NOTIFICATION_WAIT * [in] value[0].b notification value + * [in] value[0].c timeout in milliseconds or 0 if no timeout * * Sending a synchronous notification * [in] value[0].a OPTEE_RPC_NOTIFICATION_SEND diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c index f086812f1179..5de4504665be 100644 --- a/drivers/tee/optee/rpc.c +++ b/drivers/tee/optee/rpc.c @@ -130,6 +130,8 @@ static void handle_rpc_func_cmd_i2c_transfer(struct tee_context *ctx, static void handle_rpc_func_cmd_wq(struct optee *optee, struct optee_msg_arg *arg) { + int rc = 0; + if (arg->num_params != 1) goto bad; @@ -139,7 +141,8 @@ static void handle_rpc_func_cmd_wq(struct optee *optee, switch (arg->params[0].u.value.a) { case OPTEE_RPC_NOTIFICATION_WAIT: - if (optee_notif_wait(optee, arg->params[0].u.value.b)) + rc = optee_notif_wait(optee, arg->params[0].u.value.b, arg->params[0].u.value.c); + if (rc) goto bad; break; case OPTEE_RPC_NOTIFICATION_SEND: @@ -153,7 +156,10 @@ static void handle_rpc_func_cmd_wq(struct optee *optee, arg->ret = TEEC_SUCCESS; return; bad: - arg->ret = TEEC_ERROR_BAD_PARAMETERS; + if (rc == -ETIMEDOUT) + arg->ret = TEE_ERROR_TIMEOUT; + else + arg->ret = TEEC_ERROR_BAD_PARAMETERS; } static void handle_rpc_func_cmd_wait(struct optee_msg_arg *arg) |